/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2018 Mellanox Technologies, Ltd
 */

#ifndef RTE_PMD_MLX5_FLOW_H_
#define RTE_PMD_MLX5_FLOW_H_

#include <netinet/in.h>
#include <sys/queue.h>
#include <stdalign.h>
#include <stdint.h>
#include <string.h>

/* Verbs header. */
/* ISO C doesn't support unnamed structs/unions, disabling -pedantic. */
#ifdef PEDANTIC
#pragma GCC diagnostic ignored "-Wpedantic"
#endif
#include <infiniband/verbs.h>
#ifdef PEDANTIC
#pragma GCC diagnostic error "-Wpedantic"
#endif

#include "mlx5.h"
#include "mlx5_prm.h"

/* Pattern outer Layer bits. */
#define MLX5_FLOW_LAYER_OUTER_L2 (1u << 0)
#define MLX5_FLOW_LAYER_OUTER_L3_IPV4 (1u << 1)
#define MLX5_FLOW_LAYER_OUTER_L3_IPV6 (1u << 2)
#define MLX5_FLOW_LAYER_OUTER_L4_UDP (1u << 3)
#define MLX5_FLOW_LAYER_OUTER_L4_TCP (1u << 4)
#define MLX5_FLOW_LAYER_OUTER_VLAN (1u << 5)

/* Pattern inner Layer bits. */
#define MLX5_FLOW_LAYER_INNER_L2 (1u << 6)
#define MLX5_FLOW_LAYER_INNER_L3_IPV4 (1u << 7)
#define MLX5_FLOW_LAYER_INNER_L3_IPV6 (1u << 8)
#define MLX5_FLOW_LAYER_INNER_L4_UDP (1u << 9)
#define MLX5_FLOW_LAYER_INNER_L4_TCP (1u << 10)
#define MLX5_FLOW_LAYER_INNER_VLAN (1u << 11)

/* Pattern tunnel Layer bits. */
#define MLX5_FLOW_LAYER_VXLAN (1u << 12)
#define MLX5_FLOW_LAYER_VXLAN_GPE (1u << 13)
#define MLX5_FLOW_LAYER_GRE (1u << 14)
#define MLX5_FLOW_LAYER_MPLS (1u << 15)

/* General pattern items bits. */
#define MLX5_FLOW_ITEM_METADATA (1u << 16)

/* Outer Masks. */
#define MLX5_FLOW_LAYER_OUTER_L3 \
	(MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
#define MLX5_FLOW_LAYER_OUTER_L4 \
	(MLX5_FLOW_LAYER_OUTER_L4_UDP | MLX5_FLOW_LAYER_OUTER_L4_TCP)
#define MLX5_FLOW_LAYER_OUTER \
	(MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_L3 | \
	 MLX5_FLOW_LAYER_OUTER_L4)

/* Tunnel Masks. */
#define MLX5_FLOW_LAYER_TUNNEL \
	(MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \
	 MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_MPLS)

/* Inner Masks. */
#define MLX5_FLOW_LAYER_INNER_L3 \
	(MLX5_FLOW_LAYER_INNER_L3_IPV4 | MLX5_FLOW_LAYER_INNER_L3_IPV6)
#define MLX5_FLOW_LAYER_INNER_L4 \
	(MLX5_FLOW_LAYER_INNER_L4_UDP | MLX5_FLOW_LAYER_INNER_L4_TCP)
#define MLX5_FLOW_LAYER_INNER \
	(MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_L3 | \
	 MLX5_FLOW_LAYER_INNER_L4)

/* Layer Masks (outer and inner combined). */
#define MLX5_FLOW_LAYER_L2 \
	(MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_INNER_L2)
#define MLX5_FLOW_LAYER_L3_IPV4 \
	(MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_INNER_L3_IPV4)
#define MLX5_FLOW_LAYER_L3_IPV6 \
	(MLX5_FLOW_LAYER_OUTER_L3_IPV6 | MLX5_FLOW_LAYER_INNER_L3_IPV6)
#define MLX5_FLOW_LAYER_L3 \
	(MLX5_FLOW_LAYER_L3_IPV4 | MLX5_FLOW_LAYER_L3_IPV6)
#define MLX5_FLOW_LAYER_L4 \
	(MLX5_FLOW_LAYER_OUTER_L4 | MLX5_FLOW_LAYER_INNER_L4)

/* Actions */
#define MLX5_FLOW_ACTION_DROP (1u << 0)
#define MLX5_FLOW_ACTION_QUEUE (1u << 1)
#define MLX5_FLOW_ACTION_RSS (1u << 2)
#define MLX5_FLOW_ACTION_FLAG (1u << 3)
#define MLX5_FLOW_ACTION_MARK (1u << 4)
#define MLX5_FLOW_ACTION_COUNT (1u << 5)
#define MLX5_FLOW_ACTION_PORT_ID (1u << 6)
#define MLX5_FLOW_ACTION_OF_POP_VLAN (1u << 7)
#define MLX5_FLOW_ACTION_OF_PUSH_VLAN (1u << 8)
#define MLX5_FLOW_ACTION_OF_SET_VLAN_VID (1u << 9)
#define MLX5_FLOW_ACTION_OF_SET_VLAN_PCP (1u << 10)
#define MLX5_FLOW_ACTION_SET_IPV4_SRC (1u << 11)
#define MLX5_FLOW_ACTION_SET_IPV4_DST (1u << 12)
#define MLX5_FLOW_ACTION_SET_IPV6_SRC (1u << 13)
#define MLX5_FLOW_ACTION_SET_IPV6_DST (1u << 14)
#define MLX5_FLOW_ACTION_SET_TP_SRC (1u << 15)
#define MLX5_FLOW_ACTION_SET_TP_DST (1u << 16)
#define MLX5_FLOW_ACTION_JUMP (1u << 17)
#define MLX5_FLOW_ACTION_SET_TTL (1u << 18)
#define MLX5_FLOW_ACTION_DEC_TTL (1u << 19)
#define MLX5_FLOW_ACTION_SET_MAC_SRC (1u << 20)
#define MLX5_FLOW_ACTION_SET_MAC_DST (1u << 21)
#define MLX5_FLOW_ACTION_VXLAN_ENCAP (1u << 22)
#define MLX5_FLOW_ACTION_VXLAN_DECAP (1u << 23)
#define MLX5_FLOW_ACTION_NVGRE_ENCAP (1u << 24)
#define MLX5_FLOW_ACTION_NVGRE_DECAP (1u << 25)
#define MLX5_FLOW_ACTION_RAW_ENCAP (1u << 26)
#define MLX5_FLOW_ACTION_RAW_DECAP (1u << 27)

/* Actions that decide the fate of a packet (mutually exclusive). */
#define MLX5_FLOW_FATE_ACTIONS \
	(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE | MLX5_FLOW_ACTION_RSS)

#define MLX5_FLOW_ENCAP_ACTIONS	(MLX5_FLOW_ACTION_VXLAN_ENCAP | \
				 MLX5_FLOW_ACTION_NVGRE_ENCAP | \
				 MLX5_FLOW_ACTION_RAW_ENCAP)

#define MLX5_FLOW_DECAP_ACTIONS	(MLX5_FLOW_ACTION_VXLAN_DECAP | \
				 MLX5_FLOW_ACTION_NVGRE_DECAP | \
				 MLX5_FLOW_ACTION_RAW_DECAP)

#define MLX5_FLOW_MODIFY_HDR_ACTIONS (MLX5_FLOW_ACTION_SET_IPV4_SRC | \
				      MLX5_FLOW_ACTION_SET_IPV4_DST | \
				      MLX5_FLOW_ACTION_SET_IPV6_SRC | \
				      MLX5_FLOW_ACTION_SET_IPV6_DST | \
				      MLX5_FLOW_ACTION_SET_TP_SRC | \
				      MLX5_FLOW_ACTION_SET_TP_DST | \
				      MLX5_FLOW_ACTION_SET_TTL | \
				      MLX5_FLOW_ACTION_DEC_TTL | \
				      MLX5_FLOW_ACTION_SET_MAC_SRC | \
				      MLX5_FLOW_ACTION_SET_MAC_DST)

/* Not all C libraries define IPPROTO_MPLS (MPLS-in-IP, RFC 4023). */
#ifndef IPPROTO_MPLS
#define IPPROTO_MPLS 137
#endif

/* UDP port number for MPLS */
#define MLX5_UDP_PORT_MPLS 6635

/* UDP port numbers for VxLAN. */
#define MLX5_UDP_PORT_VXLAN 4789
#define MLX5_UDP_PORT_VXLAN_GPE 4790

/* Priority reserved for default flows. */
#define MLX5_FLOW_PRIO_RSVD ((uint32_t)-1)

/*
 * Number of sub priorities.
 * For each kind of pattern matching i.e. L2, L3, L4 to have a correct
 * matching on the NIC (firmware dependent) L4 must have the higher priority
 * followed by L3 and ending with L2.
 */
#define MLX5_PRIORITY_MAP_L2 2
#define MLX5_PRIORITY_MAP_L3 1
#define MLX5_PRIORITY_MAP_L4 0
#define MLX5_PRIORITY_MAP_MAX 3

/* Valid layer type for IPV4 RSS. */
#define MLX5_IPV4_LAYER_TYPES \
	(ETH_RSS_IPV4 | ETH_RSS_FRAG_IPV4 | \
	 ETH_RSS_NONFRAG_IPV4_TCP | ETH_RSS_NONFRAG_IPV4_UDP | \
	 ETH_RSS_NONFRAG_IPV4_OTHER)

/* IBV hash source bits for IPV4. */
#define MLX5_IPV4_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4)

/* Valid layer type for IPV6 RSS. */
#define MLX5_IPV6_LAYER_TYPES \
	(ETH_RSS_IPV6 | ETH_RSS_FRAG_IPV6 | ETH_RSS_NONFRAG_IPV6_TCP | \
	 ETH_RSS_NONFRAG_IPV6_UDP | ETH_RSS_IPV6_EX | ETH_RSS_IPV6_TCP_EX | \
	 ETH_RSS_IPV6_UDP_EX | ETH_RSS_NONFRAG_IPV6_OTHER)

/* IBV hash source bits for IPV6. */
#define MLX5_IPV6_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6)

/* Flow engine back-ends; see struct mlx5_flow_driver_ops below. */
enum mlx5_flow_drv_type {
	MLX5_FLOW_TYPE_MIN,
	MLX5_FLOW_TYPE_DV,
	MLX5_FLOW_TYPE_TCF,
	MLX5_FLOW_TYPE_VERBS,
	MLX5_FLOW_TYPE_MAX,
};

/* Matcher PRM representation */
struct mlx5_flow_dv_match_params {
	size_t size;
	/**< Size of match value. Do NOT split size and key! */
	uint32_t buf[MLX5_ST_SZ_DW(fte_match_param)];
	/**< Matcher value. This value is used as the mask or as a key. */
};

/* Matcher structure. */
struct mlx5_flow_dv_matcher {
	LIST_ENTRY(mlx5_flow_dv_matcher) next;
	/* Pointer to the next element. */
	rte_atomic32_t refcnt; /**< Reference counter. */
	void *matcher_object; /**< Pointer to DV matcher */
	uint16_t crc; /**< CRC of key. */
	uint16_t priority; /**< Priority of matcher. */
	uint8_t egress; /**< Egress matcher. */
	struct mlx5_flow_dv_match_params mask; /**< Matcher mask. */
};

/* Max size in bytes of an encap/decap header buffer. */
#define MLX5_ENCAP_MAX_LEN 132

/* Encap/decap resource structure. */
struct mlx5_flow_dv_encap_decap_resource {
	LIST_ENTRY(mlx5_flow_dv_encap_decap_resource) next;
	/* Pointer to next element. */
	rte_atomic32_t refcnt; /**< Reference counter. */
	struct ibv_flow_action *verbs_action;
	/**< Verbs encap/decap action object. */
	uint8_t buf[MLX5_ENCAP_MAX_LEN]; /**< Raw header data. */
	size_t size; /**< Number of valid bytes in buf. */
	uint8_t reformat_type; /**< Encap/decap reformat type. */
	uint8_t ft_type; /**< Flow table type, Rx or Tx. */
};

/* Number of modification commands. */
#define MLX5_MODIFY_NUM 8

/* Modify resource structure */
struct mlx5_flow_dv_modify_hdr_resource {
	LIST_ENTRY(mlx5_flow_dv_modify_hdr_resource) next;
	/* Pointer to next element. */
	rte_atomic32_t refcnt; /**< Reference counter. */
	struct ibv_flow_action *verbs_action;
	/**< Verbs modify header action object. */
	uint8_t ft_type; /**< Flow table type, Rx or Tx. */
	uint32_t actions_num; /**< Number of modification actions. */
	struct mlx5_modification_cmd actions[MLX5_MODIFY_NUM];
	/**< Modification actions. */
};

/*
 * Max number of actions per DV flow.
 * See CREATE_FLOW_MAX_FLOW_ACTIONS_SUPPORTED
 * In rdma-core file providers/mlx5/verbs.c
 */
#define MLX5_DV_MAX_NUMBER_OF_ACTIONS 8

/* DV flows structure. */
struct mlx5_flow_dv {
	uint64_t hash_fields; /**< Fields that participate in the hash. */
	struct mlx5_hrxq *hrxq; /**< Hash Rx queues. */
	/* Flow DV api: */
	struct mlx5_flow_dv_matcher *matcher; /**< Cache to matcher. */
	struct mlx5_flow_dv_match_params value;
	/**< Holds the value that the packet is compared to. */
	struct mlx5_flow_dv_encap_decap_resource *encap_decap;
	/**< Pointer to encap/decap resource in cache. */
	struct mlx5_flow_dv_modify_hdr_resource *modify_hdr;
	/**< Pointer to modify header resource in cache. */
	struct ibv_flow *flow; /**< Installed flow. */
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
	struct mlx5dv_flow_action_attr actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS];
	/**< Action list. */
#endif
	int actions_n; /**< number of actions. */
};

/** Linux TC flower driver for E-Switch flow. */
struct mlx5_flow_tcf {
	struct nlmsghdr *nlh; /**< Netlink message header. */
	struct tcmsg *tcm; /**< TC message within the Netlink message. */
	uint32_t *ptc_flags; /**< tc rule applied flags. */
	union {	/**< Tunnel encap/decap descriptor. */
		struct flow_tcf_tunnel_hdr *tunnel;
		struct flow_tcf_vxlan_decap *vxlan_decap;
		struct flow_tcf_vxlan_encap *vxlan_encap;
	};
	uint32_t applied:1; /**< Whether rule is currently applied. */
#ifndef NDEBUG
	uint32_t nlsize; /**< Size of NL message buffer for debug check. */
#endif
};

/* Verbs specification header. */
struct ibv_spec_header {
	enum ibv_flow_spec_type type;
	uint16_t size;
};

/** Handles information leading to a drop fate. */
struct mlx5_flow_verbs {
	LIST_ENTRY(mlx5_flow_verbs) next;
	unsigned int size; /**< Size of the attribute. */
	struct {
		struct ibv_flow_attr *attr;
		/**< Pointer to the Specification buffer. */
		uint8_t *specs; /**< Pointer to the specifications. */
	};
	struct ibv_flow *flow; /**< Verbs flow pointer. */
	struct mlx5_hrxq *hrxq; /**< Hash Rx queue object. */
	uint64_t hash_fields; /**< Verbs hash Rx queue hash fields. */
};

/** Device flow structure. */
struct mlx5_flow {
	LIST_ENTRY(mlx5_flow) next;
	struct rte_flow *flow; /**< Pointer to the main flow. */
	uint64_t layers;
	/**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */
	union {
#ifdef HAVE_IBV_FLOW_DV_SUPPORT
		struct mlx5_flow_dv dv;
#endif
		struct mlx5_flow_tcf tcf;
		struct mlx5_flow_verbs verbs;
	};
};

/* Counters information. */
struct mlx5_flow_counter {
	LIST_ENTRY(mlx5_flow_counter) next; /**< Pointer to the next counter. */
	uint32_t shared:1; /**< Share counter ID with other flow rules. */
	uint32_t ref_cnt:31; /**< Reference counter. */
	uint32_t id; /**< Counter ID. */
	union {	/**< Holds the counters for the rule. */
#if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42)
		struct ibv_counter_set *cs;
#elif defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
		struct ibv_counters *cs;
#endif
		struct mlx5_devx_counter_set *dcs;
	};
	uint64_t hits; /**< Number of packets matched by the rule. */
	uint64_t bytes; /**< Number of bytes matched by the rule. */
};

/* Flow structure. */
struct rte_flow {
	TAILQ_ENTRY(rte_flow) next; /**< Pointer to the next flow structure. */
	enum mlx5_flow_drv_type drv_type; /**< Driver type. */
	struct mlx5_flow_counter *counter; /**< Holds flow counter. */
	struct rte_flow_action_rss rss;/**< RSS context. */
	uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
	uint16_t (*queue)[]; /**< Destination queues to redirect traffic to. */
	LIST_HEAD(dev_flows, mlx5_flow) dev_flows;
	/**< Device flows that are part of the flow. */
	uint64_t actions;
	/**< Bit-fields of detected actions, see MLX5_FLOW_ACTION_*. */
	struct mlx5_fdir *fdir; /**< Pointer to associated FDIR if any. */
};

/* Flow driver operation prototypes, see struct mlx5_flow_driver_ops. */
typedef int (*mlx5_flow_validate_t)(struct rte_eth_dev *dev,
				    const struct rte_flow_attr *attr,
				    const struct rte_flow_item items[],
				    const struct rte_flow_action actions[],
				    struct rte_flow_error *error);
typedef struct mlx5_flow *(*mlx5_flow_prepare_t)
	(const struct rte_flow_attr *attr, const struct rte_flow_item items[],
	 const struct rte_flow_action actions[], struct rte_flow_error *error);
typedef int (*mlx5_flow_translate_t)(struct rte_eth_dev *dev,
				     struct mlx5_flow *dev_flow,
				     const struct rte_flow_attr *attr,
				     const struct rte_flow_item items[],
				     const struct rte_flow_action actions[],
				     struct rte_flow_error *error);
typedef int (*mlx5_flow_apply_t)(struct rte_eth_dev *dev, struct rte_flow *flow,
				 struct rte_flow_error *error);
typedef void (*mlx5_flow_remove_t)(struct rte_eth_dev *dev,
				   struct rte_flow *flow);
typedef void (*mlx5_flow_destroy_t)(struct rte_eth_dev *dev,
				    struct rte_flow *flow);
typedef int (*mlx5_flow_query_t)(struct rte_eth_dev *dev,
				 struct rte_flow *flow,
				 const struct rte_flow_action *actions,
				 void *data,
				 struct rte_flow_error *error);

/* Per-back-end set of flow operations (DV, TCF or Verbs). */
struct mlx5_flow_driver_ops {
	mlx5_flow_validate_t validate;
	mlx5_flow_prepare_t prepare;
	mlx5_flow_translate_t translate;
	mlx5_flow_apply_t apply;
	mlx5_flow_remove_t remove;
	mlx5_flow_destroy_t destroy;
	mlx5_flow_query_t query;
};

/* mlx5_flow.c */

uint64_t mlx5_flow_hashfields_adjust(struct mlx5_flow *dev_flow, int tunnel,
				     uint64_t layer_types,
				     uint64_t hash_fields);
uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
				   uint32_t subpriority);
int mlx5_flow_validate_action_count(struct rte_eth_dev *dev,
				    const struct rte_flow_attr *attr,
				    struct rte_flow_error *error);
int mlx5_flow_validate_action_drop(uint64_t action_flags,
				   const struct rte_flow_attr *attr,
				   struct rte_flow_error *error);
int mlx5_flow_validate_action_flag(uint64_t action_flags,
				   const struct rte_flow_attr *attr,
				   struct rte_flow_error *error);
int mlx5_flow_validate_action_mark(const struct rte_flow_action *action,
				   uint64_t action_flags,
				   const struct rte_flow_attr *attr,
				   struct rte_flow_error *error);
int mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
				    uint64_t action_flags,
				    struct rte_eth_dev *dev,
				    const struct rte_flow_attr *attr,
				    struct rte_flow_error *error);
int mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
				  uint64_t action_flags,
				  struct rte_eth_dev *dev,
				  const struct rte_flow_attr *attr,
				  struct rte_flow_error *error);
int mlx5_flow_validate_attributes(struct rte_eth_dev *dev,
				  const struct rte_flow_attr *attributes,
				  struct rte_flow_error *error);
int mlx5_flow_item_acceptable(const struct rte_flow_item *item,
			      const uint8_t *mask,
			      const uint8_t *nic_mask,
			      unsigned int size,
			      struct rte_flow_error *error);
int mlx5_flow_validate_item_eth(const struct rte_flow_item *item,
				uint64_t item_flags,
				struct rte_flow_error *error);
int mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
				uint64_t item_flags,
				uint8_t target_protocol,
				struct rte_flow_error *error);
int mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
				 uint64_t item_flags,
				 const struct rte_flow_item_ipv4 *acc_mask,
				 struct rte_flow_error *error);
int mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
				 uint64_t item_flags,
				 const struct rte_flow_item_ipv6 *acc_mask,
				 struct rte_flow_error *error);
int mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev,
				 const struct rte_flow_item *item,
				 uint64_t item_flags,
				 uint64_t prev_layer,
				 struct rte_flow_error *error);
int mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
				uint64_t item_flags,
				uint8_t target_protocol,
				const struct rte_flow_item_tcp *flow_mask,
				struct rte_flow_error *error);
int mlx5_flow_validate_item_udp(const struct rte_flow_item *item,
				uint64_t item_flags,
				uint8_t target_protocol,
				struct rte_flow_error *error);
int mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
				 uint64_t item_flags,
				 struct rte_flow_error *error);
int mlx5_flow_validate_item_vxlan(const struct rte_flow_item *item,
				  uint64_t item_flags,
				  struct rte_flow_error *error);
int mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
				      uint64_t item_flags,
				      struct rte_eth_dev *dev,
				      struct rte_flow_error *error);

/* mlx5_flow_tcf.c */

int mlx5_flow_tcf_init(struct mlx5_flow_tcf_context *ctx,
		       unsigned int ifindex, struct rte_flow_error *error);
struct mlx5_flow_tcf_context *mlx5_flow_tcf_context_create(void);
void mlx5_flow_tcf_context_destroy(struct mlx5_flow_tcf_context *ctx);

#endif /* RTE_PMD_MLX5_FLOW_H_ */