184c406e7SOri Kam /* SPDX-License-Identifier: BSD-3-Clause 284c406e7SOri Kam * Copyright 2018 Mellanox Technologies, Ltd 384c406e7SOri Kam */ 484c406e7SOri Kam 584c406e7SOri Kam #ifndef RTE_PMD_MLX5_FLOW_H_ 684c406e7SOri Kam #define RTE_PMD_MLX5_FLOW_H_ 784c406e7SOri Kam 884c406e7SOri Kam #include <stdalign.h> 984c406e7SOri Kam #include <stdint.h> 1084c406e7SOri Kam #include <string.h> 1189813a52SDmitry Kozlyuk #include <sys/queue.h> 1284c406e7SOri Kam 13f15db67dSMatan Azrad #include <rte_alarm.h> 143bd26b23SSuanming Mou #include <rte_mtr.h> 15f15db67dSMatan Azrad 169d60f545SOphir Munk #include <mlx5_glue.h> 177b4f1e6bSMatan Azrad #include <mlx5_prm.h> 187b4f1e6bSMatan Azrad 19f5bf91deSMoti Haimovsky #include "mlx5.h" 205f5e2f86SAlexander Kozyrev #include "rte_pmd_mlx5.h" 2122681deeSAlex Vesker #include "hws/mlx5dr.h" 227aa6c077SSuanming Mou #include "mlx5_tx.h" 23f5bf91deSMoti Haimovsky 24a5640386SXueming Li /* E-Switch Manager port, used for rte_flow_item_port_id. */ 25a5640386SXueming Li #define MLX5_PORT_ESW_MGR UINT32_MAX 26a5640386SXueming Li 2733d506b9SShun Hao /* E-Switch Manager port, used for rte_flow_item_ethdev. */ 2833d506b9SShun Hao #define MLX5_REPRESENTED_PORT_ESW_MGR UINT16_MAX 2933d506b9SShun Hao 3070d84dc7SOri Kam /* Private rte flow items. */ 3170d84dc7SOri Kam enum mlx5_rte_flow_item_type { 3270d84dc7SOri Kam MLX5_RTE_FLOW_ITEM_TYPE_END = INT_MIN, 3370d84dc7SOri Kam MLX5_RTE_FLOW_ITEM_TYPE_TAG, 3475a00812SSuanming Mou MLX5_RTE_FLOW_ITEM_TYPE_SQ, 3550f576d6SSuanming Mou MLX5_RTE_FLOW_ITEM_TYPE_VLAN, 364ec6360dSGregory Etelson MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL, 3770d84dc7SOri Kam }; 3870d84dc7SOri Kam 39baf516beSViacheslav Ovsiienko /* Private (internal) rte flow actions. */ 4070d84dc7SOri Kam enum mlx5_rte_flow_action_type { 4170d84dc7SOri Kam MLX5_RTE_FLOW_ACTION_TYPE_END = INT_MIN, 4270d84dc7SOri Kam MLX5_RTE_FLOW_ACTION_TYPE_TAG, 43dd3c774fSViacheslav Ovsiienko MLX5_RTE_FLOW_ACTION_TYPE_MARK, 44baf516beSViacheslav Ovsiienko MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG, 453c78124fSShiri Kuzin MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS, 464ec6360dSGregory Etelson MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET, 4781073e1fSMatan Azrad MLX5_RTE_FLOW_ACTION_TYPE_AGE, 4851ec04dcSShun Hao MLX5_RTE_FLOW_ACTION_TYPE_COUNT, 49f3191849SMichael Baum MLX5_RTE_FLOW_ACTION_TYPE_JUMP, 507ab3962dSSuanming Mou MLX5_RTE_FLOW_ACTION_TYPE_RSS, 5148fbb0e9SAlexander Kozyrev MLX5_RTE_FLOW_ACTION_TYPE_METER_MARK, 5270d84dc7SOri Kam }; 5370d84dc7SOri Kam 54ddb68e47SBing Zhao /* Private (internal) Field IDs for MODIFY_FIELD action. */ 55ddb68e47SBing Zhao enum mlx5_rte_flow_field_id { 56ddb68e47SBing Zhao MLX5_RTE_FLOW_FIELD_END = INT_MIN, 57ddb68e47SBing Zhao MLX5_RTE_FLOW_FIELD_META_REG, 58ddb68e47SBing Zhao }; 59ddb68e47SBing Zhao 6048fbb0e9SAlexander Kozyrev #define MLX5_INDIRECT_ACTION_TYPE_OFFSET 29 614a42ac1fSMatan Azrad 62478ba4bbSSuanming Mou #define MLX5_INDIRECT_ACTION_TYPE_GET(handle) \ 63478ba4bbSSuanming Mou (((uint32_t)(uintptr_t)(handle)) >> MLX5_INDIRECT_ACTION_TYPE_OFFSET) 64478ba4bbSSuanming Mou 65478ba4bbSSuanming Mou #define MLX5_INDIRECT_ACTION_IDX_GET(handle) \ 66478ba4bbSSuanming Mou (((uint32_t)(uintptr_t)(handle)) & \ 67478ba4bbSSuanming Mou ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1)) 68478ba4bbSSuanming Mou 693564e928SGregory Etelson enum mlx5_indirect_type { 704b61b877SBing Zhao MLX5_INDIRECT_ACTION_TYPE_RSS, 714b61b877SBing Zhao MLX5_INDIRECT_ACTION_TYPE_AGE, 72f3191849SMichael Baum MLX5_INDIRECT_ACTION_TYPE_COUNT, 732db75e8bSBing Zhao MLX5_INDIRECT_ACTION_TYPE_CT, 7448fbb0e9SAlexander Kozyrev MLX5_INDIRECT_ACTION_TYPE_METER_MARK, 7515896eafSGregory Etelson MLX5_INDIRECT_ACTION_TYPE_QUOTA, 764a42ac1fSMatan Azrad }; 774a42ac1fSMatan Azrad 7848fbb0e9SAlexander Kozyrev /* Now, the maximal ports will be supported is 16, action number is 32M. */ 7948fbb0e9SAlexander Kozyrev #define MLX5_INDIRECT_ACT_CT_MAX_PORT 0x10 804f74cb68SBing Zhao 814487a792SDariusz Sosnowski #define MLX5_INDIRECT_ACT_CT_OWNER_SHIFT 25 824f74cb68SBing Zhao #define MLX5_INDIRECT_ACT_CT_OWNER_MASK (MLX5_INDIRECT_ACT_CT_MAX_PORT - 1) 834f74cb68SBing Zhao 844c9e67b5SDariusz Sosnowski /* 854c9e67b5SDariusz Sosnowski * When SW steering flow engine is used, the CT action handles are encoded in a following way: 864c9e67b5SDariusz Sosnowski * - bits 31:29 - type 874c9e67b5SDariusz Sosnowski * - bits 28:25 - port index of the action owner 884c9e67b5SDariusz Sosnowski * - bits 24:0 - action index 894c9e67b5SDariusz Sosnowski */ 904f74cb68SBing Zhao #define MLX5_INDIRECT_ACT_CT_GEN_IDX(owner, index) \ 914f74cb68SBing Zhao ((MLX5_INDIRECT_ACTION_TYPE_CT << MLX5_INDIRECT_ACTION_TYPE_OFFSET) | \ 924f74cb68SBing Zhao (((owner) & MLX5_INDIRECT_ACT_CT_OWNER_MASK) << \ 934f74cb68SBing Zhao MLX5_INDIRECT_ACT_CT_OWNER_SHIFT) | (index)) 944f74cb68SBing Zhao 954f74cb68SBing Zhao #define MLX5_INDIRECT_ACT_CT_GET_OWNER(index) \ 964f74cb68SBing Zhao (((index) >> MLX5_INDIRECT_ACT_CT_OWNER_SHIFT) & \ 974f74cb68SBing Zhao MLX5_INDIRECT_ACT_CT_OWNER_MASK) 984f74cb68SBing Zhao 994f74cb68SBing Zhao #define MLX5_INDIRECT_ACT_CT_GET_IDX(index) \ 1004f74cb68SBing Zhao ((index) & ((1 << MLX5_INDIRECT_ACT_CT_OWNER_SHIFT) - 1)) 1014f74cb68SBing Zhao 1024c9e67b5SDariusz Sosnowski /* 1034c9e67b5SDariusz Sosnowski * When HW steering flow engine is used, the CT action handles are encoded in a following way: 1044c9e67b5SDariusz Sosnowski * - bits 31:29 - type 1054c9e67b5SDariusz Sosnowski * - bits 28:0 - action index 1064c9e67b5SDariusz Sosnowski */ 1074c9e67b5SDariusz Sosnowski #define MLX5_INDIRECT_ACT_HWS_CT_GEN_IDX(index) \ 1084c9e67b5SDariusz Sosnowski ((struct rte_flow_action_handle *)(uintptr_t) \ 1094c9e67b5SDariusz Sosnowski ((MLX5_INDIRECT_ACTION_TYPE_CT << MLX5_INDIRECT_ACTION_TYPE_OFFSET) | (index))) 110463170a7SSuanming Mou 1113564e928SGregory Etelson enum mlx5_indirect_list_type { 112e26f50adSGregory Etelson MLX5_INDIRECT_ACTION_LIST_TYPE_ERR = 0, 113e26f50adSGregory Etelson MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY = 1, 114e26f50adSGregory Etelson MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR = 2, 1155e26c99fSRongwei Liu MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT = 3, 1163564e928SGregory Etelson }; 1173564e928SGregory Etelson 118e26f50adSGregory Etelson /** 1193564e928SGregory Etelson * Base type for indirect list type. 1203564e928SGregory Etelson */ 1213564e928SGregory Etelson struct mlx5_indirect_list { 122e26f50adSGregory Etelson /* Indirect list type. */ 1233564e928SGregory Etelson enum mlx5_indirect_list_type type; 124e26f50adSGregory Etelson /* Optional storage list entry */ 1253564e928SGregory Etelson LIST_ENTRY(mlx5_indirect_list) entry; 1263564e928SGregory Etelson }; 1273564e928SGregory Etelson 128e26f50adSGregory Etelson static __rte_always_inline void 129e26f50adSGregory Etelson mlx5_indirect_list_add_entry(void *head, struct mlx5_indirect_list *elem) 1303564e928SGregory Etelson { 131e26f50adSGregory Etelson LIST_HEAD(, mlx5_indirect_list) *h = head; 132e26f50adSGregory Etelson 133e26f50adSGregory Etelson LIST_INSERT_HEAD(h, elem, entry); 134e26f50adSGregory Etelson } 135e26f50adSGregory Etelson 136e26f50adSGregory Etelson static __rte_always_inline void 137e26f50adSGregory Etelson mlx5_indirect_list_remove_entry(struct mlx5_indirect_list *elem) 138e26f50adSGregory Etelson { 139e26f50adSGregory Etelson if (elem->entry.le_prev) 140e26f50adSGregory Etelson LIST_REMOVE(elem, entry); 141e26f50adSGregory Etelson } 142e26f50adSGregory Etelson 143e26f50adSGregory Etelson static __rte_always_inline enum mlx5_indirect_list_type 144e26f50adSGregory Etelson mlx5_get_indirect_list_type(const struct rte_flow_action_list_handle *obj) 145e26f50adSGregory Etelson { 146e26f50adSGregory Etelson return ((const struct mlx5_indirect_list *)obj)->type; 1473564e928SGregory Etelson } 1483564e928SGregory Etelson 14970d84dc7SOri Kam /* Matches on selected register. */ 15070d84dc7SOri Kam struct mlx5_rte_flow_item_tag { 151baf516beSViacheslav Ovsiienko enum modify_reg id; 152cff811c7SViacheslav Ovsiienko uint32_t data; 15370d84dc7SOri Kam }; 15470d84dc7SOri Kam 15570d84dc7SOri Kam /* Modify selected register. */ 15670d84dc7SOri Kam struct mlx5_rte_flow_action_set_tag { 157baf516beSViacheslav Ovsiienko enum modify_reg id; 158a597ef33SShun Hao uint8_t offset; 159a597ef33SShun Hao uint8_t length; 160cff811c7SViacheslav Ovsiienko uint32_t data; 16170d84dc7SOri Kam }; 16270d84dc7SOri Kam 163baf516beSViacheslav Ovsiienko struct mlx5_flow_action_copy_mreg { 164baf516beSViacheslav Ovsiienko enum modify_reg dst; 165baf516beSViacheslav Ovsiienko enum modify_reg src; 166baf516beSViacheslav Ovsiienko }; 167baf516beSViacheslav Ovsiienko 1683c84f34eSOri Kam /* Matches on source queue. */ 16975a00812SSuanming Mou struct mlx5_rte_flow_item_sq { 17026e1eaf2SDariusz Sosnowski uint32_t queue; /* DevX SQ number */ 1717c66fa49SGregory Etelson #ifdef RTE_ARCH_64 1727c66fa49SGregory Etelson uint32_t reserved; 1737c66fa49SGregory Etelson #endif 1743c84f34eSOri Kam }; 1753c84f34eSOri Kam 176840f09fbSBing Zhao /* Map from registers to modify fields. */ 177840f09fbSBing Zhao extern enum mlx5_modification_field reg_to_field[]; 178840f09fbSBing Zhao extern const size_t mlx5_mod_reg_size; 179840f09fbSBing Zhao 180840f09fbSBing Zhao static __rte_always_inline enum mlx5_modification_field 181840f09fbSBing Zhao mlx5_convert_reg_to_field(enum modify_reg reg) 182840f09fbSBing Zhao { 183840f09fbSBing Zhao MLX5_ASSERT((size_t)reg < mlx5_mod_reg_size); 184840f09fbSBing Zhao return reg_to_field[reg]; 185840f09fbSBing Zhao } 186840f09fbSBing Zhao 1873e8edd0eSViacheslav Ovsiienko /* Feature name to allocate metadata register. */ 1883e8edd0eSViacheslav Ovsiienko enum mlx5_feature_name { 1893e8edd0eSViacheslav Ovsiienko MLX5_HAIRPIN_RX, 1903e8edd0eSViacheslav Ovsiienko MLX5_HAIRPIN_TX, 1913e8edd0eSViacheslav Ovsiienko MLX5_METADATA_RX, 1923e8edd0eSViacheslav Ovsiienko MLX5_METADATA_TX, 1933e8edd0eSViacheslav Ovsiienko MLX5_METADATA_FDB, 1943e8edd0eSViacheslav Ovsiienko MLX5_FLOW_MARK, 1953e8edd0eSViacheslav Ovsiienko MLX5_APP_TAG, 1963e8edd0eSViacheslav Ovsiienko MLX5_COPY_MARK, 19727efd5deSSuanming Mou MLX5_MTR_COLOR, 19883306d6cSShun Hao MLX5_MTR_ID, 19931ef2982SDekel Peled MLX5_ASO_FLOW_HIT, 2008ebbc01fSBing Zhao MLX5_ASO_CONNTRACK, 201a9b6ea45SJiawei Wang MLX5_SAMPLE_ID, 2023e8edd0eSViacheslav Ovsiienko }; 2033e8edd0eSViacheslav Ovsiienko 2048bb81f26SXueming Li /* Default queue number. */ 2058bb81f26SXueming Li #define MLX5_RSSQ_DEFAULT_NUM 16 2068bb81f26SXueming Li 20784c406e7SOri Kam #define MLX5_FLOW_LAYER_OUTER_L2 (1u << 0) 20884c406e7SOri Kam #define MLX5_FLOW_LAYER_OUTER_L3_IPV4 (1u << 1) 20984c406e7SOri Kam #define MLX5_FLOW_LAYER_OUTER_L3_IPV6 (1u << 2) 21084c406e7SOri Kam #define MLX5_FLOW_LAYER_OUTER_L4_UDP (1u << 3) 21184c406e7SOri Kam #define MLX5_FLOW_LAYER_OUTER_L4_TCP (1u << 4) 21284c406e7SOri Kam #define MLX5_FLOW_LAYER_OUTER_VLAN (1u << 5) 21384c406e7SOri Kam 21484c406e7SOri Kam /* Pattern inner Layer bits. */ 21584c406e7SOri Kam #define MLX5_FLOW_LAYER_INNER_L2 (1u << 6) 21684c406e7SOri Kam #define MLX5_FLOW_LAYER_INNER_L3_IPV4 (1u << 7) 21784c406e7SOri Kam #define MLX5_FLOW_LAYER_INNER_L3_IPV6 (1u << 8) 21884c406e7SOri Kam #define MLX5_FLOW_LAYER_INNER_L4_UDP (1u << 9) 21984c406e7SOri Kam #define MLX5_FLOW_LAYER_INNER_L4_TCP (1u << 10) 22084c406e7SOri Kam #define MLX5_FLOW_LAYER_INNER_VLAN (1u << 11) 22184c406e7SOri Kam 22284c406e7SOri Kam /* Pattern tunnel Layer bits. */ 22384c406e7SOri Kam #define MLX5_FLOW_LAYER_VXLAN (1u << 12) 22484c406e7SOri Kam #define MLX5_FLOW_LAYER_VXLAN_GPE (1u << 13) 22584c406e7SOri Kam #define MLX5_FLOW_LAYER_GRE (1u << 14) 22684c406e7SOri Kam #define MLX5_FLOW_LAYER_MPLS (1u << 15) 227ea81c1b8SDekel Peled /* List of tunnel Layer bits continued below. */ 22884c406e7SOri Kam 2296bd7fbd0SDekel Peled /* General pattern items bits. */ 2306bd7fbd0SDekel Peled #define MLX5_FLOW_ITEM_METADATA (1u << 16) 2312e4c987aSOri Kam #define MLX5_FLOW_ITEM_PORT_ID (1u << 17) 23270d84dc7SOri Kam #define MLX5_FLOW_ITEM_TAG (1u << 18) 23355deee17SViacheslav Ovsiienko #define MLX5_FLOW_ITEM_MARK (1u << 19) 2346bd7fbd0SDekel Peled 235d53aa89aSXiaoyu Min /* Pattern MISC bits. */ 23620ac7966SXiaoyu Min #define MLX5_FLOW_LAYER_ICMP (1u << 20) 23720ac7966SXiaoyu Min #define MLX5_FLOW_LAYER_ICMP6 (1u << 21) 23820ac7966SXiaoyu Min #define MLX5_FLOW_LAYER_GRE_KEY (1u << 22) 239d53aa89aSXiaoyu Min 240ea81c1b8SDekel Peled /* Pattern tunnel Layer bits (continued). */ 24120ac7966SXiaoyu Min #define MLX5_FLOW_LAYER_IPIP (1u << 23) 24220ac7966SXiaoyu Min #define MLX5_FLOW_LAYER_IPV6_ENCAP (1u << 24) 24320ac7966SXiaoyu Min #define MLX5_FLOW_LAYER_NVGRE (1u << 25) 24420ac7966SXiaoyu Min #define MLX5_FLOW_LAYER_GENEVE (1u << 26) 2455e33bebdSXiaoyu Min 2463c84f34eSOri Kam /* Queue items. */ 24775a00812SSuanming Mou #define MLX5_FLOW_ITEM_SQ (1u << 27) 2483c84f34eSOri Kam 249f31d7a01SDekel Peled /* Pattern tunnel Layer bits (continued). */ 250f31d7a01SDekel Peled #define MLX5_FLOW_LAYER_GTP (1u << 28) 251f31d7a01SDekel Peled 252c7eca236SBing Zhao /* Pattern eCPRI Layer bit. */ 253c7eca236SBing Zhao #define MLX5_FLOW_LAYER_ECPRI (UINT64_C(1) << 29) 254c7eca236SBing Zhao 2550e5a0d8fSDekel Peled /* IPv6 Fragment Extension Header bit. */ 2560e5a0d8fSDekel Peled #define MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT (1u << 30) 2570e5a0d8fSDekel Peled #define MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT (1u << 31) 2580e5a0d8fSDekel Peled 2592c9f9617SShiri Kuzin /* Pattern tunnel Layer bits (continued). */ 260f7239fceSShiri Kuzin #define MLX5_FLOW_LAYER_GENEVE_OPT (UINT64_C(1) << 32) 2612c9f9617SShiri Kuzin #define MLX5_FLOW_LAYER_GTP_PSC (UINT64_C(1) << 33) 2622c9f9617SShiri Kuzin 26306741117SGregory Etelson /* INTEGRITY item bits */ 26406741117SGregory Etelson #define MLX5_FLOW_ITEM_OUTER_INTEGRITY (UINT64_C(1) << 34) 26506741117SGregory Etelson #define MLX5_FLOW_ITEM_INNER_INTEGRITY (UINT64_C(1) << 35) 26623b0a8b2SGregory Etelson #define MLX5_FLOW_ITEM_INTEGRITY \ 26723b0a8b2SGregory Etelson (MLX5_FLOW_ITEM_OUTER_INTEGRITY | MLX5_FLOW_ITEM_INNER_INTEGRITY) 26879f89527SGregory Etelson 269aca19061SBing Zhao /* Conntrack item. */ 27006741117SGregory Etelson #define MLX5_FLOW_LAYER_ASO_CT (UINT64_C(1) << 36) 271aca19061SBing Zhao 272a23e9b6eSGregory Etelson /* Flex item */ 27360bc2805SGregory Etelson #define MLX5_FLOW_ITEM_OUTER_FLEX (UINT64_C(1) << 37) 27460bc2805SGregory Etelson #define MLX5_FLOW_ITEM_INNER_FLEX (UINT64_C(1) << 38) 27560bc2805SGregory Etelson #define MLX5_FLOW_ITEM_FLEX_TUNNEL (UINT64_C(1) << 39) 276a23e9b6eSGregory Etelson 27780c67625SGregory Etelson #define MLX5_FLOW_ITEM_FLEX \ 27880c67625SGregory Etelson (MLX5_FLOW_ITEM_OUTER_FLEX | MLX5_FLOW_ITEM_INNER_FLEX | \ 27980c67625SGregory Etelson MLX5_FLOW_ITEM_FLEX_TUNNEL) 28080c67625SGregory Etelson 28118ca4a4eSRaja Zidane /* ESP item */ 28218ca4a4eSRaja Zidane #define MLX5_FLOW_ITEM_ESP (UINT64_C(1) << 40) 28318ca4a4eSRaja Zidane 284e8146c63SSean Zhang /* Port Representor/Represented Port item */ 285e8146c63SSean Zhang #define MLX5_FLOW_ITEM_PORT_REPRESENTOR (UINT64_C(1) << 41) 286e8146c63SSean Zhang #define MLX5_FLOW_ITEM_REPRESENTED_PORT (UINT64_C(1) << 42) 287e8146c63SSean Zhang 28875a00812SSuanming Mou /* Meter color item */ 28975a00812SSuanming Mou #define MLX5_FLOW_ITEM_METER_COLOR (UINT64_C(1) << 44) 29015896eafSGregory Etelson #define MLX5_FLOW_ITEM_QUOTA (UINT64_C(1) << 45) 29115896eafSGregory Etelson 29275a00812SSuanming Mou 29300e57916SRongwei Liu /* IPv6 routing extension item */ 29400e57916SRongwei Liu #define MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT (UINT64_C(1) << 45) 29500e57916SRongwei Liu #define MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT (UINT64_C(1) << 46) 29600e57916SRongwei Liu 297674afdf0SJiawei Wang /* Aggregated affinity item */ 298674afdf0SJiawei Wang #define MLX5_FLOW_ITEM_AGGR_AFFINITY (UINT64_C(1) << 49) 299674afdf0SJiawei Wang 30032c2847aSDong Zhou /* IB BTH ITEM. */ 30132c2847aSDong Zhou #define MLX5_FLOW_ITEM_IB_BTH (1ull << 51) 30232c2847aSDong Zhou 303ad17988aSAlexander Kozyrev /* PTYPE ITEM */ 304ad17988aSAlexander Kozyrev #define MLX5_FLOW_ITEM_PTYPE (1ull << 52) 305ad17988aSAlexander Kozyrev 3066f7d6622SHaifei Luo /* NSH ITEM */ 3076f7d6622SHaifei Luo #define MLX5_FLOW_ITEM_NSH (1ull << 53) 3086f7d6622SHaifei Luo 309cb25df7cSSuanming Mou /* COMPARE ITEM */ 310cb25df7cSSuanming Mou #define MLX5_FLOW_ITEM_COMPARE (1ull << 54) 311cb25df7cSSuanming Mou 312fcd7b8c6SErez Shitrit /* Random ITEM */ 313fcd7b8c6SErez Shitrit #define MLX5_FLOW_ITEM_RANDOM (1ull << 55) 314fcd7b8c6SErez Shitrit 31584c406e7SOri Kam /* Outer Masks. */ 31684c406e7SOri Kam #define MLX5_FLOW_LAYER_OUTER_L3 \ 31784c406e7SOri Kam (MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6) 31884c406e7SOri Kam #define MLX5_FLOW_LAYER_OUTER_L4 \ 31984c406e7SOri Kam (MLX5_FLOW_LAYER_OUTER_L4_UDP | MLX5_FLOW_LAYER_OUTER_L4_TCP) 32084c406e7SOri Kam #define MLX5_FLOW_LAYER_OUTER \ 32184c406e7SOri Kam (MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_L3 | \ 32284c406e7SOri Kam MLX5_FLOW_LAYER_OUTER_L4) 32384c406e7SOri Kam 32484c406e7SOri Kam /* Tunnel Masks. */ 32584c406e7SOri Kam #define MLX5_FLOW_LAYER_TUNNEL \ 32684c406e7SOri Kam (MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \ 327ea81c1b8SDekel Peled MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_NVGRE | MLX5_FLOW_LAYER_MPLS | \ 328e59a5dbcSMoti Haimovsky MLX5_FLOW_LAYER_IPIP | MLX5_FLOW_LAYER_IPV6_ENCAP | \ 329a23e9b6eSGregory Etelson MLX5_FLOW_LAYER_GENEVE | MLX5_FLOW_LAYER_GTP | \ 330a23e9b6eSGregory Etelson MLX5_FLOW_ITEM_FLEX_TUNNEL) 33184c406e7SOri Kam 33284c406e7SOri Kam /* Inner Masks. */ 33384c406e7SOri Kam #define MLX5_FLOW_LAYER_INNER_L3 \ 33484c406e7SOri Kam (MLX5_FLOW_LAYER_INNER_L3_IPV4 | MLX5_FLOW_LAYER_INNER_L3_IPV6) 33584c406e7SOri Kam #define MLX5_FLOW_LAYER_INNER_L4 \ 33684c406e7SOri Kam (MLX5_FLOW_LAYER_INNER_L4_UDP | MLX5_FLOW_LAYER_INNER_L4_TCP) 33784c406e7SOri Kam #define MLX5_FLOW_LAYER_INNER \ 33884c406e7SOri Kam (MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_L3 | \ 33984c406e7SOri Kam MLX5_FLOW_LAYER_INNER_L4) 34084c406e7SOri Kam 3414bb14c83SDekel Peled /* Layer Masks. */ 3424bb14c83SDekel Peled #define MLX5_FLOW_LAYER_L2 \ 3434bb14c83SDekel Peled (MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_INNER_L2) 3444bb14c83SDekel Peled #define MLX5_FLOW_LAYER_L3_IPV4 \ 3454bb14c83SDekel Peled (MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_INNER_L3_IPV4) 3464bb14c83SDekel Peled #define MLX5_FLOW_LAYER_L3_IPV6 \ 3474bb14c83SDekel Peled (MLX5_FLOW_LAYER_OUTER_L3_IPV6 | MLX5_FLOW_LAYER_INNER_L3_IPV6) 3484bb14c83SDekel Peled #define MLX5_FLOW_LAYER_L3 \ 3494bb14c83SDekel Peled (MLX5_FLOW_LAYER_L3_IPV4 | MLX5_FLOW_LAYER_L3_IPV6) 3504bb14c83SDekel Peled #define MLX5_FLOW_LAYER_L4 \ 3514bb14c83SDekel Peled (MLX5_FLOW_LAYER_OUTER_L4 | MLX5_FLOW_LAYER_INNER_L4) 3524bb14c83SDekel Peled 35384c406e7SOri Kam /* Actions */ 354e5517406SShun Hao #define MLX5_FLOW_ACTION_DROP (1ull << 0) 355e5517406SShun Hao #define MLX5_FLOW_ACTION_QUEUE (1ull << 1) 356e5517406SShun Hao #define MLX5_FLOW_ACTION_RSS (1ull << 2) 357e5517406SShun Hao #define MLX5_FLOW_ACTION_FLAG (1ull << 3) 358e5517406SShun Hao #define MLX5_FLOW_ACTION_MARK (1ull << 4) 359e5517406SShun Hao #define MLX5_FLOW_ACTION_COUNT (1ull << 5) 360e5517406SShun Hao #define MLX5_FLOW_ACTION_PORT_ID (1ull << 6) 361e5517406SShun Hao #define MLX5_FLOW_ACTION_OF_POP_VLAN (1ull << 7) 362e5517406SShun Hao #define MLX5_FLOW_ACTION_OF_PUSH_VLAN (1ull << 8) 363e5517406SShun Hao #define MLX5_FLOW_ACTION_OF_SET_VLAN_VID (1ull << 9) 364e5517406SShun Hao #define MLX5_FLOW_ACTION_OF_SET_VLAN_PCP (1ull << 10) 365e5517406SShun Hao #define MLX5_FLOW_ACTION_SET_IPV4_SRC (1ull << 11) 366e5517406SShun Hao #define MLX5_FLOW_ACTION_SET_IPV4_DST (1ull << 12) 367e5517406SShun Hao #define MLX5_FLOW_ACTION_SET_IPV6_SRC (1ull << 13) 368e5517406SShun Hao #define MLX5_FLOW_ACTION_SET_IPV6_DST (1ull << 14) 369e5517406SShun Hao #define MLX5_FLOW_ACTION_SET_TP_SRC (1ull << 15) 370e5517406SShun Hao #define MLX5_FLOW_ACTION_SET_TP_DST (1ull << 16) 371e5517406SShun Hao #define MLX5_FLOW_ACTION_JUMP (1ull << 17) 372e5517406SShun Hao #define MLX5_FLOW_ACTION_SET_TTL (1ull << 18) 373e5517406SShun Hao #define MLX5_FLOW_ACTION_DEC_TTL (1ull << 19) 374e5517406SShun Hao #define MLX5_FLOW_ACTION_SET_MAC_SRC (1ull << 20) 375e5517406SShun Hao #define MLX5_FLOW_ACTION_SET_MAC_DST (1ull << 21) 376e5517406SShun Hao #define MLX5_FLOW_ACTION_ENCAP (1ull << 22) 377e5517406SShun Hao #define MLX5_FLOW_ACTION_DECAP (1ull << 23) 378e5517406SShun Hao #define MLX5_FLOW_ACTION_INC_TCP_SEQ (1ull << 24) 379e5517406SShun Hao #define MLX5_FLOW_ACTION_DEC_TCP_SEQ (1ull << 25) 380e5517406SShun Hao #define MLX5_FLOW_ACTION_INC_TCP_ACK (1ull << 26) 381e5517406SShun Hao #define MLX5_FLOW_ACTION_DEC_TCP_ACK (1ull << 27) 38206387be8SMatan Azrad #define MLX5_FLOW_ACTION_SET_TAG (1ull << 28) 38306387be8SMatan Azrad #define MLX5_FLOW_ACTION_MARK_EXT (1ull << 29) 38406387be8SMatan Azrad #define MLX5_FLOW_ACTION_SET_META (1ull << 30) 38506387be8SMatan Azrad #define MLX5_FLOW_ACTION_METER (1ull << 31) 38606387be8SMatan Azrad #define MLX5_FLOW_ACTION_SET_IPV4_DSCP (1ull << 32) 38706387be8SMatan Azrad #define MLX5_FLOW_ACTION_SET_IPV6_DSCP (1ull << 33) 388fa2d01c8SDong Zhou #define MLX5_FLOW_ACTION_AGE (1ull << 34) 3893c78124fSShiri Kuzin #define MLX5_FLOW_ACTION_DEFAULT_MISS (1ull << 35) 39096b1f027SJiawei Wang #define MLX5_FLOW_ACTION_SAMPLE (1ull << 36) 3914ec6360dSGregory Etelson #define MLX5_FLOW_ACTION_TUNNEL_SET (1ull << 37) 3924ec6360dSGregory Etelson #define MLX5_FLOW_ACTION_TUNNEL_MATCH (1ull << 38) 393641dbe4fSAlexander Kozyrev #define MLX5_FLOW_ACTION_MODIFY_FIELD (1ull << 39) 39444432018SLi Zhang #define MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY (1ull << 40) 3952d084f69SBing Zhao #define MLX5_FLOW_ACTION_CT (1ull << 41) 39625c4d6dfSMichael Savisko #define MLX5_FLOW_ACTION_SEND_TO_KERNEL (1ull << 42) 39704a4de75SMichael Baum #define MLX5_FLOW_ACTION_INDIRECT_COUNT (1ull << 43) 39804a4de75SMichael Baum #define MLX5_FLOW_ACTION_INDIRECT_AGE (1ull << 44) 39915896eafSGregory Etelson #define MLX5_FLOW_ACTION_QUOTA (1ull << 46) 4003dce73a2SSuanming Mou #define MLX5_FLOW_ACTION_PORT_REPRESENTOR (1ull << 47) 4011be65c39SRongwei Liu #define MLX5_FLOW_ACTION_IPV6_ROUTING_REMOVE (1ull << 48) 4021be65c39SRongwei Liu #define MLX5_FLOW_ACTION_IPV6_ROUTING_PUSH (1ull << 49) 40365340facSBing Zhao #define MLX5_FLOW_ACTION_NAT64 (1ull << 50) 404af154d7aSAlexander Kozyrev #define MLX5_FLOW_ACTION_JUMP_TO_TABLE_INDEX (1ull << 51) 40584c406e7SOri Kam 406e2b05b22SShun Hao #define MLX5_FLOW_DROP_INCLUSIVE_ACTIONS \ 407e2b05b22SShun Hao (MLX5_FLOW_ACTION_COUNT | MLX5_FLOW_ACTION_SAMPLE | MLX5_FLOW_ACTION_AGE) 408e2b05b22SShun Hao 40984c406e7SOri Kam #define MLX5_FLOW_FATE_ACTIONS \ 410684b9a1bSOri Kam (MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE | \ 4113c78124fSShiri Kuzin MLX5_FLOW_ACTION_RSS | MLX5_FLOW_ACTION_JUMP | \ 41244432018SLi Zhang MLX5_FLOW_ACTION_DEFAULT_MISS | \ 41325c4d6dfSMichael Savisko MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY | \ 4143dce73a2SSuanming Mou MLX5_FLOW_ACTION_SEND_TO_KERNEL | \ 415af154d7aSAlexander Kozyrev MLX5_FLOW_ACTION_PORT_REPRESENTOR | \ 416af154d7aSAlexander Kozyrev MLX5_FLOW_ACTION_JUMP_TO_TABLE_INDEX) 41784c406e7SOri Kam 4182e4c987aSOri Kam #define MLX5_FLOW_FATE_ESWITCH_ACTIONS \ 4192e4c987aSOri Kam (MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_PORT_ID | \ 420b2cd3918SJiawei Wang MLX5_FLOW_ACTION_SEND_TO_KERNEL | \ 421af154d7aSAlexander Kozyrev MLX5_FLOW_ACTION_JUMP | MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY | \ 422af154d7aSAlexander Kozyrev MLX5_FLOW_ACTION_JUMP_TO_TABLE_INDEX) 4234b8727f0SDekel Peled 4244bb14c83SDekel Peled #define MLX5_FLOW_MODIFY_HDR_ACTIONS (MLX5_FLOW_ACTION_SET_IPV4_SRC | \ 4254bb14c83SDekel Peled MLX5_FLOW_ACTION_SET_IPV4_DST | \ 4264bb14c83SDekel Peled MLX5_FLOW_ACTION_SET_IPV6_SRC | \ 4274bb14c83SDekel Peled MLX5_FLOW_ACTION_SET_IPV6_DST | \ 4284bb14c83SDekel Peled MLX5_FLOW_ACTION_SET_TP_SRC | \ 4294bb14c83SDekel Peled MLX5_FLOW_ACTION_SET_TP_DST | \ 4304bb14c83SDekel Peled MLX5_FLOW_ACTION_SET_TTL | \ 4314bb14c83SDekel Peled MLX5_FLOW_ACTION_DEC_TTL | \ 4324bb14c83SDekel Peled MLX5_FLOW_ACTION_SET_MAC_SRC | \ 433585b99fbSDekel Peled MLX5_FLOW_ACTION_SET_MAC_DST | \ 434585b99fbSDekel Peled MLX5_FLOW_ACTION_INC_TCP_SEQ | \ 435585b99fbSDekel Peled MLX5_FLOW_ACTION_DEC_TCP_SEQ | \ 436585b99fbSDekel Peled MLX5_FLOW_ACTION_INC_TCP_ACK | \ 4375f163d52SMoti Haimovsky MLX5_FLOW_ACTION_DEC_TCP_ACK | \ 43870d84dc7SOri Kam MLX5_FLOW_ACTION_OF_SET_VLAN_VID | \ 43955deee17SViacheslav Ovsiienko MLX5_FLOW_ACTION_SET_TAG | \ 440fcc8d2f7SViacheslav Ovsiienko MLX5_FLOW_ACTION_MARK_EXT | \ 4416f26e604SSuanming Mou MLX5_FLOW_ACTION_SET_META | \ 4426f26e604SSuanming Mou MLX5_FLOW_ACTION_SET_IPV4_DSCP | \ 443641dbe4fSAlexander Kozyrev MLX5_FLOW_ACTION_SET_IPV6_DSCP | \ 444641dbe4fSAlexander Kozyrev MLX5_FLOW_ACTION_MODIFY_FIELD) 4454bb14c83SDekel Peled 4469aee7a84SMoti Haimovsky #define MLX5_FLOW_VLAN_ACTIONS (MLX5_FLOW_ACTION_OF_POP_VLAN | \ 4479aee7a84SMoti Haimovsky MLX5_FLOW_ACTION_OF_PUSH_VLAN) 44806387be8SMatan Azrad 44906387be8SMatan Azrad #define MLX5_FLOW_XCAP_ACTIONS (MLX5_FLOW_ACTION_ENCAP | MLX5_FLOW_ACTION_DECAP) 45006387be8SMatan Azrad 45184c406e7SOri Kam #ifndef IPPROTO_MPLS 45284c406e7SOri Kam #define IPPROTO_MPLS 137 45384c406e7SOri Kam #endif 45484c406e7SOri Kam 455ec1e7a5cSGavin Li #define MLX5_IPV6_HDR_ECN_MASK 0x3 456ec1e7a5cSGavin Li #define MLX5_IPV6_HDR_DSCP_SHIFT 2 457ec1e7a5cSGavin Li 458d1abe664SDekel Peled /* UDP port number for MPLS */ 459d1abe664SDekel Peled #define MLX5_UDP_PORT_MPLS 6635 460d1abe664SDekel Peled 461fc2c498cSOri Kam /* UDP port numbers for VxLAN. */ 462fc2c498cSOri Kam #define MLX5_UDP_PORT_VXLAN 4789 463fc2c498cSOri Kam #define MLX5_UDP_PORT_VXLAN_GPE 4790 464fc2c498cSOri Kam 46532c2847aSDong Zhou /* UDP port numbers for RoCEv2. */ 46632c2847aSDong Zhou #define MLX5_UDP_PORT_ROCEv2 4791 46732c2847aSDong Zhou 468e59a5dbcSMoti Haimovsky /* UDP port numbers for GENEVE. */ 469e59a5dbcSMoti Haimovsky #define MLX5_UDP_PORT_GENEVE 6081 470e59a5dbcSMoti Haimovsky 4715f8ae44dSDong Zhou /* Lowest priority indicator. */ 4725f8ae44dSDong Zhou #define MLX5_FLOW_LOWEST_PRIO_INDICATOR ((uint32_t)-1) 4735f8ae44dSDong Zhou 4745f8ae44dSDong Zhou /* 4755f8ae44dSDong Zhou * Max priority for ingress\egress flow groups 4765f8ae44dSDong Zhou * greater than 0 and for any transfer flow group. 4775f8ae44dSDong Zhou * From user configation: 0 - 21843. 4785f8ae44dSDong Zhou */ 4795f8ae44dSDong Zhou #define MLX5_NON_ROOT_FLOW_MAX_PRIO (21843 + 1) 48084c406e7SOri Kam 48184c406e7SOri Kam /* 48284c406e7SOri Kam * Number of sub priorities. 48384c406e7SOri Kam * For each kind of pattern matching i.e. L2, L3, L4 to have a correct 48484c406e7SOri Kam * matching on the NIC (firmware dependent) L4 most have the higher priority 48584c406e7SOri Kam * followed by L3 and ending with L2. 48684c406e7SOri Kam */ 48784c406e7SOri Kam #define MLX5_PRIORITY_MAP_L2 2 48884c406e7SOri Kam #define MLX5_PRIORITY_MAP_L3 1 48984c406e7SOri Kam #define MLX5_PRIORITY_MAP_L4 0 49084c406e7SOri Kam #define MLX5_PRIORITY_MAP_MAX 3 49184c406e7SOri Kam 492fc2c498cSOri Kam /* Valid layer type for IPV4 RSS. */ 493fc2c498cSOri Kam #define MLX5_IPV4_LAYER_TYPES \ 494295968d1SFerruh Yigit (RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | \ 495295968d1SFerruh Yigit RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV4_UDP | \ 496295968d1SFerruh Yigit RTE_ETH_RSS_NONFRAG_IPV4_OTHER) 497fc2c498cSOri Kam 498ae67e3c4SGregory Etelson /* Valid L4 RSS types */ 499ae67e3c4SGregory Etelson #define MLX5_L4_RSS_TYPES (RTE_ETH_RSS_L4_SRC_ONLY | RTE_ETH_RSS_L4_DST_ONLY) 500ae67e3c4SGregory Etelson 501fc2c498cSOri Kam /* IBV hash source bits for IPV4. */ 502fc2c498cSOri Kam #define MLX5_IPV4_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4) 503fc2c498cSOri Kam 504fc2c498cSOri Kam /* Valid layer type for IPV6 RSS. */ 505fc2c498cSOri Kam #define MLX5_IPV6_LAYER_TYPES \ 506295968d1SFerruh Yigit (RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_TCP | \ 507295968d1SFerruh Yigit RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_IPV6_EX | RTE_ETH_RSS_IPV6_TCP_EX | \ 508295968d1SFerruh Yigit RTE_ETH_RSS_IPV6_UDP_EX | RTE_ETH_RSS_NONFRAG_IPV6_OTHER) 509fc2c498cSOri Kam 510fc2c498cSOri Kam /* IBV hash source bits for IPV6. */ 511fc2c498cSOri Kam #define MLX5_IPV6_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6) 512fc2c498cSOri Kam 513c3e33304SDekel Peled /* IBV hash bits for L3 SRC. */ 514c3e33304SDekel Peled #define MLX5_L3_SRC_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_SRC_IPV6) 515c3e33304SDekel Peled 516c3e33304SDekel Peled /* IBV hash bits for L3 DST. */ 517c3e33304SDekel Peled #define MLX5_L3_DST_IBV_RX_HASH (IBV_RX_HASH_DST_IPV4 | IBV_RX_HASH_DST_IPV6) 518c3e33304SDekel Peled 519c3e33304SDekel Peled /* IBV hash bits for TCP. */ 520c3e33304SDekel Peled #define MLX5_TCP_IBV_RX_HASH (IBV_RX_HASH_SRC_PORT_TCP | \ 521c3e33304SDekel Peled IBV_RX_HASH_DST_PORT_TCP) 522c3e33304SDekel Peled 523c3e33304SDekel Peled /* IBV hash bits for UDP. */ 524c3e33304SDekel Peled #define MLX5_UDP_IBV_RX_HASH (IBV_RX_HASH_SRC_PORT_UDP | \ 525c3e33304SDekel Peled IBV_RX_HASH_DST_PORT_UDP) 526c3e33304SDekel Peled 527c3e33304SDekel Peled /* IBV hash bits for L4 SRC. */ 528c3e33304SDekel Peled #define MLX5_L4_SRC_IBV_RX_HASH (IBV_RX_HASH_SRC_PORT_TCP | \ 529c3e33304SDekel Peled IBV_RX_HASH_SRC_PORT_UDP) 530c3e33304SDekel Peled 531c3e33304SDekel Peled /* IBV hash bits for L4 DST. */ 532c3e33304SDekel Peled #define MLX5_L4_DST_IBV_RX_HASH (IBV_RX_HASH_DST_PORT_TCP | \ 533c3e33304SDekel Peled IBV_RX_HASH_DST_PORT_UDP) 534e59a5dbcSMoti Haimovsky 535e59a5dbcSMoti Haimovsky /* Geneve header first 16Bit */ 536e59a5dbcSMoti Haimovsky #define MLX5_GENEVE_VER_MASK 0x3 537e59a5dbcSMoti Haimovsky #define MLX5_GENEVE_VER_SHIFT 14 538e59a5dbcSMoti Haimovsky #define MLX5_GENEVE_VER_VAL(a) \ 539e59a5dbcSMoti Haimovsky (((a) >> (MLX5_GENEVE_VER_SHIFT)) & (MLX5_GENEVE_VER_MASK)) 540e59a5dbcSMoti Haimovsky #define MLX5_GENEVE_OPTLEN_MASK 0x3F 541e440d6cfSShiri Kuzin #define MLX5_GENEVE_OPTLEN_SHIFT 8 542e59a5dbcSMoti Haimovsky #define MLX5_GENEVE_OPTLEN_VAL(a) \ 543e59a5dbcSMoti Haimovsky (((a) >> (MLX5_GENEVE_OPTLEN_SHIFT)) & (MLX5_GENEVE_OPTLEN_MASK)) 544e59a5dbcSMoti Haimovsky #define MLX5_GENEVE_OAMF_MASK 0x1 545e59a5dbcSMoti Haimovsky #define MLX5_GENEVE_OAMF_SHIFT 7 546e59a5dbcSMoti Haimovsky #define MLX5_GENEVE_OAMF_VAL(a) \ 547e59a5dbcSMoti Haimovsky (((a) >> (MLX5_GENEVE_OAMF_SHIFT)) & (MLX5_GENEVE_OAMF_MASK)) 548e59a5dbcSMoti Haimovsky #define MLX5_GENEVE_CRITO_MASK 0x1 549e59a5dbcSMoti Haimovsky #define MLX5_GENEVE_CRITO_SHIFT 6 550e59a5dbcSMoti Haimovsky #define MLX5_GENEVE_CRITO_VAL(a) \ 551e59a5dbcSMoti Haimovsky (((a) >> (MLX5_GENEVE_CRITO_SHIFT)) & (MLX5_GENEVE_CRITO_MASK)) 552e59a5dbcSMoti Haimovsky #define MLX5_GENEVE_RSVD_MASK 0x3F 553e59a5dbcSMoti Haimovsky #define MLX5_GENEVE_RSVD_VAL(a) ((a) & (MLX5_GENEVE_RSVD_MASK)) 554e59a5dbcSMoti Haimovsky /* 555e59a5dbcSMoti Haimovsky * The length of the Geneve options fields, expressed in four byte multiples, 556e59a5dbcSMoti Haimovsky * not including the eight byte fixed tunnel. 557e59a5dbcSMoti Haimovsky */ 558e59a5dbcSMoti Haimovsky #define MLX5_GENEVE_OPT_LEN_0 14 559e59a5dbcSMoti Haimovsky #define MLX5_GENEVE_OPT_LEN_1 63 560e59a5dbcSMoti Haimovsky 561f9210259SViacheslav Ovsiienko #define MLX5_ENCAPSULATION_DECISION_SIZE (sizeof(struct rte_ether_hdr) + \ 562f9210259SViacheslav Ovsiienko sizeof(struct rte_ipv4_hdr)) 5632c9f9617SShiri Kuzin /* GTP extension header flag. */ 5642c9f9617SShiri Kuzin #define MLX5_GTP_EXT_HEADER_FLAG 4 5652c9f9617SShiri Kuzin 56606cd4cf6SShiri Kuzin /* GTP extension header PDU type shift. */ 56706cd4cf6SShiri Kuzin #define MLX5_GTP_PDU_TYPE_SHIFT(a) ((a) << 4) 56806cd4cf6SShiri Kuzin 5696859e67eSDekel Peled /* IPv4 fragment_offset field contains relevant data in bits 2 to 15. */ 5706859e67eSDekel Peled #define MLX5_IPV4_FRAG_OFFSET_MASK \ 5716859e67eSDekel Peled (RTE_IPV4_HDR_OFFSET_MASK | RTE_IPV4_HDR_MF_FLAG) 5726859e67eSDekel Peled 5736859e67eSDekel Peled /* Specific item's fields can accept a range of values (using spec and last). */ 5746859e67eSDekel Peled #define MLX5_ITEM_RANGE_NOT_ACCEPTED false 5756859e67eSDekel Peled #define MLX5_ITEM_RANGE_ACCEPTED true 5766859e67eSDekel Peled 57772a944dbSBing Zhao /* Software header modify action numbers of a flow. */ 57872a944dbSBing Zhao #define MLX5_ACT_NUM_MDF_IPV4 1 57972a944dbSBing Zhao #define MLX5_ACT_NUM_MDF_IPV6 4 58072a944dbSBing Zhao #define MLX5_ACT_NUM_MDF_MAC 2 58172a944dbSBing Zhao #define MLX5_ACT_NUM_MDF_VID 1 582ea7cc15aSDmitry Kozlyuk #define MLX5_ACT_NUM_MDF_PORT 1 58372a944dbSBing Zhao #define MLX5_ACT_NUM_MDF_TTL 1 58472a944dbSBing Zhao #define MLX5_ACT_NUM_DEC_TTL MLX5_ACT_NUM_MDF_TTL 58572a944dbSBing Zhao #define MLX5_ACT_NUM_MDF_TCPSEQ 1 58672a944dbSBing Zhao #define MLX5_ACT_NUM_MDF_TCPACK 1 58772a944dbSBing Zhao #define MLX5_ACT_NUM_SET_REG 1 58872a944dbSBing Zhao #define MLX5_ACT_NUM_SET_TAG 1 58972a944dbSBing Zhao #define MLX5_ACT_NUM_CPY_MREG MLX5_ACT_NUM_SET_TAG 59072a944dbSBing Zhao #define MLX5_ACT_NUM_SET_MARK MLX5_ACT_NUM_SET_TAG 59172a944dbSBing Zhao #define MLX5_ACT_NUM_SET_META MLX5_ACT_NUM_SET_TAG 59272a944dbSBing Zhao #define MLX5_ACT_NUM_SET_DSCP 1 59372a944dbSBing Zhao 594641dbe4fSAlexander Kozyrev /* Maximum number of fields to modify in MODIFY_FIELD */ 595641dbe4fSAlexander Kozyrev #define MLX5_ACT_MAX_MOD_FIELDS 5 596641dbe4fSAlexander Kozyrev 5975cac1a5cSBing Zhao /* Syndrome bits definition for connection tracking. */ 5985cac1a5cSBing Zhao #define MLX5_CT_SYNDROME_VALID (0x0 << 6) 5995cac1a5cSBing Zhao #define MLX5_CT_SYNDROME_INVALID (0x1 << 6) 6005cac1a5cSBing Zhao #define MLX5_CT_SYNDROME_TRAP (0x2 << 6) 6015cac1a5cSBing Zhao #define MLX5_CT_SYNDROME_STATE_CHANGE (0x1 << 1) 6025cac1a5cSBing Zhao #define MLX5_CT_SYNDROME_BAD_PACKET (0x1 << 0) 6035cac1a5cSBing Zhao 6040c76d1c9SYongseok Koh enum mlx5_flow_drv_type { 6050c76d1c9SYongseok Koh MLX5_FLOW_TYPE_MIN, 6060c76d1c9SYongseok Koh MLX5_FLOW_TYPE_DV, 6070c76d1c9SYongseok Koh MLX5_FLOW_TYPE_VERBS, 6082b679150SSuanming Mou MLX5_FLOW_TYPE_HW, 6090c76d1c9SYongseok Koh MLX5_FLOW_TYPE_MAX, 6100c76d1c9SYongseok Koh }; 6110c76d1c9SYongseok Koh 612488d13abSSuanming Mou /* Fate action type. */ 613488d13abSSuanming Mou enum mlx5_flow_fate_type { 614488d13abSSuanming Mou MLX5_FLOW_FATE_NONE, /* Egress flow. */ 615488d13abSSuanming Mou MLX5_FLOW_FATE_QUEUE, 616488d13abSSuanming Mou MLX5_FLOW_FATE_JUMP, 617488d13abSSuanming Mou MLX5_FLOW_FATE_PORT_ID, 618488d13abSSuanming Mou MLX5_FLOW_FATE_DROP, 6193c78124fSShiri Kuzin MLX5_FLOW_FATE_DEFAULT_MISS, 620fabf8a37SSuanming Mou MLX5_FLOW_FATE_SHARED_RSS, 62150cc92ddSShun Hao MLX5_FLOW_FATE_MTR, 62225c4d6dfSMichael Savisko MLX5_FLOW_FATE_SEND_TO_KERNEL, 623488d13abSSuanming Mou MLX5_FLOW_FATE_MAX, 624488d13abSSuanming Mou }; 625488d13abSSuanming Mou 626865a0c15SOri Kam /* Matcher PRM representation */ 627865a0c15SOri Kam struct mlx5_flow_dv_match_params { 628865a0c15SOri Kam size_t size; 629865a0c15SOri Kam /**< Size of match value. Do NOT split size and key! */ 630865a0c15SOri Kam uint32_t buf[MLX5_ST_SZ_DW(fte_match_param)]; 631865a0c15SOri Kam /**< Matcher value. This value is used as the mask or as a key. */ 632865a0c15SOri Kam }; 633865a0c15SOri Kam 634865a0c15SOri Kam /* Matcher structure. */ 635865a0c15SOri Kam struct mlx5_flow_dv_matcher { 636e78e5408SMatan Azrad struct mlx5_list_entry entry; /**< Pointer to the next element. */ 637e38776c3SMaayan Kashani union { 638e9e36e52SBing Zhao struct mlx5_flow_tbl_resource *tbl; 639e38776c3SMaayan Kashani /**< Pointer to the table(group) the matcher associated with for DV flow. */ 640e38776c3SMaayan Kashani struct mlx5_flow_group *group; 641e38776c3SMaayan Kashani /* Group of this matcher for HWS non template flow. */ 642e38776c3SMaayan Kashani }; 643865a0c15SOri Kam void *matcher_object; /**< Pointer to DV matcher */ 644865a0c15SOri Kam uint16_t crc; /**< CRC of key. */ 645865a0c15SOri Kam uint16_t priority; /**< Priority of matcher. */ 646865a0c15SOri Kam struct mlx5_flow_dv_match_params mask; /**< Matcher mask. */ 647865a0c15SOri Kam }; 648865a0c15SOri Kam 649c513f05cSDekel Peled /* Encap/decap resource structure. */ 650c513f05cSDekel Peled struct mlx5_flow_dv_encap_decap_resource { 651961b6774SMatan Azrad struct mlx5_list_entry entry; 652c513f05cSDekel Peled /* Pointer to next element. */ 653cf7d1995SAlexander Kozyrev uint32_t refcnt; /**< Reference counter. */ 6546ad7cfaaSDekel Peled void *action; 6556ad7cfaaSDekel Peled /**< Encap/decap action object. */ 656c513f05cSDekel Peled uint8_t buf[MLX5_ENCAP_MAX_LEN]; 657c513f05cSDekel Peled size_t size; 658c513f05cSDekel Peled uint8_t reformat_type; 659c513f05cSDekel Peled uint8_t ft_type; 6604f84a197SOri Kam uint64_t flags; /**< Flags for RDMA API. */ 661bf615b07SSuanming Mou uint32_t idx; /**< Index for the index memory pool. */ 662c513f05cSDekel Peled }; 663c513f05cSDekel Peled 664cbb66daaSOri Kam /* Tag resource structure. */ 665cbb66daaSOri Kam struct mlx5_flow_dv_tag_resource { 666961b6774SMatan Azrad struct mlx5_list_entry entry; 667e484e403SBing Zhao /**< hash list entry for tag resource, tag value as the key. */ 668cbb66daaSOri Kam void *action; 6696ad7cfaaSDekel Peled /**< Tag action object. */ 670cf7d1995SAlexander Kozyrev uint32_t refcnt; /**< Reference counter. */ 6715f114269SSuanming Mou uint32_t idx; /**< Index for the index memory pool. */ 672f5b0aed2SSuanming Mou uint32_t tag_id; /**< Tag ID. */ 673cbb66daaSOri Kam }; 674cbb66daaSOri Kam 6754bb14c83SDekel Peled /* Modify resource structure */ 676e7750639SAndre Muezerie struct __rte_packed_begin mlx5_flow_dv_modify_hdr_resource { 677961b6774SMatan Azrad struct mlx5_list_entry entry; 67816a7dbc4SXueming Li void *action; /**< Modify header action object. */ 6794f3d8d0eSMatan Azrad uint32_t idx; 680ff4064d5SMaayan Kashani uint64_t flags; /**< Flags for RDMA API(HWS only). */ 68116a7dbc4SXueming Li /* Key area for hash list matching: */ 6824bb14c83SDekel Peled uint8_t ft_type; /**< Flow table type, Rx or Tx. */ 683e681eb05SMatan Azrad uint8_t actions_num; /**< Number of modification actions. */ 684e681eb05SMatan Azrad bool root; /**< Whether action is in root table. */ 685024e9575SBing Zhao struct mlx5_modification_cmd actions[]; 686024e9575SBing Zhao /**< Modification actions. */ 687e7750639SAndre Muezerie } __rte_packed_end; 6884bb14c83SDekel Peled 6893fe88961SSuanming Mou /* Modify resource key of the hash organization. */ 6903fe88961SSuanming Mou union mlx5_flow_modify_hdr_key { 6913fe88961SSuanming Mou struct { 6923fe88961SSuanming Mou uint32_t ft_type:8; /**< Flow table type, Rx or Tx. */ 6933fe88961SSuanming Mou uint32_t actions_num:5; /**< Number of modification actions. */ 6943fe88961SSuanming Mou uint32_t group:19; /**< Flow group id. */ 6953fe88961SSuanming Mou uint32_t cksum; /**< Actions check sum. */ 6963fe88961SSuanming Mou }; 6973fe88961SSuanming Mou uint64_t v64; /**< full 64bits value of key */ 6983fe88961SSuanming Mou }; 6993fe88961SSuanming Mou 700684b9a1bSOri Kam /* Jump action resource structure. */ 701684b9a1bSOri Kam struct mlx5_flow_dv_jump_tbl_resource { 7026c1d9a64SBing Zhao void *action; /**< Pointer to the rdma core action. */ 703684b9a1bSOri Kam }; 704684b9a1bSOri Kam 705c269b517SOri Kam /* Port ID resource structure. */ 706c269b517SOri Kam struct mlx5_flow_dv_port_id_action_resource { 707e78e5408SMatan Azrad struct mlx5_list_entry entry; 7080fd5f82aSXueming Li void *action; /**< Action object. */ 709c269b517SOri Kam uint32_t port_id; /**< Port ID value. */ 7100fd5f82aSXueming Li uint32_t idx; /**< Indexed pool memory index. */ 711c269b517SOri Kam }; 712c269b517SOri Kam 7139aee7a84SMoti Haimovsky /* Push VLAN action resource structure */ 7149aee7a84SMoti Haimovsky struct mlx5_flow_dv_push_vlan_action_resource { 715e78e5408SMatan Azrad struct mlx5_list_entry entry; /* Cache entry. */ 7166ad7cfaaSDekel Peled void *action; /**< Action object. */ 7179aee7a84SMoti Haimovsky uint8_t ft_type; /**< Flow table type, Rx, Tx or FDB. */ 7189aee7a84SMoti Haimovsky rte_be32_t vlan_tag; /**< VLAN tag value. */ 7193422af2aSXueming Li uint32_t idx; /**< Indexed pool memory index. */ 7209aee7a84SMoti Haimovsky }; 7219aee7a84SMoti Haimovsky 722dd3c774fSViacheslav Ovsiienko /* Metadata register copy table entry. */ 723dd3c774fSViacheslav Ovsiienko struct mlx5_flow_mreg_copy_resource { 724dd3c774fSViacheslav Ovsiienko /* 725dd3c774fSViacheslav Ovsiienko * Hash list entry for copy table. 726dd3c774fSViacheslav Ovsiienko * - Key is 32/64-bit MARK action ID. 727dd3c774fSViacheslav Ovsiienko * - MUST be the first entry. 728dd3c774fSViacheslav Ovsiienko */ 729961b6774SMatan Azrad struct mlx5_list_entry hlist_ent; 730dd3c774fSViacheslav Ovsiienko LIST_ENTRY(mlx5_flow_mreg_copy_resource) next; 731dd3c774fSViacheslav Ovsiienko /* List entry for device flows. */ 73290e6053aSSuanming Mou uint32_t idx; 733f5b0aed2SSuanming Mou uint32_t mark_id; 734821a6a5cSBing Zhao union { 735821a6a5cSBing Zhao uint32_t rix_flow; /* Built flow for copy. */ 736821a6a5cSBing Zhao uintptr_t hw_flow; 737821a6a5cSBing Zhao }; 738dd3c774fSViacheslav Ovsiienko }; 739dd3c774fSViacheslav Ovsiienko 740afd7a625SXueming Li /* Table tunnel parameter. */ 741afd7a625SXueming Li struct mlx5_flow_tbl_tunnel_prm { 742afd7a625SXueming Li const struct mlx5_flow_tunnel *tunnel; 743afd7a625SXueming Li uint32_t group_id; 744afd7a625SXueming Li bool external; 745afd7a625SXueming Li }; 746afd7a625SXueming Li 747860897d2SBing Zhao /* Table data structure of the hash organization. */ 748860897d2SBing Zhao struct mlx5_flow_tbl_data_entry { 749961b6774SMatan Azrad struct mlx5_list_entry entry; 750e9e36e52SBing Zhao /**< hash list entry, 64-bits key inside. */ 751860897d2SBing Zhao struct mlx5_flow_tbl_resource tbl; 752e9e36e52SBing Zhao /**< flow table resource. */ 753679f46c7SMatan Azrad struct mlx5_list *matchers; 754e9e36e52SBing Zhao /**< matchers' header associated with the flow table. */ 7556c1d9a64SBing Zhao struct mlx5_flow_dv_jump_tbl_resource jump; 7566c1d9a64SBing Zhao /**< jump resource, at most one for each table created. */ 7577ac99475SSuanming Mou uint32_t idx; /**< index for the indexed mempool. */ 7584ec6360dSGregory Etelson /**< tunnel offload */ 7594ec6360dSGregory Etelson const struct mlx5_flow_tunnel *tunnel; 7604ec6360dSGregory Etelson uint32_t group_id; 761f5b0aed2SSuanming Mou uint32_t external:1; 7627be78d02SJosh Soref uint32_t tunnel_offload:1; /* Tunnel offload table or not. */ 763f5b0aed2SSuanming Mou uint32_t is_egress:1; /**< Egress table. */ 764f5b0aed2SSuanming Mou uint32_t is_transfer:1; /**< Transfer table. */ 765f5b0aed2SSuanming Mou uint32_t dummy:1; /**< DR table. */ 7662d2cef5dSLi Zhang uint32_t id:22; /**< Table ID. */ 7672d2cef5dSLi Zhang uint32_t reserve:5; /**< Reserved to future using. */ 7682d2cef5dSLi Zhang uint32_t level; /**< Table level. */ 769860897d2SBing Zhao }; 770860897d2SBing Zhao 771b4c0ddbfSJiawei Wang /* Sub rdma-core actions list. */ 772b4c0ddbfSJiawei Wang struct mlx5_flow_sub_actions_list { 773b4c0ddbfSJiawei Wang uint32_t actions_num; /**< Number of sample actions. */ 774b4c0ddbfSJiawei Wang uint64_t action_flags; 775b4c0ddbfSJiawei Wang void *dr_queue_action; 776b4c0ddbfSJiawei Wang void *dr_tag_action; 777b4c0ddbfSJiawei Wang void *dr_cnt_action; 77800c10c22SJiawei Wang void *dr_port_id_action; 77900c10c22SJiawei Wang void *dr_encap_action; 7806a951567SJiawei Wang void *dr_jump_action; 781b4c0ddbfSJiawei Wang }; 782b4c0ddbfSJiawei Wang 783b4c0ddbfSJiawei Wang /* Sample sub-actions resource list. */ 784b4c0ddbfSJiawei Wang struct mlx5_flow_sub_actions_idx { 785b4c0ddbfSJiawei Wang uint32_t rix_hrxq; /**< Hash Rx queue object index. */ 786b4c0ddbfSJiawei Wang uint32_t rix_tag; /**< Index to the tag action. */ 78700c10c22SJiawei Wang uint32_t rix_port_id_action; /**< Index to port ID action resource. */ 78800c10c22SJiawei Wang uint32_t rix_encap_decap; /**< Index to encap/decap resource. */ 7896a951567SJiawei Wang uint32_t rix_jump; /**< Index to the jump action resource. */ 790b4c0ddbfSJiawei Wang }; 791b4c0ddbfSJiawei Wang 792b4c0ddbfSJiawei Wang /* Sample action resource structure. */ 793b4c0ddbfSJiawei Wang struct mlx5_flow_dv_sample_resource { 794e78e5408SMatan Azrad struct mlx5_list_entry entry; /**< Cache entry. */ 79519784141SSuanming Mou union { 796b4c0ddbfSJiawei Wang void *verbs_action; /**< Verbs sample action object. */ 79719784141SSuanming Mou void **sub_actions; /**< Sample sub-action array. */ 79819784141SSuanming Mou }; 79901c05ee0SSuanming Mou struct rte_eth_dev *dev; /**< Device registers the action. */ 80019784141SSuanming Mou uint32_t idx; /** Sample object index. */ 801b4c0ddbfSJiawei Wang uint8_t ft_type; /** Flow Table Type */ 802b4c0ddbfSJiawei Wang uint32_t ft_id; /** Flow Table Level */ 803b4c0ddbfSJiawei Wang uint32_t ratio; /** Sample Ratio */ 804b4c0ddbfSJiawei Wang uint64_t set_action; /** Restore reg_c0 value */ 805b4c0ddbfSJiawei Wang void *normal_path_tbl; /** Flow Table pointer */ 806b4c0ddbfSJiawei Wang struct mlx5_flow_sub_actions_idx sample_idx; 807b4c0ddbfSJiawei Wang /**< Action index resources. */ 808b4c0ddbfSJiawei Wang struct mlx5_flow_sub_actions_list sample_act; 809b4c0ddbfSJiawei Wang /**< Action resources. */ 810b4c0ddbfSJiawei Wang }; 811b4c0ddbfSJiawei Wang 81200c10c22SJiawei Wang #define MLX5_MAX_DEST_NUM 2 81300c10c22SJiawei Wang 81400c10c22SJiawei Wang /* Destination array action resource structure. */ 81500c10c22SJiawei Wang struct mlx5_flow_dv_dest_array_resource { 816e78e5408SMatan Azrad struct mlx5_list_entry entry; /**< Cache entry. */ 81719784141SSuanming Mou uint32_t idx; /** Destination array action object index. */ 81800c10c22SJiawei Wang uint8_t ft_type; /** Flow Table Type */ 81900c10c22SJiawei Wang uint8_t num_of_dest; /**< Number of destination actions. */ 82001c05ee0SSuanming Mou struct rte_eth_dev *dev; /**< Device registers the action. */ 82100c10c22SJiawei Wang void *action; /**< Pointer to the rdma core action. */ 82200c10c22SJiawei Wang struct mlx5_flow_sub_actions_idx sample_idx[MLX5_MAX_DEST_NUM]; 82300c10c22SJiawei Wang /**< Action index resources. */ 82400c10c22SJiawei Wang struct mlx5_flow_sub_actions_list sample_act[MLX5_MAX_DEST_NUM]; 82500c10c22SJiawei Wang /**< Action resources. */ 82600c10c22SJiawei Wang }; 82700c10c22SJiawei Wang 828750ff30aSGregory Etelson /* PMD flow priority for tunnel */ 829750ff30aSGregory Etelson #define MLX5_TUNNEL_PRIO_GET(rss_desc) \ 830750ff30aSGregory Etelson ((rss_desc)->level >= 2 ? MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4) 831750ff30aSGregory Etelson 832e745f900SSuanming Mou 833c42f44bdSBing Zhao /** Device flow handle structure for DV mode only. */ 834e7750639SAndre Muezerie struct __rte_packed_begin mlx5_flow_handle_dv { 835c42f44bdSBing Zhao /* Flow DV api: */ 836c42f44bdSBing Zhao struct mlx5_flow_dv_matcher *matcher; /**< Cache to matcher. */ 837c42f44bdSBing Zhao struct mlx5_flow_dv_modify_hdr_resource *modify_hdr; 838c42f44bdSBing Zhao /**< Pointer to modify header resource in cache. */ 83977749adaSSuanming Mou uint32_t rix_encap_decap; 84077749adaSSuanming Mou /**< Index to encap/decap resource in cache. */ 84177749adaSSuanming Mou uint32_t rix_push_vlan; 8428acf8ac9SSuanming Mou /**< Index to push VLAN action resource in cache. */ 84377749adaSSuanming Mou uint32_t rix_tag; 8445f114269SSuanming Mou /**< Index to the tag action. */ 845b4c0ddbfSJiawei Wang uint32_t rix_sample; 846b4c0ddbfSJiawei Wang /**< Index to sample action resource in cache. */ 84700c10c22SJiawei Wang uint32_t rix_dest_array; 84800c10c22SJiawei Wang /**< Index to destination array resource in cache. */ 849e7750639SAndre Muezerie } __rte_packed_end; 850c42f44bdSBing Zhao 851c42f44bdSBing Zhao /** Device flow handle structure: used both for creating & destroying. */ 852e7750639SAndre Muezerie struct __rte_packed_begin mlx5_flow_handle { 853b88341caSSuanming Mou SILIST_ENTRY(uint32_t)next; 85477749adaSSuanming Mou struct mlx5_vf_vlan vf_vlan; /**< Structure for VF VLAN workaround. */ 855b88341caSSuanming Mou /**< Index to next device flow handle. */ 8560ddd1143SYongseok Koh uint64_t layers; 85724663641SYongseok Koh /**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */ 858341c8941SDekel Peled void *drv_flow; /**< pointer to driver flow object. */ 85983306d6cSShun Hao uint32_t split_flow_id:27; /**< Sub flow unique match flow id. */ 8607be78d02SJosh Soref uint32_t is_meter_flow_id:1; /**< Indicate if flow_id is for meter. */ 86125c4d6dfSMichael Savisko uint32_t fate_action:4; /**< Fate action type. */ 8626fc18392SSuanming Mou union { 86377749adaSSuanming Mou uint32_t rix_hrxq; /**< Hash Rx queue object index. */ 86477749adaSSuanming Mou uint32_t rix_jump; /**< Index to the jump action resource. */ 86577749adaSSuanming Mou uint32_t rix_port_id_action; 8666fc18392SSuanming Mou /**< Index to port ID action resource. */ 86777749adaSSuanming Mou uint32_t rix_fate; 868488d13abSSuanming Mou /**< Generic value indicates the fate action. */ 8693c78124fSShiri Kuzin uint32_t rix_default_fate; 8703c78124fSShiri Kuzin /**< Indicates default miss fate action. */ 871fabf8a37SSuanming Mou uint32_t rix_srss; 872fabf8a37SSuanming Mou /**< Indicates shared RSS fate action. */ 8736fc18392SSuanming Mou }; 874f1ae0b35SOphir Munk #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H) 875c42f44bdSBing Zhao struct mlx5_flow_handle_dv dvh; 876c42f44bdSBing Zhao #endif 877cfe337e7SGregory Etelson uint8_t flex_item; /**< referenced Flex Item bitmask. */ 878e7750639SAndre Muezerie } __rte_packed_end; 879c42f44bdSBing Zhao 880c42f44bdSBing Zhao /* 881e7bfa359SBing Zhao * Size for Verbs device flow handle structure only. Do not use the DV only 882e7bfa359SBing Zhao * structure in Verbs. No DV flows attributes will be accessed. 883e7bfa359SBing Zhao * Macro offsetof() could also be used here. 884e7bfa359SBing Zhao */ 885f1ae0b35SOphir Munk #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H) 886e7bfa359SBing Zhao #define MLX5_FLOW_HANDLE_VERBS_SIZE \ 887e7bfa359SBing Zhao (sizeof(struct mlx5_flow_handle) - sizeof(struct mlx5_flow_handle_dv)) 888e7bfa359SBing Zhao #else 889e7bfa359SBing Zhao #define MLX5_FLOW_HANDLE_VERBS_SIZE (sizeof(struct mlx5_flow_handle)) 890e7bfa359SBing Zhao #endif 891e7bfa359SBing Zhao 892c42f44bdSBing Zhao /** Device flow structure only for DV flow creation. */ 893e7bfa359SBing Zhao struct mlx5_flow_dv_workspace { 894c42f44bdSBing Zhao uint32_t group; /**< The group index. */ 8952d2cef5dSLi Zhang uint32_t table_id; /**< Flow table identifier. */ 896c42f44bdSBing Zhao uint8_t transfer; /**< 1 if the flow is E-Switch flow. */ 897c42f44bdSBing Zhao int actions_n; /**< number of actions. */ 898c42f44bdSBing Zhao void *actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS]; /**< Action list. */ 899014d1cbeSSuanming Mou struct mlx5_flow_dv_encap_decap_resource *encap_decap; 900014d1cbeSSuanming Mou /**< Pointer to encap/decap resource in cache. */ 9018acf8ac9SSuanming Mou struct mlx5_flow_dv_push_vlan_action_resource *push_vlan_res; 9028acf8ac9SSuanming Mou /**< Pointer to push VLAN action resource in cache. */ 9035f114269SSuanming Mou struct mlx5_flow_dv_tag_resource *tag_resource; 9047ac99475SSuanming Mou /**< pointer to the tag action. */ 905f3faf9eaSSuanming Mou struct mlx5_flow_dv_port_id_action_resource *port_id_action; 906f3faf9eaSSuanming Mou /**< Pointer to port ID action resource. */ 9077ac99475SSuanming Mou struct mlx5_flow_dv_jump_tbl_resource *jump; 9087ac99475SSuanming Mou /**< Pointer to the jump action resource. */ 909c42f44bdSBing Zhao struct mlx5_flow_dv_match_params value; 910c42f44bdSBing Zhao /**< Holds the value that the packet is compared to. */ 911b4c0ddbfSJiawei Wang struct mlx5_flow_dv_sample_resource *sample_res; 912b4c0ddbfSJiawei Wang /**< Pointer to the sample action resource. */ 91300c10c22SJiawei Wang struct mlx5_flow_dv_dest_array_resource *dest_array_res; 91400c10c22SJiawei Wang /**< Pointer to the destination array resource. */ 915c42f44bdSBing Zhao }; 916c42f44bdSBing Zhao 917f1ae0b35SOphir Munk #ifdef HAVE_INFINIBAND_VERBS_H 918e7bfa359SBing Zhao /* 919e7bfa359SBing Zhao * Maximal Verbs flow specifications & actions size. 920e7bfa359SBing Zhao * Some elements are mutually exclusive, but enough space should be allocated. 921e7bfa359SBing Zhao * Tunnel cases: 1. Max 2 Ethernet + IP(v6 len > v4 len) + TCP/UDP headers. 922e7bfa359SBing Zhao * 2. One tunnel header (exception: GRE + MPLS), 923e7bfa359SBing Zhao * SPEC length: GRE == tunnel. 924e7bfa359SBing Zhao * Actions: 1. 1 Mark OR Flag. 925e7bfa359SBing Zhao * 2. 1 Drop (if any). 926e7bfa359SBing Zhao * 3. No limitation for counters, but it makes no sense to support too 927e7bfa359SBing Zhao * many counters in a single device flow. 928e7bfa359SBing Zhao */ 929e7bfa359SBing Zhao #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT 930e7bfa359SBing Zhao #define MLX5_VERBS_MAX_SPEC_SIZE \ 931e7bfa359SBing Zhao ( \ 932e7bfa359SBing Zhao (2 * (sizeof(struct ibv_flow_spec_eth) + \ 933e7bfa359SBing Zhao sizeof(struct ibv_flow_spec_ipv6) + \ 934e7bfa359SBing Zhao sizeof(struct ibv_flow_spec_tcp_udp)) + \ 935e7bfa359SBing Zhao sizeof(struct ibv_flow_spec_gre) + \ 936e7bfa359SBing Zhao sizeof(struct ibv_flow_spec_mpls)) \ 937e7bfa359SBing Zhao ) 938e7bfa359SBing Zhao #else 939e7bfa359SBing Zhao #define MLX5_VERBS_MAX_SPEC_SIZE \ 940e7bfa359SBing Zhao ( \ 941e7bfa359SBing Zhao (2 * (sizeof(struct ibv_flow_spec_eth) + \ 942e7bfa359SBing Zhao sizeof(struct ibv_flow_spec_ipv6) + \ 943e7bfa359SBing Zhao sizeof(struct ibv_flow_spec_tcp_udp)) + \ 944e7bfa359SBing Zhao sizeof(struct ibv_flow_spec_tunnel)) \ 945e7bfa359SBing Zhao ) 946e7bfa359SBing Zhao #endif 947e7bfa359SBing Zhao 948e7bfa359SBing Zhao #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \ 949e7bfa359SBing Zhao defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45) 950e7bfa359SBing Zhao #define MLX5_VERBS_MAX_ACT_SIZE \ 951e7bfa359SBing Zhao ( \ 952e7bfa359SBing Zhao sizeof(struct ibv_flow_spec_action_tag) + \ 953e7bfa359SBing Zhao sizeof(struct ibv_flow_spec_action_drop) + \ 954e7bfa359SBing Zhao sizeof(struct ibv_flow_spec_counter_action) * 4 \ 955e7bfa359SBing Zhao ) 956e7bfa359SBing Zhao #else 957e7bfa359SBing Zhao #define MLX5_VERBS_MAX_ACT_SIZE \ 958e7bfa359SBing Zhao ( \ 959e7bfa359SBing Zhao sizeof(struct ibv_flow_spec_action_tag) + \ 960e7bfa359SBing Zhao sizeof(struct ibv_flow_spec_action_drop) \ 961e7bfa359SBing Zhao ) 962e7bfa359SBing Zhao #endif 963e7bfa359SBing Zhao 964e7bfa359SBing Zhao #define MLX5_VERBS_MAX_SPEC_ACT_SIZE \ 965e7bfa359SBing Zhao (MLX5_VERBS_MAX_SPEC_SIZE + MLX5_VERBS_MAX_ACT_SIZE) 966e7bfa359SBing Zhao 967c42f44bdSBing Zhao /** Device flow structure only for Verbs flow creation. */ 968e7bfa359SBing Zhao struct mlx5_flow_verbs_workspace { 969c42f44bdSBing Zhao unsigned int size; /**< Size of the attribute. */ 970e7bfa359SBing Zhao struct ibv_flow_attr attr; /**< Verbs flow attribute buffer. */ 971e7bfa359SBing Zhao uint8_t specs[MLX5_VERBS_MAX_SPEC_ACT_SIZE]; 972e7bfa359SBing Zhao /**< Specifications & actions buffer of verbs flow. */ 973c42f44bdSBing Zhao }; 974f1ae0b35SOphir Munk #endif /* HAVE_INFINIBAND_VERBS_H */ 975c42f44bdSBing Zhao 976ae2927cdSJiawei Wang #define MLX5_SCALE_FLOW_GROUP_BIT 0 977ae2927cdSJiawei Wang #define MLX5_SCALE_JUMP_FLOW_GROUP_BIT 1 978ae2927cdSJiawei Wang 979e7bfa359SBing Zhao /** Maximal number of device sub-flows supported. */ 98062919d32SGregory Etelson #define MLX5_NUM_MAX_DEV_FLOWS 64 981e7bfa359SBing Zhao 9828c5a231bSGregory Etelson /** 9838c5a231bSGregory Etelson * tunnel offload rules type 9848c5a231bSGregory Etelson */ 9858c5a231bSGregory Etelson enum mlx5_tof_rule_type { 9868c5a231bSGregory Etelson MLX5_TUNNEL_OFFLOAD_NONE = 0, 9878c5a231bSGregory Etelson MLX5_TUNNEL_OFFLOAD_SET_RULE, 9888c5a231bSGregory Etelson MLX5_TUNNEL_OFFLOAD_MATCH_RULE, 9898c5a231bSGregory Etelson MLX5_TUNNEL_OFFLOAD_MISS_RULE, 9908c5a231bSGregory Etelson }; 9918c5a231bSGregory Etelson 992c42f44bdSBing Zhao /** Device flow structure. */ 9939ade91dfSJiawei Wang __extension__ 994c42f44bdSBing Zhao struct mlx5_flow { 995c42f44bdSBing Zhao struct rte_flow *flow; /**< Pointer to the main flow. */ 996fa2d01c8SDong Zhou uint32_t flow_idx; /**< The memory pool index to the main flow. */ 9976ad7cfaaSDekel Peled uint64_t hash_fields; /**< Hash Rx queue hash fields. */ 998488d13abSSuanming Mou uint64_t act_flags; 999488d13abSSuanming Mou /**< Bit-fields of detected actions, see MLX5_FLOW_ACTION_*. */ 1000b67b4ecbSDekel Peled bool external; /**< true if the flow is created external to PMD. */ 10019ade91dfSJiawei Wang uint8_t ingress:1; /**< 1 if the flow is ingress. */ 1002ae2927cdSJiawei Wang uint8_t skip_scale:2; 10030e04e1e2SXueming Li uint8_t symmetric_hash_function:1; 1004ae2927cdSJiawei Wang /** 1005ae2927cdSJiawei Wang * Each Bit be set to 1 if Skip the scale the flow group with factor. 1006ae2927cdSJiawei Wang * If bit0 be set to 1, then skip the scale the original flow group; 1007ae2927cdSJiawei Wang * If bit1 be set to 1, then skip the scale the jump flow group if 1008ae2927cdSJiawei Wang * having jump action. 1009ae2927cdSJiawei Wang * 00: Enable scale in a flow, default value. 1010ae2927cdSJiawei Wang * 01: Skip scale the flow group with factor, enable scale the group 1011ae2927cdSJiawei Wang * of jump action. 1012ae2927cdSJiawei Wang * 10: Enable scale the group with factor, skip scale the group of 1013ae2927cdSJiawei Wang * jump action. 1014ae2927cdSJiawei Wang * 11: Skip scale the table with factor both for flow group and jump 1015ae2927cdSJiawei Wang * group. 1016ae2927cdSJiawei Wang */ 1017c42f44bdSBing Zhao union { 1018f1ae0b35SOphir Munk #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H) 1019e7bfa359SBing Zhao struct mlx5_flow_dv_workspace dv; 1020c42f44bdSBing Zhao #endif 1021f1ae0b35SOphir Munk #ifdef HAVE_INFINIBAND_VERBS_H 1022e7bfa359SBing Zhao struct mlx5_flow_verbs_workspace verbs; 1023f1ae0b35SOphir Munk #endif 1024c42f44bdSBing Zhao }; 1025e7bfa359SBing Zhao struct mlx5_flow_handle *handle; 1026b88341caSSuanming Mou uint32_t handle_idx; /* Index of the mlx5 flow handle memory. */ 10274ec6360dSGregory Etelson const struct mlx5_flow_tunnel *tunnel; 10288c5a231bSGregory Etelson enum mlx5_tof_rule_type tof_type; 102984c406e7SOri Kam }; 103084c406e7SOri Kam 103133e01809SSuanming Mou /* Flow meter state. */ 103233e01809SSuanming Mou #define MLX5_FLOW_METER_DISABLE 0 103333e01809SSuanming Mou #define MLX5_FLOW_METER_ENABLE 1 103433e01809SSuanming Mou 103529efa63aSLi Zhang #define MLX5_ASO_WQE_CQE_RESPONSE_DELAY 10u 103629efa63aSLi Zhang #define MLX5_MTR_POLL_WQE_CQE_TIMES 100000u 1037e6100c7bSLi Zhang 1038ebaf1b31SBing Zhao #define MLX5_CT_POLL_WQE_CQE_TIMES MLX5_MTR_POLL_WQE_CQE_TIMES 1039ebaf1b31SBing Zhao 10403bd26b23SSuanming Mou #define MLX5_MAN_WIDTH 8 1041e6100c7bSLi Zhang /* Legacy Meter parameter structure. */ 1042e6100c7bSLi Zhang struct mlx5_legacy_flow_meter { 1043e6100c7bSLi Zhang struct mlx5_flow_meter_info fm; 1044e6100c7bSLi Zhang /* Must be the first in struct. */ 1045e6100c7bSLi Zhang TAILQ_ENTRY(mlx5_legacy_flow_meter) next; 10463f373f35SSuanming Mou /**< Pointer to the next flow meter structure. */ 104744432018SLi Zhang uint32_t idx; 104844432018SLi Zhang /* Index to meter object. */ 10493bd26b23SSuanming Mou }; 10503bd26b23SSuanming Mou 10514ec6360dSGregory Etelson #define MLX5_MAX_TUNNELS 256 10524ec6360dSGregory Etelson #define MLX5_TNL_MISS_RULE_PRIORITY 3 10534ec6360dSGregory Etelson #define MLX5_TNL_MISS_FDB_JUMP_GRP 0x1234faac 10544ec6360dSGregory Etelson 10554ec6360dSGregory Etelson /* 10564ec6360dSGregory Etelson * When tunnel offload is active, all JUMP group ids are converted 10574ec6360dSGregory Etelson * using the same method. That conversion is applied both to tunnel and 10584ec6360dSGregory Etelson * regular rule types. 10594ec6360dSGregory Etelson * Group ids used in tunnel rules are relative to it's tunnel (!). 10604ec6360dSGregory Etelson * Application can create number of steer rules, using the same 10614ec6360dSGregory Etelson * tunnel, with different group id in each rule. 10624ec6360dSGregory Etelson * Each tunnel stores its groups internally in PMD tunnel object. 10634ec6360dSGregory Etelson * Groups used in regular rules do not belong to any tunnel and are stored 10644ec6360dSGregory Etelson * in tunnel hub. 10654ec6360dSGregory Etelson */ 10664ec6360dSGregory Etelson 10674ec6360dSGregory Etelson struct mlx5_flow_tunnel { 10684ec6360dSGregory Etelson LIST_ENTRY(mlx5_flow_tunnel) chain; 10694ec6360dSGregory Etelson struct rte_flow_tunnel app_tunnel; /** app tunnel copy */ 10704ec6360dSGregory Etelson uint32_t tunnel_id; /** unique tunnel ID */ 1071e12a0166STyler Retzlaff RTE_ATOMIC(uint32_t) refctn; 10724ec6360dSGregory Etelson struct rte_flow_action action; 10734ec6360dSGregory Etelson struct rte_flow_item item; 10744ec6360dSGregory Etelson struct mlx5_hlist *groups; /** tunnel groups */ 10754ec6360dSGregory Etelson }; 10764ec6360dSGregory Etelson 10774ec6360dSGregory Etelson /** PMD tunnel related context */ 10784ec6360dSGregory Etelson struct mlx5_flow_tunnel_hub { 1079868d2e34SGregory Etelson /* Tunnels list 1080868d2e34SGregory Etelson * Access to the list MUST be MT protected 1081868d2e34SGregory Etelson */ 10824ec6360dSGregory Etelson LIST_HEAD(, mlx5_flow_tunnel) tunnels; 1083868d2e34SGregory Etelson /* protect access to the tunnels list */ 1084868d2e34SGregory Etelson rte_spinlock_t sl; 10854ec6360dSGregory Etelson struct mlx5_hlist *groups; /** non tunnel groups */ 10864ec6360dSGregory Etelson }; 10874ec6360dSGregory Etelson 10884ec6360dSGregory Etelson /* convert jump group to flow table ID in tunnel rules */ 10894ec6360dSGregory Etelson struct tunnel_tbl_entry { 1090961b6774SMatan Azrad struct mlx5_list_entry hash; 10914ec6360dSGregory Etelson uint32_t flow_table; 1092f5b0aed2SSuanming Mou uint32_t tunnel_id; 1093f5b0aed2SSuanming Mou uint32_t group; 10944ec6360dSGregory Etelson }; 10954ec6360dSGregory Etelson 10964ec6360dSGregory Etelson static inline uint32_t 10974ec6360dSGregory Etelson tunnel_id_to_flow_tbl(uint32_t id) 10984ec6360dSGregory Etelson { 10994ec6360dSGregory Etelson return id | (1u << 16); 11004ec6360dSGregory Etelson } 11014ec6360dSGregory Etelson 11024ec6360dSGregory Etelson static inline uint32_t 11034ec6360dSGregory Etelson tunnel_flow_tbl_to_id(uint32_t flow_tbl) 11044ec6360dSGregory Etelson { 11054ec6360dSGregory Etelson return flow_tbl & ~(1u << 16); 11064ec6360dSGregory Etelson } 11074ec6360dSGregory Etelson 11084ec6360dSGregory Etelson union tunnel_tbl_key { 11094ec6360dSGregory Etelson uint64_t val; 11104ec6360dSGregory Etelson struct { 11114ec6360dSGregory Etelson uint32_t tunnel_id; 11124ec6360dSGregory Etelson uint32_t group; 11134ec6360dSGregory Etelson }; 11144ec6360dSGregory Etelson }; 11154ec6360dSGregory Etelson 11164ec6360dSGregory Etelson static inline struct mlx5_flow_tunnel_hub * 11174ec6360dSGregory Etelson mlx5_tunnel_hub(struct rte_eth_dev *dev) 11184ec6360dSGregory Etelson { 11194ec6360dSGregory Etelson struct mlx5_priv *priv = dev->data->dev_private; 11204ec6360dSGregory Etelson return priv->sh->tunnel_hub; 11214ec6360dSGregory Etelson } 11224ec6360dSGregory Etelson 11234ec6360dSGregory Etelson static inline bool 11248c5a231bSGregory Etelson is_tunnel_offload_active(const struct rte_eth_dev *dev) 11254ec6360dSGregory Etelson { 1126bc1d90a3SGregory Etelson #ifdef HAVE_IBV_FLOW_DV_SUPPORT 11278c5a231bSGregory Etelson const struct mlx5_priv *priv = dev->data->dev_private; 1128a13ec19cSMichael Baum return !!priv->sh->config.dv_miss_info; 1129bc1d90a3SGregory Etelson #else 1130bc1d90a3SGregory Etelson RTE_SET_USED(dev); 1131bc1d90a3SGregory Etelson return false; 1132bc1d90a3SGregory Etelson #endif 11334ec6360dSGregory Etelson } 11344ec6360dSGregory Etelson 11354ec6360dSGregory Etelson static inline bool 11368c5a231bSGregory Etelson is_flow_tunnel_match_rule(enum mlx5_tof_rule_type tof_rule_type) 11374ec6360dSGregory Etelson { 11388c5a231bSGregory Etelson return tof_rule_type == MLX5_TUNNEL_OFFLOAD_MATCH_RULE; 11394ec6360dSGregory Etelson } 11404ec6360dSGregory Etelson 11414ec6360dSGregory Etelson static inline bool 11428c5a231bSGregory Etelson is_flow_tunnel_steer_rule(enum mlx5_tof_rule_type tof_rule_type) 11434ec6360dSGregory Etelson { 11448c5a231bSGregory Etelson return tof_rule_type == MLX5_TUNNEL_OFFLOAD_SET_RULE; 11454ec6360dSGregory Etelson } 11464ec6360dSGregory Etelson 11474ec6360dSGregory Etelson static inline const struct mlx5_flow_tunnel * 11484ec6360dSGregory Etelson flow_actions_to_tunnel(const struct rte_flow_action actions[]) 11494ec6360dSGregory Etelson { 11504ec6360dSGregory Etelson return actions[0].conf; 11514ec6360dSGregory Etelson } 11524ec6360dSGregory Etelson 11534ec6360dSGregory Etelson static inline const struct mlx5_flow_tunnel * 11544ec6360dSGregory Etelson flow_items_to_tunnel(const struct rte_flow_item items[]) 11554ec6360dSGregory Etelson { 11564ec6360dSGregory Etelson return items[0].spec; 11574ec6360dSGregory Etelson } 11584ec6360dSGregory Etelson 11590f4aa72bSSuanming Mou /** 1160c23626f2SMichael Baum * Gets the tag array given for RTE_FLOW_FIELD_TAG type. 1161c23626f2SMichael Baum * 1162c23626f2SMichael Baum * In old API the value was provided in "level" field, but in new API 1163c23626f2SMichael Baum * it is provided in "tag_array" field. Since encapsulation level is not 1164c23626f2SMichael Baum * relevant for metadata, the tag array can be still provided in "level" 1165c23626f2SMichael Baum * for backwards compatibility. 1166c23626f2SMichael Baum * 1167c23626f2SMichael Baum * @param[in] data 1168c23626f2SMichael Baum * Pointer to tag modify data structure. 1169c23626f2SMichael Baum * 1170c23626f2SMichael Baum * @return 1171c23626f2SMichael Baum * Tag array index. 1172c23626f2SMichael Baum */ 1173c23626f2SMichael Baum static inline uint8_t 117477edfda9SSuanming Mou flow_tag_index_get(const struct rte_flow_field_data *data) 1175c23626f2SMichael Baum { 1176c23626f2SMichael Baum return data->tag_index ? data->tag_index : data->level; 1177c23626f2SMichael Baum } 1178c23626f2SMichael Baum 1179c23626f2SMichael Baum /** 11800f4aa72bSSuanming Mou * Fetch 1, 2, 3 or 4 byte field from the byte array 11810f4aa72bSSuanming Mou * and return as unsigned integer in host-endian format. 11820f4aa72bSSuanming Mou * 11830f4aa72bSSuanming Mou * @param[in] data 11840f4aa72bSSuanming Mou * Pointer to data array. 11850f4aa72bSSuanming Mou * @param[in] size 11860f4aa72bSSuanming Mou * Size of field to extract. 11870f4aa72bSSuanming Mou * 11880f4aa72bSSuanming Mou * @return 11890f4aa72bSSuanming Mou * converted field in host endian format. 11900f4aa72bSSuanming Mou */ 11910f4aa72bSSuanming Mou static inline uint32_t 11920f4aa72bSSuanming Mou flow_dv_fetch_field(const uint8_t *data, uint32_t size) 11930f4aa72bSSuanming Mou { 11940f4aa72bSSuanming Mou uint32_t ret; 11950f4aa72bSSuanming Mou 11960f4aa72bSSuanming Mou switch (size) { 11970f4aa72bSSuanming Mou case 1: 11980f4aa72bSSuanming Mou ret = *data; 11990f4aa72bSSuanming Mou break; 12000f4aa72bSSuanming Mou case 2: 12010f4aa72bSSuanming Mou ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data); 12020f4aa72bSSuanming Mou break; 12030f4aa72bSSuanming Mou case 3: 12040f4aa72bSSuanming Mou ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data); 12050f4aa72bSSuanming Mou ret = (ret << 8) | *(data + sizeof(uint16_t)); 12060f4aa72bSSuanming Mou break; 12070f4aa72bSSuanming Mou case 4: 12080f4aa72bSSuanming Mou ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data); 12090f4aa72bSSuanming Mou break; 12100f4aa72bSSuanming Mou default: 12110f4aa72bSSuanming Mou MLX5_ASSERT(false); 12120f4aa72bSSuanming Mou ret = 0; 12130f4aa72bSSuanming Mou break; 12140f4aa72bSSuanming Mou } 12150f4aa72bSSuanming Mou return ret; 12160f4aa72bSSuanming Mou } 12170f4aa72bSSuanming Mou 12183c37110eSMichael Baum static inline bool 12193c37110eSMichael Baum flow_modify_field_support_tag_array(enum rte_flow_field_id field) 12203c37110eSMichael Baum { 12219e21f6cdSBing Zhao switch ((int)field) { 12223c37110eSMichael Baum case RTE_FLOW_FIELD_TAG: 12234580dcecSMichael Baum case RTE_FLOW_FIELD_MPLS: 12249e21f6cdSBing Zhao case MLX5_RTE_FLOW_FIELD_META_REG: 12253c37110eSMichael Baum return true; 12263c37110eSMichael Baum default: 12273c37110eSMichael Baum break; 12283c37110eSMichael Baum } 12293c37110eSMichael Baum return false; 12303c37110eSMichael Baum } 12313c37110eSMichael Baum 12320f4aa72bSSuanming Mou struct field_modify_info { 12330f4aa72bSSuanming Mou uint32_t size; /* Size of field in protocol header, in bytes. */ 12340f4aa72bSSuanming Mou uint32_t offset; /* Offset of field in protocol header, in bytes. */ 12350f4aa72bSSuanming Mou enum mlx5_modification_field id; 12366b6c0b8dSRongwei Liu uint32_t shift; 12376b6c0b8dSRongwei Liu uint8_t is_flex; /* Temporary indicator for flex item modify filed WA. */ 12380f4aa72bSSuanming Mou }; 12390f4aa72bSSuanming Mou 124075a00812SSuanming Mou /* HW steering flow attributes. */ 124175a00812SSuanming Mou struct mlx5_flow_attr { 124275a00812SSuanming Mou uint32_t port_id; /* Port index. */ 124375a00812SSuanming Mou uint32_t group; /* Flow group. */ 124475a00812SSuanming Mou uint32_t priority; /* Original Priority. */ 124575a00812SSuanming Mou /* rss level, used by priority adjustment. */ 124675a00812SSuanming Mou uint32_t rss_level; 124775a00812SSuanming Mou /* Action flags, used by priority adjustment. */ 124875a00812SSuanming Mou uint32_t act_flags; 124975a00812SSuanming Mou uint32_t tbl_type; /* Flow table type. */ 125075a00812SSuanming Mou }; 125175a00812SSuanming Mou 125284c406e7SOri Kam /* Flow structure. */ 1253e7750639SAndre Muezerie struct __rte_packed_begin rte_flow { 1254b88341caSSuanming Mou uint32_t dev_handles; 1255e7bfa359SBing Zhao /**< Device flow handles that are part of the flow. */ 1256b4edeaf3SSuanming Mou uint32_t type:2; 12570136df99SSuanming Mou uint32_t drv_type:2; /**< Driver type. */ 12584ec6360dSGregory Etelson uint32_t tunnel:1; 1259e6100c7bSLi Zhang uint32_t meter:24; /**< Holds flow meter id. */ 12602d084f69SBing Zhao uint32_t indirect_type:2; /**< Indirect action type. */ 1261654ebd8cSGregory Etelson uint32_t matcher_selector:1; /**< Matcher index in resizable table. */ 12620136df99SSuanming Mou uint32_t rix_mreg_copy; 12630136df99SSuanming Mou /**< Index to metadata register copy table resource. */ 12640136df99SSuanming Mou uint32_t counter; /**< Holds flow counter. */ 12654ec6360dSGregory Etelson uint32_t tunnel_id; /**< Tunnel id */ 12662d084f69SBing Zhao union { 1267f935ed4bSDekel Peled uint32_t age; /**< Holds ASO age bit index. */ 12682d084f69SBing Zhao uint32_t ct; /**< Holds ASO CT index. */ 12692d084f69SBing Zhao }; 1270f15f0c38SShiri Kuzin uint32_t geneve_tlv_option; /**< Holds Geneve TLV option id. > */ 1271e7750639SAndre Muezerie } __rte_packed_end; 12722720f833SYongseok Koh 127304a4de75SMichael Baum /* 127404a4de75SMichael Baum * HWS COUNTER ID's layout 127504a4de75SMichael Baum * 3 2 1 0 127604a4de75SMichael Baum * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 127704a4de75SMichael Baum * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 127804a4de75SMichael Baum * | T | | D | | 127904a4de75SMichael Baum * ~ Y | | C | IDX ~ 128004a4de75SMichael Baum * | P | | S | | 128104a4de75SMichael Baum * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ 128204a4de75SMichael Baum * 128304a4de75SMichael Baum * Bit 31:29 = TYPE = MLX5_INDIRECT_ACTION_TYPE_COUNT = b'10 128404a4de75SMichael Baum * Bit 25:24 = DCS index 128504a4de75SMichael Baum * Bit 23:00 = IDX in this counter belonged DCS bulk. 128604a4de75SMichael Baum */ 128704a4de75SMichael Baum typedef uint32_t cnt_id_t; 128804a4de75SMichael Baum 128942431df9SSuanming Mou #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H) 129042431df9SSuanming Mou 12917cfb022bSDariusz Sosnowski enum { 12927cfb022bSDariusz Sosnowski MLX5_FLOW_HW_FLOW_OP_TYPE_NONE, 12937cfb022bSDariusz Sosnowski MLX5_FLOW_HW_FLOW_OP_TYPE_CREATE, 12947cfb022bSDariusz Sosnowski MLX5_FLOW_HW_FLOW_OP_TYPE_DESTROY, 12957cfb022bSDariusz Sosnowski MLX5_FLOW_HW_FLOW_OP_TYPE_UPDATE, 12967cfb022bSDariusz Sosnowski MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_CREATE, 12977cfb022bSDariusz Sosnowski MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_DESTROY, 12987cfb022bSDariusz Sosnowski MLX5_FLOW_HW_FLOW_OP_TYPE_RSZ_TBL_MOVE, 12997cfb022bSDariusz Sosnowski }; 13007cfb022bSDariusz Sosnowski 13012fda185aSDariusz Sosnowski enum { 13022fda185aSDariusz Sosnowski MLX5_FLOW_HW_FLOW_FLAG_CNT_ID = RTE_BIT32(0), 13032fda185aSDariusz Sosnowski MLX5_FLOW_HW_FLOW_FLAG_FATE_JUMP = RTE_BIT32(1), 13042fda185aSDariusz Sosnowski MLX5_FLOW_HW_FLOW_FLAG_FATE_HRXQ = RTE_BIT32(2), 13052fda185aSDariusz Sosnowski MLX5_FLOW_HW_FLOW_FLAG_AGE_IDX = RTE_BIT32(3), 13062fda185aSDariusz Sosnowski MLX5_FLOW_HW_FLOW_FLAG_MTR_ID = RTE_BIT32(4), 13072fda185aSDariusz Sosnowski MLX5_FLOW_HW_FLOW_FLAG_MATCHER_SELECTOR = RTE_BIT32(5), 13082fda185aSDariusz Sosnowski MLX5_FLOW_HW_FLOW_FLAG_UPD_FLOW = RTE_BIT32(6), 13092fda185aSDariusz Sosnowski }; 13102fda185aSDariusz Sosnowski 13112fda185aSDariusz Sosnowski #define MLX5_FLOW_HW_FLOW_FLAGS_ALL ( \ 13122fda185aSDariusz Sosnowski MLX5_FLOW_HW_FLOW_FLAG_CNT_ID | \ 13132fda185aSDariusz Sosnowski MLX5_FLOW_HW_FLOW_FLAG_FATE_JUMP | \ 13142fda185aSDariusz Sosnowski MLX5_FLOW_HW_FLOW_FLAG_FATE_HRXQ | \ 13152fda185aSDariusz Sosnowski MLX5_FLOW_HW_FLOW_FLAG_AGE_IDX | \ 13162fda185aSDariusz Sosnowski MLX5_FLOW_HW_FLOW_FLAG_MTR_ID | \ 13172fda185aSDariusz Sosnowski MLX5_FLOW_HW_FLOW_FLAG_MATCHER_SELECTOR | \ 13182fda185aSDariusz Sosnowski MLX5_FLOW_HW_FLOW_FLAG_UPD_FLOW \ 13192fda185aSDariusz Sosnowski ) 13202fda185aSDariusz Sosnowski 132122681deeSAlex Vesker #ifdef PEDANTIC 132222681deeSAlex Vesker #pragma GCC diagnostic ignored "-Wpedantic" 132322681deeSAlex Vesker #endif 132422681deeSAlex Vesker 1325e38776c3SMaayan Kashani #define MLX5_DR_RULE_SIZE 72 1326e38776c3SMaayan Kashani 1327ae67e3c4SGregory Etelson SLIST_HEAD(mlx5_nta_rss_flow_head, rte_flow_hw); 1328ae67e3c4SGregory Etelson 1329e38776c3SMaayan Kashani /** HWS non template flow data. */ 1330e38776c3SMaayan Kashani struct rte_flow_nt2hws { 1331e38776c3SMaayan Kashani /** BWC rule pointer. */ 1332e38776c3SMaayan Kashani struct mlx5dr_bwc_rule *nt_rule; 1333e38776c3SMaayan Kashani /** The matcher for non template api. */ 1334e38776c3SMaayan Kashani struct mlx5_flow_dv_matcher *matcher; 133527d171b8SMaayan Kashani /**< Auxiliary data stored per flow. */ 133627d171b8SMaayan Kashani struct rte_flow_hw_aux *flow_aux; 1337ff4064d5SMaayan Kashani /** Modify header pointer. */ 1338ff4064d5SMaayan Kashani struct mlx5_flow_dv_modify_hdr_resource *modify_hdr; 133981676f7fSMaayan Kashani /** Chain NTA flows. */ 134081676f7fSMaayan Kashani SLIST_ENTRY(rte_flow_hw) next; 1341ff4064d5SMaayan Kashani /** Encap/decap index. */ 1342ff4064d5SMaayan Kashani uint32_t rix_encap_decap; 1343821a6a5cSBing Zhao uint32_t rix_mreg_copy; 1344ae67e3c4SGregory Etelson uint8_t chaned_flow; 134581676f7fSMaayan Kashani }; 1346e38776c3SMaayan Kashani 1347b2dc01c1SDariusz Sosnowski /** HWS flow struct. */ 1348c40c061aSSuanming Mou struct rte_flow_hw { 1349e38776c3SMaayan Kashani union { 1350b2dc01c1SDariusz Sosnowski /** The table flow allcated from. */ 1351b2dc01c1SDariusz Sosnowski struct rte_flow_template_table *table; 1352e38776c3SMaayan Kashani /** Data needed for non template flows. */ 1353e38776c3SMaayan Kashani struct rte_flow_nt2hws *nt2hws; 1354e38776c3SMaayan Kashani }; 1355b2dc01c1SDariusz Sosnowski /** Application's private data passed to enqueued flow operation. */ 1356b2dc01c1SDariusz Sosnowski void *user_data; 135781676f7fSMaayan Kashani union { 135881676f7fSMaayan Kashani /** Jump action. */ 135981676f7fSMaayan Kashani struct mlx5_hw_jump_action *jump; 136081676f7fSMaayan Kashani /** TIR action. */ 136181676f7fSMaayan Kashani struct mlx5_hrxq *hrxq; 136281676f7fSMaayan Kashani }; 1363b2dc01c1SDariusz Sosnowski /** Flow index from indexed pool. */ 1364b2dc01c1SDariusz Sosnowski uint32_t idx; 1365b2dc01c1SDariusz Sosnowski /** Resource index from indexed pool. */ 1366b2dc01c1SDariusz Sosnowski uint32_t res_idx; 1367b2dc01c1SDariusz Sosnowski /** HWS flow rule index passed to mlx5dr. */ 136860db7673SAlexander Kozyrev uint32_t rule_idx; 13692fda185aSDariusz Sosnowski /** Which flow fields (inline or in auxiliary struct) are used. */ 13702fda185aSDariusz Sosnowski uint32_t flags; 137181676f7fSMaayan Kashani /** COUNT action index. */ 137281676f7fSMaayan Kashani cnt_id_t cnt_id; 1373b2dc01c1SDariusz Sosnowski /** Ongoing flow operation type. */ 1374b2dc01c1SDariusz Sosnowski uint8_t operation_type; 1375b2dc01c1SDariusz Sosnowski /** Index of pattern template this flow is based on. */ 1376b2dc01c1SDariusz Sosnowski uint8_t mt_idx; 137727d171b8SMaayan Kashani /** Equals true if it is non template rule. */ 137827d171b8SMaayan Kashani bool nt_rule; 1379b2dc01c1SDariusz Sosnowski /** 1380b2dc01c1SDariusz Sosnowski * Padding for alignment to 56 bytes. 1381b2dc01c1SDariusz Sosnowski * Since mlx5dr rule is 72 bytes, whole flow is contained within 128 B (2 cache lines). 1382b2dc01c1SDariusz Sosnowski * This space is reserved for future additions to flow struct. 1383b2dc01c1SDariusz Sosnowski */ 138427d171b8SMaayan Kashani uint8_t padding[9]; 1385b2dc01c1SDariusz Sosnowski /** HWS layer data struct. */ 1386b2dc01c1SDariusz Sosnowski uint8_t rule[]; 138781676f7fSMaayan Kashani }; 1388c40c061aSSuanming Mou 1389b2dc01c1SDariusz Sosnowski /** Auxiliary data fields that are updatable. */ 1390b2dc01c1SDariusz Sosnowski struct rte_flow_hw_aux_fields { 1391b2dc01c1SDariusz Sosnowski /** AGE action index. */ 1392b2dc01c1SDariusz Sosnowski uint32_t age_idx; 1393b2dc01c1SDariusz Sosnowski /** Direct meter (METER or METER_MARK) action index. */ 1394b2dc01c1SDariusz Sosnowski uint32_t mtr_id; 1395b2dc01c1SDariusz Sosnowski }; 1396b2dc01c1SDariusz Sosnowski 139771c7abd2SDariusz Sosnowski /** Auxiliary data stored per flow which is not required to be stored in main flow structure. */ 139871c7abd2SDariusz Sosnowski struct rte_flow_hw_aux { 1399b2dc01c1SDariusz Sosnowski /** Auxiliary fields associated with the original flow. */ 1400b2dc01c1SDariusz Sosnowski struct rte_flow_hw_aux_fields orig; 1401b2dc01c1SDariusz Sosnowski /** Auxiliary fields associated with the updated flow. */ 1402b2dc01c1SDariusz Sosnowski struct rte_flow_hw_aux_fields upd; 1403b2dc01c1SDariusz Sosnowski /** Index of resizable matcher associated with this flow. */ 1404b2dc01c1SDariusz Sosnowski uint8_t matcher_selector; 140571c7abd2SDariusz Sosnowski /** Placeholder flow struct used during flow rule update operation. */ 140671c7abd2SDariusz Sosnowski struct rte_flow_hw upd_flow; 140771c7abd2SDariusz Sosnowski }; 140871c7abd2SDariusz Sosnowski 140922681deeSAlex Vesker #ifdef PEDANTIC 141022681deeSAlex Vesker #pragma GCC diagnostic error "-Wpedantic" 141122681deeSAlex Vesker #endif 141222681deeSAlex Vesker 1413e26f50adSGregory Etelson struct mlx5_action_construct_data; 1414e26f50adSGregory Etelson typedef int 1415e26f50adSGregory Etelson (*indirect_list_callback_t)(struct rte_eth_dev *, 1416e26f50adSGregory Etelson const struct mlx5_action_construct_data *, 1417e26f50adSGregory Etelson const struct rte_flow_action *, 1418e26f50adSGregory Etelson struct mlx5dr_rule_action *); 14193564e928SGregory Etelson 1420f13fab23SSuanming Mou /* rte flow action translate to DR action struct. */ 1421f13fab23SSuanming Mou struct mlx5_action_construct_data { 1422f13fab23SSuanming Mou LIST_ENTRY(mlx5_action_construct_data) next; 1423f13fab23SSuanming Mou /* Ensure the action types are matched. */ 1424f13fab23SSuanming Mou int type; 1425f13fab23SSuanming Mou uint32_t idx; /* Data index. */ 1426f13fab23SSuanming Mou uint16_t action_src; /* rte_flow_action src offset. */ 1427f13fab23SSuanming Mou uint16_t action_dst; /* mlx5dr_rule_action dst offset. */ 1428e26f50adSGregory Etelson indirect_list_callback_t indirect_list_cb; 14297ab3962dSSuanming Mou union { 14307ab3962dSSuanming Mou struct { 14314e07b130SDariusz Sosnowski /* Expected type of indirection action. */ 14324e07b130SDariusz Sosnowski enum rte_flow_action_type expected_type; 14334e07b130SDariusz Sosnowski } indirect; 14344e07b130SDariusz Sosnowski struct { 1435fe3620aaSSuanming Mou /* encap data len. */ 1436fe3620aaSSuanming Mou uint16_t len; 1437fe3620aaSSuanming Mou } encap; 1438fe3620aaSSuanming Mou struct { 14390f4aa72bSSuanming Mou /* Modify header action offset in pattern. */ 14400f4aa72bSSuanming Mou uint16_t mhdr_cmds_off; 14410f4aa72bSSuanming Mou /* Offset in pattern after modify header actions. */ 14420f4aa72bSSuanming Mou uint16_t mhdr_cmds_end; 14430f4aa72bSSuanming Mou /* 14440f4aa72bSSuanming Mou * True if this action is masked and does not need to 14450f4aa72bSSuanming Mou * be generated. 14460f4aa72bSSuanming Mou */ 14470f4aa72bSSuanming Mou bool shared; 14480f4aa72bSSuanming Mou /* 14490f4aa72bSSuanming Mou * Modified field definitions in dst field (SET, ADD) 14500f4aa72bSSuanming Mou * or src field (COPY). 14510f4aa72bSSuanming Mou */ 14520f4aa72bSSuanming Mou struct field_modify_info field[MLX5_ACT_MAX_MOD_FIELDS]; 14530f4aa72bSSuanming Mou /* Modified field definitions in dst field (COPY). */ 14540f4aa72bSSuanming Mou struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS]; 14550f4aa72bSSuanming Mou /* 14560f4aa72bSSuanming Mou * Masks applied to field values to generate 14570f4aa72bSSuanming Mou * PRM actions. 14580f4aa72bSSuanming Mou */ 14590f4aa72bSSuanming Mou uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS]; 1460c145898cSDariusz Sosnowski /* Copy of action passed to the action template. */ 1461c145898cSDariusz Sosnowski struct rte_flow_action_modify_field action; 14620f4aa72bSSuanming Mou } modify_header; 14630f4aa72bSSuanming Mou struct { 14640e04e1e2SXueming Li bool symmetric_hash_function; /* Symmetric RSS hash */ 14657ab3962dSSuanming Mou uint64_t types; /* RSS hash types. */ 14667ab3962dSSuanming Mou uint32_t level; /* RSS level. */ 14677ab3962dSSuanming Mou uint32_t idx; /* Shared action index. */ 14687ab3962dSSuanming Mou } shared_rss; 14694d368e1dSXiaoyu Min struct { 147004a4de75SMichael Baum cnt_id_t id; 14714d368e1dSXiaoyu Min } shared_counter; 147248fbb0e9SAlexander Kozyrev struct { 14731be65c39SRongwei Liu /* IPv6 extension push data len. */ 14741be65c39SRongwei Liu uint16_t len; 14751be65c39SRongwei Liu } ipv6_ext; 14761be65c39SRongwei Liu struct { 147748fbb0e9SAlexander Kozyrev uint32_t id; 1478e26f50adSGregory Etelson uint32_t conf_masked:1; 147948fbb0e9SAlexander Kozyrev } shared_meter; 14807ab3962dSSuanming Mou }; 1481f13fab23SSuanming Mou }; 1482f13fab23SSuanming Mou 1483f5177bdcSMichael Baum #define MAX_GENEVE_OPTIONS_RESOURCES 7 1484f5177bdcSMichael Baum 148585738168SMichael Baum /* GENEVE TLV options manager structure. */ 148685738168SMichael Baum struct mlx5_geneve_tlv_options_mng { 148785738168SMichael Baum uint8_t nb_options; /* Number of options inside the template. */ 148885738168SMichael Baum struct { 148985738168SMichael Baum uint8_t opt_type; 149085738168SMichael Baum uint16_t opt_class; 149185738168SMichael Baum } options[MAX_GENEVE_OPTIONS_RESOURCES]; 149285738168SMichael Baum }; 149385738168SMichael Baum 149442431df9SSuanming Mou /* Flow item template struct. */ 149542431df9SSuanming Mou struct rte_flow_pattern_template { 149642431df9SSuanming Mou LIST_ENTRY(rte_flow_pattern_template) next; 149742431df9SSuanming Mou /* Template attributes. */ 149842431df9SSuanming Mou struct rte_flow_pattern_template_attr attr; 149942431df9SSuanming Mou struct mlx5dr_match_template *mt; /* mlx5 match template. */ 15007ab3962dSSuanming Mou uint64_t item_flags; /* Item layer flags. */ 1501483181f7SDariusz Sosnowski uint64_t orig_item_nb; /* Number of pattern items provided by the user (with END item). */ 1502e12a0166STyler Retzlaff RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */ 15031939eb6fSDariusz Sosnowski /* 15041939eb6fSDariusz Sosnowski * If true, then rule pattern should be prepended with 15051939eb6fSDariusz Sosnowski * represented_port pattern item. 15061939eb6fSDariusz Sosnowski */ 15071939eb6fSDariusz Sosnowski bool implicit_port; 1508483181f7SDariusz Sosnowski /* 1509483181f7SDariusz Sosnowski * If true, then rule pattern should be prepended with 1510483181f7SDariusz Sosnowski * tag pattern item for representor matching. 1511483181f7SDariusz Sosnowski */ 1512483181f7SDariusz Sosnowski bool implicit_tag; 151385738168SMichael Baum /* Manages all GENEVE TLV options used by this pattern template. */ 151485738168SMichael Baum struct mlx5_geneve_tlv_options_mng geneve_opt_mng; 15158c0ca752SRongwei Liu uint8_t flex_item; /* flex item index. */ 1516b0e0c9a7SDariusz Sosnowski /* Items on which this pattern template is based on. */ 1517b0e0c9a7SDariusz Sosnowski struct rte_flow_item *items; 151842431df9SSuanming Mou }; 151942431df9SSuanming Mou 1520836b5c9bSSuanming Mou /* Flow action template struct. */ 1521836b5c9bSSuanming Mou struct rte_flow_actions_template { 1522836b5c9bSSuanming Mou LIST_ENTRY(rte_flow_actions_template) next; 1523836b5c9bSSuanming Mou /* Template attributes. */ 1524836b5c9bSSuanming Mou struct rte_flow_actions_template_attr attr; 1525836b5c9bSSuanming Mou struct rte_flow_action *actions; /* Cached flow actions. */ 152601608fceSDariusz Sosnowski struct rte_flow_action *orig_actions; /* Original flow actions. */ 1527836b5c9bSSuanming Mou struct rte_flow_action *masks; /* Cached action masks.*/ 1528f1fecffaSDariusz Sosnowski struct mlx5dr_action_template *tmpl; /* mlx5dr action template. */ 152904a4de75SMichael Baum uint64_t action_flags; /* Bit-map of all valid action in template. */ 1530f1fecffaSDariusz Sosnowski uint16_t dr_actions_num; /* Amount of DR rules actions. */ 1531f1fecffaSDariusz Sosnowski uint16_t actions_num; /* Amount of flow actions */ 1532ca00eb69SGregory Etelson uint16_t *dr_off; /* DR action offset for given rte action offset. */ 1533ca00eb69SGregory Etelson uint16_t *src_off; /* RTE action displacement from app. template */ 1534f1fecffaSDariusz Sosnowski uint16_t reformat_off; /* Offset of DR reformat action. */ 15350f4aa72bSSuanming Mou uint16_t mhdr_off; /* Offset of DR modify header action. */ 15361be65c39SRongwei Liu uint16_t recom_off; /* Offset of DR IPv6 routing push remove action. */ 1537e12a0166STyler Retzlaff RTE_ATOMIC(uint32_t) refcnt; /* Reference counter. */ 15386b6c0b8dSRongwei Liu uint8_t flex_item; /* flex item index. */ 1539836b5c9bSSuanming Mou }; 1540836b5c9bSSuanming Mou 1541d1559d66SSuanming Mou /* Jump action struct. */ 1542d1559d66SSuanming Mou struct mlx5_hw_jump_action { 1543d1559d66SSuanming Mou /* Action jump from root. */ 1544d1559d66SSuanming Mou struct mlx5dr_action *root_action; 1545d1559d66SSuanming Mou /* HW steering jump action. */ 1546d1559d66SSuanming Mou struct mlx5dr_action *hws_action; 1547d1559d66SSuanming Mou }; 1548d1559d66SSuanming Mou 1549fe3620aaSSuanming Mou /* Encap decap action struct. */ 1550fe3620aaSSuanming Mou struct mlx5_hw_encap_decap_action { 15515e26c99fSRongwei Liu struct mlx5_indirect_list indirect; 15525e26c99fSRongwei Liu enum mlx5dr_action_type action_type; 1553fe3620aaSSuanming Mou struct mlx5dr_action *action; /* Action object. */ 15547f6daa49SSuanming Mou /* Is header_reformat action shared across flows in table. */ 15552e543b6fSGregory Etelson uint32_t shared:1; 15562e543b6fSGregory Etelson uint32_t multi_pattern:1; 1557fe3620aaSSuanming Mou size_t data_size; /* Action metadata size. */ 1558fe3620aaSSuanming Mou uint8_t data[]; /* Action data. */ 1559fe3620aaSSuanming Mou }; 1560fe3620aaSSuanming Mou 15611be65c39SRongwei Liu /* Push remove action struct. */ 15621be65c39SRongwei Liu struct mlx5_hw_push_remove_action { 15631be65c39SRongwei Liu struct mlx5dr_action *action; /* Action object. */ 15641be65c39SRongwei Liu /* Is push_remove action shared across flows in table. */ 15651be65c39SRongwei Liu uint8_t shared; 15661be65c39SRongwei Liu size_t data_size; /* Action metadata size. */ 15671be65c39SRongwei Liu uint8_t data[]; /* Action data. */ 15681be65c39SRongwei Liu }; 15690f4aa72bSSuanming Mou 15700f4aa72bSSuanming Mou /* Modify field action struct. */ 15710f4aa72bSSuanming Mou struct mlx5_hw_modify_header_action { 15720f4aa72bSSuanming Mou /* Reference to DR action */ 15730f4aa72bSSuanming Mou struct mlx5dr_action *action; 15740f4aa72bSSuanming Mou /* Modify header action position in action rule table. */ 15750f4aa72bSSuanming Mou uint16_t pos; 15760f4aa72bSSuanming Mou /* Is MODIFY_HEADER action shared across flows in table. */ 15772e543b6fSGregory Etelson uint32_t shared:1; 15782e543b6fSGregory Etelson uint32_t multi_pattern:1; 15790f4aa72bSSuanming Mou /* Amount of modification commands stored in the precompiled buffer. */ 15800f4aa72bSSuanming Mou uint32_t mhdr_cmds_num; 15810f4aa72bSSuanming Mou /* Precompiled modification commands. */ 15820f4aa72bSSuanming Mou struct mlx5_modification_cmd mhdr_cmds[MLX5_MHDR_MAX_CMD]; 15830f4aa72bSSuanming Mou }; 15840f4aa72bSSuanming Mou 1585f13fab23SSuanming Mou /* The maximum actions support in the flow. */ 1586f13fab23SSuanming Mou #define MLX5_HW_MAX_ACTS 16 1587f13fab23SSuanming Mou 1588d1559d66SSuanming Mou /* DR action set struct. */ 1589d1559d66SSuanming Mou struct mlx5_hw_actions { 1590f13fab23SSuanming Mou /* Dynamic action list. */ 1591f13fab23SSuanming Mou LIST_HEAD(act_list, mlx5_action_construct_data) act_list; 1592f13fab23SSuanming Mou struct mlx5_hw_jump_action *jump; /* Jump action. */ 15933a2f674bSSuanming Mou struct mlx5_hrxq *tir; /* TIR action. */ 15940f4aa72bSSuanming Mou struct mlx5_hw_modify_header_action *mhdr; /* Modify header action. */ 1595fe3620aaSSuanming Mou /* Encap/Decap action. */ 1596fe3620aaSSuanming Mou struct mlx5_hw_encap_decap_action *encap_decap; 1597fe3620aaSSuanming Mou uint16_t encap_decap_pos; /* Encap/Decap action position. */ 15981be65c39SRongwei Liu /* Push/remove action. */ 15991be65c39SRongwei Liu struct mlx5_hw_push_remove_action *push_remove; 16001be65c39SRongwei Liu uint16_t push_remove_pos; /* Push/remove action position. */ 16011deadfd7SSuanming Mou uint32_t mark:1; /* Indicate the mark action. */ 160204a4de75SMichael Baum cnt_id_t cnt_id; /* Counter id. */ 160348fbb0e9SAlexander Kozyrev uint32_t mtr_id; /* Meter id. */ 1604f13fab23SSuanming Mou /* Translated DR action array from action template. */ 1605f13fab23SSuanming Mou struct mlx5dr_rule_action rule_acts[MLX5_HW_MAX_ACTS]; 1606d1559d66SSuanming Mou }; 1607d1559d66SSuanming Mou 1608d1559d66SSuanming Mou /* mlx5 action template struct. */ 1609d1559d66SSuanming Mou struct mlx5_hw_action_template { 1610d1559d66SSuanming Mou /* Action template pointer. */ 1611d1559d66SSuanming Mou struct rte_flow_actions_template *action_template; 1612d1559d66SSuanming Mou struct mlx5_hw_actions acts; /* Template actions. */ 1613d1559d66SSuanming Mou }; 1614d1559d66SSuanming Mou 1615d1559d66SSuanming Mou /* mlx5 flow group struct. */ 1616d1559d66SSuanming Mou struct mlx5_flow_group { 1617d1559d66SSuanming Mou struct mlx5_list_entry entry; 16188ce638efSTomer Shmilovich LIST_ENTRY(mlx5_flow_group) next; 16191939eb6fSDariusz Sosnowski struct rte_eth_dev *dev; /* Reference to corresponding device. */ 1620d1559d66SSuanming Mou struct mlx5dr_table *tbl; /* HWS table object. */ 1621d1559d66SSuanming Mou struct mlx5_hw_jump_action jump; /* Jump action. */ 16228ce638efSTomer Shmilovich struct mlx5_flow_group *miss_group; /* Group pointed to by miss action. */ 1623d1559d66SSuanming Mou enum mlx5dr_table_type type; /* Table type. */ 1624d1559d66SSuanming Mou uint32_t group_id; /* Group id. */ 1625d1559d66SSuanming Mou uint32_t idx; /* Group memory index. */ 1626e38776c3SMaayan Kashani /* List of all matchers created for this group in non template api */ 1627e38776c3SMaayan Kashani struct mlx5_list *matchers; 1628d1559d66SSuanming Mou }; 1629d1559d66SSuanming Mou 1630d1559d66SSuanming Mou 1631b7d19ee4SOri Kam #define MLX5_HW_TBL_MAX_ITEM_TEMPLATE 32 1632d1559d66SSuanming Mou #define MLX5_HW_TBL_MAX_ACTION_TEMPLATE 32 1633d1559d66SSuanming Mou 1634f7bd7d96SGregory Etelson #define MLX5_MULTIPATTERN_ENCAP_NUM 5 1635f7bd7d96SGregory Etelson #define MLX5_MAX_TABLE_RESIZE_NUM 64 1636f7bd7d96SGregory Etelson 1637f7bd7d96SGregory Etelson struct mlx5_multi_pattern_segment { 1638654ebd8cSGregory Etelson /* 1639654ebd8cSGregory Etelson * Modify Header Argument Objects number allocated for action in that 1640654ebd8cSGregory Etelson * segment. 1641654ebd8cSGregory Etelson * Capacity is always power of 2. 1642654ebd8cSGregory Etelson */ 1643f7bd7d96SGregory Etelson uint32_t capacity; 1644f7bd7d96SGregory Etelson uint32_t head_index; 1645f7bd7d96SGregory Etelson struct mlx5dr_action *mhdr_action; 1646f7bd7d96SGregory Etelson struct mlx5dr_action *reformat_action[MLX5_MULTIPATTERN_ENCAP_NUM]; 1647f7bd7d96SGregory Etelson }; 1648f7bd7d96SGregory Etelson 1649f7bd7d96SGregory Etelson struct mlx5_tbl_multi_pattern_ctx { 1650f7bd7d96SGregory Etelson struct { 1651f7bd7d96SGregory Etelson uint32_t elements_num; 1652f7bd7d96SGregory Etelson struct mlx5dr_action_reformat_header reformat_hdr[MLX5_HW_TBL_MAX_ACTION_TEMPLATE]; 1653f7bd7d96SGregory Etelson /** 1654f7bd7d96SGregory Etelson * insert_header structure is larger than reformat_header. 1655f7bd7d96SGregory Etelson * Enclosing these structures with union will case a gap between 1656f7bd7d96SGregory Etelson * reformat_hdr array elements. 1657f7bd7d96SGregory Etelson * mlx5dr_action_create_reformat() expects adjacent array elements. 1658f7bd7d96SGregory Etelson */ 1659f7bd7d96SGregory Etelson struct mlx5dr_action_insert_header insert_hdr[MLX5_HW_TBL_MAX_ACTION_TEMPLATE]; 1660f7bd7d96SGregory Etelson } reformat[MLX5_MULTIPATTERN_ENCAP_NUM]; 1661f7bd7d96SGregory Etelson 1662f7bd7d96SGregory Etelson struct { 1663f7bd7d96SGregory Etelson uint32_t elements_num; 1664f7bd7d96SGregory Etelson struct mlx5dr_action_mh_pattern pattern[MLX5_HW_TBL_MAX_ACTION_TEMPLATE]; 1665f7bd7d96SGregory Etelson } mh; 1666f7bd7d96SGregory Etelson struct mlx5_multi_pattern_segment segments[MLX5_MAX_TABLE_RESIZE_NUM]; 1667f7bd7d96SGregory Etelson }; 1668f7bd7d96SGregory Etelson 1669f7bd7d96SGregory Etelson static __rte_always_inline void 1670f7bd7d96SGregory Etelson mlx5_multi_pattern_activate(struct mlx5_tbl_multi_pattern_ctx *mpctx) 1671f7bd7d96SGregory Etelson { 1672f7bd7d96SGregory Etelson mpctx->segments[0].head_index = 1; 1673f7bd7d96SGregory Etelson } 1674f7bd7d96SGregory Etelson 1675f7bd7d96SGregory Etelson static __rte_always_inline bool 1676f7bd7d96SGregory Etelson mlx5_is_multi_pattern_active(const struct mlx5_tbl_multi_pattern_ctx *mpctx) 1677f7bd7d96SGregory Etelson { 1678f7bd7d96SGregory Etelson return mpctx->segments[0].head_index == 1; 1679f7bd7d96SGregory Etelson } 1680f7bd7d96SGregory Etelson 1681ddb68e47SBing Zhao struct mlx5_flow_template_table_cfg { 1682ddb68e47SBing Zhao struct rte_flow_template_table_attr attr; /* Table attributes passed through flow API. */ 1683ddb68e47SBing Zhao bool external; /* True if created by flow API, false if table is internal to PMD. */ 1684ddb68e47SBing Zhao }; 1685ddb68e47SBing Zhao 1686654ebd8cSGregory Etelson struct mlx5_matcher_info { 1687654ebd8cSGregory Etelson struct mlx5dr_matcher *matcher; /* Template matcher. */ 1688af154d7aSAlexander Kozyrev struct mlx5dr_action *jump; /* Jump to matcher action. */ 1689654ebd8cSGregory Etelson RTE_ATOMIC(uint32_t) refcnt; 1690654ebd8cSGregory Etelson }; 1691654ebd8cSGregory Etelson 169227595cd8STyler Retzlaff struct __rte_cache_aligned mlx5_dr_rule_action_container { 1693525cdf79SDariusz Sosnowski struct mlx5dr_rule_action acts[MLX5_HW_MAX_ACTS]; 169427595cd8STyler Retzlaff }; 1695525cdf79SDariusz Sosnowski 1696d1559d66SSuanming Mou struct rte_flow_template_table { 1697d1559d66SSuanming Mou LIST_ENTRY(rte_flow_template_table) next; 1698d1559d66SSuanming Mou struct mlx5_flow_group *grp; /* The group rte_flow_template_table uses. */ 1699654ebd8cSGregory Etelson struct mlx5_matcher_info matcher_info[2]; 1700654ebd8cSGregory Etelson uint32_t matcher_selector; 1701654ebd8cSGregory Etelson rte_rwlock_t matcher_replace_rwlk; /* RW lock for resizable tables */ 1702d1559d66SSuanming Mou /* Item templates bind to the table. */ 1703d1559d66SSuanming Mou struct rte_flow_pattern_template *its[MLX5_HW_TBL_MAX_ITEM_TEMPLATE]; 1704d1559d66SSuanming Mou /* Action templates bind to the table. */ 1705d1559d66SSuanming Mou struct mlx5_hw_action_template ats[MLX5_HW_TBL_MAX_ACTION_TEMPLATE]; 1706d1559d66SSuanming Mou struct mlx5_indexed_pool *flow; /* The table's flow ipool. */ 170771c7abd2SDariusz Sosnowski struct rte_flow_hw_aux *flow_aux; /**< Auxiliary data stored per flow. */ 170863296851SAlexander Kozyrev struct mlx5_indexed_pool *resource; /* The table's resource ipool. */ 1709ddb68e47SBing Zhao struct mlx5_flow_template_table_cfg cfg; 1710d1559d66SSuanming Mou uint32_t type; /* Flow table type RX/TX/FDB. */ 1711d1559d66SSuanming Mou uint8_t nb_item_templates; /* Item template number. */ 1712d1559d66SSuanming Mou uint8_t nb_action_templates; /* Action template number. */ 1713d1559d66SSuanming Mou uint32_t refcnt; /* Table reference counter. */ 1714f7bd7d96SGregory Etelson struct mlx5_tbl_multi_pattern_ctx mpctx; 1715654ebd8cSGregory Etelson struct mlx5dr_matcher_attr matcher_attr; 1716525cdf79SDariusz Sosnowski /** 1717525cdf79SDariusz Sosnowski * Variable length array of containers containing precalculated templates of DR actions 1718525cdf79SDariusz Sosnowski * arrays. This array is allocated at template table creation time and contains 1719525cdf79SDariusz Sosnowski * one container per each queue, per each actions template. 1720525cdf79SDariusz Sosnowski * Essentially rule_acts is a 2-dimensional array indexed with (AT index, queue) pair. 1721525cdf79SDariusz Sosnowski * Each container will provide a local "queue buffer" to work on for flow creation 1722525cdf79SDariusz Sosnowski * operations when using a given actions template. 1723525cdf79SDariusz Sosnowski */ 1724525cdf79SDariusz Sosnowski struct mlx5_dr_rule_action_container rule_acts[]; 1725d1559d66SSuanming Mou }; 1726d1559d66SSuanming Mou 1727654ebd8cSGregory Etelson static __rte_always_inline struct mlx5dr_matcher * 1728654ebd8cSGregory Etelson mlx5_table_matcher(const struct rte_flow_template_table *table) 1729654ebd8cSGregory Etelson { 1730654ebd8cSGregory Etelson return table->matcher_info[table->matcher_selector].matcher; 1731654ebd8cSGregory Etelson } 1732654ebd8cSGregory Etelson 1733654ebd8cSGregory Etelson static __rte_always_inline struct mlx5_multi_pattern_segment * 1734654ebd8cSGregory Etelson mlx5_multi_pattern_segment_find(struct rte_flow_template_table *table, 1735654ebd8cSGregory Etelson uint32_t flow_resource_ix) 1736654ebd8cSGregory Etelson { 1737654ebd8cSGregory Etelson int i; 1738654ebd8cSGregory Etelson struct mlx5_tbl_multi_pattern_ctx *mpctx = &table->mpctx; 1739654ebd8cSGregory Etelson 1740654ebd8cSGregory Etelson if (likely(!rte_flow_template_table_resizable(0, &table->cfg.attr))) 1741654ebd8cSGregory Etelson return &mpctx->segments[0]; 1742654ebd8cSGregory Etelson for (i = 0; i < MLX5_MAX_TABLE_RESIZE_NUM; i++) { 1743654ebd8cSGregory Etelson uint32_t limit = mpctx->segments[i].head_index + 1744654ebd8cSGregory Etelson mpctx->segments[i].capacity; 1745654ebd8cSGregory Etelson 1746654ebd8cSGregory Etelson if (flow_resource_ix < limit) 1747654ebd8cSGregory Etelson return &mpctx->segments[i]; 1748654ebd8cSGregory Etelson } 1749654ebd8cSGregory Etelson return NULL; 1750654ebd8cSGregory Etelson } 1751654ebd8cSGregory Etelson 175210943706SMichael Baum /* 175310943706SMichael Baum * Convert metadata or tag to the actual register. 175410943706SMichael Baum * META: Fixed C_1 for FDB mode, REG_A for NIC TX and REG_B for NIC RX. 175510943706SMichael Baum * TAG: C_x expect meter color reg and the reserved ones. 175610943706SMichael Baum */ 175710943706SMichael Baum static __rte_always_inline int 175810943706SMichael Baum flow_hw_get_reg_id_by_domain(struct rte_eth_dev *dev, 175910943706SMichael Baum enum rte_flow_item_type type, 176010943706SMichael Baum enum mlx5dr_table_type domain_type, uint32_t id) 176110943706SMichael Baum { 176210943706SMichael Baum struct mlx5_dev_ctx_shared *sh = MLX5_SH(dev); 176310943706SMichael Baum struct mlx5_dev_registers *reg = &sh->registers; 176410943706SMichael Baum 176510943706SMichael Baum switch (type) { 176610943706SMichael Baum case RTE_FLOW_ITEM_TYPE_META: 176710943706SMichael Baum if (sh->config.dv_esw_en && 176810943706SMichael Baum sh->config.dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS) { 176910943706SMichael Baum return REG_C_1; 177010943706SMichael Baum } 177110943706SMichael Baum /* 177210943706SMichael Baum * On root table - PMD allows only egress META matching, thus 177310943706SMichael Baum * REG_A matching is sufficient. 177410943706SMichael Baum * 177510943706SMichael Baum * On non-root tables - REG_A corresponds to general_purpose_lookup_field, 177610943706SMichael Baum * which translates to REG_A in NIC TX and to REG_B in NIC RX. 177710943706SMichael Baum * However, current FW does not implement REG_B case right now, so 177810943706SMichael Baum * REG_B case is return explicitly by this function for NIC RX. 177910943706SMichael Baum */ 178010943706SMichael Baum if (domain_type == MLX5DR_TABLE_TYPE_NIC_RX) 178110943706SMichael Baum return REG_B; 178210943706SMichael Baum return REG_A; 178310943706SMichael Baum case RTE_FLOW_ITEM_TYPE_CONNTRACK: 178410943706SMichael Baum case RTE_FLOW_ITEM_TYPE_METER_COLOR: 178510943706SMichael Baum return reg->aso_reg; 178610943706SMichael Baum case RTE_FLOW_ITEM_TYPE_TAG: 178710943706SMichael Baum if (id == RTE_PMD_MLX5_LINEAR_HASH_TAG_INDEX) 178810943706SMichael Baum return REG_C_3; 178910943706SMichael Baum MLX5_ASSERT(id < MLX5_FLOW_HW_TAGS_MAX); 179010943706SMichael Baum return reg->hw_avl_tags[id]; 179110943706SMichael Baum default: 179210943706SMichael Baum return REG_NON; 179310943706SMichael Baum } 179410943706SMichael Baum } 179510943706SMichael Baum 179610943706SMichael Baum static __rte_always_inline int 179710943706SMichael Baum flow_hw_get_reg_id_from_ctx(void *dr_ctx, enum rte_flow_item_type type, 179810943706SMichael Baum enum mlx5dr_table_type domain_type, uint32_t id) 179910943706SMichael Baum { 180010943706SMichael Baum uint16_t port; 180110943706SMichael Baum 180210943706SMichael Baum MLX5_ETH_FOREACH_DEV(port, NULL) { 180310943706SMichael Baum struct mlx5_priv *priv; 180410943706SMichael Baum 180510943706SMichael Baum priv = rte_eth_devices[port].data->dev_private; 180610943706SMichael Baum if (priv->dr_ctx == dr_ctx) 180710943706SMichael Baum return flow_hw_get_reg_id_by_domain(&rte_eth_devices[port], 180810943706SMichael Baum type, domain_type, id); 180910943706SMichael Baum } 181010943706SMichael Baum return REG_NON; 181110943706SMichael Baum } 181210943706SMichael Baum 181342431df9SSuanming Mou #endif 181442431df9SSuanming Mou 1815d7cfcdddSAndrey Vesnovaty /* 1816d7cfcdddSAndrey Vesnovaty * Define list of valid combinations of RX Hash fields 1817d7cfcdddSAndrey Vesnovaty * (see enum ibv_rx_hash_fields). 1818d7cfcdddSAndrey Vesnovaty */ 1819d7cfcdddSAndrey Vesnovaty #define MLX5_RSS_HASH_IPV4 (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4) 1820d7cfcdddSAndrey Vesnovaty #define MLX5_RSS_HASH_IPV4_TCP \ 1821d7cfcdddSAndrey Vesnovaty (MLX5_RSS_HASH_IPV4 | \ 1822c83456cdSDekel Peled IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP) 1823d7cfcdddSAndrey Vesnovaty #define MLX5_RSS_HASH_IPV4_UDP \ 1824d7cfcdddSAndrey Vesnovaty (MLX5_RSS_HASH_IPV4 | \ 1825c83456cdSDekel Peled IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP) 1826d7cfcdddSAndrey Vesnovaty #define MLX5_RSS_HASH_IPV6 (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6) 1827d7cfcdddSAndrey Vesnovaty #define MLX5_RSS_HASH_IPV6_TCP \ 1828d7cfcdddSAndrey Vesnovaty (MLX5_RSS_HASH_IPV6 | \ 1829c83456cdSDekel Peled IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP) 1830d7cfcdddSAndrey Vesnovaty #define MLX5_RSS_HASH_IPV6_UDP \ 1831d7cfcdddSAndrey Vesnovaty (MLX5_RSS_HASH_IPV6 | \ 1832c83456cdSDekel Peled IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP) 1833212d17b6SXiaoyu Min #define MLX5_RSS_HASH_IPV4_SRC_ONLY IBV_RX_HASH_SRC_IPV4 1834212d17b6SXiaoyu Min #define MLX5_RSS_HASH_IPV4_DST_ONLY IBV_RX_HASH_DST_IPV4 1835212d17b6SXiaoyu Min #define MLX5_RSS_HASH_IPV6_SRC_ONLY IBV_RX_HASH_SRC_IPV6 1836212d17b6SXiaoyu Min #define MLX5_RSS_HASH_IPV6_DST_ONLY IBV_RX_HASH_DST_IPV6 1837212d17b6SXiaoyu Min #define MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY \ 1838212d17b6SXiaoyu Min (MLX5_RSS_HASH_IPV4 | IBV_RX_HASH_SRC_PORT_UDP) 1839212d17b6SXiaoyu Min #define MLX5_RSS_HASH_IPV4_UDP_DST_ONLY \ 1840212d17b6SXiaoyu Min (MLX5_RSS_HASH_IPV4 | IBV_RX_HASH_DST_PORT_UDP) 1841212d17b6SXiaoyu Min #define MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY \ 1842212d17b6SXiaoyu Min (MLX5_RSS_HASH_IPV6 | IBV_RX_HASH_SRC_PORT_UDP) 1843212d17b6SXiaoyu Min #define MLX5_RSS_HASH_IPV6_UDP_DST_ONLY \ 1844212d17b6SXiaoyu Min (MLX5_RSS_HASH_IPV6 | IBV_RX_HASH_DST_PORT_UDP) 1845212d17b6SXiaoyu Min #define MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY \ 1846212d17b6SXiaoyu Min (MLX5_RSS_HASH_IPV4 | IBV_RX_HASH_SRC_PORT_TCP) 1847212d17b6SXiaoyu Min #define MLX5_RSS_HASH_IPV4_TCP_DST_ONLY \ 1848212d17b6SXiaoyu Min (MLX5_RSS_HASH_IPV4 | IBV_RX_HASH_DST_PORT_TCP) 1849212d17b6SXiaoyu Min #define MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY \ 1850212d17b6SXiaoyu Min (MLX5_RSS_HASH_IPV6 | IBV_RX_HASH_SRC_PORT_TCP) 1851212d17b6SXiaoyu Min #define MLX5_RSS_HASH_IPV6_TCP_DST_ONLY \ 1852212d17b6SXiaoyu Min (MLX5_RSS_HASH_IPV6 | IBV_RX_HASH_DST_PORT_TCP) 185318ca4a4eSRaja Zidane 185418ca4a4eSRaja Zidane #ifndef HAVE_IBV_RX_HASH_IPSEC_SPI 185518ca4a4eSRaja Zidane #define IBV_RX_HASH_IPSEC_SPI (1U << 8) 185618ca4a4eSRaja Zidane #endif 185718ca4a4eSRaja Zidane 185818ca4a4eSRaja Zidane #define MLX5_RSS_HASH_ESP_SPI IBV_RX_HASH_IPSEC_SPI 185918ca4a4eSRaja Zidane #define MLX5_RSS_HASH_IPV4_ESP (MLX5_RSS_HASH_IPV4 | \ 186018ca4a4eSRaja Zidane MLX5_RSS_HASH_ESP_SPI) 186118ca4a4eSRaja Zidane #define MLX5_RSS_HASH_IPV6_ESP (MLX5_RSS_HASH_IPV6 | \ 186218ca4a4eSRaja Zidane MLX5_RSS_HASH_ESP_SPI) 1863d7cfcdddSAndrey Vesnovaty #define MLX5_RSS_HASH_NONE 0ULL 1864d7cfcdddSAndrey Vesnovaty 18650e04e1e2SXueming Li #define MLX5_RSS_IS_SYMM(func) \ 186676f3d99cSXueming Li (((func) == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) || \ 186776f3d99cSXueming Li ((func) == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ_SORT)) 186879f89527SGregory Etelson 186979f89527SGregory Etelson /* extract next protocol type from Ethernet & VLAN headers */ 187079f89527SGregory Etelson #define MLX5_ETHER_TYPE_FROM_HEADER(_s, _m, _itm, _prt) do { \ 187179f89527SGregory Etelson (_prt) = ((const struct _s *)(_itm)->mask)->_m; \ 187279f89527SGregory Etelson (_prt) &= ((const struct _s *)(_itm)->spec)->_m; \ 187379f89527SGregory Etelson (_prt) = rte_be_to_cpu_16((_prt)); \ 187479f89527SGregory Etelson } while (0) 187579f89527SGregory Etelson 1876d7cfcdddSAndrey Vesnovaty /* array of valid combinations of RX Hash fields for RSS */ 1877d7cfcdddSAndrey Vesnovaty static const uint64_t mlx5_rss_hash_fields[] = { 1878d7cfcdddSAndrey Vesnovaty MLX5_RSS_HASH_IPV4, 1879d7cfcdddSAndrey Vesnovaty MLX5_RSS_HASH_IPV4_TCP, 1880d7cfcdddSAndrey Vesnovaty MLX5_RSS_HASH_IPV4_UDP, 188118ca4a4eSRaja Zidane MLX5_RSS_HASH_IPV4_ESP, 1882d7cfcdddSAndrey Vesnovaty MLX5_RSS_HASH_IPV6, 1883d7cfcdddSAndrey Vesnovaty MLX5_RSS_HASH_IPV6_TCP, 1884d7cfcdddSAndrey Vesnovaty MLX5_RSS_HASH_IPV6_UDP, 188518ca4a4eSRaja Zidane MLX5_RSS_HASH_IPV6_ESP, 188618ca4a4eSRaja Zidane MLX5_RSS_HASH_ESP_SPI, 1887d7cfcdddSAndrey Vesnovaty MLX5_RSS_HASH_NONE, 1888d7cfcdddSAndrey Vesnovaty }; 1889d7cfcdddSAndrey Vesnovaty 1890d7cfcdddSAndrey Vesnovaty /* Shared RSS action structure */ 1891d7cfcdddSAndrey Vesnovaty struct mlx5_shared_action_rss { 18924a42ac1fSMatan Azrad ILIST_ENTRY(uint32_t)next; /**< Index to the next RSS structure. */ 1893e12a0166STyler Retzlaff RTE_ATOMIC(uint32_t) refcnt; /**< Atomically accessed refcnt. */ 1894d7cfcdddSAndrey Vesnovaty struct rte_flow_action_rss origin; /**< Original rte RSS action. */ 1895d7cfcdddSAndrey Vesnovaty uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */ 1896fa7ad49eSAndrey Vesnovaty struct mlx5_ind_table_obj *ind_tbl; 1897fa7ad49eSAndrey Vesnovaty /**< Hash RX queues (hrxq, hrxq_tunnel fields) indirection table. */ 1898d7cfcdddSAndrey Vesnovaty uint32_t hrxq[MLX5_RSS_HASH_FIELDS_LEN]; 1899d7cfcdddSAndrey Vesnovaty /**< Hash RX queue indexes mapped to mlx5_rss_hash_fields */ 1900fa7ad49eSAndrey Vesnovaty rte_spinlock_t action_rss_sl; /**< Shared RSS action spinlock. */ 1901d7cfcdddSAndrey Vesnovaty }; 1902d7cfcdddSAndrey Vesnovaty 19034b61b877SBing Zhao struct rte_flow_action_handle { 19044a42ac1fSMatan Azrad uint32_t id; 1905d7cfcdddSAndrey Vesnovaty }; 1906d7cfcdddSAndrey Vesnovaty 19078bb81f26SXueming Li /* Thread specific flow workspace intermediate data. */ 19088bb81f26SXueming Li struct mlx5_flow_workspace { 19090064bf43SXueming Li /* If creating another flow in same thread, push new as stack. */ 19100064bf43SXueming Li struct mlx5_flow_workspace *prev; 19110064bf43SXueming Li struct mlx5_flow_workspace *next; 1912dc7c5e0aSGregory Etelson struct mlx5_flow_workspace *gc; 19130064bf43SXueming Li uint32_t inuse; /* can't create new flow with current. */ 19148bb81f26SXueming Li struct mlx5_flow flows[MLX5_NUM_MAX_DEV_FLOWS]; 19150064bf43SXueming Li struct mlx5_flow_rss_desc rss_desc; 191638c6dc20SXueming Li uint32_t flow_idx; /* Intermediate device flow index. */ 1917e6100c7bSLi Zhang struct mlx5_flow_meter_info *fm; /* Pointer to the meter in flow. */ 191850cc92ddSShun Hao struct mlx5_flow_meter_policy *policy; 191950cc92ddSShun Hao /* The meter policy used by meter in flow. */ 192050cc92ddSShun Hao struct mlx5_flow_meter_policy *final_policy; 192150cc92ddSShun Hao /* The final policy when meter policy is hierarchy. */ 1922*3cd695c3SBing Zhao #ifdef HAVE_MLX5_HWS_SUPPORT 1923*3cd695c3SBing Zhao struct rte_flow_template_table *table; 1924*3cd695c3SBing Zhao #endif 192551ec04dcSShun Hao uint32_t skip_matcher_reg:1; 192651ec04dcSShun Hao /* Indicates if need to skip matcher register in translate. */ 1927082becbfSRaja Zidane uint32_t mark:1; /* Indicates if flow contains mark action. */ 1928cd4ab742SSuanming Mou uint32_t vport_meta_tag; /* Used for vport index match. */ 1929cd4ab742SSuanming Mou }; 1930cd4ab742SSuanming Mou 1931cd4ab742SSuanming Mou /* Matcher translate type. */ 1932cd4ab742SSuanming Mou enum MLX5_SET_MATCHER { 1933cd4ab742SSuanming Mou MLX5_SET_MATCHER_SW_V = 1 << 0, 1934cd4ab742SSuanming Mou MLX5_SET_MATCHER_SW_M = 1 << 1, 1935cd4ab742SSuanming Mou MLX5_SET_MATCHER_HS_V = 1 << 2, 1936cd4ab742SSuanming Mou MLX5_SET_MATCHER_HS_M = 1 << 3, 1937cd4ab742SSuanming Mou }; 1938cd4ab742SSuanming Mou 1939cd4ab742SSuanming Mou #define MLX5_SET_MATCHER_SW (MLX5_SET_MATCHER_SW_V | MLX5_SET_MATCHER_SW_M) 1940cd4ab742SSuanming Mou #define MLX5_SET_MATCHER_HS (MLX5_SET_MATCHER_HS_V | MLX5_SET_MATCHER_HS_M) 1941cd4ab742SSuanming Mou #define MLX5_SET_MATCHER_V (MLX5_SET_MATCHER_SW_V | MLX5_SET_MATCHER_HS_V) 1942cd4ab742SSuanming Mou #define MLX5_SET_MATCHER_M (MLX5_SET_MATCHER_SW_M | MLX5_SET_MATCHER_HS_M) 1943cd4ab742SSuanming Mou 1944cd4ab742SSuanming Mou /* Flow matcher workspace intermediate data. */ 1945cd4ab742SSuanming Mou struct mlx5_dv_matcher_workspace { 1946cd4ab742SSuanming Mou uint8_t priority; /* Flow priority. */ 1947cd4ab742SSuanming Mou uint64_t last_item; /* Last item in pattern. */ 1948cd4ab742SSuanming Mou uint64_t item_flags; /* Flow item pattern flags. */ 1949cd4ab742SSuanming Mou uint64_t action_flags; /* Flow action flags. */ 1950cd4ab742SSuanming Mou bool external; /* External flow or not. */ 1951cd4ab742SSuanming Mou uint32_t vlan_tag:12; /* Flow item VLAN tag. */ 1952cd4ab742SSuanming Mou uint8_t next_protocol; /* Tunnel next protocol */ 1953cd4ab742SSuanming Mou uint32_t geneve_tlv_option; /* Flow item Geneve TLV option. */ 1954cd4ab742SSuanming Mou uint32_t group; /* Flow group. */ 1955cd4ab742SSuanming Mou uint16_t udp_dport; /* Flow item UDP port. */ 1956cd4ab742SSuanming Mou const struct rte_flow_attr *attr; /* Flow attribute. */ 1957cd4ab742SSuanming Mou struct mlx5_flow_rss_desc *rss_desc; /* RSS descriptor. */ 1958cd4ab742SSuanming Mou const struct rte_flow_item *tunnel_item; /* Flow tunnel item. */ 1959cd4ab742SSuanming Mou const struct rte_flow_item *gre_item; /* Flow GRE item. */ 1960a3778a47SGregory Etelson const struct rte_flow_item *integrity_items[2]; 19618bb81f26SXueming Li }; 19628bb81f26SXueming Li 19639ade91dfSJiawei Wang struct mlx5_flow_split_info { 1964693c7d4bSJiawei Wang uint32_t external:1; 19659ade91dfSJiawei Wang /**< True if flow is created by request external to PMD. */ 1966693c7d4bSJiawei Wang uint32_t prefix_mark:1; /**< Prefix subflow mark flag. */ 1967693c7d4bSJiawei Wang uint32_t skip_scale:8; /**< Skip the scale the table with factor. */ 19689ade91dfSJiawei Wang uint32_t flow_idx; /**< This memory pool index to the flow. */ 19692d2cef5dSLi Zhang uint32_t table_id; /**< Flow table identifier. */ 1970693c7d4bSJiawei Wang uint64_t prefix_layers; /**< Prefix subflow layers. */ 19719ade91dfSJiawei Wang }; 19729ade91dfSJiawei Wang 1973821a6a5cSBing Zhao struct mlx5_flow_hw_partial_resource { 1974821a6a5cSBing Zhao const struct rte_flow_attr *attr; 1975821a6a5cSBing Zhao const struct rte_flow_item *items; 1976821a6a5cSBing Zhao const struct rte_flow_action *actions; 1977821a6a5cSBing Zhao }; 1978821a6a5cSBing Zhao 1979821a6a5cSBing Zhao struct mlx5_flow_hw_split_resource { 1980821a6a5cSBing Zhao struct mlx5_flow_hw_partial_resource prefix; 1981821a6a5cSBing Zhao struct mlx5_flow_hw_partial_resource suffix; 1982821a6a5cSBing Zhao void *buf_start; /* start address of continuous buffer. */ 1983821a6a5cSBing Zhao uint32_t flow_idx; /* This memory pool index to the flow. */ 1984821a6a5cSBing Zhao }; 1985821a6a5cSBing Zhao 1986f5177bdcSMichael Baum struct mlx5_hl_data { 1987f5177bdcSMichael Baum uint8_t dw_offset; 1988f5177bdcSMichael Baum uint32_t dw_mask; 1989f5177bdcSMichael Baum }; 1990f5177bdcSMichael Baum 19915bd0e3e6SDariusz Sosnowski extern struct flow_hw_port_info mlx5_flow_hw_port_infos[RTE_MAX_ETHPORTS]; 19925bd0e3e6SDariusz Sosnowski 19935bd0e3e6SDariusz Sosnowski /* 19947aa6c077SSuanming Mou * Get sqn for given tx_queue. 19957aa6c077SSuanming Mou * Used in HWS rule creation. 19967aa6c077SSuanming Mou */ 19977aa6c077SSuanming Mou static __rte_always_inline int 19987aa6c077SSuanming Mou flow_hw_get_sqn(struct rte_eth_dev *dev, uint16_t tx_queue, uint32_t *sqn) 19997aa6c077SSuanming Mou { 20007aa6c077SSuanming Mou struct mlx5_txq_ctrl *txq; 20011944fbc3SSuanming Mou struct mlx5_external_q *ext_txq; 20027aa6c077SSuanming Mou 20037aa6c077SSuanming Mou /* Means Tx queue is PF0. */ 20047aa6c077SSuanming Mou if (tx_queue == UINT16_MAX) { 20057aa6c077SSuanming Mou *sqn = 0; 20067aa6c077SSuanming Mou return 0; 20077aa6c077SSuanming Mou } 20081944fbc3SSuanming Mou if (mlx5_is_external_txq(dev, tx_queue)) { 20091944fbc3SSuanming Mou ext_txq = mlx5_ext_txq_get(dev, tx_queue); 20101944fbc3SSuanming Mou *sqn = ext_txq->hw_id; 20111944fbc3SSuanming Mou return 0; 20121944fbc3SSuanming Mou } 20137aa6c077SSuanming Mou txq = mlx5_txq_get(dev, tx_queue); 20147aa6c077SSuanming Mou if (unlikely(!txq)) 20157aa6c077SSuanming Mou return -ENOENT; 20167aa6c077SSuanming Mou *sqn = mlx5_txq_get_sqn(txq); 20177aa6c077SSuanming Mou mlx5_txq_release(dev, tx_queue); 20187aa6c077SSuanming Mou return 0; 20197aa6c077SSuanming Mou } 20207aa6c077SSuanming Mou 20217aa6c077SSuanming Mou /* 20227aa6c077SSuanming Mou * Convert sqn for given rte_eth_dev port. 20237aa6c077SSuanming Mou * Used in HWS rule creation. 20247aa6c077SSuanming Mou */ 20257aa6c077SSuanming Mou static __rte_always_inline int 20267aa6c077SSuanming Mou flow_hw_conv_sqn(uint16_t port_id, uint16_t tx_queue, uint32_t *sqn) 20277aa6c077SSuanming Mou { 20287aa6c077SSuanming Mou if (port_id >= RTE_MAX_ETHPORTS) 20297aa6c077SSuanming Mou return -EINVAL; 20307aa6c077SSuanming Mou return flow_hw_get_sqn(&rte_eth_devices[port_id], tx_queue, sqn); 20317aa6c077SSuanming Mou } 20327aa6c077SSuanming Mou 20337aa6c077SSuanming Mou /* 20347aa6c077SSuanming Mou * Get given rte_eth_dev port_id. 20357aa6c077SSuanming Mou * Used in HWS rule creation. 20367aa6c077SSuanming Mou */ 20377aa6c077SSuanming Mou static __rte_always_inline uint16_t 20387aa6c077SSuanming Mou flow_hw_get_port_id(void *dr_ctx) 20397aa6c077SSuanming Mou { 20407aa6c077SSuanming Mou #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H) 20417aa6c077SSuanming Mou uint16_t port_id; 20427aa6c077SSuanming Mou 20437aa6c077SSuanming Mou MLX5_ETH_FOREACH_DEV(port_id, NULL) { 20447aa6c077SSuanming Mou struct mlx5_priv *priv; 20457aa6c077SSuanming Mou 20467aa6c077SSuanming Mou priv = rte_eth_devices[port_id].data->dev_private; 20477aa6c077SSuanming Mou if (priv->dr_ctx == dr_ctx) 20487aa6c077SSuanming Mou return port_id; 20497aa6c077SSuanming Mou } 20507aa6c077SSuanming Mou #else 20517aa6c077SSuanming Mou RTE_SET_USED(dr_ctx); 20527aa6c077SSuanming Mou #endif 20537aa6c077SSuanming Mou return UINT16_MAX; 20547aa6c077SSuanming Mou } 20557aa6c077SSuanming Mou 20567aa6c077SSuanming Mou /* 20574cbeba6fSSuanming Mou * Get given eswitch manager id. 20584cbeba6fSSuanming Mou * Used in HWS match with port creation. 20594cbeba6fSSuanming Mou */ 20604cbeba6fSSuanming Mou static __rte_always_inline const struct flow_hw_port_info * 20614cbeba6fSSuanming Mou flow_hw_get_esw_mgr_id(void *dr_ctx) 20624cbeba6fSSuanming Mou { 20634cbeba6fSSuanming Mou #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H) 20644cbeba6fSSuanming Mou uint16_t port_id; 20654cbeba6fSSuanming Mou 20664cbeba6fSSuanming Mou MLX5_ETH_FOREACH_DEV(port_id, NULL) { 20674cbeba6fSSuanming Mou struct mlx5_priv *priv; 20684cbeba6fSSuanming Mou 20694cbeba6fSSuanming Mou priv = rte_eth_devices[port_id].data->dev_private; 20704cbeba6fSSuanming Mou if (priv->dr_ctx == dr_ctx) 20714cbeba6fSSuanming Mou return &priv->sh->dev_cap.esw_info; 20724cbeba6fSSuanming Mou } 20734cbeba6fSSuanming Mou #else 20744cbeba6fSSuanming Mou RTE_SET_USED(dr_ctx); 20754cbeba6fSSuanming Mou #endif 20764cbeba6fSSuanming Mou return NULL; 20774cbeba6fSSuanming Mou } 20784cbeba6fSSuanming Mou 20794cbeba6fSSuanming Mou /* 20805bd0e3e6SDariusz Sosnowski * Get metadata match tag and mask for given rte_eth_dev port. 20815bd0e3e6SDariusz Sosnowski * Used in HWS rule creation. 20825bd0e3e6SDariusz Sosnowski */ 20835bd0e3e6SDariusz Sosnowski static __rte_always_inline const struct flow_hw_port_info * 20844cbeba6fSSuanming Mou flow_hw_conv_port_id(void *ctx, const uint16_t port_id) 20855bd0e3e6SDariusz Sosnowski { 20865bd0e3e6SDariusz Sosnowski struct flow_hw_port_info *port_info; 20875bd0e3e6SDariusz Sosnowski 20884cbeba6fSSuanming Mou if (port_id == UINT16_MAX && ctx) 20894cbeba6fSSuanming Mou return flow_hw_get_esw_mgr_id(ctx); 20904cbeba6fSSuanming Mou 20915bd0e3e6SDariusz Sosnowski if (port_id >= RTE_MAX_ETHPORTS) 20925bd0e3e6SDariusz Sosnowski return NULL; 20935bd0e3e6SDariusz Sosnowski port_info = &mlx5_flow_hw_port_infos[port_id]; 20945bd0e3e6SDariusz Sosnowski return !!port_info->regc_mask ? port_info : NULL; 20955bd0e3e6SDariusz Sosnowski } 20965bd0e3e6SDariusz Sosnowski 20975bd0e3e6SDariusz Sosnowski #ifdef HAVE_IBV_FLOW_DV_SUPPORT 20985bd0e3e6SDariusz Sosnowski /* 20995bd0e3e6SDariusz Sosnowski * Get metadata match tag and mask for the uplink port represented 21005bd0e3e6SDariusz Sosnowski * by given IB context. Used in HWS context creation. 21015bd0e3e6SDariusz Sosnowski */ 21025bd0e3e6SDariusz Sosnowski static __rte_always_inline const struct flow_hw_port_info * 21035bd0e3e6SDariusz Sosnowski flow_hw_get_wire_port(struct ibv_context *ibctx) 21045bd0e3e6SDariusz Sosnowski { 21055bd0e3e6SDariusz Sosnowski struct ibv_device *ibdev = ibctx->device; 21065bd0e3e6SDariusz Sosnowski uint16_t port_id; 21075bd0e3e6SDariusz Sosnowski 21085bd0e3e6SDariusz Sosnowski MLX5_ETH_FOREACH_DEV(port_id, NULL) { 21095bd0e3e6SDariusz Sosnowski const struct mlx5_priv *priv = 21105bd0e3e6SDariusz Sosnowski rte_eth_devices[port_id].data->dev_private; 21115bd0e3e6SDariusz Sosnowski 21125bd0e3e6SDariusz Sosnowski if (priv && priv->master) { 21135bd0e3e6SDariusz Sosnowski struct ibv_context *port_ibctx = priv->sh->cdev->ctx; 21145bd0e3e6SDariusz Sosnowski 21155bd0e3e6SDariusz Sosnowski if (port_ibctx->device == ibdev) 21164cbeba6fSSuanming Mou return flow_hw_conv_port_id(priv->dr_ctx, port_id); 21175bd0e3e6SDariusz Sosnowski } 21185bd0e3e6SDariusz Sosnowski } 21195bd0e3e6SDariusz Sosnowski return NULL; 21205bd0e3e6SDariusz Sosnowski } 21215bd0e3e6SDariusz Sosnowski #endif 21225bd0e3e6SDariusz Sosnowski 21238a89038fSBing Zhao static __rte_always_inline int 212404e740e6SGregory Etelson flow_hw_get_reg_id(struct rte_eth_dev *dev, 212504e740e6SGregory Etelson enum rte_flow_item_type type, uint32_t id) 21268a89038fSBing Zhao { 212710943706SMichael Baum #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H) 212810943706SMichael Baum return flow_hw_get_reg_id_by_domain(dev, type, 21292b45a773SMichael Baum MLX5DR_TABLE_TYPE_MAX, id); 213004e740e6SGregory Etelson #else 213110943706SMichael Baum RTE_SET_USED(dev); 213204e740e6SGregory Etelson RTE_SET_USED(type); 213304e740e6SGregory Etelson RTE_SET_USED(id); 213404e740e6SGregory Etelson return REG_NON; 213510943706SMichael Baum #endif 213604e740e6SGregory Etelson } 213704e740e6SGregory Etelson 2138572fe9efSErez Shitrit static __rte_always_inline int 2139572fe9efSErez Shitrit flow_hw_get_port_id_from_ctx(void *dr_ctx, uint32_t *port_val) 2140572fe9efSErez Shitrit { 2141572fe9efSErez Shitrit #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H) 2142572fe9efSErez Shitrit uint32_t port; 2143572fe9efSErez Shitrit 2144572fe9efSErez Shitrit MLX5_ETH_FOREACH_DEV(port, NULL) { 2145572fe9efSErez Shitrit struct mlx5_priv *priv; 2146572fe9efSErez Shitrit priv = rte_eth_devices[port].data->dev_private; 2147572fe9efSErez Shitrit 2148572fe9efSErez Shitrit if (priv->dr_ctx == dr_ctx) { 2149572fe9efSErez Shitrit *port_val = port; 2150572fe9efSErez Shitrit return 0; 2151572fe9efSErez Shitrit } 2152572fe9efSErez Shitrit } 2153572fe9efSErez Shitrit #else 2154572fe9efSErez Shitrit RTE_SET_USED(dr_ctx); 2155572fe9efSErez Shitrit RTE_SET_USED(port_val); 2156572fe9efSErez Shitrit #endif 2157572fe9efSErez Shitrit return -EINVAL; 2158572fe9efSErez Shitrit } 2159572fe9efSErez Shitrit 2160232b349bSMichael Baum /** 2161232b349bSMichael Baum * Get GENEVE TLV option FW information according type and class. 2162232b349bSMichael Baum * 2163232b349bSMichael Baum * @param[in] dr_ctx 2164232b349bSMichael Baum * Pointer to HW steering DR context. 2165232b349bSMichael Baum * @param[in] type 2166232b349bSMichael Baum * GENEVE TLV option type. 2167232b349bSMichael Baum * @param[in] class 2168232b349bSMichael Baum * GENEVE TLV option class. 2169232b349bSMichael Baum * @param[out] hl_ok_bit 2170232b349bSMichael Baum * Pointer to header layout structure describing OK bit FW information. 2171232b349bSMichael Baum * @param[out] num_of_dws 2172232b349bSMichael Baum * Pointer to fill inside the size of 'hl_dws' array. 2173232b349bSMichael Baum * @param[out] hl_dws 2174232b349bSMichael Baum * Pointer to header layout array describing data DWs FW information. 2175232b349bSMichael Baum * @param[out] ok_bit_on_class 2176232b349bSMichael Baum * Pointer to an indicator whether OK bit includes class along with type. 2177232b349bSMichael Baum * 2178232b349bSMichael Baum * @return 2179232b349bSMichael Baum * 0 on success, negative errno otherwise and rte_errno is set. 2180232b349bSMichael Baum */ 2181232b349bSMichael Baum int 2182232b349bSMichael Baum mlx5_get_geneve_hl_data(const void *dr_ctx, uint8_t type, uint16_t class, 2183232b349bSMichael Baum struct mlx5_hl_data ** const hl_ok_bit, 2184232b349bSMichael Baum uint8_t *num_of_dws, 2185232b349bSMichael Baum struct mlx5_hl_data ** const hl_dws, 2186232b349bSMichael Baum bool *ok_bit_on_class); 2187232b349bSMichael Baum 21881caa89ecSMichael Baum /** 21891caa89ecSMichael Baum * Get modify field ID for single DW inside configured GENEVE TLV option. 21901caa89ecSMichael Baum * 21911caa89ecSMichael Baum * @param[in] dr_ctx 21921caa89ecSMichael Baum * Pointer to HW steering DR context. 21931caa89ecSMichael Baum * @param[in] type 21941caa89ecSMichael Baum * GENEVE TLV option type. 21951caa89ecSMichael Baum * @param[in] class 21961caa89ecSMichael Baum * GENEVE TLV option class. 21971caa89ecSMichael Baum * @param[in] dw_offset 21981caa89ecSMichael Baum * Offset of DW inside the option. 21991caa89ecSMichael Baum * 22001caa89ecSMichael Baum * @return 22011caa89ecSMichael Baum * Modify field ID on success, negative errno otherwise and rte_errno is set. 22021caa89ecSMichael Baum */ 22031caa89ecSMichael Baum int 22041caa89ecSMichael Baum mlx5_get_geneve_option_modify_field_id(const void *dr_ctx, uint8_t type, 22051caa89ecSMichael Baum uint16_t class, uint8_t dw_offset); 22061caa89ecSMichael Baum 2207f5177bdcSMichael Baum void * 2208f5177bdcSMichael Baum mlx5_geneve_tlv_parser_create(uint16_t port_id, 2209f5177bdcSMichael Baum const struct rte_pmd_mlx5_geneve_tlv tlv_list[], 2210f5177bdcSMichael Baum uint8_t nb_options); 2211f5177bdcSMichael Baum int mlx5_geneve_tlv_parser_destroy(void *handle); 221285738168SMichael Baum int mlx5_flow_geneve_tlv_option_validate(struct mlx5_priv *priv, 221385738168SMichael Baum const struct rte_flow_item *geneve_opt, 221485738168SMichael Baum struct rte_flow_error *error); 22151caa89ecSMichael Baum int mlx5_geneve_opt_modi_field_get(struct mlx5_priv *priv, 22161caa89ecSMichael Baum const struct rte_flow_field_data *data); 221785738168SMichael Baum 221885738168SMichael Baum struct mlx5_geneve_tlv_options_mng; 221985738168SMichael Baum int mlx5_geneve_tlv_option_register(struct mlx5_priv *priv, 222085738168SMichael Baum const struct rte_flow_item_geneve_opt *spec, 222185738168SMichael Baum struct mlx5_geneve_tlv_options_mng *mng); 222285738168SMichael Baum void mlx5_geneve_tlv_options_unregister(struct mlx5_priv *priv, 222385738168SMichael Baum struct mlx5_geneve_tlv_options_mng *mng); 2224f5177bdcSMichael Baum 22255bd0e3e6SDariusz Sosnowski void flow_hw_set_port_info(struct rte_eth_dev *dev); 22265bd0e3e6SDariusz Sosnowski void flow_hw_clear_port_info(struct rte_eth_dev *dev); 22271939eb6fSDariusz Sosnowski int flow_hw_create_vport_action(struct rte_eth_dev *dev); 22281939eb6fSDariusz Sosnowski void flow_hw_destroy_vport_action(struct rte_eth_dev *dev); 2229e38776c3SMaayan Kashani int 2230e38776c3SMaayan Kashani flow_hw_init(struct rte_eth_dev *dev, 2231e38776c3SMaayan Kashani struct rte_flow_error *error); 22321939eb6fSDariusz Sosnowski 223327d171b8SMaayan Kashani typedef uintptr_t (*mlx5_flow_list_create_t)(struct rte_eth_dev *dev, 2234e38776c3SMaayan Kashani enum mlx5_flow_type type, 2235e38776c3SMaayan Kashani const struct rte_flow_attr *attr, 2236e38776c3SMaayan Kashani const struct rte_flow_item items[], 2237e38776c3SMaayan Kashani const struct rte_flow_action actions[], 2238e38776c3SMaayan Kashani bool external, 2239e38776c3SMaayan Kashani struct rte_flow_error *error); 2240e38776c3SMaayan Kashani typedef void (*mlx5_flow_list_destroy_t)(struct rte_eth_dev *dev, 2241e38776c3SMaayan Kashani enum mlx5_flow_type type, 224227d171b8SMaayan Kashani uintptr_t flow_idx); 224384c406e7SOri Kam typedef int (*mlx5_flow_validate_t)(struct rte_eth_dev *dev, 224484c406e7SOri Kam const struct rte_flow_attr *attr, 224584c406e7SOri Kam const struct rte_flow_item items[], 224684c406e7SOri Kam const struct rte_flow_action actions[], 2247b67b4ecbSDekel Peled bool external, 224872a944dbSBing Zhao int hairpin, 224984c406e7SOri Kam struct rte_flow_error *error); 225084c406e7SOri Kam typedef struct mlx5_flow *(*mlx5_flow_prepare_t) 2251e7bfa359SBing Zhao (struct rte_eth_dev *dev, const struct rte_flow_attr *attr, 2252e7bfa359SBing Zhao const struct rte_flow_item items[], 2253c1cfb132SYongseok Koh const struct rte_flow_action actions[], struct rte_flow_error *error); 225484c406e7SOri Kam typedef int (*mlx5_flow_translate_t)(struct rte_eth_dev *dev, 225584c406e7SOri Kam struct mlx5_flow *dev_flow, 225684c406e7SOri Kam const struct rte_flow_attr *attr, 225784c406e7SOri Kam const struct rte_flow_item items[], 225884c406e7SOri Kam const struct rte_flow_action actions[], 225984c406e7SOri Kam struct rte_flow_error *error); 226084c406e7SOri Kam typedef int (*mlx5_flow_apply_t)(struct rte_eth_dev *dev, struct rte_flow *flow, 226184c406e7SOri Kam struct rte_flow_error *error); 226284c406e7SOri Kam typedef void (*mlx5_flow_remove_t)(struct rte_eth_dev *dev, 226384c406e7SOri Kam struct rte_flow *flow); 226484c406e7SOri Kam typedef void (*mlx5_flow_destroy_t)(struct rte_eth_dev *dev, 226584c406e7SOri Kam struct rte_flow *flow); 2266684dafe7SMoti Haimovsky typedef int (*mlx5_flow_query_t)(struct rte_eth_dev *dev, 2267684dafe7SMoti Haimovsky struct rte_flow *flow, 2268684dafe7SMoti Haimovsky const struct rte_flow_action *actions, 2269684dafe7SMoti Haimovsky void *data, 2270684dafe7SMoti Haimovsky struct rte_flow_error *error); 227144432018SLi Zhang typedef int (*mlx5_flow_create_mtr_tbls_t)(struct rte_eth_dev *dev, 227244432018SLi Zhang struct mlx5_flow_meter_info *fm, 227344432018SLi Zhang uint32_t mtr_idx, 227444432018SLi Zhang uint8_t domain_bitmap); 227544432018SLi Zhang typedef void (*mlx5_flow_destroy_mtr_tbls_t)(struct rte_eth_dev *dev, 227644432018SLi Zhang struct mlx5_flow_meter_info *fm); 2277afb4aa4fSLi Zhang typedef void (*mlx5_flow_destroy_mtr_drop_tbls_t)(struct rte_eth_dev *dev); 2278fc6ce56bSLi Zhang typedef struct mlx5_flow_meter_sub_policy * 2279fc6ce56bSLi Zhang (*mlx5_flow_meter_sub_policy_rss_prepare_t) 2280fc6ce56bSLi Zhang (struct rte_eth_dev *dev, 2281fc6ce56bSLi Zhang struct mlx5_flow_meter_policy *mtr_policy, 2282fc6ce56bSLi Zhang struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS]); 22838e5c9feaSShun Hao typedef int (*mlx5_flow_meter_hierarchy_rule_create_t) 22848e5c9feaSShun Hao (struct rte_eth_dev *dev, 22858e5c9feaSShun Hao struct mlx5_flow_meter_info *fm, 22868e5c9feaSShun Hao int32_t src_port, 22878e5c9feaSShun Hao const struct rte_flow_item *item, 22888e5c9feaSShun Hao struct rte_flow_error *error); 2289ec962badSLi Zhang typedef void (*mlx5_flow_destroy_sub_policy_with_rxq_t) 2290ec962badSLi Zhang (struct rte_eth_dev *dev, 2291ec962badSLi Zhang struct mlx5_flow_meter_policy *mtr_policy); 2292e6100c7bSLi Zhang typedef uint32_t (*mlx5_flow_mtr_alloc_t) 2293e6100c7bSLi Zhang (struct rte_eth_dev *dev); 2294e6100c7bSLi Zhang typedef void (*mlx5_flow_mtr_free_t)(struct rte_eth_dev *dev, 2295e6100c7bSLi Zhang uint32_t mtr_idx); 2296956d5c74SSuanming Mou typedef uint32_t (*mlx5_flow_counter_alloc_t) 2297e189f55cSSuanming Mou (struct rte_eth_dev *dev); 2298e189f55cSSuanming Mou typedef void (*mlx5_flow_counter_free_t)(struct rte_eth_dev *dev, 2299956d5c74SSuanming Mou uint32_t cnt); 2300e189f55cSSuanming Mou typedef int (*mlx5_flow_counter_query_t)(struct rte_eth_dev *dev, 2301956d5c74SSuanming Mou uint32_t cnt, 2302e189f55cSSuanming Mou bool clear, uint64_t *pkts, 23039b57df55SHaifei Luo uint64_t *bytes, void **action); 2304fa2d01c8SDong Zhou typedef int (*mlx5_flow_get_aged_flows_t) 2305fa2d01c8SDong Zhou (struct rte_eth_dev *dev, 2306fa2d01c8SDong Zhou void **context, 2307fa2d01c8SDong Zhou uint32_t nb_contexts, 2308fa2d01c8SDong Zhou struct rte_flow_error *error); 230904a4de75SMichael Baum typedef int (*mlx5_flow_get_q_aged_flows_t) 231004a4de75SMichael Baum (struct rte_eth_dev *dev, 231104a4de75SMichael Baum uint32_t queue_id, 231204a4de75SMichael Baum void **context, 231304a4de75SMichael Baum uint32_t nb_contexts, 231404a4de75SMichael Baum struct rte_flow_error *error); 2315d7cfcdddSAndrey Vesnovaty typedef int (*mlx5_flow_action_validate_t) 2316d7cfcdddSAndrey Vesnovaty (struct rte_eth_dev *dev, 23174b61b877SBing Zhao const struct rte_flow_indir_action_conf *conf, 2318d7cfcdddSAndrey Vesnovaty const struct rte_flow_action *action, 2319d7cfcdddSAndrey Vesnovaty struct rte_flow_error *error); 23204b61b877SBing Zhao typedef struct rte_flow_action_handle *(*mlx5_flow_action_create_t) 2321d7cfcdddSAndrey Vesnovaty (struct rte_eth_dev *dev, 23224b61b877SBing Zhao const struct rte_flow_indir_action_conf *conf, 2323d7cfcdddSAndrey Vesnovaty const struct rte_flow_action *action, 2324d7cfcdddSAndrey Vesnovaty struct rte_flow_error *error); 2325d7cfcdddSAndrey Vesnovaty typedef int (*mlx5_flow_action_destroy_t) 2326d7cfcdddSAndrey Vesnovaty (struct rte_eth_dev *dev, 23274b61b877SBing Zhao struct rte_flow_action_handle *action, 2328d7cfcdddSAndrey Vesnovaty struct rte_flow_error *error); 2329d7cfcdddSAndrey Vesnovaty typedef int (*mlx5_flow_action_update_t) 2330d7cfcdddSAndrey Vesnovaty (struct rte_eth_dev *dev, 23314b61b877SBing Zhao struct rte_flow_action_handle *action, 23324b61b877SBing Zhao const void *update, 2333d7cfcdddSAndrey Vesnovaty struct rte_flow_error *error); 233481073e1fSMatan Azrad typedef int (*mlx5_flow_action_query_t) 233581073e1fSMatan Azrad (struct rte_eth_dev *dev, 23364b61b877SBing Zhao const struct rte_flow_action_handle *action, 233781073e1fSMatan Azrad void *data, 233881073e1fSMatan Azrad struct rte_flow_error *error); 233915896eafSGregory Etelson typedef int (*mlx5_flow_action_query_update_t) 234015896eafSGregory Etelson (struct rte_eth_dev *dev, 234115896eafSGregory Etelson struct rte_flow_action_handle *handle, 234215896eafSGregory Etelson const void *update, void *data, 234315896eafSGregory Etelson enum rte_flow_query_update_mode qu_mode, 234415896eafSGregory Etelson struct rte_flow_error *error); 23453564e928SGregory Etelson typedef struct rte_flow_action_list_handle * 23463564e928SGregory Etelson (*mlx5_flow_action_list_handle_create_t) 23473564e928SGregory Etelson (struct rte_eth_dev *dev, 23483564e928SGregory Etelson const struct rte_flow_indir_action_conf *conf, 23493564e928SGregory Etelson const struct rte_flow_action *actions, 23503564e928SGregory Etelson struct rte_flow_error *error); 23513564e928SGregory Etelson typedef int 23523564e928SGregory Etelson (*mlx5_flow_action_list_handle_destroy_t) 23533564e928SGregory Etelson (struct rte_eth_dev *dev, 23543564e928SGregory Etelson struct rte_flow_action_list_handle *handle, 23553564e928SGregory Etelson struct rte_flow_error *error); 235623f627e0SBing Zhao typedef int (*mlx5_flow_sync_domain_t) 235723f627e0SBing Zhao (struct rte_eth_dev *dev, 235823f627e0SBing Zhao uint32_t domains, 235923f627e0SBing Zhao uint32_t flags); 2360afb4aa4fSLi Zhang typedef int (*mlx5_flow_validate_mtr_acts_t) 2361afb4aa4fSLi Zhang (struct rte_eth_dev *dev, 2362afb4aa4fSLi Zhang const struct rte_flow_action *actions[RTE_COLORS], 2363afb4aa4fSLi Zhang struct rte_flow_attr *attr, 2364afb4aa4fSLi Zhang bool *is_rss, 2365afb4aa4fSLi Zhang uint8_t *domain_bitmap, 23664b7bf3ffSBing Zhao uint8_t *policy_mode, 2367afb4aa4fSLi Zhang struct rte_mtr_error *error); 2368afb4aa4fSLi Zhang typedef int (*mlx5_flow_create_mtr_acts_t) 2369afb4aa4fSLi Zhang (struct rte_eth_dev *dev, 2370afb4aa4fSLi Zhang struct mlx5_flow_meter_policy *mtr_policy, 2371afb4aa4fSLi Zhang const struct rte_flow_action *actions[RTE_COLORS], 23726431068dSSean Zhang struct rte_flow_attr *attr, 2373afb4aa4fSLi Zhang struct rte_mtr_error *error); 2374afb4aa4fSLi Zhang typedef void (*mlx5_flow_destroy_mtr_acts_t) 2375afb4aa4fSLi Zhang (struct rte_eth_dev *dev, 2376afb4aa4fSLi Zhang struct mlx5_flow_meter_policy *mtr_policy); 2377afb4aa4fSLi Zhang typedef int (*mlx5_flow_create_policy_rules_t) 2378afb4aa4fSLi Zhang (struct rte_eth_dev *dev, 2379afb4aa4fSLi Zhang struct mlx5_flow_meter_policy *mtr_policy); 2380afb4aa4fSLi Zhang typedef void (*mlx5_flow_destroy_policy_rules_t) 2381afb4aa4fSLi Zhang (struct rte_eth_dev *dev, 2382afb4aa4fSLi Zhang struct mlx5_flow_meter_policy *mtr_policy); 2383afb4aa4fSLi Zhang typedef int (*mlx5_flow_create_def_policy_t) 2384afb4aa4fSLi Zhang (struct rte_eth_dev *dev); 2385afb4aa4fSLi Zhang typedef void (*mlx5_flow_destroy_def_policy_t) 2386afb4aa4fSLi Zhang (struct rte_eth_dev *dev); 2387c5042f93SDmitry Kozlyuk typedef int (*mlx5_flow_discover_priorities_t) 2388c5042f93SDmitry Kozlyuk (struct rte_eth_dev *dev, 2389c5042f93SDmitry Kozlyuk const uint16_t *vprio, int vprio_n); 2390db25cadcSViacheslav Ovsiienko typedef struct rte_flow_item_flex_handle *(*mlx5_flow_item_create_t) 2391db25cadcSViacheslav Ovsiienko (struct rte_eth_dev *dev, 2392db25cadcSViacheslav Ovsiienko const struct rte_flow_item_flex_conf *conf, 2393db25cadcSViacheslav Ovsiienko struct rte_flow_error *error); 2394db25cadcSViacheslav Ovsiienko typedef int (*mlx5_flow_item_release_t) 2395db25cadcSViacheslav Ovsiienko (struct rte_eth_dev *dev, 2396db25cadcSViacheslav Ovsiienko const struct rte_flow_item_flex_handle *handle, 2397db25cadcSViacheslav Ovsiienko struct rte_flow_error *error); 2398db25cadcSViacheslav Ovsiienko typedef int (*mlx5_flow_item_update_t) 2399db25cadcSViacheslav Ovsiienko (struct rte_eth_dev *dev, 2400db25cadcSViacheslav Ovsiienko const struct rte_flow_item_flex_handle *handle, 2401db25cadcSViacheslav Ovsiienko const struct rte_flow_item_flex_conf *conf, 2402db25cadcSViacheslav Ovsiienko struct rte_flow_error *error); 2403b401400dSSuanming Mou typedef int (*mlx5_flow_info_get_t) 2404b401400dSSuanming Mou (struct rte_eth_dev *dev, 2405b401400dSSuanming Mou struct rte_flow_port_info *port_info, 2406b401400dSSuanming Mou struct rte_flow_queue_info *queue_info, 2407b401400dSSuanming Mou struct rte_flow_error *error); 2408b401400dSSuanming Mou typedef int (*mlx5_flow_port_configure_t) 2409b401400dSSuanming Mou (struct rte_eth_dev *dev, 2410b401400dSSuanming Mou const struct rte_flow_port_attr *port_attr, 2411b401400dSSuanming Mou uint16_t nb_queue, 2412b401400dSSuanming Mou const struct rte_flow_queue_attr *queue_attr[], 2413b401400dSSuanming Mou struct rte_flow_error *err); 241424865366SAlexander Kozyrev typedef int (*mlx5_flow_pattern_validate_t) 241524865366SAlexander Kozyrev (struct rte_eth_dev *dev, 241624865366SAlexander Kozyrev const struct rte_flow_pattern_template_attr *attr, 241724865366SAlexander Kozyrev const struct rte_flow_item items[], 241880c67625SGregory Etelson uint64_t *item_flags, 241924865366SAlexander Kozyrev struct rte_flow_error *error); 242042431df9SSuanming Mou typedef struct rte_flow_pattern_template *(*mlx5_flow_pattern_template_create_t) 242142431df9SSuanming Mou (struct rte_eth_dev *dev, 242242431df9SSuanming Mou const struct rte_flow_pattern_template_attr *attr, 242342431df9SSuanming Mou const struct rte_flow_item items[], 242442431df9SSuanming Mou struct rte_flow_error *error); 242542431df9SSuanming Mou typedef int (*mlx5_flow_pattern_template_destroy_t) 242642431df9SSuanming Mou (struct rte_eth_dev *dev, 242742431df9SSuanming Mou struct rte_flow_pattern_template *template, 242842431df9SSuanming Mou struct rte_flow_error *error); 242924865366SAlexander Kozyrev typedef int (*mlx5_flow_actions_validate_t) 243024865366SAlexander Kozyrev (struct rte_eth_dev *dev, 243124865366SAlexander Kozyrev const struct rte_flow_actions_template_attr *attr, 243224865366SAlexander Kozyrev const struct rte_flow_action actions[], 243324865366SAlexander Kozyrev const struct rte_flow_action masks[], 243424865366SAlexander Kozyrev struct rte_flow_error *error); 2435836b5c9bSSuanming Mou typedef struct rte_flow_actions_template *(*mlx5_flow_actions_template_create_t) 2436836b5c9bSSuanming Mou (struct rte_eth_dev *dev, 2437836b5c9bSSuanming Mou const struct rte_flow_actions_template_attr *attr, 2438836b5c9bSSuanming Mou const struct rte_flow_action actions[], 2439836b5c9bSSuanming Mou const struct rte_flow_action masks[], 2440836b5c9bSSuanming Mou struct rte_flow_error *error); 2441836b5c9bSSuanming Mou typedef int (*mlx5_flow_actions_template_destroy_t) 2442836b5c9bSSuanming Mou (struct rte_eth_dev *dev, 2443836b5c9bSSuanming Mou struct rte_flow_actions_template *template, 2444836b5c9bSSuanming Mou struct rte_flow_error *error); 2445d1559d66SSuanming Mou typedef struct rte_flow_template_table *(*mlx5_flow_table_create_t) 2446d1559d66SSuanming Mou (struct rte_eth_dev *dev, 2447d1559d66SSuanming Mou const struct rte_flow_template_table_attr *attr, 2448d1559d66SSuanming Mou struct rte_flow_pattern_template *item_templates[], 2449d1559d66SSuanming Mou uint8_t nb_item_templates, 2450d1559d66SSuanming Mou struct rte_flow_actions_template *action_templates[], 2451d1559d66SSuanming Mou uint8_t nb_action_templates, 2452d1559d66SSuanming Mou struct rte_flow_error *error); 2453d1559d66SSuanming Mou typedef int (*mlx5_flow_table_destroy_t) 2454d1559d66SSuanming Mou (struct rte_eth_dev *dev, 2455d1559d66SSuanming Mou struct rte_flow_template_table *table, 2456d1559d66SSuanming Mou struct rte_flow_error *error); 24578ce638efSTomer Shmilovich typedef int (*mlx5_flow_group_set_miss_actions_t) 24588ce638efSTomer Shmilovich (struct rte_eth_dev *dev, 24598ce638efSTomer Shmilovich uint32_t group_id, 24608ce638efSTomer Shmilovich const struct rte_flow_group_attr *attr, 24618ce638efSTomer Shmilovich const struct rte_flow_action actions[], 24628ce638efSTomer Shmilovich struct rte_flow_error *error); 2463c40c061aSSuanming Mou typedef struct rte_flow *(*mlx5_flow_async_flow_create_t) 2464c40c061aSSuanming Mou (struct rte_eth_dev *dev, 2465c40c061aSSuanming Mou uint32_t queue, 2466c40c061aSSuanming Mou const struct rte_flow_op_attr *attr, 2467c40c061aSSuanming Mou struct rte_flow_template_table *table, 2468c40c061aSSuanming Mou const struct rte_flow_item items[], 2469c40c061aSSuanming Mou uint8_t pattern_template_index, 2470c40c061aSSuanming Mou const struct rte_flow_action actions[], 2471c40c061aSSuanming Mou uint8_t action_template_index, 2472c40c061aSSuanming Mou void *user_data, 2473c40c061aSSuanming Mou struct rte_flow_error *error); 247460db7673SAlexander Kozyrev typedef struct rte_flow *(*mlx5_flow_async_flow_create_by_index_t) 247560db7673SAlexander Kozyrev (struct rte_eth_dev *dev, 247660db7673SAlexander Kozyrev uint32_t queue, 247760db7673SAlexander Kozyrev const struct rte_flow_op_attr *attr, 247860db7673SAlexander Kozyrev struct rte_flow_template_table *table, 247960db7673SAlexander Kozyrev uint32_t rule_index, 248060db7673SAlexander Kozyrev const struct rte_flow_action actions[], 248160db7673SAlexander Kozyrev uint8_t action_template_index, 248260db7673SAlexander Kozyrev void *user_data, 248360db7673SAlexander Kozyrev struct rte_flow_error *error); 248463296851SAlexander Kozyrev typedef int (*mlx5_flow_async_flow_update_t) 248563296851SAlexander Kozyrev (struct rte_eth_dev *dev, 248663296851SAlexander Kozyrev uint32_t queue, 248763296851SAlexander Kozyrev const struct rte_flow_op_attr *attr, 248863296851SAlexander Kozyrev struct rte_flow *flow, 248963296851SAlexander Kozyrev const struct rte_flow_action actions[], 249063296851SAlexander Kozyrev uint8_t action_template_index, 249163296851SAlexander Kozyrev void *user_data, 249263296851SAlexander Kozyrev struct rte_flow_error *error); 2493c40c061aSSuanming Mou typedef int (*mlx5_flow_async_flow_destroy_t) 2494c40c061aSSuanming Mou (struct rte_eth_dev *dev, 2495c40c061aSSuanming Mou uint32_t queue, 2496c40c061aSSuanming Mou const struct rte_flow_op_attr *attr, 2497c40c061aSSuanming Mou struct rte_flow *flow, 2498c40c061aSSuanming Mou void *user_data, 2499c40c061aSSuanming Mou struct rte_flow_error *error); 2500c40c061aSSuanming Mou typedef int (*mlx5_flow_pull_t) 2501c40c061aSSuanming Mou (struct rte_eth_dev *dev, 2502c40c061aSSuanming Mou uint32_t queue, 2503c40c061aSSuanming Mou struct rte_flow_op_result res[], 2504c40c061aSSuanming Mou uint16_t n_res, 2505c40c061aSSuanming Mou struct rte_flow_error *error); 2506c40c061aSSuanming Mou typedef int (*mlx5_flow_push_t) 2507c40c061aSSuanming Mou (struct rte_eth_dev *dev, 2508c40c061aSSuanming Mou uint32_t queue, 2509c40c061aSSuanming Mou struct rte_flow_error *error); 251081073e1fSMatan Azrad 25117ab3962dSSuanming Mou typedef struct rte_flow_action_handle *(*mlx5_flow_async_action_handle_create_t) 25127ab3962dSSuanming Mou (struct rte_eth_dev *dev, 25137ab3962dSSuanming Mou uint32_t queue, 25147ab3962dSSuanming Mou const struct rte_flow_op_attr *attr, 25157ab3962dSSuanming Mou const struct rte_flow_indir_action_conf *conf, 25167ab3962dSSuanming Mou const struct rte_flow_action *action, 25177ab3962dSSuanming Mou void *user_data, 25187ab3962dSSuanming Mou struct rte_flow_error *error); 25197ab3962dSSuanming Mou 25207ab3962dSSuanming Mou typedef int (*mlx5_flow_async_action_handle_update_t) 25217ab3962dSSuanming Mou (struct rte_eth_dev *dev, 25227ab3962dSSuanming Mou uint32_t queue, 25237ab3962dSSuanming Mou const struct rte_flow_op_attr *attr, 25247ab3962dSSuanming Mou struct rte_flow_action_handle *handle, 25257ab3962dSSuanming Mou const void *update, 25267ab3962dSSuanming Mou void *user_data, 25277ab3962dSSuanming Mou struct rte_flow_error *error); 252815896eafSGregory Etelson typedef int (*mlx5_flow_async_action_handle_query_update_t) 252915896eafSGregory Etelson (struct rte_eth_dev *dev, uint32_t queue_id, 253015896eafSGregory Etelson const struct rte_flow_op_attr *op_attr, 253115896eafSGregory Etelson struct rte_flow_action_handle *action_handle, 253215896eafSGregory Etelson const void *update, void *data, 253315896eafSGregory Etelson enum rte_flow_query_update_mode qu_mode, 253415896eafSGregory Etelson void *user_data, struct rte_flow_error *error); 2535478ba4bbSSuanming Mou typedef int (*mlx5_flow_async_action_handle_query_t) 2536478ba4bbSSuanming Mou (struct rte_eth_dev *dev, 2537478ba4bbSSuanming Mou uint32_t queue, 2538478ba4bbSSuanming Mou const struct rte_flow_op_attr *attr, 2539478ba4bbSSuanming Mou const struct rte_flow_action_handle *handle, 2540478ba4bbSSuanming Mou void *data, 2541478ba4bbSSuanming Mou void *user_data, 2542478ba4bbSSuanming Mou struct rte_flow_error *error); 2543478ba4bbSSuanming Mou 25447ab3962dSSuanming Mou typedef int (*mlx5_flow_async_action_handle_destroy_t) 25457ab3962dSSuanming Mou (struct rte_eth_dev *dev, 25467ab3962dSSuanming Mou uint32_t queue, 25477ab3962dSSuanming Mou const struct rte_flow_op_attr *attr, 25487ab3962dSSuanming Mou struct rte_flow_action_handle *handle, 25497ab3962dSSuanming Mou void *user_data, 25507ab3962dSSuanming Mou struct rte_flow_error *error); 25513564e928SGregory Etelson typedef struct rte_flow_action_list_handle * 25523564e928SGregory Etelson (*mlx5_flow_async_action_list_handle_create_t) 25533564e928SGregory Etelson (struct rte_eth_dev *dev, uint32_t queue_id, 25543564e928SGregory Etelson const struct rte_flow_op_attr *attr, 25553564e928SGregory Etelson const struct rte_flow_indir_action_conf *conf, 25563564e928SGregory Etelson const struct rte_flow_action *actions, 25573564e928SGregory Etelson void *user_data, struct rte_flow_error *error); 25583564e928SGregory Etelson typedef int 25593564e928SGregory Etelson (*mlx5_flow_async_action_list_handle_destroy_t) 25603564e928SGregory Etelson (struct rte_eth_dev *dev, uint32_t queue_id, 25613564e928SGregory Etelson const struct rte_flow_op_attr *op_attr, 25623564e928SGregory Etelson struct rte_flow_action_list_handle *action_handle, 25633564e928SGregory Etelson void *user_data, struct rte_flow_error *error); 2564e26f50adSGregory Etelson typedef int 2565e26f50adSGregory Etelson (*mlx5_flow_action_list_handle_query_update_t) 2566e26f50adSGregory Etelson (struct rte_eth_dev *dev, 2567e26f50adSGregory Etelson const struct rte_flow_action_list_handle *handle, 2568e26f50adSGregory Etelson const void **update, void **query, 2569e26f50adSGregory Etelson enum rte_flow_query_update_mode mode, 2570e26f50adSGregory Etelson struct rte_flow_error *error); 2571e26f50adSGregory Etelson typedef int 2572e26f50adSGregory Etelson (*mlx5_flow_async_action_list_handle_query_update_t) 2573e26f50adSGregory Etelson (struct rte_eth_dev *dev, uint32_t queue_id, 2574e26f50adSGregory Etelson const struct rte_flow_op_attr *attr, 2575e26f50adSGregory Etelson const struct rte_flow_action_list_handle *handle, 2576e26f50adSGregory Etelson const void **update, void **query, 2577e26f50adSGregory Etelson enum rte_flow_query_update_mode mode, 2578e26f50adSGregory Etelson void *user_data, struct rte_flow_error *error); 25796c991cd9SOri Kam typedef int 25806c991cd9SOri Kam (*mlx5_flow_calc_table_hash_t) 25816c991cd9SOri Kam (struct rte_eth_dev *dev, 25826c991cd9SOri Kam const struct rte_flow_template_table *table, 25836c991cd9SOri Kam const struct rte_flow_item pattern[], 25846c991cd9SOri Kam uint8_t pattern_template_index, 25856c991cd9SOri Kam uint32_t *hash, struct rte_flow_error *error); 2586bb328f44SOri Kam typedef int 2587bb328f44SOri Kam (*mlx5_flow_calc_encap_hash_t) 2588bb328f44SOri Kam (struct rte_eth_dev *dev, 2589bb328f44SOri Kam const struct rte_flow_item pattern[], 2590bb328f44SOri Kam enum rte_flow_encap_hash_field dest_field, 2591bb328f44SOri Kam uint8_t *hash, 2592bb328f44SOri Kam struct rte_flow_error *error); 2593654ebd8cSGregory Etelson typedef int (*mlx5_table_resize_t)(struct rte_eth_dev *dev, 2594654ebd8cSGregory Etelson struct rte_flow_template_table *table, 2595654ebd8cSGregory Etelson uint32_t nb_rules, struct rte_flow_error *error); 2596654ebd8cSGregory Etelson typedef int (*mlx5_flow_update_resized_t) 2597654ebd8cSGregory Etelson (struct rte_eth_dev *dev, uint32_t queue, 2598654ebd8cSGregory Etelson const struct rte_flow_op_attr *attr, 2599654ebd8cSGregory Etelson struct rte_flow *rule, void *user_data, 2600654ebd8cSGregory Etelson struct rte_flow_error *error); 2601654ebd8cSGregory Etelson typedef int (*table_resize_complete_t)(struct rte_eth_dev *dev, 2602654ebd8cSGregory Etelson struct rte_flow_template_table *table, 2603654ebd8cSGregory Etelson struct rte_flow_error *error); 26047ab3962dSSuanming Mou 260584c406e7SOri Kam struct mlx5_flow_driver_ops { 2606e38776c3SMaayan Kashani mlx5_flow_list_create_t list_create; 2607e38776c3SMaayan Kashani mlx5_flow_list_destroy_t list_destroy; 260884c406e7SOri Kam mlx5_flow_validate_t validate; 260984c406e7SOri Kam mlx5_flow_prepare_t prepare; 261084c406e7SOri Kam mlx5_flow_translate_t translate; 261184c406e7SOri Kam mlx5_flow_apply_t apply; 261284c406e7SOri Kam mlx5_flow_remove_t remove; 261384c406e7SOri Kam mlx5_flow_destroy_t destroy; 2614684dafe7SMoti Haimovsky mlx5_flow_query_t query; 261546a5e6bcSSuanming Mou mlx5_flow_create_mtr_tbls_t create_mtr_tbls; 261646a5e6bcSSuanming Mou mlx5_flow_destroy_mtr_tbls_t destroy_mtr_tbls; 2617afb4aa4fSLi Zhang mlx5_flow_destroy_mtr_drop_tbls_t destroy_mtr_drop_tbls; 2618e6100c7bSLi Zhang mlx5_flow_mtr_alloc_t create_meter; 2619e6100c7bSLi Zhang mlx5_flow_mtr_free_t free_meter; 2620afb4aa4fSLi Zhang mlx5_flow_validate_mtr_acts_t validate_mtr_acts; 2621afb4aa4fSLi Zhang mlx5_flow_create_mtr_acts_t create_mtr_acts; 2622afb4aa4fSLi Zhang mlx5_flow_destroy_mtr_acts_t destroy_mtr_acts; 2623afb4aa4fSLi Zhang mlx5_flow_create_policy_rules_t create_policy_rules; 2624afb4aa4fSLi Zhang mlx5_flow_destroy_policy_rules_t destroy_policy_rules; 2625afb4aa4fSLi Zhang mlx5_flow_create_def_policy_t create_def_policy; 2626afb4aa4fSLi Zhang mlx5_flow_destroy_def_policy_t destroy_def_policy; 2627fc6ce56bSLi Zhang mlx5_flow_meter_sub_policy_rss_prepare_t meter_sub_policy_rss_prepare; 26288e5c9feaSShun Hao mlx5_flow_meter_hierarchy_rule_create_t meter_hierarchy_rule_create; 2629ec962badSLi Zhang mlx5_flow_destroy_sub_policy_with_rxq_t destroy_sub_policy_with_rxq; 2630e189f55cSSuanming Mou mlx5_flow_counter_alloc_t counter_alloc; 2631e189f55cSSuanming Mou mlx5_flow_counter_free_t counter_free; 2632e189f55cSSuanming Mou mlx5_flow_counter_query_t counter_query; 2633fa2d01c8SDong Zhou mlx5_flow_get_aged_flows_t get_aged_flows; 263404a4de75SMichael Baum mlx5_flow_get_q_aged_flows_t get_q_aged_flows; 2635d7cfcdddSAndrey Vesnovaty mlx5_flow_action_validate_t action_validate; 2636d7cfcdddSAndrey Vesnovaty mlx5_flow_action_create_t action_create; 2637d7cfcdddSAndrey Vesnovaty mlx5_flow_action_destroy_t action_destroy; 2638d7cfcdddSAndrey Vesnovaty mlx5_flow_action_update_t action_update; 263981073e1fSMatan Azrad mlx5_flow_action_query_t action_query; 264015896eafSGregory Etelson mlx5_flow_action_query_update_t action_query_update; 26413564e928SGregory Etelson mlx5_flow_action_list_handle_create_t action_list_handle_create; 26423564e928SGregory Etelson mlx5_flow_action_list_handle_destroy_t action_list_handle_destroy; 264323f627e0SBing Zhao mlx5_flow_sync_domain_t sync_domain; 2644c5042f93SDmitry Kozlyuk mlx5_flow_discover_priorities_t discover_priorities; 2645db25cadcSViacheslav Ovsiienko mlx5_flow_item_create_t item_create; 2646db25cadcSViacheslav Ovsiienko mlx5_flow_item_release_t item_release; 2647db25cadcSViacheslav Ovsiienko mlx5_flow_item_update_t item_update; 2648b401400dSSuanming Mou mlx5_flow_info_get_t info_get; 2649b401400dSSuanming Mou mlx5_flow_port_configure_t configure; 265024865366SAlexander Kozyrev mlx5_flow_pattern_validate_t pattern_validate; 265142431df9SSuanming Mou mlx5_flow_pattern_template_create_t pattern_template_create; 265242431df9SSuanming Mou mlx5_flow_pattern_template_destroy_t pattern_template_destroy; 265324865366SAlexander Kozyrev mlx5_flow_actions_validate_t actions_validate; 2654836b5c9bSSuanming Mou mlx5_flow_actions_template_create_t actions_template_create; 2655836b5c9bSSuanming Mou mlx5_flow_actions_template_destroy_t actions_template_destroy; 2656d1559d66SSuanming Mou mlx5_flow_table_create_t template_table_create; 2657d1559d66SSuanming Mou mlx5_flow_table_destroy_t template_table_destroy; 26588ce638efSTomer Shmilovich mlx5_flow_group_set_miss_actions_t group_set_miss_actions; 2659c40c061aSSuanming Mou mlx5_flow_async_flow_create_t async_flow_create; 266060db7673SAlexander Kozyrev mlx5_flow_async_flow_create_by_index_t async_flow_create_by_index; 266163296851SAlexander Kozyrev mlx5_flow_async_flow_update_t async_flow_update; 2662c40c061aSSuanming Mou mlx5_flow_async_flow_destroy_t async_flow_destroy; 2663c40c061aSSuanming Mou mlx5_flow_pull_t pull; 2664c40c061aSSuanming Mou mlx5_flow_push_t push; 26657ab3962dSSuanming Mou mlx5_flow_async_action_handle_create_t async_action_create; 26667ab3962dSSuanming Mou mlx5_flow_async_action_handle_update_t async_action_update; 266715896eafSGregory Etelson mlx5_flow_async_action_handle_query_update_t async_action_query_update; 2668478ba4bbSSuanming Mou mlx5_flow_async_action_handle_query_t async_action_query; 26697ab3962dSSuanming Mou mlx5_flow_async_action_handle_destroy_t async_action_destroy; 26703564e928SGregory Etelson mlx5_flow_async_action_list_handle_create_t 26713564e928SGregory Etelson async_action_list_handle_create; 26723564e928SGregory Etelson mlx5_flow_async_action_list_handle_destroy_t 26733564e928SGregory Etelson async_action_list_handle_destroy; 2674e26f50adSGregory Etelson mlx5_flow_action_list_handle_query_update_t 2675e26f50adSGregory Etelson action_list_handle_query_update; 2676e26f50adSGregory Etelson mlx5_flow_async_action_list_handle_query_update_t 2677e26f50adSGregory Etelson async_action_list_handle_query_update; 26786c991cd9SOri Kam mlx5_flow_calc_table_hash_t flow_calc_table_hash; 2679bb328f44SOri Kam mlx5_flow_calc_encap_hash_t flow_calc_encap_hash; 2680654ebd8cSGregory Etelson mlx5_table_resize_t table_resize; 2681654ebd8cSGregory Etelson mlx5_flow_update_resized_t flow_update_resized; 2682654ebd8cSGregory Etelson table_resize_complete_t table_resize_complete; 268384c406e7SOri Kam }; 268484c406e7SOri Kam 268584c406e7SOri Kam /* mlx5_flow.c */ 268684c406e7SOri Kam 268775a00812SSuanming Mou struct mlx5_flow_workspace *mlx5_flow_push_thread_workspace(void); 268875a00812SSuanming Mou void mlx5_flow_pop_thread_workspace(void); 26898bb81f26SXueming Li struct mlx5_flow_workspace *mlx5_flow_get_thread_workspace(void); 2690dc7c5e0aSGregory Etelson 26914ec6360dSGregory Etelson __extension__ 26924ec6360dSGregory Etelson struct flow_grp_info { 26934ec6360dSGregory Etelson uint64_t external:1; 26944ec6360dSGregory Etelson uint64_t transfer:1; 26954ec6360dSGregory Etelson uint64_t fdb_def_rule:1; 26964ec6360dSGregory Etelson /* force standard group translation */ 26974ec6360dSGregory Etelson uint64_t std_tbl_fix:1; 2698ae2927cdSJiawei Wang uint64_t skip_scale:2; 26994ec6360dSGregory Etelson }; 27004ec6360dSGregory Etelson 27014ec6360dSGregory Etelson static inline bool 27024ec6360dSGregory Etelson tunnel_use_standard_attr_group_translate 27038c5a231bSGregory Etelson (const struct rte_eth_dev *dev, 27044ec6360dSGregory Etelson const struct rte_flow_attr *attr, 27058c5a231bSGregory Etelson const struct mlx5_flow_tunnel *tunnel, 27068c5a231bSGregory Etelson enum mlx5_tof_rule_type tof_rule_type) 27074ec6360dSGregory Etelson { 27084ec6360dSGregory Etelson bool verdict; 27094ec6360dSGregory Etelson 27104ec6360dSGregory Etelson if (!is_tunnel_offload_active(dev)) 27114ec6360dSGregory Etelson /* no tunnel offload API */ 27124ec6360dSGregory Etelson verdict = true; 27134ec6360dSGregory Etelson else if (tunnel) { 27144ec6360dSGregory Etelson /* 27154ec6360dSGregory Etelson * OvS will use jump to group 0 in tunnel steer rule. 27164ec6360dSGregory Etelson * If tunnel steer rule starts from group 0 (attr.group == 0) 27174ec6360dSGregory Etelson * that 0 group must be translated with standard method. 27184ec6360dSGregory Etelson * attr.group == 0 in tunnel match rule translated with tunnel 27194ec6360dSGregory Etelson * method 27204ec6360dSGregory Etelson */ 27214ec6360dSGregory Etelson verdict = !attr->group && 27228c5a231bSGregory Etelson is_flow_tunnel_steer_rule(tof_rule_type); 27234ec6360dSGregory Etelson } else { 27244ec6360dSGregory Etelson /* 27254ec6360dSGregory Etelson * non-tunnel group translation uses standard method for 27264ec6360dSGregory Etelson * root group only: attr.group == 0 27274ec6360dSGregory Etelson */ 27284ec6360dSGregory Etelson verdict = !attr->group; 27294ec6360dSGregory Etelson } 27304ec6360dSGregory Etelson 27314ec6360dSGregory Etelson return verdict; 27324ec6360dSGregory Etelson } 27334ec6360dSGregory Etelson 2734e6100c7bSLi Zhang /** 2735e6100c7bSLi Zhang * Get DV flow aso meter by index. 2736e6100c7bSLi Zhang * 2737e6100c7bSLi Zhang * @param[in] dev 2738e6100c7bSLi Zhang * Pointer to the Ethernet device structure. 2739e6100c7bSLi Zhang * @param[in] idx 2740e6100c7bSLi Zhang * mlx5 flow aso meter index in the container. 2741e6100c7bSLi Zhang * @param[out] ppool 2742e6100c7bSLi Zhang * mlx5 flow aso meter pool in the container, 2743e6100c7bSLi Zhang * 2744e6100c7bSLi Zhang * @return 2745e6100c7bSLi Zhang * Pointer to the aso meter, NULL otherwise. 2746e6100c7bSLi Zhang */ 2747e6100c7bSLi Zhang static inline struct mlx5_aso_mtr * 2748e6100c7bSLi Zhang mlx5_aso_meter_by_idx(struct mlx5_priv *priv, uint32_t idx) 2749e6100c7bSLi Zhang { 2750e6100c7bSLi Zhang struct mlx5_aso_mtr_pool *pool; 2751afb4aa4fSLi Zhang struct mlx5_aso_mtr_pools_mng *pools_mng = 2752afb4aa4fSLi Zhang &priv->sh->mtrmng->pools_mng; 2753e6100c7bSLi Zhang 275424865366SAlexander Kozyrev if (priv->mtr_bulk.aso) 275524865366SAlexander Kozyrev return priv->mtr_bulk.aso + idx; 275648fbb0e9SAlexander Kozyrev /* Decrease to original index. */ 275748fbb0e9SAlexander Kozyrev idx--; 2758afb4aa4fSLi Zhang MLX5_ASSERT(idx / MLX5_ASO_MTRS_PER_POOL < pools_mng->n); 27597797b0feSJiawei Wang rte_rwlock_read_lock(&pools_mng->resize_mtrwl); 2760afb4aa4fSLi Zhang pool = pools_mng->pools[idx / MLX5_ASO_MTRS_PER_POOL]; 27617797b0feSJiawei Wang rte_rwlock_read_unlock(&pools_mng->resize_mtrwl); 2762e6100c7bSLi Zhang return &pool->mtrs[idx % MLX5_ASO_MTRS_PER_POOL]; 2763e6100c7bSLi Zhang } 2764e6100c7bSLi Zhang 276579f89527SGregory Etelson static __rte_always_inline const struct rte_flow_item * 276679f89527SGregory Etelson mlx5_find_end_item(const struct rte_flow_item *item) 276779f89527SGregory Etelson { 276879f89527SGregory Etelson for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++); 276979f89527SGregory Etelson return item; 277079f89527SGregory Etelson } 277179f89527SGregory Etelson 277279f89527SGregory Etelson static __rte_always_inline bool 277379f89527SGregory Etelson mlx5_validate_integrity_item(const struct rte_flow_item_integrity *item) 277479f89527SGregory Etelson { 277579f89527SGregory Etelson struct rte_flow_item_integrity test = *item; 277679f89527SGregory Etelson test.l3_ok = 0; 277779f89527SGregory Etelson test.l4_ok = 0; 277879f89527SGregory Etelson test.ipv4_csum_ok = 0; 277979f89527SGregory Etelson test.l4_csum_ok = 0; 278079f89527SGregory Etelson return (test.value == 0); 278179f89527SGregory Etelson } 278279f89527SGregory Etelson 27832db75e8bSBing Zhao /* 27844f74cb68SBing Zhao * Get ASO CT action by device and index. 27852db75e8bSBing Zhao * 27862db75e8bSBing Zhao * @param[in] dev 27872db75e8bSBing Zhao * Pointer to the Ethernet device structure. 27882db75e8bSBing Zhao * @param[in] idx 27892db75e8bSBing Zhao * Index to the ASO CT action. 27902db75e8bSBing Zhao * 27912db75e8bSBing Zhao * @return 27922db75e8bSBing Zhao * The specified ASO CT action pointer. 27932db75e8bSBing Zhao */ 27942db75e8bSBing Zhao static inline struct mlx5_aso_ct_action * 27954f74cb68SBing Zhao flow_aso_ct_get_by_dev_idx(struct rte_eth_dev *dev, uint32_t idx) 27962db75e8bSBing Zhao { 27972db75e8bSBing Zhao struct mlx5_priv *priv = dev->data->dev_private; 27982db75e8bSBing Zhao struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng; 27992db75e8bSBing Zhao struct mlx5_aso_ct_pool *pool; 28002db75e8bSBing Zhao 28012db75e8bSBing Zhao idx--; 28022db75e8bSBing Zhao MLX5_ASSERT((idx / MLX5_ASO_CT_ACTIONS_PER_POOL) < mng->n); 28032db75e8bSBing Zhao /* Bit operation AND could be used. */ 28042db75e8bSBing Zhao rte_rwlock_read_lock(&mng->resize_rwl); 28052db75e8bSBing Zhao pool = mng->pools[idx / MLX5_ASO_CT_ACTIONS_PER_POOL]; 28062db75e8bSBing Zhao rte_rwlock_read_unlock(&mng->resize_rwl); 28072db75e8bSBing Zhao return &pool->actions[idx % MLX5_ASO_CT_ACTIONS_PER_POOL]; 28082db75e8bSBing Zhao } 28092db75e8bSBing Zhao 28104f74cb68SBing Zhao /* 28114f74cb68SBing Zhao * Get ASO CT action by owner & index. 28124f74cb68SBing Zhao * 28134f74cb68SBing Zhao * @param[in] dev 28144f74cb68SBing Zhao * Pointer to the Ethernet device structure. 28154f74cb68SBing Zhao * @param[in] idx 28164f74cb68SBing Zhao * Index to the ASO CT action and owner port combination. 28174f74cb68SBing Zhao * 28184f74cb68SBing Zhao * @return 28194f74cb68SBing Zhao * The specified ASO CT action pointer. 28204f74cb68SBing Zhao */ 28214f74cb68SBing Zhao static inline struct mlx5_aso_ct_action * 28224f74cb68SBing Zhao flow_aso_ct_get_by_idx(struct rte_eth_dev *dev, uint32_t own_idx) 28234f74cb68SBing Zhao { 28244f74cb68SBing Zhao struct mlx5_priv *priv = dev->data->dev_private; 28254f74cb68SBing Zhao struct mlx5_aso_ct_action *ct; 28264f74cb68SBing Zhao uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(own_idx); 28274f74cb68SBing Zhao uint32_t idx = MLX5_INDIRECT_ACT_CT_GET_IDX(own_idx); 28284f74cb68SBing Zhao 28294f74cb68SBing Zhao if (owner == PORT_ID(priv)) { 28304f74cb68SBing Zhao ct = flow_aso_ct_get_by_dev_idx(dev, idx); 28314f74cb68SBing Zhao } else { 28324f74cb68SBing Zhao struct rte_eth_dev *owndev = &rte_eth_devices[owner]; 28334f74cb68SBing Zhao 28344f74cb68SBing Zhao MLX5_ASSERT(owner < RTE_MAX_ETHPORTS); 28354f74cb68SBing Zhao if (dev->data->dev_started != 1) 28364f74cb68SBing Zhao return NULL; 28374f74cb68SBing Zhao ct = flow_aso_ct_get_by_dev_idx(owndev, idx); 28384f74cb68SBing Zhao if (ct->peer != PORT_ID(priv)) 28394f74cb68SBing Zhao return NULL; 28404f74cb68SBing Zhao } 28414f74cb68SBing Zhao return ct; 28424f74cb68SBing Zhao } 28434f74cb68SBing Zhao 2844985b4792SGregory Etelson static inline uint16_t 2845985b4792SGregory Etelson mlx5_translate_tunnel_etypes(uint64_t pattern_flags) 2846985b4792SGregory Etelson { 2847985b4792SGregory Etelson if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2) 2848985b4792SGregory Etelson return RTE_ETHER_TYPE_TEB; 2849985b4792SGregory Etelson else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4) 2850985b4792SGregory Etelson return RTE_ETHER_TYPE_IPV4; 2851985b4792SGregory Etelson else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6) 2852985b4792SGregory Etelson return RTE_ETHER_TYPE_IPV6; 2853985b4792SGregory Etelson else if (pattern_flags & MLX5_FLOW_LAYER_MPLS) 2854985b4792SGregory Etelson return RTE_ETHER_TYPE_MPLS; 2855985b4792SGregory Etelson return 0; 2856985b4792SGregory Etelson } 2857985b4792SGregory Etelson 2858c40c061aSSuanming Mou int flow_hw_q_flow_flush(struct rte_eth_dev *dev, 2859c40c061aSSuanming Mou struct rte_flow_error *error); 286075a00812SSuanming Mou 286175a00812SSuanming Mou /* 286275a00812SSuanming Mou * Convert rte_mtr_color to mlx5 color. 286375a00812SSuanming Mou * 286475a00812SSuanming Mou * @param[in] rcol 286575a00812SSuanming Mou * rte_mtr_color. 286675a00812SSuanming Mou * 286775a00812SSuanming Mou * @return 286875a00812SSuanming Mou * mlx5 color. 286975a00812SSuanming Mou */ 287075a00812SSuanming Mou static inline int 287175a00812SSuanming Mou rte_col_2_mlx5_col(enum rte_color rcol) 287275a00812SSuanming Mou { 287375a00812SSuanming Mou switch (rcol) { 287475a00812SSuanming Mou case RTE_COLOR_GREEN: 287575a00812SSuanming Mou return MLX5_FLOW_COLOR_GREEN; 287675a00812SSuanming Mou case RTE_COLOR_YELLOW: 287775a00812SSuanming Mou return MLX5_FLOW_COLOR_YELLOW; 287875a00812SSuanming Mou case RTE_COLOR_RED: 287975a00812SSuanming Mou return MLX5_FLOW_COLOR_RED; 288075a00812SSuanming Mou default: 288175a00812SSuanming Mou break; 288275a00812SSuanming Mou } 288375a00812SSuanming Mou return MLX5_FLOW_COLOR_UNDEFINED; 288475a00812SSuanming Mou } 288575a00812SSuanming Mou 2886e9de8f33SJiawei Wang /** 2887e9de8f33SJiawei Wang * Indicates whether flow source vport is representor port. 2888e9de8f33SJiawei Wang * 2889e9de8f33SJiawei Wang * @param[in] priv 2890e9de8f33SJiawei Wang * Pointer to device private context structure. 2891e9de8f33SJiawei Wang * @param[in] act_priv 2892e9de8f33SJiawei Wang * Pointer to actual device private context structure if have. 2893e9de8f33SJiawei Wang * 2894e9de8f33SJiawei Wang * @return 2895e9de8f33SJiawei Wang * True when the flow source vport is representor port, false otherwise. 2896e9de8f33SJiawei Wang */ 2897e9de8f33SJiawei Wang static inline bool 2898e9de8f33SJiawei Wang flow_source_vport_representor(struct mlx5_priv *priv, struct mlx5_priv *act_priv) 2899e9de8f33SJiawei Wang { 2900e9de8f33SJiawei Wang MLX5_ASSERT(priv); 2901e9de8f33SJiawei Wang return (!act_priv ? (priv->representor_id != UINT16_MAX) : 2902e9de8f33SJiawei Wang (act_priv->representor_id != UINT16_MAX)); 2903e9de8f33SJiawei Wang } 2904e9de8f33SJiawei Wang 29059fa7c1cdSDariusz Sosnowski /* All types of Ethernet patterns used in control flow rules. */ 29069fa7c1cdSDariusz Sosnowski enum mlx5_flow_ctrl_rx_eth_pattern_type { 29079fa7c1cdSDariusz Sosnowski MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL = 0, 29089fa7c1cdSDariusz Sosnowski MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL_MCAST, 29099fa7c1cdSDariusz Sosnowski MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST, 29109fa7c1cdSDariusz Sosnowski MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN, 29119fa7c1cdSDariusz Sosnowski MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST, 29129fa7c1cdSDariusz Sosnowski MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN, 29139fa7c1cdSDariusz Sosnowski MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST, 29149fa7c1cdSDariusz Sosnowski MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN, 29159fa7c1cdSDariusz Sosnowski MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC, 29169fa7c1cdSDariusz Sosnowski MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN, 29179fa7c1cdSDariusz Sosnowski MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_MAX, 29189fa7c1cdSDariusz Sosnowski }; 29199fa7c1cdSDariusz Sosnowski 29209fa7c1cdSDariusz Sosnowski /* All types of RSS actions used in control flow rules. */ 29219fa7c1cdSDariusz Sosnowski enum mlx5_flow_ctrl_rx_expanded_rss_type { 29229a66bb73SBing Zhao MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_UDP = 0, 29239a66bb73SBing Zhao MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_TCP, 29249fa7c1cdSDariusz Sosnowski MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_UDP, 29259fa7c1cdSDariusz Sosnowski MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_TCP, 29269fa7c1cdSDariusz Sosnowski MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6, 29279a66bb73SBing Zhao MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4, 29289a66bb73SBing Zhao MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_NON_IP, 29299fa7c1cdSDariusz Sosnowski MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX, 29309fa7c1cdSDariusz Sosnowski }; 29319fa7c1cdSDariusz Sosnowski 29329fa7c1cdSDariusz Sosnowski /** 29339fa7c1cdSDariusz Sosnowski * Contains pattern template, template table and its attributes for a single 29349fa7c1cdSDariusz Sosnowski * combination of Ethernet pattern and RSS action. Used to create control flow rules 29359fa7c1cdSDariusz Sosnowski * with HWS. 29369fa7c1cdSDariusz Sosnowski */ 29379fa7c1cdSDariusz Sosnowski struct mlx5_flow_hw_ctrl_rx_table { 29389fa7c1cdSDariusz Sosnowski struct rte_flow_template_table_attr attr; 29399fa7c1cdSDariusz Sosnowski struct rte_flow_pattern_template *pt; 29409fa7c1cdSDariusz Sosnowski struct rte_flow_template_table *tbl; 29419fa7c1cdSDariusz Sosnowski }; 29429fa7c1cdSDariusz Sosnowski 29439fa7c1cdSDariusz Sosnowski /* Contains all templates required to create control flow rules with HWS. */ 29449fa7c1cdSDariusz Sosnowski struct mlx5_flow_hw_ctrl_rx { 29459fa7c1cdSDariusz Sosnowski struct rte_flow_actions_template *rss[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX]; 29469fa7c1cdSDariusz Sosnowski struct mlx5_flow_hw_ctrl_rx_table tables[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_MAX] 29479fa7c1cdSDariusz Sosnowski [MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX]; 29489fa7c1cdSDariusz Sosnowski }; 29499fa7c1cdSDariusz Sosnowski 295048db3b61SDariusz Sosnowski /* Contains all templates required for control flow rules in FDB with HWS. */ 295148db3b61SDariusz Sosnowski struct mlx5_flow_hw_ctrl_fdb { 295248db3b61SDariusz Sosnowski struct rte_flow_pattern_template *esw_mgr_items_tmpl; 295348db3b61SDariusz Sosnowski struct rte_flow_actions_template *regc_jump_actions_tmpl; 295448db3b61SDariusz Sosnowski struct rte_flow_template_table *hw_esw_sq_miss_root_tbl; 295548db3b61SDariusz Sosnowski struct rte_flow_pattern_template *regc_sq_items_tmpl; 295648db3b61SDariusz Sosnowski struct rte_flow_actions_template *port_actions_tmpl; 295748db3b61SDariusz Sosnowski struct rte_flow_template_table *hw_esw_sq_miss_tbl; 295848db3b61SDariusz Sosnowski struct rte_flow_pattern_template *port_items_tmpl; 295948db3b61SDariusz Sosnowski struct rte_flow_actions_template *jump_one_actions_tmpl; 296048db3b61SDariusz Sosnowski struct rte_flow_template_table *hw_esw_zero_tbl; 296148db3b61SDariusz Sosnowski struct rte_flow_pattern_template *tx_meta_items_tmpl; 296248db3b61SDariusz Sosnowski struct rte_flow_actions_template *tx_meta_actions_tmpl; 296348db3b61SDariusz Sosnowski struct rte_flow_template_table *hw_tx_meta_cpy_tbl; 296448db3b61SDariusz Sosnowski struct rte_flow_pattern_template *lacp_rx_items_tmpl; 296548db3b61SDariusz Sosnowski struct rte_flow_actions_template *lacp_rx_actions_tmpl; 296648db3b61SDariusz Sosnowski struct rte_flow_template_table *hw_lacp_rx_tbl; 296748db3b61SDariusz Sosnowski }; 296848db3b61SDariusz Sosnowski 29699fa7c1cdSDariusz Sosnowski #define MLX5_CTRL_PROMISCUOUS (RTE_BIT32(0)) 29709fa7c1cdSDariusz Sosnowski #define MLX5_CTRL_ALL_MULTICAST (RTE_BIT32(1)) 29719fa7c1cdSDariusz Sosnowski #define MLX5_CTRL_BROADCAST (RTE_BIT32(2)) 29729fa7c1cdSDariusz Sosnowski #define MLX5_CTRL_IPV4_MULTICAST (RTE_BIT32(3)) 29739fa7c1cdSDariusz Sosnowski #define MLX5_CTRL_IPV6_MULTICAST (RTE_BIT32(4)) 29749fa7c1cdSDariusz Sosnowski #define MLX5_CTRL_DMAC (RTE_BIT32(5)) 29759fa7c1cdSDariusz Sosnowski #define MLX5_CTRL_VLAN_FILTER (RTE_BIT32(6)) 29769fa7c1cdSDariusz Sosnowski 29779fa7c1cdSDariusz Sosnowski int mlx5_flow_hw_ctrl_flows(struct rte_eth_dev *dev, uint32_t flags); 2978da7f82b0SDariusz Sosnowski 2979cf99567fSDariusz Sosnowski /** Create a control flow rule for matching unicast DMAC with VLAN (Verbs and DV). */ 2980cf99567fSDariusz Sosnowski int mlx5_legacy_dmac_flow_create(struct rte_eth_dev *dev, const struct rte_ether_addr *addr); 2981cf99567fSDariusz Sosnowski 2982cf99567fSDariusz Sosnowski /** Destroy a control flow rule for matching unicast DMAC with VLAN (Verbs and DV). */ 2983cf99567fSDariusz Sosnowski int mlx5_legacy_dmac_flow_destroy(struct rte_eth_dev *dev, const struct rte_ether_addr *addr); 2984cf99567fSDariusz Sosnowski 2985cf99567fSDariusz Sosnowski /** Create a control flow rule for matching unicast DMAC with VLAN (Verbs and DV). */ 2986cf99567fSDariusz Sosnowski int mlx5_legacy_dmac_vlan_flow_create(struct rte_eth_dev *dev, 2987cf99567fSDariusz Sosnowski const struct rte_ether_addr *addr, 2988cf99567fSDariusz Sosnowski const uint16_t vid); 2989cf99567fSDariusz Sosnowski 2990cf99567fSDariusz Sosnowski /** Destroy a control flow rule for matching unicast DMAC with VLAN (Verbs and DV). */ 2991cf99567fSDariusz Sosnowski int mlx5_legacy_dmac_vlan_flow_destroy(struct rte_eth_dev *dev, 2992cf99567fSDariusz Sosnowski const struct rte_ether_addr *addr, 2993cf99567fSDariusz Sosnowski const uint16_t vid); 2994cf99567fSDariusz Sosnowski 2995cf99567fSDariusz Sosnowski /** Destroy a control flow rule registered on port level control flow rule type. */ 2996cf99567fSDariusz Sosnowski void mlx5_legacy_ctrl_flow_destroy(struct rte_eth_dev *dev, struct mlx5_ctrl_flow_entry *entry); 2997cf99567fSDariusz Sosnowski 2998da7f82b0SDariusz Sosnowski /** Create a control flow rule for matching unicast DMAC (HWS). */ 2999da7f82b0SDariusz Sosnowski int mlx5_flow_hw_ctrl_flow_dmac(struct rte_eth_dev *dev, const struct rte_ether_addr *addr); 3000da7f82b0SDariusz Sosnowski 300104ea8468SDariusz Sosnowski /** Destroy a control flow rule for matching unicast DMAC (HWS). */ 300204ea8468SDariusz Sosnowski int mlx5_flow_hw_ctrl_flow_dmac_destroy(struct rte_eth_dev *dev, const struct rte_ether_addr *addr); 300304ea8468SDariusz Sosnowski 3004da7f82b0SDariusz Sosnowski /** Create a control flow rule for matching unicast DMAC with VLAN (HWS). */ 3005da7f82b0SDariusz Sosnowski int mlx5_flow_hw_ctrl_flow_dmac_vlan(struct rte_eth_dev *dev, 3006da7f82b0SDariusz Sosnowski const struct rte_ether_addr *addr, 3007da7f82b0SDariusz Sosnowski const uint16_t vlan); 3008da7f82b0SDariusz Sosnowski 300904ea8468SDariusz Sosnowski /** Destroy a control flow rule for matching unicast DMAC with VLAN (HWS). */ 301004ea8468SDariusz Sosnowski int mlx5_flow_hw_ctrl_flow_dmac_vlan_destroy(struct rte_eth_dev *dev, 301104ea8468SDariusz Sosnowski const struct rte_ether_addr *addr, 301204ea8468SDariusz Sosnowski const uint16_t vlan); 301304ea8468SDariusz Sosnowski 30149fa7c1cdSDariusz Sosnowski void mlx5_flow_hw_cleanup_ctrl_rx_templates(struct rte_eth_dev *dev); 30159fa7c1cdSDariusz Sosnowski 30164ec6360dSGregory Etelson int mlx5_flow_group_to_table(struct rte_eth_dev *dev, 30174ec6360dSGregory Etelson const struct mlx5_flow_tunnel *tunnel, 30184ec6360dSGregory Etelson uint32_t group, uint32_t *table, 3019eab3ca48SGregory Etelson const struct flow_grp_info *flags, 30204ec6360dSGregory Etelson struct rte_flow_error *error); 3021e745f900SSuanming Mou uint64_t mlx5_flow_hashfields_adjust(struct mlx5_flow_rss_desc *rss_desc, 3022e745f900SSuanming Mou int tunnel, uint64_t layer_types, 3023fc2c498cSOri Kam uint64_t hash_fields); 30243eca5f8aSOphir Munk int mlx5_flow_discover_priorities(struct rte_eth_dev *dev); 302584c406e7SOri Kam uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority, 302684c406e7SOri Kam uint32_t subpriority); 30275f8ae44dSDong Zhou uint32_t mlx5_get_lowest_priority(struct rte_eth_dev *dev, 30285f8ae44dSDong Zhou const struct rte_flow_attr *attr); 30295f8ae44dSDong Zhou uint16_t mlx5_get_matcher_priority(struct rte_eth_dev *dev, 30305f8ae44dSDong Zhou const struct rte_flow_attr *attr, 3031ebe9afedSXueming Li uint32_t subpriority, bool external); 30327f6e276bSMichael Savisko uint32_t mlx5_get_send_to_kernel_priority(struct rte_eth_dev *dev); 303399d49f47SMatan Azrad int mlx5_flow_get_reg_id(struct rte_eth_dev *dev, 30343e8edd0eSViacheslav Ovsiienko enum mlx5_feature_name feature, 30353e8edd0eSViacheslav Ovsiienko uint32_t id, 30363e8edd0eSViacheslav Ovsiienko struct rte_flow_error *error); 3037e4fcdcd6SMoti Haimovsky const struct rte_flow_action *mlx5_flow_find_action 3038e4fcdcd6SMoti Haimovsky (const struct rte_flow_action *actions, 3039e4fcdcd6SMoti Haimovsky enum rte_flow_action_type action); 3040d7cfcdddSAndrey Vesnovaty int mlx5_validate_action_rss(struct rte_eth_dev *dev, 3041d7cfcdddSAndrey Vesnovaty const struct rte_flow_action *action, 3042d7cfcdddSAndrey Vesnovaty struct rte_flow_error *error); 30435e26c99fSRongwei Liu 30445e26c99fSRongwei Liu struct mlx5_hw_encap_decap_action* 30455e26c99fSRongwei Liu mlx5_reformat_action_create(struct rte_eth_dev *dev, 30465e26c99fSRongwei Liu const struct rte_flow_indir_action_conf *conf, 30475e26c99fSRongwei Liu const struct rte_flow_action *encap_action, 30485e26c99fSRongwei Liu const struct rte_flow_action *decap_action, 30495e26c99fSRongwei Liu struct rte_flow_error *error); 30505e26c99fSRongwei Liu int mlx5_reformat_action_destroy(struct rte_eth_dev *dev, 30515e26c99fSRongwei Liu struct rte_flow_action_list_handle *handle, 30525e26c99fSRongwei Liu struct rte_flow_error *error); 305384c406e7SOri Kam int mlx5_flow_validate_action_count(struct rte_eth_dev *dev, 30543e9fa079SDekel Peled const struct rte_flow_attr *attr, 305584c406e7SOri Kam struct rte_flow_error *error); 3056c1f0cdaeSDariusz Sosnowski int mlx5_flow_validate_action_drop(struct rte_eth_dev *dev, 3057c1f0cdaeSDariusz Sosnowski bool is_root, 30583e9fa079SDekel Peled const struct rte_flow_attr *attr, 305984c406e7SOri Kam struct rte_flow_error *error); 306084c406e7SOri Kam int mlx5_flow_validate_action_flag(uint64_t action_flags, 30613e9fa079SDekel Peled const struct rte_flow_attr *attr, 306284c406e7SOri Kam struct rte_flow_error *error); 3063d6dc072aSGregory Etelson int mlx5_flow_validate_action_mark(struct rte_eth_dev *dev, 3064d6dc072aSGregory Etelson const struct rte_flow_action *action, 306584c406e7SOri Kam uint64_t action_flags, 30663e9fa079SDekel Peled const struct rte_flow_attr *attr, 306784c406e7SOri Kam struct rte_flow_error *error); 306899daf855SDariusz Sosnowski int mlx5_flow_validate_target_queue(struct rte_eth_dev *dev, 306999daf855SDariusz Sosnowski const struct rte_flow_action *action, 307099daf855SDariusz Sosnowski struct rte_flow_error *error); 307184c406e7SOri Kam int mlx5_flow_validate_action_queue(const struct rte_flow_action *action, 307284c406e7SOri Kam uint64_t action_flags, 307384c406e7SOri Kam struct rte_eth_dev *dev, 30743e9fa079SDekel Peled const struct rte_flow_attr *attr, 307584c406e7SOri Kam struct rte_flow_error *error); 307684c406e7SOri Kam int mlx5_flow_validate_action_rss(const struct rte_flow_action *action, 307784c406e7SOri Kam uint64_t action_flags, 307884c406e7SOri Kam struct rte_eth_dev *dev, 30793e9fa079SDekel Peled const struct rte_flow_attr *attr, 30801183f12fSOri Kam uint64_t item_flags, 308184c406e7SOri Kam struct rte_flow_error *error); 30823c78124fSShiri Kuzin int mlx5_flow_validate_action_default_miss(uint64_t action_flags, 30833c78124fSShiri Kuzin const struct rte_flow_attr *attr, 30843c78124fSShiri Kuzin struct rte_flow_error *error); 3085c23626f2SMichael Baum int flow_validate_modify_field_level 308677edfda9SSuanming Mou (const struct rte_flow_field_data *data, 3087c23626f2SMichael Baum struct rte_flow_error *error); 3088d6dc072aSGregory Etelson int 308980c67625SGregory Etelson mlx5_flow_dv_validate_action_l2_encap(struct rte_eth_dev *dev, 3090d6dc072aSGregory Etelson uint64_t action_flags, 3091d6dc072aSGregory Etelson const struct rte_flow_action *action, 3092d6dc072aSGregory Etelson const struct rte_flow_attr *attr, 3093d6dc072aSGregory Etelson struct rte_flow_error *error); 3094d6dc072aSGregory Etelson int 309580c67625SGregory Etelson mlx5_flow_dv_validate_action_decap(struct rte_eth_dev *dev, 3096d6dc072aSGregory Etelson uint64_t action_flags, 3097d6dc072aSGregory Etelson const struct rte_flow_action *action, 3098d6dc072aSGregory Etelson const uint64_t item_flags, 3099d6dc072aSGregory Etelson const struct rte_flow_attr *attr, 3100d6dc072aSGregory Etelson struct rte_flow_error *error); 3101d6dc072aSGregory Etelson int 310280c67625SGregory Etelson mlx5_flow_dv_validate_action_aso_ct(struct rte_eth_dev *dev, 3103d6dc072aSGregory Etelson uint64_t action_flags, 3104d6dc072aSGregory Etelson uint64_t item_flags, 3105d6dc072aSGregory Etelson bool root, 3106d6dc072aSGregory Etelson struct rte_flow_error *error); 3107d6dc072aSGregory Etelson int 310880c67625SGregory Etelson mlx5_flow_dv_validate_action_raw_encap_decap 3109d6dc072aSGregory Etelson (struct rte_eth_dev *dev, 3110d6dc072aSGregory Etelson const struct rte_flow_action_raw_decap *decap, 3111d6dc072aSGregory Etelson const struct rte_flow_action_raw_encap *encap, 3112d6dc072aSGregory Etelson const struct rte_flow_attr *attr, uint64_t *action_flags, 3113d6dc072aSGregory Etelson int *actions_n, const struct rte_flow_action *action, 3114d6dc072aSGregory Etelson uint64_t item_flags, struct rte_flow_error *error); 311580c67625SGregory Etelson int mlx5_flow_item_acceptable(const struct rte_eth_dev *dev, 311680c67625SGregory Etelson const struct rte_flow_item *item, 31176bd7fbd0SDekel Peled const uint8_t *mask, 31186bd7fbd0SDekel Peled const uint8_t *nic_mask, 31196bd7fbd0SDekel Peled unsigned int size, 31206859e67eSDekel Peled bool range_accepted, 31216bd7fbd0SDekel Peled struct rte_flow_error *error); 312280c67625SGregory Etelson int mlx5_flow_validate_item_eth(const struct rte_eth_dev *dev, 312380c67625SGregory Etelson const struct rte_flow_item *item, 312486b59a1aSMatan Azrad uint64_t item_flags, bool ext_vlan_sup, 312584c406e7SOri Kam struct rte_flow_error *error); 312680c67625SGregory Etelson int 312780c67625SGregory Etelson mlx5_flow_dv_validate_item_vlan(const struct rte_flow_item *item, 312880c67625SGregory Etelson uint64_t item_flags, 312980c67625SGregory Etelson struct rte_eth_dev *dev, 313080c67625SGregory Etelson struct rte_flow_error *error); 313180c67625SGregory Etelson int 313280c67625SGregory Etelson mlx5_flow_dv_validate_item_ipv4(struct rte_eth_dev *dev, 313380c67625SGregory Etelson const struct rte_flow_item *item, 313480c67625SGregory Etelson uint64_t item_flags, 313580c67625SGregory Etelson uint64_t last_item, 313680c67625SGregory Etelson uint16_t ether_type, 313780c67625SGregory Etelson const struct rte_flow_item_ipv4 *acc_mask, 313880c67625SGregory Etelson struct rte_flow_error *error); 313980c67625SGregory Etelson int 314080c67625SGregory Etelson mlx5_flow_dv_validate_item_gtp(struct rte_eth_dev *dev, 314180c67625SGregory Etelson const struct rte_flow_item *item, 314280c67625SGregory Etelson uint64_t item_flags, 314380c67625SGregory Etelson struct rte_flow_error *error); 314480c67625SGregory Etelson int 314580c67625SGregory Etelson mlx5_flow_dv_validate_item_gtp_psc(const struct rte_eth_dev *dev, 314680c67625SGregory Etelson const struct rte_flow_item *item, 314780c67625SGregory Etelson uint64_t last_item, 314880c67625SGregory Etelson const struct rte_flow_item *gtp_item, 314980c67625SGregory Etelson bool root, struct rte_flow_error *error); 315080c67625SGregory Etelson int 315180c67625SGregory Etelson mlx5_flow_dv_validate_item_aso_ct(struct rte_eth_dev *dev, 315280c67625SGregory Etelson const struct rte_flow_item *item, 315380c67625SGregory Etelson uint64_t *item_flags, 315480c67625SGregory Etelson struct rte_flow_error *error); 315580c67625SGregory Etelson int mlx5_flow_validate_item_gre(const struct rte_eth_dev *dev, 315680c67625SGregory Etelson const struct rte_flow_item *item, 315784c406e7SOri Kam uint64_t item_flags, 315884c406e7SOri Kam uint8_t target_protocol, 315984c406e7SOri Kam struct rte_flow_error *error); 316080c67625SGregory Etelson int mlx5_flow_validate_item_gre_key(const struct rte_eth_dev *dev, 316180c67625SGregory Etelson const struct rte_flow_item *item, 3162a7a03655SXiaoyu Min uint64_t item_flags, 3163a7a03655SXiaoyu Min const struct rte_flow_item *gre_item, 3164a7a03655SXiaoyu Min struct rte_flow_error *error); 31655c4d4917SSean Zhang int mlx5_flow_validate_item_gre_option(struct rte_eth_dev *dev, 31665c4d4917SSean Zhang const struct rte_flow_item *item, 31675c4d4917SSean Zhang uint64_t item_flags, 31685c4d4917SSean Zhang const struct rte_flow_attr *attr, 31695c4d4917SSean Zhang const struct rte_flow_item *gre_item, 31705c4d4917SSean Zhang struct rte_flow_error *error); 317180c67625SGregory Etelson int mlx5_flow_validate_item_ipv4(const struct rte_eth_dev *dev, 317280c67625SGregory Etelson const struct rte_flow_item *item, 3173ed4c5247SShahaf Shuler uint64_t item_flags, 3174fba32130SXiaoyu Min uint64_t last_item, 3175fba32130SXiaoyu Min uint16_t ether_type, 317655c61fa7SViacheslav Ovsiienko const struct rte_flow_item_ipv4 *acc_mask, 31776859e67eSDekel Peled bool range_accepted, 317884c406e7SOri Kam struct rte_flow_error *error); 317980c67625SGregory Etelson int mlx5_flow_validate_item_ipv6(const struct rte_eth_dev *dev, 318080c67625SGregory Etelson const struct rte_flow_item *item, 318184c406e7SOri Kam uint64_t item_flags, 3182fba32130SXiaoyu Min uint64_t last_item, 3183fba32130SXiaoyu Min uint16_t ether_type, 318455c61fa7SViacheslav Ovsiienko const struct rte_flow_item_ipv6 *acc_mask, 318584c406e7SOri Kam struct rte_flow_error *error); 318638f7efaaSDekel Peled int mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev, 318738f7efaaSDekel Peled const struct rte_flow_item *item, 318884c406e7SOri Kam uint64_t item_flags, 318938f7efaaSDekel Peled uint64_t prev_layer, 319084c406e7SOri Kam struct rte_flow_error *error); 319180c67625SGregory Etelson int mlx5_flow_validate_item_tcp(const struct rte_eth_dev *dev, 319280c67625SGregory Etelson const struct rte_flow_item *item, 319384c406e7SOri Kam uint64_t item_flags, 319484c406e7SOri Kam uint8_t target_protocol, 319592378c2bSMoti Haimovsky const struct rte_flow_item_tcp *flow_mask, 319684c406e7SOri Kam struct rte_flow_error *error); 319780c67625SGregory Etelson int mlx5_flow_validate_item_udp(const struct rte_eth_dev *dev, 319880c67625SGregory Etelson const struct rte_flow_item *item, 319984c406e7SOri Kam uint64_t item_flags, 320084c406e7SOri Kam uint8_t target_protocol, 320184c406e7SOri Kam struct rte_flow_error *error); 320284c406e7SOri Kam int mlx5_flow_validate_item_vlan(const struct rte_flow_item *item, 3203ed4c5247SShahaf Shuler uint64_t item_flags, 3204dfedf3e3SViacheslav Ovsiienko struct rte_eth_dev *dev, 320584c406e7SOri Kam struct rte_flow_error *error); 3206630a587bSRongwei Liu int mlx5_flow_validate_item_vxlan(struct rte_eth_dev *dev, 3207a1fd0c82SRongwei Liu uint16_t udp_dport, 3208630a587bSRongwei Liu const struct rte_flow_item *item, 320984c406e7SOri Kam uint64_t item_flags, 32101939eb6fSDariusz Sosnowski bool root, 321184c406e7SOri Kam struct rte_flow_error *error); 321284c406e7SOri Kam int mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item, 321384c406e7SOri Kam uint64_t item_flags, 321484c406e7SOri Kam struct rte_eth_dev *dev, 321584c406e7SOri Kam struct rte_flow_error *error); 321680c67625SGregory Etelson int mlx5_flow_validate_item_icmp(const struct rte_eth_dev *dev, 321780c67625SGregory Etelson const struct rte_flow_item *item, 3218d53aa89aSXiaoyu Min uint64_t item_flags, 3219d53aa89aSXiaoyu Min uint8_t target_protocol, 3220d53aa89aSXiaoyu Min struct rte_flow_error *error); 322180c67625SGregory Etelson int mlx5_flow_validate_item_icmp6(const struct rte_eth_dev *dev, 322280c67625SGregory Etelson const struct rte_flow_item *item, 3223d53aa89aSXiaoyu Min uint64_t item_flags, 3224d53aa89aSXiaoyu Min uint8_t target_protocol, 3225d53aa89aSXiaoyu Min struct rte_flow_error *error); 322680c67625SGregory Etelson int mlx5_flow_validate_item_icmp6_echo(const struct rte_eth_dev *dev, 322780c67625SGregory Etelson const struct rte_flow_item *item, 322801314192SLeo Xu uint64_t item_flags, 322901314192SLeo Xu uint8_t target_protocol, 323001314192SLeo Xu struct rte_flow_error *error); 323180c67625SGregory Etelson int mlx5_flow_validate_item_nvgre(const struct rte_eth_dev *dev, 323280c67625SGregory Etelson const struct rte_flow_item *item, 3233ea81c1b8SDekel Peled uint64_t item_flags, 3234ea81c1b8SDekel Peled uint8_t target_protocol, 3235ea81c1b8SDekel Peled struct rte_flow_error *error); 3236e59a5dbcSMoti Haimovsky int mlx5_flow_validate_item_geneve(const struct rte_flow_item *item, 3237e59a5dbcSMoti Haimovsky uint64_t item_flags, 3238e59a5dbcSMoti Haimovsky struct rte_eth_dev *dev, 3239e59a5dbcSMoti Haimovsky struct rte_flow_error *error); 3240f7239fceSShiri Kuzin int mlx5_flow_validate_item_geneve_opt(const struct rte_flow_item *item, 3241f7239fceSShiri Kuzin uint64_t last_item, 3242f7239fceSShiri Kuzin const struct rte_flow_item *geneve_item, 3243f7239fceSShiri Kuzin struct rte_eth_dev *dev, 3244f7239fceSShiri Kuzin struct rte_flow_error *error); 324580c67625SGregory Etelson int mlx5_flow_validate_item_ecpri(const struct rte_eth_dev *dev, 324680c67625SGregory Etelson const struct rte_flow_item *item, 3247c7eca236SBing Zhao uint64_t item_flags, 3248c7eca236SBing Zhao uint64_t last_item, 3249c7eca236SBing Zhao uint16_t ether_type, 3250c7eca236SBing Zhao const struct rte_flow_item_ecpri *acc_mask, 3251c7eca236SBing Zhao struct rte_flow_error *error); 32526f7d6622SHaifei Luo int mlx5_flow_validate_item_nsh(struct rte_eth_dev *dev, 32536f7d6622SHaifei Luo const struct rte_flow_item *item, 32546f7d6622SHaifei Luo struct rte_flow_error *error); 325544432018SLi Zhang int mlx5_flow_create_mtr_tbls(struct rte_eth_dev *dev, 325644432018SLi Zhang struct mlx5_flow_meter_info *fm, 325744432018SLi Zhang uint32_t mtr_idx, 325844432018SLi Zhang uint8_t domain_bitmap); 325944432018SLi Zhang void mlx5_flow_destroy_mtr_tbls(struct rte_eth_dev *dev, 326044432018SLi Zhang struct mlx5_flow_meter_info *fm); 3261afb4aa4fSLi Zhang void mlx5_flow_destroy_mtr_drop_tbls(struct rte_eth_dev *dev); 3262fc6ce56bSLi Zhang struct mlx5_flow_meter_sub_policy *mlx5_flow_meter_sub_policy_rss_prepare 3263fc6ce56bSLi Zhang (struct rte_eth_dev *dev, 3264fc6ce56bSLi Zhang struct mlx5_flow_meter_policy *mtr_policy, 3265fc6ce56bSLi Zhang struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS]); 3266ec962badSLi Zhang void mlx5_flow_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev, 3267ec962badSLi Zhang struct mlx5_flow_meter_policy *mtr_policy); 3268994829e6SSuanming Mou int mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev); 326945633c46SSuanming Mou int mlx5_flow_discover_dr_action_support(struct rte_eth_dev *dev); 3270d1c84dc0SGavin Li int mlx5_flow_discover_ipv6_tc_support(struct rte_eth_dev *dev); 3271ec4e11d4SDmitry Kozlyuk int mlx5_action_handle_attach(struct rte_eth_dev *dev); 3272ec4e11d4SDmitry Kozlyuk int mlx5_action_handle_detach(struct rte_eth_dev *dev); 32734b61b877SBing Zhao int mlx5_action_handle_flush(struct rte_eth_dev *dev); 32744ec6360dSGregory Etelson void mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id); 32754ec6360dSGregory Etelson int mlx5_alloc_tunnel_hub(struct mlx5_dev_ctx_shared *sh); 3276afd7a625SXueming Li 3277961b6774SMatan Azrad struct mlx5_list_entry *flow_dv_tbl_create_cb(void *tool_ctx, void *entry_ctx); 3278961b6774SMatan Azrad int flow_dv_tbl_match_cb(void *tool_ctx, struct mlx5_list_entry *entry, 3279f5b0aed2SSuanming Mou void *cb_ctx); 3280961b6774SMatan Azrad void flow_dv_tbl_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry); 3281961b6774SMatan Azrad struct mlx5_list_entry *flow_dv_tbl_clone_cb(void *tool_ctx, 3282961b6774SMatan Azrad struct mlx5_list_entry *oentry, 3283961b6774SMatan Azrad void *entry_ctx); 3284961b6774SMatan Azrad void flow_dv_tbl_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry); 3285afd7a625SXueming Li struct mlx5_flow_tbl_resource *flow_dv_tbl_resource_get(struct rte_eth_dev *dev, 32862d2cef5dSLi Zhang uint32_t table_level, uint8_t egress, uint8_t transfer, 3287afd7a625SXueming Li bool external, const struct mlx5_flow_tunnel *tunnel, 32882d2cef5dSLi Zhang uint32_t group_id, uint8_t dummy, 32892d2cef5dSLi Zhang uint32_t table_id, struct rte_flow_error *error); 3290f31a141eSMichael Savisko int flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh, 3291f31a141eSMichael Savisko struct mlx5_flow_tbl_resource *tbl); 3292afd7a625SXueming Li 3293961b6774SMatan Azrad struct mlx5_list_entry *flow_dv_tag_create_cb(void *tool_ctx, void *cb_ctx); 3294961b6774SMatan Azrad int flow_dv_tag_match_cb(void *tool_ctx, struct mlx5_list_entry *entry, 3295f5b0aed2SSuanming Mou void *cb_ctx); 3296961b6774SMatan Azrad void flow_dv_tag_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry); 3297961b6774SMatan Azrad struct mlx5_list_entry *flow_dv_tag_clone_cb(void *tool_ctx, 3298961b6774SMatan Azrad struct mlx5_list_entry *oentry, 3299f5b0aed2SSuanming Mou void *cb_ctx); 3300961b6774SMatan Azrad void flow_dv_tag_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry); 3301f7f73ac1SXueming Li 3302ff4064d5SMaayan Kashani int flow_modify_match_cb(void *tool_ctx, struct mlx5_list_entry *entry, 3303961b6774SMatan Azrad void *cb_ctx); 3304ff4064d5SMaayan Kashani struct mlx5_list_entry *flow_modify_create_cb(void *tool_ctx, void *ctx); 3305ff4064d5SMaayan Kashani void flow_modify_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry); 3306ff4064d5SMaayan Kashani struct mlx5_list_entry *flow_modify_clone_cb(void *tool_ctx, 3307961b6774SMatan Azrad struct mlx5_list_entry *oentry, 3308961b6774SMatan Azrad void *ctx); 3309ff4064d5SMaayan Kashani void flow_modify_clone_free_cb(void *tool_ctx, 3310961b6774SMatan Azrad struct mlx5_list_entry *entry); 3311961b6774SMatan Azrad 3312961b6774SMatan Azrad struct mlx5_list_entry *flow_dv_mreg_create_cb(void *tool_ctx, void *ctx); 3313961b6774SMatan Azrad int flow_dv_mreg_match_cb(void *tool_ctx, struct mlx5_list_entry *entry, 3314961b6774SMatan Azrad void *cb_ctx); 3315961b6774SMatan Azrad void flow_dv_mreg_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry); 3316961b6774SMatan Azrad struct mlx5_list_entry *flow_dv_mreg_clone_cb(void *tool_ctx, 3317961b6774SMatan Azrad struct mlx5_list_entry *entry, 3318961b6774SMatan Azrad void *ctx); 3319961b6774SMatan Azrad void flow_dv_mreg_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry); 3320961b6774SMatan Azrad 3321ff4064d5SMaayan Kashani int flow_encap_decap_match_cb(void *tool_ctx, struct mlx5_list_entry *entry, 3322961b6774SMatan Azrad void *cb_ctx); 3323ff4064d5SMaayan Kashani struct mlx5_list_entry *flow_encap_decap_create_cb(void *tool_ctx, 3324961b6774SMatan Azrad void *cb_ctx); 3325ff4064d5SMaayan Kashani void flow_encap_decap_remove_cb(void *tool_ctx, 3326961b6774SMatan Azrad struct mlx5_list_entry *entry); 3327ff4064d5SMaayan Kashani struct mlx5_list_entry *flow_encap_decap_clone_cb(void *tool_ctx, 3328961b6774SMatan Azrad struct mlx5_list_entry *entry, 3329961b6774SMatan Azrad void *cb_ctx); 3330ff4064d5SMaayan Kashani void flow_encap_decap_clone_free_cb(void *tool_ctx, 3331961b6774SMatan Azrad struct mlx5_list_entry *entry); 3332ff4064d5SMaayan Kashani int __flow_encap_decap_resource_register 3333ff4064d5SMaayan Kashani (struct rte_eth_dev *dev, 3334ff4064d5SMaayan Kashani struct mlx5_flow_dv_encap_decap_resource *resource, 3335ff4064d5SMaayan Kashani bool is_root, 3336ff4064d5SMaayan Kashani struct mlx5_flow_dv_encap_decap_resource **encap_decap, 3337ff4064d5SMaayan Kashani struct rte_flow_error *error); 3338ff4064d5SMaayan Kashani int __flow_modify_hdr_resource_register 3339ff4064d5SMaayan Kashani (struct rte_eth_dev *dev, 3340ff4064d5SMaayan Kashani struct mlx5_flow_dv_modify_hdr_resource *resource, 3341ff4064d5SMaayan Kashani struct mlx5_flow_dv_modify_hdr_resource **modify, 3342ff4064d5SMaayan Kashani struct rte_flow_error *error); 3343ff4064d5SMaayan Kashani int flow_encap_decap_resource_release(struct rte_eth_dev *dev, 3344ff4064d5SMaayan Kashani uint32_t encap_decap_idx); 3345e38776c3SMaayan Kashani int flow_matcher_match_cb(void *tool_ctx, struct mlx5_list_entry *entry, 3346e78e5408SMatan Azrad void *ctx); 3347e38776c3SMaayan Kashani struct mlx5_list_entry *flow_matcher_create_cb(void *tool_ctx, void *ctx); 3348e38776c3SMaayan Kashani void flow_matcher_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry); 3349e38776c3SMaayan Kashani struct mlx5_list_entry *flow_matcher_clone_cb(void *tool_ctx __rte_unused, 3350e38776c3SMaayan Kashani struct mlx5_list_entry *entry, void *cb_ctx); 3351e38776c3SMaayan Kashani void flow_matcher_clone_free_cb(void *tool_ctx __rte_unused, 3352e38776c3SMaayan Kashani struct mlx5_list_entry *entry); 33536507c9f5SSuanming Mou int flow_dv_port_id_match_cb(void *tool_ctx, struct mlx5_list_entry *entry, 33546507c9f5SSuanming Mou void *cb_ctx); 33556507c9f5SSuanming Mou struct mlx5_list_entry *flow_dv_port_id_create_cb(void *tool_ctx, void *cb_ctx); 33566507c9f5SSuanming Mou void flow_dv_port_id_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry); 33576507c9f5SSuanming Mou struct mlx5_list_entry *flow_dv_port_id_clone_cb(void *tool_ctx, 33586507c9f5SSuanming Mou struct mlx5_list_entry *entry, void *cb_ctx); 33596507c9f5SSuanming Mou void flow_dv_port_id_clone_free_cb(void *tool_ctx, 3360e78e5408SMatan Azrad struct mlx5_list_entry *entry); 336118726355SXueming Li 33626507c9f5SSuanming Mou int flow_dv_push_vlan_match_cb(void *tool_ctx, struct mlx5_list_entry *entry, 3363e78e5408SMatan Azrad void *cb_ctx); 33646507c9f5SSuanming Mou struct mlx5_list_entry *flow_dv_push_vlan_create_cb(void *tool_ctx, 3365491b7137SMatan Azrad void *cb_ctx); 33666507c9f5SSuanming Mou void flow_dv_push_vlan_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry); 33676507c9f5SSuanming Mou struct mlx5_list_entry *flow_dv_push_vlan_clone_cb(void *tool_ctx, 3368e78e5408SMatan Azrad struct mlx5_list_entry *entry, void *cb_ctx); 33696507c9f5SSuanming Mou void flow_dv_push_vlan_clone_free_cb(void *tool_ctx, 3370491b7137SMatan Azrad struct mlx5_list_entry *entry); 33713422af2aSXueming Li 33726507c9f5SSuanming Mou int flow_dv_sample_match_cb(void *tool_ctx, struct mlx5_list_entry *entry, 3373e78e5408SMatan Azrad void *cb_ctx); 33746507c9f5SSuanming Mou struct mlx5_list_entry *flow_dv_sample_create_cb(void *tool_ctx, void *cb_ctx); 33756507c9f5SSuanming Mou void flow_dv_sample_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry); 33766507c9f5SSuanming Mou struct mlx5_list_entry *flow_dv_sample_clone_cb(void *tool_ctx, 3377491b7137SMatan Azrad struct mlx5_list_entry *entry, void *cb_ctx); 33786507c9f5SSuanming Mou void flow_dv_sample_clone_free_cb(void *tool_ctx, 3379491b7137SMatan Azrad struct mlx5_list_entry *entry); 338019784141SSuanming Mou 33816507c9f5SSuanming Mou int flow_dv_dest_array_match_cb(void *tool_ctx, struct mlx5_list_entry *entry, 3382e78e5408SMatan Azrad void *cb_ctx); 33836507c9f5SSuanming Mou struct mlx5_list_entry *flow_dv_dest_array_create_cb(void *tool_ctx, 33846507c9f5SSuanming Mou void *cb_ctx); 33856507c9f5SSuanming Mou void flow_dv_dest_array_remove_cb(void *tool_ctx, 3386e78e5408SMatan Azrad struct mlx5_list_entry *entry); 33876507c9f5SSuanming Mou struct mlx5_list_entry *flow_dv_dest_array_clone_cb(void *tool_ctx, 3388491b7137SMatan Azrad struct mlx5_list_entry *entry, void *cb_ctx); 33896507c9f5SSuanming Mou void flow_dv_dest_array_clone_free_cb(void *tool_ctx, 3390491b7137SMatan Azrad struct mlx5_list_entry *entry); 33913a2f674bSSuanming Mou void flow_dv_hashfields_set(uint64_t item_flags, 33923a2f674bSSuanming Mou struct mlx5_flow_rss_desc *rss_desc, 33933a2f674bSSuanming Mou uint64_t *hash_fields); 33943a2f674bSSuanming Mou void flow_dv_action_rss_l34_hash_adjust(uint64_t rss_types, 33953a2f674bSSuanming Mou uint64_t *hash_field); 33967ab3962dSSuanming Mou uint32_t flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx, 33977ab3962dSSuanming Mou const uint64_t hash_fields); 3398e38776c3SMaayan Kashani int flow_dv_validate(struct rte_eth_dev *dev, const struct rte_flow_attr *attr, 3399e38776c3SMaayan Kashani const struct rte_flow_item items[], 3400e38776c3SMaayan Kashani const struct rte_flow_action actions[], 3401e38776c3SMaayan Kashani bool external, int hairpin, struct rte_flow_error *error); 34026507c9f5SSuanming Mou 3403d1559d66SSuanming Mou struct mlx5_list_entry *flow_hw_grp_create_cb(void *tool_ctx, void *cb_ctx); 3404d1559d66SSuanming Mou void flow_hw_grp_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry); 3405d1559d66SSuanming Mou int flow_hw_grp_match_cb(void *tool_ctx, 3406d1559d66SSuanming Mou struct mlx5_list_entry *entry, 3407d1559d66SSuanming Mou void *cb_ctx); 3408d1559d66SSuanming Mou struct mlx5_list_entry *flow_hw_grp_clone_cb(void *tool_ctx, 3409d1559d66SSuanming Mou struct mlx5_list_entry *oentry, 3410d1559d66SSuanming Mou void *cb_ctx); 3411d1559d66SSuanming Mou void flow_hw_grp_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry); 3412d1559d66SSuanming Mou 341381073e1fSMatan Azrad struct mlx5_aso_age_action *flow_aso_age_get_by_idx(struct rte_eth_dev *dev, 341481073e1fSMatan Azrad uint32_t age_idx); 341544864503SSuanming Mou 34165d55a494STal Shnaiderman void flow_release_workspace(void *data); 34175d55a494STal Shnaiderman int mlx5_flow_os_init_workspace_once(void); 34185d55a494STal Shnaiderman void *mlx5_flow_os_get_specific_workspace(void); 34195d55a494STal Shnaiderman int mlx5_flow_os_set_specific_workspace(struct mlx5_flow_workspace *data); 34205d55a494STal Shnaiderman void mlx5_flow_os_release_workspace(void); 3421e6100c7bSLi Zhang uint32_t mlx5_flow_mtr_alloc(struct rte_eth_dev *dev); 3422e6100c7bSLi Zhang void mlx5_flow_mtr_free(struct rte_eth_dev *dev, uint32_t mtr_idx); 3423afb4aa4fSLi Zhang int mlx5_flow_validate_mtr_acts(struct rte_eth_dev *dev, 3424afb4aa4fSLi Zhang const struct rte_flow_action *actions[RTE_COLORS], 3425afb4aa4fSLi Zhang struct rte_flow_attr *attr, 3426afb4aa4fSLi Zhang bool *is_rss, 3427afb4aa4fSLi Zhang uint8_t *domain_bitmap, 34284b7bf3ffSBing Zhao uint8_t *policy_mode, 3429afb4aa4fSLi Zhang struct rte_mtr_error *error); 3430afb4aa4fSLi Zhang void mlx5_flow_destroy_mtr_acts(struct rte_eth_dev *dev, 3431afb4aa4fSLi Zhang struct mlx5_flow_meter_policy *mtr_policy); 3432afb4aa4fSLi Zhang int mlx5_flow_create_mtr_acts(struct rte_eth_dev *dev, 3433afb4aa4fSLi Zhang struct mlx5_flow_meter_policy *mtr_policy, 3434afb4aa4fSLi Zhang const struct rte_flow_action *actions[RTE_COLORS], 34356431068dSSean Zhang struct rte_flow_attr *attr, 3436afb4aa4fSLi Zhang struct rte_mtr_error *error); 3437afb4aa4fSLi Zhang int mlx5_flow_create_policy_rules(struct rte_eth_dev *dev, 3438afb4aa4fSLi Zhang struct mlx5_flow_meter_policy *mtr_policy); 3439afb4aa4fSLi Zhang void mlx5_flow_destroy_policy_rules(struct rte_eth_dev *dev, 3440afb4aa4fSLi Zhang struct mlx5_flow_meter_policy *mtr_policy); 3441afb4aa4fSLi Zhang int mlx5_flow_create_def_policy(struct rte_eth_dev *dev); 3442afb4aa4fSLi Zhang void mlx5_flow_destroy_def_policy(struct rte_eth_dev *dev); 3443afb4aa4fSLi Zhang void flow_drv_rxq_flags_set(struct rte_eth_dev *dev, 3444afb4aa4fSLi Zhang struct mlx5_flow_handle *dev_handle); 34458c5a231bSGregory Etelson const struct mlx5_flow_tunnel * 34468c5a231bSGregory Etelson mlx5_get_tof(const struct rte_flow_item *items, 34478c5a231bSGregory Etelson const struct rte_flow_action *actions, 34488c5a231bSGregory Etelson enum mlx5_tof_rule_type *rule_type); 3449b401400dSSuanming Mou void 3450b401400dSSuanming Mou flow_hw_resource_release(struct rte_eth_dev *dev); 3451f5177bdcSMichael Baum int 3452f5177bdcSMichael Baum mlx5_geneve_tlv_options_destroy(struct mlx5_geneve_tlv_options *options, 3453f5177bdcSMichael Baum struct mlx5_physical_device *phdev); 3454f5177bdcSMichael Baum int 3455f5177bdcSMichael Baum mlx5_geneve_tlv_options_check_busy(struct mlx5_priv *priv); 3456f64a7946SRongwei Liu void 3457f64a7946SRongwei Liu flow_hw_rxq_flag_set(struct rte_eth_dev *dev, bool enable); 34587ab3962dSSuanming Mou int flow_dv_action_validate(struct rte_eth_dev *dev, 34597ab3962dSSuanming Mou const struct rte_flow_indir_action_conf *conf, 34607ab3962dSSuanming Mou const struct rte_flow_action *action, 34617ab3962dSSuanming Mou struct rte_flow_error *err); 34627ab3962dSSuanming Mou struct rte_flow_action_handle *flow_dv_action_create(struct rte_eth_dev *dev, 34637ab3962dSSuanming Mou const struct rte_flow_indir_action_conf *conf, 34647ab3962dSSuanming Mou const struct rte_flow_action *action, 34657ab3962dSSuanming Mou struct rte_flow_error *err); 34667ab3962dSSuanming Mou int flow_dv_action_destroy(struct rte_eth_dev *dev, 34677ab3962dSSuanming Mou struct rte_flow_action_handle *handle, 34687ab3962dSSuanming Mou struct rte_flow_error *error); 34697ab3962dSSuanming Mou int flow_dv_action_update(struct rte_eth_dev *dev, 34707ab3962dSSuanming Mou struct rte_flow_action_handle *handle, 34717ab3962dSSuanming Mou const void *update, 34727ab3962dSSuanming Mou struct rte_flow_error *err); 34737ab3962dSSuanming Mou int flow_dv_action_query(struct rte_eth_dev *dev, 34747ab3962dSSuanming Mou const struct rte_flow_action_handle *handle, 34757ab3962dSSuanming Mou void *data, 34767ab3962dSSuanming Mou struct rte_flow_error *error); 3477fe3620aaSSuanming Mou size_t flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type); 3478fe3620aaSSuanming Mou int flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf, 3479fe3620aaSSuanming Mou size_t *size, struct rte_flow_error *error); 34800f4aa72bSSuanming Mou void mlx5_flow_field_id_to_modify_info 348177edfda9SSuanming Mou (const struct rte_flow_field_data *data, 34820f4aa72bSSuanming Mou struct field_modify_info *info, uint32_t *mask, 34830f4aa72bSSuanming Mou uint32_t width, struct rte_eth_dev *dev, 34840f4aa72bSSuanming Mou const struct rte_flow_attr *attr, struct rte_flow_error *error); 34850f4aa72bSSuanming Mou int flow_dv_convert_modify_action(struct rte_flow_item *item, 34860f4aa72bSSuanming Mou struct field_modify_info *field, 348799af18f6SSuanming Mou struct field_modify_info *dest, 34880f4aa72bSSuanming Mou struct mlx5_flow_dv_modify_hdr_resource *resource, 34890f4aa72bSSuanming Mou uint32_t type, struct rte_flow_error *error); 349068e9925cSShun Hao 349168e9925cSShun Hao #define MLX5_PF_VPORT_ID 0 349268e9925cSShun Hao #define MLX5_ECPF_VPORT_ID 0xFFFE 349368e9925cSShun Hao 349492b3c68eSShun Hao int16_t mlx5_flow_get_esw_manager_vport_id(struct rte_eth_dev *dev); 349592b3c68eSShun Hao int mlx5_flow_get_item_vport_id(struct rte_eth_dev *dev, 349692b3c68eSShun Hao const struct rte_flow_item *item, 349792b3c68eSShun Hao uint16_t *vport_id, 3498ca7e6051SShun Hao bool *all_ports, 349992b3c68eSShun Hao struct rte_flow_error *error); 350092b3c68eSShun Hao 350175a00812SSuanming Mou int flow_dv_translate_items_hws(const struct rte_flow_item *items, 350275a00812SSuanming Mou struct mlx5_flow_attr *attr, void *key, 350375a00812SSuanming Mou uint32_t key_type, uint64_t *item_flags, 350475a00812SSuanming Mou uint8_t *match_criteria, 350575a00812SSuanming Mou struct rte_flow_error *error); 35061939eb6fSDariusz Sosnowski 3507e38776c3SMaayan Kashani int __flow_dv_translate_items_hws(const struct rte_flow_item *items, 3508e38776c3SMaayan Kashani struct mlx5_flow_attr *attr, void *key, 3509e38776c3SMaayan Kashani uint32_t key_type, uint64_t *item_flags, 3510e38776c3SMaayan Kashani uint8_t *match_criteria, 3511e38776c3SMaayan Kashani bool nt_flow, 3512e38776c3SMaayan Kashani struct rte_flow_error *error); 3513e38776c3SMaayan Kashani 35141939eb6fSDariusz Sosnowski int mlx5_flow_pick_transfer_proxy(struct rte_eth_dev *dev, 35151939eb6fSDariusz Sosnowski uint16_t *proxy_port_id, 35161939eb6fSDariusz Sosnowski struct rte_flow_error *error); 3517c68bb7a6SAsaf Penso int flow_null_get_aged_flows(struct rte_eth_dev *dev, 3518c68bb7a6SAsaf Penso void **context, 3519c68bb7a6SAsaf Penso uint32_t nb_contexts, 3520c68bb7a6SAsaf Penso struct rte_flow_error *error); 3521c68bb7a6SAsaf Penso uint32_t flow_null_counter_allocate(struct rte_eth_dev *dev); 3522c68bb7a6SAsaf Penso void flow_null_counter_free(struct rte_eth_dev *dev, 3523c68bb7a6SAsaf Penso uint32_t counter); 3524c68bb7a6SAsaf Penso int flow_null_counter_query(struct rte_eth_dev *dev, 3525c68bb7a6SAsaf Penso uint32_t counter, 3526c68bb7a6SAsaf Penso bool clear, 3527c68bb7a6SAsaf Penso uint64_t *pkts, 3528c68bb7a6SAsaf Penso uint64_t *bytes, 3529c68bb7a6SAsaf Penso void **action); 35301939eb6fSDariusz Sosnowski 35311939eb6fSDariusz Sosnowski int mlx5_flow_hw_flush_ctrl_flows(struct rte_eth_dev *dev); 35321939eb6fSDariusz Sosnowski 35331939eb6fSDariusz Sosnowski int mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev, 3534f37c184aSSuanming Mou uint32_t sqn, bool external); 353586f2907cSDariusz Sosnowski int mlx5_flow_hw_esw_destroy_sq_miss_flow(struct rte_eth_dev *dev, 353686f2907cSDariusz Sosnowski uint32_t sqn); 35371939eb6fSDariusz Sosnowski int mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev); 3538ddb68e47SBing Zhao int mlx5_flow_hw_create_tx_default_mreg_copy_flow(struct rte_eth_dev *dev); 3539f37c184aSSuanming Mou int mlx5_flow_hw_tx_repr_matching_flow(struct rte_eth_dev *dev, uint32_t sqn, bool external); 354049dffadfSBing Zhao int mlx5_flow_hw_lacp_rx_flow(struct rte_eth_dev *dev); 354124865366SAlexander Kozyrev int mlx5_flow_actions_validate(struct rte_eth_dev *dev, 354224865366SAlexander Kozyrev const struct rte_flow_actions_template_attr *attr, 354324865366SAlexander Kozyrev const struct rte_flow_action actions[], 354424865366SAlexander Kozyrev const struct rte_flow_action masks[], 354524865366SAlexander Kozyrev struct rte_flow_error *error); 354624865366SAlexander Kozyrev int mlx5_flow_pattern_validate(struct rte_eth_dev *dev, 354724865366SAlexander Kozyrev const struct rte_flow_pattern_template_attr *attr, 354824865366SAlexander Kozyrev const struct rte_flow_item items[], 354924865366SAlexander Kozyrev struct rte_flow_error *error); 3550f1fecffaSDariusz Sosnowski int flow_hw_table_update(struct rte_eth_dev *dev, 3551f1fecffaSDariusz Sosnowski struct rte_flow_error *error); 3552773ca0e9SGregory Etelson int mlx5_flow_item_field_width(struct rte_eth_dev *dev, 3553773ca0e9SGregory Etelson enum rte_flow_field_id field, int inherit, 3554773ca0e9SGregory Etelson const struct rte_flow_attr *attr, 3555773ca0e9SGregory Etelson struct rte_flow_error *error); 355627d171b8SMaayan Kashani uintptr_t flow_legacy_list_create(struct rte_eth_dev *dev, enum mlx5_flow_type type, 3557e38776c3SMaayan Kashani const struct rte_flow_attr *attr, 3558e38776c3SMaayan Kashani const struct rte_flow_item items[], 3559e38776c3SMaayan Kashani const struct rte_flow_action actions[], 3560e38776c3SMaayan Kashani bool external, struct rte_flow_error *error); 3561e38776c3SMaayan Kashani void flow_legacy_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type, 356227d171b8SMaayan Kashani uintptr_t flow_idx); 356300e57916SRongwei Liu 356400e57916SRongwei Liu static __rte_always_inline int 356500e57916SRongwei Liu flow_hw_get_srh_flex_parser_byte_off_from_ctx(void *dr_ctx __rte_unused) 356600e57916SRongwei Liu { 356700e57916SRongwei Liu #ifdef HAVE_IBV_FLOW_DV_SUPPORT 356800e57916SRongwei Liu uint16_t port; 356900e57916SRongwei Liu 357000e57916SRongwei Liu MLX5_ETH_FOREACH_DEV(port, NULL) { 357100e57916SRongwei Liu struct mlx5_priv *priv; 357200e57916SRongwei Liu struct mlx5_hca_flex_attr *attr; 3573bc0a9303SRongwei Liu struct mlx5_devx_match_sample_info_query_attr *info; 357400e57916SRongwei Liu 357500e57916SRongwei Liu priv = rte_eth_devices[port].data->dev_private; 357600e57916SRongwei Liu attr = &priv->sh->cdev->config.hca_attr.flex; 3577bc0a9303SRongwei Liu if (priv->dr_ctx == dr_ctx && attr->query_match_sample_info) { 3578bc0a9303SRongwei Liu info = &priv->sh->srh_flex_parser.flex.devx_fp->sample_info[0]; 3579bc0a9303SRongwei Liu if (priv->sh->srh_flex_parser.flex.mapnum) 3580bc0a9303SRongwei Liu return info->sample_dw_data * sizeof(uint32_t); 358100e57916SRongwei Liu else 358200e57916SRongwei Liu return UINT32_MAX; 358300e57916SRongwei Liu } 358400e57916SRongwei Liu } 358500e57916SRongwei Liu #endif 358600e57916SRongwei Liu return UINT32_MAX; 358700e57916SRongwei Liu } 35880891355dSRongwei Liu 35890891355dSRongwei Liu static __rte_always_inline uint8_t 35900891355dSRongwei Liu flow_hw_get_ipv6_route_ext_anchor_from_ctx(void *dr_ctx) 35910891355dSRongwei Liu { 35920891355dSRongwei Liu #ifdef HAVE_IBV_FLOW_DV_SUPPORT 35930891355dSRongwei Liu uint16_t port; 35940891355dSRongwei Liu struct mlx5_priv *priv; 35950891355dSRongwei Liu 35960891355dSRongwei Liu MLX5_ETH_FOREACH_DEV(port, NULL) { 35970891355dSRongwei Liu priv = rte_eth_devices[port].data->dev_private; 35980891355dSRongwei Liu if (priv->dr_ctx == dr_ctx) 35990891355dSRongwei Liu return priv->sh->srh_flex_parser.flex.devx_fp->anchor_id; 36000891355dSRongwei Liu } 36010891355dSRongwei Liu #else 36020891355dSRongwei Liu RTE_SET_USED(dr_ctx); 36030891355dSRongwei Liu #endif 36040891355dSRongwei Liu return 0; 36050891355dSRongwei Liu } 36060891355dSRongwei Liu 36070891355dSRongwei Liu static __rte_always_inline uint16_t 36080891355dSRongwei Liu flow_hw_get_ipv6_route_ext_mod_id_from_ctx(void *dr_ctx, uint8_t idx) 36090891355dSRongwei Liu { 36100891355dSRongwei Liu #ifdef HAVE_IBV_FLOW_DV_SUPPORT 36110891355dSRongwei Liu uint16_t port; 36120891355dSRongwei Liu struct mlx5_priv *priv; 36130891355dSRongwei Liu struct mlx5_flex_parser_devx *fp; 36140891355dSRongwei Liu 36150891355dSRongwei Liu if (idx >= MLX5_GRAPH_NODE_SAMPLE_NUM || idx >= MLX5_SRV6_SAMPLE_NUM) 36160891355dSRongwei Liu return 0; 36170891355dSRongwei Liu MLX5_ETH_FOREACH_DEV(port, NULL) { 36180891355dSRongwei Liu priv = rte_eth_devices[port].data->dev_private; 36190891355dSRongwei Liu if (priv->dr_ctx == dr_ctx) { 36200891355dSRongwei Liu fp = priv->sh->srh_flex_parser.flex.devx_fp; 36210891355dSRongwei Liu return fp->sample_info[idx].modify_field_id; 36220891355dSRongwei Liu } 36230891355dSRongwei Liu } 36240891355dSRongwei Liu #else 36250891355dSRongwei Liu RTE_SET_USED(dr_ctx); 36260891355dSRongwei Liu RTE_SET_USED(idx); 36270891355dSRongwei Liu #endif 36280891355dSRongwei Liu return 0; 36290891355dSRongwei Liu } 36303564e928SGregory Etelson void 36313564e928SGregory Etelson mlx5_indirect_list_handles_release(struct rte_eth_dev *dev); 36323564e928SGregory Etelson #ifdef HAVE_MLX5_HWS_SUPPORT 3633691326d1SErez Shitrit 3634691326d1SErez Shitrit #define MLX5_REPR_STC_MEMORY_LOG 11 3635691326d1SErez Shitrit 36363564e928SGregory Etelson struct mlx5_mirror; 36373564e928SGregory Etelson void 3638e26f50adSGregory Etelson mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror); 3639e26f50adSGregory Etelson void 3640e26f50adSGregory Etelson mlx5_destroy_legacy_indirect(struct rte_eth_dev *dev, 3641e26f50adSGregory Etelson struct mlx5_indirect_list *ptr); 36425e26c99fSRongwei Liu void 36435e26c99fSRongwei Liu mlx5_hw_decap_encap_destroy(struct rte_eth_dev *dev, 36445e26c99fSRongwei Liu struct mlx5_indirect_list *reformat); 3645ae67e3c4SGregory Etelson int 3646ae67e3c4SGregory Etelson flow_hw_create_flow(struct rte_eth_dev *dev, enum mlx5_flow_type type, 3647ae67e3c4SGregory Etelson const struct rte_flow_attr *attr, 3648ae67e3c4SGregory Etelson const struct rte_flow_item items[], 3649ae67e3c4SGregory Etelson const struct rte_flow_action actions[], 3650ae67e3c4SGregory Etelson uint64_t item_flags, uint64_t action_flags, bool external, 3651ae67e3c4SGregory Etelson struct rte_flow_hw **flow, struct rte_flow_error *error); 3652ae67e3c4SGregory Etelson void 3653ae67e3c4SGregory Etelson flow_hw_destroy(struct rte_eth_dev *dev, struct rte_flow_hw *flow); 3654ae67e3c4SGregory Etelson void 3655ae67e3c4SGregory Etelson flow_hw_list_destroy(struct rte_eth_dev *dev, enum mlx5_flow_type type, 3656ae67e3c4SGregory Etelson uintptr_t flow_idx); 3657ae67e3c4SGregory Etelson const struct rte_flow_action_rss * 3658ae67e3c4SGregory Etelson flow_nta_locate_rss(struct rte_eth_dev *dev, 3659ae67e3c4SGregory Etelson const struct rte_flow_action actions[], 3660ae67e3c4SGregory Etelson struct rte_flow_error *error); 3661ae67e3c4SGregory Etelson struct rte_flow_hw * 3662ae67e3c4SGregory Etelson flow_nta_handle_rss(struct rte_eth_dev *dev, 3663ae67e3c4SGregory Etelson const struct rte_flow_attr *attr, 3664ae67e3c4SGregory Etelson const struct rte_flow_item items[], 3665ae67e3c4SGregory Etelson const struct rte_flow_action actions[], 3666ae67e3c4SGregory Etelson const struct rte_flow_action_rss *rss_conf, 3667ae67e3c4SGregory Etelson uint64_t item_flags, uint64_t action_flags, 3668ae67e3c4SGregory Etelson bool external, enum mlx5_flow_type flow_type, 3669ae67e3c4SGregory Etelson struct rte_flow_error *error); 3670d6dc072aSGregory Etelson 3671d6dc072aSGregory Etelson extern const struct rte_flow_action_raw_decap empty_decap; 367280c67625SGregory Etelson extern const struct rte_flow_item_ipv6 nic_ipv6_mask; 367380c67625SGregory Etelson extern const struct rte_flow_item_tcp nic_tcp_mask; 3674d6dc072aSGregory Etelson 3675821a6a5cSBing Zhao /* mlx5_nta_split.c */ 3676821a6a5cSBing Zhao int 3677821a6a5cSBing Zhao mlx5_flow_nta_split_metadata(struct rte_eth_dev *dev, 3678821a6a5cSBing Zhao const struct rte_flow_attr *attr, 3679821a6a5cSBing Zhao const struct rte_flow_action actions[], 3680821a6a5cSBing Zhao const struct rte_flow_action *qrss, 3681821a6a5cSBing Zhao uint64_t action_flags, 3682821a6a5cSBing Zhao int actions_n, 3683821a6a5cSBing Zhao bool external, 3684821a6a5cSBing Zhao struct mlx5_flow_hw_split_resource *res, 3685821a6a5cSBing Zhao struct rte_flow_error *error); 3686821a6a5cSBing Zhao void 3687821a6a5cSBing Zhao mlx5_flow_nta_split_resource_free(struct rte_eth_dev *dev, 3688821a6a5cSBing Zhao struct mlx5_flow_hw_split_resource *res); 3689821a6a5cSBing Zhao struct mlx5_list_entry * 3690821a6a5cSBing Zhao flow_nta_mreg_create_cb(void *tool_ctx, void *cb_ctx); 3691821a6a5cSBing Zhao void 3692821a6a5cSBing Zhao flow_nta_mreg_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry); 3693821a6a5cSBing Zhao void 3694821a6a5cSBing Zhao mlx5_flow_nta_del_copy_action(struct rte_eth_dev *dev, uint32_t idx); 3695821a6a5cSBing Zhao void 3696821a6a5cSBing Zhao mlx5_flow_nta_del_default_copy_action(struct rte_eth_dev *dev); 3697821a6a5cSBing Zhao int 3698821a6a5cSBing Zhao mlx5_flow_nta_add_default_copy_action(struct rte_eth_dev *dev, 3699821a6a5cSBing Zhao struct rte_flow_error *error); 3700821a6a5cSBing Zhao int 3701821a6a5cSBing Zhao mlx5_flow_nta_update_copy_table(struct rte_eth_dev *dev, 3702821a6a5cSBing Zhao uint32_t *idx, 3703821a6a5cSBing Zhao const struct rte_flow_action *mark, 3704821a6a5cSBing Zhao uint64_t action_flags, 3705821a6a5cSBing Zhao struct rte_flow_error *error); 3706821a6a5cSBing Zhao 37073564e928SGregory Etelson #endif 370884c406e7SOri Kam #endif /* RTE_PMD_MLX5_FLOW_H_ */ 3709