xref: /dpdk/drivers/net/mlx5/mlx5_flow.h (revision 6c991cd9b0147e03660b62e9f6a0951d7fd04bb7)
184c406e7SOri Kam /* SPDX-License-Identifier: BSD-3-Clause
284c406e7SOri Kam  * Copyright 2018 Mellanox Technologies, Ltd
384c406e7SOri Kam  */
484c406e7SOri Kam 
584c406e7SOri Kam #ifndef RTE_PMD_MLX5_FLOW_H_
684c406e7SOri Kam #define RTE_PMD_MLX5_FLOW_H_
784c406e7SOri Kam 
884c406e7SOri Kam #include <stdalign.h>
984c406e7SOri Kam #include <stdint.h>
1084c406e7SOri Kam #include <string.h>
1189813a52SDmitry Kozlyuk #include <sys/queue.h>
1284c406e7SOri Kam 
13f15db67dSMatan Azrad #include <rte_alarm.h>
143bd26b23SSuanming Mou #include <rte_mtr.h>
15f15db67dSMatan Azrad 
169d60f545SOphir Munk #include <mlx5_glue.h>
177b4f1e6bSMatan Azrad #include <mlx5_prm.h>
187b4f1e6bSMatan Azrad 
19f5bf91deSMoti Haimovsky #include "mlx5.h"
205f5e2f86SAlexander Kozyrev #include "rte_pmd_mlx5.h"
2122681deeSAlex Vesker #include "hws/mlx5dr.h"
22f5bf91deSMoti Haimovsky 
23a5640386SXueming Li /* E-Switch Manager port, used for rte_flow_item_port_id. */
24a5640386SXueming Li #define MLX5_PORT_ESW_MGR UINT32_MAX
25a5640386SXueming Li 
2633d506b9SShun Hao /* E-Switch Manager port, used for rte_flow_item_ethdev. */
2733d506b9SShun Hao #define MLX5_REPRESENTED_PORT_ESW_MGR UINT16_MAX
2833d506b9SShun Hao 
2970d84dc7SOri Kam /* Private rte flow items. */
3070d84dc7SOri Kam enum mlx5_rte_flow_item_type {
3170d84dc7SOri Kam 	MLX5_RTE_FLOW_ITEM_TYPE_END = INT_MIN,
3270d84dc7SOri Kam 	MLX5_RTE_FLOW_ITEM_TYPE_TAG,
3375a00812SSuanming Mou 	MLX5_RTE_FLOW_ITEM_TYPE_SQ,
3450f576d6SSuanming Mou 	MLX5_RTE_FLOW_ITEM_TYPE_VLAN,
354ec6360dSGregory Etelson 	MLX5_RTE_FLOW_ITEM_TYPE_TUNNEL,
3670d84dc7SOri Kam };
3770d84dc7SOri Kam 
38baf516beSViacheslav Ovsiienko /* Private (internal) rte flow actions. */
3970d84dc7SOri Kam enum mlx5_rte_flow_action_type {
4070d84dc7SOri Kam 	MLX5_RTE_FLOW_ACTION_TYPE_END = INT_MIN,
4170d84dc7SOri Kam 	MLX5_RTE_FLOW_ACTION_TYPE_TAG,
42dd3c774fSViacheslav Ovsiienko 	MLX5_RTE_FLOW_ACTION_TYPE_MARK,
43baf516beSViacheslav Ovsiienko 	MLX5_RTE_FLOW_ACTION_TYPE_COPY_MREG,
443c78124fSShiri Kuzin 	MLX5_RTE_FLOW_ACTION_TYPE_DEFAULT_MISS,
454ec6360dSGregory Etelson 	MLX5_RTE_FLOW_ACTION_TYPE_TUNNEL_SET,
4681073e1fSMatan Azrad 	MLX5_RTE_FLOW_ACTION_TYPE_AGE,
4751ec04dcSShun Hao 	MLX5_RTE_FLOW_ACTION_TYPE_COUNT,
48f3191849SMichael Baum 	MLX5_RTE_FLOW_ACTION_TYPE_JUMP,
497ab3962dSSuanming Mou 	MLX5_RTE_FLOW_ACTION_TYPE_RSS,
5048fbb0e9SAlexander Kozyrev 	MLX5_RTE_FLOW_ACTION_TYPE_METER_MARK,
5170d84dc7SOri Kam };
5270d84dc7SOri Kam 
53ddb68e47SBing Zhao /* Private (internal) Field IDs for MODIFY_FIELD action. */
54ddb68e47SBing Zhao enum mlx5_rte_flow_field_id {
55ddb68e47SBing Zhao 	MLX5_RTE_FLOW_FIELD_END = INT_MIN,
56ddb68e47SBing Zhao 	MLX5_RTE_FLOW_FIELD_META_REG,
57ddb68e47SBing Zhao };
58ddb68e47SBing Zhao 
5948fbb0e9SAlexander Kozyrev #define MLX5_INDIRECT_ACTION_TYPE_OFFSET 29
604a42ac1fSMatan Azrad 
61478ba4bbSSuanming Mou #define MLX5_INDIRECT_ACTION_TYPE_GET(handle) \
62478ba4bbSSuanming Mou 	(((uint32_t)(uintptr_t)(handle)) >> MLX5_INDIRECT_ACTION_TYPE_OFFSET)
63478ba4bbSSuanming Mou 
64478ba4bbSSuanming Mou #define MLX5_INDIRECT_ACTION_IDX_GET(handle) \
65478ba4bbSSuanming Mou 	(((uint32_t)(uintptr_t)(handle)) & \
66478ba4bbSSuanming Mou 	 ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1))
67478ba4bbSSuanming Mou 
683564e928SGregory Etelson enum mlx5_indirect_type {
694b61b877SBing Zhao 	MLX5_INDIRECT_ACTION_TYPE_RSS,
704b61b877SBing Zhao 	MLX5_INDIRECT_ACTION_TYPE_AGE,
71f3191849SMichael Baum 	MLX5_INDIRECT_ACTION_TYPE_COUNT,
722db75e8bSBing Zhao 	MLX5_INDIRECT_ACTION_TYPE_CT,
7348fbb0e9SAlexander Kozyrev 	MLX5_INDIRECT_ACTION_TYPE_METER_MARK,
7415896eafSGregory Etelson 	MLX5_INDIRECT_ACTION_TYPE_QUOTA,
754a42ac1fSMatan Azrad };
764a42ac1fSMatan Azrad 
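/*
 * Illustrative sketch: an indirect action handle packs the action type into
 * the bits at and above MLX5_INDIRECT_ACTION_TYPE_OFFSET and the object
 * index into the bits below it. The helpers below are hypothetical and only
 * demonstrate how the encoding macros above are meant to be used.
 */
static __rte_always_inline uint32_t
mlx5_indirect_action_encode(enum mlx5_indirect_type type, uint32_t idx)
{
	/* Type in bits 29-31, object index in the lower 29 bits. */
	return ((uint32_t)type << MLX5_INDIRECT_ACTION_TYPE_OFFSET) |
	       (idx & ((1u << MLX5_INDIRECT_ACTION_TYPE_OFFSET) - 1));
}

static __rte_always_inline bool
mlx5_indirect_action_is_rss(const struct rte_flow_action_handle *handle)
{
	return MLX5_INDIRECT_ACTION_TYPE_GET(handle) ==
	       MLX5_INDIRECT_ACTION_TYPE_RSS;
}
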
7748fbb0e9SAlexander Kozyrev /* At most 16 owner ports are supported; the 22-bit index allows up to 4M CT actions. */
7848fbb0e9SAlexander Kozyrev #define MLX5_INDIRECT_ACT_CT_MAX_PORT 0x10
794f74cb68SBing Zhao 
804f74cb68SBing Zhao #define MLX5_INDIRECT_ACT_CT_OWNER_SHIFT 22
814f74cb68SBing Zhao #define MLX5_INDIRECT_ACT_CT_OWNER_MASK (MLX5_INDIRECT_ACT_CT_MAX_PORT - 1)
824f74cb68SBing Zhao 
8348fbb0e9SAlexander Kozyrev /* 29-31: type, 22-25: owner port, 0-21: index */
844f74cb68SBing Zhao #define MLX5_INDIRECT_ACT_CT_GEN_IDX(owner, index) \
854f74cb68SBing Zhao 	((MLX5_INDIRECT_ACTION_TYPE_CT << MLX5_INDIRECT_ACTION_TYPE_OFFSET) | \
864f74cb68SBing Zhao 	 (((owner) & MLX5_INDIRECT_ACT_CT_OWNER_MASK) << \
874f74cb68SBing Zhao 	  MLX5_INDIRECT_ACT_CT_OWNER_SHIFT) | (index))
884f74cb68SBing Zhao 
894f74cb68SBing Zhao #define MLX5_INDIRECT_ACT_CT_GET_OWNER(index) \
904f74cb68SBing Zhao 	(((index) >> MLX5_INDIRECT_ACT_CT_OWNER_SHIFT) & \
914f74cb68SBing Zhao 	 MLX5_INDIRECT_ACT_CT_OWNER_MASK)
924f74cb68SBing Zhao 
934f74cb68SBing Zhao #define MLX5_INDIRECT_ACT_CT_GET_IDX(index) \
944f74cb68SBing Zhao 	((index) & ((1 << MLX5_INDIRECT_ACT_CT_OWNER_SHIFT) - 1))
954f74cb68SBing Zhao 
96463170a7SSuanming Mou #define MLX5_ACTION_CTX_CT_GET_IDX  MLX5_INDIRECT_ACT_CT_GET_IDX
97463170a7SSuanming Mou #define MLX5_ACTION_CTX_CT_GET_OWNER MLX5_INDIRECT_ACT_CT_GET_OWNER
98463170a7SSuanming Mou #define MLX5_ACTION_CTX_CT_GEN_IDX MLX5_INDIRECT_ACT_CT_GEN_IDX
99463170a7SSuanming Mou 
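/*
 * Illustrative sketch: a CT indirect action handle combines the action type,
 * the owner port and the object index with the macros above. The hypothetical
 * helper below only checks that the encode/decode macros are consistent.
 */
static __rte_always_inline bool
mlx5_indirect_ct_idx_roundtrip(uint16_t owner, uint32_t idx)
{
	uint32_t handle;

	/* The index must fit below the owner field; mask it up front. */
	idx &= (1u << MLX5_INDIRECT_ACT_CT_OWNER_SHIFT) - 1;
	handle = MLX5_INDIRECT_ACT_CT_GEN_IDX(owner, idx);
	return MLX5_INDIRECT_ACT_CT_GET_OWNER(handle) ==
	       (uint32_t)(owner & MLX5_INDIRECT_ACT_CT_OWNER_MASK) &&
	       MLX5_INDIRECT_ACT_CT_GET_IDX(handle) == idx;
}
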
1003564e928SGregory Etelson enum mlx5_indirect_list_type {
101e26f50adSGregory Etelson 	MLX5_INDIRECT_ACTION_LIST_TYPE_ERR = 0,
102e26f50adSGregory Etelson 	MLX5_INDIRECT_ACTION_LIST_TYPE_LEGACY = 1,
103e26f50adSGregory Etelson 	MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR = 2,
1045e26c99fSRongwei Liu 	MLX5_INDIRECT_ACTION_LIST_TYPE_REFORMAT = 3,
1053564e928SGregory Etelson };
1063564e928SGregory Etelson 
107e26f50adSGregory Etelson /**
1083564e928SGregory Etelson  * Base type for indirect list type.
1093564e928SGregory Etelson  */
1103564e928SGregory Etelson struct mlx5_indirect_list {
111e26f50adSGregory Etelson 	/* Indirect list type. */
1123564e928SGregory Etelson 	enum mlx5_indirect_list_type type;
113e26f50adSGregory Etelson 	/* Optional storage list entry */
1143564e928SGregory Etelson 	LIST_ENTRY(mlx5_indirect_list) entry;
1153564e928SGregory Etelson };
1163564e928SGregory Etelson 
117e26f50adSGregory Etelson static __rte_always_inline void
118e26f50adSGregory Etelson mlx5_indirect_list_add_entry(void *head, struct mlx5_indirect_list *elem)
1193564e928SGregory Etelson {
120e26f50adSGregory Etelson 	LIST_HEAD(, mlx5_indirect_list) *h = head;
121e26f50adSGregory Etelson 
122e26f50adSGregory Etelson 	LIST_INSERT_HEAD(h, elem, entry);
123e26f50adSGregory Etelson }
124e26f50adSGregory Etelson 
125e26f50adSGregory Etelson static __rte_always_inline void
126e26f50adSGregory Etelson mlx5_indirect_list_remove_entry(struct mlx5_indirect_list *elem)
127e26f50adSGregory Etelson {
128e26f50adSGregory Etelson 	if (elem->entry.le_prev)
129e26f50adSGregory Etelson 		LIST_REMOVE(elem, entry);
130e26f50adSGregory Etelson }
131e26f50adSGregory Etelson 
132e26f50adSGregory Etelson static __rte_always_inline enum mlx5_indirect_list_type
133e26f50adSGregory Etelson mlx5_get_indirect_list_type(const struct rte_flow_action_list_handle *obj)
134e26f50adSGregory Etelson {
135e26f50adSGregory Etelson 	return ((const struct mlx5_indirect_list *)obj)->type;
1363564e928SGregory Etelson }
1373564e928SGregory Etelson 
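/*
 * Illustrative sketch: concrete indirect list objects are expected to embed
 * struct mlx5_indirect_list as their first member (the cast in
 * mlx5_get_indirect_list_type() relies on that), so a list handle can be
 * classified and linked/unlinked with the helpers above. The helper below is
 * hypothetical.
 */
static __rte_always_inline bool
mlx5_indirect_list_is_mirror(const struct rte_flow_action_list_handle *obj)
{
	return mlx5_get_indirect_list_type(obj) ==
	       MLX5_INDIRECT_ACTION_LIST_TYPE_MIRROR;
}
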
13870d84dc7SOri Kam /* Matches on selected register. */
13970d84dc7SOri Kam struct mlx5_rte_flow_item_tag {
140baf516beSViacheslav Ovsiienko 	enum modify_reg id;
141cff811c7SViacheslav Ovsiienko 	uint32_t data;
14270d84dc7SOri Kam };
14370d84dc7SOri Kam 
14470d84dc7SOri Kam /* Modify selected register. */
14570d84dc7SOri Kam struct mlx5_rte_flow_action_set_tag {
146baf516beSViacheslav Ovsiienko 	enum modify_reg id;
147a597ef33SShun Hao 	uint8_t offset;
148a597ef33SShun Hao 	uint8_t length;
149cff811c7SViacheslav Ovsiienko 	uint32_t data;
15070d84dc7SOri Kam };
15170d84dc7SOri Kam 
152baf516beSViacheslav Ovsiienko struct mlx5_flow_action_copy_mreg {
153baf516beSViacheslav Ovsiienko 	enum modify_reg dst;
154baf516beSViacheslav Ovsiienko 	enum modify_reg src;
155baf516beSViacheslav Ovsiienko };
156baf516beSViacheslav Ovsiienko 
1573c84f34eSOri Kam /* Matches on source queue. */
15875a00812SSuanming Mou struct mlx5_rte_flow_item_sq {
15926e1eaf2SDariusz Sosnowski 	uint32_t queue; /* DevX SQ number */
1603c84f34eSOri Kam };
1613c84f34eSOri Kam 
1623e8edd0eSViacheslav Ovsiienko /* Feature name to allocate metadata register. */
1633e8edd0eSViacheslav Ovsiienko enum mlx5_feature_name {
1643e8edd0eSViacheslav Ovsiienko 	MLX5_HAIRPIN_RX,
1653e8edd0eSViacheslav Ovsiienko 	MLX5_HAIRPIN_TX,
1663e8edd0eSViacheslav Ovsiienko 	MLX5_METADATA_RX,
1673e8edd0eSViacheslav Ovsiienko 	MLX5_METADATA_TX,
1683e8edd0eSViacheslav Ovsiienko 	MLX5_METADATA_FDB,
1693e8edd0eSViacheslav Ovsiienko 	MLX5_FLOW_MARK,
1703e8edd0eSViacheslav Ovsiienko 	MLX5_APP_TAG,
1713e8edd0eSViacheslav Ovsiienko 	MLX5_COPY_MARK,
17227efd5deSSuanming Mou 	MLX5_MTR_COLOR,
17383306d6cSShun Hao 	MLX5_MTR_ID,
17431ef2982SDekel Peled 	MLX5_ASO_FLOW_HIT,
1758ebbc01fSBing Zhao 	MLX5_ASO_CONNTRACK,
176a9b6ea45SJiawei Wang 	MLX5_SAMPLE_ID,
1773e8edd0eSViacheslav Ovsiienko };
1783e8edd0eSViacheslav Ovsiienko 
1798bb81f26SXueming Li /* Default queue number. */
1808bb81f26SXueming Li #define MLX5_RSSQ_DEFAULT_NUM 16
1818bb81f26SXueming Li 
18284c406e7SOri Kam #define MLX5_FLOW_LAYER_OUTER_L2 (1u << 0)
18384c406e7SOri Kam #define MLX5_FLOW_LAYER_OUTER_L3_IPV4 (1u << 1)
18484c406e7SOri Kam #define MLX5_FLOW_LAYER_OUTER_L3_IPV6 (1u << 2)
18584c406e7SOri Kam #define MLX5_FLOW_LAYER_OUTER_L4_UDP (1u << 3)
18684c406e7SOri Kam #define MLX5_FLOW_LAYER_OUTER_L4_TCP (1u << 4)
18784c406e7SOri Kam #define MLX5_FLOW_LAYER_OUTER_VLAN (1u << 5)
18884c406e7SOri Kam 
18984c406e7SOri Kam /* Pattern inner Layer bits. */
19084c406e7SOri Kam #define MLX5_FLOW_LAYER_INNER_L2 (1u << 6)
19184c406e7SOri Kam #define MLX5_FLOW_LAYER_INNER_L3_IPV4 (1u << 7)
19284c406e7SOri Kam #define MLX5_FLOW_LAYER_INNER_L3_IPV6 (1u << 8)
19384c406e7SOri Kam #define MLX5_FLOW_LAYER_INNER_L4_UDP (1u << 9)
19484c406e7SOri Kam #define MLX5_FLOW_LAYER_INNER_L4_TCP (1u << 10)
19584c406e7SOri Kam #define MLX5_FLOW_LAYER_INNER_VLAN (1u << 11)
19684c406e7SOri Kam 
19784c406e7SOri Kam /* Pattern tunnel Layer bits. */
19884c406e7SOri Kam #define MLX5_FLOW_LAYER_VXLAN (1u << 12)
19984c406e7SOri Kam #define MLX5_FLOW_LAYER_VXLAN_GPE (1u << 13)
20084c406e7SOri Kam #define MLX5_FLOW_LAYER_GRE (1u << 14)
20184c406e7SOri Kam #define MLX5_FLOW_LAYER_MPLS (1u << 15)
202ea81c1b8SDekel Peled /* List of tunnel Layer bits continued below. */
20384c406e7SOri Kam 
2046bd7fbd0SDekel Peled /* General pattern items bits. */
2056bd7fbd0SDekel Peled #define MLX5_FLOW_ITEM_METADATA (1u << 16)
2062e4c987aSOri Kam #define MLX5_FLOW_ITEM_PORT_ID (1u << 17)
20770d84dc7SOri Kam #define MLX5_FLOW_ITEM_TAG (1u << 18)
20855deee17SViacheslav Ovsiienko #define MLX5_FLOW_ITEM_MARK (1u << 19)
2096bd7fbd0SDekel Peled 
210d53aa89aSXiaoyu Min /* Pattern MISC bits. */
21120ac7966SXiaoyu Min #define MLX5_FLOW_LAYER_ICMP (1u << 20)
21220ac7966SXiaoyu Min #define MLX5_FLOW_LAYER_ICMP6 (1u << 21)
21320ac7966SXiaoyu Min #define MLX5_FLOW_LAYER_GRE_KEY (1u << 22)
214d53aa89aSXiaoyu Min 
215ea81c1b8SDekel Peled /* Pattern tunnel Layer bits (continued). */
21620ac7966SXiaoyu Min #define MLX5_FLOW_LAYER_IPIP (1u << 23)
21720ac7966SXiaoyu Min #define MLX5_FLOW_LAYER_IPV6_ENCAP (1u << 24)
21820ac7966SXiaoyu Min #define MLX5_FLOW_LAYER_NVGRE (1u << 25)
21920ac7966SXiaoyu Min #define MLX5_FLOW_LAYER_GENEVE (1u << 26)
2205e33bebdSXiaoyu Min 
2213c84f34eSOri Kam /* Queue items. */
22275a00812SSuanming Mou #define MLX5_FLOW_ITEM_SQ (1u << 27)
2233c84f34eSOri Kam 
224f31d7a01SDekel Peled /* Pattern tunnel Layer bits (continued). */
225f31d7a01SDekel Peled #define MLX5_FLOW_LAYER_GTP (1u << 28)
226f31d7a01SDekel Peled 
227c7eca236SBing Zhao /* Pattern eCPRI Layer bit. */
228c7eca236SBing Zhao #define MLX5_FLOW_LAYER_ECPRI (UINT64_C(1) << 29)
229c7eca236SBing Zhao 
2300e5a0d8fSDekel Peled /* IPv6 Fragment Extension Header bit. */
2310e5a0d8fSDekel Peled #define MLX5_FLOW_LAYER_OUTER_L3_IPV6_FRAG_EXT (1u << 30)
2320e5a0d8fSDekel Peled #define MLX5_FLOW_LAYER_INNER_L3_IPV6_FRAG_EXT (1u << 31)
2330e5a0d8fSDekel Peled 
2342c9f9617SShiri Kuzin /* Pattern tunnel Layer bits (continued). */
235f7239fceSShiri Kuzin #define MLX5_FLOW_LAYER_GENEVE_OPT (UINT64_C(1) << 32)
2362c9f9617SShiri Kuzin #define MLX5_FLOW_LAYER_GTP_PSC (UINT64_C(1) << 33)
2372c9f9617SShiri Kuzin 
23806741117SGregory Etelson /* INTEGRITY item bits */
23906741117SGregory Etelson #define MLX5_FLOW_ITEM_OUTER_INTEGRITY (UINT64_C(1) << 34)
24006741117SGregory Etelson #define MLX5_FLOW_ITEM_INNER_INTEGRITY (UINT64_C(1) << 35)
24123b0a8b2SGregory Etelson #define MLX5_FLOW_ITEM_INTEGRITY \
24223b0a8b2SGregory Etelson 	(MLX5_FLOW_ITEM_OUTER_INTEGRITY | MLX5_FLOW_ITEM_INNER_INTEGRITY)
24379f89527SGregory Etelson 
244aca19061SBing Zhao /* Conntrack item. */
24506741117SGregory Etelson #define MLX5_FLOW_LAYER_ASO_CT (UINT64_C(1) << 36)
246aca19061SBing Zhao 
247a23e9b6eSGregory Etelson /* Flex item */
24860bc2805SGregory Etelson #define MLX5_FLOW_ITEM_OUTER_FLEX (UINT64_C(1) << 37)
24960bc2805SGregory Etelson #define MLX5_FLOW_ITEM_INNER_FLEX (UINT64_C(1) << 38)
25060bc2805SGregory Etelson #define MLX5_FLOW_ITEM_FLEX_TUNNEL (UINT64_C(1) << 39)
251a23e9b6eSGregory Etelson 
25218ca4a4eSRaja Zidane /* ESP item */
25318ca4a4eSRaja Zidane #define MLX5_FLOW_ITEM_ESP (UINT64_C(1) << 40)
25418ca4a4eSRaja Zidane 
255e8146c63SSean Zhang /* Port Representor/Represented Port item */
256e8146c63SSean Zhang #define MLX5_FLOW_ITEM_PORT_REPRESENTOR (UINT64_C(1) << 41)
257e8146c63SSean Zhang #define MLX5_FLOW_ITEM_REPRESENTED_PORT (UINT64_C(1) << 42)
258e8146c63SSean Zhang 
25975a00812SSuanming Mou /* Meter color item */
26075a00812SSuanming Mou #define MLX5_FLOW_ITEM_METER_COLOR (UINT64_C(1) << 44)
26115896eafSGregory Etelson #define MLX5_FLOW_ITEM_QUOTA (UINT64_C(1) << 47)
26215896eafSGregory Etelson 
26375a00812SSuanming Mou 
26400e57916SRongwei Liu /* IPv6 routing extension item */
26500e57916SRongwei Liu #define MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT (UINT64_C(1) << 45)
26600e57916SRongwei Liu #define MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT (UINT64_C(1) << 46)
26700e57916SRongwei Liu 
268674afdf0SJiawei Wang /* Aggregated affinity item */
269674afdf0SJiawei Wang #define MLX5_FLOW_ITEM_AGGR_AFFINITY (UINT64_C(1) << 49)
270674afdf0SJiawei Wang 
27132c2847aSDong Zhou /* IB BTH ITEM. */
27232c2847aSDong Zhou #define MLX5_FLOW_ITEM_IB_BTH (1ull << 51)
27332c2847aSDong Zhou 
274ad17988aSAlexander Kozyrev /* PTYPE ITEM */
275ad17988aSAlexander Kozyrev #define MLX5_FLOW_ITEM_PTYPE (1ull << 52)
276ad17988aSAlexander Kozyrev 
2776f7d6622SHaifei Luo /* NSH ITEM */
2786f7d6622SHaifei Luo #define MLX5_FLOW_ITEM_NSH (1ull << 53)
2796f7d6622SHaifei Luo 
28084c406e7SOri Kam /* Outer Masks. */
28184c406e7SOri Kam #define MLX5_FLOW_LAYER_OUTER_L3 \
28284c406e7SOri Kam 	(MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_OUTER_L3_IPV6)
28384c406e7SOri Kam #define MLX5_FLOW_LAYER_OUTER_L4 \
28484c406e7SOri Kam 	(MLX5_FLOW_LAYER_OUTER_L4_UDP | MLX5_FLOW_LAYER_OUTER_L4_TCP)
28584c406e7SOri Kam #define MLX5_FLOW_LAYER_OUTER \
28684c406e7SOri Kam 	(MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_OUTER_L3 | \
28784c406e7SOri Kam 	 MLX5_FLOW_LAYER_OUTER_L4)
28884c406e7SOri Kam 
28984c406e7SOri Kam /* Tunnel Masks. */
29084c406e7SOri Kam #define MLX5_FLOW_LAYER_TUNNEL \
29184c406e7SOri Kam 	(MLX5_FLOW_LAYER_VXLAN | MLX5_FLOW_LAYER_VXLAN_GPE | \
292ea81c1b8SDekel Peled 	 MLX5_FLOW_LAYER_GRE | MLX5_FLOW_LAYER_NVGRE | MLX5_FLOW_LAYER_MPLS | \
293e59a5dbcSMoti Haimovsky 	 MLX5_FLOW_LAYER_IPIP | MLX5_FLOW_LAYER_IPV6_ENCAP | \
294a23e9b6eSGregory Etelson 	 MLX5_FLOW_LAYER_GENEVE | MLX5_FLOW_LAYER_GTP | \
295a23e9b6eSGregory Etelson 	 MLX5_FLOW_ITEM_FLEX_TUNNEL)
29684c406e7SOri Kam 
29784c406e7SOri Kam /* Inner Masks. */
29884c406e7SOri Kam #define MLX5_FLOW_LAYER_INNER_L3 \
29984c406e7SOri Kam 	(MLX5_FLOW_LAYER_INNER_L3_IPV4 | MLX5_FLOW_LAYER_INNER_L3_IPV6)
30084c406e7SOri Kam #define MLX5_FLOW_LAYER_INNER_L4 \
30184c406e7SOri Kam 	(MLX5_FLOW_LAYER_INNER_L4_UDP | MLX5_FLOW_LAYER_INNER_L4_TCP)
30284c406e7SOri Kam #define MLX5_FLOW_LAYER_INNER \
30384c406e7SOri Kam 	(MLX5_FLOW_LAYER_INNER_L2 | MLX5_FLOW_LAYER_INNER_L3 | \
30484c406e7SOri Kam 	 MLX5_FLOW_LAYER_INNER_L4)
30584c406e7SOri Kam 
3064bb14c83SDekel Peled /* Layer Masks. */
3074bb14c83SDekel Peled #define MLX5_FLOW_LAYER_L2 \
3084bb14c83SDekel Peled 	(MLX5_FLOW_LAYER_OUTER_L2 | MLX5_FLOW_LAYER_INNER_L2)
3094bb14c83SDekel Peled #define MLX5_FLOW_LAYER_L3_IPV4 \
3104bb14c83SDekel Peled 	(MLX5_FLOW_LAYER_OUTER_L3_IPV4 | MLX5_FLOW_LAYER_INNER_L3_IPV4)
3114bb14c83SDekel Peled #define MLX5_FLOW_LAYER_L3_IPV6 \
3124bb14c83SDekel Peled 	(MLX5_FLOW_LAYER_OUTER_L3_IPV6 | MLX5_FLOW_LAYER_INNER_L3_IPV6)
3134bb14c83SDekel Peled #define MLX5_FLOW_LAYER_L3 \
3144bb14c83SDekel Peled 	(MLX5_FLOW_LAYER_L3_IPV4 | MLX5_FLOW_LAYER_L3_IPV6)
3154bb14c83SDekel Peled #define MLX5_FLOW_LAYER_L4 \
3164bb14c83SDekel Peled 	(MLX5_FLOW_LAYER_OUTER_L4 | MLX5_FLOW_LAYER_INNER_L4)
3174bb14c83SDekel Peled 
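/*
 * Illustrative sketch: the layer bits above are typically accumulated into a
 * 64-bit item_flags word while a pattern is translated, and then tested with
 * the aggregate masks. The helper below is hypothetical.
 */
static __rte_always_inline bool
mlx5_flow_layers_tunneled(uint64_t item_flags)
{
	/* Any tunnel bit set means the inner-layer bits are meaningful. */
	return (item_flags & MLX5_FLOW_LAYER_TUNNEL) != 0;
}
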
31884c406e7SOri Kam /* Actions */
319e5517406SShun Hao #define MLX5_FLOW_ACTION_DROP (1ull << 0)
320e5517406SShun Hao #define MLX5_FLOW_ACTION_QUEUE (1ull << 1)
321e5517406SShun Hao #define MLX5_FLOW_ACTION_RSS (1ull << 2)
322e5517406SShun Hao #define MLX5_FLOW_ACTION_FLAG (1ull << 3)
323e5517406SShun Hao #define MLX5_FLOW_ACTION_MARK (1ull << 4)
324e5517406SShun Hao #define MLX5_FLOW_ACTION_COUNT (1ull << 5)
325e5517406SShun Hao #define MLX5_FLOW_ACTION_PORT_ID (1ull << 6)
326e5517406SShun Hao #define MLX5_FLOW_ACTION_OF_POP_VLAN (1ull << 7)
327e5517406SShun Hao #define MLX5_FLOW_ACTION_OF_PUSH_VLAN (1ull << 8)
328e5517406SShun Hao #define MLX5_FLOW_ACTION_OF_SET_VLAN_VID (1ull << 9)
329e5517406SShun Hao #define MLX5_FLOW_ACTION_OF_SET_VLAN_PCP (1ull << 10)
330e5517406SShun Hao #define MLX5_FLOW_ACTION_SET_IPV4_SRC (1ull << 11)
331e5517406SShun Hao #define MLX5_FLOW_ACTION_SET_IPV4_DST (1ull << 12)
332e5517406SShun Hao #define MLX5_FLOW_ACTION_SET_IPV6_SRC (1ull << 13)
333e5517406SShun Hao #define MLX5_FLOW_ACTION_SET_IPV6_DST (1ull << 14)
334e5517406SShun Hao #define MLX5_FLOW_ACTION_SET_TP_SRC (1ull << 15)
335e5517406SShun Hao #define MLX5_FLOW_ACTION_SET_TP_DST (1ull << 16)
336e5517406SShun Hao #define MLX5_FLOW_ACTION_JUMP (1ull << 17)
337e5517406SShun Hao #define MLX5_FLOW_ACTION_SET_TTL (1ull << 18)
338e5517406SShun Hao #define MLX5_FLOW_ACTION_DEC_TTL (1ull << 19)
339e5517406SShun Hao #define MLX5_FLOW_ACTION_SET_MAC_SRC (1ull << 20)
340e5517406SShun Hao #define MLX5_FLOW_ACTION_SET_MAC_DST (1ull << 21)
341e5517406SShun Hao #define MLX5_FLOW_ACTION_ENCAP (1ull << 22)
342e5517406SShun Hao #define MLX5_FLOW_ACTION_DECAP (1ull << 23)
343e5517406SShun Hao #define MLX5_FLOW_ACTION_INC_TCP_SEQ (1ull << 24)
344e5517406SShun Hao #define MLX5_FLOW_ACTION_DEC_TCP_SEQ (1ull << 25)
345e5517406SShun Hao #define MLX5_FLOW_ACTION_INC_TCP_ACK (1ull << 26)
346e5517406SShun Hao #define MLX5_FLOW_ACTION_DEC_TCP_ACK (1ull << 27)
34706387be8SMatan Azrad #define MLX5_FLOW_ACTION_SET_TAG (1ull << 28)
34806387be8SMatan Azrad #define MLX5_FLOW_ACTION_MARK_EXT (1ull << 29)
34906387be8SMatan Azrad #define MLX5_FLOW_ACTION_SET_META (1ull << 30)
35006387be8SMatan Azrad #define MLX5_FLOW_ACTION_METER (1ull << 31)
35106387be8SMatan Azrad #define MLX5_FLOW_ACTION_SET_IPV4_DSCP (1ull << 32)
35206387be8SMatan Azrad #define MLX5_FLOW_ACTION_SET_IPV6_DSCP (1ull << 33)
353fa2d01c8SDong Zhou #define MLX5_FLOW_ACTION_AGE (1ull << 34)
3543c78124fSShiri Kuzin #define MLX5_FLOW_ACTION_DEFAULT_MISS (1ull << 35)
35596b1f027SJiawei Wang #define MLX5_FLOW_ACTION_SAMPLE (1ull << 36)
3564ec6360dSGregory Etelson #define MLX5_FLOW_ACTION_TUNNEL_SET (1ull << 37)
3574ec6360dSGregory Etelson #define MLX5_FLOW_ACTION_TUNNEL_MATCH (1ull << 38)
358641dbe4fSAlexander Kozyrev #define MLX5_FLOW_ACTION_MODIFY_FIELD (1ull << 39)
35944432018SLi Zhang #define MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY (1ull << 40)
3602d084f69SBing Zhao #define MLX5_FLOW_ACTION_CT (1ull << 41)
36125c4d6dfSMichael Savisko #define MLX5_FLOW_ACTION_SEND_TO_KERNEL (1ull << 42)
36204a4de75SMichael Baum #define MLX5_FLOW_ACTION_INDIRECT_COUNT (1ull << 43)
36304a4de75SMichael Baum #define MLX5_FLOW_ACTION_INDIRECT_AGE (1ull << 44)
36415896eafSGregory Etelson #define MLX5_FLOW_ACTION_QUOTA (1ull << 46)
3653dce73a2SSuanming Mou #define MLX5_FLOW_ACTION_PORT_REPRESENTOR (1ull << 47)
36684c406e7SOri Kam 
367e2b05b22SShun Hao #define MLX5_FLOW_DROP_INCLUSIVE_ACTIONS \
368e2b05b22SShun Hao 	(MLX5_FLOW_ACTION_COUNT | MLX5_FLOW_ACTION_SAMPLE | MLX5_FLOW_ACTION_AGE)
369e2b05b22SShun Hao 
37084c406e7SOri Kam #define MLX5_FLOW_FATE_ACTIONS \
371684b9a1bSOri Kam 	(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_QUEUE | \
3723c78124fSShiri Kuzin 	 MLX5_FLOW_ACTION_RSS | MLX5_FLOW_ACTION_JUMP | \
37344432018SLi Zhang 	 MLX5_FLOW_ACTION_DEFAULT_MISS | \
37425c4d6dfSMichael Savisko 	 MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY | \
3753dce73a2SSuanming Mou 	 MLX5_FLOW_ACTION_SEND_TO_KERNEL | \
3763dce73a2SSuanming Mou 	 MLX5_FLOW_ACTION_PORT_REPRESENTOR)
37784c406e7SOri Kam 
3782e4c987aSOri Kam #define MLX5_FLOW_FATE_ESWITCH_ACTIONS \
3792e4c987aSOri Kam 	(MLX5_FLOW_ACTION_DROP | MLX5_FLOW_ACTION_PORT_ID | \
380b2cd3918SJiawei Wang 	 MLX5_FLOW_ACTION_SEND_TO_KERNEL | \
38144432018SLi Zhang 	 MLX5_FLOW_ACTION_JUMP | MLX5_FLOW_ACTION_METER_WITH_TERMINATED_POLICY)
3824b8727f0SDekel Peled 
3834bb14c83SDekel Peled #define MLX5_FLOW_MODIFY_HDR_ACTIONS (MLX5_FLOW_ACTION_SET_IPV4_SRC | \
3844bb14c83SDekel Peled 				      MLX5_FLOW_ACTION_SET_IPV4_DST | \
3854bb14c83SDekel Peled 				      MLX5_FLOW_ACTION_SET_IPV6_SRC | \
3864bb14c83SDekel Peled 				      MLX5_FLOW_ACTION_SET_IPV6_DST | \
3874bb14c83SDekel Peled 				      MLX5_FLOW_ACTION_SET_TP_SRC | \
3884bb14c83SDekel Peled 				      MLX5_FLOW_ACTION_SET_TP_DST | \
3894bb14c83SDekel Peled 				      MLX5_FLOW_ACTION_SET_TTL | \
3904bb14c83SDekel Peled 				      MLX5_FLOW_ACTION_DEC_TTL | \
3914bb14c83SDekel Peled 				      MLX5_FLOW_ACTION_SET_MAC_SRC | \
392585b99fbSDekel Peled 				      MLX5_FLOW_ACTION_SET_MAC_DST | \
393585b99fbSDekel Peled 				      MLX5_FLOW_ACTION_INC_TCP_SEQ | \
394585b99fbSDekel Peled 				      MLX5_FLOW_ACTION_DEC_TCP_SEQ | \
395585b99fbSDekel Peled 				      MLX5_FLOW_ACTION_INC_TCP_ACK | \
3965f163d52SMoti Haimovsky 				      MLX5_FLOW_ACTION_DEC_TCP_ACK | \
39770d84dc7SOri Kam 				      MLX5_FLOW_ACTION_OF_SET_VLAN_VID | \
39855deee17SViacheslav Ovsiienko 				      MLX5_FLOW_ACTION_SET_TAG | \
399fcc8d2f7SViacheslav Ovsiienko 				      MLX5_FLOW_ACTION_MARK_EXT | \
4006f26e604SSuanming Mou 				      MLX5_FLOW_ACTION_SET_META | \
4016f26e604SSuanming Mou 				      MLX5_FLOW_ACTION_SET_IPV4_DSCP | \
402641dbe4fSAlexander Kozyrev 				      MLX5_FLOW_ACTION_SET_IPV6_DSCP | \
403641dbe4fSAlexander Kozyrev 				      MLX5_FLOW_ACTION_MODIFY_FIELD)
4044bb14c83SDekel Peled 
4059aee7a84SMoti Haimovsky #define MLX5_FLOW_VLAN_ACTIONS (MLX5_FLOW_ACTION_OF_POP_VLAN | \
4069aee7a84SMoti Haimovsky 				MLX5_FLOW_ACTION_OF_PUSH_VLAN)
40706387be8SMatan Azrad 
40806387be8SMatan Azrad #define MLX5_FLOW_XCAP_ACTIONS (MLX5_FLOW_ACTION_ENCAP | MLX5_FLOW_ACTION_DECAP)
40906387be8SMatan Azrad 
41084c406e7SOri Kam #ifndef IPPROTO_MPLS
41184c406e7SOri Kam #define IPPROTO_MPLS 137
41284c406e7SOri Kam #endif
41384c406e7SOri Kam 
414d1abe664SDekel Peled /* UDP port number for MPLS */
415d1abe664SDekel Peled #define MLX5_UDP_PORT_MPLS 6635
416d1abe664SDekel Peled 
417fc2c498cSOri Kam /* UDP port numbers for VxLAN. */
418fc2c498cSOri Kam #define MLX5_UDP_PORT_VXLAN 4789
419fc2c498cSOri Kam #define MLX5_UDP_PORT_VXLAN_GPE 4790
420fc2c498cSOri Kam 
42132c2847aSDong Zhou /* UDP port numbers for RoCEv2. */
42232c2847aSDong Zhou #define MLX5_UDP_PORT_ROCEv2 4791
42332c2847aSDong Zhou 
424e59a5dbcSMoti Haimovsky /* UDP port numbers for GENEVE. */
425e59a5dbcSMoti Haimovsky #define MLX5_UDP_PORT_GENEVE 6081
426e59a5dbcSMoti Haimovsky 
4275f8ae44dSDong Zhou /* Lowest priority indicator. */
4285f8ae44dSDong Zhou #define MLX5_FLOW_LOWEST_PRIO_INDICATOR ((uint32_t)-1)
4295f8ae44dSDong Zhou 
4305f8ae44dSDong Zhou /*
4315f8ae44dSDong Zhou  * Max priority for ingress/egress flow groups
4325f8ae44dSDong Zhou  * greater than 0 and for any transfer flow group.
4335f8ae44dSDong Zhou  * From user configuration: 0 - 21843.
4345f8ae44dSDong Zhou  */
4355f8ae44dSDong Zhou #define MLX5_NON_ROOT_FLOW_MAX_PRIO	(21843 + 1)
43684c406e7SOri Kam 
43784c406e7SOri Kam /*
43884c406e7SOri Kam  * Number of sub priorities.
43984c406e7SOri Kam  * For each kind of pattern matching, i.e. L2, L3, L4, to get correct
44084c406e7SOri Kam  * matching on the NIC (firmware dependent), L4 must have the highest priority,
44184c406e7SOri Kam  * followed by L3 and ending with L2.
44284c406e7SOri Kam  */
44384c406e7SOri Kam #define MLX5_PRIORITY_MAP_L2 2
44484c406e7SOri Kam #define MLX5_PRIORITY_MAP_L3 1
44584c406e7SOri Kam #define MLX5_PRIORITY_MAP_L4 0
44684c406e7SOri Kam #define MLX5_PRIORITY_MAP_MAX 3
44784c406e7SOri Kam 
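/*
 * Illustrative sketch: the deepest matched layer selects the sub-priority, so
 * an L4 match is inserted at a better (numerically lower) sub-priority than an
 * L3 match, and L3 better than L2. The helper below is hypothetical.
 */
static __rte_always_inline uint32_t
mlx5_flow_subprio_from_layers(uint64_t item_flags)
{
	if (item_flags & MLX5_FLOW_LAYER_L4)
		return MLX5_PRIORITY_MAP_L4;
	if (item_flags & MLX5_FLOW_LAYER_L3)
		return MLX5_PRIORITY_MAP_L3;
	return MLX5_PRIORITY_MAP_L2;
}
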
448fc2c498cSOri Kam /* Valid layer type for IPV4 RSS. */
449fc2c498cSOri Kam #define MLX5_IPV4_LAYER_TYPES \
450295968d1SFerruh Yigit 	(RTE_ETH_RSS_IPV4 | RTE_ETH_RSS_FRAG_IPV4 | \
451295968d1SFerruh Yigit 	 RTE_ETH_RSS_NONFRAG_IPV4_TCP | RTE_ETH_RSS_NONFRAG_IPV4_UDP | \
452295968d1SFerruh Yigit 	 RTE_ETH_RSS_NONFRAG_IPV4_OTHER)
453fc2c498cSOri Kam 
454fc2c498cSOri Kam /* IBV hash source bits  for IPV4. */
455fc2c498cSOri Kam #define MLX5_IPV4_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4)
456fc2c498cSOri Kam 
457fc2c498cSOri Kam /* Valid layer type for IPV6 RSS. */
458fc2c498cSOri Kam #define MLX5_IPV6_LAYER_TYPES \
459295968d1SFerruh Yigit 	(RTE_ETH_RSS_IPV6 | RTE_ETH_RSS_FRAG_IPV6 | RTE_ETH_RSS_NONFRAG_IPV6_TCP | \
460295968d1SFerruh Yigit 	 RTE_ETH_RSS_NONFRAG_IPV6_UDP | RTE_ETH_RSS_IPV6_EX  | RTE_ETH_RSS_IPV6_TCP_EX | \
461295968d1SFerruh Yigit 	 RTE_ETH_RSS_IPV6_UDP_EX | RTE_ETH_RSS_NONFRAG_IPV6_OTHER)
462fc2c498cSOri Kam 
463fc2c498cSOri Kam /* IBV hash source bits  for IPV6. */
464fc2c498cSOri Kam #define MLX5_IPV6_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6)
465fc2c498cSOri Kam 
466c3e33304SDekel Peled /* IBV hash bits for L3 SRC. */
467c3e33304SDekel Peled #define MLX5_L3_SRC_IBV_RX_HASH (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_SRC_IPV6)
468c3e33304SDekel Peled 
469c3e33304SDekel Peled /* IBV hash bits for L3 DST. */
470c3e33304SDekel Peled #define MLX5_L3_DST_IBV_RX_HASH (IBV_RX_HASH_DST_IPV4 | IBV_RX_HASH_DST_IPV6)
471c3e33304SDekel Peled 
472c3e33304SDekel Peled /* IBV hash bits for TCP. */
473c3e33304SDekel Peled #define MLX5_TCP_IBV_RX_HASH (IBV_RX_HASH_SRC_PORT_TCP | \
474c3e33304SDekel Peled 			      IBV_RX_HASH_DST_PORT_TCP)
475c3e33304SDekel Peled 
476c3e33304SDekel Peled /* IBV hash bits for UDP. */
477c3e33304SDekel Peled #define MLX5_UDP_IBV_RX_HASH (IBV_RX_HASH_SRC_PORT_UDP | \
478c3e33304SDekel Peled 			      IBV_RX_HASH_DST_PORT_UDP)
479c3e33304SDekel Peled 
480c3e33304SDekel Peled /* IBV hash bits for L4 SRC. */
481c3e33304SDekel Peled #define MLX5_L4_SRC_IBV_RX_HASH (IBV_RX_HASH_SRC_PORT_TCP | \
482c3e33304SDekel Peled 				 IBV_RX_HASH_SRC_PORT_UDP)
483c3e33304SDekel Peled 
484c3e33304SDekel Peled /* IBV hash bits for L4 DST. */
485c3e33304SDekel Peled #define MLX5_L4_DST_IBV_RX_HASH (IBV_RX_HASH_DST_PORT_TCP | \
486c3e33304SDekel Peled 				 IBV_RX_HASH_DST_PORT_UDP)
487e59a5dbcSMoti Haimovsky 
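/*
 * Illustrative sketch: the macros above map rte_eth RSS type bits to ibverbs
 * RX hash fields. The helper below is hypothetical and only covers the plain
 * L3 selection.
 */
static __rte_always_inline uint64_t
mlx5_rss_l3_types_to_ibv_hash(uint64_t rss_types)
{
	uint64_t fields = 0;

	if (rss_types & MLX5_IPV4_LAYER_TYPES)
		fields |= MLX5_IPV4_IBV_RX_HASH;
	if (rss_types & MLX5_IPV6_LAYER_TYPES)
		fields |= MLX5_IPV6_IBV_RX_HASH;
	return fields;
}
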
488e59a5dbcSMoti Haimovsky /* GENEVE header, first 16 bits. */
489e59a5dbcSMoti Haimovsky #define MLX5_GENEVE_VER_MASK 0x3
490e59a5dbcSMoti Haimovsky #define MLX5_GENEVE_VER_SHIFT 14
491e59a5dbcSMoti Haimovsky #define MLX5_GENEVE_VER_VAL(a) \
492e59a5dbcSMoti Haimovsky 		(((a) >> (MLX5_GENEVE_VER_SHIFT)) & (MLX5_GENEVE_VER_MASK))
493e59a5dbcSMoti Haimovsky #define MLX5_GENEVE_OPTLEN_MASK 0x3F
494e440d6cfSShiri Kuzin #define MLX5_GENEVE_OPTLEN_SHIFT 8
495e59a5dbcSMoti Haimovsky #define MLX5_GENEVE_OPTLEN_VAL(a) \
496e59a5dbcSMoti Haimovsky 	    (((a) >> (MLX5_GENEVE_OPTLEN_SHIFT)) & (MLX5_GENEVE_OPTLEN_MASK))
497e59a5dbcSMoti Haimovsky #define MLX5_GENEVE_OAMF_MASK 0x1
498e59a5dbcSMoti Haimovsky #define MLX5_GENEVE_OAMF_SHIFT 7
499e59a5dbcSMoti Haimovsky #define MLX5_GENEVE_OAMF_VAL(a) \
500e59a5dbcSMoti Haimovsky 		(((a) >> (MLX5_GENEVE_OAMF_SHIFT)) & (MLX5_GENEVE_OAMF_MASK))
501e59a5dbcSMoti Haimovsky #define MLX5_GENEVE_CRITO_MASK 0x1
502e59a5dbcSMoti Haimovsky #define MLX5_GENEVE_CRITO_SHIFT 6
503e59a5dbcSMoti Haimovsky #define MLX5_GENEVE_CRITO_VAL(a) \
504e59a5dbcSMoti Haimovsky 		(((a) >> (MLX5_GENEVE_CRITO_SHIFT)) & (MLX5_GENEVE_CRITO_MASK))
505e59a5dbcSMoti Haimovsky #define MLX5_GENEVE_RSVD_MASK 0x3F
506e59a5dbcSMoti Haimovsky #define MLX5_GENEVE_RSVD_VAL(a) ((a) & (MLX5_GENEVE_RSVD_MASK))
507e59a5dbcSMoti Haimovsky /*
508e59a5dbcSMoti Haimovsky  * The length of the Geneve options fields, expressed in four byte multiples,
509e59a5dbcSMoti Haimovsky  * not including the eight-byte fixed tunnel header.
510e59a5dbcSMoti Haimovsky  */
511e59a5dbcSMoti Haimovsky #define MLX5_GENEVE_OPT_LEN_0 14
512e59a5dbcSMoti Haimovsky #define MLX5_GENEVE_OPT_LEN_1 63
513e59a5dbcSMoti Haimovsky 
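/*
 * Illustrative sketch: the MLX5_GENEVE_*_VAL macros above slice the first 16
 * bits of the GENEVE header (version, option length, OAM and critical-option
 * flags, reserved field). The helper below is hypothetical; "first_word" is
 * that 16-bit word in CPU byte order.
 */
static __rte_always_inline void
mlx5_geneve_first_word_decode(uint16_t first_word,
			      uint8_t *ver, uint8_t *opt_len)
{
	*ver = MLX5_GENEVE_VER_VAL(first_word);
	*opt_len = MLX5_GENEVE_OPTLEN_VAL(first_word); /* In 4-byte units. */
}
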
514f9210259SViacheslav Ovsiienko #define MLX5_ENCAPSULATION_DECISION_SIZE (sizeof(struct rte_ether_hdr) + \
515f9210259SViacheslav Ovsiienko 					  sizeof(struct rte_ipv4_hdr))
5162c9f9617SShiri Kuzin /* GTP extension header flag. */
5172c9f9617SShiri Kuzin #define MLX5_GTP_EXT_HEADER_FLAG 4
5182c9f9617SShiri Kuzin 
51906cd4cf6SShiri Kuzin /* GTP extension header PDU type shift. */
52006cd4cf6SShiri Kuzin #define MLX5_GTP_PDU_TYPE_SHIFT(a) ((a) << 4)
52106cd4cf6SShiri Kuzin 
5226859e67eSDekel Peled /* IPv4 fragment_offset field contains relevant data in bits 2 to 15. */
5236859e67eSDekel Peled #define MLX5_IPV4_FRAG_OFFSET_MASK \
5246859e67eSDekel Peled 		(RTE_IPV4_HDR_OFFSET_MASK | RTE_IPV4_HDR_MF_FLAG)
5256859e67eSDekel Peled 
5266859e67eSDekel Peled /* Specific item's fields can accept a range of values (using spec and last). */
5276859e67eSDekel Peled #define MLX5_ITEM_RANGE_NOT_ACCEPTED	false
5286859e67eSDekel Peled #define MLX5_ITEM_RANGE_ACCEPTED	true
5296859e67eSDekel Peled 
53072a944dbSBing Zhao /* Number of software modify-header commands required per field type. */
53172a944dbSBing Zhao #define MLX5_ACT_NUM_MDF_IPV4		1
53272a944dbSBing Zhao #define MLX5_ACT_NUM_MDF_IPV6		4
53372a944dbSBing Zhao #define MLX5_ACT_NUM_MDF_MAC		2
53472a944dbSBing Zhao #define MLX5_ACT_NUM_MDF_VID		1
535ea7cc15aSDmitry Kozlyuk #define MLX5_ACT_NUM_MDF_PORT		1
53672a944dbSBing Zhao #define MLX5_ACT_NUM_MDF_TTL		1
53772a944dbSBing Zhao #define MLX5_ACT_NUM_DEC_TTL		MLX5_ACT_NUM_MDF_TTL
53872a944dbSBing Zhao #define MLX5_ACT_NUM_MDF_TCPSEQ		1
53972a944dbSBing Zhao #define MLX5_ACT_NUM_MDF_TCPACK		1
54072a944dbSBing Zhao #define MLX5_ACT_NUM_SET_REG		1
54172a944dbSBing Zhao #define MLX5_ACT_NUM_SET_TAG		1
54272a944dbSBing Zhao #define MLX5_ACT_NUM_CPY_MREG		MLX5_ACT_NUM_SET_TAG
54372a944dbSBing Zhao #define MLX5_ACT_NUM_SET_MARK		MLX5_ACT_NUM_SET_TAG
54472a944dbSBing Zhao #define MLX5_ACT_NUM_SET_META		MLX5_ACT_NUM_SET_TAG
54572a944dbSBing Zhao #define MLX5_ACT_NUM_SET_DSCP		1
54672a944dbSBing Zhao 
547641dbe4fSAlexander Kozyrev /* Maximum number of fields to modify in MODIFY_FIELD */
548641dbe4fSAlexander Kozyrev #define MLX5_ACT_MAX_MOD_FIELDS 5
549641dbe4fSAlexander Kozyrev 
5505cac1a5cSBing Zhao /* Syndrome bits definition for connection tracking. */
5515cac1a5cSBing Zhao #define MLX5_CT_SYNDROME_VALID		(0x0 << 6)
5525cac1a5cSBing Zhao #define MLX5_CT_SYNDROME_INVALID	(0x1 << 6)
5535cac1a5cSBing Zhao #define MLX5_CT_SYNDROME_TRAP		(0x2 << 6)
5545cac1a5cSBing Zhao #define MLX5_CT_SYNDROME_STATE_CHANGE	(0x1 << 1)
5555cac1a5cSBing Zhao #define MLX5_CT_SYNDROME_BAD_PACKET	(0x1 << 0)
5565cac1a5cSBing Zhao 
5570c76d1c9SYongseok Koh enum mlx5_flow_drv_type {
5580c76d1c9SYongseok Koh 	MLX5_FLOW_TYPE_MIN,
5590c76d1c9SYongseok Koh 	MLX5_FLOW_TYPE_DV,
5600c76d1c9SYongseok Koh 	MLX5_FLOW_TYPE_VERBS,
5612b679150SSuanming Mou 	MLX5_FLOW_TYPE_HW,
5620c76d1c9SYongseok Koh 	MLX5_FLOW_TYPE_MAX,
5630c76d1c9SYongseok Koh };
5640c76d1c9SYongseok Koh 
565488d13abSSuanming Mou /* Fate action type. */
566488d13abSSuanming Mou enum mlx5_flow_fate_type {
567488d13abSSuanming Mou 	MLX5_FLOW_FATE_NONE, /* Egress flow. */
568488d13abSSuanming Mou 	MLX5_FLOW_FATE_QUEUE,
569488d13abSSuanming Mou 	MLX5_FLOW_FATE_JUMP,
570488d13abSSuanming Mou 	MLX5_FLOW_FATE_PORT_ID,
571488d13abSSuanming Mou 	MLX5_FLOW_FATE_DROP,
5723c78124fSShiri Kuzin 	MLX5_FLOW_FATE_DEFAULT_MISS,
573fabf8a37SSuanming Mou 	MLX5_FLOW_FATE_SHARED_RSS,
57450cc92ddSShun Hao 	MLX5_FLOW_FATE_MTR,
57525c4d6dfSMichael Savisko 	MLX5_FLOW_FATE_SEND_TO_KERNEL,
576488d13abSSuanming Mou 	MLX5_FLOW_FATE_MAX,
577488d13abSSuanming Mou };
578488d13abSSuanming Mou 
579865a0c15SOri Kam /* Matcher PRM representation */
580865a0c15SOri Kam struct mlx5_flow_dv_match_params {
581865a0c15SOri Kam 	size_t size;
582865a0c15SOri Kam 	/**< Size of match value. Do NOT split size and key! */
583865a0c15SOri Kam 	uint32_t buf[MLX5_ST_SZ_DW(fte_match_param)];
584865a0c15SOri Kam 	/**< Matcher value. This value is used as the mask or as a key. */
585865a0c15SOri Kam };
586865a0c15SOri Kam 
587865a0c15SOri Kam /* Matcher structure. */
588865a0c15SOri Kam struct mlx5_flow_dv_matcher {
589e78e5408SMatan Azrad 	struct mlx5_list_entry entry; /**< Pointer to the next element. */
590e9e36e52SBing Zhao 	struct mlx5_flow_tbl_resource *tbl;
591e9e36e52SBing Zhao 	/**< Pointer to the table(group) the matcher associated with. */
592865a0c15SOri Kam 	void *matcher_object; /**< Pointer to DV matcher */
593865a0c15SOri Kam 	uint16_t crc; /**< CRC of key. */
594865a0c15SOri Kam 	uint16_t priority; /**< Priority of matcher. */
595865a0c15SOri Kam 	struct mlx5_flow_dv_match_params mask; /**< Matcher mask. */
596865a0c15SOri Kam };
597865a0c15SOri Kam 
5984bb14c83SDekel Peled #define MLX5_ENCAP_MAX_LEN 132
5994bb14c83SDekel Peled 
600c513f05cSDekel Peled /* Encap/decap resource structure. */
601c513f05cSDekel Peled struct mlx5_flow_dv_encap_decap_resource {
602961b6774SMatan Azrad 	struct mlx5_list_entry entry;
603c513f05cSDekel Peled 	/* Pointer to next element. */
604cf7d1995SAlexander Kozyrev 	uint32_t refcnt; /**< Reference counter. */
6056ad7cfaaSDekel Peled 	void *action;
6066ad7cfaaSDekel Peled 	/**< Encap/decap action object. */
607c513f05cSDekel Peled 	uint8_t buf[MLX5_ENCAP_MAX_LEN];
608c513f05cSDekel Peled 	size_t size;
609c513f05cSDekel Peled 	uint8_t reformat_type;
610c513f05cSDekel Peled 	uint8_t ft_type;
6114f84a197SOri Kam 	uint64_t flags; /**< Flags for RDMA API. */
612bf615b07SSuanming Mou 	uint32_t idx; /**< Index for the index memory pool. */
613c513f05cSDekel Peled };
614c513f05cSDekel Peled 
615cbb66daaSOri Kam /* Tag resource structure. */
616cbb66daaSOri Kam struct mlx5_flow_dv_tag_resource {
617961b6774SMatan Azrad 	struct mlx5_list_entry entry;
618e484e403SBing Zhao 	/**< hash list entry for tag resource, tag value as the key. */
619cbb66daaSOri Kam 	void *action;
6206ad7cfaaSDekel Peled 	/**< Tag action object. */
621cf7d1995SAlexander Kozyrev 	uint32_t refcnt; /**< Reference counter. */
6225f114269SSuanming Mou 	uint32_t idx; /**< Index for the index memory pool. */
623f5b0aed2SSuanming Mou 	uint32_t tag_id; /**< Tag ID. */
624cbb66daaSOri Kam };
625cbb66daaSOri Kam 
6264bb14c83SDekel Peled /* Modify resource structure */
6274bb14c83SDekel Peled struct mlx5_flow_dv_modify_hdr_resource {
628961b6774SMatan Azrad 	struct mlx5_list_entry entry;
62916a7dbc4SXueming Li 	void *action; /**< Modify header action object. */
6304f3d8d0eSMatan Azrad 	uint32_t idx;
63116a7dbc4SXueming Li 	/* Key area for hash list matching: */
6324bb14c83SDekel Peled 	uint8_t ft_type; /**< Flow table type, Rx or Tx. */
633e681eb05SMatan Azrad 	uint8_t actions_num; /**< Number of modification actions. */
634e681eb05SMatan Azrad 	bool root; /**< Whether action is in root table. */
635024e9575SBing Zhao 	struct mlx5_modification_cmd actions[];
636024e9575SBing Zhao 	/**< Modification actions. */
637e681eb05SMatan Azrad } __rte_packed;
6384bb14c83SDekel Peled 
6393fe88961SSuanming Mou /* Modify resource key of the hash organization. */
6403fe88961SSuanming Mou union mlx5_flow_modify_hdr_key {
6413fe88961SSuanming Mou 	struct {
6423fe88961SSuanming Mou 		uint32_t ft_type:8;	/**< Flow table type, Rx or Tx. */
6433fe88961SSuanming Mou 		uint32_t actions_num:5;	/**< Number of modification actions. */
6443fe88961SSuanming Mou 		uint32_t group:19;	/**< Flow group id. */
6453fe88961SSuanming Mou 		uint32_t cksum;		/**< Actions check sum. */
6463fe88961SSuanming Mou 	};
6473fe88961SSuanming Mou 	uint64_t v64;			/**< full 64bits value of key */
6483fe88961SSuanming Mou };
6493fe88961SSuanming Mou 
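/*
 * Illustrative sketch: the union above packs the hash-list lookup key for
 * modify-header resources into a single 64-bit value. The helper below is
 * hypothetical; "cksum" is assumed to be a checksum computed by the caller
 * over the modification commands.
 */
static __rte_always_inline uint64_t
mlx5_flow_modify_hdr_make_key(uint8_t ft_type, uint8_t actions_num,
			      uint32_t group, uint32_t cksum)
{
	union mlx5_flow_modify_hdr_key key;

	key.ft_type = ft_type;
	key.actions_num = actions_num;
	key.group = group;	/* Truncated to 19 bits by the bit-field. */
	key.cksum = cksum;
	return key.v64;
}
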
650684b9a1bSOri Kam /* Jump action resource structure. */
651684b9a1bSOri Kam struct mlx5_flow_dv_jump_tbl_resource {
6526c1d9a64SBing Zhao 	void *action; /**< Pointer to the rdma core action. */
653684b9a1bSOri Kam };
654684b9a1bSOri Kam 
655c269b517SOri Kam /* Port ID resource structure. */
656c269b517SOri Kam struct mlx5_flow_dv_port_id_action_resource {
657e78e5408SMatan Azrad 	struct mlx5_list_entry entry;
6580fd5f82aSXueming Li 	void *action; /**< Action object. */
659c269b517SOri Kam 	uint32_t port_id; /**< Port ID value. */
6600fd5f82aSXueming Li 	uint32_t idx; /**< Indexed pool memory index. */
661c269b517SOri Kam };
662c269b517SOri Kam 
6639aee7a84SMoti Haimovsky /* Push VLAN action resource structure */
6649aee7a84SMoti Haimovsky struct mlx5_flow_dv_push_vlan_action_resource {
665e78e5408SMatan Azrad 	struct mlx5_list_entry entry; /* Cache entry. */
6666ad7cfaaSDekel Peled 	void *action; /**< Action object. */
6679aee7a84SMoti Haimovsky 	uint8_t ft_type; /**< Flow table type, Rx, Tx or FDB. */
6689aee7a84SMoti Haimovsky 	rte_be32_t vlan_tag; /**< VLAN tag value. */
6693422af2aSXueming Li 	uint32_t idx; /**< Indexed pool memory index. */
6709aee7a84SMoti Haimovsky };
6719aee7a84SMoti Haimovsky 
672dd3c774fSViacheslav Ovsiienko /* Metadata register copy table entry. */
673dd3c774fSViacheslav Ovsiienko struct mlx5_flow_mreg_copy_resource {
674dd3c774fSViacheslav Ovsiienko 	/*
675dd3c774fSViacheslav Ovsiienko 	 * Hash list entry for copy table.
676dd3c774fSViacheslav Ovsiienko 	 *  - Key is 32/64-bit MARK action ID.
677dd3c774fSViacheslav Ovsiienko 	 *  - MUST be the first entry.
678dd3c774fSViacheslav Ovsiienko 	 */
679961b6774SMatan Azrad 	struct mlx5_list_entry hlist_ent;
680dd3c774fSViacheslav Ovsiienko 	LIST_ENTRY(mlx5_flow_mreg_copy_resource) next;
681dd3c774fSViacheslav Ovsiienko 	/* List entry for device flows. */
68290e6053aSSuanming Mou 	uint32_t idx;
683ab612adcSSuanming Mou 	uint32_t rix_flow; /* Built flow for copy. */
684f5b0aed2SSuanming Mou 	uint32_t mark_id;
685dd3c774fSViacheslav Ovsiienko };
686dd3c774fSViacheslav Ovsiienko 
687afd7a625SXueming Li /* Table tunnel parameter. */
688afd7a625SXueming Li struct mlx5_flow_tbl_tunnel_prm {
689afd7a625SXueming Li 	const struct mlx5_flow_tunnel *tunnel;
690afd7a625SXueming Li 	uint32_t group_id;
691afd7a625SXueming Li 	bool external;
692afd7a625SXueming Li };
693afd7a625SXueming Li 
694860897d2SBing Zhao /* Table data structure of the hash organization. */
695860897d2SBing Zhao struct mlx5_flow_tbl_data_entry {
696961b6774SMatan Azrad 	struct mlx5_list_entry entry;
697e9e36e52SBing Zhao 	/**< hash list entry, 64-bits key inside. */
698860897d2SBing Zhao 	struct mlx5_flow_tbl_resource tbl;
699e9e36e52SBing Zhao 	/**< flow table resource. */
700679f46c7SMatan Azrad 	struct mlx5_list *matchers;
701e9e36e52SBing Zhao 	/**< matchers' header associated with the flow table. */
7026c1d9a64SBing Zhao 	struct mlx5_flow_dv_jump_tbl_resource jump;
7036c1d9a64SBing Zhao 	/**< jump resource, at most one for each table created. */
7047ac99475SSuanming Mou 	uint32_t idx; /**< index for the indexed mempool. */
7054ec6360dSGregory Etelson 	/**< tunnel offload */
7064ec6360dSGregory Etelson 	const struct mlx5_flow_tunnel *tunnel;
7074ec6360dSGregory Etelson 	uint32_t group_id;
708f5b0aed2SSuanming Mou 	uint32_t external:1;
7097be78d02SJosh Soref 	uint32_t tunnel_offload:1; /* Tunnel offload table or not. */
710f5b0aed2SSuanming Mou 	uint32_t is_egress:1; /**< Egress table. */
711f5b0aed2SSuanming Mou 	uint32_t is_transfer:1; /**< Transfer table. */
712f5b0aed2SSuanming Mou 	uint32_t dummy:1; /**<  DR table. */
7132d2cef5dSLi Zhang 	uint32_t id:22; /**< Table ID. */
7142d2cef5dSLi Zhang 	uint32_t reserve:5; /**< Reserved for future use. */
7152d2cef5dSLi Zhang 	uint32_t level; /**< Table level. */
716860897d2SBing Zhao };
717860897d2SBing Zhao 
718b4c0ddbfSJiawei Wang /* Sub rdma-core actions list. */
719b4c0ddbfSJiawei Wang struct mlx5_flow_sub_actions_list {
720b4c0ddbfSJiawei Wang 	uint32_t actions_num; /**< Number of sample actions. */
721b4c0ddbfSJiawei Wang 	uint64_t action_flags;
722b4c0ddbfSJiawei Wang 	void *dr_queue_action;
723b4c0ddbfSJiawei Wang 	void *dr_tag_action;
724b4c0ddbfSJiawei Wang 	void *dr_cnt_action;
72500c10c22SJiawei Wang 	void *dr_port_id_action;
72600c10c22SJiawei Wang 	void *dr_encap_action;
7276a951567SJiawei Wang 	void *dr_jump_action;
728b4c0ddbfSJiawei Wang };
729b4c0ddbfSJiawei Wang 
730b4c0ddbfSJiawei Wang /* Sample sub-actions resource list. */
731b4c0ddbfSJiawei Wang struct mlx5_flow_sub_actions_idx {
732b4c0ddbfSJiawei Wang 	uint32_t rix_hrxq; /**< Hash Rx queue object index. */
733b4c0ddbfSJiawei Wang 	uint32_t rix_tag; /**< Index to the tag action. */
73400c10c22SJiawei Wang 	uint32_t rix_port_id_action; /**< Index to port ID action resource. */
73500c10c22SJiawei Wang 	uint32_t rix_encap_decap; /**< Index to encap/decap resource. */
7366a951567SJiawei Wang 	uint32_t rix_jump; /**< Index to the jump action resource. */
737b4c0ddbfSJiawei Wang };
738b4c0ddbfSJiawei Wang 
739b4c0ddbfSJiawei Wang /* Sample action resource structure. */
740b4c0ddbfSJiawei Wang struct mlx5_flow_dv_sample_resource {
741e78e5408SMatan Azrad 	struct mlx5_list_entry entry; /**< Cache entry. */
74219784141SSuanming Mou 	union {
743b4c0ddbfSJiawei Wang 		void *verbs_action; /**< Verbs sample action object. */
74419784141SSuanming Mou 		void **sub_actions; /**< Sample sub-action array. */
74519784141SSuanming Mou 	};
74601c05ee0SSuanming Mou 	struct rte_eth_dev *dev; /**< Device registers the action. */
74719784141SSuanming Mou 	uint32_t idx; /** Sample object index. */
748b4c0ddbfSJiawei Wang 	uint8_t ft_type; /** Flow Table Type */
749b4c0ddbfSJiawei Wang 	uint32_t ft_id; /** Flow Table Level */
750b4c0ddbfSJiawei Wang 	uint32_t ratio;   /** Sample Ratio */
751b4c0ddbfSJiawei Wang 	uint64_t set_action; /** Restore reg_c0 value */
752b4c0ddbfSJiawei Wang 	void *normal_path_tbl; /** Flow Table pointer */
753b4c0ddbfSJiawei Wang 	struct mlx5_flow_sub_actions_idx sample_idx;
754b4c0ddbfSJiawei Wang 	/**< Action index resources. */
755b4c0ddbfSJiawei Wang 	struct mlx5_flow_sub_actions_list sample_act;
756b4c0ddbfSJiawei Wang 	/**< Action resources. */
757b4c0ddbfSJiawei Wang };
758b4c0ddbfSJiawei Wang 
75900c10c22SJiawei Wang #define MLX5_MAX_DEST_NUM	2
76000c10c22SJiawei Wang 
76100c10c22SJiawei Wang /* Destination array action resource structure. */
76200c10c22SJiawei Wang struct mlx5_flow_dv_dest_array_resource {
763e78e5408SMatan Azrad 	struct mlx5_list_entry entry; /**< Cache entry. */
76419784141SSuanming Mou 	uint32_t idx; /** Destination array action object index. */
76500c10c22SJiawei Wang 	uint8_t ft_type; /** Flow Table Type */
76600c10c22SJiawei Wang 	uint8_t num_of_dest; /**< Number of destination actions. */
76701c05ee0SSuanming Mou 	struct rte_eth_dev *dev; /**< Device registers the action. */
76800c10c22SJiawei Wang 	void *action; /**< Pointer to the rdma core action. */
76900c10c22SJiawei Wang 	struct mlx5_flow_sub_actions_idx sample_idx[MLX5_MAX_DEST_NUM];
77000c10c22SJiawei Wang 	/**< Action index resources. */
77100c10c22SJiawei Wang 	struct mlx5_flow_sub_actions_list sample_act[MLX5_MAX_DEST_NUM];
77200c10c22SJiawei Wang 	/**< Action resources. */
77300c10c22SJiawei Wang };
77400c10c22SJiawei Wang 
775750ff30aSGregory Etelson /* PMD flow priority for tunnel */
776750ff30aSGregory Etelson #define MLX5_TUNNEL_PRIO_GET(rss_desc) \
777750ff30aSGregory Etelson 	((rss_desc)->level >= 2 ? MLX5_PRIORITY_MAP_L2 : MLX5_PRIORITY_MAP_L4)
778750ff30aSGregory Etelson 
779e745f900SSuanming Mou 
780c42f44bdSBing Zhao /** Device flow handle structure for DV mode only. */
781c42f44bdSBing Zhao struct mlx5_flow_handle_dv {
782c42f44bdSBing Zhao 	/* Flow DV api: */
783c42f44bdSBing Zhao 	struct mlx5_flow_dv_matcher *matcher; /**< Cache to matcher. */
784c42f44bdSBing Zhao 	struct mlx5_flow_dv_modify_hdr_resource *modify_hdr;
785c42f44bdSBing Zhao 	/**< Pointer to modify header resource in cache. */
78677749adaSSuanming Mou 	uint32_t rix_encap_decap;
78777749adaSSuanming Mou 	/**< Index to encap/decap resource in cache. */
78877749adaSSuanming Mou 	uint32_t rix_push_vlan;
7898acf8ac9SSuanming Mou 	/**< Index to push VLAN action resource in cache. */
79077749adaSSuanming Mou 	uint32_t rix_tag;
7915f114269SSuanming Mou 	/**< Index to the tag action. */
792b4c0ddbfSJiawei Wang 	uint32_t rix_sample;
793b4c0ddbfSJiawei Wang 	/**< Index to sample action resource in cache. */
79400c10c22SJiawei Wang 	uint32_t rix_dest_array;
79500c10c22SJiawei Wang 	/**< Index to destination array resource in cache. */
79677749adaSSuanming Mou } __rte_packed;
797c42f44bdSBing Zhao 
798c42f44bdSBing Zhao /** Device flow handle structure: used both for creating & destroying. */
799c42f44bdSBing Zhao struct mlx5_flow_handle {
800b88341caSSuanming Mou 	SILIST_ENTRY(uint32_t)next;
80177749adaSSuanming Mou 	struct mlx5_vf_vlan vf_vlan; /**< Structure for VF VLAN workaround. */
802b88341caSSuanming Mou 	/**< Index to next device flow handle. */
8030ddd1143SYongseok Koh 	uint64_t layers;
80424663641SYongseok Koh 	/**< Bit-fields of present layers, see MLX5_FLOW_LAYER_*. */
805341c8941SDekel Peled 	void *drv_flow; /**< pointer to driver flow object. */
80683306d6cSShun Hao 	uint32_t split_flow_id:27; /**< Sub flow unique match flow id. */
8077be78d02SJosh Soref 	uint32_t is_meter_flow_id:1; /**< Indicate if flow_id is for meter. */
80825c4d6dfSMichael Savisko 	uint32_t fate_action:4; /**< Fate action type. */
8096fc18392SSuanming Mou 	union {
81077749adaSSuanming Mou 		uint32_t rix_hrxq; /**< Hash Rx queue object index. */
81177749adaSSuanming Mou 		uint32_t rix_jump; /**< Index to the jump action resource. */
81277749adaSSuanming Mou 		uint32_t rix_port_id_action;
8136fc18392SSuanming Mou 		/**< Index to port ID action resource. */
81477749adaSSuanming Mou 		uint32_t rix_fate;
815488d13abSSuanming Mou 		/**< Generic value indicates the fate action. */
8163c78124fSShiri Kuzin 		uint32_t rix_default_fate;
8173c78124fSShiri Kuzin 		/**< Indicates default miss fate action. */
818fabf8a37SSuanming Mou 		uint32_t rix_srss;
819fabf8a37SSuanming Mou 		/**< Indicates shared RSS fate action. */
8206fc18392SSuanming Mou 	};
821f1ae0b35SOphir Munk #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
822c42f44bdSBing Zhao 	struct mlx5_flow_handle_dv dvh;
823c42f44bdSBing Zhao #endif
824cfe337e7SGregory Etelson 	uint8_t flex_item; /**< referenced Flex Item bitmask. */
82577749adaSSuanming Mou } __rte_packed;
826c42f44bdSBing Zhao 
827c42f44bdSBing Zhao /*
828e7bfa359SBing Zhao  * Size for Verbs device flow handle structure only. Do not use the DV only
829e7bfa359SBing Zhao  * structure in Verbs. No DV flows attributes will be accessed.
830e7bfa359SBing Zhao  * Macro offsetof() could also be used here.
831e7bfa359SBing Zhao  */
832f1ae0b35SOphir Munk #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
833e7bfa359SBing Zhao #define MLX5_FLOW_HANDLE_VERBS_SIZE \
834e7bfa359SBing Zhao 	(sizeof(struct mlx5_flow_handle) - sizeof(struct mlx5_flow_handle_dv))
835e7bfa359SBing Zhao #else
836e7bfa359SBing Zhao #define MLX5_FLOW_HANDLE_VERBS_SIZE (sizeof(struct mlx5_flow_handle))
837e7bfa359SBing Zhao #endif
838e7bfa359SBing Zhao 
839c42f44bdSBing Zhao /** Device flow structure only for DV flow creation. */
840e7bfa359SBing Zhao struct mlx5_flow_dv_workspace {
841c42f44bdSBing Zhao 	uint32_t group; /**< The group index. */
8422d2cef5dSLi Zhang 	uint32_t table_id; /**< Flow table identifier. */
843c42f44bdSBing Zhao 	uint8_t transfer; /**< 1 if the flow is E-Switch flow. */
844c42f44bdSBing Zhao 	int actions_n; /**< number of actions. */
845c42f44bdSBing Zhao 	void *actions[MLX5_DV_MAX_NUMBER_OF_ACTIONS]; /**< Action list. */
846014d1cbeSSuanming Mou 	struct mlx5_flow_dv_encap_decap_resource *encap_decap;
847014d1cbeSSuanming Mou 	/**< Pointer to encap/decap resource in cache. */
8488acf8ac9SSuanming Mou 	struct mlx5_flow_dv_push_vlan_action_resource *push_vlan_res;
8498acf8ac9SSuanming Mou 	/**< Pointer to push VLAN action resource in cache. */
8505f114269SSuanming Mou 	struct mlx5_flow_dv_tag_resource *tag_resource;
8517ac99475SSuanming Mou 	/**< pointer to the tag action. */
852f3faf9eaSSuanming Mou 	struct mlx5_flow_dv_port_id_action_resource *port_id_action;
853f3faf9eaSSuanming Mou 	/**< Pointer to port ID action resource. */
8547ac99475SSuanming Mou 	struct mlx5_flow_dv_jump_tbl_resource *jump;
8557ac99475SSuanming Mou 	/**< Pointer to the jump action resource. */
856c42f44bdSBing Zhao 	struct mlx5_flow_dv_match_params value;
857c42f44bdSBing Zhao 	/**< Holds the value that the packet is compared to. */
858b4c0ddbfSJiawei Wang 	struct mlx5_flow_dv_sample_resource *sample_res;
859b4c0ddbfSJiawei Wang 	/**< Pointer to the sample action resource. */
86000c10c22SJiawei Wang 	struct mlx5_flow_dv_dest_array_resource *dest_array_res;
86100c10c22SJiawei Wang 	/**< Pointer to the destination array resource. */
862c42f44bdSBing Zhao };
863c42f44bdSBing Zhao 
864f1ae0b35SOphir Munk #ifdef HAVE_INFINIBAND_VERBS_H
865e7bfa359SBing Zhao /*
866e7bfa359SBing Zhao  * Maximal Verbs flow specifications & actions size.
867e7bfa359SBing Zhao  * Some elements are mutually exclusive, but enough space should be allocated.
868e7bfa359SBing Zhao  * Tunnel cases: 1. Max 2 Ethernet + IP(v6 len > v4 len) + TCP/UDP headers.
869e7bfa359SBing Zhao  *               2. One tunnel header (exception: GRE + MPLS),
870e7bfa359SBing Zhao  *                  SPEC length: GRE == tunnel.
871e7bfa359SBing Zhao  * Actions: 1. 1 Mark OR Flag.
872e7bfa359SBing Zhao  *          2. 1 Drop (if any).
873e7bfa359SBing Zhao  *          3. No limitation for counters, but it makes no sense to support too
874e7bfa359SBing Zhao  *             many counters in a single device flow.
875e7bfa359SBing Zhao  */
876e7bfa359SBing Zhao #ifdef HAVE_IBV_DEVICE_MPLS_SUPPORT
877e7bfa359SBing Zhao #define MLX5_VERBS_MAX_SPEC_SIZE \
878e7bfa359SBing Zhao 		( \
879e7bfa359SBing Zhao 			(2 * (sizeof(struct ibv_flow_spec_eth) + \
880e7bfa359SBing Zhao 			      sizeof(struct ibv_flow_spec_ipv6) + \
881e7bfa359SBing Zhao 			      sizeof(struct ibv_flow_spec_tcp_udp)) + \
882e7bfa359SBing Zhao 			sizeof(struct ibv_flow_spec_gre) + \
883e7bfa359SBing Zhao 			sizeof(struct ibv_flow_spec_mpls)) \
884e7bfa359SBing Zhao 		)
885e7bfa359SBing Zhao #else
886e7bfa359SBing Zhao #define MLX5_VERBS_MAX_SPEC_SIZE \
887e7bfa359SBing Zhao 		( \
888e7bfa359SBing Zhao 			(2 * (sizeof(struct ibv_flow_spec_eth) + \
889e7bfa359SBing Zhao 			      sizeof(struct ibv_flow_spec_ipv6) + \
890e7bfa359SBing Zhao 			      sizeof(struct ibv_flow_spec_tcp_udp)) + \
891e7bfa359SBing Zhao 			sizeof(struct ibv_flow_spec_tunnel)) \
892e7bfa359SBing Zhao 		)
893e7bfa359SBing Zhao #endif
894e7bfa359SBing Zhao 
895e7bfa359SBing Zhao #if defined(HAVE_IBV_DEVICE_COUNTERS_SET_V42) || \
896e7bfa359SBing Zhao 	defined(HAVE_IBV_DEVICE_COUNTERS_SET_V45)
897e7bfa359SBing Zhao #define MLX5_VERBS_MAX_ACT_SIZE \
898e7bfa359SBing Zhao 		( \
899e7bfa359SBing Zhao 			sizeof(struct ibv_flow_spec_action_tag) + \
900e7bfa359SBing Zhao 			sizeof(struct ibv_flow_spec_action_drop) + \
901e7bfa359SBing Zhao 			sizeof(struct ibv_flow_spec_counter_action) * 4 \
902e7bfa359SBing Zhao 		)
903e7bfa359SBing Zhao #else
904e7bfa359SBing Zhao #define MLX5_VERBS_MAX_ACT_SIZE \
905e7bfa359SBing Zhao 		( \
906e7bfa359SBing Zhao 			sizeof(struct ibv_flow_spec_action_tag) + \
907e7bfa359SBing Zhao 			sizeof(struct ibv_flow_spec_action_drop) \
908e7bfa359SBing Zhao 		)
909e7bfa359SBing Zhao #endif
910e7bfa359SBing Zhao 
911e7bfa359SBing Zhao #define MLX5_VERBS_MAX_SPEC_ACT_SIZE \
912e7bfa359SBing Zhao 		(MLX5_VERBS_MAX_SPEC_SIZE + MLX5_VERBS_MAX_ACT_SIZE)
913e7bfa359SBing Zhao 
914c42f44bdSBing Zhao /** Device flow structure only for Verbs flow creation. */
915e7bfa359SBing Zhao struct mlx5_flow_verbs_workspace {
916c42f44bdSBing Zhao 	unsigned int size; /**< Size of the attribute. */
917e7bfa359SBing Zhao 	struct ibv_flow_attr attr; /**< Verbs flow attribute buffer. */
918e7bfa359SBing Zhao 	uint8_t specs[MLX5_VERBS_MAX_SPEC_ACT_SIZE];
919e7bfa359SBing Zhao 	/**< Specifications & actions buffer of verbs flow. */
920c42f44bdSBing Zhao };
921f1ae0b35SOphir Munk #endif /* HAVE_INFINIBAND_VERBS_H */
922c42f44bdSBing Zhao 
923ae2927cdSJiawei Wang #define MLX5_SCALE_FLOW_GROUP_BIT 0
924ae2927cdSJiawei Wang #define MLX5_SCALE_JUMP_FLOW_GROUP_BIT 1
925ae2927cdSJiawei Wang 
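/*
 * Illustrative sketch: the two bit positions above are combined into the
 * 2-bit skip_scale field of struct mlx5_flow (defined later in this file).
 * The helper below is hypothetical.
 */
static __rte_always_inline uint8_t
mlx5_flow_skip_scale_bits(bool skip_flow_group, bool skip_jump_group)
{
	return ((uint8_t)skip_flow_group << MLX5_SCALE_FLOW_GROUP_BIT) |
	       ((uint8_t)skip_jump_group << MLX5_SCALE_JUMP_FLOW_GROUP_BIT);
}
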
926e7bfa359SBing Zhao /** Maximal number of device sub-flows supported. */
927e7bfa359SBing Zhao #define MLX5_NUM_MAX_DEV_FLOWS 32
928e7bfa359SBing Zhao 
9298c5a231bSGregory Etelson /**
9308c5a231bSGregory Etelson  * tunnel offload rules type
9318c5a231bSGregory Etelson  */
9328c5a231bSGregory Etelson enum mlx5_tof_rule_type {
9338c5a231bSGregory Etelson 	MLX5_TUNNEL_OFFLOAD_NONE = 0,
9348c5a231bSGregory Etelson 	MLX5_TUNNEL_OFFLOAD_SET_RULE,
9358c5a231bSGregory Etelson 	MLX5_TUNNEL_OFFLOAD_MATCH_RULE,
9368c5a231bSGregory Etelson 	MLX5_TUNNEL_OFFLOAD_MISS_RULE,
9378c5a231bSGregory Etelson };
9388c5a231bSGregory Etelson 
939c42f44bdSBing Zhao /** Device flow structure. */
9409ade91dfSJiawei Wang __extension__
941c42f44bdSBing Zhao struct mlx5_flow {
942c42f44bdSBing Zhao 	struct rte_flow *flow; /**< Pointer to the main flow. */
943fa2d01c8SDong Zhou 	uint32_t flow_idx; /**< The memory pool index to the main flow. */
9446ad7cfaaSDekel Peled 	uint64_t hash_fields; /**< Hash Rx queue hash fields. */
945488d13abSSuanming Mou 	uint64_t act_flags;
946488d13abSSuanming Mou 	/**< Bit-fields of detected actions, see MLX5_FLOW_ACTION_*. */
947b67b4ecbSDekel Peled 	bool external; /**< true if the flow is created external to PMD. */
9489ade91dfSJiawei Wang 	uint8_t ingress:1; /**< 1 if the flow is ingress. */
949ae2927cdSJiawei Wang 	uint8_t skip_scale:2;
9500e04e1e2SXueming Li 	uint8_t symmetric_hash_function:1;
951ae2927cdSJiawei Wang 	/**
952ae2927cdSJiawei Wang 	 * Each bit, when set to 1, means the corresponding flow group is not
953ae2927cdSJiawei Wang 	 * scaled with the factor.
954ae2927cdSJiawei Wang 	 * If bit 0 is set, scaling of the original flow group is skipped;
955ae2927cdSJiawei Wang 	 * if bit 1 is set, scaling of the jump flow group is skipped when the
956ae2927cdSJiawei Wang 	 * flow has a jump action.
957ae2927cdSJiawei Wang 	 * 00: Scale both groups with the factor (default value).
958ae2927cdSJiawei Wang 	 * 01: Skip scaling the flow group, scale the group of the jump
959ae2927cdSJiawei Wang 	 * action.
960ae2927cdSJiawei Wang 	 * 10: Scale the flow group with the factor, skip scaling the group
961ae2927cdSJiawei Wang 	 * of the jump action.
962ae2927cdSJiawei Wang 	 * 11: Skip scaling both the flow group and the jump group.
963ae2927cdSJiawei Wang 	 */
964c42f44bdSBing Zhao 	union {
965f1ae0b35SOphir Munk #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
966e7bfa359SBing Zhao 		struct mlx5_flow_dv_workspace dv;
967c42f44bdSBing Zhao #endif
968f1ae0b35SOphir Munk #ifdef HAVE_INFINIBAND_VERBS_H
969e7bfa359SBing Zhao 		struct mlx5_flow_verbs_workspace verbs;
970f1ae0b35SOphir Munk #endif
971c42f44bdSBing Zhao 	};
972e7bfa359SBing Zhao 	struct mlx5_flow_handle *handle;
973b88341caSSuanming Mou 	uint32_t handle_idx; /* Index of the mlx5 flow handle memory. */
9744ec6360dSGregory Etelson 	const struct mlx5_flow_tunnel *tunnel;
9758c5a231bSGregory Etelson 	enum mlx5_tof_rule_type tof_type;
97684c406e7SOri Kam };
97784c406e7SOri Kam 
97833e01809SSuanming Mou /* Flow meter state. */
97933e01809SSuanming Mou #define MLX5_FLOW_METER_DISABLE 0
98033e01809SSuanming Mou #define MLX5_FLOW_METER_ENABLE 1
98133e01809SSuanming Mou 
98229efa63aSLi Zhang #define MLX5_ASO_WQE_CQE_RESPONSE_DELAY 10u
98329efa63aSLi Zhang #define MLX5_MTR_POLL_WQE_CQE_TIMES 100000u
984e6100c7bSLi Zhang 
985ebaf1b31SBing Zhao #define MLX5_CT_POLL_WQE_CQE_TIMES MLX5_MTR_POLL_WQE_CQE_TIMES
986ebaf1b31SBing Zhao 
9873bd26b23SSuanming Mou #define MLX5_MAN_WIDTH 8
988e6100c7bSLi Zhang /* Legacy Meter parameter structure. */
989e6100c7bSLi Zhang struct mlx5_legacy_flow_meter {
990e6100c7bSLi Zhang 	struct mlx5_flow_meter_info fm;
991e6100c7bSLi Zhang 	/* Must be the first in struct. */
992e6100c7bSLi Zhang 	TAILQ_ENTRY(mlx5_legacy_flow_meter) next;
9933f373f35SSuanming Mou 	/**< Pointer to the next flow meter structure. */
99444432018SLi Zhang 	uint32_t idx;
99544432018SLi Zhang 	/* Index to meter object. */
9963bd26b23SSuanming Mou };
9973bd26b23SSuanming Mou 
9984ec6360dSGregory Etelson #define MLX5_MAX_TUNNELS 256
9994ec6360dSGregory Etelson #define MLX5_TNL_MISS_RULE_PRIORITY 3
10004ec6360dSGregory Etelson #define MLX5_TNL_MISS_FDB_JUMP_GRP  0x1234faac
10014ec6360dSGregory Etelson 
10024ec6360dSGregory Etelson /*
10034ec6360dSGregory Etelson  * When tunnel offload is active, all JUMP group ids are converted
10044ec6360dSGregory Etelson  * using the same method. That conversion is applied both to tunnel and
10054ec6360dSGregory Etelson  * regular rule types.
10064ec6360dSGregory Etelson  * Group ids used in tunnel rules are relative to their tunnel (!).
10074ec6360dSGregory Etelson  * An application can create a number of steering rules that use the same
10084ec6360dSGregory Etelson  * tunnel, with a different group id in each rule.
10094ec6360dSGregory Etelson  * Each tunnel stores its groups internally in the PMD tunnel object.
10104ec6360dSGregory Etelson  * Groups used in regular rules do not belong to any tunnel and are stored
10114ec6360dSGregory Etelson  * in the tunnel hub.
10124ec6360dSGregory Etelson  */
10134ec6360dSGregory Etelson 
10144ec6360dSGregory Etelson struct mlx5_flow_tunnel {
10154ec6360dSGregory Etelson 	LIST_ENTRY(mlx5_flow_tunnel) chain;
10164ec6360dSGregory Etelson 	struct rte_flow_tunnel app_tunnel;	/**< App tunnel copy. */
10174ec6360dSGregory Etelson 	uint32_t tunnel_id;			/**< Unique tunnel ID. */
10184ec6360dSGregory Etelson 	uint32_t refctn;			/**< Reference counter. */
10194ec6360dSGregory Etelson 	struct rte_flow_action action;
10204ec6360dSGregory Etelson 	struct rte_flow_item item;
10214ec6360dSGregory Etelson 	struct mlx5_hlist *groups;		/**< Tunnel groups. */
10224ec6360dSGregory Etelson };
10234ec6360dSGregory Etelson 
10244ec6360dSGregory Etelson /** PMD tunnel related context */
10254ec6360dSGregory Etelson struct mlx5_flow_tunnel_hub {
1026868d2e34SGregory Etelson 	/* Tunnels list
1027868d2e34SGregory Etelson 	 * Access to the list MUST be MT protected
1028868d2e34SGregory Etelson 	 */
10294ec6360dSGregory Etelson 	LIST_HEAD(, mlx5_flow_tunnel) tunnels;
1030868d2e34SGregory Etelson 	 /* protect access to the tunnels list */
1031868d2e34SGregory Etelson 	rte_spinlock_t sl;
10324ec6360dSGregory Etelson 	struct mlx5_hlist *groups;		/**< Non-tunnel groups. */
10334ec6360dSGregory Etelson };
10344ec6360dSGregory Etelson 
10354ec6360dSGregory Etelson /* convert jump group to flow table ID in tunnel rules */
10364ec6360dSGregory Etelson struct tunnel_tbl_entry {
1037961b6774SMatan Azrad 	struct mlx5_list_entry hash;
10384ec6360dSGregory Etelson 	uint32_t flow_table;
1039f5b0aed2SSuanming Mou 	uint32_t tunnel_id;
1040f5b0aed2SSuanming Mou 	uint32_t group;
10414ec6360dSGregory Etelson };
10424ec6360dSGregory Etelson 
10434ec6360dSGregory Etelson static inline uint32_t
10444ec6360dSGregory Etelson tunnel_id_to_flow_tbl(uint32_t id)
10454ec6360dSGregory Etelson {
10464ec6360dSGregory Etelson 	return id | (1u << 16);
10474ec6360dSGregory Etelson }
10484ec6360dSGregory Etelson 
10494ec6360dSGregory Etelson static inline uint32_t
10504ec6360dSGregory Etelson tunnel_flow_tbl_to_id(uint32_t flow_tbl)
10514ec6360dSGregory Etelson {
10524ec6360dSGregory Etelson 	return flow_tbl & ~(1u << 16);
10534ec6360dSGregory Etelson }
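
/*
 * Illustrative example (added for clarity, not part of the original code):
 * with the helpers above, a tunnel-relative JUMP group id 5 is encoded as
 * flow table id tunnel_id_to_flow_tbl(5) == 0x10005, and
 * tunnel_flow_tbl_to_id(0x10005) recovers the value 5.
 */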
10544ec6360dSGregory Etelson 
10554ec6360dSGregory Etelson union tunnel_tbl_key {
10564ec6360dSGregory Etelson 	uint64_t val;
10574ec6360dSGregory Etelson 	struct {
10584ec6360dSGregory Etelson 		uint32_t tunnel_id;
10594ec6360dSGregory Etelson 		uint32_t group;
10604ec6360dSGregory Etelson 	};
10614ec6360dSGregory Etelson };
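
/*
 * A minimal sketch (an assumption, not taken verbatim from this file) of how
 * such a key could be built for a hash-list lookup of a tunnel group:
 *
 *   union tunnel_tbl_key key = {
 *           .tunnel_id = tunnel->tunnel_id,
 *           .group = group,
 *   };
 *   (void)key.val; // the single 64-bit value used as the lookup key
 */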
10624ec6360dSGregory Etelson 
10634ec6360dSGregory Etelson static inline struct mlx5_flow_tunnel_hub *
10644ec6360dSGregory Etelson mlx5_tunnel_hub(struct rte_eth_dev *dev)
10654ec6360dSGregory Etelson {
10664ec6360dSGregory Etelson 	struct mlx5_priv *priv = dev->data->dev_private;
10674ec6360dSGregory Etelson 	return priv->sh->tunnel_hub;
10684ec6360dSGregory Etelson }
10694ec6360dSGregory Etelson 
10704ec6360dSGregory Etelson static inline bool
10718c5a231bSGregory Etelson is_tunnel_offload_active(const struct rte_eth_dev *dev)
10724ec6360dSGregory Etelson {
1073bc1d90a3SGregory Etelson #ifdef HAVE_IBV_FLOW_DV_SUPPORT
10748c5a231bSGregory Etelson 	const struct mlx5_priv *priv = dev->data->dev_private;
1075a13ec19cSMichael Baum 	return !!priv->sh->config.dv_miss_info;
1076bc1d90a3SGregory Etelson #else
1077bc1d90a3SGregory Etelson 	RTE_SET_USED(dev);
1078bc1d90a3SGregory Etelson 	return false;
1079bc1d90a3SGregory Etelson #endif
10804ec6360dSGregory Etelson }
10814ec6360dSGregory Etelson 
10824ec6360dSGregory Etelson static inline bool
10838c5a231bSGregory Etelson is_flow_tunnel_match_rule(enum mlx5_tof_rule_type tof_rule_type)
10844ec6360dSGregory Etelson {
10858c5a231bSGregory Etelson 	return tof_rule_type == MLX5_TUNNEL_OFFLOAD_MATCH_RULE;
10864ec6360dSGregory Etelson }
10874ec6360dSGregory Etelson 
10884ec6360dSGregory Etelson static inline bool
10898c5a231bSGregory Etelson is_flow_tunnel_steer_rule(enum mlx5_tof_rule_type tof_rule_type)
10904ec6360dSGregory Etelson {
10918c5a231bSGregory Etelson 	return tof_rule_type == MLX5_TUNNEL_OFFLOAD_SET_RULE;
10924ec6360dSGregory Etelson }
10934ec6360dSGregory Etelson 
10944ec6360dSGregory Etelson static inline const struct mlx5_flow_tunnel *
10954ec6360dSGregory Etelson flow_actions_to_tunnel(const struct rte_flow_action actions[])
10964ec6360dSGregory Etelson {
10974ec6360dSGregory Etelson 	return actions[0].conf;
10984ec6360dSGregory Etelson }
10994ec6360dSGregory Etelson 
11004ec6360dSGregory Etelson static inline const struct mlx5_flow_tunnel *
11014ec6360dSGregory Etelson flow_items_to_tunnel(const struct rte_flow_item items[])
11024ec6360dSGregory Etelson {
11034ec6360dSGregory Etelson 	return items[0].spec;
11044ec6360dSGregory Etelson }
11054ec6360dSGregory Etelson 
11060f4aa72bSSuanming Mou /**
1107c23626f2SMichael Baum  * Gets the tag array index given for the RTE_FLOW_FIELD_TAG type.
1108c23626f2SMichael Baum  *
1109c23626f2SMichael Baum  * In the old API the value was provided in the "level" field, but in the
1110c23626f2SMichael Baum  * new API it is provided in the "tag_index" field. Since the encapsulation
1111c23626f2SMichael Baum  * level is not relevant for metadata, the tag array index can still be
1112c23626f2SMichael Baum  * provided in "level" for backwards compatibility.
1113c23626f2SMichael Baum  *
1114c23626f2SMichael Baum  * @param[in] data
1115c23626f2SMichael Baum  *   Pointer to tag modify data structure.
1116c23626f2SMichael Baum  *
1117c23626f2SMichael Baum  * @return
1118c23626f2SMichael Baum  *   Tag array index.
1119c23626f2SMichael Baum  */
1120c23626f2SMichael Baum static inline uint8_t
1121c23626f2SMichael Baum flow_tag_index_get(const struct rte_flow_action_modify_data *data)
1122c23626f2SMichael Baum {
1123c23626f2SMichael Baum 	return data->tag_index ? data->tag_index : data->level;
1124c23626f2SMichael Baum }
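
/*
 * For example (illustrative only): for a MODIFY_FIELD action on
 * RTE_FLOW_FIELD_TAG, both { .tag_index = 3 } (new API) and
 * { .level = 3 } (legacy API) make flow_tag_index_get() return 3;
 * when both are set, "tag_index" takes precedence.
 */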
1125c23626f2SMichael Baum 
1126c23626f2SMichael Baum /**
11270f4aa72bSSuanming Mou  * Fetch 1, 2, 3 or 4 byte field from the byte array
11280f4aa72bSSuanming Mou  * and return as unsigned integer in host-endian format.
11290f4aa72bSSuanming Mou  *
11300f4aa72bSSuanming Mou  * @param[in] data
11310f4aa72bSSuanming Mou  *   Pointer to data array.
11320f4aa72bSSuanming Mou  * @param[in] size
11330f4aa72bSSuanming Mou  *   Size of field to extract.
11340f4aa72bSSuanming Mou  *
11350f4aa72bSSuanming Mou  * @return
11360f4aa72bSSuanming Mou  *   Converted field in host-endian format.
11370f4aa72bSSuanming Mou  */
11380f4aa72bSSuanming Mou static inline uint32_t
11390f4aa72bSSuanming Mou flow_dv_fetch_field(const uint8_t *data, uint32_t size)
11400f4aa72bSSuanming Mou {
11410f4aa72bSSuanming Mou 	uint32_t ret;
11420f4aa72bSSuanming Mou 
11430f4aa72bSSuanming Mou 	switch (size) {
11440f4aa72bSSuanming Mou 	case 1:
11450f4aa72bSSuanming Mou 		ret = *data;
11460f4aa72bSSuanming Mou 		break;
11470f4aa72bSSuanming Mou 	case 2:
11480f4aa72bSSuanming Mou 		ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
11490f4aa72bSSuanming Mou 		break;
11500f4aa72bSSuanming Mou 	case 3:
11510f4aa72bSSuanming Mou 		ret = rte_be_to_cpu_16(*(const unaligned_uint16_t *)data);
11520f4aa72bSSuanming Mou 		ret = (ret << 8) | *(data + sizeof(uint16_t));
11530f4aa72bSSuanming Mou 		break;
11540f4aa72bSSuanming Mou 	case 4:
11550f4aa72bSSuanming Mou 		ret = rte_be_to_cpu_32(*(const unaligned_uint32_t *)data);
11560f4aa72bSSuanming Mou 		break;
11570f4aa72bSSuanming Mou 	default:
11580f4aa72bSSuanming Mou 		MLX5_ASSERT(false);
11590f4aa72bSSuanming Mou 		ret = 0;
11600f4aa72bSSuanming Mou 		break;
11610f4aa72bSSuanming Mou 	}
11620f4aa72bSSuanming Mou 	return ret;
11630f4aa72bSSuanming Mou }
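
/*
 * For example, with data[] = { 0x12, 0x34, 0x56 } and size == 3 the function
 * returns 0x123456: the first two bytes are read as a big-endian 16-bit word
 * (0x1234), shifted left by 8 and OR-ed with the third byte.
 */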
11640f4aa72bSSuanming Mou 
11653c37110eSMichael Baum static inline bool
11663c37110eSMichael Baum flow_modify_field_support_tag_array(enum rte_flow_field_id field)
11673c37110eSMichael Baum {
11689e21f6cdSBing Zhao 	switch ((int)field) {
11693c37110eSMichael Baum 	case RTE_FLOW_FIELD_TAG:
11704580dcecSMichael Baum 	case RTE_FLOW_FIELD_MPLS:
11719e21f6cdSBing Zhao 	case MLX5_RTE_FLOW_FIELD_META_REG:
11723c37110eSMichael Baum 		return true;
11733c37110eSMichael Baum 	default:
11743c37110eSMichael Baum 		break;
11753c37110eSMichael Baum 	}
11763c37110eSMichael Baum 	return false;
11773c37110eSMichael Baum }
11783c37110eSMichael Baum 
11790f4aa72bSSuanming Mou struct field_modify_info {
11800f4aa72bSSuanming Mou 	uint32_t size; /* Size of field in protocol header, in bytes. */
11810f4aa72bSSuanming Mou 	uint32_t offset; /* Offset of field in protocol header, in bytes. */
11820f4aa72bSSuanming Mou 	enum mlx5_modification_field id;
11836b6c0b8dSRongwei Liu 	uint32_t shift;
11846b6c0b8dSRongwei Liu 	uint8_t is_flex; /* Temporary indicator for the flex item modify field workaround. */
11850f4aa72bSSuanming Mou };
11860f4aa72bSSuanming Mou 
118775a00812SSuanming Mou /* HW steering flow attributes. */
118875a00812SSuanming Mou struct mlx5_flow_attr {
118975a00812SSuanming Mou 	uint32_t port_id; /* Port index. */
119075a00812SSuanming Mou 	uint32_t group; /* Flow group. */
119175a00812SSuanming Mou 	uint32_t priority; /* Original Priority. */
119275a00812SSuanming Mou 	/* rss level, used by priority adjustment. */
119375a00812SSuanming Mou 	uint32_t rss_level;
119475a00812SSuanming Mou 	/* Action flags, used by priority adjustment. */
119575a00812SSuanming Mou 	uint32_t act_flags;
119675a00812SSuanming Mou 	uint32_t tbl_type; /* Flow table type. */
119775a00812SSuanming Mou };
119875a00812SSuanming Mou 
119984c406e7SOri Kam /* Flow structure. */
120084c406e7SOri Kam struct rte_flow {
1201b88341caSSuanming Mou 	uint32_t dev_handles;
1202e7bfa359SBing Zhao 	/**< Device flow handles that are part of the flow. */
1203b4edeaf3SSuanming Mou 	uint32_t type:2;
12040136df99SSuanming Mou 	uint32_t drv_type:2; /**< Driver type. */
12054ec6360dSGregory Etelson 	uint32_t tunnel:1;
1206e6100c7bSLi Zhang 	uint32_t meter:24; /**< Holds flow meter id. */
12072d084f69SBing Zhao 	uint32_t indirect_type:2; /**< Indirect action type. */
12080136df99SSuanming Mou 	uint32_t rix_mreg_copy;
12090136df99SSuanming Mou 	/**< Index to metadata register copy table resource. */
12100136df99SSuanming Mou 	uint32_t counter; /**< Holds flow counter. */
12114ec6360dSGregory Etelson 	uint32_t tunnel_id;  /**< Tunnel id */
12122d084f69SBing Zhao 	union {
1213f935ed4bSDekel Peled 		uint32_t age; /**< Holds ASO age bit index. */
12142d084f69SBing Zhao 		uint32_t ct; /**< Holds ASO CT index. */
12152d084f69SBing Zhao 	};
1216f15f0c38SShiri Kuzin 	uint32_t geneve_tlv_option; /**< Holds Geneve TLV option id. */
12170136df99SSuanming Mou } __rte_packed;
12182720f833SYongseok Koh 
121904a4de75SMichael Baum /*
122004a4de75SMichael Baum  * HWS COUNTER ID's layout
122104a4de75SMichael Baum  *       3                   2                   1                   0
122204a4de75SMichael Baum  *     1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
122304a4de75SMichael Baum  *    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
122404a4de75SMichael Baum  *    |  T  |     | D |                                               |
122504a4de75SMichael Baum  *    ~  Y  |     | C |                    IDX                        ~
122604a4de75SMichael Baum  *    |  P  |     | S |                                               |
122704a4de75SMichael Baum  *    +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
122804a4de75SMichael Baum  *
122904a4de75SMichael Baum  *    Bit 31:29 = TYPE = MLX5_INDIRECT_ACTION_TYPE_COUNT = b'10
123004a4de75SMichael Baum  *    Bit 25:24 = DCS index
123104a4de75SMichael Baum  *    Bit 23:00 = IDX in the DCS bulk this counter belongs to.
123204a4de75SMichael Baum  */
123304a4de75SMichael Baum typedef uint32_t cnt_id_t;
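
/*
 * Example decoding (illustrative): cnt_id 0x40000005 has
 * TYPE = 0x40000005 >> 29 = b'10 (MLX5_INDIRECT_ACTION_TYPE_COUNT),
 * DCS index = (0x40000005 >> 24) & 0x3 = 0 and IDX = 0x000005.
 */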
123404a4de75SMichael Baum 
123542431df9SSuanming Mou #if defined(HAVE_IBV_FLOW_DV_SUPPORT) || !defined(HAVE_INFINIBAND_VERBS_H)
123642431df9SSuanming Mou 
123722681deeSAlex Vesker #ifdef PEDANTIC
123822681deeSAlex Vesker #pragma GCC diagnostic ignored "-Wpedantic"
123922681deeSAlex Vesker #endif
124022681deeSAlex Vesker 
1241c40c061aSSuanming Mou /* HWS flow struct. */
1242c40c061aSSuanming Mou struct rte_flow_hw {
1243c40c061aSSuanming Mou 	uint32_t idx; /* Flow index from indexed pool. */
124463296851SAlexander Kozyrev 	uint32_t res_idx; /* Resource index from indexed pool. */
1245f13fab23SSuanming Mou 	uint32_t fate_type; /* Fate action type. */
1246f13fab23SSuanming Mou 	union {
1247f13fab23SSuanming Mou 		/* Jump action. */
1248f13fab23SSuanming Mou 		struct mlx5_hw_jump_action *jump;
12493a2f674bSSuanming Mou 		struct mlx5_hrxq *hrxq; /* TIR action. */
1250f13fab23SSuanming Mou 	};
1251c40c061aSSuanming Mou 	struct rte_flow_template_table *table; /* The table the flow was allocated from. */
125263296851SAlexander Kozyrev 	uint8_t mt_idx; /* Pattern template index. */
125304a4de75SMichael Baum 	uint32_t age_idx; /* Age action index. */
125404a4de75SMichael Baum 	cnt_id_t cnt_id; /* Counter id. */
125548fbb0e9SAlexander Kozyrev 	uint32_t mtr_id; /* Meter id. */
125660db7673SAlexander Kozyrev 	uint32_t rule_idx; /* Rule index when inserting by index. */
125722681deeSAlex Vesker 	uint8_t rule[0]; /* HWS layer data struct. */
1258c40c061aSSuanming Mou } __rte_packed;
1259c40c061aSSuanming Mou 
126022681deeSAlex Vesker #ifdef PEDANTIC
126122681deeSAlex Vesker #pragma GCC diagnostic error "-Wpedantic"
126222681deeSAlex Vesker #endif
126322681deeSAlex Vesker 
1264e26f50adSGregory Etelson struct mlx5_action_construct_data;
1265e26f50adSGregory Etelson typedef int
1266e26f50adSGregory Etelson (*indirect_list_callback_t)(struct rte_eth_dev *,
1267e26f50adSGregory Etelson 			    const struct mlx5_action_construct_data *,
1268e26f50adSGregory Etelson 			    const struct rte_flow_action *,
1269e26f50adSGregory Etelson 			    struct mlx5dr_rule_action *);
12703564e928SGregory Etelson 
1271f13fab23SSuanming Mou /* rte flow action translate to DR action struct. */
1272f13fab23SSuanming Mou struct mlx5_action_construct_data {
1273f13fab23SSuanming Mou 	LIST_ENTRY(mlx5_action_construct_data) next;
1274f13fab23SSuanming Mou 	/* Ensure the action types are matched. */
1275f13fab23SSuanming Mou 	int type;
1276f13fab23SSuanming Mou 	uint32_t idx;  /* Data index. */
1277f13fab23SSuanming Mou 	uint16_t action_src; /* rte_flow_action src offset. */
1278f13fab23SSuanming Mou 	uint16_t action_dst; /* mlx5dr_rule_action dst offset. */
1279e26f50adSGregory Etelson 	indirect_list_callback_t indirect_list_cb;
12807ab3962dSSuanming Mou 	union {
12817ab3962dSSuanming Mou 		struct {
1282fe3620aaSSuanming Mou 			/* encap data len. */
1283fe3620aaSSuanming Mou 			uint16_t len;
1284fe3620aaSSuanming Mou 		} encap;
1285fe3620aaSSuanming Mou 		struct {
12860f4aa72bSSuanming Mou 			/* Modify header action offset in pattern. */
12870f4aa72bSSuanming Mou 			uint16_t mhdr_cmds_off;
12880f4aa72bSSuanming Mou 			/* Offset in pattern after modify header actions. */
12890f4aa72bSSuanming Mou 			uint16_t mhdr_cmds_end;
12900f4aa72bSSuanming Mou 			/*
12910f4aa72bSSuanming Mou 			 * True if this action is masked and does not need to
12920f4aa72bSSuanming Mou 			 * be generated.
12930f4aa72bSSuanming Mou 			 */
12940f4aa72bSSuanming Mou 			bool shared;
12950f4aa72bSSuanming Mou 			/*
12960f4aa72bSSuanming Mou 			 * Modified field definitions in dst field (SET, ADD)
12970f4aa72bSSuanming Mou 			 * or src field (COPY).
12980f4aa72bSSuanming Mou 			 */
12990f4aa72bSSuanming Mou 			struct field_modify_info field[MLX5_ACT_MAX_MOD_FIELDS];
13000f4aa72bSSuanming Mou 			/* Modified field definitions in dst field (COPY). */
13010f4aa72bSSuanming Mou 			struct field_modify_info dcopy[MLX5_ACT_MAX_MOD_FIELDS];
13020f4aa72bSSuanming Mou 			/*
13030f4aa72bSSuanming Mou 			 * Masks applied to field values to generate
13040f4aa72bSSuanming Mou 			 * PRM actions.
13050f4aa72bSSuanming Mou 			 */
13060f4aa72bSSuanming Mou 			uint32_t mask[MLX5_ACT_MAX_MOD_FIELDS];
13070f4aa72bSSuanming Mou 		} modify_header;
13080f4aa72bSSuanming Mou 		struct {
13090e04e1e2SXueming Li 			bool symmetric_hash_function; /* Symmetric RSS hash */
13107ab3962dSSuanming Mou 			uint64_t types; /* RSS hash types. */
13117ab3962dSSuanming Mou 			uint32_t level; /* RSS level. */
13127ab3962dSSuanming Mou 			uint32_t idx; /* Shared action index. */
13137ab3962dSSuanming Mou 		} shared_rss;
13144d368e1dSXiaoyu Min 		struct {
131504a4de75SMichael Baum 			cnt_id_t id;
13164d368e1dSXiaoyu Min 		} shared_counter;
131748fbb0e9SAlexander Kozyrev 		struct {
131848fbb0e9SAlexander Kozyrev 			uint32_t id;
1319e26f50adSGregory Etelson 			uint32_t conf_masked:1;
132048fbb0e9SAlexander Kozyrev 		} shared_meter;
13217ab3962dSSuanming Mou 	};
1322f13fab23SSuanming Mou };
1323f13fab23SSuanming Mou 
132442431df9SSuanming Mou /* Flow item template struct. */
132542431df9SSuanming Mou struct rte_flow_pattern_template {
132642431df9SSuanming Mou 	LIST_ENTRY(rte_flow_pattern_template) next;
132742431df9SSuanming Mou 	/* Template attributes. */
132842431df9SSuanming Mou 	struct rte_flow_pattern_template_attr attr;
132942431df9SSuanming Mou 	struct mlx5dr_match_template *mt; /* mlx5 match template. */
13307ab3962dSSuanming Mou 	uint64_t item_flags; /* Item layer flags. */
1331483181f7SDariusz Sosnowski 	uint64_t orig_item_nb; /* Number of pattern items provided by the user (with END item). */
133242431df9SSuanming Mou 	uint32_t refcnt;  /* Reference counter. */
13331939eb6fSDariusz Sosnowski 	/*
13341939eb6fSDariusz Sosnowski 	 * If true, then rule pattern should be prepended with
13351939eb6fSDariusz Sosnowski 	 * represented_port pattern item.
13361939eb6fSDariusz Sosnowski 	 */
13371939eb6fSDariusz Sosnowski 	bool implicit_port;
1338483181f7SDariusz Sosnowski 	/*
1339483181f7SDariusz Sosnowski 	 * If true, then rule pattern should be prepended with
1340483181f7SDariusz Sosnowski 	 * tag pattern item for representor matching.
1341483181f7SDariusz Sosnowski 	 */
1342483181f7SDariusz Sosnowski 	bool implicit_tag;
13438c0ca752SRongwei Liu 	uint8_t flex_item; /* flex item index. */
134442431df9SSuanming Mou };
134542431df9SSuanming Mou 
1346836b5c9bSSuanming Mou /* Flow action template struct. */
1347836b5c9bSSuanming Mou struct rte_flow_actions_template {
1348836b5c9bSSuanming Mou 	LIST_ENTRY(rte_flow_actions_template) next;
1349836b5c9bSSuanming Mou 	/* Template attributes. */
1350836b5c9bSSuanming Mou 	struct rte_flow_actions_template_attr attr;
1351836b5c9bSSuanming Mou 	struct rte_flow_action *actions; /* Cached flow actions. */
1352836b5c9bSSuanming Mou 	struct rte_flow_action *masks; /* Cached action masks. */
1353f1fecffaSDariusz Sosnowski 	struct mlx5dr_action_template *tmpl; /* mlx5dr action template. */
135404a4de75SMichael Baum 	uint64_t action_flags; /* Bit-map of all valid actions in the template. */
1355f1fecffaSDariusz Sosnowski 	uint16_t dr_actions_num; /* Number of DR rule actions. */
1356f1fecffaSDariusz Sosnowski 	uint16_t actions_num; /* Number of flow actions. */
1357ca00eb69SGregory Etelson 	uint16_t *dr_off; /* DR action offset for given rte action offset. */
1358ca00eb69SGregory Etelson 	uint16_t *src_off; /* RTE action displacement from the application template. */
1359f1fecffaSDariusz Sosnowski 	uint16_t reformat_off; /* Offset of DR reformat action. */
13600f4aa72bSSuanming Mou 	uint16_t mhdr_off; /* Offset of DR modify header action. */
1361836b5c9bSSuanming Mou 	uint32_t refcnt; /* Reference counter. */
13626b6c0b8dSRongwei Liu 	uint8_t flex_item; /* flex item index. */
1363836b5c9bSSuanming Mou };
1364836b5c9bSSuanming Mou 
1365d1559d66SSuanming Mou /* Jump action struct. */
1366d1559d66SSuanming Mou struct mlx5_hw_jump_action {
1367d1559d66SSuanming Mou 	/* Action jump from root. */
1368d1559d66SSuanming Mou 	struct mlx5dr_action *root_action;
1369d1559d66SSuanming Mou 	/* HW steering jump action. */
1370d1559d66SSuanming Mou 	struct mlx5dr_action *hws_action;
1371d1559d66SSuanming Mou };
1372d1559d66SSuanming Mou 
1373fe3620aaSSuanming Mou /* Encap decap action struct. */
1374fe3620aaSSuanming Mou struct mlx5_hw_encap_decap_action {
13755e26c99fSRongwei Liu 	struct mlx5_indirect_list indirect;
13765e26c99fSRongwei Liu 	enum mlx5dr_action_type action_type;
1377fe3620aaSSuanming Mou 	struct mlx5dr_action *action; /* Action object. */
13787f6daa49SSuanming Mou 	/* Is header_reformat action shared across flows in table. */
13797f6daa49SSuanming Mou 	bool shared;
1380fe3620aaSSuanming Mou 	size_t data_size; /* Action metadata size. */
1381fe3620aaSSuanming Mou 	uint8_t data[]; /* Action data. */
1382fe3620aaSSuanming Mou };
1383fe3620aaSSuanming Mou 
13840f4aa72bSSuanming Mou #define MLX5_MHDR_MAX_CMD ((MLX5_MAX_MODIFY_NUM) * 2 + 1)
13850f4aa72bSSuanming Mou 
13860f4aa72bSSuanming Mou /* Modify field action struct. */
13870f4aa72bSSuanming Mou struct mlx5_hw_modify_header_action {
13880f4aa72bSSuanming Mou 	/* Reference to DR action */
13890f4aa72bSSuanming Mou 	struct mlx5dr_action *action;
13900f4aa72bSSuanming Mou 	/* Modify header action position in action rule table. */
13910f4aa72bSSuanming Mou 	uint16_t pos;
13920f4aa72bSSuanming Mou 	/* Is MODIFY_HEADER action shared across flows in table. */
13930f4aa72bSSuanming Mou 	bool shared;
13940f4aa72bSSuanming Mou 	/* Amount of modification commands stored in the precompiled buffer. */
13950f4aa72bSSuanming Mou 	uint32_t mhdr_cmds_num;
13960f4aa72bSSuanming Mou 	/* Precompiled modification commands. */
13970f4aa72bSSuanming Mou 	struct mlx5_modification_cmd mhdr_cmds[MLX5_MHDR_MAX_CMD];
13980f4aa72bSSuanming Mou };
13990f4aa72bSSuanming Mou 
1400f13fab23SSuanming Mou /* The maximum actions support in the flow. */
1401f13fab23SSuanming Mou #define MLX5_HW_MAX_ACTS 16
1402f13fab23SSuanming Mou 
1403d1559d66SSuanming Mou /* DR action set struct. */
1404d1559d66SSuanming Mou struct mlx5_hw_actions {
1405f13fab23SSuanming Mou 	/* Dynamic action list. */
1406f13fab23SSuanming Mou 	LIST_HEAD(act_list, mlx5_action_construct_data) act_list;
1407f13fab23SSuanming Mou 	struct mlx5_hw_jump_action *jump; /* Jump action. */
14083a2f674bSSuanming Mou 	struct mlx5_hrxq *tir; /* TIR action. */
14090f4aa72bSSuanming Mou 	struct mlx5_hw_modify_header_action *mhdr; /* Modify header action. */
1410fe3620aaSSuanming Mou 	/* Encap/Decap action. */
1411fe3620aaSSuanming Mou 	struct mlx5_hw_encap_decap_action *encap_decap;
1412fe3620aaSSuanming Mou 	uint16_t encap_decap_pos; /* Encap/Decap action position. */
14131deadfd7SSuanming Mou 	uint32_t mark:1; /* Indicates whether a mark action is present. */
141404a4de75SMichael Baum 	cnt_id_t cnt_id; /* Counter id. */
141548fbb0e9SAlexander Kozyrev 	uint32_t mtr_id; /* Meter id. */
1416f13fab23SSuanming Mou 	/* Translated DR action array from action template. */
1417f13fab23SSuanming Mou 	struct mlx5dr_rule_action rule_acts[MLX5_HW_MAX_ACTS];
1418d1559d66SSuanming Mou };
1419d1559d66SSuanming Mou 
1420d1559d66SSuanming Mou /* mlx5 action template struct. */
1421d1559d66SSuanming Mou struct mlx5_hw_action_template {
1422d1559d66SSuanming Mou 	/* Action template pointer. */
1423d1559d66SSuanming Mou 	struct rte_flow_actions_template *action_template;
1424d1559d66SSuanming Mou 	struct mlx5_hw_actions acts; /* Template actions. */
1425d1559d66SSuanming Mou };
1426d1559d66SSuanming Mou 
1427d1559d66SSuanming Mou /* mlx5 flow group struct. */
1428d1559d66SSuanming Mou struct mlx5_flow_group {
1429d1559d66SSuanming Mou 	struct mlx5_list_entry entry;
14308ce638efSTomer Shmilovich 	LIST_ENTRY(mlx5_flow_group) next;
14311939eb6fSDariusz Sosnowski 	struct rte_eth_dev *dev; /* Reference to corresponding device. */
1432d1559d66SSuanming Mou 	struct mlx5dr_table *tbl; /* HWS table object. */
1433d1559d66SSuanming Mou 	struct mlx5_hw_jump_action jump; /* Jump action. */
14348ce638efSTomer Shmilovich 	struct mlx5_flow_group *miss_group; /* Group pointed to by miss action. */
1435d1559d66SSuanming Mou 	enum mlx5dr_table_type type; /* Table type. */
1436d1559d66SSuanming Mou 	uint32_t group_id; /* Group id. */
1437d1559d66SSuanming Mou 	uint32_t idx; /* Group memory index. */
1438d1559d66SSuanming Mou };
1439d1559d66SSuanming Mou 
1440d1559d66SSuanming Mou 
1441d1559d66SSuanming Mou #define MLX5_HW_TBL_MAX_ITEM_TEMPLATE 2
1442d1559d66SSuanming Mou #define MLX5_HW_TBL_MAX_ACTION_TEMPLATE 32
1443d1559d66SSuanming Mou 
1444ddb68e47SBing Zhao struct mlx5_flow_template_table_cfg {
1445ddb68e47SBing Zhao 	struct rte_flow_template_table_attr attr; /* Table attributes passed through flow API. */
1446ddb68e47SBing Zhao 	bool external; /* True if created by flow API, false if table is internal to PMD. */
1447ddb68e47SBing Zhao };
1448ddb68e47SBing Zhao 
1449d1559d66SSuanming Mou struct rte_flow_template_table {
1450d1559d66SSuanming Mou 	LIST_ENTRY(rte_flow_template_table) next;
1451d1559d66SSuanming Mou 	struct mlx5_flow_group *grp; /* The group rte_flow_template_table uses. */
1452d1559d66SSuanming Mou 	struct mlx5dr_matcher *matcher; /* Template matcher. */
1453d1559d66SSuanming Mou 	/* Item templates bind to the table. */
1454d1559d66SSuanming Mou 	struct rte_flow_pattern_template *its[MLX5_HW_TBL_MAX_ITEM_TEMPLATE];
1455d1559d66SSuanming Mou 	/* Action templates bind to the table. */
1456d1559d66SSuanming Mou 	struct mlx5_hw_action_template ats[MLX5_HW_TBL_MAX_ACTION_TEMPLATE];
1457d1559d66SSuanming Mou 	struct mlx5_indexed_pool *flow; /* The table's flow ipool. */
145863296851SAlexander Kozyrev 	struct mlx5_indexed_pool *resource; /* The table's resource ipool. */
1459ddb68e47SBing Zhao 	struct mlx5_flow_template_table_cfg cfg;
1460d1559d66SSuanming Mou 	uint32_t type; /* Flow table type RX/TX/FDB. */
1461d1559d66SSuanming Mou 	uint8_t nb_item_templates; /* Item template number. */
1462d1559d66SSuanming Mou 	uint8_t nb_action_templates; /* Action template number. */
1463d1559d66SSuanming Mou 	uint32_t refcnt; /* Table reference counter. */
1464d1559d66SSuanming Mou };
1465d1559d66SSuanming Mou 
146642431df9SSuanming Mou #endif
146742431df9SSuanming Mou 
1468d7cfcdddSAndrey Vesnovaty /*
1469d7cfcdddSAndrey Vesnovaty  * Define list of valid combinations of RX Hash fields
1470d7cfcdddSAndrey Vesnovaty  * (see enum ibv_rx_hash_fields).
1471d7cfcdddSAndrey Vesnovaty  */
1472d7cfcdddSAndrey Vesnovaty #define MLX5_RSS_HASH_IPV4 (IBV_RX_HASH_SRC_IPV4 | IBV_RX_HASH_DST_IPV4)
1473d7cfcdddSAndrey Vesnovaty #define MLX5_RSS_HASH_IPV4_TCP \
1474d7cfcdddSAndrey Vesnovaty 	(MLX5_RSS_HASH_IPV4 | \
1475c83456cdSDekel Peled 	 IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP)
1476d7cfcdddSAndrey Vesnovaty #define MLX5_RSS_HASH_IPV4_UDP \
1477d7cfcdddSAndrey Vesnovaty 	(MLX5_RSS_HASH_IPV4 | \
1478c83456cdSDekel Peled 	 IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP)
1479d7cfcdddSAndrey Vesnovaty #define MLX5_RSS_HASH_IPV6 (IBV_RX_HASH_SRC_IPV6 | IBV_RX_HASH_DST_IPV6)
1480d7cfcdddSAndrey Vesnovaty #define MLX5_RSS_HASH_IPV6_TCP \
1481d7cfcdddSAndrey Vesnovaty 	(MLX5_RSS_HASH_IPV6 | \
1482c83456cdSDekel Peled 	 IBV_RX_HASH_SRC_PORT_TCP | IBV_RX_HASH_DST_PORT_TCP)
1483d7cfcdddSAndrey Vesnovaty #define MLX5_RSS_HASH_IPV6_UDP \
1484d7cfcdddSAndrey Vesnovaty 	(MLX5_RSS_HASH_IPV6 | \
1485c83456cdSDekel Peled 	 IBV_RX_HASH_SRC_PORT_UDP | IBV_RX_HASH_DST_PORT_UDP)
1486212d17b6SXiaoyu Min #define MLX5_RSS_HASH_IPV4_SRC_ONLY IBV_RX_HASH_SRC_IPV4
1487212d17b6SXiaoyu Min #define MLX5_RSS_HASH_IPV4_DST_ONLY IBV_RX_HASH_DST_IPV4
1488212d17b6SXiaoyu Min #define MLX5_RSS_HASH_IPV6_SRC_ONLY IBV_RX_HASH_SRC_IPV6
1489212d17b6SXiaoyu Min #define MLX5_RSS_HASH_IPV6_DST_ONLY IBV_RX_HASH_DST_IPV6
1490212d17b6SXiaoyu Min #define MLX5_RSS_HASH_IPV4_UDP_SRC_ONLY \
1491212d17b6SXiaoyu Min 	(MLX5_RSS_HASH_IPV4 | IBV_RX_HASH_SRC_PORT_UDP)
1492212d17b6SXiaoyu Min #define MLX5_RSS_HASH_IPV4_UDP_DST_ONLY \
1493212d17b6SXiaoyu Min 	(MLX5_RSS_HASH_IPV4 | IBV_RX_HASH_DST_PORT_UDP)
1494212d17b6SXiaoyu Min #define MLX5_RSS_HASH_IPV6_UDP_SRC_ONLY \
1495212d17b6SXiaoyu Min 	(MLX5_RSS_HASH_IPV6 | IBV_RX_HASH_SRC_PORT_UDP)
1496212d17b6SXiaoyu Min #define MLX5_RSS_HASH_IPV6_UDP_DST_ONLY \
1497212d17b6SXiaoyu Min 	(MLX5_RSS_HASH_IPV6 | IBV_RX_HASH_DST_PORT_UDP)
1498212d17b6SXiaoyu Min #define MLX5_RSS_HASH_IPV4_TCP_SRC_ONLY \
1499212d17b6SXiaoyu Min 	(MLX5_RSS_HASH_IPV4 | IBV_RX_HASH_SRC_PORT_TCP)
1500212d17b6SXiaoyu Min #define MLX5_RSS_HASH_IPV4_TCP_DST_ONLY \
1501212d17b6SXiaoyu Min 	(MLX5_RSS_HASH_IPV4 | IBV_RX_HASH_DST_PORT_TCP)
1502212d17b6SXiaoyu Min #define MLX5_RSS_HASH_IPV6_TCP_SRC_ONLY \
1503212d17b6SXiaoyu Min 	(MLX5_RSS_HASH_IPV6 | IBV_RX_HASH_SRC_PORT_TCP)
1504212d17b6SXiaoyu Min #define MLX5_RSS_HASH_IPV6_TCP_DST_ONLY \
1505212d17b6SXiaoyu Min 	(MLX5_RSS_HASH_IPV6 | IBV_RX_HASH_DST_PORT_TCP)
150618ca4a4eSRaja Zidane 
150718ca4a4eSRaja Zidane #ifndef HAVE_IBV_RX_HASH_IPSEC_SPI
150818ca4a4eSRaja Zidane #define IBV_RX_HASH_IPSEC_SPI (1U << 8)
150918ca4a4eSRaja Zidane #endif
151018ca4a4eSRaja Zidane 
151118ca4a4eSRaja Zidane #define MLX5_RSS_HASH_ESP_SPI IBV_RX_HASH_IPSEC_SPI
151218ca4a4eSRaja Zidane #define MLX5_RSS_HASH_IPV4_ESP (MLX5_RSS_HASH_IPV4 | \
151318ca4a4eSRaja Zidane 				MLX5_RSS_HASH_ESP_SPI)
151418ca4a4eSRaja Zidane #define MLX5_RSS_HASH_IPV6_ESP (MLX5_RSS_HASH_IPV6 | \
151518ca4a4eSRaja Zidane 				MLX5_RSS_HASH_ESP_SPI)
1516d7cfcdddSAndrey Vesnovaty #define MLX5_RSS_HASH_NONE 0ULL
1517d7cfcdddSAndrey Vesnovaty 
15180e04e1e2SXueming Li #define MLX5_RSS_IS_SYMM(func) \
151976f3d99cSXueming Li 		(((func) == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ) || \
152076f3d99cSXueming Li 		 ((func) == RTE_ETH_HASH_FUNCTION_SYMMETRIC_TOEPLITZ_SORT))
152179f89527SGregory Etelson 
152279f89527SGregory Etelson /* extract next protocol type from Ethernet & VLAN headers */
152379f89527SGregory Etelson #define MLX5_ETHER_TYPE_FROM_HEADER(_s, _m, _itm, _prt) do { \
152479f89527SGregory Etelson 	(_prt) = ((const struct _s *)(_itm)->mask)->_m;       \
152579f89527SGregory Etelson 	(_prt) &= ((const struct _s *)(_itm)->spec)->_m;      \
152679f89527SGregory Etelson 	(_prt) = rte_be_to_cpu_16((_prt));                    \
152779f89527SGregory Etelson } while (0)
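
/*
 * Hedged usage sketch (the field names depend on the rte_flow_item_eth
 * layout in this DPDK version and are assumptions here):
 *
 *   uint16_t ether_type;
 *   MLX5_ETHER_TYPE_FROM_HEADER(rte_flow_item_eth, hdr.ether_type,
 *                               item, ether_type);
 *
 * The result is the next protocol type masked by the item mask and
 * converted to host-endian byte order.
 */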
152879f89527SGregory Etelson 
1529d7cfcdddSAndrey Vesnovaty /* array of valid combinations of RX Hash fields for RSS */
1530d7cfcdddSAndrey Vesnovaty static const uint64_t mlx5_rss_hash_fields[] = {
1531d7cfcdddSAndrey Vesnovaty 	MLX5_RSS_HASH_IPV4,
1532d7cfcdddSAndrey Vesnovaty 	MLX5_RSS_HASH_IPV4_TCP,
1533d7cfcdddSAndrey Vesnovaty 	MLX5_RSS_HASH_IPV4_UDP,
153418ca4a4eSRaja Zidane 	MLX5_RSS_HASH_IPV4_ESP,
1535d7cfcdddSAndrey Vesnovaty 	MLX5_RSS_HASH_IPV6,
1536d7cfcdddSAndrey Vesnovaty 	MLX5_RSS_HASH_IPV6_TCP,
1537d7cfcdddSAndrey Vesnovaty 	MLX5_RSS_HASH_IPV6_UDP,
153818ca4a4eSRaja Zidane 	MLX5_RSS_HASH_IPV6_ESP,
153918ca4a4eSRaja Zidane 	MLX5_RSS_HASH_ESP_SPI,
1540d7cfcdddSAndrey Vesnovaty 	MLX5_RSS_HASH_NONE,
1541d7cfcdddSAndrey Vesnovaty };
1542d7cfcdddSAndrey Vesnovaty 
1543d7cfcdddSAndrey Vesnovaty /* Shared RSS action structure */
1544d7cfcdddSAndrey Vesnovaty struct mlx5_shared_action_rss {
15454a42ac1fSMatan Azrad 	ILIST_ENTRY(uint32_t)next; /**< Index to the next RSS structure. */
15464a42ac1fSMatan Azrad 	uint32_t refcnt; /**< Atomically accessed refcnt. */
1547d7cfcdddSAndrey Vesnovaty 	struct rte_flow_action_rss origin; /**< Original rte RSS action. */
1548d7cfcdddSAndrey Vesnovaty 	uint8_t key[MLX5_RSS_HASH_KEY_LEN]; /**< RSS hash key. */
1549fa7ad49eSAndrey Vesnovaty 	struct mlx5_ind_table_obj *ind_tbl;
1550fa7ad49eSAndrey Vesnovaty 	/**< Hash RX queues (hrxq, hrxq_tunnel fields) indirection table. */
1551d7cfcdddSAndrey Vesnovaty 	uint32_t hrxq[MLX5_RSS_HASH_FIELDS_LEN];
1552d7cfcdddSAndrey Vesnovaty 	/**< Hash RX queue indexes mapped to mlx5_rss_hash_fields */
1553fa7ad49eSAndrey Vesnovaty 	rte_spinlock_t action_rss_sl; /**< Shared RSS action spinlock. */
1554d7cfcdddSAndrey Vesnovaty };
1555d7cfcdddSAndrey Vesnovaty 
15564b61b877SBing Zhao struct rte_flow_action_handle {
15574a42ac1fSMatan Azrad 	uint32_t id;
1558d7cfcdddSAndrey Vesnovaty };
1559d7cfcdddSAndrey Vesnovaty 
15608bb81f26SXueming Li /* Thread specific flow workspace intermediate data. */
15618bb81f26SXueming Li struct mlx5_flow_workspace {
15620064bf43SXueming Li 	/* If another flow is created in the same thread, the new workspace is pushed onto the stack. */
15630064bf43SXueming Li 	struct mlx5_flow_workspace *prev;
15640064bf43SXueming Li 	struct mlx5_flow_workspace *next;
1565dc7c5e0aSGregory Etelson 	struct mlx5_flow_workspace *gc;
15660064bf43SXueming Li 	uint32_t inuse; /* Set while in use; a new flow cannot be created with the current workspace. */
15678bb81f26SXueming Li 	struct mlx5_flow flows[MLX5_NUM_MAX_DEV_FLOWS];
15680064bf43SXueming Li 	struct mlx5_flow_rss_desc rss_desc;
156938c6dc20SXueming Li 	uint32_t flow_idx; /* Intermediate device flow index. */
1570e6100c7bSLi Zhang 	struct mlx5_flow_meter_info *fm; /* Pointer to the meter in flow. */
157150cc92ddSShun Hao 	struct mlx5_flow_meter_policy *policy;
157250cc92ddSShun Hao 	/* The meter policy used by meter in flow. */
157350cc92ddSShun Hao 	struct mlx5_flow_meter_policy *final_policy;
157450cc92ddSShun Hao 	/* The final policy when meter policy is hierarchy. */
157551ec04dcSShun Hao 	uint32_t skip_matcher_reg:1;
157651ec04dcSShun Hao 	/* Indicates if need to skip matcher register in translate. */
1577082becbfSRaja Zidane 	uint32_t mark:1; /* Indicates if flow contains mark action. */
1578cd4ab742SSuanming Mou 	uint32_t vport_meta_tag; /* Used for vport index match. */
1579cd4ab742SSuanming Mou };
1580cd4ab742SSuanming Mou 
1581cd4ab742SSuanming Mou /* Matcher translate type. */
1582cd4ab742SSuanming Mou enum MLX5_SET_MATCHER {
1583cd4ab742SSuanming Mou 	MLX5_SET_MATCHER_SW_V = 1 << 0,
1584cd4ab742SSuanming Mou 	MLX5_SET_MATCHER_SW_M = 1 << 1,
1585cd4ab742SSuanming Mou 	MLX5_SET_MATCHER_HS_V = 1 << 2,
1586cd4ab742SSuanming Mou 	MLX5_SET_MATCHER_HS_M = 1 << 3,
1587cd4ab742SSuanming Mou };
1588cd4ab742SSuanming Mou 
1589cd4ab742SSuanming Mou #define MLX5_SET_MATCHER_SW (MLX5_SET_MATCHER_SW_V | MLX5_SET_MATCHER_SW_M)
1590cd4ab742SSuanming Mou #define MLX5_SET_MATCHER_HS (MLX5_SET_MATCHER_HS_V | MLX5_SET_MATCHER_HS_M)
1591cd4ab742SSuanming Mou #define MLX5_SET_MATCHER_V (MLX5_SET_MATCHER_SW_V | MLX5_SET_MATCHER_HS_V)
1592cd4ab742SSuanming Mou #define MLX5_SET_MATCHER_M (MLX5_SET_MATCHER_SW_M | MLX5_SET_MATCHER_HS_M)
1593cd4ab742SSuanming Mou 
1594cd4ab742SSuanming Mou /* Flow matcher workspace intermediate data. */
1595cd4ab742SSuanming Mou struct mlx5_dv_matcher_workspace {
1596cd4ab742SSuanming Mou 	uint8_t priority; /* Flow priority. */
1597cd4ab742SSuanming Mou 	uint64_t last_item; /* Last item in pattern. */
1598cd4ab742SSuanming Mou 	uint64_t item_flags; /* Flow item pattern flags. */
1599cd4ab742SSuanming Mou 	uint64_t action_flags; /* Flow action flags. */
1600cd4ab742SSuanming Mou 	bool external; /* External flow or not. */
1601cd4ab742SSuanming Mou 	uint32_t vlan_tag:12; /* Flow item VLAN tag. */
1602cd4ab742SSuanming Mou 	uint8_t next_protocol; /* Tunnel next protocol */
1603cd4ab742SSuanming Mou 	uint32_t geneve_tlv_option; /* Flow item Geneve TLV option. */
1604cd4ab742SSuanming Mou 	uint32_t group; /* Flow group. */
1605cd4ab742SSuanming Mou 	uint16_t udp_dport; /* Flow item UDP port. */
1606cd4ab742SSuanming Mou 	const struct rte_flow_attr *attr; /* Flow attribute. */
1607cd4ab742SSuanming Mou 	struct mlx5_flow_rss_desc *rss_desc; /* RSS descriptor. */
1608cd4ab742SSuanming Mou 	const struct rte_flow_item *tunnel_item; /* Flow tunnel item. */
1609cd4ab742SSuanming Mou 	const struct rte_flow_item *gre_item; /* Flow GRE item. */
1610a3778a47SGregory Etelson 	const struct rte_flow_item *integrity_items[2];
16118bb81f26SXueming Li };
16128bb81f26SXueming Li 
16139ade91dfSJiawei Wang struct mlx5_flow_split_info {
1614693c7d4bSJiawei Wang 	uint32_t external:1;
16159ade91dfSJiawei Wang 	/**< True if flow is created by request external to PMD. */
1616693c7d4bSJiawei Wang 	uint32_t prefix_mark:1; /**< Prefix subflow mark flag. */
1617693c7d4bSJiawei Wang 	uint32_t skip_scale:8; /**< Skip the scale the table with factor. */
16189ade91dfSJiawei Wang 	uint32_t flow_idx; /**< This memory pool index to the flow. */
16192d2cef5dSLi Zhang 	uint32_t table_id; /**< Flow table identifier. */
1620693c7d4bSJiawei Wang 	uint64_t prefix_layers; /**< Prefix subflow layers. */
16219ade91dfSJiawei Wang };
16229ade91dfSJiawei Wang 
16235bd0e3e6SDariusz Sosnowski struct flow_hw_port_info {
16245bd0e3e6SDariusz Sosnowski 	uint32_t regc_mask;
16255bd0e3e6SDariusz Sosnowski 	uint32_t regc_value;
16265bd0e3e6SDariusz Sosnowski 	uint32_t is_wire:1;
16275bd0e3e6SDariusz Sosnowski };
16285bd0e3e6SDariusz Sosnowski 
16295bd0e3e6SDariusz Sosnowski extern struct flow_hw_port_info mlx5_flow_hw_port_infos[RTE_MAX_ETHPORTS];
16305bd0e3e6SDariusz Sosnowski 
16315bd0e3e6SDariusz Sosnowski /*
16325bd0e3e6SDariusz Sosnowski  * Get metadata match tag and mask for given rte_eth_dev port.
16335bd0e3e6SDariusz Sosnowski  * Used in HWS rule creation.
16345bd0e3e6SDariusz Sosnowski  */
16355bd0e3e6SDariusz Sosnowski static __rte_always_inline const struct flow_hw_port_info *
16365bd0e3e6SDariusz Sosnowski flow_hw_conv_port_id(const uint16_t port_id)
16375bd0e3e6SDariusz Sosnowski {
16385bd0e3e6SDariusz Sosnowski 	struct flow_hw_port_info *port_info;
16395bd0e3e6SDariusz Sosnowski 
16405bd0e3e6SDariusz Sosnowski 	if (port_id >= RTE_MAX_ETHPORTS)
16415bd0e3e6SDariusz Sosnowski 		return NULL;
16425bd0e3e6SDariusz Sosnowski 	port_info = &mlx5_flow_hw_port_infos[port_id];
16435bd0e3e6SDariusz Sosnowski 	return !!port_info->regc_mask ? port_info : NULL;
16445bd0e3e6SDariusz Sosnowski }
16455bd0e3e6SDariusz Sosnowski 
16465bd0e3e6SDariusz Sosnowski #ifdef HAVE_IBV_FLOW_DV_SUPPORT
16475bd0e3e6SDariusz Sosnowski /*
16485bd0e3e6SDariusz Sosnowski  * Get metadata match tag and mask for the uplink port represented
16495bd0e3e6SDariusz Sosnowski  * by given IB context. Used in HWS context creation.
16505bd0e3e6SDariusz Sosnowski  */
16515bd0e3e6SDariusz Sosnowski static __rte_always_inline const struct flow_hw_port_info *
16525bd0e3e6SDariusz Sosnowski flow_hw_get_wire_port(struct ibv_context *ibctx)
16535bd0e3e6SDariusz Sosnowski {
16545bd0e3e6SDariusz Sosnowski 	struct ibv_device *ibdev = ibctx->device;
16555bd0e3e6SDariusz Sosnowski 	uint16_t port_id;
16565bd0e3e6SDariusz Sosnowski 
16575bd0e3e6SDariusz Sosnowski 	MLX5_ETH_FOREACH_DEV(port_id, NULL) {
16585bd0e3e6SDariusz Sosnowski 		const struct mlx5_priv *priv =
16595bd0e3e6SDariusz Sosnowski 				rte_eth_devices[port_id].data->dev_private;
16605bd0e3e6SDariusz Sosnowski 
16615bd0e3e6SDariusz Sosnowski 		if (priv && priv->master) {
16625bd0e3e6SDariusz Sosnowski 			struct ibv_context *port_ibctx = priv->sh->cdev->ctx;
16635bd0e3e6SDariusz Sosnowski 
16645bd0e3e6SDariusz Sosnowski 			if (port_ibctx->device == ibdev)
16655bd0e3e6SDariusz Sosnowski 				return flow_hw_conv_port_id(port_id);
16665bd0e3e6SDariusz Sosnowski 		}
16675bd0e3e6SDariusz Sosnowski 	}
16685bd0e3e6SDariusz Sosnowski 	return NULL;
16695bd0e3e6SDariusz Sosnowski }
16705bd0e3e6SDariusz Sosnowski #endif
16715bd0e3e6SDariusz Sosnowski 
16728a89038fSBing Zhao /*
16738a89038fSBing Zhao  * Convert metadata or tag to the actual register.
16748a89038fSBing Zhao  * META: Can only be used to match in the FDB at this stage, fixed to C_1.
16758a89038fSBing Zhao  * TAG: C_x registers except the meter color register and the reserved ones.
16768a89038fSBing Zhao  */
16778a89038fSBing Zhao static __rte_always_inline int
167804e740e6SGregory Etelson flow_hw_get_reg_id(struct rte_eth_dev *dev,
167904e740e6SGregory Etelson 		   enum rte_flow_item_type type, uint32_t id)
16808a89038fSBing Zhao {
168104e740e6SGregory Etelson 	struct mlx5_dev_ctx_shared *sh = MLX5_SH(dev);
168204e740e6SGregory Etelson 	struct mlx5_dev_registers *reg = &sh->registers;
168304e740e6SGregory Etelson 
16848a89038fSBing Zhao 	switch (type) {
16858a89038fSBing Zhao 	case RTE_FLOW_ITEM_TYPE_META:
1686f1fecffaSDariusz Sosnowski #ifdef HAVE_MLX5_HWS_SUPPORT
168704e740e6SGregory Etelson 		if (sh->config.dv_esw_en &&
168804e740e6SGregory Etelson 		    sh->config.dv_xmeta_en == MLX5_XMETA_MODE_META32_HWS) {
16898a89038fSBing Zhao 			return REG_C_1;
1690f1fecffaSDariusz Sosnowski 		}
1691f1fecffaSDariusz Sosnowski #endif
1692f1fecffaSDariusz Sosnowski 		/*
1693f1fecffaSDariusz Sosnowski 		 * On root table - PMD allows only egress META matching, thus
1694f1fecffaSDariusz Sosnowski 		 * REG_A matching is sufficient.
1695f1fecffaSDariusz Sosnowski 		 *
1696f1fecffaSDariusz Sosnowski 		 * On non-root tables - REG_A corresponds to general_purpose_lookup_field,
1697f1fecffaSDariusz Sosnowski 		 * which translates to REG_A in NIC TX and to REG_B in NIC RX.
1698f1fecffaSDariusz Sosnowski 		 * However, current FW does not implement REG_B case right now, so
1699f1fecffaSDariusz Sosnowski 		 * REG_B case should be rejected on pattern template validation.
1700f1fecffaSDariusz Sosnowski 		 */
1701f1fecffaSDariusz Sosnowski 		return REG_A;
1702463170a7SSuanming Mou 	case RTE_FLOW_ITEM_TYPE_CONNTRACK:
170348fbb0e9SAlexander Kozyrev 	case RTE_FLOW_ITEM_TYPE_METER_COLOR:
17045e9f9a28SGregory Etelson 		return reg->aso_reg;
17058a89038fSBing Zhao 	case RTE_FLOW_ITEM_TYPE_TAG:
17065f5e2f86SAlexander Kozyrev 		if (id == MLX5_LINEAR_HASH_TAG_INDEX)
17075f5e2f86SAlexander Kozyrev 			return REG_C_3;
17088a89038fSBing Zhao 		MLX5_ASSERT(id < MLX5_FLOW_HW_TAGS_MAX);
170904e740e6SGregory Etelson 		return reg->hw_avl_tags[id];
17108a89038fSBing Zhao 	default:
17118a89038fSBing Zhao 		return REG_NON;
17128a89038fSBing Zhao 	}
17138a89038fSBing Zhao }
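
/*
 * For instance, matching RTE_FLOW_ITEM_TYPE_TAG with
 * id == MLX5_LINEAR_HASH_TAG_INDEX always resolves to REG_C_3, while other
 * tag indexes are looked up in sh->registers.hw_avl_tags[] and unsupported
 * item types yield REG_NON.
 */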
17148a89038fSBing Zhao 
171504e740e6SGregory Etelson static __rte_always_inline int
171604e740e6SGregory Etelson flow_hw_get_reg_id_from_ctx(void *dr_ctx,
171704e740e6SGregory Etelson 			    enum rte_flow_item_type type, uint32_t id)
171804e740e6SGregory Etelson {
171904e740e6SGregory Etelson #ifdef HAVE_IBV_FLOW_DV_SUPPORT
172004e740e6SGregory Etelson 	uint16_t port;
172104e740e6SGregory Etelson 
172204e740e6SGregory Etelson 	MLX5_ETH_FOREACH_DEV(port, NULL) {
172304e740e6SGregory Etelson 		struct mlx5_priv *priv;
172404e740e6SGregory Etelson 
172504e740e6SGregory Etelson 		priv = rte_eth_devices[port].data->dev_private;
172604e740e6SGregory Etelson 		if (priv->dr_ctx == dr_ctx)
172704e740e6SGregory Etelson 			return flow_hw_get_reg_id(&rte_eth_devices[port],
172804e740e6SGregory Etelson 						  type, id);
172904e740e6SGregory Etelson 	}
173004e740e6SGregory Etelson #else
173104e740e6SGregory Etelson 	RTE_SET_USED(dr_ctx);
173204e740e6SGregory Etelson 	RTE_SET_USED(type);
173304e740e6SGregory Etelson 	RTE_SET_USED(id);
173404e740e6SGregory Etelson #endif
173504e740e6SGregory Etelson 	return REG_NON;
173604e740e6SGregory Etelson }
173704e740e6SGregory Etelson 
17385bd0e3e6SDariusz Sosnowski void flow_hw_set_port_info(struct rte_eth_dev *dev);
17395bd0e3e6SDariusz Sosnowski void flow_hw_clear_port_info(struct rte_eth_dev *dev);
17401939eb6fSDariusz Sosnowski int flow_hw_create_vport_action(struct rte_eth_dev *dev);
17411939eb6fSDariusz Sosnowski void flow_hw_destroy_vport_action(struct rte_eth_dev *dev);
17421939eb6fSDariusz Sosnowski 
174384c406e7SOri Kam typedef int (*mlx5_flow_validate_t)(struct rte_eth_dev *dev,
174484c406e7SOri Kam 				    const struct rte_flow_attr *attr,
174584c406e7SOri Kam 				    const struct rte_flow_item items[],
174684c406e7SOri Kam 				    const struct rte_flow_action actions[],
1747b67b4ecbSDekel Peled 				    bool external,
174872a944dbSBing Zhao 				    int hairpin,
174984c406e7SOri Kam 				    struct rte_flow_error *error);
175084c406e7SOri Kam typedef struct mlx5_flow *(*mlx5_flow_prepare_t)
1751e7bfa359SBing Zhao 	(struct rte_eth_dev *dev, const struct rte_flow_attr *attr,
1752e7bfa359SBing Zhao 	 const struct rte_flow_item items[],
1753c1cfb132SYongseok Koh 	 const struct rte_flow_action actions[], struct rte_flow_error *error);
175484c406e7SOri Kam typedef int (*mlx5_flow_translate_t)(struct rte_eth_dev *dev,
175584c406e7SOri Kam 				     struct mlx5_flow *dev_flow,
175684c406e7SOri Kam 				     const struct rte_flow_attr *attr,
175784c406e7SOri Kam 				     const struct rte_flow_item items[],
175884c406e7SOri Kam 				     const struct rte_flow_action actions[],
175984c406e7SOri Kam 				     struct rte_flow_error *error);
176084c406e7SOri Kam typedef int (*mlx5_flow_apply_t)(struct rte_eth_dev *dev, struct rte_flow *flow,
176184c406e7SOri Kam 				 struct rte_flow_error *error);
176284c406e7SOri Kam typedef void (*mlx5_flow_remove_t)(struct rte_eth_dev *dev,
176384c406e7SOri Kam 				   struct rte_flow *flow);
176484c406e7SOri Kam typedef void (*mlx5_flow_destroy_t)(struct rte_eth_dev *dev,
176584c406e7SOri Kam 				    struct rte_flow *flow);
1766684dafe7SMoti Haimovsky typedef int (*mlx5_flow_query_t)(struct rte_eth_dev *dev,
1767684dafe7SMoti Haimovsky 				 struct rte_flow *flow,
1768684dafe7SMoti Haimovsky 				 const struct rte_flow_action *actions,
1769684dafe7SMoti Haimovsky 				 void *data,
1770684dafe7SMoti Haimovsky 				 struct rte_flow_error *error);
177144432018SLi Zhang typedef int (*mlx5_flow_create_mtr_tbls_t)(struct rte_eth_dev *dev,
177244432018SLi Zhang 					struct mlx5_flow_meter_info *fm,
177344432018SLi Zhang 					uint32_t mtr_idx,
177444432018SLi Zhang 					uint8_t domain_bitmap);
177544432018SLi Zhang typedef void (*mlx5_flow_destroy_mtr_tbls_t)(struct rte_eth_dev *dev,
177644432018SLi Zhang 				struct mlx5_flow_meter_info *fm);
1777afb4aa4fSLi Zhang typedef void (*mlx5_flow_destroy_mtr_drop_tbls_t)(struct rte_eth_dev *dev);
1778fc6ce56bSLi Zhang typedef struct mlx5_flow_meter_sub_policy *
1779fc6ce56bSLi Zhang 	(*mlx5_flow_meter_sub_policy_rss_prepare_t)
1780fc6ce56bSLi Zhang 		(struct rte_eth_dev *dev,
1781fc6ce56bSLi Zhang 		struct mlx5_flow_meter_policy *mtr_policy,
1782fc6ce56bSLi Zhang 		struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS]);
17838e5c9feaSShun Hao typedef int (*mlx5_flow_meter_hierarchy_rule_create_t)
17848e5c9feaSShun Hao 		(struct rte_eth_dev *dev,
17858e5c9feaSShun Hao 		struct mlx5_flow_meter_info *fm,
17868e5c9feaSShun Hao 		int32_t src_port,
17878e5c9feaSShun Hao 		const struct rte_flow_item *item,
17888e5c9feaSShun Hao 		struct rte_flow_error *error);
1789ec962badSLi Zhang typedef void (*mlx5_flow_destroy_sub_policy_with_rxq_t)
1790ec962badSLi Zhang 	(struct rte_eth_dev *dev,
1791ec962badSLi Zhang 	struct mlx5_flow_meter_policy *mtr_policy);
1792e6100c7bSLi Zhang typedef uint32_t (*mlx5_flow_mtr_alloc_t)
1793e6100c7bSLi Zhang 					    (struct rte_eth_dev *dev);
1794e6100c7bSLi Zhang typedef void (*mlx5_flow_mtr_free_t)(struct rte_eth_dev *dev,
1795e6100c7bSLi Zhang 						uint32_t mtr_idx);
1796956d5c74SSuanming Mou typedef uint32_t (*mlx5_flow_counter_alloc_t)
1797e189f55cSSuanming Mou 				   (struct rte_eth_dev *dev);
1798e189f55cSSuanming Mou typedef void (*mlx5_flow_counter_free_t)(struct rte_eth_dev *dev,
1799956d5c74SSuanming Mou 					 uint32_t cnt);
1800e189f55cSSuanming Mou typedef int (*mlx5_flow_counter_query_t)(struct rte_eth_dev *dev,
1801956d5c74SSuanming Mou 					 uint32_t cnt,
1802e189f55cSSuanming Mou 					 bool clear, uint64_t *pkts,
18039b57df55SHaifei Luo 					 uint64_t *bytes, void **action);
1804fa2d01c8SDong Zhou typedef int (*mlx5_flow_get_aged_flows_t)
1805fa2d01c8SDong Zhou 					(struct rte_eth_dev *dev,
1806fa2d01c8SDong Zhou 					 void **context,
1807fa2d01c8SDong Zhou 					 uint32_t nb_contexts,
1808fa2d01c8SDong Zhou 					 struct rte_flow_error *error);
180904a4de75SMichael Baum typedef int (*mlx5_flow_get_q_aged_flows_t)
181004a4de75SMichael Baum 					(struct rte_eth_dev *dev,
181104a4de75SMichael Baum 					 uint32_t queue_id,
181204a4de75SMichael Baum 					 void **context,
181304a4de75SMichael Baum 					 uint32_t nb_contexts,
181404a4de75SMichael Baum 					 struct rte_flow_error *error);
1815d7cfcdddSAndrey Vesnovaty typedef int (*mlx5_flow_action_validate_t)
1816d7cfcdddSAndrey Vesnovaty 				(struct rte_eth_dev *dev,
18174b61b877SBing Zhao 				 const struct rte_flow_indir_action_conf *conf,
1818d7cfcdddSAndrey Vesnovaty 				 const struct rte_flow_action *action,
1819d7cfcdddSAndrey Vesnovaty 				 struct rte_flow_error *error);
18204b61b877SBing Zhao typedef struct rte_flow_action_handle *(*mlx5_flow_action_create_t)
1821d7cfcdddSAndrey Vesnovaty 				(struct rte_eth_dev *dev,
18224b61b877SBing Zhao 				 const struct rte_flow_indir_action_conf *conf,
1823d7cfcdddSAndrey Vesnovaty 				 const struct rte_flow_action *action,
1824d7cfcdddSAndrey Vesnovaty 				 struct rte_flow_error *error);
1825d7cfcdddSAndrey Vesnovaty typedef int (*mlx5_flow_action_destroy_t)
1826d7cfcdddSAndrey Vesnovaty 				(struct rte_eth_dev *dev,
18274b61b877SBing Zhao 				 struct rte_flow_action_handle *action,
1828d7cfcdddSAndrey Vesnovaty 				 struct rte_flow_error *error);
1829d7cfcdddSAndrey Vesnovaty typedef int (*mlx5_flow_action_update_t)
1830d7cfcdddSAndrey Vesnovaty 			(struct rte_eth_dev *dev,
18314b61b877SBing Zhao 			 struct rte_flow_action_handle *action,
18324b61b877SBing Zhao 			 const void *update,
1833d7cfcdddSAndrey Vesnovaty 			 struct rte_flow_error *error);
183481073e1fSMatan Azrad typedef int (*mlx5_flow_action_query_t)
183581073e1fSMatan Azrad 			(struct rte_eth_dev *dev,
18364b61b877SBing Zhao 			 const struct rte_flow_action_handle *action,
183781073e1fSMatan Azrad 			 void *data,
183881073e1fSMatan Azrad 			 struct rte_flow_error *error);
183915896eafSGregory Etelson typedef int (*mlx5_flow_action_query_update_t)
184015896eafSGregory Etelson 			(struct rte_eth_dev *dev,
184115896eafSGregory Etelson 			 struct rte_flow_action_handle *handle,
184215896eafSGregory Etelson 			 const void *update, void *data,
184315896eafSGregory Etelson 			 enum rte_flow_query_update_mode qu_mode,
184415896eafSGregory Etelson 			 struct rte_flow_error *error);
18453564e928SGregory Etelson typedef struct rte_flow_action_list_handle *
18463564e928SGregory Etelson (*mlx5_flow_action_list_handle_create_t)
18473564e928SGregory Etelson 			(struct rte_eth_dev *dev,
18483564e928SGregory Etelson 			 const struct rte_flow_indir_action_conf *conf,
18493564e928SGregory Etelson 			 const struct rte_flow_action *actions,
18503564e928SGregory Etelson 			 struct rte_flow_error *error);
18513564e928SGregory Etelson typedef int
18523564e928SGregory Etelson (*mlx5_flow_action_list_handle_destroy_t)
18533564e928SGregory Etelson 			(struct rte_eth_dev *dev,
18543564e928SGregory Etelson 			 struct rte_flow_action_list_handle *handle,
18553564e928SGregory Etelson 			 struct rte_flow_error *error);
185623f627e0SBing Zhao typedef int (*mlx5_flow_sync_domain_t)
185723f627e0SBing Zhao 			(struct rte_eth_dev *dev,
185823f627e0SBing Zhao 			 uint32_t domains,
185923f627e0SBing Zhao 			 uint32_t flags);
1860afb4aa4fSLi Zhang typedef int (*mlx5_flow_validate_mtr_acts_t)
1861afb4aa4fSLi Zhang 			(struct rte_eth_dev *dev,
1862afb4aa4fSLi Zhang 			 const struct rte_flow_action *actions[RTE_COLORS],
1863afb4aa4fSLi Zhang 			 struct rte_flow_attr *attr,
1864afb4aa4fSLi Zhang 			 bool *is_rss,
1865afb4aa4fSLi Zhang 			 uint8_t *domain_bitmap,
18664b7bf3ffSBing Zhao 			 uint8_t *policy_mode,
1867afb4aa4fSLi Zhang 			 struct rte_mtr_error *error);
1868afb4aa4fSLi Zhang typedef int (*mlx5_flow_create_mtr_acts_t)
1869afb4aa4fSLi Zhang 			(struct rte_eth_dev *dev,
1870afb4aa4fSLi Zhang 		      struct mlx5_flow_meter_policy *mtr_policy,
1871afb4aa4fSLi Zhang 		      const struct rte_flow_action *actions[RTE_COLORS],
18726431068dSSean Zhang 		      struct rte_flow_attr *attr,
1873afb4aa4fSLi Zhang 		      struct rte_mtr_error *error);
1874afb4aa4fSLi Zhang typedef void (*mlx5_flow_destroy_mtr_acts_t)
1875afb4aa4fSLi Zhang 			(struct rte_eth_dev *dev,
1876afb4aa4fSLi Zhang 		      struct mlx5_flow_meter_policy *mtr_policy);
1877afb4aa4fSLi Zhang typedef int (*mlx5_flow_create_policy_rules_t)
1878afb4aa4fSLi Zhang 			(struct rte_eth_dev *dev,
1879afb4aa4fSLi Zhang 			  struct mlx5_flow_meter_policy *mtr_policy);
1880afb4aa4fSLi Zhang typedef void (*mlx5_flow_destroy_policy_rules_t)
1881afb4aa4fSLi Zhang 			(struct rte_eth_dev *dev,
1882afb4aa4fSLi Zhang 			  struct mlx5_flow_meter_policy *mtr_policy);
1883afb4aa4fSLi Zhang typedef int (*mlx5_flow_create_def_policy_t)
1884afb4aa4fSLi Zhang 			(struct rte_eth_dev *dev);
1885afb4aa4fSLi Zhang typedef void (*mlx5_flow_destroy_def_policy_t)
1886afb4aa4fSLi Zhang 			(struct rte_eth_dev *dev);
1887c5042f93SDmitry Kozlyuk typedef int (*mlx5_flow_discover_priorities_t)
1888c5042f93SDmitry Kozlyuk 			(struct rte_eth_dev *dev,
1889c5042f93SDmitry Kozlyuk 			 const uint16_t *vprio, int vprio_n);
1890db25cadcSViacheslav Ovsiienko typedef struct rte_flow_item_flex_handle *(*mlx5_flow_item_create_t)
1891db25cadcSViacheslav Ovsiienko 			(struct rte_eth_dev *dev,
1892db25cadcSViacheslav Ovsiienko 			 const struct rte_flow_item_flex_conf *conf,
1893db25cadcSViacheslav Ovsiienko 			 struct rte_flow_error *error);
1894db25cadcSViacheslav Ovsiienko typedef int (*mlx5_flow_item_release_t)
1895db25cadcSViacheslav Ovsiienko 			(struct rte_eth_dev *dev,
1896db25cadcSViacheslav Ovsiienko 			 const struct rte_flow_item_flex_handle *handle,
1897db25cadcSViacheslav Ovsiienko 			 struct rte_flow_error *error);
1898db25cadcSViacheslav Ovsiienko typedef int (*mlx5_flow_item_update_t)
1899db25cadcSViacheslav Ovsiienko 			(struct rte_eth_dev *dev,
1900db25cadcSViacheslav Ovsiienko 			 const struct rte_flow_item_flex_handle *handle,
1901db25cadcSViacheslav Ovsiienko 			 const struct rte_flow_item_flex_conf *conf,
1902db25cadcSViacheslav Ovsiienko 			 struct rte_flow_error *error);
1903b401400dSSuanming Mou typedef int (*mlx5_flow_info_get_t)
1904b401400dSSuanming Mou 			(struct rte_eth_dev *dev,
1905b401400dSSuanming Mou 			 struct rte_flow_port_info *port_info,
1906b401400dSSuanming Mou 			 struct rte_flow_queue_info *queue_info,
1907b401400dSSuanming Mou 			 struct rte_flow_error *error);
1908b401400dSSuanming Mou typedef int (*mlx5_flow_port_configure_t)
1909b401400dSSuanming Mou 			(struct rte_eth_dev *dev,
1910b401400dSSuanming Mou 			 const struct rte_flow_port_attr *port_attr,
1911b401400dSSuanming Mou 			 uint16_t nb_queue,
1912b401400dSSuanming Mou 			 const struct rte_flow_queue_attr *queue_attr[],
1913b401400dSSuanming Mou 			 struct rte_flow_error *err);
191424865366SAlexander Kozyrev typedef int (*mlx5_flow_pattern_validate_t)
191524865366SAlexander Kozyrev 			(struct rte_eth_dev *dev,
191624865366SAlexander Kozyrev 			 const struct rte_flow_pattern_template_attr *attr,
191724865366SAlexander Kozyrev 			 const struct rte_flow_item items[],
191824865366SAlexander Kozyrev 			 struct rte_flow_error *error);
191942431df9SSuanming Mou typedef struct rte_flow_pattern_template *(*mlx5_flow_pattern_template_create_t)
192042431df9SSuanming Mou 			(struct rte_eth_dev *dev,
192142431df9SSuanming Mou 			 const struct rte_flow_pattern_template_attr *attr,
192242431df9SSuanming Mou 			 const struct rte_flow_item items[],
192342431df9SSuanming Mou 			 struct rte_flow_error *error);
192442431df9SSuanming Mou typedef int (*mlx5_flow_pattern_template_destroy_t)
192542431df9SSuanming Mou 			(struct rte_eth_dev *dev,
192642431df9SSuanming Mou 			 struct rte_flow_pattern_template *template,
192742431df9SSuanming Mou 			 struct rte_flow_error *error);
192824865366SAlexander Kozyrev typedef int (*mlx5_flow_actions_validate_t)
192924865366SAlexander Kozyrev 			(struct rte_eth_dev *dev,
193024865366SAlexander Kozyrev 			 const struct rte_flow_actions_template_attr *attr,
193124865366SAlexander Kozyrev 			 const struct rte_flow_action actions[],
193224865366SAlexander Kozyrev 			 const struct rte_flow_action masks[],
193324865366SAlexander Kozyrev 			 struct rte_flow_error *error);
1934836b5c9bSSuanming Mou typedef struct rte_flow_actions_template *(*mlx5_flow_actions_template_create_t)
1935836b5c9bSSuanming Mou 			(struct rte_eth_dev *dev,
1936836b5c9bSSuanming Mou 			 const struct rte_flow_actions_template_attr *attr,
1937836b5c9bSSuanming Mou 			 const struct rte_flow_action actions[],
1938836b5c9bSSuanming Mou 			 const struct rte_flow_action masks[],
1939836b5c9bSSuanming Mou 			 struct rte_flow_error *error);
1940836b5c9bSSuanming Mou typedef int (*mlx5_flow_actions_template_destroy_t)
1941836b5c9bSSuanming Mou 			(struct rte_eth_dev *dev,
1942836b5c9bSSuanming Mou 			 struct rte_flow_actions_template *template,
1943836b5c9bSSuanming Mou 			 struct rte_flow_error *error);
1944d1559d66SSuanming Mou typedef struct rte_flow_template_table *(*mlx5_flow_table_create_t)
1945d1559d66SSuanming Mou 		(struct rte_eth_dev *dev,
1946d1559d66SSuanming Mou 		 const struct rte_flow_template_table_attr *attr,
1947d1559d66SSuanming Mou 		 struct rte_flow_pattern_template *item_templates[],
1948d1559d66SSuanming Mou 		 uint8_t nb_item_templates,
1949d1559d66SSuanming Mou 		 struct rte_flow_actions_template *action_templates[],
1950d1559d66SSuanming Mou 		 uint8_t nb_action_templates,
1951d1559d66SSuanming Mou 		 struct rte_flow_error *error);
1952d1559d66SSuanming Mou typedef int (*mlx5_flow_table_destroy_t)
1953d1559d66SSuanming Mou 			(struct rte_eth_dev *dev,
1954d1559d66SSuanming Mou 			 struct rte_flow_template_table *table,
1955d1559d66SSuanming Mou 			 struct rte_flow_error *error);
19568ce638efSTomer Shmilovich typedef int (*mlx5_flow_group_set_miss_actions_t)
19578ce638efSTomer Shmilovich 			(struct rte_eth_dev *dev,
19588ce638efSTomer Shmilovich 			 uint32_t group_id,
19598ce638efSTomer Shmilovich 			 const struct rte_flow_group_attr *attr,
19608ce638efSTomer Shmilovich 			 const struct rte_flow_action actions[],
19618ce638efSTomer Shmilovich 			 struct rte_flow_error *error);
1962c40c061aSSuanming Mou typedef struct rte_flow *(*mlx5_flow_async_flow_create_t)
1963c40c061aSSuanming Mou 			(struct rte_eth_dev *dev,
1964c40c061aSSuanming Mou 			 uint32_t queue,
1965c40c061aSSuanming Mou 			 const struct rte_flow_op_attr *attr,
1966c40c061aSSuanming Mou 			 struct rte_flow_template_table *table,
1967c40c061aSSuanming Mou 			 const struct rte_flow_item items[],
1968c40c061aSSuanming Mou 			 uint8_t pattern_template_index,
1969c40c061aSSuanming Mou 			 const struct rte_flow_action actions[],
1970c40c061aSSuanming Mou 			 uint8_t action_template_index,
1971c40c061aSSuanming Mou 			 void *user_data,
1972c40c061aSSuanming Mou 			 struct rte_flow_error *error);
197360db7673SAlexander Kozyrev typedef struct rte_flow *(*mlx5_flow_async_flow_create_by_index_t)
197460db7673SAlexander Kozyrev 			(struct rte_eth_dev *dev,
197560db7673SAlexander Kozyrev 			 uint32_t queue,
197660db7673SAlexander Kozyrev 			 const struct rte_flow_op_attr *attr,
197760db7673SAlexander Kozyrev 			 struct rte_flow_template_table *table,
197860db7673SAlexander Kozyrev 			 uint32_t rule_index,
197960db7673SAlexander Kozyrev 			 const struct rte_flow_action actions[],
198060db7673SAlexander Kozyrev 			 uint8_t action_template_index,
198160db7673SAlexander Kozyrev 			 void *user_data,
198260db7673SAlexander Kozyrev 			 struct rte_flow_error *error);
198363296851SAlexander Kozyrev typedef int (*mlx5_flow_async_flow_update_t)
198463296851SAlexander Kozyrev 			(struct rte_eth_dev *dev,
198563296851SAlexander Kozyrev 			 uint32_t queue,
198663296851SAlexander Kozyrev 			 const struct rte_flow_op_attr *attr,
198763296851SAlexander Kozyrev 			 struct rte_flow *flow,
198863296851SAlexander Kozyrev 			 const struct rte_flow_action actions[],
198963296851SAlexander Kozyrev 			 uint8_t action_template_index,
199063296851SAlexander Kozyrev 			 void *user_data,
199163296851SAlexander Kozyrev 			 struct rte_flow_error *error);
1992c40c061aSSuanming Mou typedef int (*mlx5_flow_async_flow_destroy_t)
1993c40c061aSSuanming Mou 			(struct rte_eth_dev *dev,
1994c40c061aSSuanming Mou 			 uint32_t queue,
1995c40c061aSSuanming Mou 			 const struct rte_flow_op_attr *attr,
1996c40c061aSSuanming Mou 			 struct rte_flow *flow,
1997c40c061aSSuanming Mou 			 void *user_data,
1998c40c061aSSuanming Mou 			 struct rte_flow_error *error);
1999c40c061aSSuanming Mou typedef int (*mlx5_flow_pull_t)
2000c40c061aSSuanming Mou 			(struct rte_eth_dev *dev,
2001c40c061aSSuanming Mou 			 uint32_t queue,
2002c40c061aSSuanming Mou 			 struct rte_flow_op_result res[],
2003c40c061aSSuanming Mou 			 uint16_t n_res,
2004c40c061aSSuanming Mou 			 struct rte_flow_error *error);
2005c40c061aSSuanming Mou typedef int (*mlx5_flow_push_t)
2006c40c061aSSuanming Mou 			(struct rte_eth_dev *dev,
2007c40c061aSSuanming Mou 			 uint32_t queue,
2008c40c061aSSuanming Mou 			 struct rte_flow_error *error);
200981073e1fSMatan Azrad 
20107ab3962dSSuanming Mou typedef struct rte_flow_action_handle *(*mlx5_flow_async_action_handle_create_t)
20117ab3962dSSuanming Mou 			(struct rte_eth_dev *dev,
20127ab3962dSSuanming Mou 			 uint32_t queue,
20137ab3962dSSuanming Mou 			 const struct rte_flow_op_attr *attr,
20147ab3962dSSuanming Mou 			 const struct rte_flow_indir_action_conf *conf,
20157ab3962dSSuanming Mou 			 const struct rte_flow_action *action,
20167ab3962dSSuanming Mou 			 void *user_data,
20177ab3962dSSuanming Mou 			 struct rte_flow_error *error);
20187ab3962dSSuanming Mou 
20197ab3962dSSuanming Mou typedef int (*mlx5_flow_async_action_handle_update_t)
20207ab3962dSSuanming Mou 			(struct rte_eth_dev *dev,
20217ab3962dSSuanming Mou 			 uint32_t queue,
20227ab3962dSSuanming Mou 			 const struct rte_flow_op_attr *attr,
20237ab3962dSSuanming Mou 			 struct rte_flow_action_handle *handle,
20247ab3962dSSuanming Mou 			 const void *update,
20257ab3962dSSuanming Mou 			 void *user_data,
20267ab3962dSSuanming Mou 			 struct rte_flow_error *error);
202715896eafSGregory Etelson typedef int (*mlx5_flow_async_action_handle_query_update_t)
202815896eafSGregory Etelson 			(struct rte_eth_dev *dev, uint32_t queue_id,
202915896eafSGregory Etelson 			 const struct rte_flow_op_attr *op_attr,
203015896eafSGregory Etelson 			 struct rte_flow_action_handle *action_handle,
203115896eafSGregory Etelson 			 const void *update, void *data,
203215896eafSGregory Etelson 			 enum rte_flow_query_update_mode qu_mode,
203315896eafSGregory Etelson 			 void *user_data, struct rte_flow_error *error);
2034478ba4bbSSuanming Mou typedef int (*mlx5_flow_async_action_handle_query_t)
2035478ba4bbSSuanming Mou 			(struct rte_eth_dev *dev,
2036478ba4bbSSuanming Mou 			 uint32_t queue,
2037478ba4bbSSuanming Mou 			 const struct rte_flow_op_attr *attr,
2038478ba4bbSSuanming Mou 			 const struct rte_flow_action_handle *handle,
2039478ba4bbSSuanming Mou 			 void *data,
2040478ba4bbSSuanming Mou 			 void *user_data,
2041478ba4bbSSuanming Mou 			 struct rte_flow_error *error);
2042478ba4bbSSuanming Mou 
20437ab3962dSSuanming Mou typedef int (*mlx5_flow_async_action_handle_destroy_t)
20447ab3962dSSuanming Mou 			(struct rte_eth_dev *dev,
20457ab3962dSSuanming Mou 			 uint32_t queue,
20467ab3962dSSuanming Mou 			 const struct rte_flow_op_attr *attr,
20477ab3962dSSuanming Mou 			 struct rte_flow_action_handle *handle,
20487ab3962dSSuanming Mou 			 void *user_data,
20497ab3962dSSuanming Mou 			 struct rte_flow_error *error);
20503564e928SGregory Etelson typedef struct rte_flow_action_list_handle *
20513564e928SGregory Etelson (*mlx5_flow_async_action_list_handle_create_t)
20523564e928SGregory Etelson 			(struct rte_eth_dev *dev, uint32_t queue_id,
20533564e928SGregory Etelson 			 const struct rte_flow_op_attr *attr,
20543564e928SGregory Etelson 			 const struct rte_flow_indir_action_conf *conf,
20553564e928SGregory Etelson 			 const struct rte_flow_action *actions,
20563564e928SGregory Etelson 			 void *user_data, struct rte_flow_error *error);
20573564e928SGregory Etelson typedef int
20583564e928SGregory Etelson (*mlx5_flow_async_action_list_handle_destroy_t)
20593564e928SGregory Etelson 			(struct rte_eth_dev *dev, uint32_t queue_id,
20603564e928SGregory Etelson 			 const struct rte_flow_op_attr *op_attr,
20613564e928SGregory Etelson 			 struct rte_flow_action_list_handle *action_handle,
20623564e928SGregory Etelson 			 void *user_data, struct rte_flow_error *error);
2063e26f50adSGregory Etelson typedef int
2064e26f50adSGregory Etelson (*mlx5_flow_action_list_handle_query_update_t)
2065e26f50adSGregory Etelson 			(struct rte_eth_dev *dev,
2066e26f50adSGregory Etelson 			const struct rte_flow_action_list_handle *handle,
2067e26f50adSGregory Etelson 			const void **update, void **query,
2068e26f50adSGregory Etelson 			enum rte_flow_query_update_mode mode,
2069e26f50adSGregory Etelson 			struct rte_flow_error *error);
2070e26f50adSGregory Etelson typedef int
2071e26f50adSGregory Etelson (*mlx5_flow_async_action_list_handle_query_update_t)
2072e26f50adSGregory Etelson 			(struct rte_eth_dev *dev, uint32_t queue_id,
2073e26f50adSGregory Etelson 			const struct rte_flow_op_attr *attr,
2074e26f50adSGregory Etelson 			const struct rte_flow_action_list_handle *handle,
2075e26f50adSGregory Etelson 			const void **update, void **query,
2076e26f50adSGregory Etelson 			enum rte_flow_query_update_mode mode,
2077e26f50adSGregory Etelson 			void *user_data, struct rte_flow_error *error);
2078*6c991cd9SOri Kam typedef int
2079*6c991cd9SOri Kam (*mlx5_flow_calc_table_hash_t)
2080*6c991cd9SOri Kam 			(struct rte_eth_dev *dev,
2081*6c991cd9SOri Kam 			 const struct rte_flow_template_table *table,
2082*6c991cd9SOri Kam 			 const struct rte_flow_item pattern[],
2083*6c991cd9SOri Kam 			 uint8_t pattern_template_index,
2084*6c991cd9SOri Kam 			 uint32_t *hash, struct rte_flow_error *error);
20857ab3962dSSuanming Mou 
208684c406e7SOri Kam struct mlx5_flow_driver_ops {
208784c406e7SOri Kam 	mlx5_flow_validate_t validate;
208884c406e7SOri Kam 	mlx5_flow_prepare_t prepare;
208984c406e7SOri Kam 	mlx5_flow_translate_t translate;
209084c406e7SOri Kam 	mlx5_flow_apply_t apply;
209184c406e7SOri Kam 	mlx5_flow_remove_t remove;
209284c406e7SOri Kam 	mlx5_flow_destroy_t destroy;
2093684dafe7SMoti Haimovsky 	mlx5_flow_query_t query;
209446a5e6bcSSuanming Mou 	mlx5_flow_create_mtr_tbls_t create_mtr_tbls;
209546a5e6bcSSuanming Mou 	mlx5_flow_destroy_mtr_tbls_t destroy_mtr_tbls;
2096afb4aa4fSLi Zhang 	mlx5_flow_destroy_mtr_drop_tbls_t destroy_mtr_drop_tbls;
2097e6100c7bSLi Zhang 	mlx5_flow_mtr_alloc_t create_meter;
2098e6100c7bSLi Zhang 	mlx5_flow_mtr_free_t free_meter;
2099afb4aa4fSLi Zhang 	mlx5_flow_validate_mtr_acts_t validate_mtr_acts;
2100afb4aa4fSLi Zhang 	mlx5_flow_create_mtr_acts_t create_mtr_acts;
2101afb4aa4fSLi Zhang 	mlx5_flow_destroy_mtr_acts_t destroy_mtr_acts;
2102afb4aa4fSLi Zhang 	mlx5_flow_create_policy_rules_t create_policy_rules;
2103afb4aa4fSLi Zhang 	mlx5_flow_destroy_policy_rules_t destroy_policy_rules;
2104afb4aa4fSLi Zhang 	mlx5_flow_create_def_policy_t create_def_policy;
2105afb4aa4fSLi Zhang 	mlx5_flow_destroy_def_policy_t destroy_def_policy;
2106fc6ce56bSLi Zhang 	mlx5_flow_meter_sub_policy_rss_prepare_t meter_sub_policy_rss_prepare;
21078e5c9feaSShun Hao 	mlx5_flow_meter_hierarchy_rule_create_t meter_hierarchy_rule_create;
2108ec962badSLi Zhang 	mlx5_flow_destroy_sub_policy_with_rxq_t destroy_sub_policy_with_rxq;
2109e189f55cSSuanming Mou 	mlx5_flow_counter_alloc_t counter_alloc;
2110e189f55cSSuanming Mou 	mlx5_flow_counter_free_t counter_free;
2111e189f55cSSuanming Mou 	mlx5_flow_counter_query_t counter_query;
2112fa2d01c8SDong Zhou 	mlx5_flow_get_aged_flows_t get_aged_flows;
211304a4de75SMichael Baum 	mlx5_flow_get_q_aged_flows_t get_q_aged_flows;
2114d7cfcdddSAndrey Vesnovaty 	mlx5_flow_action_validate_t action_validate;
2115d7cfcdddSAndrey Vesnovaty 	mlx5_flow_action_create_t action_create;
2116d7cfcdddSAndrey Vesnovaty 	mlx5_flow_action_destroy_t action_destroy;
2117d7cfcdddSAndrey Vesnovaty 	mlx5_flow_action_update_t action_update;
211881073e1fSMatan Azrad 	mlx5_flow_action_query_t action_query;
211915896eafSGregory Etelson 	mlx5_flow_action_query_update_t action_query_update;
21203564e928SGregory Etelson 	mlx5_flow_action_list_handle_create_t action_list_handle_create;
21213564e928SGregory Etelson 	mlx5_flow_action_list_handle_destroy_t action_list_handle_destroy;
212223f627e0SBing Zhao 	mlx5_flow_sync_domain_t sync_domain;
2123c5042f93SDmitry Kozlyuk 	mlx5_flow_discover_priorities_t discover_priorities;
2124db25cadcSViacheslav Ovsiienko 	mlx5_flow_item_create_t item_create;
2125db25cadcSViacheslav Ovsiienko 	mlx5_flow_item_release_t item_release;
2126db25cadcSViacheslav Ovsiienko 	mlx5_flow_item_update_t item_update;
2127b401400dSSuanming Mou 	mlx5_flow_info_get_t info_get;
2128b401400dSSuanming Mou 	mlx5_flow_port_configure_t configure;
212924865366SAlexander Kozyrev 	mlx5_flow_pattern_validate_t pattern_validate;
213042431df9SSuanming Mou 	mlx5_flow_pattern_template_create_t pattern_template_create;
213142431df9SSuanming Mou 	mlx5_flow_pattern_template_destroy_t pattern_template_destroy;
213224865366SAlexander Kozyrev 	mlx5_flow_actions_validate_t actions_validate;
2133836b5c9bSSuanming Mou 	mlx5_flow_actions_template_create_t actions_template_create;
2134836b5c9bSSuanming Mou 	mlx5_flow_actions_template_destroy_t actions_template_destroy;
2135d1559d66SSuanming Mou 	mlx5_flow_table_create_t template_table_create;
2136d1559d66SSuanming Mou 	mlx5_flow_table_destroy_t template_table_destroy;
21378ce638efSTomer Shmilovich 	mlx5_flow_group_set_miss_actions_t group_set_miss_actions;
2138c40c061aSSuanming Mou 	mlx5_flow_async_flow_create_t async_flow_create;
213960db7673SAlexander Kozyrev 	mlx5_flow_async_flow_create_by_index_t async_flow_create_by_index;
214063296851SAlexander Kozyrev 	mlx5_flow_async_flow_update_t async_flow_update;
2141c40c061aSSuanming Mou 	mlx5_flow_async_flow_destroy_t async_flow_destroy;
2142c40c061aSSuanming Mou 	mlx5_flow_pull_t pull;
2143c40c061aSSuanming Mou 	mlx5_flow_push_t push;
21447ab3962dSSuanming Mou 	mlx5_flow_async_action_handle_create_t async_action_create;
21457ab3962dSSuanming Mou 	mlx5_flow_async_action_handle_update_t async_action_update;
214615896eafSGregory Etelson 	mlx5_flow_async_action_handle_query_update_t async_action_query_update;
2147478ba4bbSSuanming Mou 	mlx5_flow_async_action_handle_query_t async_action_query;
21487ab3962dSSuanming Mou 	mlx5_flow_async_action_handle_destroy_t async_action_destroy;
21493564e928SGregory Etelson 	mlx5_flow_async_action_list_handle_create_t
21503564e928SGregory Etelson 		async_action_list_handle_create;
21513564e928SGregory Etelson 	mlx5_flow_async_action_list_handle_destroy_t
21523564e928SGregory Etelson 		async_action_list_handle_destroy;
2153e26f50adSGregory Etelson 	mlx5_flow_action_list_handle_query_update_t
2154e26f50adSGregory Etelson 		action_list_handle_query_update;
2155e26f50adSGregory Etelson 	mlx5_flow_async_action_list_handle_query_update_t
2156e26f50adSGregory Etelson 		async_action_list_handle_query_update;
2157*6c991cd9SOri Kam 	mlx5_flow_calc_table_hash_t flow_calc_table_hash;
215884c406e7SOri Kam };
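
/*
 * Illustrative sketch (not part of the original header): each flow engine
 * (Verbs, DV, HW steering) fills one mlx5_flow_driver_ops instance and the
 * generic layer dispatches flow operations through it. A hypothetical
 * wrapper around the pattern_validate callback could look as follows; the
 * wrapper name and the NULL-callback policy are assumptions made only for
 * this example.
 */
static inline int
mlx5_example_ops_pattern_validate(struct rte_eth_dev *dev,
				  const struct mlx5_flow_driver_ops *fops,
				  const struct rte_flow_pattern_template_attr *attr,
				  const struct rte_flow_item items[],
				  struct rte_flow_error *error)
{
	/* Engines without template support may leave the callback unset. */
	if (fops->pattern_validate == NULL)
		return -1;
	return fops->pattern_validate(dev, attr, items, error);
}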
215984c406e7SOri Kam 
216084c406e7SOri Kam /* mlx5_flow.c */
216184c406e7SOri Kam 
216275a00812SSuanming Mou struct mlx5_flow_workspace *mlx5_flow_push_thread_workspace(void);
216375a00812SSuanming Mou void mlx5_flow_pop_thread_workspace(void);
21648bb81f26SXueming Li struct mlx5_flow_workspace *mlx5_flow_get_thread_workspace(void);
2165dc7c5e0aSGregory Etelson void mlx5_flow_workspace_gc_release(void);
2166dc7c5e0aSGregory Etelson 
21674ec6360dSGregory Etelson __extension__
21684ec6360dSGregory Etelson struct flow_grp_info {
21694ec6360dSGregory Etelson 	uint64_t external:1;
21704ec6360dSGregory Etelson 	uint64_t transfer:1;
21714ec6360dSGregory Etelson 	uint64_t fdb_def_rule:1;
21724ec6360dSGregory Etelson 	/* Force standard group translation. */
21734ec6360dSGregory Etelson 	uint64_t std_tbl_fix:1;
2174ae2927cdSJiawei Wang 	uint64_t skip_scale:2;
21754ec6360dSGregory Etelson };
21764ec6360dSGregory Etelson 
21774ec6360dSGregory Etelson static inline bool
21784ec6360dSGregory Etelson tunnel_use_standard_attr_group_translate
21798c5a231bSGregory Etelson 		    (const struct rte_eth_dev *dev,
21804ec6360dSGregory Etelson 		     const struct rte_flow_attr *attr,
21818c5a231bSGregory Etelson 		     const struct mlx5_flow_tunnel *tunnel,
21828c5a231bSGregory Etelson 		     enum mlx5_tof_rule_type tof_rule_type)
21834ec6360dSGregory Etelson {
21844ec6360dSGregory Etelson 	bool verdict;
21854ec6360dSGregory Etelson 
21864ec6360dSGregory Etelson 	if (!is_tunnel_offload_active(dev))
21874ec6360dSGregory Etelson 		/* no tunnel offload API */
21884ec6360dSGregory Etelson 		verdict = true;
21894ec6360dSGregory Etelson 	else if (tunnel) {
21904ec6360dSGregory Etelson 		/*
21914ec6360dSGregory Etelson 		 * OvS uses a jump to group 0 in the tunnel steer rule.
21924ec6360dSGregory Etelson 		 * If the tunnel steer rule starts from group 0
21934ec6360dSGregory Etelson 		 * (attr.group == 0), that group must be translated with the
21944ec6360dSGregory Etelson 		 * standard method. attr.group == 0 in a tunnel match rule is
21954ec6360dSGregory Etelson 		 * translated with the tunnel method.
21964ec6360dSGregory Etelson 		 */
21974ec6360dSGregory Etelson 		verdict = !attr->group &&
21988c5a231bSGregory Etelson 			  is_flow_tunnel_steer_rule(tof_rule_type);
21994ec6360dSGregory Etelson 	} else {
22004ec6360dSGregory Etelson 		/*
22014ec6360dSGregory Etelson 		 * Non-tunnel group translation uses the standard method
22024ec6360dSGregory Etelson 		 * for the root group only: attr.group == 0.
22034ec6360dSGregory Etelson 		 */
22044ec6360dSGregory Etelson 		verdict = !attr->group;
22054ec6360dSGregory Etelson 	}
22064ec6360dSGregory Etelson 
22074ec6360dSGregory Etelson 	return verdict;
22084ec6360dSGregory Etelson }
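
/*
 * Illustrative usage sketch (not part of the original header): the verdict
 * above is typically stored in flow_grp_info.std_tbl_fix and then drives
 * the group-to-table translation, roughly:
 *
 *	grp_info.std_tbl_fix = tunnel_use_standard_attr_group_translate
 *					(dev, attr, tunnel, tof_rule_type);
 *	ret = mlx5_flow_group_to_table(dev, tunnel, attr->group, &table,
 *				       &grp_info, error);
 *
 * The surrounding variables are assumed here for illustration only; see the
 * flow translation code for the actual call sites.
 */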
22094ec6360dSGregory Etelson 
2210e6100c7bSLi Zhang /**
2211e6100c7bSLi Zhang  * Get DV flow ASO meter by index.
2212e6100c7bSLi Zhang  *
2213e6100c7bSLi Zhang  * @param[in] priv
2214e6100c7bSLi Zhang  *   Pointer to the port private data structure.
2215e6100c7bSLi Zhang  * @param[in] idx
2216e6100c7bSLi Zhang  *   mlx5 flow ASO meter index in the container.
2217e6100c7bSLi Zhang  *
2218e6100c7bSLi Zhang  * @return
2219e6100c7bSLi Zhang  *   Pointer to the ASO meter, NULL otherwise.
2222e6100c7bSLi Zhang  */
2223e6100c7bSLi Zhang static inline struct mlx5_aso_mtr *
2224e6100c7bSLi Zhang mlx5_aso_meter_by_idx(struct mlx5_priv *priv, uint32_t idx)
2225e6100c7bSLi Zhang {
2226e6100c7bSLi Zhang 	struct mlx5_aso_mtr_pool *pool;
2227afb4aa4fSLi Zhang 	struct mlx5_aso_mtr_pools_mng *pools_mng =
2228afb4aa4fSLi Zhang 				&priv->sh->mtrmng->pools_mng;
2229e6100c7bSLi Zhang 
223024865366SAlexander Kozyrev 	if (priv->mtr_bulk.aso)
223124865366SAlexander Kozyrev 		return priv->mtr_bulk.aso + idx;
223248fbb0e9SAlexander Kozyrev 	/* Decrease to original index. */
223348fbb0e9SAlexander Kozyrev 	idx--;
2234afb4aa4fSLi Zhang 	MLX5_ASSERT(idx / MLX5_ASO_MTRS_PER_POOL < pools_mng->n);
22357797b0feSJiawei Wang 	rte_rwlock_read_lock(&pools_mng->resize_mtrwl);
2236afb4aa4fSLi Zhang 	pool = pools_mng->pools[idx / MLX5_ASO_MTRS_PER_POOL];
22377797b0feSJiawei Wang 	rte_rwlock_read_unlock(&pools_mng->resize_mtrwl);
2238e6100c7bSLi Zhang 	return &pool->mtrs[idx % MLX5_ASO_MTRS_PER_POOL];
2239e6100c7bSLi Zhang }
2240e6100c7bSLi Zhang 
224179f89527SGregory Etelson static __rte_always_inline const struct rte_flow_item *
224279f89527SGregory Etelson mlx5_find_end_item(const struct rte_flow_item *item)
224379f89527SGregory Etelson {
224479f89527SGregory Etelson 	for (; item->type != RTE_FLOW_ITEM_TYPE_END; item++);
224579f89527SGregory Etelson 	return item;
224679f89527SGregory Etelson }
224779f89527SGregory Etelson 
224879f89527SGregory Etelson static __rte_always_inline bool
224979f89527SGregory Etelson mlx5_validate_integrity_item(const struct rte_flow_item_integrity *item)
225079f89527SGregory Etelson {
225179f89527SGregory Etelson 	struct rte_flow_item_integrity test = *item;
225279f89527SGregory Etelson 	test.l3_ok = 0;
225379f89527SGregory Etelson 	test.l4_ok = 0;
225479f89527SGregory Etelson 	test.ipv4_csum_ok = 0;
225579f89527SGregory Etelson 	test.l4_csum_ok = 0;
225679f89527SGregory Etelson 	return (test.value == 0);
225779f89527SGregory Etelson }
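
/*
 * Illustrative usage sketch (not part of the original header): the helper
 * above returns true only when the integrity item requests nothing beyond
 * the l3_ok, l4_ok, ipv4_csum_ok and l4_csum_ok checks. A hypothetical
 * caller could filter unsupported requests as follows; the function name
 * is an assumption made only for this example.
 */
static inline bool
mlx5_example_integrity_mask_supported(const struct rte_flow_item *item)
{
	const struct rte_flow_item_integrity *mask = item->mask;

	/* Without an explicit mask there is nothing to reject here. */
	if (mask == NULL)
		return true;
	return mlx5_validate_integrity_item(mask);
}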
225879f89527SGregory Etelson 
22592db75e8bSBing Zhao /*
22604f74cb68SBing Zhao  * Get ASO CT action by device and index.
22612db75e8bSBing Zhao  *
22622db75e8bSBing Zhao  * @param[in] dev
22632db75e8bSBing Zhao  *   Pointer to the Ethernet device structure.
22642db75e8bSBing Zhao  * @param[in] idx
22652db75e8bSBing Zhao  *   Index to the ASO CT action.
22662db75e8bSBing Zhao  *
22672db75e8bSBing Zhao  * @return
22682db75e8bSBing Zhao  *   The specified ASO CT action pointer.
22692db75e8bSBing Zhao  */
22702db75e8bSBing Zhao static inline struct mlx5_aso_ct_action *
22714f74cb68SBing Zhao flow_aso_ct_get_by_dev_idx(struct rte_eth_dev *dev, uint32_t idx)
22722db75e8bSBing Zhao {
22732db75e8bSBing Zhao 	struct mlx5_priv *priv = dev->data->dev_private;
22742db75e8bSBing Zhao 	struct mlx5_aso_ct_pools_mng *mng = priv->sh->ct_mng;
22752db75e8bSBing Zhao 	struct mlx5_aso_ct_pool *pool;
22762db75e8bSBing Zhao 
22772db75e8bSBing Zhao 	idx--;
22782db75e8bSBing Zhao 	MLX5_ASSERT((idx / MLX5_ASO_CT_ACTIONS_PER_POOL) < mng->n);
22792db75e8bSBing Zhao 	/* Bit operation AND could be used. */
22802db75e8bSBing Zhao 	rte_rwlock_read_lock(&mng->resize_rwl);
22812db75e8bSBing Zhao 	pool = mng->pools[idx / MLX5_ASO_CT_ACTIONS_PER_POOL];
22822db75e8bSBing Zhao 	rte_rwlock_read_unlock(&mng->resize_rwl);
22832db75e8bSBing Zhao 	return &pool->actions[idx % MLX5_ASO_CT_ACTIONS_PER_POOL];
22842db75e8bSBing Zhao }
22852db75e8bSBing Zhao 
22864f74cb68SBing Zhao /*
22874f74cb68SBing Zhao  * Get ASO CT action by owner & index.
22884f74cb68SBing Zhao  *
22894f74cb68SBing Zhao  * @param[in] dev
22904f74cb68SBing Zhao  *   Pointer to the Ethernet device structure.
22914f74cb68SBing Zhao  * @param[in] own_idx
22924f74cb68SBing Zhao  *   Combined owner port ID and index of the ASO CT action.
22934f74cb68SBing Zhao  *
22944f74cb68SBing Zhao  * @return
22954f74cb68SBing Zhao  *   The specified ASO CT action pointer.
22964f74cb68SBing Zhao  */
22974f74cb68SBing Zhao static inline struct mlx5_aso_ct_action *
22984f74cb68SBing Zhao flow_aso_ct_get_by_idx(struct rte_eth_dev *dev, uint32_t own_idx)
22994f74cb68SBing Zhao {
23004f74cb68SBing Zhao 	struct mlx5_priv *priv = dev->data->dev_private;
23014f74cb68SBing Zhao 	struct mlx5_aso_ct_action *ct;
23024f74cb68SBing Zhao 	uint16_t owner = (uint16_t)MLX5_INDIRECT_ACT_CT_GET_OWNER(own_idx);
23034f74cb68SBing Zhao 	uint32_t idx = MLX5_INDIRECT_ACT_CT_GET_IDX(own_idx);
23044f74cb68SBing Zhao 
23054f74cb68SBing Zhao 	if (owner == PORT_ID(priv)) {
23064f74cb68SBing Zhao 		ct = flow_aso_ct_get_by_dev_idx(dev, idx);
23074f74cb68SBing Zhao 	} else {
23084f74cb68SBing Zhao 		struct rte_eth_dev *owndev = &rte_eth_devices[owner];
23094f74cb68SBing Zhao 
23104f74cb68SBing Zhao 		MLX5_ASSERT(owner < RTE_MAX_ETHPORTS);
23114f74cb68SBing Zhao 		if (dev->data->dev_started != 1)
23124f74cb68SBing Zhao 			return NULL;
23134f74cb68SBing Zhao 		ct = flow_aso_ct_get_by_dev_idx(owndev, idx);
23144f74cb68SBing Zhao 		if (ct->peer != PORT_ID(priv))
23154f74cb68SBing Zhao 			return NULL;
23164f74cb68SBing Zhao 	}
23174f74cb68SBing Zhao 	return ct;
23184f74cb68SBing Zhao }
23194f74cb68SBing Zhao 
2320985b4792SGregory Etelson static inline uint16_t
2321985b4792SGregory Etelson mlx5_translate_tunnel_etypes(uint64_t pattern_flags)
2322985b4792SGregory Etelson {
2323985b4792SGregory Etelson 	if (pattern_flags & MLX5_FLOW_LAYER_INNER_L2)
2324985b4792SGregory Etelson 		return RTE_ETHER_TYPE_TEB;
2325985b4792SGregory Etelson 	else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV4)
2326985b4792SGregory Etelson 		return RTE_ETHER_TYPE_IPV4;
2327985b4792SGregory Etelson 	else if (pattern_flags & MLX5_FLOW_LAYER_INNER_L3_IPV6)
2328985b4792SGregory Etelson 		return RTE_ETHER_TYPE_IPV6;
2329985b4792SGregory Etelson 	else if (pattern_flags & MLX5_FLOW_LAYER_MPLS)
2330985b4792SGregory Etelson 		return RTE_ETHER_TYPE_MPLS;
2331985b4792SGregory Etelson 	return 0;
2332985b4792SGregory Etelson }
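
/*
 * Example (illustrative, not part of the original header): a pattern whose
 * flags include MLX5_FLOW_LAYER_INNER_L2 maps to RTE_ETHER_TYPE_TEB
 * (0x6558), an inner IPv4 layer maps to RTE_ETHER_TYPE_IPV4 (0x0800) and an
 * inner IPv6 layer to RTE_ETHER_TYPE_IPV6 (0x86DD); patterns without a
 * recognized inner layer yield 0.
 */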
2333985b4792SGregory Etelson 
2334c40c061aSSuanming Mou int flow_hw_q_flow_flush(struct rte_eth_dev *dev,
2335c40c061aSSuanming Mou 			 struct rte_flow_error *error);
233675a00812SSuanming Mou 
233775a00812SSuanming Mou /*
233875a00812SSuanming Mou  * Convert an rte_color value to the corresponding mlx5 color.
233975a00812SSuanming Mou  *
234075a00812SSuanming Mou  * @param[in] rcol
234175a00812SSuanming Mou  *   RTE color value to convert.
234275a00812SSuanming Mou  *
234375a00812SSuanming Mou  * @return
234475a00812SSuanming Mou  *   mlx5 color.
234575a00812SSuanming Mou  */
234675a00812SSuanming Mou static inline int
234775a00812SSuanming Mou rte_col_2_mlx5_col(enum rte_color rcol)
234875a00812SSuanming Mou {
234975a00812SSuanming Mou 	switch (rcol) {
235075a00812SSuanming Mou 	case RTE_COLOR_GREEN:
235175a00812SSuanming Mou 		return MLX5_FLOW_COLOR_GREEN;
235275a00812SSuanming Mou 	case RTE_COLOR_YELLOW:
235375a00812SSuanming Mou 		return MLX5_FLOW_COLOR_YELLOW;
235475a00812SSuanming Mou 	case RTE_COLOR_RED:
235575a00812SSuanming Mou 		return MLX5_FLOW_COLOR_RED;
235675a00812SSuanming Mou 	default:
235775a00812SSuanming Mou 		break;
235875a00812SSuanming Mou 	}
235975a00812SSuanming Mou 	return MLX5_FLOW_COLOR_UNDEFINED;
236075a00812SSuanming Mou }
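
/*
 * Illustrative usage sketch (not part of the original header): converting
 * every RTE meter color in one pass; the helper name is an assumption made
 * only for this example.
 */
static inline void
mlx5_example_translate_all_colors(int mlx5_colors[RTE_COLORS])
{
	int i;

	for (i = 0; i < RTE_COLORS; i++)
		mlx5_colors[i] = rte_col_2_mlx5_col((enum rte_color)i);
}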
236175a00812SSuanming Mou 
2362e9de8f33SJiawei Wang /**
2363e9de8f33SJiawei Wang  * Indicates whether the flow source vport is a representor port.
2364e9de8f33SJiawei Wang  *
2365e9de8f33SJiawei Wang  * @param[in] priv
2366e9de8f33SJiawei Wang  *   Pointer to device private context structure.
2367e9de8f33SJiawei Wang  * @param[in] act_priv
2368e9de8f33SJiawei Wang  *   Pointer to the actual device private context structure, if any.
2369e9de8f33SJiawei Wang  *
2370e9de8f33SJiawei Wang  * @return
2371e9de8f33SJiawei Wang  *   True when the flow source vport is a representor port, false otherwise.
2372e9de8f33SJiawei Wang  */
2373e9de8f33SJiawei Wang static inline bool
2374e9de8f33SJiawei Wang flow_source_vport_representor(struct mlx5_priv *priv, struct mlx5_priv *act_priv)
2375e9de8f33SJiawei Wang {
2376e9de8f33SJiawei Wang 	MLX5_ASSERT(priv);
2377e9de8f33SJiawei Wang 	return (!act_priv ? (priv->representor_id != UINT16_MAX) :
2378e9de8f33SJiawei Wang 		 (act_priv->representor_id != UINT16_MAX));
2379e9de8f33SJiawei Wang }
2380e9de8f33SJiawei Wang 
23819fa7c1cdSDariusz Sosnowski /* All types of Ethernet patterns used in control flow rules. */
23829fa7c1cdSDariusz Sosnowski enum mlx5_flow_ctrl_rx_eth_pattern_type {
23839fa7c1cdSDariusz Sosnowski 	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL = 0,
23849fa7c1cdSDariusz Sosnowski 	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_ALL_MCAST,
23859fa7c1cdSDariusz Sosnowski 	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST,
23869fa7c1cdSDariusz Sosnowski 	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_BCAST_VLAN,
23879fa7c1cdSDariusz Sosnowski 	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST,
23889fa7c1cdSDariusz Sosnowski 	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV4_MCAST_VLAN,
23899fa7c1cdSDariusz Sosnowski 	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST,
23909fa7c1cdSDariusz Sosnowski 	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_IPV6_MCAST_VLAN,
23919fa7c1cdSDariusz Sosnowski 	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC,
23929fa7c1cdSDariusz Sosnowski 	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_DMAC_VLAN,
23939fa7c1cdSDariusz Sosnowski 	MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_MAX,
23949fa7c1cdSDariusz Sosnowski };
23959fa7c1cdSDariusz Sosnowski 
23969fa7c1cdSDariusz Sosnowski /* All types of RSS actions used in control flow rules. */
23979fa7c1cdSDariusz Sosnowski enum mlx5_flow_ctrl_rx_expanded_rss_type {
23989fa7c1cdSDariusz Sosnowski 	MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_NON_IP = 0,
23999fa7c1cdSDariusz Sosnowski 	MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4,
24009fa7c1cdSDariusz Sosnowski 	MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_UDP,
24019fa7c1cdSDariusz Sosnowski 	MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV4_TCP,
24029fa7c1cdSDariusz Sosnowski 	MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6,
24039fa7c1cdSDariusz Sosnowski 	MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_UDP,
24049fa7c1cdSDariusz Sosnowski 	MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_IPV6_TCP,
24059fa7c1cdSDariusz Sosnowski 	MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX,
24069fa7c1cdSDariusz Sosnowski };
24079fa7c1cdSDariusz Sosnowski 
24089fa7c1cdSDariusz Sosnowski /**
24099fa7c1cdSDariusz Sosnowski  * Contains pattern template, template table and its attributes for a single
24109fa7c1cdSDariusz Sosnowski  * combination of Ethernet pattern and RSS action. Used to create control flow rules
24119fa7c1cdSDariusz Sosnowski  * with HWS.
24129fa7c1cdSDariusz Sosnowski  */
24139fa7c1cdSDariusz Sosnowski struct mlx5_flow_hw_ctrl_rx_table {
24149fa7c1cdSDariusz Sosnowski 	struct rte_flow_template_table_attr attr;
24159fa7c1cdSDariusz Sosnowski 	struct rte_flow_pattern_template *pt;
24169fa7c1cdSDariusz Sosnowski 	struct rte_flow_template_table *tbl;
24179fa7c1cdSDariusz Sosnowski };
24189fa7c1cdSDariusz Sosnowski 
24199fa7c1cdSDariusz Sosnowski /* Contains all templates required to create control flow rules with HWS. */
24209fa7c1cdSDariusz Sosnowski struct mlx5_flow_hw_ctrl_rx {
24219fa7c1cdSDariusz Sosnowski 	struct rte_flow_actions_template *rss[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX];
24229fa7c1cdSDariusz Sosnowski 	struct mlx5_flow_hw_ctrl_rx_table tables[MLX5_FLOW_HW_CTRL_RX_ETH_PATTERN_MAX]
24239fa7c1cdSDariusz Sosnowski 						[MLX5_FLOW_HW_CTRL_RX_EXPANDED_RSS_MAX];
24249fa7c1cdSDariusz Sosnowski };
24259fa7c1cdSDariusz Sosnowski 
24269fa7c1cdSDariusz Sosnowski #define MLX5_CTRL_PROMISCUOUS    (RTE_BIT32(0))
24279fa7c1cdSDariusz Sosnowski #define MLX5_CTRL_ALL_MULTICAST  (RTE_BIT32(1))
24289fa7c1cdSDariusz Sosnowski #define MLX5_CTRL_BROADCAST      (RTE_BIT32(2))
24299fa7c1cdSDariusz Sosnowski #define MLX5_CTRL_IPV4_MULTICAST (RTE_BIT32(3))
24309fa7c1cdSDariusz Sosnowski #define MLX5_CTRL_IPV6_MULTICAST (RTE_BIT32(4))
24319fa7c1cdSDariusz Sosnowski #define MLX5_CTRL_DMAC           (RTE_BIT32(5))
24329fa7c1cdSDariusz Sosnowski #define MLX5_CTRL_VLAN_FILTER    (RTE_BIT32(6))
24339fa7c1cdSDariusz Sosnowski 
24349fa7c1cdSDariusz Sosnowski int mlx5_flow_hw_ctrl_flows(struct rte_eth_dev *dev, uint32_t flags);
24359fa7c1cdSDariusz Sosnowski void mlx5_flow_hw_cleanup_ctrl_rx_templates(struct rte_eth_dev *dev);
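
/*
 * Illustrative usage sketch (not part of the original header): the
 * MLX5_CTRL_* values above form a bitmask selecting which control flow
 * rules to install, e.g. promiscuous mode together with broadcast:
 *
 *	ret = mlx5_flow_hw_ctrl_flows(dev, MLX5_CTRL_PROMISCUOUS |
 *					   MLX5_CTRL_BROADCAST);
 *
 * The call site above is assumed for illustration only.
 */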
24369fa7c1cdSDariusz Sosnowski 
24374ec6360dSGregory Etelson int mlx5_flow_group_to_table(struct rte_eth_dev *dev,
24384ec6360dSGregory Etelson 			     const struct mlx5_flow_tunnel *tunnel,
24394ec6360dSGregory Etelson 			     uint32_t group, uint32_t *table,
2440eab3ca48SGregory Etelson 			     const struct flow_grp_info *flags,
24414ec6360dSGregory Etelson 			     struct rte_flow_error *error);
2442e745f900SSuanming Mou uint64_t mlx5_flow_hashfields_adjust(struct mlx5_flow_rss_desc *rss_desc,
2443e745f900SSuanming Mou 				     int tunnel, uint64_t layer_types,
2444fc2c498cSOri Kam 				     uint64_t hash_fields);
24453eca5f8aSOphir Munk int mlx5_flow_discover_priorities(struct rte_eth_dev *dev);
244684c406e7SOri Kam uint32_t mlx5_flow_adjust_priority(struct rte_eth_dev *dev, int32_t priority,
244784c406e7SOri Kam 				   uint32_t subpriority);
24485f8ae44dSDong Zhou uint32_t mlx5_get_lowest_priority(struct rte_eth_dev *dev,
24495f8ae44dSDong Zhou 					const struct rte_flow_attr *attr);
24505f8ae44dSDong Zhou uint16_t mlx5_get_matcher_priority(struct rte_eth_dev *dev,
24515f8ae44dSDong Zhou 				   const struct rte_flow_attr *attr,
2452ebe9afedSXueming Li 				   uint32_t subpriority, bool external);
24537f6e276bSMichael Savisko uint32_t mlx5_get_send_to_kernel_priority(struct rte_eth_dev *dev);
245499d49f47SMatan Azrad int mlx5_flow_get_reg_id(struct rte_eth_dev *dev,
24553e8edd0eSViacheslav Ovsiienko 				     enum mlx5_feature_name feature,
24563e8edd0eSViacheslav Ovsiienko 				     uint32_t id,
24573e8edd0eSViacheslav Ovsiienko 				     struct rte_flow_error *error);
2458e4fcdcd6SMoti Haimovsky const struct rte_flow_action *mlx5_flow_find_action
2459e4fcdcd6SMoti Haimovsky 					(const struct rte_flow_action *actions,
2460e4fcdcd6SMoti Haimovsky 					 enum rte_flow_action_type action);
2461d7cfcdddSAndrey Vesnovaty int mlx5_validate_action_rss(struct rte_eth_dev *dev,
2462d7cfcdddSAndrey Vesnovaty 			     const struct rte_flow_action *action,
2463d7cfcdddSAndrey Vesnovaty 			     struct rte_flow_error *error);
24645e26c99fSRongwei Liu 
24655e26c99fSRongwei Liu struct mlx5_hw_encap_decap_action*
24665e26c99fSRongwei Liu mlx5_reformat_action_create(struct rte_eth_dev *dev,
24675e26c99fSRongwei Liu 			    const struct rte_flow_indir_action_conf *conf,
24685e26c99fSRongwei Liu 			    const struct rte_flow_action *encap_action,
24695e26c99fSRongwei Liu 			    const struct rte_flow_action *decap_action,
24705e26c99fSRongwei Liu 			    struct rte_flow_error *error);
24715e26c99fSRongwei Liu int mlx5_reformat_action_destroy(struct rte_eth_dev *dev,
24725e26c99fSRongwei Liu 				 struct rte_flow_action_list_handle *handle,
24735e26c99fSRongwei Liu 				 struct rte_flow_error *error);
247484c406e7SOri Kam int mlx5_flow_validate_action_count(struct rte_eth_dev *dev,
24753e9fa079SDekel Peled 				    const struct rte_flow_attr *attr,
247684c406e7SOri Kam 				    struct rte_flow_error *error);
2477c1f0cdaeSDariusz Sosnowski int mlx5_flow_validate_action_drop(struct rte_eth_dev *dev,
2478c1f0cdaeSDariusz Sosnowski 				   bool is_root,
24793e9fa079SDekel Peled 				   const struct rte_flow_attr *attr,
248084c406e7SOri Kam 				   struct rte_flow_error *error);
248184c406e7SOri Kam int mlx5_flow_validate_action_flag(uint64_t action_flags,
24823e9fa079SDekel Peled 				   const struct rte_flow_attr *attr,
248384c406e7SOri Kam 				   struct rte_flow_error *error);
248484c406e7SOri Kam int mlx5_flow_validate_action_mark(const struct rte_flow_action *action,
248584c406e7SOri Kam 				   uint64_t action_flags,
24863e9fa079SDekel Peled 				   const struct rte_flow_attr *attr,
248784c406e7SOri Kam 				   struct rte_flow_error *error);
248884c406e7SOri Kam int mlx5_flow_validate_action_queue(const struct rte_flow_action *action,
248984c406e7SOri Kam 				    uint64_t action_flags,
249084c406e7SOri Kam 				    struct rte_eth_dev *dev,
24913e9fa079SDekel Peled 				    const struct rte_flow_attr *attr,
249284c406e7SOri Kam 				    struct rte_flow_error *error);
249384c406e7SOri Kam int mlx5_flow_validate_action_rss(const struct rte_flow_action *action,
249484c406e7SOri Kam 				  uint64_t action_flags,
249584c406e7SOri Kam 				  struct rte_eth_dev *dev,
24963e9fa079SDekel Peled 				  const struct rte_flow_attr *attr,
24971183f12fSOri Kam 				  uint64_t item_flags,
249884c406e7SOri Kam 				  struct rte_flow_error *error);
24993c78124fSShiri Kuzin int mlx5_flow_validate_action_default_miss(uint64_t action_flags,
25003c78124fSShiri Kuzin 				const struct rte_flow_attr *attr,
25013c78124fSShiri Kuzin 				struct rte_flow_error *error);
2502c23626f2SMichael Baum int flow_validate_modify_field_level
2503c23626f2SMichael Baum 			(const struct rte_flow_action_modify_data *data,
2504c23626f2SMichael Baum 			 struct rte_flow_error *error);
25056bd7fbd0SDekel Peled int mlx5_flow_item_acceptable(const struct rte_flow_item *item,
25066bd7fbd0SDekel Peled 			      const uint8_t *mask,
25076bd7fbd0SDekel Peled 			      const uint8_t *nic_mask,
25086bd7fbd0SDekel Peled 			      unsigned int size,
25096859e67eSDekel Peled 			      bool range_accepted,
25106bd7fbd0SDekel Peled 			      struct rte_flow_error *error);
251184c406e7SOri Kam int mlx5_flow_validate_item_eth(const struct rte_flow_item *item,
251286b59a1aSMatan Azrad 				uint64_t item_flags, bool ext_vlan_sup,
251384c406e7SOri Kam 				struct rte_flow_error *error);
251484c406e7SOri Kam int mlx5_flow_validate_item_gre(const struct rte_flow_item *item,
251584c406e7SOri Kam 				uint64_t item_flags,
251684c406e7SOri Kam 				uint8_t target_protocol,
251784c406e7SOri Kam 				struct rte_flow_error *error);
2518a7a03655SXiaoyu Min int mlx5_flow_validate_item_gre_key(const struct rte_flow_item *item,
2519a7a03655SXiaoyu Min 				    uint64_t item_flags,
2520a7a03655SXiaoyu Min 				    const struct rte_flow_item *gre_item,
2521a7a03655SXiaoyu Min 				    struct rte_flow_error *error);
25225c4d4917SSean Zhang int mlx5_flow_validate_item_gre_option(struct rte_eth_dev *dev,
25235c4d4917SSean Zhang 				       const struct rte_flow_item *item,
25245c4d4917SSean Zhang 				       uint64_t item_flags,
25255c4d4917SSean Zhang 				       const struct rte_flow_attr *attr,
25265c4d4917SSean Zhang 				       const struct rte_flow_item *gre_item,
25275c4d4917SSean Zhang 				       struct rte_flow_error *error);
252884c406e7SOri Kam int mlx5_flow_validate_item_ipv4(const struct rte_flow_item *item,
2529ed4c5247SShahaf Shuler 				 uint64_t item_flags,
2530fba32130SXiaoyu Min 				 uint64_t last_item,
2531fba32130SXiaoyu Min 				 uint16_t ether_type,
253255c61fa7SViacheslav Ovsiienko 				 const struct rte_flow_item_ipv4 *acc_mask,
25336859e67eSDekel Peled 				 bool range_accepted,
253484c406e7SOri Kam 				 struct rte_flow_error *error);
253584c406e7SOri Kam int mlx5_flow_validate_item_ipv6(const struct rte_flow_item *item,
253684c406e7SOri Kam 				 uint64_t item_flags,
2537fba32130SXiaoyu Min 				 uint64_t last_item,
2538fba32130SXiaoyu Min 				 uint16_t ether_type,
253955c61fa7SViacheslav Ovsiienko 				 const struct rte_flow_item_ipv6 *acc_mask,
254084c406e7SOri Kam 				 struct rte_flow_error *error);
254138f7efaaSDekel Peled int mlx5_flow_validate_item_mpls(struct rte_eth_dev *dev,
254238f7efaaSDekel Peled 				 const struct rte_flow_item *item,
254384c406e7SOri Kam 				 uint64_t item_flags,
254438f7efaaSDekel Peled 				 uint64_t prev_layer,
254584c406e7SOri Kam 				 struct rte_flow_error *error);
254684c406e7SOri Kam int mlx5_flow_validate_item_tcp(const struct rte_flow_item *item,
254784c406e7SOri Kam 				uint64_t item_flags,
254884c406e7SOri Kam 				uint8_t target_protocol,
254992378c2bSMoti Haimovsky 				const struct rte_flow_item_tcp *flow_mask,
255084c406e7SOri Kam 				struct rte_flow_error *error);
255184c406e7SOri Kam int mlx5_flow_validate_item_udp(const struct rte_flow_item *item,
255284c406e7SOri Kam 				uint64_t item_flags,
255384c406e7SOri Kam 				uint8_t target_protocol,
255484c406e7SOri Kam 				struct rte_flow_error *error);
255584c406e7SOri Kam int mlx5_flow_validate_item_vlan(const struct rte_flow_item *item,
2556ed4c5247SShahaf Shuler 				 uint64_t item_flags,
2557dfedf3e3SViacheslav Ovsiienko 				 struct rte_eth_dev *dev,
255884c406e7SOri Kam 				 struct rte_flow_error *error);
2559630a587bSRongwei Liu int mlx5_flow_validate_item_vxlan(struct rte_eth_dev *dev,
2560a1fd0c82SRongwei Liu 				  uint16_t udp_dport,
2561630a587bSRongwei Liu 				  const struct rte_flow_item *item,
256284c406e7SOri Kam 				  uint64_t item_flags,
25631939eb6fSDariusz Sosnowski 				  bool root,
256484c406e7SOri Kam 				  struct rte_flow_error *error);
256584c406e7SOri Kam int mlx5_flow_validate_item_vxlan_gpe(const struct rte_flow_item *item,
256684c406e7SOri Kam 				      uint64_t item_flags,
256784c406e7SOri Kam 				      struct rte_eth_dev *dev,
256884c406e7SOri Kam 				      struct rte_flow_error *error);
2569d53aa89aSXiaoyu Min int mlx5_flow_validate_item_icmp(const struct rte_flow_item *item,
2570d53aa89aSXiaoyu Min 				 uint64_t item_flags,
2571d53aa89aSXiaoyu Min 				 uint8_t target_protocol,
2572d53aa89aSXiaoyu Min 				 struct rte_flow_error *error);
2573d53aa89aSXiaoyu Min int mlx5_flow_validate_item_icmp6(const struct rte_flow_item *item,
2574d53aa89aSXiaoyu Min 				   uint64_t item_flags,
2575d53aa89aSXiaoyu Min 				   uint8_t target_protocol,
2576d53aa89aSXiaoyu Min 				   struct rte_flow_error *error);
257701314192SLeo Xu int mlx5_flow_validate_item_icmp6_echo(const struct rte_flow_item *item,
257801314192SLeo Xu 				       uint64_t item_flags,
257901314192SLeo Xu 				       uint8_t target_protocol,
258001314192SLeo Xu 				       struct rte_flow_error *error);
2581ea81c1b8SDekel Peled int mlx5_flow_validate_item_nvgre(const struct rte_flow_item *item,
2582ea81c1b8SDekel Peled 				  uint64_t item_flags,
2583ea81c1b8SDekel Peled 				  uint8_t target_protocol,
2584ea81c1b8SDekel Peled 				  struct rte_flow_error *error);
2585e59a5dbcSMoti Haimovsky int mlx5_flow_validate_item_geneve(const struct rte_flow_item *item,
2586e59a5dbcSMoti Haimovsky 				   uint64_t item_flags,
2587e59a5dbcSMoti Haimovsky 				   struct rte_eth_dev *dev,
2588e59a5dbcSMoti Haimovsky 				   struct rte_flow_error *error);
2589f7239fceSShiri Kuzin int mlx5_flow_validate_item_geneve_opt(const struct rte_flow_item *item,
2590f7239fceSShiri Kuzin 				   uint64_t last_item,
2591f7239fceSShiri Kuzin 				   const struct rte_flow_item *geneve_item,
2592f7239fceSShiri Kuzin 				   struct rte_eth_dev *dev,
2593f7239fceSShiri Kuzin 				   struct rte_flow_error *error);
2594c7eca236SBing Zhao int mlx5_flow_validate_item_ecpri(const struct rte_flow_item *item,
2595c7eca236SBing Zhao 				  uint64_t item_flags,
2596c7eca236SBing Zhao 				  uint64_t last_item,
2597c7eca236SBing Zhao 				  uint16_t ether_type,
2598c7eca236SBing Zhao 				  const struct rte_flow_item_ecpri *acc_mask,
2599c7eca236SBing Zhao 				  struct rte_flow_error *error);
26006f7d6622SHaifei Luo int mlx5_flow_validate_item_nsh(struct rte_eth_dev *dev,
26016f7d6622SHaifei Luo 				const struct rte_flow_item *item,
26026f7d6622SHaifei Luo 				struct rte_flow_error *error);
260344432018SLi Zhang int mlx5_flow_create_mtr_tbls(struct rte_eth_dev *dev,
260444432018SLi Zhang 			      struct mlx5_flow_meter_info *fm,
260544432018SLi Zhang 			      uint32_t mtr_idx,
260644432018SLi Zhang 			      uint8_t domain_bitmap);
260744432018SLi Zhang void mlx5_flow_destroy_mtr_tbls(struct rte_eth_dev *dev,
260844432018SLi Zhang 			       struct mlx5_flow_meter_info *fm);
2609afb4aa4fSLi Zhang void mlx5_flow_destroy_mtr_drop_tbls(struct rte_eth_dev *dev);
2610fc6ce56bSLi Zhang struct mlx5_flow_meter_sub_policy *mlx5_flow_meter_sub_policy_rss_prepare
2611fc6ce56bSLi Zhang 		(struct rte_eth_dev *dev,
2612fc6ce56bSLi Zhang 		struct mlx5_flow_meter_policy *mtr_policy,
2613fc6ce56bSLi Zhang 		struct mlx5_flow_rss_desc *rss_desc[MLX5_MTR_RTE_COLORS]);
2614ec962badSLi Zhang void mlx5_flow_destroy_sub_policy_with_rxq(struct rte_eth_dev *dev,
2615ec962badSLi Zhang 		struct mlx5_flow_meter_policy *mtr_policy);
2616994829e6SSuanming Mou int mlx5_flow_dv_discover_counter_offset_support(struct rte_eth_dev *dev);
261745633c46SSuanming Mou int mlx5_flow_discover_dr_action_support(struct rte_eth_dev *dev);
2618ec4e11d4SDmitry Kozlyuk int mlx5_action_handle_attach(struct rte_eth_dev *dev);
2619ec4e11d4SDmitry Kozlyuk int mlx5_action_handle_detach(struct rte_eth_dev *dev);
26204b61b877SBing Zhao int mlx5_action_handle_flush(struct rte_eth_dev *dev);
26214ec6360dSGregory Etelson void mlx5_release_tunnel_hub(struct mlx5_dev_ctx_shared *sh, uint16_t port_id);
26224ec6360dSGregory Etelson int mlx5_alloc_tunnel_hub(struct mlx5_dev_ctx_shared *sh);
2623afd7a625SXueming Li 
2624961b6774SMatan Azrad struct mlx5_list_entry *flow_dv_tbl_create_cb(void *tool_ctx, void *entry_ctx);
2625961b6774SMatan Azrad int flow_dv_tbl_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
2626f5b0aed2SSuanming Mou 			 void *cb_ctx);
2627961b6774SMatan Azrad void flow_dv_tbl_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
2628961b6774SMatan Azrad struct mlx5_list_entry *flow_dv_tbl_clone_cb(void *tool_ctx,
2629961b6774SMatan Azrad 					     struct mlx5_list_entry *oentry,
2630961b6774SMatan Azrad 					     void *entry_ctx);
2631961b6774SMatan Azrad void flow_dv_tbl_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry);
2632afd7a625SXueming Li struct mlx5_flow_tbl_resource *flow_dv_tbl_resource_get(struct rte_eth_dev *dev,
26332d2cef5dSLi Zhang 		uint32_t table_level, uint8_t egress, uint8_t transfer,
2634afd7a625SXueming Li 		bool external, const struct mlx5_flow_tunnel *tunnel,
26352d2cef5dSLi Zhang 		uint32_t group_id, uint8_t dummy,
26362d2cef5dSLi Zhang 		uint32_t table_id, struct rte_flow_error *error);
2637f31a141eSMichael Savisko int flow_dv_tbl_resource_release(struct mlx5_dev_ctx_shared *sh,
2638f31a141eSMichael Savisko 				 struct mlx5_flow_tbl_resource *tbl);
2639afd7a625SXueming Li 
2640961b6774SMatan Azrad struct mlx5_list_entry *flow_dv_tag_create_cb(void *tool_ctx, void *cb_ctx);
2641961b6774SMatan Azrad int flow_dv_tag_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
2642f5b0aed2SSuanming Mou 			 void *cb_ctx);
2643961b6774SMatan Azrad void flow_dv_tag_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
2644961b6774SMatan Azrad struct mlx5_list_entry *flow_dv_tag_clone_cb(void *tool_ctx,
2645961b6774SMatan Azrad 					     struct mlx5_list_entry *oentry,
2646f5b0aed2SSuanming Mou 					     void *cb_ctx);
2647961b6774SMatan Azrad void flow_dv_tag_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry);
2648f7f73ac1SXueming Li 
2649961b6774SMatan Azrad int flow_dv_modify_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
2650961b6774SMatan Azrad 			    void *cb_ctx);
2651961b6774SMatan Azrad struct mlx5_list_entry *flow_dv_modify_create_cb(void *tool_ctx, void *ctx);
2652961b6774SMatan Azrad void flow_dv_modify_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
2653961b6774SMatan Azrad struct mlx5_list_entry *flow_dv_modify_clone_cb(void *tool_ctx,
2654961b6774SMatan Azrad 						struct mlx5_list_entry *oentry,
2655961b6774SMatan Azrad 						void *ctx);
2656961b6774SMatan Azrad void flow_dv_modify_clone_free_cb(void *tool_ctx,
2657961b6774SMatan Azrad 				  struct mlx5_list_entry *entry);
2658961b6774SMatan Azrad 
2659961b6774SMatan Azrad struct mlx5_list_entry *flow_dv_mreg_create_cb(void *tool_ctx, void *ctx);
2660961b6774SMatan Azrad int flow_dv_mreg_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
2661961b6774SMatan Azrad 			  void *cb_ctx);
2662961b6774SMatan Azrad void flow_dv_mreg_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
2663961b6774SMatan Azrad struct mlx5_list_entry *flow_dv_mreg_clone_cb(void *tool_ctx,
2664961b6774SMatan Azrad 					      struct mlx5_list_entry *entry,
2665961b6774SMatan Azrad 					      void *ctx);
2666961b6774SMatan Azrad void flow_dv_mreg_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry);
2667961b6774SMatan Azrad 
2668961b6774SMatan Azrad int flow_dv_encap_decap_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
2669961b6774SMatan Azrad 				 void *cb_ctx);
2670961b6774SMatan Azrad struct mlx5_list_entry *flow_dv_encap_decap_create_cb(void *tool_ctx,
2671961b6774SMatan Azrad 						      void *cb_ctx);
2672961b6774SMatan Azrad void flow_dv_encap_decap_remove_cb(void *tool_ctx,
2673961b6774SMatan Azrad 				   struct mlx5_list_entry *entry);
2674961b6774SMatan Azrad struct mlx5_list_entry *flow_dv_encap_decap_clone_cb(void *tool_ctx,
2675961b6774SMatan Azrad 						  struct mlx5_list_entry *entry,
2676961b6774SMatan Azrad 						  void *cb_ctx);
2677961b6774SMatan Azrad void flow_dv_encap_decap_clone_free_cb(void *tool_ctx,
2678961b6774SMatan Azrad 				       struct mlx5_list_entry *entry);
267918726355SXueming Li 
26806507c9f5SSuanming Mou int flow_dv_matcher_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
2681e78e5408SMatan Azrad 			     void *ctx);
26826507c9f5SSuanming Mou struct mlx5_list_entry *flow_dv_matcher_create_cb(void *tool_ctx, void *ctx);
26836507c9f5SSuanming Mou void flow_dv_matcher_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
26846507c9f5SSuanming Mou 
26856507c9f5SSuanming Mou int flow_dv_port_id_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
26866507c9f5SSuanming Mou 			     void *cb_ctx);
26876507c9f5SSuanming Mou struct mlx5_list_entry *flow_dv_port_id_create_cb(void *tool_ctx, void *cb_ctx);
26886507c9f5SSuanming Mou void flow_dv_port_id_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
26896507c9f5SSuanming Mou struct mlx5_list_entry *flow_dv_port_id_clone_cb(void *tool_ctx,
26906507c9f5SSuanming Mou 				struct mlx5_list_entry *entry, void *cb_ctx);
26916507c9f5SSuanming Mou void flow_dv_port_id_clone_free_cb(void *tool_ctx,
2692e78e5408SMatan Azrad 				   struct mlx5_list_entry *entry);
269318726355SXueming Li 
26946507c9f5SSuanming Mou int flow_dv_push_vlan_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
2695e78e5408SMatan Azrad 			       void *cb_ctx);
26966507c9f5SSuanming Mou struct mlx5_list_entry *flow_dv_push_vlan_create_cb(void *tool_ctx,
2697491b7137SMatan Azrad 						    void *cb_ctx);
26986507c9f5SSuanming Mou void flow_dv_push_vlan_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
26996507c9f5SSuanming Mou struct mlx5_list_entry *flow_dv_push_vlan_clone_cb(void *tool_ctx,
2700e78e5408SMatan Azrad 				 struct mlx5_list_entry *entry, void *cb_ctx);
27016507c9f5SSuanming Mou void flow_dv_push_vlan_clone_free_cb(void *tool_ctx,
2702491b7137SMatan Azrad 				     struct mlx5_list_entry *entry);
27033422af2aSXueming Li 
27046507c9f5SSuanming Mou int flow_dv_sample_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
2705e78e5408SMatan Azrad 			    void *cb_ctx);
27066507c9f5SSuanming Mou struct mlx5_list_entry *flow_dv_sample_create_cb(void *tool_ctx, void *cb_ctx);
27076507c9f5SSuanming Mou void flow_dv_sample_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
27086507c9f5SSuanming Mou struct mlx5_list_entry *flow_dv_sample_clone_cb(void *tool_ctx,
2709491b7137SMatan Azrad 				 struct mlx5_list_entry *entry, void *cb_ctx);
27106507c9f5SSuanming Mou void flow_dv_sample_clone_free_cb(void *tool_ctx,
2711491b7137SMatan Azrad 				  struct mlx5_list_entry *entry);
271219784141SSuanming Mou 
27136507c9f5SSuanming Mou int flow_dv_dest_array_match_cb(void *tool_ctx, struct mlx5_list_entry *entry,
2714e78e5408SMatan Azrad 				void *cb_ctx);
27156507c9f5SSuanming Mou struct mlx5_list_entry *flow_dv_dest_array_create_cb(void *tool_ctx,
27166507c9f5SSuanming Mou 						     void *cb_ctx);
27176507c9f5SSuanming Mou void flow_dv_dest_array_remove_cb(void *tool_ctx,
2718e78e5408SMatan Azrad 				  struct mlx5_list_entry *entry);
27196507c9f5SSuanming Mou struct mlx5_list_entry *flow_dv_dest_array_clone_cb(void *tool_ctx,
2720491b7137SMatan Azrad 				   struct mlx5_list_entry *entry, void *cb_ctx);
27216507c9f5SSuanming Mou void flow_dv_dest_array_clone_free_cb(void *tool_ctx,
2722491b7137SMatan Azrad 				      struct mlx5_list_entry *entry);
27233a2f674bSSuanming Mou void flow_dv_hashfields_set(uint64_t item_flags,
27243a2f674bSSuanming Mou 			    struct mlx5_flow_rss_desc *rss_desc,
27253a2f674bSSuanming Mou 			    uint64_t *hash_fields);
27263a2f674bSSuanming Mou void flow_dv_action_rss_l34_hash_adjust(uint64_t rss_types,
27273a2f674bSSuanming Mou 					uint64_t *hash_field);
27287ab3962dSSuanming Mou uint32_t flow_dv_action_rss_hrxq_lookup(struct rte_eth_dev *dev, uint32_t idx,
27297ab3962dSSuanming Mou 					const uint64_t hash_fields);
27306507c9f5SSuanming Mou 
2731d1559d66SSuanming Mou struct mlx5_list_entry *flow_hw_grp_create_cb(void *tool_ctx, void *cb_ctx);
2732d1559d66SSuanming Mou void flow_hw_grp_remove_cb(void *tool_ctx, struct mlx5_list_entry *entry);
2733d1559d66SSuanming Mou int flow_hw_grp_match_cb(void *tool_ctx,
2734d1559d66SSuanming Mou 			 struct mlx5_list_entry *entry,
2735d1559d66SSuanming Mou 			 void *cb_ctx);
2736d1559d66SSuanming Mou struct mlx5_list_entry *flow_hw_grp_clone_cb(void *tool_ctx,
2737d1559d66SSuanming Mou 					     struct mlx5_list_entry *oentry,
2738d1559d66SSuanming Mou 					     void *cb_ctx);
2739d1559d66SSuanming Mou void flow_hw_grp_clone_free_cb(void *tool_ctx, struct mlx5_list_entry *entry);
2740d1559d66SSuanming Mou 
274181073e1fSMatan Azrad struct mlx5_aso_age_action *flow_aso_age_get_by_idx(struct rte_eth_dev *dev,
274281073e1fSMatan Azrad 						    uint32_t age_idx);
2743f15f0c38SShiri Kuzin int flow_dev_geneve_tlv_option_resource_register(struct rte_eth_dev *dev,
2744f15f0c38SShiri Kuzin 					     const struct rte_flow_item *item,
2745f15f0c38SShiri Kuzin 					     struct rte_flow_error *error);
274644864503SSuanming Mou void flow_dev_geneve_tlv_option_resource_release(struct mlx5_dev_ctx_shared *sh);
274744864503SSuanming Mou 
27485d55a494STal Shnaiderman void flow_release_workspace(void *data);
27495d55a494STal Shnaiderman int mlx5_flow_os_init_workspace_once(void);
27505d55a494STal Shnaiderman void *mlx5_flow_os_get_specific_workspace(void);
27515d55a494STal Shnaiderman int mlx5_flow_os_set_specific_workspace(struct mlx5_flow_workspace *data);
27525d55a494STal Shnaiderman void mlx5_flow_os_release_workspace(void);
2753e6100c7bSLi Zhang uint32_t mlx5_flow_mtr_alloc(struct rte_eth_dev *dev);
2754e6100c7bSLi Zhang void mlx5_flow_mtr_free(struct rte_eth_dev *dev, uint32_t mtr_idx);
2755afb4aa4fSLi Zhang int mlx5_flow_validate_mtr_acts(struct rte_eth_dev *dev,
2756afb4aa4fSLi Zhang 			const struct rte_flow_action *actions[RTE_COLORS],
2757afb4aa4fSLi Zhang 			struct rte_flow_attr *attr,
2758afb4aa4fSLi Zhang 			bool *is_rss,
2759afb4aa4fSLi Zhang 			uint8_t *domain_bitmap,
27604b7bf3ffSBing Zhao 			uint8_t *policy_mode,
2761afb4aa4fSLi Zhang 			struct rte_mtr_error *error);
2762afb4aa4fSLi Zhang void mlx5_flow_destroy_mtr_acts(struct rte_eth_dev *dev,
2763afb4aa4fSLi Zhang 		      struct mlx5_flow_meter_policy *mtr_policy);
2764afb4aa4fSLi Zhang int mlx5_flow_create_mtr_acts(struct rte_eth_dev *dev,
2765afb4aa4fSLi Zhang 		      struct mlx5_flow_meter_policy *mtr_policy,
2766afb4aa4fSLi Zhang 		      const struct rte_flow_action *actions[RTE_COLORS],
27676431068dSSean Zhang 		      struct rte_flow_attr *attr,
2768afb4aa4fSLi Zhang 		      struct rte_mtr_error *error);
2769afb4aa4fSLi Zhang int mlx5_flow_create_policy_rules(struct rte_eth_dev *dev,
2770afb4aa4fSLi Zhang 			     struct mlx5_flow_meter_policy *mtr_policy);
2771afb4aa4fSLi Zhang void mlx5_flow_destroy_policy_rules(struct rte_eth_dev *dev,
2772afb4aa4fSLi Zhang 			     struct mlx5_flow_meter_policy *mtr_policy);
2773afb4aa4fSLi Zhang int mlx5_flow_create_def_policy(struct rte_eth_dev *dev);
2774afb4aa4fSLi Zhang void mlx5_flow_destroy_def_policy(struct rte_eth_dev *dev);
2775afb4aa4fSLi Zhang void flow_drv_rxq_flags_set(struct rte_eth_dev *dev,
2776afb4aa4fSLi Zhang 		       struct mlx5_flow_handle *dev_handle);
27778c5a231bSGregory Etelson const struct mlx5_flow_tunnel *
27788c5a231bSGregory Etelson mlx5_get_tof(const struct rte_flow_item *items,
27798c5a231bSGregory Etelson 	     const struct rte_flow_action *actions,
27808c5a231bSGregory Etelson 	     enum mlx5_tof_rule_type *rule_type);
2781b401400dSSuanming Mou void
2782b401400dSSuanming Mou flow_hw_resource_release(struct rte_eth_dev *dev);
2783f64a7946SRongwei Liu void
2784f64a7946SRongwei Liu flow_hw_rxq_flag_set(struct rte_eth_dev *dev, bool enable);
27857ab3962dSSuanming Mou int flow_dv_action_validate(struct rte_eth_dev *dev,
27867ab3962dSSuanming Mou 			    const struct rte_flow_indir_action_conf *conf,
27877ab3962dSSuanming Mou 			    const struct rte_flow_action *action,
27887ab3962dSSuanming Mou 			    struct rte_flow_error *err);
27897ab3962dSSuanming Mou struct rte_flow_action_handle *flow_dv_action_create(struct rte_eth_dev *dev,
27907ab3962dSSuanming Mou 		      const struct rte_flow_indir_action_conf *conf,
27917ab3962dSSuanming Mou 		      const struct rte_flow_action *action,
27927ab3962dSSuanming Mou 		      struct rte_flow_error *err);
27937ab3962dSSuanming Mou int flow_dv_action_destroy(struct rte_eth_dev *dev,
27947ab3962dSSuanming Mou 			   struct rte_flow_action_handle *handle,
27957ab3962dSSuanming Mou 			   struct rte_flow_error *error);
27967ab3962dSSuanming Mou int flow_dv_action_update(struct rte_eth_dev *dev,
27977ab3962dSSuanming Mou 			  struct rte_flow_action_handle *handle,
27987ab3962dSSuanming Mou 			  const void *update,
27997ab3962dSSuanming Mou 			  struct rte_flow_error *err);
28007ab3962dSSuanming Mou int flow_dv_action_query(struct rte_eth_dev *dev,
28017ab3962dSSuanming Mou 			 const struct rte_flow_action_handle *handle,
28027ab3962dSSuanming Mou 			 void *data,
28037ab3962dSSuanming Mou 			 struct rte_flow_error *error);
2804fe3620aaSSuanming Mou size_t flow_dv_get_item_hdr_len(const enum rte_flow_item_type item_type);
2805fe3620aaSSuanming Mou int flow_dv_convert_encap_data(const struct rte_flow_item *items, uint8_t *buf,
2806fe3620aaSSuanming Mou 			   size_t *size, struct rte_flow_error *error);
28070f4aa72bSSuanming Mou void mlx5_flow_field_id_to_modify_info
28080f4aa72bSSuanming Mou 		(const struct rte_flow_action_modify_data *data,
28090f4aa72bSSuanming Mou 		 struct field_modify_info *info, uint32_t *mask,
28100f4aa72bSSuanming Mou 		 uint32_t width, struct rte_eth_dev *dev,
28110f4aa72bSSuanming Mou 		 const struct rte_flow_attr *attr, struct rte_flow_error *error);
28120f4aa72bSSuanming Mou int flow_dv_convert_modify_action(struct rte_flow_item *item,
28130f4aa72bSSuanming Mou 			      struct field_modify_info *field,
28140f4aa72bSSuanming Mou 			      struct field_modify_info *dcopy,
28150f4aa72bSSuanming Mou 			      struct mlx5_flow_dv_modify_hdr_resource *resource,
28160f4aa72bSSuanming Mou 			      uint32_t type, struct rte_flow_error *error);
281768e9925cSShun Hao 
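/* Vport numbers of the PF and the embedded CPU PF (ECPF). */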
281868e9925cSShun Hao #define MLX5_PF_VPORT_ID 0
281968e9925cSShun Hao #define MLX5_ECPF_VPORT_ID 0xFFFE
282068e9925cSShun Hao 
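/* Resolve the E-Switch manager vport and the vport matched by a port item. */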
282192b3c68eSShun Hao int16_t mlx5_flow_get_esw_manager_vport_id(struct rte_eth_dev *dev);
282292b3c68eSShun Hao int mlx5_flow_get_item_vport_id(struct rte_eth_dev *dev,
282392b3c68eSShun Hao 				const struct rte_flow_item *item,
282492b3c68eSShun Hao 				uint16_t *vport_id,
2825ca7e6051SShun Hao 				bool *all_ports,
282692b3c68eSShun Hao 				struct rte_flow_error *error);
282792b3c68eSShun Hao 
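/* Translate flow items into a DV match key/criteria for HW steering (HWS). */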
282875a00812SSuanming Mou int flow_dv_translate_items_hws(const struct rte_flow_item *items,
282975a00812SSuanming Mou 				struct mlx5_flow_attr *attr, void *key,
283075a00812SSuanming Mou 				uint32_t key_type, uint64_t *item_flags,
283175a00812SSuanming Mou 				uint8_t *match_criteria,
283275a00812SSuanming Mou 				struct rte_flow_error *error);
28331939eb6fSDariusz Sosnowski 
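/* Pick the E-Switch proxy port that owns transfer flow rules for this port. */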
28341939eb6fSDariusz Sosnowski int mlx5_flow_pick_transfer_proxy(struct rte_eth_dev *dev,
28351939eb6fSDariusz Sosnowski 				  uint16_t *proxy_port_id,
28361939eb6fSDariusz Sosnowski 				  struct rte_flow_error *error);
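/* Stub ("null") flow engine callbacks for aged flows and counters. */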
2837c68bb7a6SAsaf Penso int flow_null_get_aged_flows(struct rte_eth_dev *dev,
2838c68bb7a6SAsaf Penso 			     void **context,
2839c68bb7a6SAsaf Penso 			     uint32_t nb_contexts,
2840c68bb7a6SAsaf Penso 			     struct rte_flow_error *error);
2841c68bb7a6SAsaf Penso uint32_t flow_null_counter_allocate(struct rte_eth_dev *dev);
2842c68bb7a6SAsaf Penso void flow_null_counter_free(struct rte_eth_dev *dev,
2843c68bb7a6SAsaf Penso 			    uint32_t counter);
2844c68bb7a6SAsaf Penso int flow_null_counter_query(struct rte_eth_dev *dev,
2845c68bb7a6SAsaf Penso 			    uint32_t counter,
2846c68bb7a6SAsaf Penso 			    bool clear,
2847c68bb7a6SAsaf Penso 			    uint64_t *pkts,
2848c68bb7a6SAsaf Penso 			    uint64_t *bytes,
2849c68bb7a6SAsaf Penso 			    void **action);
28501939eb6fSDariusz Sosnowski 
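/*
 * HW steering control flow rules: flush all control flows and create the
 * E-Switch SQ miss, default jump, Tx metadata copy and Tx representor
 * matching rules.
 */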
28511939eb6fSDariusz Sosnowski int mlx5_flow_hw_flush_ctrl_flows(struct rte_eth_dev *dev);
28521939eb6fSDariusz Sosnowski 
28531939eb6fSDariusz Sosnowski int mlx5_flow_hw_esw_create_sq_miss_flow(struct rte_eth_dev *dev,
285426e1eaf2SDariusz Sosnowski 					 uint32_t sqn);
28551939eb6fSDariusz Sosnowski int mlx5_flow_hw_esw_create_default_jump_flow(struct rte_eth_dev *dev);
2856ddb68e47SBing Zhao int mlx5_flow_hw_create_tx_default_mreg_copy_flow(struct rte_eth_dev *dev);
2857483181f7SDariusz Sosnowski int mlx5_flow_hw_tx_repr_matching_flow(struct rte_eth_dev *dev, uint32_t sqn);
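/* Validation of actions/pattern templates for the template (async flow) API. */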
285824865366SAlexander Kozyrev int mlx5_flow_actions_validate(struct rte_eth_dev *dev,
285924865366SAlexander Kozyrev 		const struct rte_flow_actions_template_attr *attr,
286024865366SAlexander Kozyrev 		const struct rte_flow_action actions[],
286124865366SAlexander Kozyrev 		const struct rte_flow_action masks[],
286224865366SAlexander Kozyrev 		struct rte_flow_error *error);
286324865366SAlexander Kozyrev int mlx5_flow_pattern_validate(struct rte_eth_dev *dev,
286424865366SAlexander Kozyrev 		const struct rte_flow_pattern_template_attr *attr,
286524865366SAlexander Kozyrev 		const struct rte_flow_item items[],
286624865366SAlexander Kozyrev 		struct rte_flow_error *error);
2867f1fecffaSDariusz Sosnowski int flow_hw_table_update(struct rte_eth_dev *dev,
2868f1fecffaSDariusz Sosnowski 			 struct rte_flow_error *error);
2869773ca0e9SGregory Etelson int mlx5_flow_item_field_width(struct rte_eth_dev *dev,
2870773ca0e9SGregory Etelson 			   enum rte_flow_field_id field, int inherit,
2871773ca0e9SGregory Etelson 			   const struct rte_flow_attr *attr,
2872773ca0e9SGregory Etelson 			   struct rte_flow_error *error);
287300e57916SRongwei Liu 
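/*
 * Look up the port owning the given HWS context and return the byte offset
 * of the SRH (IPv6 segment routing) flex parser sample in the match key,
 * or UINT32_MAX when it cannot be resolved.
 */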
287400e57916SRongwei Liu static __rte_always_inline int
287500e57916SRongwei Liu flow_hw_get_srh_flex_parser_byte_off_from_ctx(void *dr_ctx __rte_unused)
287600e57916SRongwei Liu {
287700e57916SRongwei Liu #ifdef HAVE_IBV_FLOW_DV_SUPPORT
287800e57916SRongwei Liu 	uint16_t port;
287900e57916SRongwei Liu 
288000e57916SRongwei Liu 	MLX5_ETH_FOREACH_DEV(port, NULL) {
288100e57916SRongwei Liu 		struct mlx5_priv *priv;
288200e57916SRongwei Liu 		struct mlx5_hca_flex_attr *attr;
2883bc0a9303SRongwei Liu 		struct mlx5_devx_match_sample_info_query_attr *info;
288400e57916SRongwei Liu 
288500e57916SRongwei Liu 		priv = rte_eth_devices[port].data->dev_private;
288600e57916SRongwei Liu 		attr = &priv->sh->cdev->config.hca_attr.flex;
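		/* Only the port owning this HWS context and able to query sample info. */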
2887bc0a9303SRongwei Liu 		if (priv->dr_ctx == dr_ctx && attr->query_match_sample_info) {
2888bc0a9303SRongwei Liu 			info = &priv->sh->srh_flex_parser.flex.devx_fp->sample_info[0];
2889bc0a9303SRongwei Liu 			if (priv->sh->srh_flex_parser.flex.mapnum)
2890bc0a9303SRongwei Liu 				return info->sample_dw_data * sizeof(uint32_t);
289100e57916SRongwei Liu 			else
289200e57916SRongwei Liu 				return UINT32_MAX;
289300e57916SRongwei Liu 		}
289400e57916SRongwei Liu 	}
289500e57916SRongwei Liu #endif
289600e57916SRongwei Liu 	return UINT32_MAX;
289700e57916SRongwei Liu }
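/*
 * Indirect action list helpers: release all list handles on a port; HWS-only
 * destructors for mirror and reformat (decap/encap) indirect lists.
 */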
28983564e928SGregory Etelson void
28993564e928SGregory Etelson mlx5_indirect_list_handles_release(struct rte_eth_dev *dev);
29003564e928SGregory Etelson #ifdef HAVE_MLX5_HWS_SUPPORT
29013564e928SGregory Etelson struct mlx5_mirror;
29023564e928SGregory Etelson void
2903e26f50adSGregory Etelson mlx5_hw_mirror_destroy(struct rte_eth_dev *dev, struct mlx5_mirror *mirror);
2904e26f50adSGregory Etelson void
2905e26f50adSGregory Etelson mlx5_destroy_legacy_indirect(struct rte_eth_dev *dev,
2906e26f50adSGregory Etelson 			     struct mlx5_indirect_list *ptr);
29075e26c99fSRongwei Liu void
29085e26c99fSRongwei Liu mlx5_hw_decap_encap_destroy(struct rte_eth_dev *dev,
29095e26c99fSRongwei Liu 			    struct mlx5_indirect_list *reformat);
29103564e928SGregory Etelson #endif
291184c406e7SOri Kam #endif /* RTE_PMD_MLX5_FLOW_H_ */
2912