xref: /dpdk/drivers/net/mlx5/hws/mlx5dr_definer.c (revision a371119084b81f77400fa3aed061d570cfc0eefe)
1c55c2bf3SAlex Vesker /* SPDX-License-Identifier: BSD-3-Clause
2c55c2bf3SAlex Vesker  * Copyright (c) 2022 NVIDIA Corporation & Affiliates
3c55c2bf3SAlex Vesker  */
4c55c2bf3SAlex Vesker 
5191128d7SDavid Marchand #include <rte_bitops.h>
6191128d7SDavid Marchand 
7c55c2bf3SAlex Vesker #include "mlx5dr_internal.h"
8c55c2bf3SAlex Vesker 
9c55c2bf3SAlex Vesker #define GTP_PDU_SC	0x85
10c55c2bf3SAlex Vesker #define BAD_PORT	0xBAD
117aa6c077SSuanming Mou #define BAD_SQN		0xBAD
12c55c2bf3SAlex Vesker #define ETH_TYPE_IPV4_VXLAN	0x0800
13c55c2bf3SAlex Vesker #define ETH_TYPE_IPV6_VXLAN	0x86DD
1428e69588SAlex Vesker #define UDP_VXLAN_PORT	4789
1528e69588SAlex Vesker #define UDP_VXLAN_GPE_PORT	4790
1628e69588SAlex Vesker #define UDP_GTPU_PORT	2152
1728e69588SAlex Vesker #define UDP_PORT_MPLS	6635
1843b5adbaSAlex Vesker #define UDP_GENEVE_PORT 6081
197bf29065SDong Zhou #define UDP_ROCEV2_PORT	4791
205bf14a4bSErez Shitrit #define DR_FLOW_LAYER_TUNNEL_NO_MPLS (MLX5_FLOW_LAYER_TUNNEL & ~MLX5_FLOW_LAYER_MPLS)
216c299801SDong Zhou #define NVGRE_PORT 0x6558
226c299801SDong Zhou #define NVGRE_C_RSVD0_VER 0x2000
236c299801SDong Zhou #define NVGRE_C_RSVD0_VER_MASK 0xB000
24c55c2bf3SAlex Vesker 
25c55c2bf3SAlex Vesker #define STE_NO_VLAN	0x0
26c55c2bf3SAlex Vesker #define STE_SVLAN	0x1
27c55c2bf3SAlex Vesker #define STE_CVLAN	0x2
28ad17988aSAlexander Kozyrev #define STE_NO_L3	0x0
29c55c2bf3SAlex Vesker #define STE_IPV4	0x1
30c55c2bf3SAlex Vesker #define STE_IPV6	0x2
31ad17988aSAlexander Kozyrev #define STE_NO_L4	0x0
32c55c2bf3SAlex Vesker #define STE_TCP		0x1
33c55c2bf3SAlex Vesker #define STE_UDP		0x2
34c55c2bf3SAlex Vesker #define STE_ICMP	0x3
35ad17988aSAlexander Kozyrev #define STE_NO_TUN	0x0
36ad17988aSAlexander Kozyrev #define STE_ESP		0x3
37c55c2bf3SAlex Vesker 
389fa0e142SGregory Etelson #define MLX5DR_DEFINER_QUOTA_BLOCK 0
399fa0e142SGregory Etelson #define MLX5DR_DEFINER_QUOTA_PASS  2
40b53a95abSItamar Gozlan #define MLX5DR_DEFINER_MAX_ROW_LOG 32
41b53a95abSItamar Gozlan #define MLX5DR_DEFINER_HL_OPT_MAX 2
429fa0e142SGregory Etelson 
/* Setter function based on bit offset and mask, for 32bit DW.
 * Read-modify-write of a single big-endian DW at (byte_off / 4):
 * clears the (mask << bit_off) bits and ORs in the masked value.
 * bit_off must be non-negative here; DR_SET handles the spanning case.
 */
#define _DR_SET_32(p, v, byte_off, bit_off, mask) \
	do { \
		u32 _v = v; \
		*((rte_be32_t *)(p) + ((byte_off) / 4)) = \
		rte_cpu_to_be_32((rte_be_to_cpu_32(*((u32 *)(p) + \
				  ((byte_off) / 4))) & \
				  (~((mask) << (bit_off)))) | \
				 (((_v) & (mask)) << \
				  (bit_off))); \
	} while (0)
54c55c2bf3SAlex Vesker 
/* Getter function based on bit offset and mask, for 32bit DW:
 * the inverse of _DR_SET_32 — extracts (mask) bits at bit_off from
 * the big-endian DW at (byte_off / 4).
 */
#define DR_GET_32(p, byte_off, bit_off, mask) \
	((rte_be_to_cpu_32(*((const rte_be32_t *)(p) + ((byte_off) / 4))) >> (bit_off)) & (mask))
588c178ac8SMichael Baum 
/* Setter function based on bit offset and mask.
 * A negative bit_off means the field spans a DW boundary: the upper
 * part of the value lands at bit 0 of the first DW and the remaining
 * low |bit_off| bits land in the top of the following DW.
 * Note: the second-DW shift is normalized with "+ BITS_IN_DW" so the
 * shift count is the positive 32 + bit_off; the previous plain
 * "(bit_off) % BITS_IN_DW" yielded a negative C remainder and thus an
 * undefined negative shift count (it only worked on ISAs that mask
 * shift counts mod 32). "1u <<" likewise avoids UB for _bit_off == 31.
 */
#define DR_SET(p, v, byte_off, bit_off, mask) \
	do { \
		if (unlikely((bit_off) < 0)) { \
			u32 _bit_off = -1 * (bit_off); \
			u32 second_dw_mask = (mask) & ((1u << _bit_off) - 1); \
			_DR_SET_32(p, (v) >> _bit_off, byte_off, 0, (mask) >> _bit_off); \
			_DR_SET_32(p, (v) & second_dw_mask, (byte_off) + DW_SIZE, \
				   ((bit_off) + BITS_IN_DW) % BITS_IN_DW, second_dw_mask); \
		} else { \
			_DR_SET_32(p, v, byte_off, (bit_off), (mask)); \
		} \
	} while (0)
72c55c2bf3SAlex Vesker 
/* Setter function based on byte offset to directly set FULL BE32 value.
 * bit_off and mask are accepted only to keep the common setter
 * signature; they are intentionally ignored.
 */
#define DR_SET_BE32(p, v, byte_off, bit_off, mask) \
	(*((rte_be32_t *)((uint8_t *)(p) + (byte_off))) = (v))

/* Setter function based on byte offset to directly set FULL BE32 value from ptr.
 * Uses memcpy since v_ptr may be unaligned. bit_off/mask are ignored.
 */
#define DR_SET_BE32P(p, v_ptr, byte_off, bit_off, mask) \
	memcpy((uint8_t *)(p) + (byte_off), v_ptr, 4)

/* Setter function based on byte offset to directly set FULL BE16 value.
 * bit_off/mask are ignored (see DR_SET_BE32).
 */
#define DR_SET_BE16(p, v, byte_off, bit_off, mask) \
	(*((rte_be16_t *)((uint8_t *)(p) + (byte_off))) = (v))

/* Setter function based on byte offset to directly set FULL BE16 value from ptr.
 * Uses memcpy since v_ptr may be unaligned. bit_off/mask are ignored.
 */
#define DR_SET_BE16P(p, v_ptr, byte_off, bit_off, mask) \
	memcpy((uint8_t *)(p) + (byte_off), v_ptr, 2)
88c55c2bf3SAlex Vesker 
/* Select the inner (_I) or outer (_O) variant of a definer field name. */
#define DR_CALC_FNAME(field, inner) \
	((inner) ? MLX5DR_DEFINER_FNAME_##field##_I : \
		   MLX5DR_DEFINER_FNAME_##field##_O)
92c55c2bf3SAlex Vesker 
/* Fill a field-copy entry (mask, in-DW bit offset and byte offset)
 * for hdr.field from the definer header-layout description.
 */
#define DR_CALC_SET_HDR(fc, hdr, field) \
	do { \
		(fc)->bit_mask = __mlx5_mask(definer_hl, hdr.field); \
		(fc)->bit_off = __mlx5_dw_bit_off(definer_hl, hdr.field); \
		(fc)->byte_off = MLX5_BYTE_OFF(definer_hl, hdr.field); \
	} while (0)
99c55c2bf3SAlex Vesker 
/* Helper to calculate data used by DR_SET:
 * dispatches to the hdr##_inner or hdr##_outer header-layout section
 * according to is_inner.
 */
#define DR_CALC_SET(fc, hdr, field, is_inner) \
	do { \
		if (is_inner) { \
			DR_CALC_SET_HDR(fc, hdr##_inner, field); \
		} else { \
			DR_CALC_SET_HDR(fc, hdr##_outer, field); \
		} \
	} while (0)
109c55c2bf3SAlex Vesker 
/* Getter for a PRM-described field: extracts fld of struct typ from
 * the big-endian buffer p (counterpart of DR_CALC_SET_HDR offsets).
 * Cosmetic fix: dropped the stray leading space before "#define" to
 * match every other preprocessor directive in this file.
 */
#define DR_GET(typ, p, fld) \
	((rte_be_to_cpu_32(*((const rte_be32_t *)(p) + \
	__mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
	__mlx5_mask(typ, fld))
114c55c2bf3SAlex Vesker 
/* Each row (i) indicates a different matcher size, and each column (j)
 * represents {DW5, DW4, DW3, DW2, DW1, DW0}.
 * For values 0,..,2^i, and j (DW) 0,..,5: mlx5dr_optimal_dist_dw[i][j] is 1 if the
 * number of different hash results on these values equals 2^i, meaning this
 * DW hash distribution is complete.
 * NOTE(review): table content is precomputed offline from the hash
 * function behavior — do not edit by hand.
 */
int mlx5dr_optimal_dist_dw[MLX5DR_DEFINER_MAX_ROW_LOG][DW_SELECTORS_MATCH] = {
	{1, 1, 1, 1, 1, 1}, {0, 1, 1, 0, 1, 0}, {0, 1, 1, 0, 1, 0},
	{1, 0, 1, 0, 1, 0}, {0, 0, 0, 1, 1, 0}, {0, 1, 1, 0, 1, 0},
	{0, 0, 0, 0, 1, 0}, {0, 1, 1, 0, 1, 0}, {0, 0, 0, 0, 0, 0},
	{1, 0, 1, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 1, 0, 1, 0, 0},
	{1, 0, 0, 0, 0, 0}, {0, 0, 1, 0, 0, 1}, {1, 1, 1, 0, 0, 0},
	{1, 1, 1, 0, 1, 0}, {0, 0, 1, 1, 0, 0}, {0, 1, 1, 0, 0, 1},
	{0, 0, 1, 0, 0, 1}, {0, 0, 1, 0, 0, 0}, {1, 0, 1, 1, 0, 0},
	{1, 0, 1, 0, 0, 1}, {0, 0, 1, 1, 0, 1}, {1, 1, 1, 0, 0, 0},
	{0, 1, 0, 1, 0, 1}, {0, 0, 0, 0, 0, 1}, {0, 0, 0, 1, 1, 1},
	{0, 0, 1, 0, 0, 1}, {1, 1, 0, 1, 1, 0}, {0, 0, 0, 0, 1, 0},
	{0, 0, 0, 1, 1, 0}};
13388ff4179SItamar Gozlan 
/* Bookkeeping for definer selector allocation: how many selectors of
 * each class the device allows versus how many the current match
 * template has consumed, plus the chosen selector values.
 */
struct mlx5dr_definer_sel_ctrl {
	uint8_t allowed_full_dw; /* Full DW selectors cover all offsets */
	uint8_t allowed_lim_dw;  /* Limited DW selectors cover offset < 64 */
	uint8_t allowed_bytes;   /* Bytes selectors, up to offset 255 */
	uint8_t used_full_dw;    /* Full DW selectors consumed so far */
	uint8_t used_lim_dw;     /* Limited DW selectors consumed so far */
	uint8_t used_bytes;      /* Byte selectors consumed so far */
	uint8_t full_dw_selector[DW_SELECTORS];
	uint8_t lim_dw_selector[DW_SELECTORS_LIMITED];
	uint8_t byte_selector[BYTE_SELECTORS];
};
145c55c2bf3SAlex Vesker 
/* State shared by the rte_flow item conversion routines while an item
 * list is translated into definer field-copy (fc) entries.
 */
struct mlx5dr_definer_conv_data {
	struct mlx5dr_context *ctx;        /* Owning HWS context */
	struct mlx5dr_definer_fc *fc;      /* Field-copy array being filled */
	uint8_t relaxed;                   /* Relaxed mode: skip implicit fields */
	uint8_t tunnel;                    /* Nonzero once a tunnel item was seen */
	uint8_t mpls_idx;                  /* Index of the current MPLS label */
	uint8_t geneve_opt_ok_idx;         /* presumably next GENEVE opt OK-bit slot — confirm at use sites */
	uint8_t geneve_opt_data_idx;       /* presumably next GENEVE opt data DW slot — confirm at use sites */
	enum rte_flow_item_type last_item; /* Previously processed item type */
	enum mlx5dr_table_type table_type; /* NIC RX/TX or FDB table */
};
157c55c2bf3SAlex Vesker 
/* Xmacro used to create generic item setter from items.
 * Each X(set_type, func_name, value, item_type) entry expands (below)
 * into mlx5dr_definer_<func_name>_set(), which writes <value> taken
 * from a "const struct <item_type> *v" spec into the tag buffer via
 * the DR_<set_type> setter macro.
 */
#define LIST_OF_FIELDS_INFO \
	X(SET_BE16,	eth_type,		v->hdr.ether_type,		rte_flow_item_eth) \
	X(SET_BE32P,	eth_smac_47_16,		&v->hdr.src_addr.addr_bytes[0],	rte_flow_item_eth) \
	X(SET_BE16P,	eth_smac_15_0,		&v->hdr.src_addr.addr_bytes[4],	rte_flow_item_eth) \
	X(SET_BE32P,	eth_dmac_47_16,		&v->hdr.dst_addr.addr_bytes[0],	rte_flow_item_eth) \
	X(SET_BE16P,	eth_dmac_15_0,		&v->hdr.dst_addr.addr_bytes[4],	rte_flow_item_eth) \
	X(SET_BE16,	tci,			v->hdr.vlan_tci,		rte_flow_item_vlan) \
	X(SET,		ipv4_ihl,		v->ihl,			rte_ipv4_hdr) \
	X(SET,		ipv4_tos,		v->type_of_service,	rte_ipv4_hdr) \
	X(SET,		ipv4_time_to_live,	v->time_to_live,	rte_ipv4_hdr) \
	X(SET_BE32,	ipv4_dst_addr,		v->dst_addr,		rte_ipv4_hdr) \
	X(SET_BE32,	ipv4_src_addr,		v->src_addr,		rte_ipv4_hdr) \
	X(SET,		ipv4_next_proto,	v->next_proto_id,	rte_ipv4_hdr) \
	X(SET,		ipv4_version,		STE_IPV4,		rte_ipv4_hdr) \
	X(SET_BE16,	ipv4_frag,		v->fragment_offset,	rte_ipv4_hdr) \
	X(SET_BE16,	ipv4_len,		v->total_length,	rte_ipv4_hdr) \
	X(SET_BE16,	ipv4_identification,	v->packet_id,		rte_ipv4_hdr) \
	X(SET,          ip_fragmented,          !!v->fragment_offset,   rte_ipv4_hdr) \
	X(SET_BE16,	ipv6_payload_len,	v->hdr.payload_len,	rte_flow_item_ipv6) \
	X(SET,		ipv6_proto,		v->hdr.proto,		rte_flow_item_ipv6) \
	X(SET,		ipv6_routing_hdr,	IPPROTO_ROUTING,	rte_flow_item_ipv6) \
	X(SET,		ipv6_hop_limits,	v->hdr.hop_limits,	rte_flow_item_ipv6) \
	X(SET_BE32P,	ipv6_src_addr_127_96,	&v->hdr.src_addr.a[0],	rte_flow_item_ipv6) \
	X(SET_BE32P,	ipv6_src_addr_95_64,	&v->hdr.src_addr.a[4],	rte_flow_item_ipv6) \
	X(SET_BE32P,	ipv6_src_addr_63_32,	&v->hdr.src_addr.a[8],	rte_flow_item_ipv6) \
	X(SET_BE32P,	ipv6_src_addr_31_0,	&v->hdr.src_addr.a[12],	rte_flow_item_ipv6) \
	X(SET_BE32P,	ipv6_dst_addr_127_96,	&v->hdr.dst_addr.a[0],	rte_flow_item_ipv6) \
	X(SET_BE32P,	ipv6_dst_addr_95_64,	&v->hdr.dst_addr.a[4],	rte_flow_item_ipv6) \
	X(SET_BE32P,	ipv6_dst_addr_63_32,	&v->hdr.dst_addr.a[8],	rte_flow_item_ipv6) \
	X(SET_BE32P,	ipv6_dst_addr_31_0,	&v->hdr.dst_addr.a[12],	rte_flow_item_ipv6) \
	X(SET,		ipv6_version,		STE_IPV6,		rte_flow_item_ipv6) \
	X(SET,		ipv6_frag,		v->has_frag_ext,	rte_flow_item_ipv6) \
	X(SET,		icmp_protocol,		STE_ICMP,		rte_flow_item_icmp) \
	X(SET,		udp_protocol,		STE_UDP,		rte_flow_item_udp) \
	X(SET_BE16,	udp_src_port,		v->hdr.src_port,	rte_flow_item_udp) \
	X(SET_BE16,	udp_dst_port,		v->hdr.dst_port,	rte_flow_item_udp) \
	X(SET,		tcp_flags,		v->hdr.tcp_flags,	rte_flow_item_tcp) \
	X(SET,		tcp_protocol,		STE_TCP,		rte_flow_item_tcp) \
	X(SET_BE16,	tcp_src_port,		v->hdr.src_port,	rte_flow_item_tcp) \
	X(SET_BE16,	tcp_dst_port,		v->hdr.dst_port,	rte_flow_item_tcp) \
	X(SET,		gtp_udp_port,		UDP_GTPU_PORT,		rte_flow_item_gtp) \
	X(SET_BE32,	gtp_teid,		v->hdr.teid,		rte_flow_item_gtp) \
	X(SET,		gtp_msg_type,		v->hdr.msg_type,	rte_flow_item_gtp) \
	X(SET,		gtp_ext_flag,		!!v->hdr.gtp_hdr_info,	rte_flow_item_gtp) \
	X(SET,		gtp_next_ext_hdr,	GTP_PDU_SC,		rte_flow_item_gtp_psc) \
	X(SET,		gtp_ext_hdr_pdu,	v->hdr.type,		rte_flow_item_gtp_psc) \
	X(SET,		gtp_ext_hdr_qfi,	v->hdr.qfi,		rte_flow_item_gtp_psc) \
	X(SET_BE32,	vxlan_vx_flags,		v->hdr.vx_flags,	rte_flow_item_vxlan) \
	X(SET_BE32,	vxlan_vx_vni,		v->hdr.vx_vni,		rte_flow_item_vxlan) \
	X(SET,		vxlan_udp_port,		UDP_VXLAN_PORT,		rte_flow_item_vxlan) \
	X(SET,		vxlan_gpe_udp_port,	UDP_VXLAN_GPE_PORT,	rte_flow_item_vxlan_gpe) \
	X(SET,		vxlan_gpe_flags,	v->flags,		rte_flow_item_vxlan_gpe) \
	X(SET,		vxlan_gpe_protocol,	v->protocol,		rte_flow_item_vxlan_gpe) \
	X(SET,		vxlan_gpe_rsvd1,	v->rsvd1,		rte_flow_item_vxlan_gpe) \
	X(SET,		mpls_udp_port,		UDP_PORT_MPLS,		rte_flow_item_mpls) \
	X(SET,		source_qp,		v->queue,		mlx5_rte_flow_item_sq) \
	X(SET,		tag,			v->data,		rte_flow_item_tag) \
	X(SET,		metadata,		v->data,		rte_flow_item_meta) \
	X(SET_BE16,	geneve_protocol,	v->protocol,		rte_flow_item_geneve) \
	X(SET,		geneve_udp_port,	UDP_GENEVE_PORT,	rte_flow_item_geneve) \
	X(SET_BE16,	geneve_ctrl,		v->ver_opt_len_o_c_rsvd0,	rte_flow_item_geneve) \
	X(SET_BE16,	gre_c_ver,		v->c_rsvd0_ver,		rte_flow_item_gre) \
	X(SET_BE16,	gre_protocol_type,	v->protocol,		rte_flow_item_gre) \
	X(SET,		ipv4_protocol_gre,	IPPROTO_GRE,		rte_flow_item_gre) \
	X(SET_BE32,	gre_opt_key,		v->key.key,		rte_flow_item_gre_opt) \
	X(SET_BE32,	gre_opt_seq,		v->sequence.sequence,	rte_flow_item_gre_opt) \
	X(SET_BE16,	gre_opt_checksum,	v->checksum_rsvd.checksum,	rte_flow_item_gre_opt) \
	X(SET,		nvgre_def_c_rsvd0_ver,	NVGRE_C_RSVD0_VER,	rte_flow_item_nvgre) \
	X(SET,		nvgre_def_c_rsvd0_ver_mask,	NVGRE_C_RSVD0_VER_MASK,	rte_flow_item_nvgre) \
	X(SET,		nvgre_def_protocol,	NVGRE_PORT,		rte_flow_item_nvgre) \
	X(SET_BE16,	nvgre_c_rsvd0_ver,	v->c_k_s_rsvd0_ver,	rte_flow_item_nvgre) \
	X(SET_BE16,	nvgre_protocol,		v->protocol,		rte_flow_item_nvgre) \
	X(SET_BE32P,	nvgre_dw1,		&v->tni[0],		rte_flow_item_nvgre) \
	X(SET,		meter_color,		rte_col_2_mlx5_col(v->color),	rte_flow_item_meter_color) \
	X(SET_BE32,     ipsec_spi,              v->hdr.spi,             rte_flow_item_esp) \
	X(SET_BE32,     ipsec_sequence_number,  v->hdr.seq,             rte_flow_item_esp) \
	X(SET,		ib_l4_udp_port,		UDP_ROCEV2_PORT,	rte_flow_item_ib_bth) \
	X(SET,		ib_l4_opcode,		v->hdr.opcode,		rte_flow_item_ib_bth) \
	X(SET,		random_number,		v->value,		rte_flow_item_random) \
	X(SET,		ib_l4_bth_a,		v->hdr.a,		rte_flow_item_ib_bth) \
	X(SET,		cvlan,			STE_CVLAN,		rte_flow_item_vlan) \
	X(SET_BE16,	inner_type,		v->inner_type,		rte_flow_item_vlan) \

/* Item set function format.
 * Expands every LIST_OF_FIELDS_INFO entry into one trivial setter:
 * mlx5dr_definer_<func_name>_set() copies <value> from the typed item
 * spec into the tag using the matching DR_<set_type> macro. "v" is
 * marked unused because some entries write a constant instead.
 */
#define X(set_type, func_name, value, item_type) \
static void mlx5dr_definer_##func_name##_set( \
	struct mlx5dr_definer_fc *fc, \
	const void *item_spec, \
	uint8_t *tag) \
{ \
	__rte_unused const struct item_type *v = item_spec; \
	DR_##set_type(tag, value, fc->byte_off, fc->bit_off, fc->bit_mask); \
}
LIST_OF_FIELDS_INFO
#undef X
254c55c2bf3SAlex Vesker 
255c55c2bf3SAlex Vesker static void
256c55c2bf3SAlex Vesker mlx5dr_definer_ones_set(struct mlx5dr_definer_fc *fc,
257c55c2bf3SAlex Vesker 			__rte_unused const void *item_spec,
258c55c2bf3SAlex Vesker 			__rte_unused uint8_t *tag)
259c55c2bf3SAlex Vesker {
260c55c2bf3SAlex Vesker 	DR_SET(tag, -1, fc->byte_off, fc->bit_off, fc->bit_mask);
261c55c2bf3SAlex Vesker }
262c55c2bf3SAlex Vesker 
263c55c2bf3SAlex Vesker static void
264c55c2bf3SAlex Vesker mlx5dr_definer_eth_first_vlan_q_set(struct mlx5dr_definer_fc *fc,
265c55c2bf3SAlex Vesker 				    const void *item_spec,
266c55c2bf3SAlex Vesker 				    uint8_t *tag)
267c55c2bf3SAlex Vesker {
268c55c2bf3SAlex Vesker 	const struct rte_flow_item_eth *v = item_spec;
269c55c2bf3SAlex Vesker 	uint8_t vlan_type;
270c55c2bf3SAlex Vesker 
271c55c2bf3SAlex Vesker 	vlan_type = v->has_vlan ? STE_CVLAN : STE_NO_VLAN;
272c55c2bf3SAlex Vesker 
273c55c2bf3SAlex Vesker 	DR_SET(tag, vlan_type, fc->byte_off, fc->bit_off, fc->bit_mask);
274c55c2bf3SAlex Vesker }
275c55c2bf3SAlex Vesker 
276c55c2bf3SAlex Vesker static void
277c55c2bf3SAlex Vesker mlx5dr_definer_first_vlan_q_set(struct mlx5dr_definer_fc *fc,
278c55c2bf3SAlex Vesker 				const void *item_spec,
279c55c2bf3SAlex Vesker 				uint8_t *tag)
280c55c2bf3SAlex Vesker {
281c55c2bf3SAlex Vesker 	const struct rte_flow_item_vlan *v = item_spec;
282c55c2bf3SAlex Vesker 	uint8_t vlan_type;
283c55c2bf3SAlex Vesker 
284c55c2bf3SAlex Vesker 	vlan_type = v->has_more_vlan ? STE_SVLAN : STE_CVLAN;
285c55c2bf3SAlex Vesker 
286c55c2bf3SAlex Vesker 	DR_SET(tag, vlan_type, fc->byte_off, fc->bit_off, fc->bit_mask);
287c55c2bf3SAlex Vesker }
288c55c2bf3SAlex Vesker 
289c55c2bf3SAlex Vesker static void
290c55c2bf3SAlex Vesker mlx5dr_definer_conntrack_mask(struct mlx5dr_definer_fc *fc,
291c55c2bf3SAlex Vesker 			      const void *item_spec,
292c55c2bf3SAlex Vesker 			      uint8_t *tag)
293c55c2bf3SAlex Vesker {
294c55c2bf3SAlex Vesker 	const struct rte_flow_item_conntrack *m = item_spec;
295c55c2bf3SAlex Vesker 	uint32_t reg_mask = 0;
296c55c2bf3SAlex Vesker 
297c55c2bf3SAlex Vesker 	if (m->flags & (RTE_FLOW_CONNTRACK_PKT_STATE_VALID |
298c55c2bf3SAlex Vesker 			RTE_FLOW_CONNTRACK_PKT_STATE_INVALID |
299c55c2bf3SAlex Vesker 			RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED))
300c55c2bf3SAlex Vesker 		reg_mask |= (MLX5_CT_SYNDROME_VALID | MLX5_CT_SYNDROME_INVALID |
301c55c2bf3SAlex Vesker 			     MLX5_CT_SYNDROME_TRAP);
302c55c2bf3SAlex Vesker 
303c55c2bf3SAlex Vesker 	if (m->flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
304c55c2bf3SAlex Vesker 		reg_mask |= MLX5_CT_SYNDROME_STATE_CHANGE;
305c55c2bf3SAlex Vesker 
306c55c2bf3SAlex Vesker 	if (m->flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
307c55c2bf3SAlex Vesker 		reg_mask |= MLX5_CT_SYNDROME_BAD_PACKET;
308c55c2bf3SAlex Vesker 
309c55c2bf3SAlex Vesker 	DR_SET(tag, reg_mask, fc->byte_off, fc->bit_off, fc->bit_mask);
310c55c2bf3SAlex Vesker }
311c55c2bf3SAlex Vesker 
312c55c2bf3SAlex Vesker static void
313c55c2bf3SAlex Vesker mlx5dr_definer_conntrack_tag(struct mlx5dr_definer_fc *fc,
314c55c2bf3SAlex Vesker 			     const void *item_spec,
315c55c2bf3SAlex Vesker 			     uint8_t *tag)
316c55c2bf3SAlex Vesker {
317c55c2bf3SAlex Vesker 	const struct rte_flow_item_conntrack *v = item_spec;
318c55c2bf3SAlex Vesker 	uint32_t reg_value = 0;
319c55c2bf3SAlex Vesker 
320c55c2bf3SAlex Vesker 	/* The conflict should be checked in the validation. */
321c55c2bf3SAlex Vesker 	if (v->flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID)
322c55c2bf3SAlex Vesker 		reg_value |= MLX5_CT_SYNDROME_VALID;
323c55c2bf3SAlex Vesker 
324c55c2bf3SAlex Vesker 	if (v->flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
325c55c2bf3SAlex Vesker 		reg_value |= MLX5_CT_SYNDROME_STATE_CHANGE;
326c55c2bf3SAlex Vesker 
327c55c2bf3SAlex Vesker 	if (v->flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID)
328c55c2bf3SAlex Vesker 		reg_value |= MLX5_CT_SYNDROME_INVALID;
329c55c2bf3SAlex Vesker 
330c55c2bf3SAlex Vesker 	if (v->flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)
331c55c2bf3SAlex Vesker 		reg_value |= MLX5_CT_SYNDROME_TRAP;
332c55c2bf3SAlex Vesker 
333c55c2bf3SAlex Vesker 	if (v->flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
334c55c2bf3SAlex Vesker 		reg_value |= MLX5_CT_SYNDROME_BAD_PACKET;
335c55c2bf3SAlex Vesker 
336c55c2bf3SAlex Vesker 	DR_SET(tag, reg_value, fc->byte_off, fc->bit_off, fc->bit_mask);
337c55c2bf3SAlex Vesker }
338c55c2bf3SAlex Vesker 
339c55c2bf3SAlex Vesker static void
340ad17988aSAlexander Kozyrev mlx5dr_definer_ptype_l2_set(struct mlx5dr_definer_fc *fc,
341ad17988aSAlexander Kozyrev 			    const void *item_spec,
342ad17988aSAlexander Kozyrev 			    uint8_t *tag)
343ad17988aSAlexander Kozyrev {
344ad17988aSAlexander Kozyrev 	bool inner = (fc->fname == MLX5DR_DEFINER_FNAME_PTYPE_L2_I);
345ad17988aSAlexander Kozyrev 	const struct rte_flow_item_ptype *v = item_spec;
346ad17988aSAlexander Kozyrev 	uint32_t packet_type = v->packet_type &
347ad17988aSAlexander Kozyrev 		(inner ? RTE_PTYPE_INNER_L2_MASK : RTE_PTYPE_L2_MASK);
348ad17988aSAlexander Kozyrev 	uint8_t l2_type = STE_NO_VLAN;
349ad17988aSAlexander Kozyrev 
350ad17988aSAlexander Kozyrev 	if (packet_type == (inner ? RTE_PTYPE_INNER_L2_ETHER : RTE_PTYPE_L2_ETHER))
351ad17988aSAlexander Kozyrev 		l2_type = STE_NO_VLAN;
352ad17988aSAlexander Kozyrev 	else if (packet_type == (inner ? RTE_PTYPE_INNER_L2_ETHER_VLAN : RTE_PTYPE_L2_ETHER_VLAN))
353ad17988aSAlexander Kozyrev 		l2_type = STE_CVLAN;
354ad17988aSAlexander Kozyrev 	else if (packet_type == (inner ? RTE_PTYPE_INNER_L2_ETHER_QINQ : RTE_PTYPE_L2_ETHER_QINQ))
355ad17988aSAlexander Kozyrev 		l2_type = STE_SVLAN;
356ad17988aSAlexander Kozyrev 
357ad17988aSAlexander Kozyrev 	DR_SET(tag, l2_type, fc->byte_off, fc->bit_off, fc->bit_mask);
358ad17988aSAlexander Kozyrev }
359ad17988aSAlexander Kozyrev 
360ad17988aSAlexander Kozyrev static void
361ad17988aSAlexander Kozyrev mlx5dr_definer_ptype_l3_set(struct mlx5dr_definer_fc *fc,
362ad17988aSAlexander Kozyrev 			    const void *item_spec,
363ad17988aSAlexander Kozyrev 			    uint8_t *tag)
364ad17988aSAlexander Kozyrev {
365ad17988aSAlexander Kozyrev 	bool inner = (fc->fname == MLX5DR_DEFINER_FNAME_PTYPE_L3_I);
366ad17988aSAlexander Kozyrev 	const struct rte_flow_item_ptype *v = item_spec;
367ad17988aSAlexander Kozyrev 	uint32_t packet_type = v->packet_type &
368ad17988aSAlexander Kozyrev 		(inner ? RTE_PTYPE_INNER_L3_MASK : RTE_PTYPE_L3_MASK);
369ad17988aSAlexander Kozyrev 	uint8_t l3_type = STE_NO_L3;
370ad17988aSAlexander Kozyrev 
371ad17988aSAlexander Kozyrev 	if (packet_type == (inner ? RTE_PTYPE_INNER_L3_IPV4 : RTE_PTYPE_L3_IPV4))
372ad17988aSAlexander Kozyrev 		l3_type = STE_IPV4;
373ad17988aSAlexander Kozyrev 	else if (packet_type == (inner ? RTE_PTYPE_INNER_L3_IPV6 : RTE_PTYPE_L3_IPV6))
374ad17988aSAlexander Kozyrev 		l3_type = STE_IPV6;
375ad17988aSAlexander Kozyrev 
376ad17988aSAlexander Kozyrev 	DR_SET(tag, l3_type, fc->byte_off, fc->bit_off, fc->bit_mask);
377ad17988aSAlexander Kozyrev }
378ad17988aSAlexander Kozyrev 
379ad17988aSAlexander Kozyrev static void
380ad17988aSAlexander Kozyrev mlx5dr_definer_ptype_l4_set(struct mlx5dr_definer_fc *fc,
381ad17988aSAlexander Kozyrev 			    const void *item_spec,
382ad17988aSAlexander Kozyrev 			    uint8_t *tag)
383ad17988aSAlexander Kozyrev {
384ad17988aSAlexander Kozyrev 	bool inner = (fc->fname == MLX5DR_DEFINER_FNAME_PTYPE_L4_I);
385ad17988aSAlexander Kozyrev 	const struct rte_flow_item_ptype *v = item_spec;
386ad17988aSAlexander Kozyrev 	uint32_t packet_type = v->packet_type &
387ad17988aSAlexander Kozyrev 		(inner ? RTE_PTYPE_INNER_L4_MASK : RTE_PTYPE_L4_MASK);
388ad17988aSAlexander Kozyrev 	uint8_t l4_type = STE_NO_L4;
389ad17988aSAlexander Kozyrev 
390ad17988aSAlexander Kozyrev 	if (packet_type == (inner ? RTE_PTYPE_INNER_L4_TCP : RTE_PTYPE_L4_TCP))
391ad17988aSAlexander Kozyrev 		l4_type = STE_TCP;
392ad17988aSAlexander Kozyrev 	else if (packet_type == (inner ? RTE_PTYPE_INNER_L4_UDP : RTE_PTYPE_L4_UDP))
393ad17988aSAlexander Kozyrev 		l4_type = STE_UDP;
394*a3711190SAlexander Kozyrev 	else if (packet_type == (inner ? RTE_PTYPE_INNER_L4_ESP : RTE_PTYPE_L4_ESP))
395*a3711190SAlexander Kozyrev 		l4_type = STE_ESP;
396*a3711190SAlexander Kozyrev 
397*a3711190SAlexander Kozyrev 	DR_SET(tag, l4_type, fc->byte_off, fc->bit_off, fc->bit_mask);
398*a3711190SAlexander Kozyrev }
399*a3711190SAlexander Kozyrev 
/* Translate the L4 part of an rte_flow ptype item into the extended
 * STE L4 type (adds ICMP and ESP on top of TCP/UDP).
 */
static void
mlx5dr_definer_ptype_l4_ext_set(struct mlx5dr_definer_fc *fc,
				const void *item_spec,
				uint8_t *tag)
{
	bool inner = (fc->fname == MLX5DR_DEFINER_FNAME_PTYPE_L4_EXT_I);
	const struct rte_flow_item_ptype *v = item_spec;
	uint32_t packet_type = v->packet_type &
		(inner ? RTE_PTYPE_INNER_L4_MASK : RTE_PTYPE_L4_MASK);
	uint8_t l4_type = STE_NO_L4;

	if (packet_type == (inner ? RTE_PTYPE_INNER_L4_TCP : RTE_PTYPE_L4_TCP))
		l4_type = STE_TCP;
	else if (packet_type == (inner ? RTE_PTYPE_INNER_L4_UDP : RTE_PTYPE_L4_UDP))
		l4_type = STE_UDP;
	else if (packet_type == (inner ? RTE_PTYPE_INNER_L4_ICMP : RTE_PTYPE_L4_ICMP))
		l4_type = STE_ICMP;
	/* NOTE(review): packet_type was masked with the L4 mask above, so it
	 * cannot equal RTE_PTYPE_TUNNEL_ESP (a tunnel-mask value); this
	 * branch looks unreachable — confirm intended mask/constant.
	 */
	else if (packet_type == RTE_PTYPE_TUNNEL_ESP)
		l4_type = STE_ESP;

	DR_SET(tag, l4_type, fc->byte_off, fc->bit_off, fc->bit_mask);
}
422ad17988aSAlexander Kozyrev 
423ad17988aSAlexander Kozyrev static void
424ad17988aSAlexander Kozyrev mlx5dr_definer_ptype_tunnel_set(struct mlx5dr_definer_fc *fc,
425ad17988aSAlexander Kozyrev 				const void *item_spec,
426ad17988aSAlexander Kozyrev 				uint8_t *tag)
427ad17988aSAlexander Kozyrev {
428ad17988aSAlexander Kozyrev 	const struct rte_flow_item_ptype *v = item_spec;
429ad17988aSAlexander Kozyrev 	uint32_t packet_type = v->packet_type & RTE_PTYPE_TUNNEL_MASK;
430ad17988aSAlexander Kozyrev 	uint8_t tun_type = STE_NO_TUN;
431ad17988aSAlexander Kozyrev 
432ad17988aSAlexander Kozyrev 	if (packet_type == RTE_PTYPE_TUNNEL_ESP)
433ad17988aSAlexander Kozyrev 		tun_type = STE_ESP;
434ad17988aSAlexander Kozyrev 
435ad17988aSAlexander Kozyrev 	DR_SET(tag, tun_type, fc->byte_off, fc->bit_off, fc->bit_mask);
436ad17988aSAlexander Kozyrev }
437ad17988aSAlexander Kozyrev 
438ad17988aSAlexander Kozyrev static void
439761439a2SAlexander Kozyrev mlx5dr_definer_ptype_frag_set(struct mlx5dr_definer_fc *fc,
440761439a2SAlexander Kozyrev 			      const void *item_spec,
441761439a2SAlexander Kozyrev 			      uint8_t *tag)
442761439a2SAlexander Kozyrev {
443761439a2SAlexander Kozyrev 	bool inner = (fc->fname == MLX5DR_DEFINER_FNAME_PTYPE_FRAG_I);
444761439a2SAlexander Kozyrev 	const struct rte_flow_item_ptype *v = item_spec;
445761439a2SAlexander Kozyrev 	uint32_t packet_type = v->packet_type &
446761439a2SAlexander Kozyrev 		(inner ? RTE_PTYPE_INNER_L4_FRAG : RTE_PTYPE_L4_FRAG);
447761439a2SAlexander Kozyrev 
448761439a2SAlexander Kozyrev 	DR_SET(tag, !!packet_type, fc->byte_off, fc->bit_off, fc->bit_mask);
449761439a2SAlexander Kozyrev }
450761439a2SAlexander Kozyrev 
/* Write the immediate base operand (field B's value) of a compare item
 * into the 4DW-range STE and flag base0 as an immediate value.
 * The in-DW placement of the value depends on which field is compared.
 */
static void
mlx5dr_definer_compare_base_value_set(const void *item_spec,
				      uint8_t *tag)
{
	uint32_t *ctrl = &(((uint32_t *)tag)[MLX5DR_DEFINER_COMPARE_STE_ARGUMENT_1]);
	uint32_t *base = &(((uint32_t *)tag)[MLX5DR_DEFINER_COMPARE_STE_BASE_0]);
	const struct rte_flow_item_compare *v = item_spec;
	const struct rte_flow_field_data *a = &v->a;
	const struct rte_flow_field_data *b = &v->b;
	const uint32_t *value;

	value = (const uint32_t *)&b->value[0];

	switch (a->field) {
	case RTE_FLOW_FIELD_RANDOM:
		/* Random number occupies the upper 16 bits of the DW. */
		*base = htobe32(*value << 16);
		break;
	case RTE_FLOW_FIELD_TAG:
	case RTE_FLOW_FIELD_META:
		*base = htobe32(*value);
		break;
	case RTE_FLOW_FIELD_ESP_SEQ_NUM:
		/* Sequence number is already in network byte order. */
		*base = *value;
		break;
	default:
		/* Unsupported fields leave the base untouched; presumably
		 * rejected earlier during validation — confirm at callers.
		 */
		break;
	}

	MLX5_SET(ste_match_4dw_range_ctrl_dw, ctrl, base0, 1);
}
481a5230507SHamdan Igbaria 
482a5230507SHamdan Igbaria static void
483a5230507SHamdan Igbaria mlx5dr_definer_compare_op_translate(enum rte_flow_item_compare_op op,
484a5230507SHamdan Igbaria 				    uint8_t *tag)
485a5230507SHamdan Igbaria {
486a5230507SHamdan Igbaria 	uint32_t *ctrl = &(((uint32_t *)tag)[MLX5DR_DEFINER_COMPARE_STE_ARGUMENT_1]);
487a5230507SHamdan Igbaria 	uint8_t operator = 0;
488a5230507SHamdan Igbaria 	uint8_t inverse = 0;
489a5230507SHamdan Igbaria 
490a5230507SHamdan Igbaria 	switch (op) {
491a5230507SHamdan Igbaria 	case RTE_FLOW_ITEM_COMPARE_EQ:
492a5230507SHamdan Igbaria 		operator = 2;
493a5230507SHamdan Igbaria 		break;
494a5230507SHamdan Igbaria 	case RTE_FLOW_ITEM_COMPARE_NE:
495a5230507SHamdan Igbaria 		operator = 2;
496a5230507SHamdan Igbaria 		inverse = 1;
497a5230507SHamdan Igbaria 		break;
498a5230507SHamdan Igbaria 	case RTE_FLOW_ITEM_COMPARE_LT:
499a5230507SHamdan Igbaria 		inverse = 1;
500a5230507SHamdan Igbaria 		break;
501a5230507SHamdan Igbaria 	case RTE_FLOW_ITEM_COMPARE_LE:
502a5230507SHamdan Igbaria 		operator = 1;
503a5230507SHamdan Igbaria 		break;
504a5230507SHamdan Igbaria 	case RTE_FLOW_ITEM_COMPARE_GT:
505a5230507SHamdan Igbaria 		operator = 1;
506a5230507SHamdan Igbaria 		inverse = 1;
507a5230507SHamdan Igbaria 		break;
508a5230507SHamdan Igbaria 	case RTE_FLOW_ITEM_COMPARE_GE:
509a5230507SHamdan Igbaria 		break;
510a5230507SHamdan Igbaria 	default:
511a5230507SHamdan Igbaria 		DR_LOG(ERR, "Invalid operation type %d", op);
512a5230507SHamdan Igbaria 		assert(false);
513a5230507SHamdan Igbaria 	}
514a5230507SHamdan Igbaria 
515a5230507SHamdan Igbaria 	MLX5_SET(ste_match_4dw_range_ctrl_dw, ctrl, inverse0, inverse);
516a5230507SHamdan Igbaria 	MLX5_SET(ste_match_4dw_range_ctrl_dw, ctrl, operator0, operator);
517a5230507SHamdan Igbaria }
518a5230507SHamdan Igbaria 
519a5230507SHamdan Igbaria static void
520a5230507SHamdan Igbaria mlx5dr_definer_compare_arg_set(const void *item_spec,
521a5230507SHamdan Igbaria 			       uint8_t *tag)
522a5230507SHamdan Igbaria {
523a5230507SHamdan Igbaria 	const struct rte_flow_item_compare *v = item_spec;
524a5230507SHamdan Igbaria 	enum rte_flow_item_compare_op op = v->operation;
525a5230507SHamdan Igbaria 
526a5230507SHamdan Igbaria 	mlx5dr_definer_compare_op_translate(op, tag);
527a5230507SHamdan Igbaria }
528a5230507SHamdan Igbaria 
529a5230507SHamdan Igbaria static void
530a5230507SHamdan Igbaria mlx5dr_definer_compare_set(struct mlx5dr_definer_fc *fc,
531a5230507SHamdan Igbaria 			   const void *item_spec,
532a5230507SHamdan Igbaria 			   uint8_t *tag)
533a5230507SHamdan Igbaria {
534a5230507SHamdan Igbaria 	if (fc->compare_idx == MLX5DR_DEFINER_COMPARE_ARGUMENT_0) {
535a5230507SHamdan Igbaria 		mlx5dr_definer_compare_arg_set(item_spec, tag);
536a5230507SHamdan Igbaria 		if (fc->compare_set_base)
537a5230507SHamdan Igbaria 			mlx5dr_definer_compare_base_value_set(item_spec, tag);
538a5230507SHamdan Igbaria 	}
539a5230507SHamdan Igbaria }
540a5230507SHamdan Igbaria 
541a5230507SHamdan Igbaria static void
542c55c2bf3SAlex Vesker mlx5dr_definer_integrity_set(struct mlx5dr_definer_fc *fc,
543c55c2bf3SAlex Vesker 			     const void *item_spec,
544c55c2bf3SAlex Vesker 			     uint8_t *tag)
545c55c2bf3SAlex Vesker {
546c55c2bf3SAlex Vesker 	bool inner = (fc->fname == MLX5DR_DEFINER_FNAME_INTEGRITY_I);
547c55c2bf3SAlex Vesker 	const struct rte_flow_item_integrity *v = item_spec;
5488c178ac8SMichael Baum 	uint32_t ok1_bits = DR_GET_32(tag, fc->byte_off, fc->bit_off, fc->bit_mask);
549c55c2bf3SAlex Vesker 
550c55c2bf3SAlex Vesker 	if (v->l3_ok)
55147a76c9fSMichael Baum 		ok1_bits |= inner ? BIT(MLX5DR_DEFINER_OKS1_SECOND_L3_OK) :
55247a76c9fSMichael Baum 				    BIT(MLX5DR_DEFINER_OKS1_FIRST_L3_OK);
553c55c2bf3SAlex Vesker 
554c55c2bf3SAlex Vesker 	if (v->ipv4_csum_ok)
555c55c2bf3SAlex Vesker 		ok1_bits |= inner ? BIT(MLX5DR_DEFINER_OKS1_SECOND_IPV4_CSUM_OK) :
556c55c2bf3SAlex Vesker 				    BIT(MLX5DR_DEFINER_OKS1_FIRST_IPV4_CSUM_OK);
557c55c2bf3SAlex Vesker 
558c55c2bf3SAlex Vesker 	if (v->l4_ok)
559c55c2bf3SAlex Vesker 		ok1_bits |= inner ? BIT(MLX5DR_DEFINER_OKS1_SECOND_L4_OK) |
560c55c2bf3SAlex Vesker 				    BIT(MLX5DR_DEFINER_OKS1_SECOND_L4_CSUM_OK) :
561c55c2bf3SAlex Vesker 				    BIT(MLX5DR_DEFINER_OKS1_FIRST_L4_OK) |
562c55c2bf3SAlex Vesker 				    BIT(MLX5DR_DEFINER_OKS1_FIRST_L4_CSUM_OK);
563c55c2bf3SAlex Vesker 
564c55c2bf3SAlex Vesker 	if (v->l4_csum_ok)
565c55c2bf3SAlex Vesker 		ok1_bits |= inner ? BIT(MLX5DR_DEFINER_OKS1_SECOND_L4_CSUM_OK) :
566c55c2bf3SAlex Vesker 				    BIT(MLX5DR_DEFINER_OKS1_FIRST_L4_CSUM_OK);
567c55c2bf3SAlex Vesker 
568c55c2bf3SAlex Vesker 	DR_SET(tag, ok1_bits, fc->byte_off, fc->bit_off, fc->bit_mask);
569c55c2bf3SAlex Vesker }
570c55c2bf3SAlex Vesker 
571c55c2bf3SAlex Vesker static void
57200e57916SRongwei Liu mlx5dr_definer_ipv6_routing_ext_set(struct mlx5dr_definer_fc *fc,
57300e57916SRongwei Liu 				    const void *item,
57400e57916SRongwei Liu 				    uint8_t *tag)
57500e57916SRongwei Liu {
57600e57916SRongwei Liu 	const struct rte_flow_item_ipv6_routing_ext *v = item;
57700e57916SRongwei Liu 	uint32_t val;
57800e57916SRongwei Liu 
57900e57916SRongwei Liu 	val = v->hdr.next_hdr << __mlx5_dw_bit_off(header_ipv6_routing_ext, next_hdr);
58000e57916SRongwei Liu 	val |= v->hdr.type << __mlx5_dw_bit_off(header_ipv6_routing_ext, type);
58100e57916SRongwei Liu 	val |= v->hdr.segments_left <<
58200e57916SRongwei Liu 		__mlx5_dw_bit_off(header_ipv6_routing_ext, segments_left);
58300e57916SRongwei Liu 	DR_SET(tag, val, fc->byte_off, 0, fc->bit_mask);
58400e57916SRongwei Liu }
58500e57916SRongwei Liu 
58600e57916SRongwei Liu static void
5878c0ca752SRongwei Liu mlx5dr_definer_flex_parser_set(struct mlx5dr_definer_fc *fc,
5888c0ca752SRongwei Liu 			       const void *item,
5898c0ca752SRongwei Liu 			       uint8_t *tag, bool is_inner)
5908c0ca752SRongwei Liu {
5918c0ca752SRongwei Liu 	const struct rte_flow_item_flex *flex = item;
5928c0ca752SRongwei Liu 	uint32_t byte_off, val, idx;
5938c0ca752SRongwei Liu 	int ret;
5948c0ca752SRongwei Liu 
5958c0ca752SRongwei Liu 	val = 0;
5968c0ca752SRongwei Liu 	byte_off = MLX5_BYTE_OFF(definer_hl, flex_parser.flex_parser_0);
5978c0ca752SRongwei Liu 	idx = fc->fname - MLX5DR_DEFINER_FNAME_FLEX_PARSER_0;
5988c0ca752SRongwei Liu 	byte_off -= idx * sizeof(uint32_t);
5998c0ca752SRongwei Liu 	ret = mlx5_flex_get_parser_value_per_byte_off(flex, flex->handle, byte_off,
60097e19f07SViacheslav Ovsiienko 						      is_inner, &val);
6018c0ca752SRongwei Liu 	if (ret == -1 || !val)
6028c0ca752SRongwei Liu 		return;
6038c0ca752SRongwei Liu 
6048c0ca752SRongwei Liu 	DR_SET(tag, val, fc->byte_off, 0, fc->bit_mask);
6058c0ca752SRongwei Liu }
6068c0ca752SRongwei Liu 
/* Tag-set callback for a flex item matched on the inner (tunneled) headers. */
static void
mlx5dr_definer_flex_parser_inner_set(struct mlx5dr_definer_fc *fc,
				     const void *item,
				     uint8_t *tag)
{
	mlx5dr_definer_flex_parser_set(fc, item, tag, true);
}
6148c0ca752SRongwei Liu 
/* Tag-set callback for a flex item matched on the outer headers. */
static void
mlx5dr_definer_flex_parser_outer_set(struct mlx5dr_definer_fc *fc,
				     const void *item,
				     uint8_t *tag)
{
	mlx5dr_definer_flex_parser_set(fc, item, tag, false);
}
6228c0ca752SRongwei Liu 
6238c0ca752SRongwei Liu static void
624c55c2bf3SAlex Vesker mlx5dr_definer_gre_key_set(struct mlx5dr_definer_fc *fc,
625c55c2bf3SAlex Vesker 			   const void *item_spec,
626c55c2bf3SAlex Vesker 			   uint8_t *tag)
627c55c2bf3SAlex Vesker {
628c55c2bf3SAlex Vesker 	const rte_be32_t *v = item_spec;
629c55c2bf3SAlex Vesker 
630c55c2bf3SAlex Vesker 	DR_SET_BE32(tag, *v, fc->byte_off, fc->bit_off, fc->bit_mask);
631c55c2bf3SAlex Vesker }
632c55c2bf3SAlex Vesker 
633c55c2bf3SAlex Vesker static void
634c55c2bf3SAlex Vesker mlx5dr_definer_ipv6_tos_set(struct mlx5dr_definer_fc *fc,
635c55c2bf3SAlex Vesker 			    const void *item_spec,
636c55c2bf3SAlex Vesker 			    uint8_t *tag)
637c55c2bf3SAlex Vesker {
638c55c2bf3SAlex Vesker 	const struct rte_flow_item_ipv6 *v = item_spec;
639c55c2bf3SAlex Vesker 	uint8_t tos = DR_GET(header_ipv6_vtc, &v->hdr.vtc_flow, tos);
640c55c2bf3SAlex Vesker 
641c55c2bf3SAlex Vesker 	DR_SET(tag, tos, fc->byte_off, fc->bit_off, fc->bit_mask);
642c55c2bf3SAlex Vesker }
643c55c2bf3SAlex Vesker 
644c55c2bf3SAlex Vesker static void
645c55c2bf3SAlex Vesker mlx5dr_definer_icmp_dw1_set(struct mlx5dr_definer_fc *fc,
646c55c2bf3SAlex Vesker 			    const void *item_spec,
647c55c2bf3SAlex Vesker 			    uint8_t *tag)
648c55c2bf3SAlex Vesker {
649c55c2bf3SAlex Vesker 	const struct rte_flow_item_icmp *v = item_spec;
650c55c2bf3SAlex Vesker 	rte_be32_t icmp_dw1;
651c55c2bf3SAlex Vesker 
652c55c2bf3SAlex Vesker 	icmp_dw1 = (v->hdr.icmp_type << __mlx5_dw_bit_off(header_icmp, type)) |
653c55c2bf3SAlex Vesker 		   (v->hdr.icmp_code << __mlx5_dw_bit_off(header_icmp, code)) |
654c55c2bf3SAlex Vesker 		   (rte_be_to_cpu_16(v->hdr.icmp_cksum) << __mlx5_dw_bit_off(header_icmp, cksum));
655c55c2bf3SAlex Vesker 
656c55c2bf3SAlex Vesker 	DR_SET(tag, icmp_dw1, fc->byte_off, fc->bit_off, fc->bit_mask);
657c55c2bf3SAlex Vesker }
658c55c2bf3SAlex Vesker 
659c55c2bf3SAlex Vesker static void
660c55c2bf3SAlex Vesker mlx5dr_definer_icmp_dw2_set(struct mlx5dr_definer_fc *fc,
661c55c2bf3SAlex Vesker 			    const void *item_spec,
662c55c2bf3SAlex Vesker 			    uint8_t *tag)
663c55c2bf3SAlex Vesker {
664c55c2bf3SAlex Vesker 	const struct rte_flow_item_icmp *v = item_spec;
665c55c2bf3SAlex Vesker 	rte_be32_t icmp_dw2;
666c55c2bf3SAlex Vesker 
667c55c2bf3SAlex Vesker 	icmp_dw2 = (rte_be_to_cpu_16(v->hdr.icmp_ident) << __mlx5_dw_bit_off(header_icmp, ident)) |
668c55c2bf3SAlex Vesker 		   (rte_be_to_cpu_16(v->hdr.icmp_seq_nb) << __mlx5_dw_bit_off(header_icmp, seq_nb));
669c55c2bf3SAlex Vesker 
670c55c2bf3SAlex Vesker 	DR_SET(tag, icmp_dw2, fc->byte_off, fc->bit_off, fc->bit_mask);
671c55c2bf3SAlex Vesker }
672c55c2bf3SAlex Vesker 
673c55c2bf3SAlex Vesker static void
674c55c2bf3SAlex Vesker mlx5dr_definer_icmp6_dw1_set(struct mlx5dr_definer_fc *fc,
675c55c2bf3SAlex Vesker 			    const void *item_spec,
676c55c2bf3SAlex Vesker 			    uint8_t *tag)
677c55c2bf3SAlex Vesker {
678c55c2bf3SAlex Vesker 	const struct rte_flow_item_icmp6 *v = item_spec;
679c55c2bf3SAlex Vesker 	rte_be32_t icmp_dw1;
680c55c2bf3SAlex Vesker 
681c55c2bf3SAlex Vesker 	icmp_dw1 = (v->type << __mlx5_dw_bit_off(header_icmp, type)) |
682c55c2bf3SAlex Vesker 		   (v->code << __mlx5_dw_bit_off(header_icmp, code)) |
683c55c2bf3SAlex Vesker 		   (rte_be_to_cpu_16(v->checksum) << __mlx5_dw_bit_off(header_icmp, cksum));
684c55c2bf3SAlex Vesker 
685c55c2bf3SAlex Vesker 	DR_SET(tag, icmp_dw1, fc->byte_off, fc->bit_off, fc->bit_mask);
686c55c2bf3SAlex Vesker }
687c55c2bf3SAlex Vesker 
688c55c2bf3SAlex Vesker static void
68901314192SLeo Xu mlx5dr_definer_icmp6_echo_dw1_mask_set(struct mlx5dr_definer_fc *fc,
69001314192SLeo Xu 				       __rte_unused const void *item_spec,
69101314192SLeo Xu 				       uint8_t *tag)
69201314192SLeo Xu {
69301314192SLeo Xu 	const struct rte_flow_item_icmp6 spec = {0xFF, 0xFF, 0x0};
69401314192SLeo Xu 	mlx5dr_definer_icmp6_dw1_set(fc, &spec, tag);
69501314192SLeo Xu }
69601314192SLeo Xu 
69701314192SLeo Xu static void
69801314192SLeo Xu mlx5dr_definer_icmp6_echo_request_dw1_set(struct mlx5dr_definer_fc *fc,
69901314192SLeo Xu 					  __rte_unused const void *item_spec,
70001314192SLeo Xu 					  uint8_t *tag)
70101314192SLeo Xu {
70201314192SLeo Xu 	const struct rte_flow_item_icmp6 spec = {RTE_ICMP6_ECHO_REQUEST, 0, 0};
70301314192SLeo Xu 	mlx5dr_definer_icmp6_dw1_set(fc, &spec, tag);
70401314192SLeo Xu }
70501314192SLeo Xu 
70601314192SLeo Xu static void
70701314192SLeo Xu mlx5dr_definer_icmp6_echo_reply_dw1_set(struct mlx5dr_definer_fc *fc,
70801314192SLeo Xu 					__rte_unused const void *item_spec,
70901314192SLeo Xu 					uint8_t *tag)
71001314192SLeo Xu {
71101314192SLeo Xu 	const struct rte_flow_item_icmp6 spec = {RTE_ICMP6_ECHO_REPLY, 0, 0};
71201314192SLeo Xu 	mlx5dr_definer_icmp6_dw1_set(fc, &spec, tag);
71301314192SLeo Xu }
71401314192SLeo Xu 
71501314192SLeo Xu static void
71601314192SLeo Xu mlx5dr_definer_icmp6_echo_dw2_set(struct mlx5dr_definer_fc *fc,
71701314192SLeo Xu 				  const void *item_spec,
71801314192SLeo Xu 				  uint8_t *tag)
71901314192SLeo Xu {
72001314192SLeo Xu 	const struct rte_flow_item_icmp6_echo *v = item_spec;
72101314192SLeo Xu 	rte_be32_t dw2;
72201314192SLeo Xu 
72301314192SLeo Xu 	dw2 = (rte_be_to_cpu_16(v->hdr.identifier) << __mlx5_dw_bit_off(header_icmp, ident)) |
72401314192SLeo Xu 	      (rte_be_to_cpu_16(v->hdr.sequence) << __mlx5_dw_bit_off(header_icmp, seq_nb));
72501314192SLeo Xu 
72601314192SLeo Xu 	DR_SET(tag, dw2, fc->byte_off, fc->bit_off, fc->bit_mask);
72701314192SLeo Xu }
72801314192SLeo Xu 
72901314192SLeo Xu static void
730c55c2bf3SAlex Vesker mlx5dr_definer_ipv6_flow_label_set(struct mlx5dr_definer_fc *fc,
731c55c2bf3SAlex Vesker 				   const void *item_spec,
732c55c2bf3SAlex Vesker 				   uint8_t *tag)
733c55c2bf3SAlex Vesker {
734c55c2bf3SAlex Vesker 	const struct rte_flow_item_ipv6 *v = item_spec;
735c55c2bf3SAlex Vesker 	uint32_t flow_label = DR_GET(header_ipv6_vtc, &v->hdr.vtc_flow, flow_label);
736c55c2bf3SAlex Vesker 
737c55c2bf3SAlex Vesker 	DR_SET(tag, flow_label, fc->byte_off, fc->bit_off, fc->bit_mask);
738c55c2bf3SAlex Vesker }
739c55c2bf3SAlex Vesker 
740c55c2bf3SAlex Vesker static void
741c55c2bf3SAlex Vesker mlx5dr_definer_vport_set(struct mlx5dr_definer_fc *fc,
742c55c2bf3SAlex Vesker 			 const void *item_spec,
743c55c2bf3SAlex Vesker 			 uint8_t *tag)
744c55c2bf3SAlex Vesker {
745c55c2bf3SAlex Vesker 	const struct rte_flow_item_ethdev *v = item_spec;
746c55c2bf3SAlex Vesker 	const struct flow_hw_port_info *port_info;
747c55c2bf3SAlex Vesker 	uint32_t regc_value;
748c55c2bf3SAlex Vesker 
7494cbeba6fSSuanming Mou 	port_info = flow_hw_conv_port_id(fc->dr_ctx, v->port_id);
750c55c2bf3SAlex Vesker 	if (unlikely(!port_info))
751c55c2bf3SAlex Vesker 		regc_value = BAD_PORT;
752c55c2bf3SAlex Vesker 	else
753c55c2bf3SAlex Vesker 		regc_value = port_info->regc_value >> fc->bit_off;
754c55c2bf3SAlex Vesker 
755c55c2bf3SAlex Vesker 	/* Bit offset is set to 0 to since regc value is 32bit */
756c55c2bf3SAlex Vesker 	DR_SET(tag, regc_value, fc->byte_off, fc->bit_off, fc->bit_mask);
757c55c2bf3SAlex Vesker }
758c55c2bf3SAlex Vesker 
/* Select the field-copy entry and header-layout label offset for the MPLS
 * label at index cd->mpls_idx; up to five stacked labels (0-4) are supported.
 * Returns the initialized entry, or NULL with rte_errno = ENOTSUP for an
 * unsupported index.
 */
static struct mlx5dr_definer_fc *
mlx5dr_definer_get_mpls_fc(struct mlx5dr_definer_conv_data *cd, bool inner)
{
	uint8_t mpls_idx = cd->mpls_idx;
	struct mlx5dr_definer_fc *fc;

	switch (mpls_idx) {
	case 0:
		fc = &cd->fc[DR_CALC_FNAME(MPLS0, inner)];
		DR_CALC_SET_HDR(fc, mpls_inner, mpls0_label);
		break;
	case 1:
		fc = &cd->fc[DR_CALC_FNAME(MPLS1, inner)];
		DR_CALC_SET_HDR(fc, mpls_inner, mpls1_label);
		break;
	case 2:
		fc = &cd->fc[DR_CALC_FNAME(MPLS2, inner)];
		DR_CALC_SET_HDR(fc, mpls_inner, mpls2_label);
		break;
	case 3:
		fc = &cd->fc[DR_CALC_FNAME(MPLS3, inner)];
		DR_CALC_SET_HDR(fc, mpls_inner, mpls3_label);
		break;
	case 4:
		fc = &cd->fc[DR_CALC_FNAME(MPLS4, inner)];
		DR_CALC_SET_HDR(fc, mpls_inner, mpls4_label);
		break;
	default:
		rte_errno = ENOTSUP;
		DR_LOG(ERR, "MPLS index %d is not supported", mpls_idx);
		return NULL;
	}

	return fc;
}
7945bf14a4bSErez Shitrit 
/* Select the field-copy entry and OKS2 qualifier offset for the MPLS label
 * at index cd->mpls_idx (0-4); the qualifier indicates label presence.
 * Returns the initialized entry, or NULL with rte_errno = ENOTSUP for an
 * unsupported index.
 */
static struct mlx5dr_definer_fc *
mlx5dr_definer_get_mpls_oks_fc(struct mlx5dr_definer_conv_data *cd, bool inner)
{
	uint8_t mpls_idx = cd->mpls_idx;
	struct mlx5dr_definer_fc *fc;

	switch (mpls_idx) {
	case 0:
		fc = &cd->fc[DR_CALC_FNAME(OKS2_MPLS0, inner)];
		DR_CALC_SET_HDR(fc, oks2, second_mpls0_qualifier);
		break;
	case 1:
		fc = &cd->fc[DR_CALC_FNAME(OKS2_MPLS1, inner)];
		DR_CALC_SET_HDR(fc, oks2, second_mpls1_qualifier);
		break;
	case 2:
		fc = &cd->fc[DR_CALC_FNAME(OKS2_MPLS2, inner)];
		DR_CALC_SET_HDR(fc, oks2, second_mpls2_qualifier);
		break;
	case 3:
		fc = &cd->fc[DR_CALC_FNAME(OKS2_MPLS3, inner)];
		DR_CALC_SET_HDR(fc, oks2, second_mpls3_qualifier);
		break;
	case 4:
		fc = &cd->fc[DR_CALC_FNAME(OKS2_MPLS4, inner)];
		DR_CALC_SET_HDR(fc, oks2, second_mpls4_qualifier);
		break;
	default:
		rte_errno = ENOTSUP;
		DR_LOG(ERR, "MPLS index %d is not supported", mpls_idx);
		return NULL;
	}

	return fc;
}
8305bf14a4bSErez Shitrit 
8315bf14a4bSErez Shitrit static void
8325bf14a4bSErez Shitrit mlx5dr_definer_mpls_label_set(struct mlx5dr_definer_fc *fc,
8335bf14a4bSErez Shitrit 			      const void *item_spec,
8345bf14a4bSErez Shitrit 			      uint8_t *tag)
8355bf14a4bSErez Shitrit {
8365bf14a4bSErez Shitrit 	const struct rte_flow_item_mpls *v = item_spec;
8375bf14a4bSErez Shitrit 
8385bf14a4bSErez Shitrit 	memcpy(tag + fc->byte_off, v->label_tc_s, sizeof(v->label_tc_s));
8395bf14a4bSErez Shitrit 	memcpy(tag + fc->byte_off + sizeof(v->label_tc_s), &v->ttl, sizeof(v->ttl));
8405bf14a4bSErez Shitrit }
8415bf14a4bSErez Shitrit 
8427bf29065SDong Zhou static void
84343b5adbaSAlex Vesker mlx5dr_definer_geneve_vni_set(struct mlx5dr_definer_fc *fc,
84443b5adbaSAlex Vesker 			      const void *item_spec,
84543b5adbaSAlex Vesker 			      uint8_t *tag)
84643b5adbaSAlex Vesker {
84743b5adbaSAlex Vesker 	const struct rte_flow_item_geneve *v = item_spec;
84843b5adbaSAlex Vesker 
84943b5adbaSAlex Vesker 	memcpy(tag + fc->byte_off, v->vni, sizeof(v->vni));
85043b5adbaSAlex Vesker }
85143b5adbaSAlex Vesker 
85243b5adbaSAlex Vesker static void
8538f8dad42SAlex Vesker mlx5dr_definer_geneve_opt_ctrl_set(struct mlx5dr_definer_fc *fc,
8548f8dad42SAlex Vesker 				   const void *item_spec,
8558f8dad42SAlex Vesker 				   uint8_t *tag)
8568f8dad42SAlex Vesker {
8578f8dad42SAlex Vesker 	const struct rte_flow_item_geneve_opt *v = item_spec;
8588f8dad42SAlex Vesker 	uint32_t dw0 = 0;
8598f8dad42SAlex Vesker 
8608f8dad42SAlex Vesker 	dw0 |= v->option_type << __mlx5_dw_bit_off(header_geneve_opt, type);
8618f8dad42SAlex Vesker 	dw0 |= rte_cpu_to_be_16(v->option_class) << __mlx5_dw_bit_off(header_geneve_opt, class);
8628f8dad42SAlex Vesker 	DR_SET(tag, dw0, fc->byte_off, fc->bit_off, fc->bit_mask);
8638f8dad42SAlex Vesker }
8648f8dad42SAlex Vesker 
8658f8dad42SAlex Vesker static void
8668f8dad42SAlex Vesker mlx5dr_definer_geneve_opt_data_set(struct mlx5dr_definer_fc *fc,
8678f8dad42SAlex Vesker 				   const void *item_spec,
8688f8dad42SAlex Vesker 				   uint8_t *tag)
8698f8dad42SAlex Vesker {
8708f8dad42SAlex Vesker 	const struct rte_flow_item_geneve_opt *v = item_spec;
8718f8dad42SAlex Vesker 
8728f8dad42SAlex Vesker 	DR_SET_BE32(tag, v->data[fc->extra_data], fc->byte_off, fc->bit_off, fc->bit_mask);
8738f8dad42SAlex Vesker }
8748f8dad42SAlex Vesker 
8758f8dad42SAlex Vesker static void
8767bf29065SDong Zhou mlx5dr_definer_ib_l4_qp_set(struct mlx5dr_definer_fc *fc,
8777bf29065SDong Zhou 			    const void *item_spec,
8787bf29065SDong Zhou 			    uint8_t *tag)
8797bf29065SDong Zhou {
8807bf29065SDong Zhou 	const struct rte_flow_item_ib_bth *v = item_spec;
8817bf29065SDong Zhou 
8827bf29065SDong Zhou 	memcpy(tag + fc->byte_off, &v->hdr.dst_qp, sizeof(v->hdr.dst_qp));
8837bf29065SDong Zhou }
8847bf29065SDong Zhou 
885f6164649SGavin Li static void
886f6164649SGavin Li mlx5dr_definer_vxlan_gpe_vni_set(struct mlx5dr_definer_fc *fc,
887f6164649SGavin Li 				 const void *item_spec,
888f6164649SGavin Li 				 uint8_t *tag)
889f6164649SGavin Li {
890f6164649SGavin Li 	const struct rte_flow_item_vxlan_gpe *v = item_spec;
891f6164649SGavin Li 
892f6164649SGavin Li 	memcpy(tag + fc->byte_off, v->vni, sizeof(v->vni));
893f6164649SGavin Li }
894f6164649SGavin Li 
895f6164649SGavin Li static void
896f6164649SGavin Li mlx5dr_definer_vxlan_gpe_rsvd0_set(struct mlx5dr_definer_fc *fc,
897f6164649SGavin Li 				   const void *item_spec,
898f6164649SGavin Li 				   uint8_t *tag)
899f6164649SGavin Li {
900f6164649SGavin Li 	const struct rte_flow_item_vxlan_gpe *v = item_spec;
901f6164649SGavin Li 	uint16_t rsvd0;
902f6164649SGavin Li 
903f6164649SGavin Li 	rsvd0 = (v->rsvd0[0] << 8 | v->rsvd0[1]);
904f6164649SGavin Li 	DR_SET(tag, rsvd0, fc->byte_off, fc->bit_off, fc->bit_mask);
905f6164649SGavin Li }
906f6164649SGavin Li 
9077aa6c077SSuanming Mou static void
9087aa6c077SSuanming Mou mlx5dr_definer_tx_queue_set(struct mlx5dr_definer_fc *fc,
9097aa6c077SSuanming Mou 			    const void *item_spec,
9107aa6c077SSuanming Mou 			    uint8_t *tag)
9117aa6c077SSuanming Mou {
9127aa6c077SSuanming Mou 	const struct rte_flow_item_tx_queue *v = item_spec;
9137aa6c077SSuanming Mou 	uint32_t sqn = 0;
9147aa6c077SSuanming Mou 	int ret;
9157aa6c077SSuanming Mou 
9167aa6c077SSuanming Mou 	ret = flow_hw_conv_sqn(fc->extra_data, v->tx_queue, &sqn);
9177aa6c077SSuanming Mou 	if (unlikely(ret))
9187aa6c077SSuanming Mou 		sqn = BAD_SQN;
9197aa6c077SSuanming Mou 
9207aa6c077SSuanming Mou 	DR_SET(tag, sqn, fc->byte_off, fc->bit_off, fc->bit_mask);
9217aa6c077SSuanming Mou }
9227aa6c077SSuanming Mou 
/* Convert an RTE_FLOW_ITEM_TYPE_ETH mask into definer field-copy entries.
 * Each masked sub-field gets its own entry in cd->fc with a tag-set callback
 * and a calculated header-layout offset. Returns 0 on success, or rte_errno
 * (ENOTSUP) when the mask uses the unsupported reserved field.
 */
static int
mlx5dr_definer_conv_item_eth(struct mlx5dr_definer_conv_data *cd,
			     struct rte_flow_item *item,
			     int item_idx)
{
	const struct rte_flow_item_eth *m = item->mask;
	uint8_t empty_mac[RTE_ETHER_ADDR_LEN] = {0};
	struct mlx5dr_definer_fc *fc;
	bool inner = cd->tunnel;

	/* A NULL mask means nothing to match on for this item. */
	if (!m)
		return 0;

	if (m->reserved) {
		rte_errno = ENOTSUP;
		return rte_errno;
	}

	if (m->hdr.ether_type) {
		fc = &cd->fc[DR_CALC_FNAME(ETH_TYPE, inner)];
		fc->item_idx = item_idx;
		fc->tag_set = &mlx5dr_definer_eth_type_set;
		DR_CALC_SET(fc, eth_l2, l3_ethertype, inner);
	}

	/* MAC addresses are split into two HW fields (47_16 and 15_0),
	 * so each half is checked and programmed separately.
	 */
	/* Check SMAC 47_16 */
	if (memcmp(m->hdr.src_addr.addr_bytes, empty_mac, 4)) {
		fc = &cd->fc[DR_CALC_FNAME(ETH_SMAC_48_16, inner)];
		fc->item_idx = item_idx;
		fc->tag_set = &mlx5dr_definer_eth_smac_47_16_set;
		DR_CALC_SET(fc, eth_l2_src, smac_47_16, inner);
	}

	/* Check SMAC 15_0 */
	if (memcmp(m->hdr.src_addr.addr_bytes + 4, empty_mac + 4, 2)) {
		fc = &cd->fc[DR_CALC_FNAME(ETH_SMAC_15_0, inner)];
		fc->item_idx = item_idx;
		fc->tag_set = &mlx5dr_definer_eth_smac_15_0_set;
		DR_CALC_SET(fc, eth_l2_src, smac_15_0, inner);
	}

	/* Check DMAC 47_16 */
	if (memcmp(m->hdr.dst_addr.addr_bytes, empty_mac, 4)) {
		fc = &cd->fc[DR_CALC_FNAME(ETH_DMAC_48_16, inner)];
		fc->item_idx = item_idx;
		fc->tag_set = &mlx5dr_definer_eth_dmac_47_16_set;
		DR_CALC_SET(fc, eth_l2, dmac_47_16, inner);
	}

	/* Check DMAC 15_0 */
	if (memcmp(m->hdr.dst_addr.addr_bytes + 4, empty_mac + 4, 2)) {
		fc = &cd->fc[DR_CALC_FNAME(ETH_DMAC_15_0, inner)];
		fc->item_idx = item_idx;
		fc->tag_set = &mlx5dr_definer_eth_dmac_15_0_set;
		DR_CALC_SET(fc, eth_l2, dmac_15_0, inner);
	}

	if (m->has_vlan) {
		/* Mark packet as tagged (CVLAN) */
		fc = &cd->fc[DR_CALC_FNAME(VLAN_TYPE, inner)];
		fc->item_idx = item_idx;
		fc->tag_mask_set = &mlx5dr_definer_ones_set;
		fc->tag_set = &mlx5dr_definer_eth_first_vlan_q_set;
		DR_CALC_SET(fc, eth_l2, first_vlan_qualifier, inner);
	}

	return 0;
}
991c55c2bf3SAlex Vesker 
/* Convert an RTE_FLOW_ITEM_TYPE_VLAN mask into definer field-copy entries.
 * In non-relaxed mode the VLAN qualifier is matched even with a NULL mask.
 * Returns 0 on success, or rte_errno (ENOTSUP) when the mask uses the
 * unsupported reserved field.
 */
static int
mlx5dr_definer_conv_item_vlan(struct mlx5dr_definer_conv_data *cd,
			      struct rte_flow_item *item,
			      int item_idx)
{
	const struct rte_flow_item_vlan *m = item->mask;
	struct mlx5dr_definer_fc *fc;
	bool inner = cd->tunnel;

	if (!cd->relaxed) {
		/* Mark packet as tagged (CVLAN) */
		fc = &cd->fc[DR_CALC_FNAME(VLAN_TYPE, inner)];
		fc->item_idx = item_idx;
		fc->tag_mask_set = &mlx5dr_definer_ones_set;
		fc->tag_set = &mlx5dr_definer_cvlan_set;
		DR_CALC_SET(fc, eth_l2, first_vlan_qualifier, inner);
	}

	if (!m)
		return 0;

	if (m->reserved) {
		rte_errno = ENOTSUP;
		return rte_errno;
	}

	/* has_more_vlan overrides the qualifier tag-set callback installed
	 * above for the same field-copy entry.
	 */
	if (m->has_more_vlan) {
		fc = &cd->fc[DR_CALC_FNAME(VLAN_TYPE, inner)];
		fc->item_idx = item_idx;
		fc->tag_mask_set = &mlx5dr_definer_ones_set;
		fc->tag_set = &mlx5dr_definer_first_vlan_q_set;
		DR_CALC_SET(fc, eth_l2, first_vlan_qualifier, inner);
	}

	if (m->hdr.vlan_tci) {
		fc = &cd->fc[DR_CALC_FNAME(VLAN_TCI, inner)];
		fc->item_idx = item_idx;
		fc->tag_set = &mlx5dr_definer_tci_set;
		DR_CALC_SET(fc, eth_l2, tci, inner);
	}

	if (m->hdr.eth_proto) {
		fc = &cd->fc[DR_CALC_FNAME(ETH_TYPE, inner)];
		fc->item_idx = item_idx;
		fc->tag_set = &mlx5dr_definer_inner_type_set;
		DR_CALC_SET(fc, eth_l2, l3_ethertype, inner);
	}

	return 0;
}
1042c55c2bf3SAlex Vesker 
/* Convert an RTE_FLOW_ITEM_TYPE_IPV4 mask (and optional "last" for range
 * matching) into definer field-copy entries. In non-relaxed mode the L3
 * version is matched implicitly and any previously-set ethertype entry is
 * dropped. Returns 0 on success, or rte_errno (ENOTSUP) for unsupported
 * mask/range combinations.
 */
static int
mlx5dr_definer_conv_item_ipv4(struct mlx5dr_definer_conv_data *cd,
			      struct rte_flow_item *item,
			      int item_idx)
{
	const struct rte_ipv4_hdr *m = item->mask;
	const struct rte_ipv4_hdr *l = item->last;
	struct mlx5dr_definer_fc *fc;
	bool inner = cd->tunnel;

	if (!cd->relaxed) {
		fc = &cd->fc[DR_CALC_FNAME(IP_VERSION, inner)];
		fc->item_idx = item_idx;
		fc->tag_set = &mlx5dr_definer_ipv4_version_set;
		fc->tag_mask_set = &mlx5dr_definer_ones_set;
		DR_CALC_SET(fc, eth_l2, l3_type, inner);

		/* Overwrite - Unset ethertype if present */
		memset(&cd->fc[DR_CALC_FNAME(ETH_TYPE, inner)], 0, sizeof(*fc));
	}

	if (!m)
		return 0;

	/* Header checksum matching and ranges on protocol/TOS are not
	 * supported by the definer layout.
	 */
	if (m->hdr_checksum ||
	    (l && (l->next_proto_id || l->type_of_service))) {
		rte_errno = ENOTSUP;
		return rte_errno;
	}

	if (m->version) {
		fc = &cd->fc[DR_CALC_FNAME(IP_VERSION, inner)];
		fc->item_idx = item_idx;
		fc->tag_set = &mlx5dr_definer_ipv4_version_set;
		fc->tag_mask_set = &mlx5dr_definer_ones_set;
		DR_CALC_SET(fc, eth_l2, l3_type, inner);
	}

	if (m->fragment_offset) {
		fc = &cd->fc[DR_CALC_FNAME(IP_FRAG, inner)];
		fc->item_idx = item_idx;
		/* A full 0x3fff mask means "any fragment": match the
		 * single ip_fragmented bit instead of the raw offset.
		 */
		if (rte_be_to_cpu_16(m->fragment_offset) == 0x3fff) {
			fc->tag_set = &mlx5dr_definer_ip_fragmented_set;
			DR_CALC_SET(fc, eth_l2, ip_fragmented, inner);
		} else {
			fc->is_range = l && l->fragment_offset;
			fc->tag_set = &mlx5dr_definer_ipv4_frag_set;
			DR_CALC_SET(fc, eth_l3, ipv4_frag, inner);
		}
	}

	if (m->next_proto_id) {
		fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)];
		fc->item_idx = item_idx;
		fc->tag_set = &mlx5dr_definer_ipv4_next_proto_set;
		DR_CALC_SET(fc, eth_l3, protocol_next_header, inner);
	}

	if (m->packet_id) {
		fc = &cd->fc[DR_CALC_FNAME(IP_ID, inner)];
		fc->item_idx = item_idx;
		fc->is_range = l && l->packet_id;
		fc->tag_set = &mlx5dr_definer_ipv4_identification_set;
		DR_CALC_SET(fc, eth_l3, identification, inner);
	}

	if (m->total_length) {
		fc = &cd->fc[DR_CALC_FNAME(IP_LEN, inner)];
		fc->item_idx = item_idx;
		fc->is_range = l && l->total_length;
		fc->tag_set = &mlx5dr_definer_ipv4_len_set;
		DR_CALC_SET(fc, eth_l3, ipv4_total_length, inner);
	}

	if (m->dst_addr) {
		fc = &cd->fc[DR_CALC_FNAME(IPV4_DST, inner)];
		fc->item_idx = item_idx;
		fc->is_range = l && l->dst_addr;
		fc->tag_set = &mlx5dr_definer_ipv4_dst_addr_set;
		DR_CALC_SET(fc, ipv4_src_dest, destination_address, inner);
	}

	if (m->src_addr) {
		fc = &cd->fc[DR_CALC_FNAME(IPV4_SRC, inner)];
		fc->item_idx = item_idx;
		fc->is_range = l && l->src_addr;
		fc->tag_set = &mlx5dr_definer_ipv4_src_addr_set;
		DR_CALC_SET(fc, ipv4_src_dest, source_address, inner);
	}

	if (m->ihl) {
		fc = &cd->fc[DR_CALC_FNAME(IPV4_IHL, inner)];
		fc->item_idx = item_idx;
		fc->is_range = l && l->ihl;
		fc->tag_set = &mlx5dr_definer_ipv4_ihl_set;
		DR_CALC_SET(fc, eth_l3, ihl, inner);
	}

	if (m->time_to_live) {
		fc = &cd->fc[DR_CALC_FNAME(IP_TTL, inner)];
		fc->item_idx = item_idx;
		fc->is_range = l && l->time_to_live;
		fc->tag_set = &mlx5dr_definer_ipv4_time_to_live_set;
		DR_CALC_SET(fc, eth_l3, time_to_live_hop_limit, inner);
	}

	if (m->type_of_service) {
		fc = &cd->fc[DR_CALC_FNAME(IP_TOS, inner)];
		fc->item_idx = item_idx;
		fc->tag_set = &mlx5dr_definer_ipv4_tos_set;
		DR_CALC_SET(fc, eth_l3, tos, inner);
	}

	return 0;
}
1158c55c2bf3SAlex Vesker 
1159c55c2bf3SAlex Vesker static int
1160c55c2bf3SAlex Vesker mlx5dr_definer_conv_item_ipv6(struct mlx5dr_definer_conv_data *cd,
1161c55c2bf3SAlex Vesker 			      struct rte_flow_item *item,
1162c55c2bf3SAlex Vesker 			      int item_idx)
1163c55c2bf3SAlex Vesker {
1164c55c2bf3SAlex Vesker 	const struct rte_flow_item_ipv6 *m = item->mask;
116557800e6cSAlex Vesker 	const struct rte_flow_item_ipv6 *l = item->last;
1166c55c2bf3SAlex Vesker 	struct mlx5dr_definer_fc *fc;
1167c55c2bf3SAlex Vesker 	bool inner = cd->tunnel;
1168c55c2bf3SAlex Vesker 
1169c55c2bf3SAlex Vesker 	if (!cd->relaxed) {
1170c55c2bf3SAlex Vesker 		fc = &cd->fc[DR_CALC_FNAME(IP_VERSION, inner)];
1171c55c2bf3SAlex Vesker 		fc->item_idx = item_idx;
1172c55c2bf3SAlex Vesker 		fc->tag_set = &mlx5dr_definer_ipv6_version_set;
1173c55c2bf3SAlex Vesker 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
1174c55c2bf3SAlex Vesker 		DR_CALC_SET(fc, eth_l2, l3_type, inner);
1175c55c2bf3SAlex Vesker 
1176c55c2bf3SAlex Vesker 		/* Overwrite - Unset ethertype if present */
1177c55c2bf3SAlex Vesker 		memset(&cd->fc[DR_CALC_FNAME(ETH_TYPE, inner)], 0, sizeof(*fc));
1178c55c2bf3SAlex Vesker 	}
1179c55c2bf3SAlex Vesker 
1180c55c2bf3SAlex Vesker 	if (!m)
1181c55c2bf3SAlex Vesker 		return 0;
1182c55c2bf3SAlex Vesker 
1183c55c2bf3SAlex Vesker 	if (m->has_hop_ext || m->has_route_ext || m->has_auth_ext ||
1184c55c2bf3SAlex Vesker 	    m->has_esp_ext || m->has_dest_ext || m->has_mobil_ext ||
118557800e6cSAlex Vesker 	    m->has_hip_ext || m->has_shim6_ext ||
118657800e6cSAlex Vesker 	    (l && (l->has_frag_ext || l->hdr.vtc_flow || l->hdr.proto ||
118789b5642dSRobin Jarry 		   !is_mem_zero(l->hdr.src_addr.a, 16) ||
118889b5642dSRobin Jarry 		   !is_mem_zero(l->hdr.dst_addr.a, 16)))) {
1189c55c2bf3SAlex Vesker 		rte_errno = ENOTSUP;
1190c55c2bf3SAlex Vesker 		return rte_errno;
1191c55c2bf3SAlex Vesker 	}
1192c55c2bf3SAlex Vesker 
1193c55c2bf3SAlex Vesker 	if (m->has_frag_ext) {
1194c55c2bf3SAlex Vesker 		fc = &cd->fc[DR_CALC_FNAME(IP_FRAG, inner)];
1195c55c2bf3SAlex Vesker 		fc->item_idx = item_idx;
1196c55c2bf3SAlex Vesker 		fc->tag_set = &mlx5dr_definer_ipv6_frag_set;
1197c55c2bf3SAlex Vesker 		DR_CALC_SET(fc, eth_l4, ip_fragmented, inner);
1198c55c2bf3SAlex Vesker 	}
1199c55c2bf3SAlex Vesker 
12005587be0dSHamdan Igbaria 	if (DR_GET(header_ipv6_vtc, &m->hdr.vtc_flow, version)) {
12015587be0dSHamdan Igbaria 		fc = &cd->fc[DR_CALC_FNAME(IP_VERSION, inner)];
12025587be0dSHamdan Igbaria 		fc->item_idx = item_idx;
12035587be0dSHamdan Igbaria 		fc->tag_set = &mlx5dr_definer_ipv6_version_set;
12045587be0dSHamdan Igbaria 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
12055587be0dSHamdan Igbaria 		DR_CALC_SET(fc, eth_l2, l3_type, inner);
12065587be0dSHamdan Igbaria 	}
12075587be0dSHamdan Igbaria 
1208c55c2bf3SAlex Vesker 	if (DR_GET(header_ipv6_vtc, &m->hdr.vtc_flow, tos)) {
1209c55c2bf3SAlex Vesker 		fc = &cd->fc[DR_CALC_FNAME(IP_TOS, inner)];
1210c55c2bf3SAlex Vesker 		fc->item_idx = item_idx;
1211c55c2bf3SAlex Vesker 		fc->tag_set = &mlx5dr_definer_ipv6_tos_set;
1212c55c2bf3SAlex Vesker 		DR_CALC_SET(fc, eth_l3, tos, inner);
1213c55c2bf3SAlex Vesker 	}
1214c55c2bf3SAlex Vesker 
1215c55c2bf3SAlex Vesker 	if (DR_GET(header_ipv6_vtc, &m->hdr.vtc_flow, flow_label)) {
1216c55c2bf3SAlex Vesker 		fc = &cd->fc[DR_CALC_FNAME(IPV6_FLOW_LABEL, inner)];
1217c55c2bf3SAlex Vesker 		fc->item_idx = item_idx;
1218c55c2bf3SAlex Vesker 		fc->tag_set = &mlx5dr_definer_ipv6_flow_label_set;
1219c55c2bf3SAlex Vesker 		DR_CALC_SET(fc, eth_l3, flow_label, inner);
1220c55c2bf3SAlex Vesker 	}
1221c55c2bf3SAlex Vesker 
1222c55c2bf3SAlex Vesker 	if (m->hdr.payload_len) {
122357800e6cSAlex Vesker 		fc = &cd->fc[DR_CALC_FNAME(IP_LEN, inner)];
1224c55c2bf3SAlex Vesker 		fc->item_idx = item_idx;
122557800e6cSAlex Vesker 		fc->is_range = l && l->hdr.payload_len;
1226c55c2bf3SAlex Vesker 		fc->tag_set = &mlx5dr_definer_ipv6_payload_len_set;
1227c55c2bf3SAlex Vesker 		DR_CALC_SET(fc, eth_l3, ipv6_payload_length, inner);
1228c55c2bf3SAlex Vesker 	}
1229c55c2bf3SAlex Vesker 
1230c55c2bf3SAlex Vesker 	if (m->hdr.proto) {
1231c55c2bf3SAlex Vesker 		fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)];
1232c55c2bf3SAlex Vesker 		fc->item_idx = item_idx;
1233c55c2bf3SAlex Vesker 		fc->tag_set = &mlx5dr_definer_ipv6_proto_set;
1234c55c2bf3SAlex Vesker 		DR_CALC_SET(fc, eth_l3, protocol_next_header, inner);
1235c55c2bf3SAlex Vesker 	}
1236c55c2bf3SAlex Vesker 
1237c55c2bf3SAlex Vesker 	if (m->hdr.hop_limits) {
1238c55c2bf3SAlex Vesker 		fc = &cd->fc[DR_CALC_FNAME(IP_TTL, inner)];
1239c55c2bf3SAlex Vesker 		fc->item_idx = item_idx;
124057800e6cSAlex Vesker 		fc->is_range = l && l->hdr.hop_limits;
1241c55c2bf3SAlex Vesker 		fc->tag_set = &mlx5dr_definer_ipv6_hop_limits_set;
1242c55c2bf3SAlex Vesker 		DR_CALC_SET(fc, eth_l3, time_to_live_hop_limit, inner);
1243c55c2bf3SAlex Vesker 	}
1244c55c2bf3SAlex Vesker 
124589b5642dSRobin Jarry 	if (!is_mem_zero(m->hdr.src_addr.a, 4)) {
1246c55c2bf3SAlex Vesker 		fc = &cd->fc[DR_CALC_FNAME(IPV6_SRC_127_96, inner)];
1247c55c2bf3SAlex Vesker 		fc->item_idx = item_idx;
1248c55c2bf3SAlex Vesker 		fc->tag_set = &mlx5dr_definer_ipv6_src_addr_127_96_set;
1249c55c2bf3SAlex Vesker 		DR_CALC_SET(fc, ipv6_src, ipv6_address_127_96, inner);
1250c55c2bf3SAlex Vesker 	}
1251c55c2bf3SAlex Vesker 
125289b5642dSRobin Jarry 	if (!is_mem_zero(m->hdr.src_addr.a + 4, 4)) {
1253c55c2bf3SAlex Vesker 		fc = &cd->fc[DR_CALC_FNAME(IPV6_SRC_95_64, inner)];
1254c55c2bf3SAlex Vesker 		fc->item_idx = item_idx;
1255c55c2bf3SAlex Vesker 		fc->tag_set = &mlx5dr_definer_ipv6_src_addr_95_64_set;
1256c55c2bf3SAlex Vesker 		DR_CALC_SET(fc, ipv6_src, ipv6_address_95_64, inner);
1257c55c2bf3SAlex Vesker 	}
1258c55c2bf3SAlex Vesker 
125989b5642dSRobin Jarry 	if (!is_mem_zero(m->hdr.src_addr.a + 8, 4)) {
1260c55c2bf3SAlex Vesker 		fc = &cd->fc[DR_CALC_FNAME(IPV6_SRC_63_32, inner)];
1261c55c2bf3SAlex Vesker 		fc->item_idx = item_idx;
1262c55c2bf3SAlex Vesker 		fc->tag_set = &mlx5dr_definer_ipv6_src_addr_63_32_set;
1263c55c2bf3SAlex Vesker 		DR_CALC_SET(fc, ipv6_src, ipv6_address_63_32, inner);
1264c55c2bf3SAlex Vesker 	}
1265c55c2bf3SAlex Vesker 
126689b5642dSRobin Jarry 	if (!is_mem_zero(m->hdr.src_addr.a + 12, 4)) {
1267c55c2bf3SAlex Vesker 		fc = &cd->fc[DR_CALC_FNAME(IPV6_SRC_31_0, inner)];
1268c55c2bf3SAlex Vesker 		fc->item_idx = item_idx;
1269c55c2bf3SAlex Vesker 		fc->tag_set = &mlx5dr_definer_ipv6_src_addr_31_0_set;
1270c55c2bf3SAlex Vesker 		DR_CALC_SET(fc, ipv6_src, ipv6_address_31_0, inner);
1271c55c2bf3SAlex Vesker 	}
1272c55c2bf3SAlex Vesker 
127389b5642dSRobin Jarry 	if (!is_mem_zero(m->hdr.dst_addr.a, 4)) {
1274c55c2bf3SAlex Vesker 		fc = &cd->fc[DR_CALC_FNAME(IPV6_DST_127_96, inner)];
1275c55c2bf3SAlex Vesker 		fc->item_idx = item_idx;
1276c55c2bf3SAlex Vesker 		fc->tag_set = &mlx5dr_definer_ipv6_dst_addr_127_96_set;
1277c55c2bf3SAlex Vesker 		DR_CALC_SET(fc, ipv6_dst, ipv6_address_127_96, inner);
1278c55c2bf3SAlex Vesker 	}
1279c55c2bf3SAlex Vesker 
128089b5642dSRobin Jarry 	if (!is_mem_zero(m->hdr.dst_addr.a + 4, 4)) {
1281c55c2bf3SAlex Vesker 		fc = &cd->fc[DR_CALC_FNAME(IPV6_DST_95_64, inner)];
1282c55c2bf3SAlex Vesker 		fc->item_idx = item_idx;
1283c55c2bf3SAlex Vesker 		fc->tag_set = &mlx5dr_definer_ipv6_dst_addr_95_64_set;
1284c55c2bf3SAlex Vesker 		DR_CALC_SET(fc, ipv6_dst, ipv6_address_95_64, inner);
1285c55c2bf3SAlex Vesker 	}
1286c55c2bf3SAlex Vesker 
128789b5642dSRobin Jarry 	if (!is_mem_zero(m->hdr.dst_addr.a + 8, 4)) {
1288c55c2bf3SAlex Vesker 		fc = &cd->fc[DR_CALC_FNAME(IPV6_DST_63_32, inner)];
1289c55c2bf3SAlex Vesker 		fc->item_idx = item_idx;
1290c55c2bf3SAlex Vesker 		fc->tag_set = &mlx5dr_definer_ipv6_dst_addr_63_32_set;
1291c55c2bf3SAlex Vesker 		DR_CALC_SET(fc, ipv6_dst, ipv6_address_63_32, inner);
1292c55c2bf3SAlex Vesker 	}
1293c55c2bf3SAlex Vesker 
129489b5642dSRobin Jarry 	if (!is_mem_zero(m->hdr.dst_addr.a + 12, 4)) {
1295c55c2bf3SAlex Vesker 		fc = &cd->fc[DR_CALC_FNAME(IPV6_DST_31_0, inner)];
1296c55c2bf3SAlex Vesker 		fc->item_idx = item_idx;
1297c55c2bf3SAlex Vesker 		fc->tag_set = &mlx5dr_definer_ipv6_dst_addr_31_0_set;
1298c55c2bf3SAlex Vesker 		DR_CALC_SET(fc, ipv6_dst, ipv6_address_31_0, inner);
1299c55c2bf3SAlex Vesker 	}
1300c55c2bf3SAlex Vesker 
1301c55c2bf3SAlex Vesker 	return 0;
1302c55c2bf3SAlex Vesker }
1303c55c2bf3SAlex Vesker 
1304c55c2bf3SAlex Vesker static int
1305c55c2bf3SAlex Vesker mlx5dr_definer_conv_item_udp(struct mlx5dr_definer_conv_data *cd,
1306c55c2bf3SAlex Vesker 			     struct rte_flow_item *item,
1307c55c2bf3SAlex Vesker 			     int item_idx)
1308c55c2bf3SAlex Vesker {
1309c55c2bf3SAlex Vesker 	const struct rte_flow_item_udp *m = item->mask;
131057800e6cSAlex Vesker 	const struct rte_flow_item_udp *l = item->last;
1311c55c2bf3SAlex Vesker 	struct mlx5dr_definer_fc *fc;
1312c55c2bf3SAlex Vesker 	bool inner = cd->tunnel;
1313c55c2bf3SAlex Vesker 
1314c55c2bf3SAlex Vesker 	/* Set match on L4 type UDP */
1315c55c2bf3SAlex Vesker 	if (!cd->relaxed) {
1316c55c2bf3SAlex Vesker 		fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)];
13179cbf5076SRongwei Liu 		if (!fc->not_overwrite) {
1318c55c2bf3SAlex Vesker 			fc->item_idx = item_idx;
1319c55c2bf3SAlex Vesker 			fc->tag_set = &mlx5dr_definer_udp_protocol_set;
1320c55c2bf3SAlex Vesker 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
1321c55c2bf3SAlex Vesker 			DR_CALC_SET(fc, eth_l2, l4_type_bwc, inner);
1322c55c2bf3SAlex Vesker 		}
13239cbf5076SRongwei Liu 	}
1324c55c2bf3SAlex Vesker 
1325c55c2bf3SAlex Vesker 	if (!m)
1326c55c2bf3SAlex Vesker 		return 0;
1327c55c2bf3SAlex Vesker 
1328c55c2bf3SAlex Vesker 	if (m->hdr.dgram_cksum || m->hdr.dgram_len) {
1329c55c2bf3SAlex Vesker 		rte_errno = ENOTSUP;
1330c55c2bf3SAlex Vesker 		return rte_errno;
1331c55c2bf3SAlex Vesker 	}
1332c55c2bf3SAlex Vesker 
1333c55c2bf3SAlex Vesker 	if (m->hdr.src_port) {
1334c55c2bf3SAlex Vesker 		fc = &cd->fc[DR_CALC_FNAME(L4_SPORT, inner)];
1335c55c2bf3SAlex Vesker 		fc->item_idx = item_idx;
133657800e6cSAlex Vesker 		fc->is_range = l && l->hdr.src_port;
1337c55c2bf3SAlex Vesker 		fc->tag_set = &mlx5dr_definer_udp_src_port_set;
1338c55c2bf3SAlex Vesker 		DR_CALC_SET(fc, eth_l4, source_port, inner);
1339c55c2bf3SAlex Vesker 	}
1340c55c2bf3SAlex Vesker 
1341c55c2bf3SAlex Vesker 	if (m->hdr.dst_port) {
1342c55c2bf3SAlex Vesker 		fc = &cd->fc[DR_CALC_FNAME(L4_DPORT, inner)];
1343c55c2bf3SAlex Vesker 		fc->item_idx = item_idx;
134457800e6cSAlex Vesker 		fc->is_range = l && l->hdr.dst_port;
1345c55c2bf3SAlex Vesker 		fc->tag_set = &mlx5dr_definer_udp_dst_port_set;
1346c55c2bf3SAlex Vesker 		DR_CALC_SET(fc, eth_l4, destination_port, inner);
1347c55c2bf3SAlex Vesker 	}
1348c55c2bf3SAlex Vesker 
1349c55c2bf3SAlex Vesker 	return 0;
1350c55c2bf3SAlex Vesker }
1351c55c2bf3SAlex Vesker 
1352c55c2bf3SAlex Vesker static int
1353c55c2bf3SAlex Vesker mlx5dr_definer_conv_item_tcp(struct mlx5dr_definer_conv_data *cd,
1354c55c2bf3SAlex Vesker 			     struct rte_flow_item *item,
1355c55c2bf3SAlex Vesker 			     int item_idx)
1356c55c2bf3SAlex Vesker {
1357c55c2bf3SAlex Vesker 	const struct rte_flow_item_tcp *m = item->mask;
135857800e6cSAlex Vesker 	const struct rte_flow_item_tcp *l = item->last;
1359c55c2bf3SAlex Vesker 	struct mlx5dr_definer_fc *fc;
1360c55c2bf3SAlex Vesker 	bool inner = cd->tunnel;
1361c55c2bf3SAlex Vesker 
1362c55c2bf3SAlex Vesker 	/* Overwrite match on L4 type TCP */
1363c55c2bf3SAlex Vesker 	if (!cd->relaxed) {
1364c55c2bf3SAlex Vesker 		fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)];
13659cbf5076SRongwei Liu 		if (!fc->not_overwrite) {
1366c55c2bf3SAlex Vesker 			fc->item_idx = item_idx;
1367c55c2bf3SAlex Vesker 			fc->tag_set = &mlx5dr_definer_tcp_protocol_set;
1368c55c2bf3SAlex Vesker 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
1369c55c2bf3SAlex Vesker 			DR_CALC_SET(fc, eth_l2, l4_type_bwc, inner);
1370c55c2bf3SAlex Vesker 		}
13719cbf5076SRongwei Liu 	}
1372c55c2bf3SAlex Vesker 
1373c55c2bf3SAlex Vesker 	if (!m)
1374c55c2bf3SAlex Vesker 		return 0;
1375c55c2bf3SAlex Vesker 
137657800e6cSAlex Vesker 	if (m->hdr.sent_seq || m->hdr.recv_ack || m->hdr.data_off ||
137757800e6cSAlex Vesker 	    m->hdr.rx_win || m->hdr.cksum || m->hdr.tcp_urp) {
137857800e6cSAlex Vesker 		rte_errno = ENOTSUP;
137957800e6cSAlex Vesker 		return rte_errno;
138057800e6cSAlex Vesker 	}
138157800e6cSAlex Vesker 
1382c55c2bf3SAlex Vesker 	if (m->hdr.tcp_flags) {
1383c55c2bf3SAlex Vesker 		fc = &cd->fc[DR_CALC_FNAME(TCP_FLAGS, inner)];
1384c55c2bf3SAlex Vesker 		fc->item_idx = item_idx;
138557800e6cSAlex Vesker 		fc->is_range = l && l->hdr.tcp_flags;
1386c55c2bf3SAlex Vesker 		fc->tag_set = &mlx5dr_definer_tcp_flags_set;
1387c55c2bf3SAlex Vesker 		DR_CALC_SET(fc, eth_l4, tcp_flags, inner);
1388c55c2bf3SAlex Vesker 	}
1389c55c2bf3SAlex Vesker 
1390c55c2bf3SAlex Vesker 	if (m->hdr.src_port) {
1391c55c2bf3SAlex Vesker 		fc = &cd->fc[DR_CALC_FNAME(L4_SPORT, inner)];
1392c55c2bf3SAlex Vesker 		fc->item_idx = item_idx;
139357800e6cSAlex Vesker 		fc->is_range = l && l->hdr.src_port;
1394c55c2bf3SAlex Vesker 		fc->tag_set = &mlx5dr_definer_tcp_src_port_set;
1395c55c2bf3SAlex Vesker 		DR_CALC_SET(fc, eth_l4, source_port, inner);
1396c55c2bf3SAlex Vesker 	}
1397c55c2bf3SAlex Vesker 
1398c55c2bf3SAlex Vesker 	if (m->hdr.dst_port) {
1399c55c2bf3SAlex Vesker 		fc = &cd->fc[DR_CALC_FNAME(L4_DPORT, inner)];
1400c55c2bf3SAlex Vesker 		fc->item_idx = item_idx;
140157800e6cSAlex Vesker 		fc->is_range = l && l->hdr.dst_port;
1402c55c2bf3SAlex Vesker 		fc->tag_set = &mlx5dr_definer_tcp_dst_port_set;
1403c55c2bf3SAlex Vesker 		DR_CALC_SET(fc, eth_l4, destination_port, inner);
1404c55c2bf3SAlex Vesker 	}
1405c55c2bf3SAlex Vesker 
1406c55c2bf3SAlex Vesker 	return 0;
1407c55c2bf3SAlex Vesker }
1408c55c2bf3SAlex Vesker 
1409c55c2bf3SAlex Vesker static int
1410c55c2bf3SAlex Vesker mlx5dr_definer_conv_item_gtp(struct mlx5dr_definer_conv_data *cd,
1411c55c2bf3SAlex Vesker 			     struct rte_flow_item *item,
1412c55c2bf3SAlex Vesker 			     int item_idx)
1413c55c2bf3SAlex Vesker {
1414d72b8fbdSGregory Etelson 	struct mlx5dr_cmd_query_caps *caps = cd->ctx->caps;
1415c55c2bf3SAlex Vesker 	const struct rte_flow_item_gtp *m = item->mask;
1416c55c2bf3SAlex Vesker 	struct mlx5dr_definer_fc *fc;
1417c55c2bf3SAlex Vesker 
141828e69588SAlex Vesker 	if (cd->tunnel) {
141928e69588SAlex Vesker 		DR_LOG(ERR, "Inner GTPU item not supported");
142028e69588SAlex Vesker 		rte_errno = ENOTSUP;
142128e69588SAlex Vesker 		return rte_errno;
142228e69588SAlex Vesker 	}
142328e69588SAlex Vesker 
1424c55c2bf3SAlex Vesker 	/* Overwrite GTPU dest port if not present */
1425c55c2bf3SAlex Vesker 	fc = &cd->fc[DR_CALC_FNAME(L4_DPORT, false)];
1426c55c2bf3SAlex Vesker 	if (!fc->tag_set && !cd->relaxed) {
1427c55c2bf3SAlex Vesker 		fc->item_idx = item_idx;
1428c55c2bf3SAlex Vesker 		fc->tag_set = &mlx5dr_definer_gtp_udp_port_set;
1429c55c2bf3SAlex Vesker 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
1430c55c2bf3SAlex Vesker 		DR_CALC_SET(fc, eth_l4, destination_port, false);
1431c55c2bf3SAlex Vesker 	}
1432c55c2bf3SAlex Vesker 
1433c55c2bf3SAlex Vesker 	if (!m)
1434c55c2bf3SAlex Vesker 		return 0;
1435c55c2bf3SAlex Vesker 
14362397616cSThomas Monjalon 	if (m->hdr.plen || m->hdr.gtp_hdr_info & ~MLX5DR_DEFINER_GTP_EXT_HDR_BIT) {
1437c55c2bf3SAlex Vesker 		rte_errno = ENOTSUP;
1438c55c2bf3SAlex Vesker 		return rte_errno;
1439c55c2bf3SAlex Vesker 	}
1440c55c2bf3SAlex Vesker 
14412397616cSThomas Monjalon 	if (m->hdr.teid) {
1442d72b8fbdSGregory Etelson 		if (!(caps->flex_protocols & MLX5_HCA_FLEX_GTPU_TEID_ENABLED)) {
1443c55c2bf3SAlex Vesker 			rte_errno = ENOTSUP;
1444c55c2bf3SAlex Vesker 			return rte_errno;
1445c55c2bf3SAlex Vesker 		}
1446c55c2bf3SAlex Vesker 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GTP_TEID];
1447c55c2bf3SAlex Vesker 		fc->item_idx = item_idx;
1448c55c2bf3SAlex Vesker 		fc->tag_set = &mlx5dr_definer_gtp_teid_set;
1449c55c2bf3SAlex Vesker 		fc->bit_mask = __mlx5_mask(header_gtp, teid);
1450d72b8fbdSGregory Etelson 		fc->byte_off = caps->format_select_gtpu_dw_1 * DW_SIZE;
1451c55c2bf3SAlex Vesker 	}
1452c55c2bf3SAlex Vesker 
14532397616cSThomas Monjalon 	if (m->hdr.gtp_hdr_info) {
1454d72b8fbdSGregory Etelson 		if (!(caps->flex_protocols & MLX5_HCA_FLEX_GTPU_DW_0_ENABLED)) {
1455c55c2bf3SAlex Vesker 			rte_errno = ENOTSUP;
1456c55c2bf3SAlex Vesker 			return rte_errno;
1457c55c2bf3SAlex Vesker 		}
1458c55c2bf3SAlex Vesker 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GTP_EXT_FLAG];
1459c55c2bf3SAlex Vesker 		fc->item_idx = item_idx;
1460c55c2bf3SAlex Vesker 		fc->tag_set = &mlx5dr_definer_gtp_ext_flag_set;
1461c55c2bf3SAlex Vesker 		fc->bit_mask = __mlx5_mask(header_gtp, ext_hdr_flag);
1462c55c2bf3SAlex Vesker 		fc->bit_off = __mlx5_dw_bit_off(header_gtp, ext_hdr_flag);
1463d72b8fbdSGregory Etelson 		fc->byte_off = caps->format_select_gtpu_dw_0 * DW_SIZE;
1464c55c2bf3SAlex Vesker 	}
1465c55c2bf3SAlex Vesker 
1466c55c2bf3SAlex Vesker 
14672397616cSThomas Monjalon 	if (m->hdr.msg_type) {
1468d72b8fbdSGregory Etelson 		if (!(caps->flex_protocols & MLX5_HCA_FLEX_GTPU_DW_0_ENABLED)) {
1469c55c2bf3SAlex Vesker 			rte_errno = ENOTSUP;
1470c55c2bf3SAlex Vesker 			return rte_errno;
1471c55c2bf3SAlex Vesker 		}
1472c55c2bf3SAlex Vesker 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GTP_MSG_TYPE];
1473c55c2bf3SAlex Vesker 		fc->item_idx = item_idx;
1474c55c2bf3SAlex Vesker 		fc->tag_set = &mlx5dr_definer_gtp_msg_type_set;
1475c55c2bf3SAlex Vesker 		fc->bit_mask = __mlx5_mask(header_gtp, msg_type);
1476c55c2bf3SAlex Vesker 		fc->bit_off = __mlx5_dw_bit_off(header_gtp, msg_type);
1477d72b8fbdSGregory Etelson 		fc->byte_off = caps->format_select_gtpu_dw_0 * DW_SIZE;
1478c55c2bf3SAlex Vesker 	}
1479c55c2bf3SAlex Vesker 
1480c55c2bf3SAlex Vesker 	return 0;
1481c55c2bf3SAlex Vesker }
1482c55c2bf3SAlex Vesker 
1483c55c2bf3SAlex Vesker static int
1484c55c2bf3SAlex Vesker mlx5dr_definer_conv_item_gtp_psc(struct mlx5dr_definer_conv_data *cd,
1485c55c2bf3SAlex Vesker 				 struct rte_flow_item *item,
1486c55c2bf3SAlex Vesker 				 int item_idx)
1487c55c2bf3SAlex Vesker {
1488d72b8fbdSGregory Etelson 	struct mlx5dr_cmd_query_caps *caps = cd->ctx->caps;
1489c55c2bf3SAlex Vesker 	const struct rte_flow_item_gtp_psc *m = item->mask;
1490c55c2bf3SAlex Vesker 	struct mlx5dr_definer_fc *fc;
1491c55c2bf3SAlex Vesker 
1492c55c2bf3SAlex Vesker 	/* Overwrite GTP extension flag to be 1 */
1493c55c2bf3SAlex Vesker 	if (!cd->relaxed) {
1494d72b8fbdSGregory Etelson 		if (!(caps->flex_protocols & MLX5_HCA_FLEX_GTPU_DW_0_ENABLED)) {
1495c55c2bf3SAlex Vesker 			rte_errno = ENOTSUP;
1496c55c2bf3SAlex Vesker 			return rte_errno;
1497c55c2bf3SAlex Vesker 		}
1498c55c2bf3SAlex Vesker 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GTP_EXT_FLAG];
1499c55c2bf3SAlex Vesker 		fc->item_idx = item_idx;
1500c55c2bf3SAlex Vesker 		fc->tag_set = &mlx5dr_definer_ones_set;
1501c55c2bf3SAlex Vesker 		fc->bit_mask = __mlx5_mask(header_gtp, ext_hdr_flag);
1502c55c2bf3SAlex Vesker 		fc->bit_off = __mlx5_dw_bit_off(header_gtp, ext_hdr_flag);
1503d72b8fbdSGregory Etelson 		fc->byte_off = caps->format_select_gtpu_dw_0 * DW_SIZE;
1504c55c2bf3SAlex Vesker 	}
1505c55c2bf3SAlex Vesker 
1506c55c2bf3SAlex Vesker 	/* Overwrite next extension header type */
1507c55c2bf3SAlex Vesker 	if (!cd->relaxed) {
1508d72b8fbdSGregory Etelson 		if (!(caps->flex_protocols & MLX5_HCA_FLEX_GTPU_DW_2_ENABLED)) {
1509c55c2bf3SAlex Vesker 			rte_errno = ENOTSUP;
1510c55c2bf3SAlex Vesker 			return rte_errno;
1511c55c2bf3SAlex Vesker 		}
1512c55c2bf3SAlex Vesker 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GTP_NEXT_EXT_HDR];
1513c55c2bf3SAlex Vesker 		fc->item_idx = item_idx;
1514c55c2bf3SAlex Vesker 		fc->tag_set = &mlx5dr_definer_gtp_next_ext_hdr_set;
1515c55c2bf3SAlex Vesker 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
1516c55c2bf3SAlex Vesker 		fc->bit_mask = __mlx5_mask(header_opt_gtp, next_ext_hdr_type);
1517c55c2bf3SAlex Vesker 		fc->bit_off = __mlx5_dw_bit_off(header_opt_gtp, next_ext_hdr_type);
1518d72b8fbdSGregory Etelson 		fc->byte_off = caps->format_select_gtpu_dw_2 * DW_SIZE;
1519c55c2bf3SAlex Vesker 	}
1520c55c2bf3SAlex Vesker 
1521c55c2bf3SAlex Vesker 	if (!m)
1522c55c2bf3SAlex Vesker 		return 0;
1523c55c2bf3SAlex Vesker 
1524c55c2bf3SAlex Vesker 	if (m->hdr.type) {
1525d72b8fbdSGregory Etelson 		if (!(caps->flex_protocols & MLX5_HCA_FLEX_GTPU_FIRST_EXT_DW_0_ENABLED)) {
1526c55c2bf3SAlex Vesker 			rte_errno = ENOTSUP;
1527c55c2bf3SAlex Vesker 			return rte_errno;
1528c55c2bf3SAlex Vesker 		}
1529c55c2bf3SAlex Vesker 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GTP_EXT_HDR_PDU];
1530c55c2bf3SAlex Vesker 		fc->item_idx = item_idx;
1531c55c2bf3SAlex Vesker 		fc->tag_set = &mlx5dr_definer_gtp_ext_hdr_pdu_set;
1532c55c2bf3SAlex Vesker 		fc->bit_mask = __mlx5_mask(header_gtp_psc, pdu_type);
1533c55c2bf3SAlex Vesker 		fc->bit_off = __mlx5_dw_bit_off(header_gtp_psc, pdu_type);
1534d72b8fbdSGregory Etelson 		fc->byte_off = caps->format_select_gtpu_ext_dw_0 * DW_SIZE;
1535c55c2bf3SAlex Vesker 	}
1536c55c2bf3SAlex Vesker 
1537c55c2bf3SAlex Vesker 	if (m->hdr.qfi) {
1538d72b8fbdSGregory Etelson 		if (!(caps->flex_protocols & MLX5_HCA_FLEX_GTPU_FIRST_EXT_DW_0_ENABLED)) {
1539c55c2bf3SAlex Vesker 			rte_errno = ENOTSUP;
1540c55c2bf3SAlex Vesker 			return rte_errno;
1541c55c2bf3SAlex Vesker 		}
1542c55c2bf3SAlex Vesker 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GTP_EXT_HDR_QFI];
1543c55c2bf3SAlex Vesker 		fc->item_idx = item_idx;
1544c55c2bf3SAlex Vesker 		fc->tag_set = &mlx5dr_definer_gtp_ext_hdr_qfi_set;
1545c55c2bf3SAlex Vesker 		fc->bit_mask = __mlx5_mask(header_gtp_psc, qfi);
1546c55c2bf3SAlex Vesker 		fc->bit_off = __mlx5_dw_bit_off(header_gtp_psc, qfi);
1547d72b8fbdSGregory Etelson 		fc->byte_off = caps->format_select_gtpu_ext_dw_0 * DW_SIZE;
1548c55c2bf3SAlex Vesker 	}
1549c55c2bf3SAlex Vesker 
1550c55c2bf3SAlex Vesker 	return 0;
1551c55c2bf3SAlex Vesker }
1552c55c2bf3SAlex Vesker 
1553c55c2bf3SAlex Vesker static int
1554c55c2bf3SAlex Vesker mlx5dr_definer_conv_item_port(struct mlx5dr_definer_conv_data *cd,
1555c55c2bf3SAlex Vesker 			      struct rte_flow_item *item,
1556c55c2bf3SAlex Vesker 			      int item_idx)
1557c55c2bf3SAlex Vesker {
1558d72b8fbdSGregory Etelson 	struct mlx5dr_cmd_query_caps *caps = cd->ctx->caps;
1559c55c2bf3SAlex Vesker 	const struct rte_flow_item_ethdev *m = item->mask;
1560c55c2bf3SAlex Vesker 	struct mlx5dr_definer_fc *fc;
1561c55c2bf3SAlex Vesker 
1562c55c2bf3SAlex Vesker 	if (m->port_id) {
1563d72b8fbdSGregory Etelson 		if (!caps->wire_regc_mask) {
1564c55c2bf3SAlex Vesker 			DR_LOG(ERR, "Port ID item not supported, missing wire REGC mask");
1565c55c2bf3SAlex Vesker 			rte_errno = ENOTSUP;
1566c55c2bf3SAlex Vesker 			return rte_errno;
1567c55c2bf3SAlex Vesker 		}
1568c55c2bf3SAlex Vesker 
1569c55c2bf3SAlex Vesker 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_VPORT_REG_C_0];
1570c55c2bf3SAlex Vesker 		fc->item_idx = item_idx;
1571c55c2bf3SAlex Vesker 		fc->tag_set = &mlx5dr_definer_vport_set;
1572c55c2bf3SAlex Vesker 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
1573c55c2bf3SAlex Vesker 		DR_CALC_SET_HDR(fc, registers, register_c_0);
1574191128d7SDavid Marchand 		fc->bit_off = rte_ctz32(caps->wire_regc_mask);
15758f8dad42SAlex Vesker 		fc->bit_mask = caps->wire_regc_mask >> fc->bit_off;
15764cbeba6fSSuanming Mou 		fc->dr_ctx = cd->ctx;
1577c55c2bf3SAlex Vesker 	} else {
1578c55c2bf3SAlex Vesker 		DR_LOG(ERR, "Pord ID item mask must specify ID mask");
1579c55c2bf3SAlex Vesker 		rte_errno = EINVAL;
1580c55c2bf3SAlex Vesker 		return rte_errno;
1581c55c2bf3SAlex Vesker 	}
1582c55c2bf3SAlex Vesker 
1583c55c2bf3SAlex Vesker 	return 0;
1584c55c2bf3SAlex Vesker }
1585c55c2bf3SAlex Vesker 
1586c55c2bf3SAlex Vesker static int
1587c55c2bf3SAlex Vesker mlx5dr_definer_conv_item_vxlan(struct mlx5dr_definer_conv_data *cd,
1588c55c2bf3SAlex Vesker 			       struct rte_flow_item *item,
1589c55c2bf3SAlex Vesker 			       int item_idx)
1590c55c2bf3SAlex Vesker {
1591c55c2bf3SAlex Vesker 	const struct rte_flow_item_vxlan *m = item->mask;
1592c55c2bf3SAlex Vesker 	struct mlx5dr_definer_fc *fc;
1593c55c2bf3SAlex Vesker 	bool inner = cd->tunnel;
1594c55c2bf3SAlex Vesker 
159528e69588SAlex Vesker 	if (inner) {
159628e69588SAlex Vesker 		DR_LOG(ERR, "Inner VXLAN item not supported");
159728e69588SAlex Vesker 		rte_errno = ENOTSUP;
159828e69588SAlex Vesker 		return rte_errno;
159928e69588SAlex Vesker 	}
160028e69588SAlex Vesker 
160128e69588SAlex Vesker 	/* In order to match on VXLAN we must match on ip_protocol and l4_dport */
1602c55c2bf3SAlex Vesker 	if (!cd->relaxed) {
1603c55c2bf3SAlex Vesker 		fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)];
1604c55c2bf3SAlex Vesker 		if (!fc->tag_set) {
1605c55c2bf3SAlex Vesker 			fc->item_idx = item_idx;
1606c55c2bf3SAlex Vesker 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
1607c55c2bf3SAlex Vesker 			fc->tag_set = &mlx5dr_definer_udp_protocol_set;
1608c55c2bf3SAlex Vesker 			DR_CALC_SET(fc, eth_l2, l4_type_bwc, inner);
1609c55c2bf3SAlex Vesker 		}
1610c55c2bf3SAlex Vesker 
1611c55c2bf3SAlex Vesker 		fc = &cd->fc[DR_CALC_FNAME(L4_DPORT, inner)];
1612c55c2bf3SAlex Vesker 		if (!fc->tag_set) {
1613c55c2bf3SAlex Vesker 			fc->item_idx = item_idx;
1614c55c2bf3SAlex Vesker 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
1615c55c2bf3SAlex Vesker 			fc->tag_set = &mlx5dr_definer_vxlan_udp_port_set;
1616c55c2bf3SAlex Vesker 			DR_CALC_SET(fc, eth_l4, destination_port, inner);
1617c55c2bf3SAlex Vesker 		}
1618c55c2bf3SAlex Vesker 	}
1619c55c2bf3SAlex Vesker 
1620c55c2bf3SAlex Vesker 	if (!m)
1621c55c2bf3SAlex Vesker 		return 0;
1622c55c2bf3SAlex Vesker 
162349765b78SRongwei Liu 	if (m->hdr.vx_flags) {
162449765b78SRongwei Liu 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_VXLAN_DW0];
1625c55c2bf3SAlex Vesker 		fc->item_idx = item_idx;
162649765b78SRongwei Liu 		fc->tag_set = &mlx5dr_definer_vxlan_vx_flags_set;
1627c55c2bf3SAlex Vesker 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_0);
1628c55c2bf3SAlex Vesker 	}
1629c55c2bf3SAlex Vesker 
163049765b78SRongwei Liu 	if (m->hdr.vx_vni) {
163149765b78SRongwei Liu 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_VXLAN_DW1];
1632c55c2bf3SAlex Vesker 		fc->item_idx = item_idx;
163349765b78SRongwei Liu 		fc->tag_set = &mlx5dr_definer_vxlan_vx_vni_set;
1634c55c2bf3SAlex Vesker 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_1);
1635c55c2bf3SAlex Vesker 	}
1636c55c2bf3SAlex Vesker 
1637c55c2bf3SAlex Vesker 	return 0;
1638c55c2bf3SAlex Vesker }
1639c55c2bf3SAlex Vesker 
16405bf14a4bSErez Shitrit static int
16415bf14a4bSErez Shitrit mlx5dr_definer_conv_item_mpls(struct mlx5dr_definer_conv_data *cd,
16425bf14a4bSErez Shitrit 			      struct rte_flow_item *item,
16435bf14a4bSErez Shitrit 			      int item_idx)
16445bf14a4bSErez Shitrit {
16455bf14a4bSErez Shitrit 	const struct rte_flow_item_mpls *m = item->mask;
16465bf14a4bSErez Shitrit 	struct mlx5dr_definer_fc *fc;
16475bf14a4bSErez Shitrit 	bool inner = cd->tunnel;
16485bf14a4bSErez Shitrit 
16495bf14a4bSErez Shitrit 	if (inner) {
16505bf14a4bSErez Shitrit 		DR_LOG(ERR, "Inner MPLS item not supported");
16515bf14a4bSErez Shitrit 		rte_errno = ENOTSUP;
16525bf14a4bSErez Shitrit 		return rte_errno;
16535bf14a4bSErez Shitrit 	}
16545bf14a4bSErez Shitrit 
16554fa44e6fSErez Shitrit 	if (!cd->relaxed) {
16565bf14a4bSErez Shitrit 		/* In order to match on MPLS we must match on ip_protocol and l4_dport. */
16575bf14a4bSErez Shitrit 		fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, false)];
16585bf14a4bSErez Shitrit 		if (!fc->tag_set) {
16595bf14a4bSErez Shitrit 			fc->item_idx = item_idx;
16605bf14a4bSErez Shitrit 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
16615bf14a4bSErez Shitrit 			fc->tag_set = &mlx5dr_definer_udp_protocol_set;
16625bf14a4bSErez Shitrit 			DR_CALC_SET(fc, eth_l2, l4_type_bwc, false);
16635bf14a4bSErez Shitrit 		}
16645bf14a4bSErez Shitrit 
16654fa44e6fSErez Shitrit 		/* Currently support only MPLSoUDP */
16665bf14a4bSErez Shitrit 		fc = &cd->fc[DR_CALC_FNAME(L4_DPORT, false)];
16675bf14a4bSErez Shitrit 		if (!fc->tag_set) {
16685bf14a4bSErez Shitrit 			fc->item_idx = item_idx;
16695bf14a4bSErez Shitrit 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
16705bf14a4bSErez Shitrit 			fc->tag_set = &mlx5dr_definer_mpls_udp_port_set;
16715bf14a4bSErez Shitrit 			DR_CALC_SET(fc, eth_l4, destination_port, false);
16725bf14a4bSErez Shitrit 		}
16734fa44e6fSErez Shitrit 	}
16745bf14a4bSErez Shitrit 
16755bf14a4bSErez Shitrit 	if (m && (!is_mem_zero(m->label_tc_s, 3) || m->ttl)) {
16765bf14a4bSErez Shitrit 		/* According to HW MPLSoUDP is handled as inner */
16775bf14a4bSErez Shitrit 		fc = mlx5dr_definer_get_mpls_fc(cd, true);
16785bf14a4bSErez Shitrit 		if (!fc)
16795bf14a4bSErez Shitrit 			return rte_errno;
16805bf14a4bSErez Shitrit 
16815bf14a4bSErez Shitrit 		fc->item_idx = item_idx;
16825bf14a4bSErez Shitrit 		fc->tag_set = &mlx5dr_definer_mpls_label_set;
16835bf14a4bSErez Shitrit 	} else { /* Mask relevant oks2 bit, indicates MPLS label exists.
16845bf14a4bSErez Shitrit 		  * According to HW MPLSoUDP is handled as inner
16855bf14a4bSErez Shitrit 		  */
16865bf14a4bSErez Shitrit 		fc = mlx5dr_definer_get_mpls_oks_fc(cd, true);
16875bf14a4bSErez Shitrit 		if (!fc)
16885bf14a4bSErez Shitrit 			return rte_errno;
16895bf14a4bSErez Shitrit 
16905bf14a4bSErez Shitrit 		fc->item_idx = item_idx;
16915bf14a4bSErez Shitrit 		fc->tag_set = mlx5dr_definer_ones_set;
16925bf14a4bSErez Shitrit 	}
16935bf14a4bSErez Shitrit 
16945bf14a4bSErez Shitrit 	return 0;
16955bf14a4bSErez Shitrit }
16965bf14a4bSErez Shitrit 
1697c55c2bf3SAlex Vesker static struct mlx5dr_definer_fc *
1698c55c2bf3SAlex Vesker mlx5dr_definer_get_register_fc(struct mlx5dr_definer_conv_data *cd, int reg)
1699c55c2bf3SAlex Vesker {
1700c55c2bf3SAlex Vesker 	struct mlx5dr_definer_fc *fc;
1701c55c2bf3SAlex Vesker 
1702c55c2bf3SAlex Vesker 	switch (reg) {
1703c55c2bf3SAlex Vesker 	case REG_C_0:
1704c55c2bf3SAlex Vesker 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_REG_0];
1705c55c2bf3SAlex Vesker 		DR_CALC_SET_HDR(fc, registers, register_c_0);
1706c55c2bf3SAlex Vesker 		break;
1707c55c2bf3SAlex Vesker 	case REG_C_1:
1708c55c2bf3SAlex Vesker 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_REG_1];
1709c55c2bf3SAlex Vesker 		DR_CALC_SET_HDR(fc, registers, register_c_1);
1710c55c2bf3SAlex Vesker 		break;
1711c55c2bf3SAlex Vesker 	case REG_C_2:
1712c55c2bf3SAlex Vesker 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_REG_2];
1713c55c2bf3SAlex Vesker 		DR_CALC_SET_HDR(fc, registers, register_c_2);
1714c55c2bf3SAlex Vesker 		break;
1715c55c2bf3SAlex Vesker 	case REG_C_3:
1716c55c2bf3SAlex Vesker 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_REG_3];
1717c55c2bf3SAlex Vesker 		DR_CALC_SET_HDR(fc, registers, register_c_3);
1718c55c2bf3SAlex Vesker 		break;
1719c55c2bf3SAlex Vesker 	case REG_C_4:
1720c55c2bf3SAlex Vesker 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_REG_4];
1721c55c2bf3SAlex Vesker 		DR_CALC_SET_HDR(fc, registers, register_c_4);
1722c55c2bf3SAlex Vesker 		break;
1723c55c2bf3SAlex Vesker 	case REG_C_5:
1724c55c2bf3SAlex Vesker 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_REG_5];
1725c55c2bf3SAlex Vesker 		DR_CALC_SET_HDR(fc, registers, register_c_5);
1726c55c2bf3SAlex Vesker 		break;
1727c55c2bf3SAlex Vesker 	case REG_C_6:
1728c55c2bf3SAlex Vesker 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_REG_6];
1729c55c2bf3SAlex Vesker 		DR_CALC_SET_HDR(fc, registers, register_c_6);
1730c55c2bf3SAlex Vesker 		break;
1731c55c2bf3SAlex Vesker 	case REG_C_7:
1732c55c2bf3SAlex Vesker 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_REG_7];
1733c55c2bf3SAlex Vesker 		DR_CALC_SET_HDR(fc, registers, register_c_7);
1734c55c2bf3SAlex Vesker 		break;
17357e3a1442SItamar Gozlan 	case REG_C_8:
17367e3a1442SItamar Gozlan 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_REG_8];
17377e3a1442SItamar Gozlan 		DR_CALC_SET_HDR(fc, registers, register_c_8);
17387e3a1442SItamar Gozlan 		break;
17397e3a1442SItamar Gozlan 	case REG_C_9:
17407e3a1442SItamar Gozlan 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_REG_9];
17417e3a1442SItamar Gozlan 		DR_CALC_SET_HDR(fc, registers, register_c_9);
17427e3a1442SItamar Gozlan 		break;
17437e3a1442SItamar Gozlan 	case REG_C_10:
17447e3a1442SItamar Gozlan 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_REG_10];
17457e3a1442SItamar Gozlan 		DR_CALC_SET_HDR(fc, registers, register_c_10);
17467e3a1442SItamar Gozlan 		break;
17477e3a1442SItamar Gozlan 	case REG_C_11:
17487e3a1442SItamar Gozlan 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_REG_11];
17497e3a1442SItamar Gozlan 		DR_CALC_SET_HDR(fc, registers, register_c_11);
17507e3a1442SItamar Gozlan 		break;
1751c55c2bf3SAlex Vesker 	case REG_A:
1752c55c2bf3SAlex Vesker 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_REG_A];
1753c55c2bf3SAlex Vesker 		DR_CALC_SET_HDR(fc, metadata, general_purpose);
1754c55c2bf3SAlex Vesker 		break;
1755c55c2bf3SAlex Vesker 	case REG_B:
1756c55c2bf3SAlex Vesker 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_REG_B];
1757c55c2bf3SAlex Vesker 		DR_CALC_SET_HDR(fc, metadata, metadata_to_cqe);
1758c55c2bf3SAlex Vesker 		break;
1759c55c2bf3SAlex Vesker 	default:
1760c55c2bf3SAlex Vesker 		rte_errno = ENOTSUP;
1761c55c2bf3SAlex Vesker 		return NULL;
1762c55c2bf3SAlex Vesker 	}
1763c55c2bf3SAlex Vesker 
1764c55c2bf3SAlex Vesker 	return fc;
1765c55c2bf3SAlex Vesker }
1766c55c2bf3SAlex Vesker 
1767c55c2bf3SAlex Vesker static int
1768c55c2bf3SAlex Vesker mlx5dr_definer_conv_item_tag(struct mlx5dr_definer_conv_data *cd,
1769c55c2bf3SAlex Vesker 			     struct rte_flow_item *item,
1770c55c2bf3SAlex Vesker 			     int item_idx)
1771c55c2bf3SAlex Vesker {
1772c55c2bf3SAlex Vesker 	const struct rte_flow_item_tag *m = item->mask;
1773c55c2bf3SAlex Vesker 	const struct rte_flow_item_tag *v = item->spec;
177457800e6cSAlex Vesker 	const struct rte_flow_item_tag *l = item->last;
1775c55c2bf3SAlex Vesker 	struct mlx5dr_definer_fc *fc;
1776c55c2bf3SAlex Vesker 	int reg;
1777c55c2bf3SAlex Vesker 
1778c55c2bf3SAlex Vesker 	if (!m || !v)
1779c55c2bf3SAlex Vesker 		return 0;
1780c55c2bf3SAlex Vesker 
1781c55c2bf3SAlex Vesker 	if (item->type == RTE_FLOW_ITEM_TYPE_TAG)
178204e740e6SGregory Etelson 		reg = flow_hw_get_reg_id_from_ctx(cd->ctx,
178304e740e6SGregory Etelson 						  RTE_FLOW_ITEM_TYPE_TAG,
17842b45a773SMichael Baum 						  cd->table_type,
178504e740e6SGregory Etelson 						  v->index);
1786c55c2bf3SAlex Vesker 	else
1787c55c2bf3SAlex Vesker 		reg = (int)v->index;
1788c55c2bf3SAlex Vesker 
1789c55c2bf3SAlex Vesker 	if (reg <= 0) {
1790c55c2bf3SAlex Vesker 		DR_LOG(ERR, "Invalid register for item tag");
1791c55c2bf3SAlex Vesker 		rte_errno = EINVAL;
1792c55c2bf3SAlex Vesker 		return rte_errno;
1793c55c2bf3SAlex Vesker 	}
1794c55c2bf3SAlex Vesker 
1795c55c2bf3SAlex Vesker 	fc = mlx5dr_definer_get_register_fc(cd, reg);
1796c55c2bf3SAlex Vesker 	if (!fc)
1797c55c2bf3SAlex Vesker 		return rte_errno;
1798c55c2bf3SAlex Vesker 
1799c55c2bf3SAlex Vesker 	fc->item_idx = item_idx;
180057800e6cSAlex Vesker 	fc->is_range = l && l->index;
1801c55c2bf3SAlex Vesker 	fc->tag_set = &mlx5dr_definer_tag_set;
180257800e6cSAlex Vesker 
1803c55c2bf3SAlex Vesker 	return 0;
1804c55c2bf3SAlex Vesker }
1805c55c2bf3SAlex Vesker 
18069fa0e142SGregory Etelson static void
18079fa0e142SGregory Etelson mlx5dr_definer_quota_set(struct mlx5dr_definer_fc *fc,
18089fa0e142SGregory Etelson 			 const void *item_data, uint8_t *tag)
18099fa0e142SGregory Etelson {
18109fa0e142SGregory Etelson 	/**
18119fa0e142SGregory Etelson 	 * MLX5 PMD implements QUOTA with Meter object.
18129fa0e142SGregory Etelson 	 * PMD Quota action translation implicitly increments
18139fa0e142SGregory Etelson 	 * Meter register value after HW assigns it.
18149fa0e142SGregory Etelson 	 * Meter register values are:
18159fa0e142SGregory Etelson 	 *            HW     QUOTA(HW+1)  QUOTA state
18169fa0e142SGregory Etelson 	 * RED        0        1 (01b)       BLOCK
18179fa0e142SGregory Etelson 	 * YELLOW     1        2 (10b)       PASS
18189fa0e142SGregory Etelson 	 * GREEN      2        3 (11b)       PASS
18199fa0e142SGregory Etelson 	 *
18209fa0e142SGregory Etelson 	 * Quota item checks Meter register bit 1 value to determine state:
18219fa0e142SGregory Etelson 	 *            SPEC       MASK
18229fa0e142SGregory Etelson 	 * PASS     2 (10b)    2 (10b)
18239fa0e142SGregory Etelson 	 * BLOCK    0 (00b)    2 (10b)
18249fa0e142SGregory Etelson 	 *
18259fa0e142SGregory Etelson 	 * item_data is NULL when template quota item is non-masked:
18269fa0e142SGregory Etelson 	 * .. / quota / ..
18279fa0e142SGregory Etelson 	 */
18289fa0e142SGregory Etelson 
18299fa0e142SGregory Etelson 	const struct rte_flow_item_quota *quota = item_data;
18309fa0e142SGregory Etelson 	uint32_t val;
18319fa0e142SGregory Etelson 
18329fa0e142SGregory Etelson 	if (quota && quota->state == RTE_FLOW_QUOTA_STATE_BLOCK)
18339fa0e142SGregory Etelson 		val = MLX5DR_DEFINER_QUOTA_BLOCK;
18349fa0e142SGregory Etelson 	else
18359fa0e142SGregory Etelson 		val = MLX5DR_DEFINER_QUOTA_PASS;
18369fa0e142SGregory Etelson 
18379fa0e142SGregory Etelson 	DR_SET(tag, val, fc->byte_off, fc->bit_off, fc->bit_mask);
18389fa0e142SGregory Etelson }
18399fa0e142SGregory Etelson 
18409fa0e142SGregory Etelson static int
18419fa0e142SGregory Etelson mlx5dr_definer_conv_item_quota(struct mlx5dr_definer_conv_data *cd,
18429fa0e142SGregory Etelson 			       __rte_unused struct rte_flow_item *item,
18439fa0e142SGregory Etelson 			       int item_idx)
18449fa0e142SGregory Etelson {
184510943706SMichael Baum 	int mtr_reg = flow_hw_get_reg_id_from_ctx(cd->ctx,
184610943706SMichael Baum 						  RTE_FLOW_ITEM_TYPE_METER_COLOR,
18472b45a773SMichael Baum 						  cd->table_type, 0);
18489fa0e142SGregory Etelson 	struct mlx5dr_definer_fc *fc;
18499fa0e142SGregory Etelson 
18509fa0e142SGregory Etelson 	if (mtr_reg < 0) {
18519fa0e142SGregory Etelson 		rte_errno = EINVAL;
18529fa0e142SGregory Etelson 		return rte_errno;
18539fa0e142SGregory Etelson 	}
18549fa0e142SGregory Etelson 
18559fa0e142SGregory Etelson 	fc = mlx5dr_definer_get_register_fc(cd, mtr_reg);
18569fa0e142SGregory Etelson 	if (!fc)
18579fa0e142SGregory Etelson 		return rte_errno;
18589fa0e142SGregory Etelson 
18599fa0e142SGregory Etelson 	fc->tag_set = &mlx5dr_definer_quota_set;
18609fa0e142SGregory Etelson 	fc->item_idx = item_idx;
18619fa0e142SGregory Etelson 	return 0;
18629fa0e142SGregory Etelson }
18639fa0e142SGregory Etelson 
1864c55c2bf3SAlex Vesker static int
1865c55c2bf3SAlex Vesker mlx5dr_definer_conv_item_metadata(struct mlx5dr_definer_conv_data *cd,
1866c55c2bf3SAlex Vesker 				  struct rte_flow_item *item,
1867c55c2bf3SAlex Vesker 				  int item_idx)
1868c55c2bf3SAlex Vesker {
1869c55c2bf3SAlex Vesker 	const struct rte_flow_item_meta *m = item->mask;
187057800e6cSAlex Vesker 	const struct rte_flow_item_meta *l = item->last;
1871c55c2bf3SAlex Vesker 	struct mlx5dr_definer_fc *fc;
1872c55c2bf3SAlex Vesker 	int reg;
1873c55c2bf3SAlex Vesker 
1874c55c2bf3SAlex Vesker 	if (!m)
1875c55c2bf3SAlex Vesker 		return 0;
1876c55c2bf3SAlex Vesker 
187710943706SMichael Baum 	reg = flow_hw_get_reg_id_from_ctx(cd->ctx, RTE_FLOW_ITEM_TYPE_META,
18782b45a773SMichael Baum 					  cd->table_type, -1);
1879c55c2bf3SAlex Vesker 	if (reg <= 0) {
1880c55c2bf3SAlex Vesker 		DR_LOG(ERR, "Invalid register for item metadata");
1881c55c2bf3SAlex Vesker 		rte_errno = EINVAL;
1882c55c2bf3SAlex Vesker 		return rte_errno;
1883c55c2bf3SAlex Vesker 	}
1884c55c2bf3SAlex Vesker 
1885c55c2bf3SAlex Vesker 	fc = mlx5dr_definer_get_register_fc(cd, reg);
1886c55c2bf3SAlex Vesker 	if (!fc)
1887c55c2bf3SAlex Vesker 		return rte_errno;
1888c55c2bf3SAlex Vesker 
1889c55c2bf3SAlex Vesker 	fc->item_idx = item_idx;
189057800e6cSAlex Vesker 	fc->is_range = l && l->data;
1891c55c2bf3SAlex Vesker 	fc->tag_set = &mlx5dr_definer_metadata_set;
189257800e6cSAlex Vesker 
1893c55c2bf3SAlex Vesker 	return 0;
1894c55c2bf3SAlex Vesker }
1895c55c2bf3SAlex Vesker 
1896c55c2bf3SAlex Vesker static int
18977aa6c077SSuanming Mou mlx5dr_definer_conv_item_tx_queue(struct mlx5dr_definer_conv_data *cd,
18987aa6c077SSuanming Mou 				  struct rte_flow_item *item,
18997aa6c077SSuanming Mou 				  int item_idx)
19007aa6c077SSuanming Mou {
19017aa6c077SSuanming Mou 	const struct rte_flow_item_tx_queue *m = item->mask;
19027aa6c077SSuanming Mou 	struct mlx5dr_definer_fc *fc;
19037aa6c077SSuanming Mou 
19047aa6c077SSuanming Mou 	if (!m)
19057aa6c077SSuanming Mou 		return 0;
19067aa6c077SSuanming Mou 
19077aa6c077SSuanming Mou 	if (m->tx_queue) {
19087aa6c077SSuanming Mou 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_SOURCE_QP];
19097aa6c077SSuanming Mou 		fc->item_idx = item_idx;
19107aa6c077SSuanming Mou 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
19117aa6c077SSuanming Mou 		fc->tag_set = &mlx5dr_definer_tx_queue_set;
19127aa6c077SSuanming Mou 		/* User extra_data to save DPDK port_id. */
19137aa6c077SSuanming Mou 		fc->extra_data = flow_hw_get_port_id(cd->ctx);
19147aa6c077SSuanming Mou 		if (fc->extra_data == UINT16_MAX) {
19157aa6c077SSuanming Mou 			DR_LOG(ERR, "Invalid port for item tx_queue");
19167aa6c077SSuanming Mou 			rte_errno = EINVAL;
19177aa6c077SSuanming Mou 			return rte_errno;
19187aa6c077SSuanming Mou 		}
19197aa6c077SSuanming Mou 		DR_CALC_SET_HDR(fc, source_qp_gvmi, source_qp);
19207aa6c077SSuanming Mou 	}
19217aa6c077SSuanming Mou 
19227aa6c077SSuanming Mou 	return 0;
19237aa6c077SSuanming Mou }
19247aa6c077SSuanming Mou 
19257aa6c077SSuanming Mou static int
1926c55c2bf3SAlex Vesker mlx5dr_definer_conv_item_sq(struct mlx5dr_definer_conv_data *cd,
1927c55c2bf3SAlex Vesker 			    struct rte_flow_item *item,
1928c55c2bf3SAlex Vesker 			    int item_idx)
1929c55c2bf3SAlex Vesker {
1930c55c2bf3SAlex Vesker 	const struct mlx5_rte_flow_item_sq *m = item->mask;
1931c55c2bf3SAlex Vesker 	struct mlx5dr_definer_fc *fc;
1932c55c2bf3SAlex Vesker 
1933c55c2bf3SAlex Vesker 	if (!m)
1934c55c2bf3SAlex Vesker 		return 0;
1935c55c2bf3SAlex Vesker 
1936c55c2bf3SAlex Vesker 	if (m->queue) {
1937c55c2bf3SAlex Vesker 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_SOURCE_QP];
1938c55c2bf3SAlex Vesker 		fc->item_idx = item_idx;
1939c55c2bf3SAlex Vesker 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
1940c55c2bf3SAlex Vesker 		fc->tag_set = &mlx5dr_definer_source_qp_set;
1941c55c2bf3SAlex Vesker 		DR_CALC_SET_HDR(fc, source_qp_gvmi, source_qp);
1942c55c2bf3SAlex Vesker 	}
1943c55c2bf3SAlex Vesker 
1944c55c2bf3SAlex Vesker 	return 0;
1945c55c2bf3SAlex Vesker }
1946c55c2bf3SAlex Vesker 
1947c55c2bf3SAlex Vesker static int
1948c55c2bf3SAlex Vesker mlx5dr_definer_conv_item_gre(struct mlx5dr_definer_conv_data *cd,
1949c55c2bf3SAlex Vesker 			     struct rte_flow_item *item,
1950c55c2bf3SAlex Vesker 			     int item_idx)
1951c55c2bf3SAlex Vesker {
1952c55c2bf3SAlex Vesker 	const struct rte_flow_item_gre *m = item->mask;
1953c55c2bf3SAlex Vesker 	struct mlx5dr_definer_fc *fc;
1954c55c2bf3SAlex Vesker 	bool inner = cd->tunnel;
1955c55c2bf3SAlex Vesker 
1956c55c2bf3SAlex Vesker 	if (inner) {
1957c55c2bf3SAlex Vesker 		DR_LOG(ERR, "Inner GRE item not supported");
1958c55c2bf3SAlex Vesker 		rte_errno = ENOTSUP;
1959c55c2bf3SAlex Vesker 		return rte_errno;
1960c55c2bf3SAlex Vesker 	}
1961c55c2bf3SAlex Vesker 
1962c55c2bf3SAlex Vesker 	if (!cd->relaxed) {
1963c55c2bf3SAlex Vesker 		fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)];
1964c55c2bf3SAlex Vesker 		fc->item_idx = item_idx;
1965c55c2bf3SAlex Vesker 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
1966c55c2bf3SAlex Vesker 		fc->tag_set = &mlx5dr_definer_ipv4_protocol_gre_set;
1967c55c2bf3SAlex Vesker 		DR_CALC_SET(fc, eth_l3, protocol_next_header, inner);
1968c55c2bf3SAlex Vesker 	}
1969c55c2bf3SAlex Vesker 
1970c55c2bf3SAlex Vesker 	if (!m)
1971c55c2bf3SAlex Vesker 		return 0;
1972c55c2bf3SAlex Vesker 
1973c55c2bf3SAlex Vesker 	if (m->c_rsvd0_ver) {
1974c55c2bf3SAlex Vesker 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GRE_C_VER];
1975c55c2bf3SAlex Vesker 		fc->item_idx = item_idx;
1976c55c2bf3SAlex Vesker 		fc->tag_set = &mlx5dr_definer_gre_c_ver_set;
1977c55c2bf3SAlex Vesker 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_0);
1978c55c2bf3SAlex Vesker 		fc->bit_mask = __mlx5_mask(header_gre, c_rsvd0_ver);
1979c55c2bf3SAlex Vesker 		fc->bit_off = __mlx5_dw_bit_off(header_gre, c_rsvd0_ver);
1980c55c2bf3SAlex Vesker 	}
1981c55c2bf3SAlex Vesker 
1982c55c2bf3SAlex Vesker 	if (m->protocol) {
1983c55c2bf3SAlex Vesker 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GRE_PROTOCOL];
1984c55c2bf3SAlex Vesker 		fc->item_idx = item_idx;
1985c55c2bf3SAlex Vesker 		fc->tag_set = &mlx5dr_definer_gre_protocol_type_set;
1986c55c2bf3SAlex Vesker 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_0);
1987c55c2bf3SAlex Vesker 		fc->byte_off += MLX5_BYTE_OFF(header_gre, gre_protocol);
1988c55c2bf3SAlex Vesker 		fc->bit_mask = __mlx5_mask(header_gre, gre_protocol);
1989c55c2bf3SAlex Vesker 		fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_protocol);
1990c55c2bf3SAlex Vesker 	}
1991c55c2bf3SAlex Vesker 
1992c55c2bf3SAlex Vesker 	return 0;
1993c55c2bf3SAlex Vesker }
1994c55c2bf3SAlex Vesker 
1995c55c2bf3SAlex Vesker static int
1996c55c2bf3SAlex Vesker mlx5dr_definer_conv_item_gre_opt(struct mlx5dr_definer_conv_data *cd,
1997c55c2bf3SAlex Vesker 				 struct rte_flow_item *item,
1998c55c2bf3SAlex Vesker 				 int item_idx)
1999c55c2bf3SAlex Vesker {
2000c55c2bf3SAlex Vesker 	const struct rte_flow_item_gre_opt *m = item->mask;
2001c55c2bf3SAlex Vesker 	struct mlx5dr_definer_fc *fc;
2002c55c2bf3SAlex Vesker 
2003c55c2bf3SAlex Vesker 	if (!cd->relaxed) {
2004c55c2bf3SAlex Vesker 		fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, false)];
2005c55c2bf3SAlex Vesker 		if (!fc->tag_set) {
2006c55c2bf3SAlex Vesker 			fc->item_idx = item_idx;
2007c55c2bf3SAlex Vesker 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
2008c55c2bf3SAlex Vesker 			fc->tag_set = &mlx5dr_definer_ipv4_protocol_gre_set;
2009c55c2bf3SAlex Vesker 			DR_CALC_SET(fc, eth_l3, protocol_next_header, false);
2010c55c2bf3SAlex Vesker 		}
2011c55c2bf3SAlex Vesker 	}
2012c55c2bf3SAlex Vesker 
2013c55c2bf3SAlex Vesker 	if (!m)
2014c55c2bf3SAlex Vesker 		return 0;
2015c55c2bf3SAlex Vesker 
2016c55c2bf3SAlex Vesker 	if (m->checksum_rsvd.checksum) {
2017c55c2bf3SAlex Vesker 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GRE_OPT_CHECKSUM];
2018c55c2bf3SAlex Vesker 		fc->item_idx = item_idx;
2019c55c2bf3SAlex Vesker 		fc->tag_set = &mlx5dr_definer_gre_opt_checksum_set;
2020c55c2bf3SAlex Vesker 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_1);
2021c55c2bf3SAlex Vesker 	}
2022c55c2bf3SAlex Vesker 
2023c55c2bf3SAlex Vesker 	if (m->key.key) {
2024c55c2bf3SAlex Vesker 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GRE_OPT_KEY];
2025c55c2bf3SAlex Vesker 		fc->item_idx = item_idx;
2026c55c2bf3SAlex Vesker 		fc->tag_set = &mlx5dr_definer_gre_opt_key_set;
2027c55c2bf3SAlex Vesker 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_2);
2028c55c2bf3SAlex Vesker 	}
2029c55c2bf3SAlex Vesker 
2030c55c2bf3SAlex Vesker 	if (m->sequence.sequence) {
2031c55c2bf3SAlex Vesker 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GRE_OPT_SEQ];
2032c55c2bf3SAlex Vesker 		fc->item_idx = item_idx;
2033c55c2bf3SAlex Vesker 		fc->tag_set = &mlx5dr_definer_gre_opt_seq_set;
2034c55c2bf3SAlex Vesker 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_3);
2035c55c2bf3SAlex Vesker 	}
2036c55c2bf3SAlex Vesker 
2037c55c2bf3SAlex Vesker 	return 0;
2038c55c2bf3SAlex Vesker }
2039c55c2bf3SAlex Vesker 
2040c55c2bf3SAlex Vesker static int
2041c55c2bf3SAlex Vesker mlx5dr_definer_conv_item_gre_key(struct mlx5dr_definer_conv_data *cd,
2042c55c2bf3SAlex Vesker 				 struct rte_flow_item *item,
2043c55c2bf3SAlex Vesker 				 int item_idx)
2044c55c2bf3SAlex Vesker {
2045c55c2bf3SAlex Vesker 	const rte_be32_t *m = item->mask;
2046c55c2bf3SAlex Vesker 	struct mlx5dr_definer_fc *fc;
2047c55c2bf3SAlex Vesker 
2048c55c2bf3SAlex Vesker 	if (!cd->relaxed) {
2049c55c2bf3SAlex Vesker 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GRE_KEY_PRESENT];
2050c55c2bf3SAlex Vesker 		fc->item_idx = item_idx;
2051c55c2bf3SAlex Vesker 		fc->tag_set = &mlx5dr_definer_ones_set;
2052c55c2bf3SAlex Vesker 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_0);
2053c55c2bf3SAlex Vesker 		fc->bit_mask = __mlx5_mask(header_gre, gre_k_present);
2054c55c2bf3SAlex Vesker 		fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_k_present);
2055c55c2bf3SAlex Vesker 
2056c55c2bf3SAlex Vesker 		fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, false)];
2057c55c2bf3SAlex Vesker 		if (!fc->tag_set) {
2058c55c2bf3SAlex Vesker 			fc->item_idx = item_idx;
2059c55c2bf3SAlex Vesker 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
2060c55c2bf3SAlex Vesker 			fc->tag_set = &mlx5dr_definer_ipv4_protocol_gre_set;
2061c55c2bf3SAlex Vesker 			DR_CALC_SET(fc, eth_l3, protocol_next_header, false);
2062c55c2bf3SAlex Vesker 		}
2063c55c2bf3SAlex Vesker 	}
2064c55c2bf3SAlex Vesker 
2065c55c2bf3SAlex Vesker 	if (!m)
2066c55c2bf3SAlex Vesker 		return 0;
2067c55c2bf3SAlex Vesker 
2068c55c2bf3SAlex Vesker 	if (*m) {
2069c55c2bf3SAlex Vesker 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GRE_OPT_KEY];
2070c55c2bf3SAlex Vesker 		fc->item_idx = item_idx;
2071c55c2bf3SAlex Vesker 		fc->tag_set = &mlx5dr_definer_gre_key_set;
2072c55c2bf3SAlex Vesker 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_2);
2073c55c2bf3SAlex Vesker 	}
2074c55c2bf3SAlex Vesker 
2075c55c2bf3SAlex Vesker 	return 0;
2076c55c2bf3SAlex Vesker }
2077c55c2bf3SAlex Vesker 
2078c55c2bf3SAlex Vesker static int
20796c299801SDong Zhou mlx5dr_definer_conv_item_nvgre(struct mlx5dr_definer_conv_data *cd,
20806c299801SDong Zhou 				struct rte_flow_item *item,
20816c299801SDong Zhou 				int item_idx)
20826c299801SDong Zhou {
20836c299801SDong Zhou 	const struct rte_flow_item_nvgre *m = item->mask;
20846c299801SDong Zhou 	struct mlx5dr_definer_fc *fc;
20856c299801SDong Zhou 	bool inner = cd->tunnel;
20866c299801SDong Zhou 
20876c299801SDong Zhou 	if (inner) {
20886c299801SDong Zhou 		DR_LOG(ERR, "Inner gre item not supported");
20896c299801SDong Zhou 		rte_errno = ENOTSUP;
20906c299801SDong Zhou 		return rte_errno;
20916c299801SDong Zhou 	}
20926c299801SDong Zhou 
20936c299801SDong Zhou 	if (!cd->relaxed) {
20946c299801SDong Zhou 		fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)];
20956c299801SDong Zhou 		if (!fc->tag_set) {
20966c299801SDong Zhou 			fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)];
20976c299801SDong Zhou 			fc->item_idx = item_idx;
20986c299801SDong Zhou 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
20996c299801SDong Zhou 			fc->tag_set = &mlx5dr_definer_ipv4_protocol_gre_set;
21006c299801SDong Zhou 			DR_CALC_SET(fc, eth_l3, protocol_next_header, inner);
21016c299801SDong Zhou 		}
21026c299801SDong Zhou 
21036c299801SDong Zhou 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_NVGRE_C_K_S];
21046c299801SDong Zhou 		fc->item_idx = item_idx;
21056c299801SDong Zhou 		fc->tag_set = &mlx5dr_definer_nvgre_def_c_rsvd0_ver_set;
21066c299801SDong Zhou 		fc->tag_mask_set = &mlx5dr_definer_nvgre_def_c_rsvd0_ver_mask_set;
21076c299801SDong Zhou 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_0);
21086c299801SDong Zhou 		fc->bit_mask = __mlx5_mask(header_gre, c_rsvd0_ver);
21096c299801SDong Zhou 		fc->bit_off = __mlx5_dw_bit_off(header_gre, c_rsvd0_ver);
21106c299801SDong Zhou 
21116c299801SDong Zhou 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_NVGRE_PROTOCOL];
21126c299801SDong Zhou 		fc->item_idx = item_idx;
21136c299801SDong Zhou 		fc->tag_set = &mlx5dr_definer_nvgre_def_protocol_set;
21146c299801SDong Zhou 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
21156c299801SDong Zhou 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_0);
21166c299801SDong Zhou 		fc->byte_off += MLX5_BYTE_OFF(header_gre, gre_protocol);
21176c299801SDong Zhou 		fc->bit_mask = __mlx5_mask(header_gre, gre_protocol);
21186c299801SDong Zhou 		fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_protocol);
21196c299801SDong Zhou 	}
21206c299801SDong Zhou 
21216c299801SDong Zhou 	if (!m)
21226c299801SDong Zhou 		return 0;
21236c299801SDong Zhou 
21246c299801SDong Zhou 	if (m->c_k_s_rsvd0_ver) {
21256c299801SDong Zhou 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_NVGRE_C_K_S];
21266c299801SDong Zhou 		fc->item_idx = item_idx;
21276c299801SDong Zhou 		fc->tag_set = &mlx5dr_definer_nvgre_c_rsvd0_ver_set;
21286c299801SDong Zhou 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_0);
21296c299801SDong Zhou 		fc->bit_mask = __mlx5_mask(header_gre, c_rsvd0_ver);
21306c299801SDong Zhou 		fc->bit_off = __mlx5_dw_bit_off(header_gre, c_rsvd0_ver);
21316c299801SDong Zhou 	}
21326c299801SDong Zhou 
21336c299801SDong Zhou 	if (m->protocol) {
21346c299801SDong Zhou 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_NVGRE_PROTOCOL];
21356c299801SDong Zhou 		fc->item_idx = item_idx;
21366c299801SDong Zhou 		fc->tag_set = &mlx5dr_definer_nvgre_protocol_set;
21376c299801SDong Zhou 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_0);
21386c299801SDong Zhou 		fc->byte_off += MLX5_BYTE_OFF(header_gre, gre_protocol);
21396c299801SDong Zhou 		fc->bit_mask = __mlx5_mask(header_gre, gre_protocol);
21406c299801SDong Zhou 		fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_protocol);
21416c299801SDong Zhou 	}
21426c299801SDong Zhou 
21436c299801SDong Zhou 	if (!is_mem_zero(m->tni, 4)) {
21446c299801SDong Zhou 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_NVGRE_DW1];
21456c299801SDong Zhou 		fc->item_idx = item_idx;
21466c299801SDong Zhou 		fc->tag_set = &mlx5dr_definer_nvgre_dw1_set;
21476c299801SDong Zhou 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_2);
21486c299801SDong Zhou 	}
21496c299801SDong Zhou 	return 0;
21506c299801SDong Zhou }
21516c299801SDong Zhou 
21526c299801SDong Zhou static int
2153ad17988aSAlexander Kozyrev mlx5dr_definer_conv_item_ptype(struct mlx5dr_definer_conv_data *cd,
2154ad17988aSAlexander Kozyrev 			       struct rte_flow_item *item,
2155ad17988aSAlexander Kozyrev 			       int item_idx)
2156ad17988aSAlexander Kozyrev {
2157ad17988aSAlexander Kozyrev 	const struct rte_flow_item_ptype *m = item->mask;
2158ad17988aSAlexander Kozyrev 	struct mlx5dr_definer_fc *fc;
2159ad17988aSAlexander Kozyrev 
2160ad17988aSAlexander Kozyrev 	if (!m)
2161ad17988aSAlexander Kozyrev 		return 0;
2162ad17988aSAlexander Kozyrev 
2163ad17988aSAlexander Kozyrev 	if (!(m->packet_type &
2164ad17988aSAlexander Kozyrev 	      (RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK | RTE_PTYPE_L4_MASK | RTE_PTYPE_TUNNEL_MASK |
2165ad17988aSAlexander Kozyrev 	       RTE_PTYPE_INNER_L2_MASK | RTE_PTYPE_INNER_L3_MASK | RTE_PTYPE_INNER_L4_MASK))) {
2166ad17988aSAlexander Kozyrev 		rte_errno = ENOTSUP;
2167ad17988aSAlexander Kozyrev 		return rte_errno;
2168ad17988aSAlexander Kozyrev 	}
2169ad17988aSAlexander Kozyrev 
2170ad17988aSAlexander Kozyrev 	if (m->packet_type & RTE_PTYPE_L2_MASK) {
2171ad17988aSAlexander Kozyrev 		fc = &cd->fc[DR_CALC_FNAME(PTYPE_L2, false)];
2172ad17988aSAlexander Kozyrev 		fc->item_idx = item_idx;
2173ad17988aSAlexander Kozyrev 		fc->tag_set = &mlx5dr_definer_ptype_l2_set;
2174ad17988aSAlexander Kozyrev 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
2175ad17988aSAlexander Kozyrev 		DR_CALC_SET(fc, eth_l2, first_vlan_qualifier, false);
2176ad17988aSAlexander Kozyrev 	}
2177ad17988aSAlexander Kozyrev 
2178ad17988aSAlexander Kozyrev 	if (m->packet_type & RTE_PTYPE_INNER_L2_MASK) {
2179ad17988aSAlexander Kozyrev 		fc = &cd->fc[DR_CALC_FNAME(PTYPE_L2, true)];
2180ad17988aSAlexander Kozyrev 		fc->item_idx = item_idx;
2181ad17988aSAlexander Kozyrev 		fc->tag_set = &mlx5dr_definer_ptype_l2_set;
2182ad17988aSAlexander Kozyrev 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
2183ad17988aSAlexander Kozyrev 		DR_CALC_SET(fc, eth_l2, first_vlan_qualifier, true);
2184ad17988aSAlexander Kozyrev 	}
2185ad17988aSAlexander Kozyrev 
2186ad17988aSAlexander Kozyrev 	if (m->packet_type & RTE_PTYPE_L3_MASK) {
2187ad17988aSAlexander Kozyrev 		fc = &cd->fc[DR_CALC_FNAME(PTYPE_L3, false)];
2188ad17988aSAlexander Kozyrev 		fc->item_idx = item_idx;
2189ad17988aSAlexander Kozyrev 		fc->tag_set = &mlx5dr_definer_ptype_l3_set;
2190ad17988aSAlexander Kozyrev 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
2191ad17988aSAlexander Kozyrev 		DR_CALC_SET(fc, eth_l2, l3_type, false);
2192ad17988aSAlexander Kozyrev 	}
2193ad17988aSAlexander Kozyrev 
2194ad17988aSAlexander Kozyrev 	if (m->packet_type & RTE_PTYPE_INNER_L3_MASK) {
2195ad17988aSAlexander Kozyrev 		fc = &cd->fc[DR_CALC_FNAME(PTYPE_L3, true)];
2196ad17988aSAlexander Kozyrev 		fc->item_idx = item_idx;
2197ad17988aSAlexander Kozyrev 		fc->tag_set = &mlx5dr_definer_ptype_l3_set;
2198ad17988aSAlexander Kozyrev 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
2199ad17988aSAlexander Kozyrev 		DR_CALC_SET(fc, eth_l2, l3_type, true);
2200ad17988aSAlexander Kozyrev 	}
2201ad17988aSAlexander Kozyrev 
2202ad17988aSAlexander Kozyrev 	if (m->packet_type & RTE_PTYPE_L4_MASK) {
2203761439a2SAlexander Kozyrev 		/*
2204761439a2SAlexander Kozyrev 		 * Fragmented IP (Internet Protocol) packet type.
2205761439a2SAlexander Kozyrev 		 * Cannot be combined with Layer 4 Types (TCP/UDP).
2206761439a2SAlexander Kozyrev 		 * The exact value must be specified in the mask.
2207761439a2SAlexander Kozyrev 		 */
2208761439a2SAlexander Kozyrev 		if (m->packet_type == RTE_PTYPE_L4_FRAG) {
2209761439a2SAlexander Kozyrev 			fc = &cd->fc[DR_CALC_FNAME(PTYPE_FRAG, false)];
2210761439a2SAlexander Kozyrev 			fc->item_idx = item_idx;
2211761439a2SAlexander Kozyrev 			fc->tag_set = &mlx5dr_definer_ptype_frag_set;
2212761439a2SAlexander Kozyrev 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
2213761439a2SAlexander Kozyrev 			DR_CALC_SET(fc, eth_l2, ip_fragmented, false);
2214761439a2SAlexander Kozyrev 		} else {
2215ad17988aSAlexander Kozyrev 			fc = &cd->fc[DR_CALC_FNAME(PTYPE_L4, false)];
2216ad17988aSAlexander Kozyrev 			fc->item_idx = item_idx;
2217ad17988aSAlexander Kozyrev 			fc->tag_set = &mlx5dr_definer_ptype_l4_set;
2218ad17988aSAlexander Kozyrev 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
2219*a3711190SAlexander Kozyrev 			DR_CALC_SET(fc, eth_l2, l4_type_bwc, false);
2220*a3711190SAlexander Kozyrev 
2221*a3711190SAlexander Kozyrev 			fc = &cd->fc[DR_CALC_FNAME(PTYPE_L4_EXT, false)];
2222*a3711190SAlexander Kozyrev 			fc->item_idx = item_idx;
2223*a3711190SAlexander Kozyrev 			fc->tag_set = &mlx5dr_definer_ptype_l4_ext_set;
2224*a3711190SAlexander Kozyrev 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
2225ad17988aSAlexander Kozyrev 			DR_CALC_SET(fc, eth_l2, l4_type, false);
2226ad17988aSAlexander Kozyrev 		}
2227761439a2SAlexander Kozyrev 	}
2228ad17988aSAlexander Kozyrev 
2229ad17988aSAlexander Kozyrev 	if (m->packet_type & RTE_PTYPE_INNER_L4_MASK) {
2230761439a2SAlexander Kozyrev 		if (m->packet_type == RTE_PTYPE_INNER_L4_FRAG) {
2231761439a2SAlexander Kozyrev 			fc = &cd->fc[DR_CALC_FNAME(PTYPE_FRAG, true)];
2232761439a2SAlexander Kozyrev 			fc->item_idx = item_idx;
2233761439a2SAlexander Kozyrev 			fc->tag_set = &mlx5dr_definer_ptype_frag_set;
2234761439a2SAlexander Kozyrev 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
2235761439a2SAlexander Kozyrev 			DR_CALC_SET(fc, eth_l2, ip_fragmented, true);
2236761439a2SAlexander Kozyrev 		} else {
2237ad17988aSAlexander Kozyrev 			fc = &cd->fc[DR_CALC_FNAME(PTYPE_L4, true)];
2238ad17988aSAlexander Kozyrev 			fc->item_idx = item_idx;
2239ad17988aSAlexander Kozyrev 			fc->tag_set = &mlx5dr_definer_ptype_l4_set;
2240ad17988aSAlexander Kozyrev 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
2241*a3711190SAlexander Kozyrev 			DR_CALC_SET(fc, eth_l2, l4_type_bwc, true);
2242*a3711190SAlexander Kozyrev 
2243*a3711190SAlexander Kozyrev 			fc = &cd->fc[DR_CALC_FNAME(PTYPE_L4_EXT, true)];
2244*a3711190SAlexander Kozyrev 			fc->item_idx = item_idx;
2245*a3711190SAlexander Kozyrev 			fc->tag_set = &mlx5dr_definer_ptype_l4_ext_set;
2246*a3711190SAlexander Kozyrev 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
2247ad17988aSAlexander Kozyrev 			DR_CALC_SET(fc, eth_l2, l4_type, true);
2248ad17988aSAlexander Kozyrev 		}
2249761439a2SAlexander Kozyrev 	}
2250ad17988aSAlexander Kozyrev 
2251ad17988aSAlexander Kozyrev 	if (m->packet_type & RTE_PTYPE_TUNNEL_MASK) {
2252ad17988aSAlexander Kozyrev 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_PTYPE_TUNNEL];
2253ad17988aSAlexander Kozyrev 		fc->item_idx = item_idx;
2254ad17988aSAlexander Kozyrev 		fc->tag_set = &mlx5dr_definer_ptype_tunnel_set;
2255ad17988aSAlexander Kozyrev 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
2256ad17988aSAlexander Kozyrev 		DR_CALC_SET(fc, eth_l2, l4_type_bwc, false);
2257ad17988aSAlexander Kozyrev 	}
2258ad17988aSAlexander Kozyrev 
2259ad17988aSAlexander Kozyrev 	return 0;
2260ad17988aSAlexander Kozyrev }
2261ad17988aSAlexander Kozyrev 
2262ad17988aSAlexander Kozyrev static int
2263c55c2bf3SAlex Vesker mlx5dr_definer_conv_item_integrity(struct mlx5dr_definer_conv_data *cd,
2264c55c2bf3SAlex Vesker 				   struct rte_flow_item *item,
2265c55c2bf3SAlex Vesker 				   int item_idx)
2266c55c2bf3SAlex Vesker {
2267c55c2bf3SAlex Vesker 	const struct rte_flow_item_integrity *m = item->mask;
2268c55c2bf3SAlex Vesker 	struct mlx5dr_definer_fc *fc;
2269c55c2bf3SAlex Vesker 
2270c55c2bf3SAlex Vesker 	if (!m)
2271c55c2bf3SAlex Vesker 		return 0;
2272c55c2bf3SAlex Vesker 
2273c55c2bf3SAlex Vesker 	if (m->packet_ok || m->l2_ok || m->l2_crc_ok || m->l3_len_ok) {
2274c55c2bf3SAlex Vesker 		rte_errno = ENOTSUP;
2275c55c2bf3SAlex Vesker 		return rte_errno;
2276c55c2bf3SAlex Vesker 	}
2277c55c2bf3SAlex Vesker 
2278c55c2bf3SAlex Vesker 	if (m->l3_ok || m->ipv4_csum_ok || m->l4_ok || m->l4_csum_ok) {
22794c4e04d4SAlexander Kozyrev 		fc = &cd->fc[DR_CALC_FNAME(INTEGRITY, m->level)];
2280c55c2bf3SAlex Vesker 		fc->item_idx = item_idx;
2281c55c2bf3SAlex Vesker 		fc->tag_set = &mlx5dr_definer_integrity_set;
2282c55c2bf3SAlex Vesker 		DR_CALC_SET_HDR(fc, oks1, oks1_bits);
2283c55c2bf3SAlex Vesker 	}
2284c55c2bf3SAlex Vesker 
2285c55c2bf3SAlex Vesker 	return 0;
2286c55c2bf3SAlex Vesker }
2287c55c2bf3SAlex Vesker 
/* Convert an RTE_FLOW_ITEM_TYPE_CONNTRACK item into a definer field-copy
 * entry. Conntrack state is carried in a metadata register, so the register
 * id is resolved from the context and the conntrack tag/mask setters are
 * bound to that register's field-copy slot.
 * Returns 0 on success (also when the item mask is NULL, i.e. match-any),
 * or rte_errno (EINVAL) when no valid register is available.
 */
2288c55c2bf3SAlex Vesker static int
2289c55c2bf3SAlex Vesker mlx5dr_definer_conv_item_conntrack(struct mlx5dr_definer_conv_data *cd,
2290c55c2bf3SAlex Vesker 				   struct rte_flow_item *item,
2291c55c2bf3SAlex Vesker 				   int item_idx)
2292c55c2bf3SAlex Vesker {
2293c55c2bf3SAlex Vesker 	const struct rte_flow_item_conntrack *m = item->mask;
2294c55c2bf3SAlex Vesker 	struct mlx5dr_definer_fc *fc;
2295c55c2bf3SAlex Vesker 	int reg;
2296c55c2bf3SAlex Vesker 
2297c55c2bf3SAlex Vesker 	if (!m)
2298c55c2bf3SAlex Vesker 		return 0;
2299c55c2bf3SAlex Vesker 
230010943706SMichael Baum 	reg = flow_hw_get_reg_id_from_ctx(cd->ctx,
230110943706SMichael Baum 					  RTE_FLOW_ITEM_TYPE_CONNTRACK,
23022b45a773SMichael Baum 					  cd->table_type, -1);
2303c55c2bf3SAlex Vesker 	if (reg <= 0) {
2304c55c2bf3SAlex Vesker 		DR_LOG(ERR, "Invalid register for item conntrack");
2305c55c2bf3SAlex Vesker 		rte_errno = EINVAL;
2306c55c2bf3SAlex Vesker 		return rte_errno;
2307c55c2bf3SAlex Vesker 	}
2308c55c2bf3SAlex Vesker 
2309c55c2bf3SAlex Vesker 	fc = mlx5dr_definer_get_register_fc(cd, reg);
2310c55c2bf3SAlex Vesker 	if (!fc)
2311c55c2bf3SAlex Vesker 		return rte_errno;
2312c55c2bf3SAlex Vesker 
2313c55c2bf3SAlex Vesker 	fc->item_idx = item_idx;
2314c55c2bf3SAlex Vesker 	fc->tag_mask_set = &mlx5dr_definer_conntrack_mask;
2315c55c2bf3SAlex Vesker 	fc->tag_set = &mlx5dr_definer_conntrack_tag;
2316c55c2bf3SAlex Vesker 
2317c55c2bf3SAlex Vesker 	return 0;
2318c55c2bf3SAlex Vesker }
2319c55c2bf3SAlex Vesker 
/* Convert an RTE_FLOW_ITEM_TYPE_ICMP (ICMPv4) item into definer field-copy
 * entries. Unless the template is in relaxed mode, the L4 type field is
 * forced to ICMP so the match is anchored to the right protocol even when
 * the item mask is empty. type/code/checksum map to icmp_dw1 and
 * ident/seq_nb map to icmp_dw2 of the tcp_icmp header layout.
 * Always returns 0.
 */
2320c55c2bf3SAlex Vesker static int
2321c55c2bf3SAlex Vesker mlx5dr_definer_conv_item_icmp(struct mlx5dr_definer_conv_data *cd,
2322c55c2bf3SAlex Vesker 			      struct rte_flow_item *item,
2323c55c2bf3SAlex Vesker 			      int item_idx)
2324c55c2bf3SAlex Vesker {
2325c55c2bf3SAlex Vesker 	const struct rte_flow_item_icmp *m = item->mask;
2326c55c2bf3SAlex Vesker 	struct mlx5dr_definer_fc *fc;
2327c55c2bf3SAlex Vesker 	bool inner = cd->tunnel;
2328c55c2bf3SAlex Vesker 
2329c55c2bf3SAlex Vesker 	/* Overwrite match on L4 type ICMP */
2330c55c2bf3SAlex Vesker 	if (!cd->relaxed) {
2331c55c2bf3SAlex Vesker 		fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)];
2332c55c2bf3SAlex Vesker 		fc->item_idx = item_idx;
2333c55c2bf3SAlex Vesker 		fc->tag_set = &mlx5dr_definer_icmp_protocol_set;
2334c55c2bf3SAlex Vesker 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
2335c55c2bf3SAlex Vesker 		DR_CALC_SET(fc, eth_l2, l4_type, inner);
2336c55c2bf3SAlex Vesker 	}
2337c55c2bf3SAlex Vesker 
2338c55c2bf3SAlex Vesker 	if (!m)
2339c55c2bf3SAlex Vesker 		return 0;
2340c55c2bf3SAlex Vesker 
2341c55c2bf3SAlex Vesker 	if (m->hdr.icmp_type || m->hdr.icmp_code || m->hdr.icmp_cksum) {
2342c55c2bf3SAlex Vesker 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_ICMP_DW1];
2343c55c2bf3SAlex Vesker 		fc->item_idx = item_idx;
2344c55c2bf3SAlex Vesker 		fc->tag_set = &mlx5dr_definer_icmp_dw1_set;
2345c55c2bf3SAlex Vesker 		DR_CALC_SET_HDR(fc, tcp_icmp, icmp_dw1);
2346c55c2bf3SAlex Vesker 	}
2347c55c2bf3SAlex Vesker 
2348c55c2bf3SAlex Vesker 	if (m->hdr.icmp_ident || m->hdr.icmp_seq_nb) {
2349c55c2bf3SAlex Vesker 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_ICMP_DW2];
2350c55c2bf3SAlex Vesker 		fc->item_idx = item_idx;
2351c55c2bf3SAlex Vesker 		fc->tag_set = &mlx5dr_definer_icmp_dw2_set;
2352c55c2bf3SAlex Vesker 		DR_CALC_SET_HDR(fc, tcp_icmp, icmp_dw2);
2353c55c2bf3SAlex Vesker 	}
2354c55c2bf3SAlex Vesker 
2355c55c2bf3SAlex Vesker 	return 0;
2356c55c2bf3SAlex Vesker }
2357c55c2bf3SAlex Vesker 
/* Convert an RTE_FLOW_ITEM_TYPE_ICMP6 item into definer field-copy entries.
 * Mirrors the ICMPv4 conversion: in non-relaxed mode the L4 type is forced
 * to ICMP, and type/code/checksum are matched through icmp_dw1. ICMPv6 has
 * no ident/seq fields in this item, so only DW1 is populated.
 * Always returns 0.
 */
2358c55c2bf3SAlex Vesker static int
2359c55c2bf3SAlex Vesker mlx5dr_definer_conv_item_icmp6(struct mlx5dr_definer_conv_data *cd,
2360c55c2bf3SAlex Vesker 			       struct rte_flow_item *item,
2361c55c2bf3SAlex Vesker 			       int item_idx)
2362c55c2bf3SAlex Vesker {
2363c55c2bf3SAlex Vesker 	const struct rte_flow_item_icmp6 *m = item->mask;
2364c55c2bf3SAlex Vesker 	struct mlx5dr_definer_fc *fc;
2365c55c2bf3SAlex Vesker 	bool inner = cd->tunnel;
2366c55c2bf3SAlex Vesker 
2367c55c2bf3SAlex Vesker 	/* Overwrite match on L4 type ICMP6 */
2368c55c2bf3SAlex Vesker 	if (!cd->relaxed) {
2369c55c2bf3SAlex Vesker 		fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)];
2370c55c2bf3SAlex Vesker 		fc->item_idx = item_idx;
2371c55c2bf3SAlex Vesker 		fc->tag_set = &mlx5dr_definer_icmp_protocol_set;
2372c55c2bf3SAlex Vesker 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
2373c55c2bf3SAlex Vesker 		DR_CALC_SET(fc, eth_l2, l4_type, inner);
2374c55c2bf3SAlex Vesker 	}
2375c55c2bf3SAlex Vesker 
2376c55c2bf3SAlex Vesker 	if (!m)
2377c55c2bf3SAlex Vesker 		return 0;
2378c55c2bf3SAlex Vesker 
2379c55c2bf3SAlex Vesker 	if (m->type || m->code || m->checksum) {
2380c55c2bf3SAlex Vesker 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_ICMP_DW1];
2381c55c2bf3SAlex Vesker 		fc->item_idx = item_idx;
2382c55c2bf3SAlex Vesker 		fc->tag_set = &mlx5dr_definer_icmp6_dw1_set;
2383c55c2bf3SAlex Vesker 		DR_CALC_SET_HDR(fc, tcp_icmp, icmp_dw1);
2384c55c2bf3SAlex Vesker 	}
2385c55c2bf3SAlex Vesker 
2386c55c2bf3SAlex Vesker 	return 0;
2387c55c2bf3SAlex Vesker }
2388c55c2bf3SAlex Vesker 
2389c55c2bf3SAlex Vesker static int
239001314192SLeo Xu mlx5dr_definer_conv_item_icmp6_echo(struct mlx5dr_definer_conv_data *cd,
239101314192SLeo Xu 				    struct rte_flow_item *item,
239201314192SLeo Xu 				    int item_idx)
239301314192SLeo Xu {
239401314192SLeo Xu 	const struct rte_flow_item_icmp6_echo *m = item->mask;
239501314192SLeo Xu 	struct mlx5dr_definer_fc *fc;
239601314192SLeo Xu 	bool inner = cd->tunnel;
239701314192SLeo Xu 
239801314192SLeo Xu 	if (!cd->relaxed) {
239901314192SLeo Xu 		/* Overwrite match on L4 type ICMP6 */
240001314192SLeo Xu 		fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)];
240101314192SLeo Xu 		fc->item_idx = item_idx;
240201314192SLeo Xu 		fc->tag_set = &mlx5dr_definer_icmp_protocol_set;
240301314192SLeo Xu 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
240401314192SLeo Xu 		DR_CALC_SET(fc, eth_l2, l4_type, inner);
240501314192SLeo Xu 
240601314192SLeo Xu 		/* Set fixed type and code for icmp6 echo request/reply */
240701314192SLeo Xu 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_ICMP_DW1];
240801314192SLeo Xu 		fc->item_idx = item_idx;
240901314192SLeo Xu 		fc->tag_mask_set = &mlx5dr_definer_icmp6_echo_dw1_mask_set;
241001314192SLeo Xu 		if (item->type == RTE_FLOW_ITEM_TYPE_ICMP6_ECHO_REQUEST)
241101314192SLeo Xu 			fc->tag_set = &mlx5dr_definer_icmp6_echo_request_dw1_set;
241201314192SLeo Xu 		else /* RTE_FLOW_ITEM_TYPE_ICMP6_ECHO_REPLY */
241301314192SLeo Xu 			fc->tag_set = &mlx5dr_definer_icmp6_echo_reply_dw1_set;
241401314192SLeo Xu 		DR_CALC_SET_HDR(fc, tcp_icmp, icmp_dw1);
241501314192SLeo Xu 	}
241601314192SLeo Xu 
241701314192SLeo Xu 	if (!m)
241801314192SLeo Xu 		return 0;
241901314192SLeo Xu 
242001314192SLeo Xu 	/* Set identifier & sequence into icmp_dw2 */
242101314192SLeo Xu 	if (m->hdr.identifier || m->hdr.sequence) {
242201314192SLeo Xu 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_ICMP_DW2];
242301314192SLeo Xu 		fc->item_idx = item_idx;
242401314192SLeo Xu 		fc->tag_set = &mlx5dr_definer_icmp6_echo_dw2_set;
242501314192SLeo Xu 		DR_CALC_SET_HDR(fc, tcp_icmp, icmp_dw2);
242601314192SLeo Xu 	}
242701314192SLeo Xu 
242801314192SLeo Xu 	return 0;
242901314192SLeo Xu }
243001314192SLeo Xu 
/* Convert an RTE_FLOW_ITEM_TYPE_METER_COLOR item into a definer field-copy
 * entry. The meter color is carried in a metadata register resolved from
 * the context; the meter-color tag setter is bound to that register's slot.
 * Returns 0 on success (including NULL mask); rte_errno if the register
 * field-copy slot cannot be obtained.
 */
243001314192SLeo Xu static int
2431c55c2bf3SAlex Vesker mlx5dr_definer_conv_item_meter_color(struct mlx5dr_definer_conv_data *cd,
2432c55c2bf3SAlex Vesker 			     struct rte_flow_item *item,
2433c55c2bf3SAlex Vesker 			     int item_idx)
2434c55c2bf3SAlex Vesker {
2435c55c2bf3SAlex Vesker 	const struct rte_flow_item_meter_color *m = item->mask;
2436c55c2bf3SAlex Vesker 	struct mlx5dr_definer_fc *fc;
2437c55c2bf3SAlex Vesker 	int reg;
2438c55c2bf3SAlex Vesker 
2439c55c2bf3SAlex Vesker 	if (!m)
2440c55c2bf3SAlex Vesker 		return 0;
2441c55c2bf3SAlex Vesker 
244204e740e6SGregory Etelson 	reg = flow_hw_get_reg_id_from_ctx(cd->ctx,
244310943706SMichael Baum 					  RTE_FLOW_ITEM_TYPE_METER_COLOR,
24442b45a773SMichael Baum 					  cd->table_type, 0);
/* NOTE(review): unlike the conntrack conversion, an invalid register here is
 * only caught by this debug-build assert — presumably the register is always
 * reserved for meter color before templates are created; confirm.
 */
2445c55c2bf3SAlex Vesker 	MLX5_ASSERT(reg > 0);
2446c55c2bf3SAlex Vesker 
2447c55c2bf3SAlex Vesker 	fc = mlx5dr_definer_get_register_fc(cd, reg);
2448c55c2bf3SAlex Vesker 	if (!fc)
2449c55c2bf3SAlex Vesker 		return rte_errno;
2450c55c2bf3SAlex Vesker 
2451c55c2bf3SAlex Vesker 	fc->item_idx = item_idx;
2452c55c2bf3SAlex Vesker 	fc->tag_set = &mlx5dr_definer_meter_color_set;
2453c55c2bf3SAlex Vesker 	return 0;
2454c55c2bf3SAlex Vesker }
2456c55c2bf3SAlex Vesker 
/* Map a header-layout byte offset to its flex-parser field-copy slot.
 * Flex parser DWs 0..7 occupy a contiguous range in the definer header
 * layout with flex_parser_0 at the highest byte offset, so the slot index
 * is derived by walking down from the parser-0 offset in DW steps.
 * Returns the initialized field-copy entry (full-DW mask), or NULL with
 * rte_errno = EINVAL when byte_off falls outside the flex-parser range.
 */
245700e57916SRongwei Liu static struct mlx5dr_definer_fc *
245800e57916SRongwei Liu mlx5dr_definer_get_flex_parser_fc(struct mlx5dr_definer_conv_data *cd, uint32_t byte_off)
245900e57916SRongwei Liu {
246000e57916SRongwei Liu 	uint32_t byte_off_fp7 = MLX5_BYTE_OFF(definer_hl, flex_parser.flex_parser_7);
246100e57916SRongwei Liu 	uint32_t byte_off_fp0 = MLX5_BYTE_OFF(definer_hl, flex_parser.flex_parser_0);
246200e57916SRongwei Liu 	enum mlx5dr_definer_fname fname = MLX5DR_DEFINER_FNAME_FLEX_PARSER_0;
246300e57916SRongwei Liu 	struct mlx5dr_definer_fc *fc;
246400e57916SRongwei Liu 	uint32_t idx;
246500e57916SRongwei Liu 
246600e57916SRongwei Liu 	if (byte_off < byte_off_fp7 || byte_off > byte_off_fp0) {
246700e57916SRongwei Liu 		rte_errno = EINVAL;
246800e57916SRongwei Liu 		return NULL;
246900e57916SRongwei Liu 	}
247000e57916SRongwei Liu 	idx = (byte_off_fp0 - byte_off) / (sizeof(uint32_t));
247100e57916SRongwei Liu 	fname += (enum mlx5dr_definer_fname)idx;
247200e57916SRongwei Liu 	fc = &cd->fc[fname];
247300e57916SRongwei Liu 	fc->byte_off = byte_off;
247400e57916SRongwei Liu 	fc->bit_mask = UINT32_MAX;
247500e57916SRongwei Liu 	return fc;
247600e57916SRongwei Liu }
247700e57916SRongwei Liu 
/* Convert an RTE_FLOW_ITEM_TYPE_IPV6_ROUTING_EXT (SRv6 routing header)
 * item. Non-relaxed mode forces the L3 type to IPv6, drops any previously
 * set ethertype match, and (if not already claimed) forces ip_protocol to
 * the routing-header value. Relaxed mode is rejected (ENOTSUP). Matching
 * on next_hdr/type/segments_left goes through the SRH flex parser DW
 * resolved from the context; hdr_len/flags matching is unsupported.
 * Returns 0 on success, rte_errno on failure.
 */
247800e57916SRongwei Liu static int
247900e57916SRongwei Liu mlx5dr_definer_conv_item_ipv6_routing_ext(struct mlx5dr_definer_conv_data *cd,
248000e57916SRongwei Liu 					  struct rte_flow_item *item,
248100e57916SRongwei Liu 					  int item_idx)
248200e57916SRongwei Liu {
248300e57916SRongwei Liu 	const struct rte_flow_item_ipv6_routing_ext *m = item->mask;
248400e57916SRongwei Liu 	struct mlx5dr_definer_fc *fc;
248500e57916SRongwei Liu 	bool inner = cd->tunnel;
248600e57916SRongwei Liu 	uint32_t byte_off;
248700e57916SRongwei Liu 
248800e57916SRongwei Liu 	if (!cd->relaxed) {
248900e57916SRongwei Liu 		fc = &cd->fc[DR_CALC_FNAME(IP_VERSION, inner)];
249000e57916SRongwei Liu 		fc->item_idx = item_idx;
249100e57916SRongwei Liu 		fc->tag_set = &mlx5dr_definer_ipv6_version_set;
249200e57916SRongwei Liu 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
249300e57916SRongwei Liu 		DR_CALC_SET(fc, eth_l2, l3_type, inner);
249400e57916SRongwei Liu 
249500e57916SRongwei Liu 		/* Overwrite - Unset ethertype if present */
249600e57916SRongwei Liu 		memset(&cd->fc[DR_CALC_FNAME(ETH_TYPE, inner)], 0, sizeof(*fc));
249700e57916SRongwei Liu 
249800e57916SRongwei Liu 		fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)];
249900e57916SRongwei Liu 		if (!fc->tag_set) {
250000e57916SRongwei Liu 			fc->item_idx = item_idx;
250100e57916SRongwei Liu 			fc->tag_set = &mlx5dr_definer_ipv6_routing_hdr_set;
250200e57916SRongwei Liu 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
25039cbf5076SRongwei Liu 			fc->not_overwrite = 1;
250400e57916SRongwei Liu 			DR_CALC_SET(fc, eth_l3, protocol_next_header, inner);
250500e57916SRongwei Liu 		}
2506972feabbSRongwei Liu 	} else {
2507972feabbSRongwei Liu 		rte_errno = ENOTSUP;
2508972feabbSRongwei Liu 		return rte_errno;
250900e57916SRongwei Liu 	}
251000e57916SRongwei Liu 
251100e57916SRongwei Liu 	if (!m)
251200e57916SRongwei Liu 		return 0;
251300e57916SRongwei Liu 
251400e57916SRongwei Liu 	if (m->hdr.hdr_len || m->hdr.flags) {
251500e57916SRongwei Liu 		rte_errno = ENOTSUP;
251600e57916SRongwei Liu 		return rte_errno;
251700e57916SRongwei Liu 	}
251800e57916SRongwei Liu 
251900e57916SRongwei Liu 	if (m->hdr.next_hdr || m->hdr.type || m->hdr.segments_left) {
252000e57916SRongwei Liu 		byte_off = flow_hw_get_srh_flex_parser_byte_off_from_ctx(cd->ctx);
252100e57916SRongwei Liu 		fc = mlx5dr_definer_get_flex_parser_fc(cd, byte_off);
252200e57916SRongwei Liu 		if (!fc)
252300e57916SRongwei Liu 			return rte_errno;
252400e57916SRongwei Liu 
252500e57916SRongwei Liu 		fc->item_idx = item_idx;
252600e57916SRongwei Liu 		fc->tag_set = &mlx5dr_definer_ipv6_routing_ext_set;
252700e57916SRongwei Liu 	}
252800e57916SRongwei Liu 	return 0;
252900e57916SRongwei Liu }
253000e57916SRongwei Liu 
/* Convert an RTE_FLOW_ITEM_TYPE_RANDOM item into a definer field-copy
 * entry over the hardware random-number field. Only the low 16 bits of
 * the mask value are valid (EINVAL otherwise). If item->last carries a
 * value, the entry is flagged as a range match.
 * Returns 0 on success, rte_errno on failure.
 */
2531c55c2bf3SAlex Vesker static int
2532fcd7b8c6SErez Shitrit mlx5dr_definer_conv_item_random(struct mlx5dr_definer_conv_data *cd,
2533fcd7b8c6SErez Shitrit 				struct rte_flow_item *item,
2534fcd7b8c6SErez Shitrit 				int item_idx)
2535fcd7b8c6SErez Shitrit {
2536fcd7b8c6SErez Shitrit 	const struct rte_flow_item_random *m = item->mask;
2537fcd7b8c6SErez Shitrit 	const struct rte_flow_item_random *l = item->last;
2538fcd7b8c6SErez Shitrit 	struct mlx5dr_definer_fc *fc;
2539fcd7b8c6SErez Shitrit 
2540fcd7b8c6SErez Shitrit 	if (!m)
2541fcd7b8c6SErez Shitrit 		return 0;
2542fcd7b8c6SErez Shitrit 
2543fcd7b8c6SErez Shitrit 	if (m->value != (m->value & UINT16_MAX)) {
2544fcd7b8c6SErez Shitrit 		DR_LOG(ERR, "Random value is 16 bits only");
2545fcd7b8c6SErez Shitrit 		rte_errno = EINVAL;
2546fcd7b8c6SErez Shitrit 		return rte_errno;
2547fcd7b8c6SErez Shitrit 	}
2548fcd7b8c6SErez Shitrit 
2549fcd7b8c6SErez Shitrit 	fc = &cd->fc[MLX5DR_DEFINER_FNAME_RANDOM_NUM];
2550fcd7b8c6SErez Shitrit 	fc->item_idx = item_idx;
2551fcd7b8c6SErez Shitrit 	fc->tag_set = &mlx5dr_definer_random_number_set;
2552fcd7b8c6SErez Shitrit 	fc->is_range = l && l->value;
2553fcd7b8c6SErez Shitrit 	DR_CALC_SET_HDR(fc, random_number, random_number);
2554fcd7b8c6SErez Shitrit 
2555fcd7b8c6SErez Shitrit 	return 0;
2556fcd7b8c6SErez Shitrit }
2557fcd7b8c6SErez Shitrit 
/* Convert an RTE_FLOW_ITEM_TYPE_GENEVE item into definer field-copy
 * entries. Inner GENEVE is rejected (ENOTSUP). In non-relaxed mode the
 * UDP protocol and GENEVE destination port are implied if not already
 * set by an earlier item. The ver/opt-len/O/C control bits and protocol
 * map into tunnel_header_0, and the 3-byte VNI into tunnel_header_1,
 * using the header_geneve bit-layout macros. rsvd1 matching is
 * unsupported. Returns 0 on success, rte_errno on failure.
 */
2558fcd7b8c6SErez Shitrit static int
255943b5adbaSAlex Vesker mlx5dr_definer_conv_item_geneve(struct mlx5dr_definer_conv_data *cd,
256043b5adbaSAlex Vesker 				struct rte_flow_item *item,
256143b5adbaSAlex Vesker 				int item_idx)
256243b5adbaSAlex Vesker {
256343b5adbaSAlex Vesker 	const struct rte_flow_item_geneve *m = item->mask;
256443b5adbaSAlex Vesker 	struct mlx5dr_definer_fc *fc;
256543b5adbaSAlex Vesker 	bool inner = cd->tunnel;
256643b5adbaSAlex Vesker 
256743b5adbaSAlex Vesker 	if (inner) {
256843b5adbaSAlex Vesker 		DR_LOG(ERR, "Inner GENEVE item not supported");
256943b5adbaSAlex Vesker 		rte_errno = ENOTSUP;
257043b5adbaSAlex Vesker 		return rte_errno;
257143b5adbaSAlex Vesker 	}
257243b5adbaSAlex Vesker 
257343b5adbaSAlex Vesker 	/* In order to match on Geneve we must match on ip_protocol and l4_dport */
257443b5adbaSAlex Vesker 	if (!cd->relaxed) {
257543b5adbaSAlex Vesker 		fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)];
257643b5adbaSAlex Vesker 		if (!fc->tag_set) {
257743b5adbaSAlex Vesker 			fc->item_idx = item_idx;
257843b5adbaSAlex Vesker 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
257943b5adbaSAlex Vesker 			fc->tag_set = &mlx5dr_definer_udp_protocol_set;
258043b5adbaSAlex Vesker 			DR_CALC_SET(fc, eth_l2, l4_type_bwc, inner);
258143b5adbaSAlex Vesker 		}
258243b5adbaSAlex Vesker 
258343b5adbaSAlex Vesker 		fc = &cd->fc[DR_CALC_FNAME(L4_DPORT, inner)];
258443b5adbaSAlex Vesker 		if (!fc->tag_set) {
258543b5adbaSAlex Vesker 			fc->item_idx = item_idx;
258643b5adbaSAlex Vesker 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
258743b5adbaSAlex Vesker 			fc->tag_set = &mlx5dr_definer_geneve_udp_port_set;
258843b5adbaSAlex Vesker 			DR_CALC_SET(fc, eth_l4, destination_port, inner);
258943b5adbaSAlex Vesker 		}
259043b5adbaSAlex Vesker 	}
259143b5adbaSAlex Vesker 
259243b5adbaSAlex Vesker 	if (!m)
259343b5adbaSAlex Vesker 		return 0;
259443b5adbaSAlex Vesker 
259543b5adbaSAlex Vesker 	if (m->rsvd1) {
259643b5adbaSAlex Vesker 		rte_errno = ENOTSUP;
259743b5adbaSAlex Vesker 		return rte_errno;
259843b5adbaSAlex Vesker 	}
259943b5adbaSAlex Vesker 
260043b5adbaSAlex Vesker 	if (m->ver_opt_len_o_c_rsvd0) {
260143b5adbaSAlex Vesker 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GENEVE_CTRL];
260243b5adbaSAlex Vesker 		fc->item_idx = item_idx;
260343b5adbaSAlex Vesker 		fc->tag_set = &mlx5dr_definer_geneve_ctrl_set;
260443b5adbaSAlex Vesker 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_0);
260543b5adbaSAlex Vesker 		fc->bit_mask = __mlx5_mask(header_geneve, ver_opt_len_o_c_rsvd);
260643b5adbaSAlex Vesker 		fc->bit_off = __mlx5_dw_bit_off(header_geneve, ver_opt_len_o_c_rsvd);
260743b5adbaSAlex Vesker 	}
260843b5adbaSAlex Vesker 
260943b5adbaSAlex Vesker 	if (m->protocol) {
261043b5adbaSAlex Vesker 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GENEVE_PROTO];
261143b5adbaSAlex Vesker 		fc->item_idx = item_idx;
261243b5adbaSAlex Vesker 		fc->tag_set = &mlx5dr_definer_geneve_protocol_set;
261343b5adbaSAlex Vesker 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_0);
261443b5adbaSAlex Vesker 		fc->byte_off += MLX5_BYTE_OFF(header_geneve, protocol_type);
261543b5adbaSAlex Vesker 		fc->bit_mask = __mlx5_mask(header_geneve, protocol_type);
261643b5adbaSAlex Vesker 		fc->bit_off = __mlx5_dw_bit_off(header_geneve, protocol_type);
261743b5adbaSAlex Vesker 	}
261843b5adbaSAlex Vesker 
261943b5adbaSAlex Vesker 	if (!is_mem_zero(m->vni, 3)) {
262043b5adbaSAlex Vesker 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GENEVE_VNI];
262143b5adbaSAlex Vesker 		fc->item_idx = item_idx;
262243b5adbaSAlex Vesker 		fc->tag_set = &mlx5dr_definer_geneve_vni_set;
262343b5adbaSAlex Vesker 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_1);
262443b5adbaSAlex Vesker 		fc->bit_mask = __mlx5_mask(header_geneve, vni);
262543b5adbaSAlex Vesker 		fc->bit_off = __mlx5_dw_bit_off(header_geneve, vni);
262643b5adbaSAlex Vesker 	}
262743b5adbaSAlex Vesker 
262843b5adbaSAlex Vesker 	return 0;
262943b5adbaSAlex Vesker }
263043b5adbaSAlex Vesker 
/* Convert an RTE_FLOW_ITEM_TYPE_GENEVE_OPT item into definer field-copy
 * entries. Requires a fully-masked option type and a spec; the firmware
 * flex-parser mapping for the (type, class) pair is fetched with
 * mlx5_get_geneve_hl_data. Two first-DW strategies exist:
 *  - option class is sampled in DW0 -> match type+class through DW0;
 *  - otherwise -> match only the per-option "OK" presence bit.
 * Remaining masked option data DWs are mapped one per GENEVE_OPT_DW slot
 * (extra_data records the data[] index for the setter). Each of the 8
 * DW/OK slot pools is bounded; exceeding them or any unsupported mask
 * fails with ENOTSUP. Returns 0 on success, rte_errno on failure.
 */
263143b5adbaSAlex Vesker static int
26328f8dad42SAlex Vesker mlx5dr_definer_conv_item_geneve_opt(struct mlx5dr_definer_conv_data *cd,
26338f8dad42SAlex Vesker 				    struct rte_flow_item *item,
26348f8dad42SAlex Vesker 				    int item_idx)
26358f8dad42SAlex Vesker {
26368f8dad42SAlex Vesker 	const struct rte_flow_item_geneve_opt *m = item->mask;
26378f8dad42SAlex Vesker 	const struct rte_flow_item_geneve_opt *v = item->spec;
26388f8dad42SAlex Vesker 	struct mlx5_hl_data *hl_ok_bit, *hl_dws;
26398f8dad42SAlex Vesker 	struct mlx5dr_definer_fc *fc;
26408f8dad42SAlex Vesker 	uint8_t num_of_dws, i;
26418f8dad42SAlex Vesker 	bool ok_bit_on_class;
26428f8dad42SAlex Vesker 	int ret;
26438f8dad42SAlex Vesker 
26448f8dad42SAlex Vesker 	if (!m || !(m->option_class || m->option_type || m->data))
26458f8dad42SAlex Vesker 		return 0;
26468f8dad42SAlex Vesker 
26478f8dad42SAlex Vesker 	if (!v || m->option_type != 0xff) {
26488f8dad42SAlex Vesker 		DR_LOG(ERR, "Cannot match geneve opt without valid opt type");
26498f8dad42SAlex Vesker 		goto out_not_supp;
26508f8dad42SAlex Vesker 	}
26518f8dad42SAlex Vesker 
26528f8dad42SAlex Vesker 	ret = mlx5_get_geneve_hl_data(cd->ctx,
26538f8dad42SAlex Vesker 				      v->option_type,
26548f8dad42SAlex Vesker 				      v->option_class,
26558f8dad42SAlex Vesker 				      &hl_ok_bit,
26568f8dad42SAlex Vesker 				      &num_of_dws,
26578f8dad42SAlex Vesker 				      &hl_dws,
26588f8dad42SAlex Vesker 				      &ok_bit_on_class,
26598f8dad42SAlex Vesker 	if (ret) {
26608f8dad42SAlex Vesker 		DR_LOG(ERR, "Geneve opt type and class %d not supported", v->option_type);
26618f8dad42SAlex Vesker 		goto out_not_supp;
26628f8dad42SAlex Vesker 	}
26638f8dad42SAlex Vesker 
26649e1b0160SMichael Baum 	if (ok_bit_on_class && m->option_class != RTE_BE16(UINT16_MAX)) {
26659e1b0160SMichael Baum 		DR_LOG(ERR, "Geneve option class has invalid mask");
26669e1b0160SMichael Baum 		goto out_not_supp;
26679e1b0160SMichael Baum 	}
26689e1b0160SMichael Baum 
26698f8dad42SAlex Vesker 	if (!ok_bit_on_class && m->option_class) {
26708f8dad42SAlex Vesker 		/* DW0 is used, we will match type, class */
26718f8dad42SAlex Vesker 		if (!num_of_dws || hl_dws[0].dw_mask != UINT32_MAX) {
26728f8dad42SAlex Vesker 			DR_LOG(ERR, "Geneve opt type %d DW0 not supported", v->option_type);
26738f8dad42SAlex Vesker 			goto out_not_supp;
26748f8dad42SAlex Vesker 		}
26758f8dad42SAlex Vesker 
26768f8dad42SAlex Vesker 		if (MLX5DR_DEFINER_FNAME_GENEVE_OPT_DW_0 + cd->geneve_opt_data_idx >
26778f8dad42SAlex Vesker 		    MLX5DR_DEFINER_FNAME_GENEVE_OPT_DW_7) {
26788f8dad42SAlex Vesker 			DR_LOG(ERR, "Max match geneve opt DWs reached");
26798f8dad42SAlex Vesker 			goto out_not_supp;
26808f8dad42SAlex Vesker 		}
26818f8dad42SAlex Vesker 
26828f8dad42SAlex Vesker 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GENEVE_OPT_DW_0 + cd->geneve_opt_data_idx++];
26838f8dad42SAlex Vesker 		fc->item_idx = item_idx;
26848f8dad42SAlex Vesker 		fc->tag_set = &mlx5dr_definer_geneve_opt_ctrl_set;
26858f8dad42SAlex Vesker 		fc->byte_off = hl_dws[0].dw_offset * DW_SIZE;
26868f8dad42SAlex Vesker 		fc->bit_mask = UINT32_MAX;
26878f8dad42SAlex Vesker 	} else {
26888f8dad42SAlex Vesker 		/* DW0 is not used, we must verify geneve opt type exists in packet */
26898f8dad42SAlex Vesker 		if (!hl_ok_bit->dw_mask) {
26908f8dad42SAlex Vesker 			DR_LOG(ERR, "Geneve opt OK bits not supported");
26918f8dad42SAlex Vesker 			goto out_not_supp;
26928f8dad42SAlex Vesker 		}
26938f8dad42SAlex Vesker 
26948f8dad42SAlex Vesker 		if (MLX5DR_DEFINER_FNAME_GENEVE_OPT_OK_0 + cd->geneve_opt_ok_idx >
26958f8dad42SAlex Vesker 		    MLX5DR_DEFINER_FNAME_GENEVE_OPT_OK_7) {
26968f8dad42SAlex Vesker 			DR_LOG(ERR, "Max match geneve opt reached");
26978f8dad42SAlex Vesker 			goto out_not_supp;
26988f8dad42SAlex Vesker 		}
26998f8dad42SAlex Vesker 
27008f8dad42SAlex Vesker 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GENEVE_OPT_OK_0 + cd->geneve_opt_ok_idx++];
27018f8dad42SAlex Vesker 		fc->item_idx = item_idx;
27028f8dad42SAlex Vesker 		fc->tag_set = &mlx5dr_definer_ones_set;
27038f8dad42SAlex Vesker 		fc->byte_off = hl_ok_bit->dw_offset * DW_SIZE +
2704191128d7SDavid Marchand 				rte_clz32(hl_ok_bit->dw_mask) / 8;
2705191128d7SDavid Marchand 		fc->bit_off = rte_ctz32(hl_ok_bit->dw_mask);
27068f8dad42SAlex Vesker 		fc->bit_mask = 0x1;
27078f8dad42SAlex Vesker 	}
27088f8dad42SAlex Vesker 
27098f8dad42SAlex Vesker 	for (i = 1; i < num_of_dws; i++) {
27108f8dad42SAlex Vesker 		/* Process each valid geneve option data DW1..N */
27118f8dad42SAlex Vesker 		if (!m->data[i - 1])
27128f8dad42SAlex Vesker 			continue;
27138f8dad42SAlex Vesker 
27148f8dad42SAlex Vesker 		if (hl_dws[i].dw_mask != UINT32_MAX) {
27158f8dad42SAlex Vesker 			DR_LOG(ERR, "Matching Geneve opt data[%d] not supported", i - 1);
27168f8dad42SAlex Vesker 			goto out_not_supp;
27178f8dad42SAlex Vesker 		}
27188f8dad42SAlex Vesker 
27198f8dad42SAlex Vesker 		if (MLX5DR_DEFINER_FNAME_GENEVE_OPT_DW_0 + cd->geneve_opt_data_idx >
27208f8dad42SAlex Vesker 		    MLX5DR_DEFINER_FNAME_GENEVE_OPT_DW_7) {
27218f8dad42SAlex Vesker 			DR_LOG(ERR, "Max match geneve options DWs reached");
27228f8dad42SAlex Vesker 			goto out_not_supp;
27238f8dad42SAlex Vesker 		}
27248f8dad42SAlex Vesker 
27258f8dad42SAlex Vesker 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GENEVE_OPT_DW_0 + cd->geneve_opt_data_idx++];
27268f8dad42SAlex Vesker 		fc->item_idx = item_idx;
27278f8dad42SAlex Vesker 		fc->tag_set = &mlx5dr_definer_geneve_opt_data_set;
27288f8dad42SAlex Vesker 		fc->byte_off = hl_dws[i].dw_offset * DW_SIZE;
27298f8dad42SAlex Vesker 		fc->bit_mask = m->data[i - 1];
27308f8dad42SAlex Vesker 		/* Use extra_data for data[] set offset */
27318f8dad42SAlex Vesker 		fc->extra_data = i - 1;
27328f8dad42SAlex Vesker 	}
27338f8dad42SAlex Vesker 
27348f8dad42SAlex Vesker 	return 0;
27358f8dad42SAlex Vesker 
27368f8dad42SAlex Vesker out_not_supp:
27378f8dad42SAlex Vesker 	rte_errno = ENOTSUP;
27388f8dad42SAlex Vesker 	return rte_errno;
27398f8dad42SAlex Vesker }
27408f8dad42SAlex Vesker 
/* Compact the sparse per-fname field-copy array into the match template.
 * Counts the active exact-match (fc) and range-match (fcr) entries,
 * allocates one contiguous buffer holding both (fcr follows fc), copies
 * the active entries over, and for exact-match fields also paints their
 * bits into the header-layout mask buffer 'hl'.
 * Returns 0 on success, rte_errno (ENOMEM) on allocation failure.
 */
27418f8dad42SAlex Vesker static int
274227ac2da9SAlex Vesker mlx5dr_definer_mt_set_fc(struct mlx5dr_match_template *mt,
274327ac2da9SAlex Vesker 			 struct mlx5dr_definer_fc *fc,
274427ac2da9SAlex Vesker 			 uint8_t *hl)
274527ac2da9SAlex Vesker {
27469732ffe1SAlex Vesker 	uint32_t fc_sz = 0, fcr_sz = 0;
274727ac2da9SAlex Vesker 	int i;
274827ac2da9SAlex Vesker 
274927ac2da9SAlex Vesker 	for (i = 0; i < MLX5DR_DEFINER_FNAME_MAX; i++)
275027ac2da9SAlex Vesker 		if (fc[i].tag_set)
27519732ffe1SAlex Vesker 			fc[i].is_range ? fcr_sz++ : fc_sz++;
275227ac2da9SAlex Vesker 
27539732ffe1SAlex Vesker 	mt->fc = simple_calloc(fc_sz + fcr_sz, sizeof(*mt->fc));
275427ac2da9SAlex Vesker 	if (!mt->fc) {
275527ac2da9SAlex Vesker 		rte_errno = ENOMEM;
275627ac2da9SAlex Vesker 		return rte_errno;
275727ac2da9SAlex Vesker 	}
275827ac2da9SAlex Vesker 
27599732ffe1SAlex Vesker 	mt->fcr = mt->fc + fc_sz;
27609732ffe1SAlex Vesker 
276127ac2da9SAlex Vesker 	for (i = 0; i < MLX5DR_DEFINER_FNAME_MAX; i++) {
276227ac2da9SAlex Vesker 		if (!fc[i].tag_set)
276327ac2da9SAlex Vesker 			continue;
276427ac2da9SAlex Vesker 
276527ac2da9SAlex Vesker 		fc[i].fname = i;
27669732ffe1SAlex Vesker 
27679732ffe1SAlex Vesker 		if (fc[i].is_range) {
27689732ffe1SAlex Vesker 			memcpy(&mt->fcr[mt->fcr_sz++], &fc[i], sizeof(*mt->fcr));
27699732ffe1SAlex Vesker 		} else {
277027ac2da9SAlex Vesker 			memcpy(&mt->fc[mt->fc_sz++], &fc[i], sizeof(*mt->fc));
277127ac2da9SAlex Vesker 			DR_SET(hl, -1, fc[i].byte_off, fc[i].bit_off, fc[i].bit_mask);
277227ac2da9SAlex Vesker 		}
27739732ffe1SAlex Vesker 	}
277427ac2da9SAlex Vesker 
277527ac2da9SAlex Vesker 	return 0;
277627ac2da9SAlex Vesker }
277727ac2da9SAlex Vesker 
/* Validate that range matching (item->last set) is allowed for this item
 * type. Only the whitelisted types below support ranges; anything else
 * fails with ENOTSUP. Items without 'last' are always accepted.
 * Returns 0 when supported, rte_errno otherwise.
 */
277827ac2da9SAlex Vesker static int
277957800e6cSAlex Vesker mlx5dr_definer_check_item_range_supp(struct rte_flow_item *item)
278057800e6cSAlex Vesker {
278157800e6cSAlex Vesker 	if (!item->last)
278257800e6cSAlex Vesker 		return 0;
278357800e6cSAlex Vesker 
278457800e6cSAlex Vesker 	switch ((int)item->type) {
278557800e6cSAlex Vesker 	case RTE_FLOW_ITEM_TYPE_IPV4:
278657800e6cSAlex Vesker 	case RTE_FLOW_ITEM_TYPE_IPV6:
278757800e6cSAlex Vesker 	case RTE_FLOW_ITEM_TYPE_UDP:
278857800e6cSAlex Vesker 	case RTE_FLOW_ITEM_TYPE_TCP:
278957800e6cSAlex Vesker 	case RTE_FLOW_ITEM_TYPE_TAG:
279057800e6cSAlex Vesker 	case RTE_FLOW_ITEM_TYPE_META:
279157800e6cSAlex Vesker 	case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
2792fcd7b8c6SErez Shitrit 	case RTE_FLOW_ITEM_TYPE_RANDOM:
279357800e6cSAlex Vesker 		return 0;
279457800e6cSAlex Vesker 	default:
279557800e6cSAlex Vesker 		DR_LOG(ERR, "Range not supported over item type %d", item->type);
279657800e6cSAlex Vesker 		rte_errno = ENOTSUP;
279757800e6cSAlex Vesker 		return rte_errno;
279857800e6cSAlex Vesker 	}
279957800e6cSAlex Vesker }
280057800e6cSAlex Vesker 
/* Convert an RTE_FLOW_ITEM_TYPE_ESP item into definer field-copy entries.
 * Maps the SPI and sequence-number header fields to their ipsec
 * header-layout slots when masked. NULL mask means match-any.
 * Always returns 0.
 */
280157800e6cSAlex Vesker static int
280281cf20a2SHamdan Igbaria mlx5dr_definer_conv_item_esp(struct mlx5dr_definer_conv_data *cd,
280381cf20a2SHamdan Igbaria 			     struct rte_flow_item *item,
280481cf20a2SHamdan Igbaria 			     int item_idx)
280581cf20a2SHamdan Igbaria {
280681cf20a2SHamdan Igbaria 	const struct rte_flow_item_esp *m = item->mask;
280781cf20a2SHamdan Igbaria 	struct mlx5dr_definer_fc *fc;
280881cf20a2SHamdan Igbaria 
280981cf20a2SHamdan Igbaria 	if (!m)
281081cf20a2SHamdan Igbaria 		return 0;
281181cf20a2SHamdan Igbaria 	if (m->hdr.spi) {
281281cf20a2SHamdan Igbaria 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_ESP_SPI];
281381cf20a2SHamdan Igbaria 		fc->item_idx = item_idx;
281481cf20a2SHamdan Igbaria 		fc->tag_set = &mlx5dr_definer_ipsec_spi_set;
281581cf20a2SHamdan Igbaria 		DR_CALC_SET_HDR(fc, ipsec, spi);
281681cf20a2SHamdan Igbaria 	}
281781cf20a2SHamdan Igbaria 	if (m->hdr.seq) {
281881cf20a2SHamdan Igbaria 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_ESP_SEQUENCE_NUMBER];
281981cf20a2SHamdan Igbaria 		fc->item_idx = item_idx;
282081cf20a2SHamdan Igbaria 		fc->tag_set = &mlx5dr_definer_ipsec_sequence_number_set;
282181cf20a2SHamdan Igbaria 		DR_CALC_SET_HDR(fc, ipsec, sequence_number);
282281cf20a2SHamdan Igbaria 	}
282381cf20a2SHamdan Igbaria 	return 0;
282481cf20a2SHamdan Igbaria }
282581cf20a2SHamdan Igbaria 
/* Update cd->tunnel (inner/outer context) before converting the next item.
 * Once inside a tunnel the flag stays set. MPLS is special-cased: a stack
 * of MPLS labels at one level must not flip to inner, so for MPLS items
 * only non-MPLS tunnel layers (DR_FLOW_LAYER_TUNNEL_NO_MPLS) are
 * considered when deciding whether we are already under a tunnel.
 */
28265bf14a4bSErez Shitrit static void mlx5dr_definer_set_conv_tunnel(enum rte_flow_item_type cur_type,
28275bf14a4bSErez Shitrit 					   uint64_t item_flags,
28285bf14a4bSErez Shitrit 					   struct mlx5dr_definer_conv_data *cd)
28295bf14a4bSErez Shitrit {
28305bf14a4bSErez Shitrit 	/* Already tunnel nothing to change */
28315bf14a4bSErez Shitrit 	if (cd->tunnel)
28325bf14a4bSErez Shitrit 		return;
28335bf14a4bSErez Shitrit 
28345bf14a4bSErez Shitrit 	/* We can have more than one MPLS label at each level (inner/outer), so
28355bf14a4bSErez Shitrit 	 * consider tunnel only when it is already under tunnel or if we moved to the
28365bf14a4bSErez Shitrit 	 * second MPLS level.
28375bf14a4bSErez Shitrit 	 */
28385bf14a4bSErez Shitrit 	if (cur_type != RTE_FLOW_ITEM_TYPE_MPLS)
28395bf14a4bSErez Shitrit 		cd->tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
28405bf14a4bSErez Shitrit 	else
28415bf14a4bSErez Shitrit 		cd->tunnel = !!(item_flags & DR_FLOW_LAYER_TUNNEL_NO_MPLS);
28425bf14a4bSErez Shitrit }
28435bf14a4bSErez Shitrit 
/* Convert an RTE_FLOW_ITEM_TYPE_FLEX item into definer field-copy entries.
 * Walks every flex-parser sample DW, asks the flex-item layer for the mask
 * bytes sampled at that header-layout offset (inner vs outer selected by
 * the tunnel context), and binds each non-zero DW to its FLEX_PARSER_i
 * slot with the matching inner/outer setter.
 * Returns 0 on success, rte_errno (EINVAL) if the flex handle rejects an
 * offset lookup.
 */
284481cf20a2SHamdan Igbaria static int
28458c0ca752SRongwei Liu mlx5dr_definer_conv_item_flex_parser(struct mlx5dr_definer_conv_data *cd,
28468c0ca752SRongwei Liu 				     struct rte_flow_item *item,
28478c0ca752SRongwei Liu 				     int item_idx)
28488c0ca752SRongwei Liu {
28498c0ca752SRongwei Liu 	uint32_t base_off = MLX5_BYTE_OFF(definer_hl, flex_parser.flex_parser_0);
28508c0ca752SRongwei Liu 	const struct rte_flow_item_flex *v, *m;
28518c0ca752SRongwei Liu 	enum mlx5dr_definer_fname fname;
28528c0ca752SRongwei Liu 	struct mlx5dr_definer_fc *fc;
28538c0ca752SRongwei Liu 	uint32_t i, mask, byte_off;
28548c0ca752SRongwei Liu 	bool is_inner = cd->tunnel;
28558c0ca752SRongwei Liu 	int ret;
28568c0ca752SRongwei Liu 
28578c0ca752SRongwei Liu 	m = item->mask;
28588c0ca752SRongwei Liu 	v = item->spec;
28598c0ca752SRongwei Liu 	mask = 0;
28608c0ca752SRongwei Liu 	for (i = 0; i < MLX5_GRAPH_NODE_SAMPLE_NUM; i++) {
28618c0ca752SRongwei Liu 		byte_off = base_off - i * sizeof(uint32_t);
28628c0ca752SRongwei Liu 		ret = mlx5_flex_get_parser_value_per_byte_off(m, v->handle, byte_off,
286397e19f07SViacheslav Ovsiienko 							      is_inner, &mask);
28648c0ca752SRongwei Liu 		if (ret == -1) {
28658c0ca752SRongwei Liu 			rte_errno = EINVAL;
28668c0ca752SRongwei Liu 			return rte_errno;
28678c0ca752SRongwei Liu 		}
28688c0ca752SRongwei Liu 
28698c0ca752SRongwei Liu 		if (!mask)
28708c0ca752SRongwei Liu 			continue;
28718c0ca752SRongwei Liu 
28728c0ca752SRongwei Liu 		fname = MLX5DR_DEFINER_FNAME_FLEX_PARSER_0;
28738c0ca752SRongwei Liu 		fname += (enum mlx5dr_definer_fname)i;
28748c0ca752SRongwei Liu 		fc = &cd->fc[fname];
28758c0ca752SRongwei Liu 		fc->byte_off = byte_off;
28768c0ca752SRongwei Liu 		fc->item_idx = item_idx;
28778c0ca752SRongwei Liu 		fc->tag_set = cd->tunnel ? &mlx5dr_definer_flex_parser_inner_set :
28788c0ca752SRongwei Liu 					   &mlx5dr_definer_flex_parser_outer_set;
28798c0ca752SRongwei Liu 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
28808c0ca752SRongwei Liu 		fc->bit_mask = mask;
28818c0ca752SRongwei Liu 	}
28828c0ca752SRongwei Liu 	return 0;
28838c0ca752SRongwei Liu }
28848c0ca752SRongwei Liu 
/* Convert an RTE_FLOW_ITEM_TYPE_IB_BTH (RoCEv2 base transport header)
 * item into definer field-copy entries. Non-relaxed mode implies the UDP
 * protocol and RoCEv2 destination port if not already set. Supported BTH
 * fields are opcode, the 3-byte destination QP, and the ack-request (a)
 * bit; any other masked BTH field (se/m/padcnt/tver/pkey/f/b/rsvd/psn)
 * fails with ENOTSUP. Returns 0 on success, rte_errno on failure.
 */
28858c0ca752SRongwei Liu static int
28867bf29065SDong Zhou mlx5dr_definer_conv_item_ib_l4(struct mlx5dr_definer_conv_data *cd,
28877bf29065SDong Zhou 			       struct rte_flow_item *item,
28887bf29065SDong Zhou 			       int item_idx)
28897bf29065SDong Zhou {
28907bf29065SDong Zhou 	const struct rte_flow_item_ib_bth *m = item->mask;
28917bf29065SDong Zhou 	struct mlx5dr_definer_fc *fc;
28927bf29065SDong Zhou 	bool inner = cd->tunnel;
28937bf29065SDong Zhou 
28947bf29065SDong Zhou 	/* In order to match on RoCEv2(layer4 ib), we must match
28957bf29065SDong Zhou 	 * on ip_protocol and l4_dport.
28967bf29065SDong Zhou 	 */
28977bf29065SDong Zhou 	if (!cd->relaxed) {
28987bf29065SDong Zhou 		fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)];
28997bf29065SDong Zhou 		if (!fc->tag_set) {
29007bf29065SDong Zhou 			fc->item_idx = item_idx;
29017bf29065SDong Zhou 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
29027bf29065SDong Zhou 			fc->tag_set = &mlx5dr_definer_udp_protocol_set;
29037bf29065SDong Zhou 			DR_CALC_SET(fc, eth_l2, l4_type_bwc, inner);
29047bf29065SDong Zhou 		}
29057bf29065SDong Zhou 
29067bf29065SDong Zhou 		fc = &cd->fc[DR_CALC_FNAME(L4_DPORT, inner)];
29077bf29065SDong Zhou 		if (!fc->tag_set) {
29087bf29065SDong Zhou 			fc->item_idx = item_idx;
29097bf29065SDong Zhou 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
29107bf29065SDong Zhou 			fc->tag_set = &mlx5dr_definer_ib_l4_udp_port_set;
29117bf29065SDong Zhou 			DR_CALC_SET(fc, eth_l4, destination_port, inner);
29127bf29065SDong Zhou 		}
29137bf29065SDong Zhou 	}
29147bf29065SDong Zhou 
29157bf29065SDong Zhou 	if (!m)
29167bf29065SDong Zhou 		return 0;
29177bf29065SDong Zhou 
29187bf29065SDong Zhou 	if (m->hdr.se || m->hdr.m || m->hdr.padcnt || m->hdr.tver ||
29197bf29065SDong Zhou 		m->hdr.pkey || m->hdr.f || m->hdr.b || m->hdr.rsvd0 ||
2920ff249a62SItamar Gozlan 		m->hdr.rsvd1 || !is_mem_zero(m->hdr.psn, 3)) {
29217bf29065SDong Zhou 		rte_errno = ENOTSUP;
29227bf29065SDong Zhou 		return rte_errno;
29237bf29065SDong Zhou 	}
29247bf29065SDong Zhou 
29257bf29065SDong Zhou 	if (m->hdr.opcode) {
29267bf29065SDong Zhou 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_IB_L4_OPCODE];
29277bf29065SDong Zhou 		fc->item_idx = item_idx;
29287bf29065SDong Zhou 		fc->tag_set = &mlx5dr_definer_ib_l4_opcode_set;
29297bf29065SDong Zhou 		DR_CALC_SET_HDR(fc, ib_l4, opcode);
29307bf29065SDong Zhou 	}
29317bf29065SDong Zhou 
29327bf29065SDong Zhou 	if (!is_mem_zero(m->hdr.dst_qp, 3)) {
29337bf29065SDong Zhou 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_IB_L4_QPN];
29347bf29065SDong Zhou 		fc->item_idx = item_idx;
29357bf29065SDong Zhou 		fc->tag_set = &mlx5dr_definer_ib_l4_qp_set;
29367bf29065SDong Zhou 		DR_CALC_SET_HDR(fc, ib_l4, qp);
29377bf29065SDong Zhou 	}
29387bf29065SDong Zhou 
2939ff249a62SItamar Gozlan 	if (m->hdr.a) {
2940ff249a62SItamar Gozlan 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_IB_L4_A];
2941ff249a62SItamar Gozlan 		fc->item_idx = item_idx;
2942ff249a62SItamar Gozlan 		fc->tag_set = &mlx5dr_definer_ib_l4_bth_a_set;
2943ff249a62SItamar Gozlan 		DR_CALC_SET_HDR(fc, ib_l4, ackreq);
2944ff249a62SItamar Gozlan 	}
2945ff249a62SItamar Gozlan 
29467bf29065SDong Zhou 	return 0;
29477bf29065SDong Zhou }
29487bf29065SDong Zhou 
29497bf29065SDong Zhou static int
2950f6164649SGavin Li mlx5dr_definer_conv_item_vxlan_gpe(struct mlx5dr_definer_conv_data *cd,
2951f6164649SGavin Li 				   struct rte_flow_item *item,
2952f6164649SGavin Li 				   int item_idx)
2953f6164649SGavin Li {
2954f6164649SGavin Li 	const struct rte_flow_item_vxlan_gpe *m = item->mask;
2955f6164649SGavin Li 	struct mlx5dr_definer_fc *fc;
2956f6164649SGavin Li 	bool inner = cd->tunnel;
2957f6164649SGavin Li 
2958f6164649SGavin Li 	if (inner) {
2959f6164649SGavin Li 		DR_LOG(ERR, "Inner VXLAN GPE item not supported");
2960f6164649SGavin Li 		rte_errno = ENOTSUP;
2961f6164649SGavin Li 		return rte_errno;
2962f6164649SGavin Li 	}
2963f6164649SGavin Li 
2964f6164649SGavin Li 	/* In order to match on VXLAN GPE we must match on ip_protocol and l4_dport */
2965f6164649SGavin Li 	if (!cd->relaxed) {
2966f6164649SGavin Li 		fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)];
2967f6164649SGavin Li 		if (!fc->tag_set) {
2968f6164649SGavin Li 			fc->item_idx = item_idx;
2969f6164649SGavin Li 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
2970f6164649SGavin Li 			fc->tag_set = &mlx5dr_definer_udp_protocol_set;
2971f6164649SGavin Li 			DR_CALC_SET(fc, eth_l2, l4_type_bwc, inner);
2972f6164649SGavin Li 		}
2973f6164649SGavin Li 
2974f6164649SGavin Li 		fc = &cd->fc[DR_CALC_FNAME(L4_DPORT, inner)];
2975f6164649SGavin Li 		if (!fc->tag_set) {
2976f6164649SGavin Li 			fc->item_idx = item_idx;
2977f6164649SGavin Li 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
2978f6164649SGavin Li 			fc->tag_set = &mlx5dr_definer_vxlan_gpe_udp_port_set;
2979f6164649SGavin Li 			DR_CALC_SET(fc, eth_l4, destination_port, inner);
2980f6164649SGavin Li 		}
2981f6164649SGavin Li 	}
2982f6164649SGavin Li 
2983f6164649SGavin Li 	if (!m)
2984f6164649SGavin Li 		return 0;
2985f6164649SGavin Li 
2986f6164649SGavin Li 	if (m->flags) {
2987f6164649SGavin Li 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_VXLAN_GPE_FLAGS];
2988f6164649SGavin Li 		fc->item_idx = item_idx;
2989f6164649SGavin Li 		fc->tag_set = &mlx5dr_definer_vxlan_gpe_flags_set;
2990f6164649SGavin Li 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_0);
2991f6164649SGavin Li 		fc->bit_mask = __mlx5_mask(header_vxlan_gpe, flags);
2992f6164649SGavin Li 		fc->bit_off = __mlx5_dw_bit_off(header_vxlan_gpe, flags);
2993f6164649SGavin Li 	}
2994f6164649SGavin Li 
2995f6164649SGavin Li 	if (!is_mem_zero(m->rsvd0, 2)) {
2996f6164649SGavin Li 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_VXLAN_GPE_RSVD0];
2997f6164649SGavin Li 		fc->item_idx = item_idx;
2998f6164649SGavin Li 		fc->tag_set = &mlx5dr_definer_vxlan_gpe_rsvd0_set;
2999f6164649SGavin Li 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_0);
3000f6164649SGavin Li 		fc->bit_mask = __mlx5_mask(header_vxlan_gpe, rsvd0);
3001f6164649SGavin Li 		fc->bit_off = __mlx5_dw_bit_off(header_vxlan_gpe, rsvd0);
3002f6164649SGavin Li 	}
3003f6164649SGavin Li 
3004f6164649SGavin Li 	if (m->protocol) {
3005f6164649SGavin Li 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_VXLAN_GPE_PROTO];
3006f6164649SGavin Li 		fc->item_idx = item_idx;
3007f6164649SGavin Li 		fc->tag_set = &mlx5dr_definer_vxlan_gpe_protocol_set;
3008f6164649SGavin Li 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_0);
3009f6164649SGavin Li 		fc->byte_off += MLX5_BYTE_OFF(header_vxlan_gpe, protocol);
3010f6164649SGavin Li 		fc->bit_mask = __mlx5_mask(header_vxlan_gpe, protocol);
3011f6164649SGavin Li 		fc->bit_off = __mlx5_dw_bit_off(header_vxlan_gpe, protocol);
3012f6164649SGavin Li 	}
3013f6164649SGavin Li 
3014f6164649SGavin Li 	if (!is_mem_zero(m->vni, 3)) {
3015f6164649SGavin Li 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_VXLAN_GPE_VNI];
3016f6164649SGavin Li 		fc->item_idx = item_idx;
3017f6164649SGavin Li 		fc->tag_set = &mlx5dr_definer_vxlan_gpe_vni_set;
3018f6164649SGavin Li 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_1);
3019f6164649SGavin Li 		fc->bit_mask = __mlx5_mask(header_vxlan_gpe, vni);
3020f6164649SGavin Li 		fc->bit_off = __mlx5_dw_bit_off(header_vxlan_gpe, vni);
3021f6164649SGavin Li 	}
3022f6164649SGavin Li 
3023f6164649SGavin Li 	if (m->rsvd1) {
3024f6164649SGavin Li 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_VXLAN_GPE_RSVD1];
3025f6164649SGavin Li 		fc->item_idx = item_idx;
3026f6164649SGavin Li 		fc->tag_set = &mlx5dr_definer_vxlan_gpe_rsvd1_set;
3027f6164649SGavin Li 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_1);
3028f6164649SGavin Li 		fc->bit_mask = __mlx5_mask(header_vxlan_gpe, rsvd1);
3029f6164649SGavin Li 		fc->bit_off = __mlx5_dw_bit_off(header_vxlan_gpe, rsvd1);
3030f6164649SGavin Li 	}
3031f6164649SGavin Li 
3032f6164649SGavin Li 	return 0;
3033f6164649SGavin Li }
3034f6164649SGavin Li 
3035f6164649SGavin Li static int
3036a5230507SHamdan Igbaria mlx5dr_definer_conv_item_compare_field(const struct rte_flow_field_data *f,
3037a5230507SHamdan Igbaria 				       const struct rte_flow_field_data *other_f,
3038a5230507SHamdan Igbaria 				       struct mlx5dr_definer_conv_data *cd,
3039a5230507SHamdan Igbaria 				       int item_idx,
3040a5230507SHamdan Igbaria 				       enum mlx5dr_definer_compare_dw_selectors dw_offset)
3041a5230507SHamdan Igbaria {
3042a5230507SHamdan Igbaria 	struct mlx5dr_definer_fc *fc = NULL;
3043a5230507SHamdan Igbaria 	int reg;
3044a5230507SHamdan Igbaria 
3045a5230507SHamdan Igbaria 	if (f->offset) {
3046a5230507SHamdan Igbaria 		DR_LOG(ERR, "field offset %u is not supported, only offset zero supported",
3047a5230507SHamdan Igbaria 		       f->offset);
3048a5230507SHamdan Igbaria 		goto err_notsup;
3049a5230507SHamdan Igbaria 	}
3050a5230507SHamdan Igbaria 
3051a5230507SHamdan Igbaria 	switch (f->field) {
3052a5230507SHamdan Igbaria 	case RTE_FLOW_FIELD_META:
3053a5230507SHamdan Igbaria 		reg = flow_hw_get_reg_id_from_ctx(cd->ctx,
305410943706SMichael Baum 						  RTE_FLOW_ITEM_TYPE_META,
30552b45a773SMichael Baum 						  cd->table_type, -1);
3056a5230507SHamdan Igbaria 		if (reg <= 0) {
3057a5230507SHamdan Igbaria 			DR_LOG(ERR, "Invalid register for compare metadata field");
3058a5230507SHamdan Igbaria 			rte_errno = EINVAL;
3059a5230507SHamdan Igbaria 			return rte_errno;
3060a5230507SHamdan Igbaria 		}
3061a5230507SHamdan Igbaria 
3062a5230507SHamdan Igbaria 		fc = mlx5dr_definer_get_register_fc(cd, reg);
3063a5230507SHamdan Igbaria 		if (!fc)
3064a5230507SHamdan Igbaria 			return rte_errno;
3065a5230507SHamdan Igbaria 
3066a5230507SHamdan Igbaria 		fc->item_idx = item_idx;
3067a5230507SHamdan Igbaria 		fc->tag_set = &mlx5dr_definer_compare_set;
3068a5230507SHamdan Igbaria 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
3069a5230507SHamdan Igbaria 		fc->compare_idx = dw_offset;
3070a5230507SHamdan Igbaria 		break;
3071a5230507SHamdan Igbaria 	case RTE_FLOW_FIELD_TAG:
3072a5230507SHamdan Igbaria 		reg = flow_hw_get_reg_id_from_ctx(cd->ctx,
3073a5230507SHamdan Igbaria 						  RTE_FLOW_ITEM_TYPE_TAG,
30742b45a773SMichael Baum 						  cd->table_type,
3075a5230507SHamdan Igbaria 						  f->tag_index);
3076a5230507SHamdan Igbaria 		if (reg <= 0) {
3077a5230507SHamdan Igbaria 			DR_LOG(ERR, "Invalid register for compare tag field");
3078a5230507SHamdan Igbaria 			rte_errno = EINVAL;
3079a5230507SHamdan Igbaria 			return rte_errno;
3080a5230507SHamdan Igbaria 		}
3081a5230507SHamdan Igbaria 
3082a5230507SHamdan Igbaria 		fc = mlx5dr_definer_get_register_fc(cd, reg);
3083a5230507SHamdan Igbaria 		if (!fc)
3084a5230507SHamdan Igbaria 			return rte_errno;
3085a5230507SHamdan Igbaria 
3086a5230507SHamdan Igbaria 		fc->item_idx = item_idx;
3087a5230507SHamdan Igbaria 		fc->tag_set = &mlx5dr_definer_compare_set;
3088a5230507SHamdan Igbaria 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
3089a5230507SHamdan Igbaria 		fc->compare_idx = dw_offset;
3090a5230507SHamdan Igbaria 		break;
3091a5230507SHamdan Igbaria 	case RTE_FLOW_FIELD_VALUE:
3092a5230507SHamdan Igbaria 		if (dw_offset == MLX5DR_DEFINER_COMPARE_ARGUMENT_0) {
3093a5230507SHamdan Igbaria 			DR_LOG(ERR, "Argument field does not support immediate value");
3094a5230507SHamdan Igbaria 			goto err_notsup;
3095a5230507SHamdan Igbaria 		}
3096a5230507SHamdan Igbaria 		break;
3097a5230507SHamdan Igbaria 	case RTE_FLOW_FIELD_RANDOM:
3098a5230507SHamdan Igbaria 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_RANDOM_NUM];
3099a5230507SHamdan Igbaria 		fc->item_idx = item_idx;
3100a5230507SHamdan Igbaria 		fc->tag_set = &mlx5dr_definer_compare_set;
3101a5230507SHamdan Igbaria 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
3102a5230507SHamdan Igbaria 		fc->compare_idx = dw_offset;
3103a5230507SHamdan Igbaria 		DR_CALC_SET_HDR(fc, random_number, random_number);
3104a5230507SHamdan Igbaria 		break;
3105e1af096eSMichael Baum 	case RTE_FLOW_FIELD_ESP_SEQ_NUM:
3106e1af096eSMichael Baum 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_ESP_SEQUENCE_NUMBER];
3107e1af096eSMichael Baum 		fc->item_idx = item_idx;
3108e1af096eSMichael Baum 		fc->tag_set = &mlx5dr_definer_compare_set;
3109e1af096eSMichael Baum 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
3110e1af096eSMichael Baum 		fc->compare_idx = dw_offset;
3111e1af096eSMichael Baum 		DR_CALC_SET_HDR(fc, ipsec, sequence_number);
3112e1af096eSMichael Baum 		break;
3113a5230507SHamdan Igbaria 	default:
3114a5230507SHamdan Igbaria 		DR_LOG(ERR, "%u field is not supported", f->field);
3115a5230507SHamdan Igbaria 		goto err_notsup;
3116a5230507SHamdan Igbaria 	}
3117a5230507SHamdan Igbaria 
3118a5230507SHamdan Igbaria 	if (fc && other_f && other_f->field == RTE_FLOW_FIELD_VALUE)
3119a5230507SHamdan Igbaria 		fc->compare_set_base = true;
3120a5230507SHamdan Igbaria 
3121a5230507SHamdan Igbaria 	return 0;
3122a5230507SHamdan Igbaria 
3123a5230507SHamdan Igbaria err_notsup:
3124a5230507SHamdan Igbaria 	rte_errno = ENOTSUP;
3125a5230507SHamdan Igbaria 	return rte_errno;
3126a5230507SHamdan Igbaria }
3127a5230507SHamdan Igbaria 
3128a5230507SHamdan Igbaria static int
3129a5230507SHamdan Igbaria mlx5dr_definer_conv_item_compare(struct mlx5dr_definer_conv_data *cd,
3130a5230507SHamdan Igbaria 				 struct rte_flow_item *item,
3131a5230507SHamdan Igbaria 				 int item_idx)
3132a5230507SHamdan Igbaria {
3133a5230507SHamdan Igbaria 	const struct rte_flow_item_compare *m = item->mask;
3134a5230507SHamdan Igbaria 	const struct rte_flow_field_data *a = &m->a;
3135a5230507SHamdan Igbaria 	const struct rte_flow_field_data *b = &m->b;
3136a5230507SHamdan Igbaria 	int ret;
3137a5230507SHamdan Igbaria 
3138a5230507SHamdan Igbaria 	if (m->width != 0xffffffff) {
3139a5230507SHamdan Igbaria 		DR_LOG(ERR, "compare item width of 0x%x is not supported, only full DW supported",
3140a5230507SHamdan Igbaria 				m->width);
3141a5230507SHamdan Igbaria 		rte_errno = ENOTSUP;
3142a5230507SHamdan Igbaria 		return rte_errno;
3143a5230507SHamdan Igbaria 	}
3144a5230507SHamdan Igbaria 
3145a5230507SHamdan Igbaria 	ret = mlx5dr_definer_conv_item_compare_field(a, b, cd, item_idx,
3146a5230507SHamdan Igbaria 						     MLX5DR_DEFINER_COMPARE_ARGUMENT_0);
3147a5230507SHamdan Igbaria 	if (ret)
3148a5230507SHamdan Igbaria 		return ret;
3149a5230507SHamdan Igbaria 
3150a5230507SHamdan Igbaria 	ret = mlx5dr_definer_conv_item_compare_field(b, NULL, cd, item_idx,
3151a5230507SHamdan Igbaria 						     MLX5DR_DEFINER_COMPARE_BASE_0);
3152a5230507SHamdan Igbaria 	if (ret)
3153a5230507SHamdan Igbaria 		return ret;
3154a5230507SHamdan Igbaria 
3155a5230507SHamdan Igbaria 	return 0;
3156a5230507SHamdan Igbaria }
3157a5230507SHamdan Igbaria 
3158a5230507SHamdan Igbaria static int
3159c55c2bf3SAlex Vesker mlx5dr_definer_conv_items_to_hl(struct mlx5dr_context *ctx,
3160c55c2bf3SAlex Vesker 				struct mlx5dr_match_template *mt,
3161a5230507SHamdan Igbaria 				uint8_t *hl,
3162a5230507SHamdan Igbaria 				struct mlx5dr_matcher *matcher)
3163c55c2bf3SAlex Vesker {
3164c55c2bf3SAlex Vesker 	struct mlx5dr_definer_fc fc[MLX5DR_DEFINER_FNAME_MAX] = {{0}};
3165c55c2bf3SAlex Vesker 	struct mlx5dr_definer_conv_data cd = {0};
3166c55c2bf3SAlex Vesker 	struct rte_flow_item *items = mt->items;
3167c55c2bf3SAlex Vesker 	uint64_t item_flags = 0;
316827ac2da9SAlex Vesker 	int i, ret;
3169c55c2bf3SAlex Vesker 
3170c55c2bf3SAlex Vesker 	cd.fc = fc;
3171d72b8fbdSGregory Etelson 	cd.ctx = ctx;
3172c55c2bf3SAlex Vesker 	cd.relaxed = mt->flags & MLX5DR_MATCH_TEMPLATE_FLAG_RELAXED_MATCH;
31732b45a773SMichael Baum 	cd.table_type = matcher->tbl->type;
3174c55c2bf3SAlex Vesker 
3175c55c2bf3SAlex Vesker 	/* Collect all RTE fields to the field array and set header layout */
3176c55c2bf3SAlex Vesker 	for (i = 0; items->type != RTE_FLOW_ITEM_TYPE_END; i++, items++) {
31775bf14a4bSErez Shitrit 		mlx5dr_definer_set_conv_tunnel(items->type, item_flags, &cd);
3178c55c2bf3SAlex Vesker 
317957800e6cSAlex Vesker 		ret = mlx5dr_definer_check_item_range_supp(items);
318057800e6cSAlex Vesker 		if (ret)
318157800e6cSAlex Vesker 			return ret;
318257800e6cSAlex Vesker 
3183a5230507SHamdan Igbaria 		if (mlx5dr_matcher_is_compare(matcher)) {
3184a5230507SHamdan Igbaria 			DR_LOG(ERR, "Compare matcher not supported for more than one item");
3185a5230507SHamdan Igbaria 			goto not_supp;
3186a5230507SHamdan Igbaria 		}
3187a5230507SHamdan Igbaria 
3188c55c2bf3SAlex Vesker 		switch ((int)items->type) {
3189c55c2bf3SAlex Vesker 		case RTE_FLOW_ITEM_TYPE_ETH:
3190c55c2bf3SAlex Vesker 			ret = mlx5dr_definer_conv_item_eth(&cd, items, i);
3191c55c2bf3SAlex Vesker 			item_flags |= cd.tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
3192c55c2bf3SAlex Vesker 						  MLX5_FLOW_LAYER_OUTER_L2;
3193c55c2bf3SAlex Vesker 			break;
3194c55c2bf3SAlex Vesker 		case RTE_FLOW_ITEM_TYPE_VLAN:
3195c55c2bf3SAlex Vesker 			ret = mlx5dr_definer_conv_item_vlan(&cd, items, i);
3196c55c2bf3SAlex Vesker 			item_flags |= cd.tunnel ?
3197c55c2bf3SAlex Vesker 				(MLX5_FLOW_LAYER_INNER_VLAN | MLX5_FLOW_LAYER_INNER_L2) :
3198c55c2bf3SAlex Vesker 				(MLX5_FLOW_LAYER_OUTER_VLAN | MLX5_FLOW_LAYER_OUTER_L2);
3199c55c2bf3SAlex Vesker 			break;
3200c55c2bf3SAlex Vesker 		case RTE_FLOW_ITEM_TYPE_IPV4:
3201c55c2bf3SAlex Vesker 			ret = mlx5dr_definer_conv_item_ipv4(&cd, items, i);
3202c55c2bf3SAlex Vesker 			item_flags |= cd.tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
3203c55c2bf3SAlex Vesker 						  MLX5_FLOW_LAYER_OUTER_L3_IPV4;
3204c55c2bf3SAlex Vesker 			break;
3205c55c2bf3SAlex Vesker 		case RTE_FLOW_ITEM_TYPE_IPV6:
3206c55c2bf3SAlex Vesker 			ret = mlx5dr_definer_conv_item_ipv6(&cd, items, i);
3207c55c2bf3SAlex Vesker 			item_flags |= cd.tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
3208c55c2bf3SAlex Vesker 						  MLX5_FLOW_LAYER_OUTER_L3_IPV6;
3209c55c2bf3SAlex Vesker 			break;
3210c55c2bf3SAlex Vesker 		case RTE_FLOW_ITEM_TYPE_UDP:
3211c55c2bf3SAlex Vesker 			ret = mlx5dr_definer_conv_item_udp(&cd, items, i);
3212c55c2bf3SAlex Vesker 			item_flags |= cd.tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
3213c55c2bf3SAlex Vesker 						  MLX5_FLOW_LAYER_OUTER_L4_UDP;
3214c55c2bf3SAlex Vesker 			break;
3215c55c2bf3SAlex Vesker 		case RTE_FLOW_ITEM_TYPE_TCP:
3216c55c2bf3SAlex Vesker 			ret = mlx5dr_definer_conv_item_tcp(&cd, items, i);
3217c55c2bf3SAlex Vesker 			item_flags |= cd.tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
3218c55c2bf3SAlex Vesker 						  MLX5_FLOW_LAYER_OUTER_L4_TCP;
3219c55c2bf3SAlex Vesker 			break;
3220c55c2bf3SAlex Vesker 		case RTE_FLOW_ITEM_TYPE_GTP:
3221c55c2bf3SAlex Vesker 			ret = mlx5dr_definer_conv_item_gtp(&cd, items, i);
3222c55c2bf3SAlex Vesker 			item_flags |= MLX5_FLOW_LAYER_GTP;
3223c55c2bf3SAlex Vesker 			break;
3224c55c2bf3SAlex Vesker 		case RTE_FLOW_ITEM_TYPE_GTP_PSC:
3225c55c2bf3SAlex Vesker 			ret = mlx5dr_definer_conv_item_gtp_psc(&cd, items, i);
3226c55c2bf3SAlex Vesker 			item_flags |= MLX5_FLOW_LAYER_GTP_PSC;
3227c55c2bf3SAlex Vesker 			break;
3228c55c2bf3SAlex Vesker 		case RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT:
3229c55c2bf3SAlex Vesker 			ret = mlx5dr_definer_conv_item_port(&cd, items, i);
3230c55c2bf3SAlex Vesker 			item_flags |= MLX5_FLOW_ITEM_REPRESENTED_PORT;
3231c55c2bf3SAlex Vesker 			mt->vport_item_id = i;
3232c55c2bf3SAlex Vesker 			break;
3233c55c2bf3SAlex Vesker 		case RTE_FLOW_ITEM_TYPE_VXLAN:
3234c55c2bf3SAlex Vesker 			ret = mlx5dr_definer_conv_item_vxlan(&cd, items, i);
3235c55c2bf3SAlex Vesker 			item_flags |= MLX5_FLOW_LAYER_VXLAN;
3236c55c2bf3SAlex Vesker 			break;
32377aa6c077SSuanming Mou 		case RTE_FLOW_ITEM_TYPE_TX_QUEUE:
32387aa6c077SSuanming Mou 			ret = mlx5dr_definer_conv_item_tx_queue(&cd, items, i);
32397aa6c077SSuanming Mou 			item_flags |= MLX5_FLOW_ITEM_SQ;
32407aa6c077SSuanming Mou 			break;
3241c55c2bf3SAlex Vesker 		case MLX5_RTE_FLOW_ITEM_TYPE_SQ:
3242c55c2bf3SAlex Vesker 			ret = mlx5dr_definer_conv_item_sq(&cd, items, i);
3243c55c2bf3SAlex Vesker 			item_flags |= MLX5_FLOW_ITEM_SQ;
3244c55c2bf3SAlex Vesker 			break;
3245c55c2bf3SAlex Vesker 		case RTE_FLOW_ITEM_TYPE_TAG:
3246c55c2bf3SAlex Vesker 		case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
3247c55c2bf3SAlex Vesker 			ret = mlx5dr_definer_conv_item_tag(&cd, items, i);
3248c55c2bf3SAlex Vesker 			item_flags |= MLX5_FLOW_ITEM_TAG;
3249c55c2bf3SAlex Vesker 			break;
3250c55c2bf3SAlex Vesker 		case RTE_FLOW_ITEM_TYPE_META:
32512b45a773SMichael Baum 			ret = mlx5dr_definer_conv_item_metadata(&cd, items, i);
3252c55c2bf3SAlex Vesker 			item_flags |= MLX5_FLOW_ITEM_METADATA;
3253c55c2bf3SAlex Vesker 			break;
3254c55c2bf3SAlex Vesker 		case RTE_FLOW_ITEM_TYPE_GRE:
3255c55c2bf3SAlex Vesker 			ret = mlx5dr_definer_conv_item_gre(&cd, items, i);
3256c55c2bf3SAlex Vesker 			item_flags |= MLX5_FLOW_LAYER_GRE;
3257c55c2bf3SAlex Vesker 			break;
3258c55c2bf3SAlex Vesker 		case RTE_FLOW_ITEM_TYPE_GRE_OPTION:
3259c55c2bf3SAlex Vesker 			ret = mlx5dr_definer_conv_item_gre_opt(&cd, items, i);
3260c55c2bf3SAlex Vesker 			item_flags |= MLX5_FLOW_LAYER_GRE;
3261c55c2bf3SAlex Vesker 			break;
3262c55c2bf3SAlex Vesker 		case RTE_FLOW_ITEM_TYPE_GRE_KEY:
3263c55c2bf3SAlex Vesker 			ret = mlx5dr_definer_conv_item_gre_key(&cd, items, i);
3264c55c2bf3SAlex Vesker 			item_flags |= MLX5_FLOW_LAYER_GRE_KEY;
3265c55c2bf3SAlex Vesker 			break;
3266c55c2bf3SAlex Vesker 		case RTE_FLOW_ITEM_TYPE_INTEGRITY:
3267c55c2bf3SAlex Vesker 			ret = mlx5dr_definer_conv_item_integrity(&cd, items, i);
32684c4e04d4SAlexander Kozyrev 			item_flags |= MLX5_FLOW_ITEM_INTEGRITY;
3269c55c2bf3SAlex Vesker 			break;
3270c55c2bf3SAlex Vesker 		case RTE_FLOW_ITEM_TYPE_CONNTRACK:
3271c55c2bf3SAlex Vesker 			ret = mlx5dr_definer_conv_item_conntrack(&cd, items, i);
3272c55c2bf3SAlex Vesker 			break;
3273c55c2bf3SAlex Vesker 		case RTE_FLOW_ITEM_TYPE_ICMP:
3274c55c2bf3SAlex Vesker 			ret = mlx5dr_definer_conv_item_icmp(&cd, items, i);
3275c55c2bf3SAlex Vesker 			item_flags |= MLX5_FLOW_LAYER_ICMP;
3276c55c2bf3SAlex Vesker 			break;
3277c55c2bf3SAlex Vesker 		case RTE_FLOW_ITEM_TYPE_ICMP6:
3278c55c2bf3SAlex Vesker 			ret = mlx5dr_definer_conv_item_icmp6(&cd, items, i);
3279c55c2bf3SAlex Vesker 			item_flags |= MLX5_FLOW_LAYER_ICMP6;
3280c55c2bf3SAlex Vesker 			break;
328101314192SLeo Xu 		case RTE_FLOW_ITEM_TYPE_ICMP6_ECHO_REQUEST:
328201314192SLeo Xu 		case RTE_FLOW_ITEM_TYPE_ICMP6_ECHO_REPLY:
328301314192SLeo Xu 			ret = mlx5dr_definer_conv_item_icmp6_echo(&cd, items, i);
328401314192SLeo Xu 			item_flags |= MLX5_FLOW_LAYER_ICMP6;
328501314192SLeo Xu 			break;
3286c55c2bf3SAlex Vesker 		case RTE_FLOW_ITEM_TYPE_METER_COLOR:
3287c55c2bf3SAlex Vesker 			ret = mlx5dr_definer_conv_item_meter_color(&cd, items, i);
3288c55c2bf3SAlex Vesker 			item_flags |= MLX5_FLOW_ITEM_METER_COLOR;
3289c55c2bf3SAlex Vesker 			break;
32909fa0e142SGregory Etelson 		case RTE_FLOW_ITEM_TYPE_QUOTA:
32919fa0e142SGregory Etelson 			ret = mlx5dr_definer_conv_item_quota(&cd, items, i);
32929fa0e142SGregory Etelson 			item_flags |= MLX5_FLOW_ITEM_QUOTA;
32939fa0e142SGregory Etelson 			break;
329400e57916SRongwei Liu 		case RTE_FLOW_ITEM_TYPE_IPV6_ROUTING_EXT:
329500e57916SRongwei Liu 			ret = mlx5dr_definer_conv_item_ipv6_routing_ext(&cd, items, i);
329600e57916SRongwei Liu 			item_flags |= cd.tunnel ? MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT :
329700e57916SRongwei Liu 						  MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT;
329800e57916SRongwei Liu 			break;
329981cf20a2SHamdan Igbaria 		case RTE_FLOW_ITEM_TYPE_ESP:
330081cf20a2SHamdan Igbaria 			ret = mlx5dr_definer_conv_item_esp(&cd, items, i);
330181cf20a2SHamdan Igbaria 			item_flags |= MLX5_FLOW_ITEM_ESP;
330281cf20a2SHamdan Igbaria 			break;
33038c0ca752SRongwei Liu 		case RTE_FLOW_ITEM_TYPE_FLEX:
33048c0ca752SRongwei Liu 			ret = mlx5dr_definer_conv_item_flex_parser(&cd, items, i);
3305624ca89bSViacheslav Ovsiienko 			if (ret == 0) {
3306624ca89bSViacheslav Ovsiienko 				enum rte_flow_item_flex_tunnel_mode tunnel_mode =
3307624ca89bSViacheslav Ovsiienko 								FLEX_TUNNEL_MODE_SINGLE;
3308624ca89bSViacheslav Ovsiienko 
3309624ca89bSViacheslav Ovsiienko 				ret = mlx5_flex_get_tunnel_mode(items, &tunnel_mode);
3310624ca89bSViacheslav Ovsiienko 				if (tunnel_mode == FLEX_TUNNEL_MODE_TUNNEL)
3311624ca89bSViacheslav Ovsiienko 					item_flags |= MLX5_FLOW_ITEM_FLEX_TUNNEL;
3312624ca89bSViacheslav Ovsiienko 				else
33138c0ca752SRongwei Liu 					item_flags |= cd.tunnel ? MLX5_FLOW_ITEM_INNER_FLEX :
33148c0ca752SRongwei Liu 								  MLX5_FLOW_ITEM_OUTER_FLEX;
3315624ca89bSViacheslav Ovsiienko 			}
33168c0ca752SRongwei Liu 			break;
33175bf14a4bSErez Shitrit 		case RTE_FLOW_ITEM_TYPE_MPLS:
33185bf14a4bSErez Shitrit 			ret = mlx5dr_definer_conv_item_mpls(&cd, items, i);
33195bf14a4bSErez Shitrit 			item_flags |= MLX5_FLOW_LAYER_MPLS;
33205bf14a4bSErez Shitrit 			cd.mpls_idx++;
33215bf14a4bSErez Shitrit 			break;
332243b5adbaSAlex Vesker 		case RTE_FLOW_ITEM_TYPE_GENEVE:
332343b5adbaSAlex Vesker 			ret = mlx5dr_definer_conv_item_geneve(&cd, items, i);
332443b5adbaSAlex Vesker 			item_flags |= MLX5_FLOW_LAYER_GENEVE;
332543b5adbaSAlex Vesker 			break;
33268f8dad42SAlex Vesker 		case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
33278f8dad42SAlex Vesker 			ret = mlx5dr_definer_conv_item_geneve_opt(&cd, items, i);
33288f8dad42SAlex Vesker 			item_flags |= MLX5_FLOW_LAYER_GENEVE_OPT;
33298f8dad42SAlex Vesker 			break;
33307bf29065SDong Zhou 		case RTE_FLOW_ITEM_TYPE_IB_BTH:
33317bf29065SDong Zhou 			ret = mlx5dr_definer_conv_item_ib_l4(&cd, items, i);
33327bf29065SDong Zhou 			item_flags |= MLX5_FLOW_ITEM_IB_BTH;
33337bf29065SDong Zhou 			break;
3334ad17988aSAlexander Kozyrev 		case RTE_FLOW_ITEM_TYPE_PTYPE:
3335ad17988aSAlexander Kozyrev 			ret = mlx5dr_definer_conv_item_ptype(&cd, items, i);
3336ad17988aSAlexander Kozyrev 			item_flags |= MLX5_FLOW_ITEM_PTYPE;
3337ad17988aSAlexander Kozyrev 			break;
3338fcd7b8c6SErez Shitrit 		case RTE_FLOW_ITEM_TYPE_RANDOM:
3339fcd7b8c6SErez Shitrit 			ret = mlx5dr_definer_conv_item_random(&cd, items, i);
3340fcd7b8c6SErez Shitrit 			item_flags |= MLX5_FLOW_ITEM_RANDOM;
3341fcd7b8c6SErez Shitrit 			break;
3342f6164649SGavin Li 		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3343f6164649SGavin Li 			ret = mlx5dr_definer_conv_item_vxlan_gpe(&cd, items, i);
3344f6164649SGavin Li 			item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
3345f6164649SGavin Li 			break;
3346a5230507SHamdan Igbaria 		case RTE_FLOW_ITEM_TYPE_COMPARE:
3347a5230507SHamdan Igbaria 			if (i) {
3348a5230507SHamdan Igbaria 				DR_LOG(ERR, "Compare matcher not supported for more than one item");
3349a5230507SHamdan Igbaria 				goto not_supp;
3350a5230507SHamdan Igbaria 			}
33512b45a773SMichael Baum 			ret = mlx5dr_definer_conv_item_compare(&cd, items, i);
3352a5230507SHamdan Igbaria 			item_flags |= MLX5_FLOW_ITEM_COMPARE;
3353a5230507SHamdan Igbaria 			matcher->flags |= MLX5DR_MATCHER_FLAGS_COMPARE;
3354a5230507SHamdan Igbaria 			break;
3355b3358782SGavin Li 		case RTE_FLOW_ITEM_TYPE_NSH:
3356b3358782SGavin Li 			item_flags |= MLX5_FLOW_ITEM_NSH;
3357b3358782SGavin Li 			break;
33584c23d4e5SGregory Etelson 		case RTE_FLOW_ITEM_TYPE_VOID:
33594c23d4e5SGregory Etelson 			break;
33606c299801SDong Zhou 		case RTE_FLOW_ITEM_TYPE_NVGRE:
33616c299801SDong Zhou 			ret = mlx5dr_definer_conv_item_nvgre(&cd, items, i);
33626c299801SDong Zhou 			item_flags |= MLX5_FLOW_LAYER_NVGRE;
33636c299801SDong Zhou 			break;
3364c55c2bf3SAlex Vesker 		default:
3365c55c2bf3SAlex Vesker 			DR_LOG(ERR, "Unsupported item type %d", items->type);
3366a5230507SHamdan Igbaria 			goto not_supp;
3367c55c2bf3SAlex Vesker 		}
3368c55c2bf3SAlex Vesker 
33695bf14a4bSErez Shitrit 		cd.last_item = items->type;
33705bf14a4bSErez Shitrit 
3371c55c2bf3SAlex Vesker 		if (ret) {
3372c55c2bf3SAlex Vesker 			DR_LOG(ERR, "Failed processing item type: %d", items->type);
3373c55c2bf3SAlex Vesker 			return ret;
3374c55c2bf3SAlex Vesker 		}
3375c55c2bf3SAlex Vesker 	}
3376c55c2bf3SAlex Vesker 
3377c55c2bf3SAlex Vesker 	mt->item_flags = item_flags;
3378c55c2bf3SAlex Vesker 
33799732ffe1SAlex Vesker 	/* Fill in headers layout and allocate fc & fcr array on mt */
338027ac2da9SAlex Vesker 	ret = mlx5dr_definer_mt_set_fc(mt, fc, hl);
338127ac2da9SAlex Vesker 	if (ret) {
338227ac2da9SAlex Vesker 		DR_LOG(ERR, "Failed to set field copy to match template");
338327ac2da9SAlex Vesker 		return ret;
3384c55c2bf3SAlex Vesker 	}
3385c55c2bf3SAlex Vesker 
3386c55c2bf3SAlex Vesker 	return 0;
3387a5230507SHamdan Igbaria 
3388a5230507SHamdan Igbaria not_supp:
3389a5230507SHamdan Igbaria 	rte_errno = ENOTSUP;
3390a5230507SHamdan Igbaria 	return rte_errno;
3391c55c2bf3SAlex Vesker }
3392c55c2bf3SAlex Vesker 
3393c55c2bf3SAlex Vesker static int
3394c55c2bf3SAlex Vesker mlx5dr_definer_find_byte_in_tag(struct mlx5dr_definer *definer,
3395c55c2bf3SAlex Vesker 				uint32_t hl_byte_off,
3396c55c2bf3SAlex Vesker 				uint32_t *tag_byte_off)
3397c55c2bf3SAlex Vesker {
3398c55c2bf3SAlex Vesker 	uint8_t byte_offset;
33991e6cb8f2SAlex Vesker 	int i, dw_to_scan;
34001e6cb8f2SAlex Vesker 
34011e6cb8f2SAlex Vesker 	/* Avoid accessing unused DW selectors */
34021e6cb8f2SAlex Vesker 	dw_to_scan = mlx5dr_definer_is_jumbo(definer) ?
34031e6cb8f2SAlex Vesker 		DW_SELECTORS : DW_SELECTORS_MATCH;
3404c55c2bf3SAlex Vesker 
3405c55c2bf3SAlex Vesker 	/* Add offset since each DW covers multiple BYTEs */
3406c55c2bf3SAlex Vesker 	byte_offset = hl_byte_off % DW_SIZE;
34071e6cb8f2SAlex Vesker 	for (i = 0; i < dw_to_scan; i++) {
3408c55c2bf3SAlex Vesker 		if (definer->dw_selector[i] == hl_byte_off / DW_SIZE) {
3409c55c2bf3SAlex Vesker 			*tag_byte_off = byte_offset + DW_SIZE * (DW_SELECTORS - i - 1);
3410c55c2bf3SAlex Vesker 			return 0;
3411c55c2bf3SAlex Vesker 		}
3412c55c2bf3SAlex Vesker 	}
3413c55c2bf3SAlex Vesker 
3414c55c2bf3SAlex Vesker 	/* Add offset to skip DWs in definer */
3415c55c2bf3SAlex Vesker 	byte_offset = DW_SIZE * DW_SELECTORS;
3416c55c2bf3SAlex Vesker 	/* Iterate in reverse since the code uses bytes from 7 -> 0 */
3417c55c2bf3SAlex Vesker 	for (i = BYTE_SELECTORS; i-- > 0 ;) {
3418c55c2bf3SAlex Vesker 		if (definer->byte_selector[i] == hl_byte_off) {
3419c55c2bf3SAlex Vesker 			*tag_byte_off = byte_offset + (BYTE_SELECTORS - i - 1);
3420c55c2bf3SAlex Vesker 			return 0;
3421c55c2bf3SAlex Vesker 		}
3422c55c2bf3SAlex Vesker 	}
3423c55c2bf3SAlex Vesker 
3424c55c2bf3SAlex Vesker 	/* The hl byte offset must be part of the definer */
3425c55c2bf3SAlex Vesker 	DR_LOG(INFO, "Failed to map to definer, HL byte [%d] not found", byte_offset);
3426c55c2bf3SAlex Vesker 	rte_errno = EINVAL;
3427c55c2bf3SAlex Vesker 	return rte_errno;
3428c55c2bf3SAlex Vesker }
3429c55c2bf3SAlex Vesker 
3430c55c2bf3SAlex Vesker static int
3431c55c2bf3SAlex Vesker mlx5dr_definer_fc_bind(struct mlx5dr_definer *definer,
3432c55c2bf3SAlex Vesker 		       struct mlx5dr_definer_fc *fc,
3433c55c2bf3SAlex Vesker 		       uint32_t fc_sz)
3434c55c2bf3SAlex Vesker {
3435c55c2bf3SAlex Vesker 	uint32_t tag_offset = 0;
3436c55c2bf3SAlex Vesker 	int ret, byte_diff;
3437c55c2bf3SAlex Vesker 	uint32_t i;
3438c55c2bf3SAlex Vesker 
3439c55c2bf3SAlex Vesker 	for (i = 0; i < fc_sz; i++) {
3440c55c2bf3SAlex Vesker 		/* Map header layout byte offset to byte offset in tag */
3441c55c2bf3SAlex Vesker 		ret = mlx5dr_definer_find_byte_in_tag(definer, fc->byte_off, &tag_offset);
3442c55c2bf3SAlex Vesker 		if (ret)
3443c55c2bf3SAlex Vesker 			return ret;
3444c55c2bf3SAlex Vesker 
3445c55c2bf3SAlex Vesker 		/* Move setter based on the location in the definer */
3446c55c2bf3SAlex Vesker 		byte_diff = fc->byte_off % DW_SIZE - tag_offset % DW_SIZE;
3447c55c2bf3SAlex Vesker 		fc->bit_off = fc->bit_off + byte_diff * BITS_IN_BYTE;
3448c55c2bf3SAlex Vesker 
3449c55c2bf3SAlex Vesker 		/* Update offset in headers layout to offset in tag */
3450c55c2bf3SAlex Vesker 		fc->byte_off = tag_offset;
3451c55c2bf3SAlex Vesker 		fc++;
3452c55c2bf3SAlex Vesker 	}
3453c55c2bf3SAlex Vesker 
3454c55c2bf3SAlex Vesker 	return 0;
3455c55c2bf3SAlex Vesker }
3456c55c2bf3SAlex Vesker 
3457c55c2bf3SAlex Vesker static bool
3458c55c2bf3SAlex Vesker mlx5dr_definer_best_hl_fit_recu(struct mlx5dr_definer_sel_ctrl *ctrl,
3459c55c2bf3SAlex Vesker 				uint32_t cur_dw,
3460c55c2bf3SAlex Vesker 				uint32_t *data)
3461c55c2bf3SAlex Vesker {
3462c55c2bf3SAlex Vesker 	uint8_t bytes_set;
3463c55c2bf3SAlex Vesker 	int byte_idx;
3464c55c2bf3SAlex Vesker 	bool ret;
3465c55c2bf3SAlex Vesker 	int i;
3466c55c2bf3SAlex Vesker 
3467c55c2bf3SAlex Vesker 	/* Reached end, nothing left to do */
3468c55c2bf3SAlex Vesker 	if (cur_dw == MLX5_ST_SZ_DW(definer_hl))
3469c55c2bf3SAlex Vesker 		return true;
3470c55c2bf3SAlex Vesker 
3471c55c2bf3SAlex Vesker 	/* No data set, can skip to next DW */
3472c55c2bf3SAlex Vesker 	while (!*data) {
3473c55c2bf3SAlex Vesker 		cur_dw++;
3474c55c2bf3SAlex Vesker 		data++;
3475c55c2bf3SAlex Vesker 
3476c55c2bf3SAlex Vesker 		/* Reached end, nothing left to do */
3477c55c2bf3SAlex Vesker 		if (cur_dw == MLX5_ST_SZ_DW(definer_hl))
3478c55c2bf3SAlex Vesker 			return true;
3479c55c2bf3SAlex Vesker 	}
3480c55c2bf3SAlex Vesker 
3481c55c2bf3SAlex Vesker 	/* Used all DW selectors and Byte selectors, no possible solution */
3482c55c2bf3SAlex Vesker 	if (ctrl->allowed_full_dw == ctrl->used_full_dw &&
3483c55c2bf3SAlex Vesker 	    ctrl->allowed_lim_dw == ctrl->used_lim_dw &&
3484c55c2bf3SAlex Vesker 	    ctrl->allowed_bytes == ctrl->used_bytes)
3485c55c2bf3SAlex Vesker 		return false;
3486c55c2bf3SAlex Vesker 
3487c55c2bf3SAlex Vesker 	/* Try to use limited DW selectors */
3488c55c2bf3SAlex Vesker 	if (ctrl->allowed_lim_dw > ctrl->used_lim_dw && cur_dw < 64) {
3489c55c2bf3SAlex Vesker 		ctrl->lim_dw_selector[ctrl->used_lim_dw++] = cur_dw;
3490c55c2bf3SAlex Vesker 
3491c55c2bf3SAlex Vesker 		ret = mlx5dr_definer_best_hl_fit_recu(ctrl, cur_dw + 1, data + 1);
3492c55c2bf3SAlex Vesker 		if (ret)
3493c55c2bf3SAlex Vesker 			return ret;
3494c55c2bf3SAlex Vesker 
3495c55c2bf3SAlex Vesker 		ctrl->lim_dw_selector[--ctrl->used_lim_dw] = 0;
3496c55c2bf3SAlex Vesker 	}
3497c55c2bf3SAlex Vesker 
3498c55c2bf3SAlex Vesker 	/* Try to use DW selectors */
3499c55c2bf3SAlex Vesker 	if (ctrl->allowed_full_dw > ctrl->used_full_dw) {
3500c55c2bf3SAlex Vesker 		ctrl->full_dw_selector[ctrl->used_full_dw++] = cur_dw;
3501c55c2bf3SAlex Vesker 
3502c55c2bf3SAlex Vesker 		ret = mlx5dr_definer_best_hl_fit_recu(ctrl, cur_dw + 1, data + 1);
3503c55c2bf3SAlex Vesker 		if (ret)
3504c55c2bf3SAlex Vesker 			return ret;
3505c55c2bf3SAlex Vesker 
3506c55c2bf3SAlex Vesker 		ctrl->full_dw_selector[--ctrl->used_full_dw] = 0;
3507c55c2bf3SAlex Vesker 	}
3508c55c2bf3SAlex Vesker 
3509c55c2bf3SAlex Vesker 	/* No byte selector for offset bigger than 255 */
3510c55c2bf3SAlex Vesker 	if (cur_dw * DW_SIZE > 255)
3511c55c2bf3SAlex Vesker 		return false;
3512c55c2bf3SAlex Vesker 
3513c55c2bf3SAlex Vesker 	bytes_set = !!(0x000000ff & *data) +
3514c55c2bf3SAlex Vesker 		    !!(0x0000ff00 & *data) +
3515c55c2bf3SAlex Vesker 		    !!(0x00ff0000 & *data) +
3516c55c2bf3SAlex Vesker 		    !!(0xff000000 & *data);
3517c55c2bf3SAlex Vesker 
3518c55c2bf3SAlex Vesker 	/* Check if there are enough byte selectors left */
3519c55c2bf3SAlex Vesker 	if (bytes_set + ctrl->used_bytes > ctrl->allowed_bytes)
3520c55c2bf3SAlex Vesker 		return false;
3521c55c2bf3SAlex Vesker 
3522c55c2bf3SAlex Vesker 	/* Try to use Byte selectors */
3523c55c2bf3SAlex Vesker 	for (i = 0; i < DW_SIZE; i++)
3524c55c2bf3SAlex Vesker 		if ((0xff000000 >> (i * BITS_IN_BYTE)) & rte_be_to_cpu_32(*data)) {
3525c55c2bf3SAlex Vesker 			/* Use byte selectors high to low */
3526c55c2bf3SAlex Vesker 			byte_idx = ctrl->allowed_bytes - ctrl->used_bytes - 1;
3527c55c2bf3SAlex Vesker 			ctrl->byte_selector[byte_idx] = cur_dw * DW_SIZE + i;
3528c55c2bf3SAlex Vesker 			ctrl->used_bytes++;
3529c55c2bf3SAlex Vesker 		}
3530c55c2bf3SAlex Vesker 
3531c55c2bf3SAlex Vesker 	ret = mlx5dr_definer_best_hl_fit_recu(ctrl, cur_dw + 1, data + 1);
3532c55c2bf3SAlex Vesker 	if (ret)
3533c55c2bf3SAlex Vesker 		return ret;
3534c55c2bf3SAlex Vesker 
3535c55c2bf3SAlex Vesker 	for (i = 0; i < DW_SIZE; i++)
3536c55c2bf3SAlex Vesker 		if ((0xff << (i * BITS_IN_BYTE)) & rte_be_to_cpu_32(*data)) {
3537c55c2bf3SAlex Vesker 			ctrl->used_bytes--;
3538c55c2bf3SAlex Vesker 			byte_idx = ctrl->allowed_bytes - ctrl->used_bytes - 1;
3539c55c2bf3SAlex Vesker 			ctrl->byte_selector[byte_idx] = 0;
3540c55c2bf3SAlex Vesker 		}
3541c55c2bf3SAlex Vesker 
3542c55c2bf3SAlex Vesker 	return false;
3543c55c2bf3SAlex Vesker }
3544c55c2bf3SAlex Vesker 
3545c55c2bf3SAlex Vesker static void
354627ac2da9SAlex Vesker mlx5dr_definer_copy_sel_ctrl(struct mlx5dr_definer_sel_ctrl *ctrl,
3547c55c2bf3SAlex Vesker 			     struct mlx5dr_definer *definer)
3548c55c2bf3SAlex Vesker {
3549c55c2bf3SAlex Vesker 	memcpy(definer->byte_selector, ctrl->byte_selector, ctrl->allowed_bytes);
3550c55c2bf3SAlex Vesker 	memcpy(definer->dw_selector, ctrl->full_dw_selector, ctrl->allowed_full_dw);
3551c55c2bf3SAlex Vesker 	memcpy(definer->dw_selector + ctrl->allowed_full_dw,
3552c55c2bf3SAlex Vesker 	       ctrl->lim_dw_selector, ctrl->allowed_lim_dw);
3553c55c2bf3SAlex Vesker }
3554c55c2bf3SAlex Vesker 
/* Build a range definer layout covering every range field (fcr) of all
 * match templates of @matcher, binding each field to a tag byte offset.
 *
 * @param definer  Output definer; selectors and type are filled on success.
 * @param matcher  Matcher whose templates' fcr arrays are scanned and whose
 *                 fcr->byte_off values are rewritten to tag offsets.
 * @return 0 on success, rte_errno (ENOTSUP) when the selectors cannot cover
 *         all range fields.
 */
static int
mlx5dr_definer_find_best_range_fit(struct mlx5dr_definer *definer,
				   struct mlx5dr_matcher *matcher)
{
	/* Per-field binding cache so the same field in different templates
	 * maps to the same tag byte offset.
	 */
	uint8_t tag_byte_offset[MLX5DR_DEFINER_FNAME_MAX] = {0};
	uint8_t field_select[MLX5DR_DEFINER_FNAME_MAX] = {0};
	struct mlx5dr_definer_sel_ctrl ctrl = {0};
	uint32_t byte_offset, algn_byte_off;
	struct mlx5dr_definer_fc *fcr;
	bool require_dw;
	int idx, i, j;

	/* Try to create a range definer */
	ctrl.allowed_full_dw = DW_SELECTORS_RANGE;
	ctrl.allowed_bytes = BYTE_SELECTORS_RANGE;

	/* Multiple fields cannot share the same DW for range match.
	 * The HW doesn't recognize each field but compares the full dw.
	 * For example definer DW consists of FieldA_FieldB
	 * FieldA: Mask 0xFFFF range 0x1 to 0x2
	 * FieldB: Mask 0xFFFF range 0x3 to 0x4
	 * STE DW range will be 0x00010003 - 0x00020004
	 * This will cause invalid match for FieldB if FieldA=1 and FieldB=8
	 * Since 0x10003 < 0x10008 < 0x20004
	 */
	for (i = 0; i < matcher->num_of_mt; i++) {
		for (j = 0; j < matcher->mt[i].fcr_sz; j++) {
			fcr = &matcher->mt[i].fcr[j];

			/* Found - Reuse previous mt binding */
			if (field_select[fcr->fname]) {
				fcr->byte_off = tag_byte_offset[fcr->fname];
				continue;
			}

			/* Not found */
			/* Byte selectors can only address offsets below 64 DWs */
			require_dw = fcr->byte_off >= (64 * DW_SIZE);
			if (require_dw || ctrl.used_bytes == ctrl.allowed_bytes) {
				/* Try to cover using DW selector */
				if (ctrl.used_full_dw == ctrl.allowed_full_dw)
					goto not_supported;

				ctrl.full_dw_selector[ctrl.used_full_dw++] =
					fcr->byte_off / DW_SIZE;

				/* Bind DW */
				/* DW selectors fill the tag from its end backwards */
				idx = ctrl.used_full_dw - 1;
				byte_offset = fcr->byte_off % DW_SIZE;
				byte_offset += DW_SIZE * (DW_SELECTORS - idx - 1);
			} else {
				/* Try to cover using Bytes selectors */
				if (ctrl.used_bytes == ctrl.allowed_bytes)
					goto not_supported;

				/* A whole DW-aligned group of 4 byte selectors is
				 * consumed, highest byte first, so the field's DW is
				 * contiguous in the tag.
				 */
				algn_byte_off = DW_SIZE * (fcr->byte_off / DW_SIZE);
				ctrl.byte_selector[ctrl.used_bytes++] = algn_byte_off + 3;
				ctrl.byte_selector[ctrl.used_bytes++] = algn_byte_off + 2;
				ctrl.byte_selector[ctrl.used_bytes++] = algn_byte_off + 1;
				ctrl.byte_selector[ctrl.used_bytes++] = algn_byte_off;

				/* Bind BYTE */
				byte_offset = DW_SIZE * DW_SELECTORS;
				byte_offset += BYTE_SELECTORS - ctrl.used_bytes;
				byte_offset += fcr->byte_off % DW_SIZE;
			}

			/* Record binding for reuse by later templates */
			fcr->byte_off = byte_offset;
			tag_byte_offset[fcr->fname] = byte_offset;
			field_select[fcr->fname] = 1;
		}
	}

	mlx5dr_definer_copy_sel_ctrl(&ctrl, definer);
	definer->type = MLX5DR_DEFINER_TYPE_RANGE;

	return 0;

not_supported:
	DR_LOG(ERR, "Unable to find supporting range definer combination");
	rte_errno = ENOTSUP;
	return rte_errno;
}
36379732ffe1SAlex Vesker 
/* Reorder DW selectors so that fields with good entropy (outer IPv4
 * source/destination addresses) land on DW slots marked as good hash
 * distributors for the given table size.
 *
 * @param definer  Match definer whose dw_selector[] is permuted in place.
 * @param num_log  Log2 of the rule table size; indexes mlx5dr_optimal_dist_dw[].
 */
static void mlx5dr_definer_optimize_order(struct mlx5dr_definer *definer, int num_log)
{
	uint8_t hl_prio[MLX5DR_DEFINER_HL_OPT_MAX];
	int dw = 0, i = 0, j;
	int *dw_flag;
	uint8_t tmp;

	/* dw_flag[k] != 0 marks selector slot k as a good hash contributor */
	dw_flag = mlx5dr_optimal_dist_dw[num_log];
	hl_prio[0] = __mlx5_dw_off(definer_hl, ipv4_src_dest_outer.source_address);
	hl_prio[1] = __mlx5_dw_off(definer_hl, ipv4_src_dest_outer.destination_address);

	while (i < MLX5DR_DEFINER_HL_OPT_MAX) {
		j = 0;
		/* Finding a candidate to improve its hash distribution */
		while (j < DW_SELECTORS_MATCH && (hl_prio[i] != definer->dw_selector[j]))
			j++;

		/* Finding a DW location with good hash distribution */
		while (dw < DW_SELECTORS_MATCH && dw_flag[dw] == 0)
			dw++;

		/* Swap the candidate into the good slot. Note that 'dw' is not
		 * reset between iterations, so each priority field is placed in
		 * a distinct good slot.
		 */
		if (dw < DW_SELECTORS_MATCH && j < DW_SELECTORS_MATCH) {
			tmp = definer->dw_selector[dw];
			definer->dw_selector[dw] = definer->dw_selector[j];
			definer->dw_selector[j] = tmp;
			dw++;
		}
		i++;
	}
}
366888ff4179SItamar Gozlan 
36699732ffe1SAlex Vesker static int
36709732ffe1SAlex Vesker mlx5dr_definer_find_best_match_fit(struct mlx5dr_context *ctx,
367127ac2da9SAlex Vesker 				   struct mlx5dr_definer *definer,
3672c55c2bf3SAlex Vesker 				   uint8_t *hl)
3673c55c2bf3SAlex Vesker {
3674c55c2bf3SAlex Vesker 	struct mlx5dr_definer_sel_ctrl ctrl = {0};
3675c55c2bf3SAlex Vesker 	bool found;
3676c55c2bf3SAlex Vesker 
3677c55c2bf3SAlex Vesker 	/* Try to create a match definer */
3678c55c2bf3SAlex Vesker 	ctrl.allowed_full_dw = DW_SELECTORS_MATCH;
3679c55c2bf3SAlex Vesker 	ctrl.allowed_lim_dw = 0;
3680c55c2bf3SAlex Vesker 	ctrl.allowed_bytes = BYTE_SELECTORS;
3681c55c2bf3SAlex Vesker 
3682c55c2bf3SAlex Vesker 	found = mlx5dr_definer_best_hl_fit_recu(&ctrl, 0, (uint32_t *)hl);
3683c55c2bf3SAlex Vesker 	if (found) {
368427ac2da9SAlex Vesker 		mlx5dr_definer_copy_sel_ctrl(&ctrl, definer);
368527ac2da9SAlex Vesker 		definer->type = MLX5DR_DEFINER_TYPE_MATCH;
3686c55c2bf3SAlex Vesker 		return 0;
3687c55c2bf3SAlex Vesker 	}
3688c55c2bf3SAlex Vesker 
3689c55c2bf3SAlex Vesker 	/* Try to create a full/limited jumbo definer */
3690c55c2bf3SAlex Vesker 	ctrl.allowed_full_dw = ctx->caps->full_dw_jumbo_support ? DW_SELECTORS :
3691c55c2bf3SAlex Vesker 								  DW_SELECTORS_MATCH;
3692c55c2bf3SAlex Vesker 	ctrl.allowed_lim_dw = ctx->caps->full_dw_jumbo_support ? 0 :
3693c55c2bf3SAlex Vesker 								 DW_SELECTORS_LIMITED;
3694c55c2bf3SAlex Vesker 	ctrl.allowed_bytes = BYTE_SELECTORS;
3695c55c2bf3SAlex Vesker 
3696c55c2bf3SAlex Vesker 	found = mlx5dr_definer_best_hl_fit_recu(&ctrl, 0, (uint32_t *)hl);
3697c55c2bf3SAlex Vesker 	if (found) {
369827ac2da9SAlex Vesker 		mlx5dr_definer_copy_sel_ctrl(&ctrl, definer);
369927ac2da9SAlex Vesker 		definer->type = MLX5DR_DEFINER_TYPE_JUMBO;
3700c55c2bf3SAlex Vesker 		return 0;
3701c55c2bf3SAlex Vesker 	}
3702c55c2bf3SAlex Vesker 
3703e014d5e2SErez Shitrit 	DR_LOG(DEBUG, "Unable to find supporting match/jumbo definer combination");
37044c23d4e5SGregory Etelson 	rte_errno = E2BIG;
3705c55c2bf3SAlex Vesker 	return rte_errno;
3706c55c2bf3SAlex Vesker }
3707c55c2bf3SAlex Vesker 
3708c55c2bf3SAlex Vesker static void
3709c55c2bf3SAlex Vesker mlx5dr_definer_create_tag_mask(struct rte_flow_item *items,
3710c55c2bf3SAlex Vesker 			       struct mlx5dr_definer_fc *fc,
3711c55c2bf3SAlex Vesker 			       uint32_t fc_sz,
3712c55c2bf3SAlex Vesker 			       uint8_t *tag)
3713c55c2bf3SAlex Vesker {
3714c55c2bf3SAlex Vesker 	uint32_t i;
3715c55c2bf3SAlex Vesker 
3716c55c2bf3SAlex Vesker 	for (i = 0; i < fc_sz; i++) {
3717c55c2bf3SAlex Vesker 		if (fc->tag_mask_set)
3718c55c2bf3SAlex Vesker 			fc->tag_mask_set(fc, items[fc->item_idx].mask, tag);
3719c55c2bf3SAlex Vesker 		else
3720c55c2bf3SAlex Vesker 			fc->tag_set(fc, items[fc->item_idx].mask, tag);
3721c55c2bf3SAlex Vesker 		fc++;
3722c55c2bf3SAlex Vesker 	}
3723c55c2bf3SAlex Vesker }
3724c55c2bf3SAlex Vesker 
3725c55c2bf3SAlex Vesker void mlx5dr_definer_create_tag(const struct rte_flow_item *items,
3726c55c2bf3SAlex Vesker 			       struct mlx5dr_definer_fc *fc,
3727c55c2bf3SAlex Vesker 			       uint32_t fc_sz,
3728c55c2bf3SAlex Vesker 			       uint8_t *tag)
3729c55c2bf3SAlex Vesker {
3730c55c2bf3SAlex Vesker 	uint32_t i;
3731c55c2bf3SAlex Vesker 
3732c55c2bf3SAlex Vesker 	for (i = 0; i < fc_sz; i++) {
3733c55c2bf3SAlex Vesker 		fc->tag_set(fc, items[fc->item_idx].spec, tag);
3734c55c2bf3SAlex Vesker 		fc++;
3735c55c2bf3SAlex Vesker 	}
3736c55c2bf3SAlex Vesker }
3737c55c2bf3SAlex Vesker 
37389732ffe1SAlex Vesker static uint32_t mlx5dr_definer_get_range_byte_off(uint32_t match_byte_off)
37399732ffe1SAlex Vesker {
37409732ffe1SAlex Vesker 	uint8_t curr_dw_idx = match_byte_off / DW_SIZE;
37419732ffe1SAlex Vesker 	uint8_t new_dw_idx;
37429732ffe1SAlex Vesker 
37439732ffe1SAlex Vesker 	/* Range DW can have the following values 7,8,9,10
37449732ffe1SAlex Vesker 	 * -DW7 is mapped to DW9
37459732ffe1SAlex Vesker 	 * -DW8 is mapped to DW7
37469732ffe1SAlex Vesker 	 * -DW9 is mapped to DW5
37479732ffe1SAlex Vesker 	 * -DW10 is mapped to DW3
37489732ffe1SAlex Vesker 	 * To reduce calculation the following formula is used:
37499732ffe1SAlex Vesker 	 */
37509732ffe1SAlex Vesker 	new_dw_idx = curr_dw_idx * (-2) + 23;
37519732ffe1SAlex Vesker 
37529732ffe1SAlex Vesker 	return new_dw_idx * DW_SIZE + match_byte_off % DW_SIZE;
37539732ffe1SAlex Vesker }
37549732ffe1SAlex Vesker 
37559732ffe1SAlex Vesker void mlx5dr_definer_create_tag_range(const struct rte_flow_item *items,
37569732ffe1SAlex Vesker 				     struct mlx5dr_definer_fc *fc,
37579732ffe1SAlex Vesker 				     uint32_t fc_sz,
37589732ffe1SAlex Vesker 				     uint8_t *tag)
37599732ffe1SAlex Vesker {
37609732ffe1SAlex Vesker 	struct mlx5dr_definer_fc tmp_fc;
37619732ffe1SAlex Vesker 	uint32_t i;
37629732ffe1SAlex Vesker 
37639732ffe1SAlex Vesker 	for (i = 0; i < fc_sz; i++) {
37649732ffe1SAlex Vesker 		tmp_fc = *fc;
37659732ffe1SAlex Vesker 		/* Set MAX value */
37669732ffe1SAlex Vesker 		tmp_fc.byte_off = mlx5dr_definer_get_range_byte_off(fc->byte_off);
37679732ffe1SAlex Vesker 		tmp_fc.tag_set(&tmp_fc, items[fc->item_idx].last, tag);
37689732ffe1SAlex Vesker 		/* Set MIN value */
37699732ffe1SAlex Vesker 		tmp_fc.byte_off += DW_SIZE;
37709732ffe1SAlex Vesker 		tmp_fc.tag_set(&tmp_fc, items[fc->item_idx].spec, tag);
37719732ffe1SAlex Vesker 		fc++;
37729732ffe1SAlex Vesker 	}
37739732ffe1SAlex Vesker }
37749732ffe1SAlex Vesker 
3775c55c2bf3SAlex Vesker int mlx5dr_definer_get_id(struct mlx5dr_definer *definer)
3776c55c2bf3SAlex Vesker {
3777c55c2bf3SAlex Vesker 	return definer->obj->id;
3778c55c2bf3SAlex Vesker }
3779c55c2bf3SAlex Vesker 
3780762fecebSYevgeny Kliteynik int mlx5dr_definer_compare(struct mlx5dr_definer *definer_a,
3781238190f3SAlex Vesker 			   struct mlx5dr_definer *definer_b)
3782238190f3SAlex Vesker {
3783238190f3SAlex Vesker 	int i;
3784238190f3SAlex Vesker 
37852e2d6533SAlex Vesker 	/* Future: Optimize by comparing selectors with valid mask only */
3786238190f3SAlex Vesker 	for (i = 0; i < BYTE_SELECTORS; i++)
3787238190f3SAlex Vesker 		if (definer_a->byte_selector[i] != definer_b->byte_selector[i])
3788238190f3SAlex Vesker 			return 1;
3789238190f3SAlex Vesker 
3790238190f3SAlex Vesker 	for (i = 0; i < DW_SELECTORS; i++)
3791238190f3SAlex Vesker 		if (definer_a->dw_selector[i] != definer_b->dw_selector[i])
3792238190f3SAlex Vesker 			return 1;
3793238190f3SAlex Vesker 
3794238190f3SAlex Vesker 	for (i = 0; i < MLX5DR_JUMBO_TAG_SZ; i++)
3795238190f3SAlex Vesker 		if (definer_a->mask.jumbo[i] != definer_b->mask.jumbo[i])
3796238190f3SAlex Vesker 			return 1;
3797238190f3SAlex Vesker 
3798238190f3SAlex Vesker 	return 0;
3799238190f3SAlex Vesker }
3800238190f3SAlex Vesker 
/* DW selector reordering is only safe for plain (non-jumbo) definers on
 * matchers that don't use FW WQEs, aren't resizable and don't insert by
 * index. Returns non-zero when reordering is allowed.
 */
static int
mlx5dr_definer_optimize_order_supported(struct mlx5dr_definer *match_definer,
					struct mlx5dr_matcher *matcher)
{
	if (mlx5dr_definer_is_jumbo(match_definer))
		return 0;

	if (mlx5dr_matcher_req_fw_wqe(matcher))
		return 0;

	if (mlx5dr_matcher_is_resizable(matcher))
		return 0;

	return !mlx5dr_matcher_is_insert_by_idx(matcher);
}
3810b53a95abSItamar Gozlan 
/* Compute the match (and optional range) definer layouts for a matcher.
 * Converts all match-template items into a shared header-layout bitmap,
 * then selects DW/byte selectors covering it. Compare matchers bypass the
 * fit search and program selectors directly from the field copy entries.
 *
 * @param matcher        Matcher owning the templates; mt[i].fc/fcr arrays are
 *                       allocated here (and freed on failure).
 * @param match_definer  Output layout for match.
 * @param range_definer  Output layout for range, filled only when templates
 *                       contain range fields.
 * @return 0 on success, rte_errno otherwise.
 */
static int
mlx5dr_definer_calc_layout(struct mlx5dr_matcher *matcher,
			   struct mlx5dr_definer *match_definer,
			   struct mlx5dr_definer *range_definer)
{
	struct mlx5dr_context *ctx = matcher->tbl->ctx;
	struct mlx5dr_match_template *mt = matcher->mt;
	struct mlx5dr_definer_fc *fc;
	uint8_t *match_hl;
	int i, ret;

	/* Union header-layout (hl) is used for creating a single definer
	 * field layout used with different bitmasks for hash and match.
	 */
	match_hl = simple_calloc(1, MLX5_ST_SZ_BYTES(definer_hl));
	if (!match_hl) {
		DR_LOG(ERR, "Failed to allocate memory for header layout");
		rte_errno = ENOMEM;
		return rte_errno;
	}

	/* Convert all mt items to header layout (hl)
	 * and allocate the match and range field copy array (fc & fcr).
	 */
	for (i = 0; i < matcher->num_of_mt; i++) {
		ret = mlx5dr_definer_conv_items_to_hl(ctx, &mt[i], match_hl, matcher);
		if (ret) {
			DR_LOG(ERR, "Failed to convert items to header layout");
			goto free_fc;
		}
	}

	if (mlx5dr_matcher_is_compare(matcher)) {
		ret = mlx5dr_matcher_validate_compare_attr(matcher);
		if (ret)
			goto free_fc;

		/* Due some HW limitation need to fill unused
		 * DW's 0-5 and byte selectors with 0xff.
		 */
		for (i = 0; i < DW_SELECTORS_MATCH; i++)
			match_definer->dw_selector[i] = 0xff;

		for (i = 0; i < BYTE_SELECTORS; i++)
			match_definer->byte_selector[i] = 0xff;

		/* Each compare field pins the DW it lives in to its slot */
		for (i = 0; i < mt[0].fc_sz; i++) {
			fc = &mt[0].fc[i];
			match_definer->dw_selector[fc->compare_idx] = fc->byte_off / DW_SIZE;
		}

		goto out;
	}

	/* Find the match definer layout for header layout match union */
	ret = mlx5dr_definer_find_best_match_fit(ctx, match_definer, match_hl);
	if (ret) {
		DR_LOG(DEBUG, "Failed to create match definer from header layout");
		goto free_fc;
	}

	if (mlx5dr_definer_optimize_order_supported(match_definer, matcher))
		mlx5dr_definer_optimize_order(match_definer, matcher->attr.rule.num_log);

	/* Find the range definer layout for match templates fcrs */
	ret = mlx5dr_definer_find_best_range_fit(range_definer, matcher);
	if (ret) {
		DR_LOG(ERR, "Failed to create range definer from header layout");
		goto free_fc;
	}

out:
	simple_free(match_hl);
	return 0;

free_fc:
	/* On failure the fc arrays allocated by conv_items_to_hl are released */
	for (i = 0; i < matcher->num_of_mt; i++)
		if (mt[i].fc)
			simple_free(mt[i].fc);

	simple_free(match_hl);
	return rte_errno;
}
389427ac2da9SAlex Vesker 
38952e2d6533SAlex Vesker int mlx5dr_definer_init_cache(struct mlx5dr_definer_cache **cache)
38962e2d6533SAlex Vesker {
38972e2d6533SAlex Vesker 	struct mlx5dr_definer_cache *new_cache;
38982e2d6533SAlex Vesker 
38992e2d6533SAlex Vesker 	new_cache = simple_calloc(1, sizeof(*new_cache));
39002e2d6533SAlex Vesker 	if (!new_cache) {
39012e2d6533SAlex Vesker 		rte_errno = ENOMEM;
39022e2d6533SAlex Vesker 		return rte_errno;
39032e2d6533SAlex Vesker 	}
39042e2d6533SAlex Vesker 	LIST_INIT(&new_cache->head);
39052e2d6533SAlex Vesker 	*cache = new_cache;
39062e2d6533SAlex Vesker 
39072e2d6533SAlex Vesker 	return 0;
39082e2d6533SAlex Vesker }
39092e2d6533SAlex Vesker 
/* Release the definer cache container.
 * NOTE(review): assumes the cache list is already empty — entries are
 * removed as refcounts drop in mlx5dr_definer_put_obj(); confirm callers
 * drain all definers before teardown.
 */
void mlx5dr_definer_uninit_cache(struct mlx5dr_definer_cache *cache)
{
	simple_free(cache);
}
39142e2d6533SAlex Vesker 
/* Get a devx definer object matching @definer, reusing a cached one when
 * an identical definer (selectors + mask) exists, otherwise creating a new
 * object and inserting it into the cache with refcount 1.
 *
 * @return the devx object, or NULL on failure (rte_errno set).
 */
static struct mlx5dr_devx_obj *
mlx5dr_definer_get_obj(struct mlx5dr_context *ctx,
		       struct mlx5dr_definer *definer)
{
	struct mlx5dr_definer_cache *cache = ctx->definer_cache;
	struct mlx5dr_cmd_definer_create_attr def_attr = {0};
	struct mlx5dr_definer_cache_item *cached_definer;
	struct mlx5dr_devx_obj *obj;

	/* Search definer cache for requested definer */
	LIST_FOREACH(cached_definer, &cache->head, next) {
		if (mlx5dr_definer_compare(&cached_definer->definer, definer))
			continue;

		/* Reuse definer and set LRU (move to be first in the list) */
		LIST_REMOVE(cached_definer, next);
		LIST_INSERT_HEAD(&cache->head, cached_definer, next);
		cached_definer->refcount++;
		return cached_definer->definer.obj;
	}

	/* Allocate and create definer based on the bitmask tag */
	def_attr.match_mask = definer->mask.jumbo;
	def_attr.dw_selector = definer->dw_selector;
	def_attr.byte_selector = definer->byte_selector;

	obj = mlx5dr_cmd_definer_create(ctx->ibv_ctx, &def_attr);
	if (!obj)
		return NULL;

	/* Cache the new object so future identical definers can share it */
	cached_definer = simple_calloc(1, sizeof(*cached_definer));
	if (!cached_definer) {
		rte_errno = ENOMEM;
		goto free_definer_obj;
	}

	memcpy(&cached_definer->definer, definer, sizeof(*definer));
	cached_definer->definer.obj = obj;
	cached_definer->refcount = 1;
	LIST_INSERT_HEAD(&cache->head, cached_definer, next);

	return obj;

free_definer_obj:
	mlx5dr_cmd_destroy_obj(obj);
	return NULL;
}
39622e2d6533SAlex Vesker 
39632e2d6533SAlex Vesker static void
39642e2d6533SAlex Vesker mlx5dr_definer_put_obj(struct mlx5dr_context *ctx,
39652e2d6533SAlex Vesker 		       struct mlx5dr_devx_obj *obj)
39662e2d6533SAlex Vesker {
39672e2d6533SAlex Vesker 	struct mlx5dr_definer_cache_item *cached_definer;
39682e2d6533SAlex Vesker 
39692e2d6533SAlex Vesker 	LIST_FOREACH(cached_definer, &ctx->definer_cache->head, next) {
39702e2d6533SAlex Vesker 		if (cached_definer->definer.obj != obj)
39712e2d6533SAlex Vesker 			continue;
39722e2d6533SAlex Vesker 
39732e2d6533SAlex Vesker 		/* Object found */
39742e2d6533SAlex Vesker 		if (--cached_definer->refcount)
39752e2d6533SAlex Vesker 			return;
39762e2d6533SAlex Vesker 
39772e2d6533SAlex Vesker 		LIST_REMOVE(cached_definer, next);
39782e2d6533SAlex Vesker 		mlx5dr_cmd_destroy_obj(cached_definer->definer.obj);
39792e2d6533SAlex Vesker 		simple_free(cached_definer);
39802e2d6533SAlex Vesker 		return;
39812e2d6533SAlex Vesker 	}
39822e2d6533SAlex Vesker 
39832e2d6533SAlex Vesker 	/* Programming error, object must be part of cache */
39842e2d6533SAlex Vesker 	assert(false);
39852e2d6533SAlex Vesker }
39862e2d6533SAlex Vesker 
398727ac2da9SAlex Vesker static struct mlx5dr_definer *
39882e2d6533SAlex Vesker mlx5dr_definer_alloc(struct mlx5dr_context *ctx,
398927ac2da9SAlex Vesker 		     struct mlx5dr_definer_fc *fc,
399027ac2da9SAlex Vesker 		     int fc_sz,
399127ac2da9SAlex Vesker 		     struct rte_flow_item *items,
39929732ffe1SAlex Vesker 		     struct mlx5dr_definer *layout,
39939732ffe1SAlex Vesker 		     bool bind_fc)
399427ac2da9SAlex Vesker {
399527ac2da9SAlex Vesker 	struct mlx5dr_definer *definer;
399627ac2da9SAlex Vesker 	int ret;
399727ac2da9SAlex Vesker 
399827ac2da9SAlex Vesker 	definer = simple_calloc(1, sizeof(*definer));
399927ac2da9SAlex Vesker 	if (!definer) {
400027ac2da9SAlex Vesker 		DR_LOG(ERR, "Failed to allocate memory for definer");
400127ac2da9SAlex Vesker 		rte_errno = ENOMEM;
400227ac2da9SAlex Vesker 		return NULL;
400327ac2da9SAlex Vesker 	}
400427ac2da9SAlex Vesker 
400527ac2da9SAlex Vesker 	memcpy(definer, layout, sizeof(*definer));
400627ac2da9SAlex Vesker 
400727ac2da9SAlex Vesker 	/* Align field copy array based on given layout */
40089732ffe1SAlex Vesker 	if (bind_fc) {
400927ac2da9SAlex Vesker 		ret = mlx5dr_definer_fc_bind(definer, fc, fc_sz);
401027ac2da9SAlex Vesker 		if (ret) {
401127ac2da9SAlex Vesker 			DR_LOG(ERR, "Failed to bind field copy to definer");
4012c55c2bf3SAlex Vesker 			goto free_definer;
4013c55c2bf3SAlex Vesker 		}
40149732ffe1SAlex Vesker 	}
4015c55c2bf3SAlex Vesker 
4016c55c2bf3SAlex Vesker 	/* Create the tag mask used for definer creation */
401727ac2da9SAlex Vesker 	mlx5dr_definer_create_tag_mask(items, fc, fc_sz, definer->mask.jumbo);
4018c55c2bf3SAlex Vesker 
40192e2d6533SAlex Vesker 	definer->obj = mlx5dr_definer_get_obj(ctx, definer);
402027ac2da9SAlex Vesker 	if (!definer->obj)
402127ac2da9SAlex Vesker 		goto free_definer;
4022c55c2bf3SAlex Vesker 
402327ac2da9SAlex Vesker 	return definer;
402427ac2da9SAlex Vesker 
402527ac2da9SAlex Vesker free_definer:
402627ac2da9SAlex Vesker 	simple_free(definer);
402727ac2da9SAlex Vesker 	return NULL;
402827ac2da9SAlex Vesker }
402927ac2da9SAlex Vesker 
403027ac2da9SAlex Vesker static void
40312e2d6533SAlex Vesker mlx5dr_definer_free(struct mlx5dr_context *ctx,
40322e2d6533SAlex Vesker 		    struct mlx5dr_definer *definer)
403327ac2da9SAlex Vesker {
40342e2d6533SAlex Vesker 	mlx5dr_definer_put_obj(ctx, definer->obj);
403527ac2da9SAlex Vesker 	simple_free(definer);
403627ac2da9SAlex Vesker }
403727ac2da9SAlex Vesker 
403827ac2da9SAlex Vesker static int
403927ac2da9SAlex Vesker mlx5dr_definer_matcher_match_init(struct mlx5dr_context *ctx,
404027ac2da9SAlex Vesker 				  struct mlx5dr_matcher *matcher,
404127ac2da9SAlex Vesker 				  struct mlx5dr_definer *match_layout)
404227ac2da9SAlex Vesker {
404327ac2da9SAlex Vesker 	struct mlx5dr_match_template *mt = matcher->mt;
404427ac2da9SAlex Vesker 	int i;
404527ac2da9SAlex Vesker 
404627ac2da9SAlex Vesker 	/* Create mendatory match definer */
404727ac2da9SAlex Vesker 	for (i = 0; i < matcher->num_of_mt; i++) {
40482e2d6533SAlex Vesker 		mt[i].definer = mlx5dr_definer_alloc(ctx,
404927ac2da9SAlex Vesker 						     mt[i].fc,
405027ac2da9SAlex Vesker 						     mt[i].fc_sz,
405127ac2da9SAlex Vesker 						     mt[i].items,
40529732ffe1SAlex Vesker 						     match_layout,
40539732ffe1SAlex Vesker 						     true);
405427ac2da9SAlex Vesker 		if (!mt[i].definer) {
405527ac2da9SAlex Vesker 			DR_LOG(ERR, "Failed to create match definer");
405627ac2da9SAlex Vesker 			goto free_definers;
405727ac2da9SAlex Vesker 		}
405827ac2da9SAlex Vesker 	}
4059c55c2bf3SAlex Vesker 	return 0;
4060c55c2bf3SAlex Vesker 
406127ac2da9SAlex Vesker free_definers:
406227ac2da9SAlex Vesker 	while (i--)
40632e2d6533SAlex Vesker 		mlx5dr_definer_free(ctx, mt[i].definer);
4064c55c2bf3SAlex Vesker 
4065c55c2bf3SAlex Vesker 	return rte_errno;
4066c55c2bf3SAlex Vesker }
4067c55c2bf3SAlex Vesker 
406827ac2da9SAlex Vesker static void
406927ac2da9SAlex Vesker mlx5dr_definer_matcher_match_uninit(struct mlx5dr_matcher *matcher)
4070c55c2bf3SAlex Vesker {
40712e2d6533SAlex Vesker 	struct mlx5dr_context *ctx = matcher->tbl->ctx;
407227ac2da9SAlex Vesker 	int i;
407327ac2da9SAlex Vesker 
407427ac2da9SAlex Vesker 	for (i = 0; i < matcher->num_of_mt; i++)
40752e2d6533SAlex Vesker 		mlx5dr_definer_free(ctx, matcher->mt[i].definer);
407627ac2da9SAlex Vesker }
407727ac2da9SAlex Vesker 
4078238190f3SAlex Vesker static int
40799732ffe1SAlex Vesker mlx5dr_definer_matcher_range_init(struct mlx5dr_context *ctx,
40809732ffe1SAlex Vesker 				  struct mlx5dr_matcher *matcher,
40819732ffe1SAlex Vesker 				  struct mlx5dr_definer *range_layout)
40829732ffe1SAlex Vesker {
40839732ffe1SAlex Vesker 	struct mlx5dr_match_template *mt = matcher->mt;
40849732ffe1SAlex Vesker 	int i;
40859732ffe1SAlex Vesker 
40869732ffe1SAlex Vesker 	/* Create optional range definers */
40879732ffe1SAlex Vesker 	for (i = 0; i < matcher->num_of_mt; i++) {
40889732ffe1SAlex Vesker 		/* All must use range if requested */
40892cb66e44SItamar Gozlan 		bool is_range = !!mt[i].fcr_sz;
40902cb66e44SItamar Gozlan 		bool has_range = matcher->flags & MLX5DR_MATCHER_FLAGS_RANGE_DEFINER;
40912cb66e44SItamar Gozlan 
40922cb66e44SItamar Gozlan 		if (i && ((is_range && !has_range) || (!is_range && has_range))) {
40939732ffe1SAlex Vesker 			DR_LOG(ERR, "Using range and non range templates is not allowed");
409484c3090eSGregory Etelson 			rte_errno = EINVAL;
40959732ffe1SAlex Vesker 			goto free_definers;
40969732ffe1SAlex Vesker 		}
40979732ffe1SAlex Vesker 
40982cb66e44SItamar Gozlan 		if (!mt[i].fcr_sz)
40992cb66e44SItamar Gozlan 			continue;
41002cb66e44SItamar Gozlan 
41019732ffe1SAlex Vesker 		matcher->flags |= MLX5DR_MATCHER_FLAGS_RANGE_DEFINER;
41029732ffe1SAlex Vesker 		/* Create definer without fcr binding, already binded */
41032e2d6533SAlex Vesker 		mt[i].range_definer = mlx5dr_definer_alloc(ctx,
41049732ffe1SAlex Vesker 							   mt[i].fcr,
41059732ffe1SAlex Vesker 							   mt[i].fcr_sz,
41069732ffe1SAlex Vesker 							   mt[i].items,
41079732ffe1SAlex Vesker 							   range_layout,
41089732ffe1SAlex Vesker 							   false);
41099732ffe1SAlex Vesker 		if (!mt[i].range_definer) {
41109732ffe1SAlex Vesker 			DR_LOG(ERR, "Failed to create match definer");
41119732ffe1SAlex Vesker 			goto free_definers;
41129732ffe1SAlex Vesker 		}
41139732ffe1SAlex Vesker 	}
41149732ffe1SAlex Vesker 	return 0;
41159732ffe1SAlex Vesker 
41169732ffe1SAlex Vesker free_definers:
41179732ffe1SAlex Vesker 	while (i--)
41189732ffe1SAlex Vesker 		if (mt[i].range_definer)
41192e2d6533SAlex Vesker 			mlx5dr_definer_free(ctx, mt[i].range_definer);
41209732ffe1SAlex Vesker 
41219732ffe1SAlex Vesker 	return rte_errno;
41229732ffe1SAlex Vesker }
41239732ffe1SAlex Vesker 
41249732ffe1SAlex Vesker static void
41259732ffe1SAlex Vesker mlx5dr_definer_matcher_range_uninit(struct mlx5dr_matcher *matcher)
41269732ffe1SAlex Vesker {
41272e2d6533SAlex Vesker 	struct mlx5dr_context *ctx = matcher->tbl->ctx;
41289732ffe1SAlex Vesker 	int i;
41299732ffe1SAlex Vesker 
41309732ffe1SAlex Vesker 	for (i = 0; i < matcher->num_of_mt; i++)
41319732ffe1SAlex Vesker 		if (matcher->mt[i].range_definer)
41322e2d6533SAlex Vesker 			mlx5dr_definer_free(ctx, matcher->mt[i].range_definer);
41339732ffe1SAlex Vesker }
41349732ffe1SAlex Vesker 
41359732ffe1SAlex Vesker static int
4136238190f3SAlex Vesker mlx5dr_definer_matcher_hash_init(struct mlx5dr_context *ctx,
4137238190f3SAlex Vesker 				 struct mlx5dr_matcher *matcher)
4138238190f3SAlex Vesker {
4139238190f3SAlex Vesker 	struct mlx5dr_cmd_definer_create_attr def_attr = {0};
4140238190f3SAlex Vesker 	struct mlx5dr_match_template *mt = matcher->mt;
4141238190f3SAlex Vesker 	struct ibv_context *ibv_ctx = ctx->ibv_ctx;
4142238190f3SAlex Vesker 	uint8_t *bit_mask;
4143238190f3SAlex Vesker 	int i, j;
4144238190f3SAlex Vesker 
4145238190f3SAlex Vesker 	for (i = 1; i < matcher->num_of_mt; i++)
4146238190f3SAlex Vesker 		if (mlx5dr_definer_compare(mt[i].definer, mt[i - 1].definer))
4147238190f3SAlex Vesker 			matcher->flags |= MLX5DR_MATCHER_FLAGS_HASH_DEFINER;
4148238190f3SAlex Vesker 
4149238190f3SAlex Vesker 	if (!(matcher->flags & MLX5DR_MATCHER_FLAGS_HASH_DEFINER))
4150238190f3SAlex Vesker 		return 0;
4151238190f3SAlex Vesker 
4152238190f3SAlex Vesker 	/* Insert by index requires all MT using the same definer */
4153238190f3SAlex Vesker 	if (matcher->attr.insert_mode == MLX5DR_MATCHER_INSERT_BY_INDEX) {
4154238190f3SAlex Vesker 		DR_LOG(ERR, "Insert by index not supported with MT combination");
4155238190f3SAlex Vesker 		rte_errno = EOPNOTSUPP;
4156238190f3SAlex Vesker 		return rte_errno;
4157238190f3SAlex Vesker 	}
4158238190f3SAlex Vesker 
4159238190f3SAlex Vesker 	matcher->hash_definer = simple_calloc(1, sizeof(*matcher->hash_definer));
4160238190f3SAlex Vesker 	if (!matcher->hash_definer) {
4161238190f3SAlex Vesker 		DR_LOG(ERR, "Failed to allocate memory for hash definer");
4162238190f3SAlex Vesker 		rte_errno = ENOMEM;
4163238190f3SAlex Vesker 		return rte_errno;
4164238190f3SAlex Vesker 	}
4165238190f3SAlex Vesker 
4166238190f3SAlex Vesker 	/* Calculate intersection between all match templates bitmasks.
4167238190f3SAlex Vesker 	 * We will use mt[0] as reference and intersect it with mt[1..n].
4168238190f3SAlex Vesker 	 * From this we will get:
4169238190f3SAlex Vesker 	 * hash_definer.selectors = mt[0].selecotrs
4170238190f3SAlex Vesker 	 * hash_definer.mask =  mt[0].mask & mt[0].mask & ... & mt[n].mask
4171238190f3SAlex Vesker 	 */
4172238190f3SAlex Vesker 
4173238190f3SAlex Vesker 	/* Use first definer which should also contain intersection fields */
4174238190f3SAlex Vesker 	memcpy(matcher->hash_definer, mt->definer, sizeof(struct mlx5dr_definer));
4175238190f3SAlex Vesker 
4176238190f3SAlex Vesker 	/* Calculate intersection between first to all match templates bitmasks */
4177238190f3SAlex Vesker 	for (i = 1; i < matcher->num_of_mt; i++) {
4178238190f3SAlex Vesker 		bit_mask = (uint8_t *)&mt[i].definer->mask;
4179238190f3SAlex Vesker 		for (j = 0; j < MLX5DR_JUMBO_TAG_SZ; j++)
4180238190f3SAlex Vesker 			((uint8_t *)&matcher->hash_definer->mask)[j] &= bit_mask[j];
4181238190f3SAlex Vesker 	}
4182238190f3SAlex Vesker 
4183238190f3SAlex Vesker 	def_attr.match_mask = matcher->hash_definer->mask.jumbo;
4184238190f3SAlex Vesker 	def_attr.dw_selector = matcher->hash_definer->dw_selector;
4185238190f3SAlex Vesker 	def_attr.byte_selector = matcher->hash_definer->byte_selector;
4186238190f3SAlex Vesker 	matcher->hash_definer->obj = mlx5dr_cmd_definer_create(ibv_ctx, &def_attr);
4187238190f3SAlex Vesker 	if (!matcher->hash_definer->obj) {
4188238190f3SAlex Vesker 		DR_LOG(ERR, "Failed to create hash definer");
4189238190f3SAlex Vesker 		goto free_hash_definer;
4190238190f3SAlex Vesker 	}
4191238190f3SAlex Vesker 
4192238190f3SAlex Vesker 	return 0;
4193238190f3SAlex Vesker 
4194238190f3SAlex Vesker free_hash_definer:
4195238190f3SAlex Vesker 	simple_free(matcher->hash_definer);
4196238190f3SAlex Vesker 	return rte_errno;
4197238190f3SAlex Vesker }
4198238190f3SAlex Vesker 
4199238190f3SAlex Vesker static void
4200238190f3SAlex Vesker mlx5dr_definer_matcher_hash_uninit(struct mlx5dr_matcher *matcher)
4201238190f3SAlex Vesker {
4202238190f3SAlex Vesker 	if (!matcher->hash_definer)
4203238190f3SAlex Vesker 		return;
4204238190f3SAlex Vesker 
4205238190f3SAlex Vesker 	mlx5dr_cmd_destroy_obj(matcher->hash_definer->obj);
4206238190f3SAlex Vesker 	simple_free(matcher->hash_definer);
4207238190f3SAlex Vesker }
4208238190f3SAlex Vesker 
420927ac2da9SAlex Vesker int mlx5dr_definer_matcher_init(struct mlx5dr_context *ctx,
421027ac2da9SAlex Vesker 				struct mlx5dr_matcher *matcher)
421127ac2da9SAlex Vesker {
421227ac2da9SAlex Vesker 	struct mlx5dr_definer match_layout = {0};
42139732ffe1SAlex Vesker 	struct mlx5dr_definer range_layout = {0};
421427ac2da9SAlex Vesker 	int ret, i;
421527ac2da9SAlex Vesker 
421627ac2da9SAlex Vesker 	if (matcher->flags & MLX5DR_MATCHER_FLAGS_COLLISION)
421727ac2da9SAlex Vesker 		return 0;
421827ac2da9SAlex Vesker 
42199732ffe1SAlex Vesker 	ret = mlx5dr_definer_calc_layout(matcher, &match_layout, &range_layout);
422027ac2da9SAlex Vesker 	if (ret) {
4221e014d5e2SErez Shitrit 		DR_LOG(DEBUG, "Failed to calculate matcher definer layout");
422227ac2da9SAlex Vesker 		return ret;
422327ac2da9SAlex Vesker 	}
422427ac2da9SAlex Vesker 
422527ac2da9SAlex Vesker 	/* Calculate definers needed for exact match */
422627ac2da9SAlex Vesker 	ret = mlx5dr_definer_matcher_match_init(ctx, matcher, &match_layout);
422727ac2da9SAlex Vesker 	if (ret) {
422827ac2da9SAlex Vesker 		DR_LOG(ERR, "Failed to init match definers");
422927ac2da9SAlex Vesker 		goto free_fc;
423027ac2da9SAlex Vesker 	}
423127ac2da9SAlex Vesker 
42329732ffe1SAlex Vesker 	/* Calculate definers needed for range */
42339732ffe1SAlex Vesker 	ret = mlx5dr_definer_matcher_range_init(ctx, matcher, &range_layout);
42349732ffe1SAlex Vesker 	if (ret) {
42359732ffe1SAlex Vesker 		DR_LOG(ERR, "Failed to init range definers");
42369732ffe1SAlex Vesker 		goto uninit_match_definer;
42379732ffe1SAlex Vesker 	}
42389732ffe1SAlex Vesker 
4239238190f3SAlex Vesker 	/* Calculate partial hash definer */
4240238190f3SAlex Vesker 	ret = mlx5dr_definer_matcher_hash_init(ctx, matcher);
4241238190f3SAlex Vesker 	if (ret) {
4242238190f3SAlex Vesker 		DR_LOG(ERR, "Failed to init hash definer");
42439732ffe1SAlex Vesker 		goto uninit_range_definer;
4244238190f3SAlex Vesker 	}
4245238190f3SAlex Vesker 
424627ac2da9SAlex Vesker 	return 0;
424727ac2da9SAlex Vesker 
42489732ffe1SAlex Vesker uninit_range_definer:
42499732ffe1SAlex Vesker 	mlx5dr_definer_matcher_range_uninit(matcher);
4250238190f3SAlex Vesker uninit_match_definer:
4251238190f3SAlex Vesker 	mlx5dr_definer_matcher_match_uninit(matcher);
425227ac2da9SAlex Vesker free_fc:
425327ac2da9SAlex Vesker 	for (i = 0; i < matcher->num_of_mt; i++)
425427ac2da9SAlex Vesker 		simple_free(matcher->mt[i].fc);
425527ac2da9SAlex Vesker 
425627ac2da9SAlex Vesker 	return ret;
425727ac2da9SAlex Vesker }
425827ac2da9SAlex Vesker 
425927ac2da9SAlex Vesker void mlx5dr_definer_matcher_uninit(struct mlx5dr_matcher *matcher)
426027ac2da9SAlex Vesker {
426127ac2da9SAlex Vesker 	int i;
426227ac2da9SAlex Vesker 
426327ac2da9SAlex Vesker 	if (matcher->flags & MLX5DR_MATCHER_FLAGS_COLLISION)
4264c55c2bf3SAlex Vesker 		return;
4265c55c2bf3SAlex Vesker 
4266238190f3SAlex Vesker 	mlx5dr_definer_matcher_hash_uninit(matcher);
42679732ffe1SAlex Vesker 	mlx5dr_definer_matcher_range_uninit(matcher);
426827ac2da9SAlex Vesker 	mlx5dr_definer_matcher_match_uninit(matcher);
426927ac2da9SAlex Vesker 
427027ac2da9SAlex Vesker 	for (i = 0; i < matcher->num_of_mt; i++)
427127ac2da9SAlex Vesker 		simple_free(matcher->mt[i].fc);
4272c55c2bf3SAlex Vesker }
4273