/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright (c) 2022 NVIDIA Corporation & Affiliates
 */

#include <rte_bitops.h>

#include "mlx5dr_internal.h"

#define GTP_PDU_SC	0x85
#define BAD_PORT	0xBAD
#define BAD_SQN		0xBAD
#define ETH_TYPE_IPV4_VXLAN	0x0800
#define ETH_TYPE_IPV6_VXLAN	0x86DD
#define UDP_VXLAN_PORT	4789
#define UDP_VXLAN_GPE_PORT	4790
#define UDP_GTPU_PORT	2152
#define UDP_PORT_MPLS	6635
#define UDP_GENEVE_PORT 6081
#define UDP_ROCEV2_PORT	4791
#define DR_FLOW_LAYER_TUNNEL_NO_MPLS (MLX5_FLOW_LAYER_TUNNEL & ~MLX5_FLOW_LAYER_MPLS)
#define NVGRE_PORT 0x6558
#define NVGRE_C_RSVD0_VER 0x2000
#define NVGRE_C_RSVD0_VER_MASK 0xB000

#define STE_NO_VLAN	0x0
#define STE_SVLAN	0x1
#define STE_CVLAN	0x2
#define STE_NO_L3	0x0
#define STE_IPV4	0x1
#define STE_IPV6	0x2
#define STE_NO_L4	0x0
#define STE_TCP		0x1
#define STE_UDP		0x2
#define STE_ICMP	0x3
#define STE_NO_TUN	0x0
#define STE_ESP		0x3

#define MLX5DR_DEFINER_QUOTA_BLOCK 0
#define MLX5DR_DEFINER_QUOTA_PASS  2
#define MLX5DR_DEFINER_MAX_ROW_LOG 32
#define MLX5DR_DEFINER_HL_OPT_MAX 2

/* Setter function based on bit offset and mask, for 32bit DW */
#define _DR_SET_32(p, v, byte_off, bit_off, mask) \
	do { \
		u32 _v = v; \
		*((rte_be32_t *)(p) + ((byte_off) / 4)) = \
		rte_cpu_to_be_32((rte_be_to_cpu_32(*((u32 *)(p) + \
				  ((byte_off) / 4))) & \
				  (~((mask) << (bit_off)))) | \
				 (((_v) & (mask)) << \
				  (bit_off))); \
	} while (0)

/* Getter function based on bit offset and mask, for 32bit DW */
#define DR_GET_32(p, byte_off, bit_off, mask) \
	((rte_be_to_cpu_32(*((const rte_be32_t *)(p) + ((byte_off) / 4))) >> (bit_off)) & (mask))

/* Setter function based on bit offset and mask */
#define DR_SET(p, v, byte_off, bit_off, mask) \
	do { \
		if (unlikely((bit_off) < 0)) { \
			u32 _bit_off = -1 * (bit_off); \
			u32 second_dw_mask = (mask) & ((1 << _bit_off) - 1); \
			_DR_SET_32(p, (v) >> _bit_off, byte_off, 0, (mask) >> _bit_off); \
			_DR_SET_32(p, (v) & second_dw_mask, (byte_off) + DW_SIZE, \
				   (bit_off) % BITS_IN_DW, second_dw_mask); \
		} else { \
			_DR_SET_32(p, v, byte_off, (bit_off), (mask)); \
		} \
	} while (0)
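
/* Illustrative use (an explanatory example, not part of the driver):
 * DR_SET(tag, 0x5, 0, 6, 0x7) writes the 3-bit value 0x5 into bits 6..8
 * of the first DW of the tag. A negative bit_off marks a field that
 * straddles a DW boundary; DR_SET then splits the write into two
 * _DR_SET_32 calls, one per DW.
 */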

/* Setter function based on byte offset to directly set FULL BE32 value */
#define DR_SET_BE32(p, v, byte_off, bit_off, mask) \
	(*((rte_be32_t *)((uint8_t *)(p) + (byte_off))) = (v))

/* Setter function based on byte offset to directly set FULL BE32 value from ptr */
#define DR_SET_BE32P(p, v_ptr, byte_off, bit_off, mask) \
	memcpy((uint8_t *)(p) + (byte_off), v_ptr, 4)

/* Setter function based on byte offset to directly set FULL BE16 value */
#define DR_SET_BE16(p, v, byte_off, bit_off, mask) \
	(*((rte_be16_t *)((uint8_t *)(p) + (byte_off))) = (v))

/* Setter function based on byte offset to directly set FULL BE16 value from ptr */
#define DR_SET_BE16P(p, v_ptr, byte_off, bit_off, mask) \
	memcpy((uint8_t *)(p) + (byte_off), v_ptr, 2)

#define DR_CALC_FNAME(field, inner) \
	((inner) ? MLX5DR_DEFINER_FNAME_##field##_I : \
		   MLX5DR_DEFINER_FNAME_##field##_O)
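
/* For example, DR_CALC_FNAME(ETH_TYPE, true) expands to
 * MLX5DR_DEFINER_FNAME_ETH_TYPE_I, the inner variant of the field name.
 */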

#define DR_CALC_SET_HDR(fc, hdr, field) \
	do { \
		(fc)->bit_mask = __mlx5_mask(definer_hl, hdr.field); \
		(fc)->bit_off = __mlx5_dw_bit_off(definer_hl, hdr.field); \
		(fc)->byte_off = MLX5_BYTE_OFF(definer_hl, hdr.field); \
	} while (0)

/* Helper to calculate data used by DR_SET */
#define DR_CALC_SET(fc, hdr, field, is_inner) \
	do { \
		if (is_inner) { \
			DR_CALC_SET_HDR(fc, hdr##_inner, field); \
		} else { \
			DR_CALC_SET_HDR(fc, hdr##_outer, field); \
		} \
	} while (0)
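
/* For example, DR_CALC_SET(fc, eth_l2, tci, false) fills fc->bit_mask,
 * fc->bit_off and fc->byte_off from the definer_hl layout of the outer
 * header, i.e. from eth_l2_outer.tci.
 */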

#define DR_GET(typ, p, fld) \
	((rte_be_to_cpu_32(*((const rte_be32_t *)(p) + \
	__mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
	__mlx5_mask(typ, fld))

/* Each row (i) indicates a different matcher size, and each column (j)
 * represents {DW5, DW4, DW3, DW2, DW1, DW0}.
 * For values 0,..,2^i, and j (DW) 0,..,5: mlx5dr_optimal_dist_dw[i][j] is 1 if the
 * number of different hash results on these values equals 2^i, meaning this
 * DW hash distribution is complete.
 */
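/* For example, row 3 below is {1, 0, 1, 0, 1, 0}: for values 0,..,2^3,
 * DW5, DW3 and DW1 each yield a complete hash distribution.
 */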
int mlx5dr_optimal_dist_dw[MLX5DR_DEFINER_MAX_ROW_LOG][DW_SELECTORS_MATCH] = {
	{1, 1, 1, 1, 1, 1}, {0, 1, 1, 0, 1, 0}, {0, 1, 1, 0, 1, 0},
	{1, 0, 1, 0, 1, 0}, {0, 0, 0, 1, 1, 0}, {0, 1, 1, 0, 1, 0},
	{0, 0, 0, 0, 1, 0}, {0, 1, 1, 0, 1, 0}, {0, 0, 0, 0, 0, 0},
	{1, 0, 1, 0, 0, 0}, {0, 0, 0, 0, 0, 0}, {0, 1, 0, 1, 0, 0},
	{1, 0, 0, 0, 0, 0}, {0, 0, 1, 0, 0, 1}, {1, 1, 1, 0, 0, 0},
	{1, 1, 1, 0, 1, 0}, {0, 0, 1, 1, 0, 0}, {0, 1, 1, 0, 0, 1},
	{0, 0, 1, 0, 0, 1}, {0, 0, 1, 0, 0, 0}, {1, 0, 1, 1, 0, 0},
	{1, 0, 1, 0, 0, 1}, {0, 0, 1, 1, 0, 1}, {1, 1, 1, 0, 0, 0},
	{0, 1, 0, 1, 0, 1}, {0, 0, 0, 0, 0, 1}, {0, 0, 0, 1, 1, 1},
	{0, 0, 1, 0, 0, 1}, {1, 1, 0, 1, 1, 0}, {0, 0, 0, 0, 1, 0},
	{0, 0, 0, 1, 1, 0}};

struct mlx5dr_definer_sel_ctrl {
	uint8_t allowed_full_dw; /* Full DW selectors cover all offsets */
	uint8_t allowed_lim_dw;  /* Limited DW selectors cover offset < 64 */
	uint8_t allowed_bytes;   /* Bytes selectors, up to offset 255 */
	uint8_t used_full_dw;
	uint8_t used_lim_dw;
	uint8_t used_bytes;
	uint8_t full_dw_selector[DW_SELECTORS];
	uint8_t lim_dw_selector[DW_SELECTORS_LIMITED];
	uint8_t byte_selector[BYTE_SELECTORS];
};

struct mlx5dr_definer_conv_data {
	struct mlx5dr_context *ctx;
	struct mlx5dr_definer_fc *fc;
	uint8_t relaxed;
	uint8_t tunnel;
	uint8_t mpls_idx;
	uint8_t geneve_opt_ok_idx;
	uint8_t geneve_opt_data_idx;
	enum rte_flow_item_type last_item;
	enum mlx5dr_table_type table_type;
};

/* Xmacro used to create generic item setter from items */
#define LIST_OF_FIELDS_INFO \
	X(SET_BE16,	eth_type,		v->hdr.ether_type,		rte_flow_item_eth) \
	X(SET_BE32P,	eth_smac_47_16,		&v->hdr.src_addr.addr_bytes[0],	rte_flow_item_eth) \
	X(SET_BE16P,	eth_smac_15_0,		&v->hdr.src_addr.addr_bytes[4],	rte_flow_item_eth) \
	X(SET_BE32P,	eth_dmac_47_16,		&v->hdr.dst_addr.addr_bytes[0],	rte_flow_item_eth) \
	X(SET_BE16P,	eth_dmac_15_0,		&v->hdr.dst_addr.addr_bytes[4],	rte_flow_item_eth) \
	X(SET_BE16,	tci,			v->hdr.vlan_tci,	rte_flow_item_vlan) \
	X(SET,		ipv4_ihl,		v->ihl,			rte_ipv4_hdr) \
	X(SET,		ipv4_tos,		v->type_of_service,	rte_ipv4_hdr) \
	X(SET,		ipv4_time_to_live,	v->time_to_live,	rte_ipv4_hdr) \
	X(SET_BE32,	ipv4_dst_addr,		v->dst_addr,		rte_ipv4_hdr) \
	X(SET_BE32,	ipv4_src_addr,		v->src_addr,		rte_ipv4_hdr) \
	X(SET,		ipv4_next_proto,	v->next_proto_id,	rte_ipv4_hdr) \
	X(SET,		ipv4_version,		STE_IPV4,		rte_ipv4_hdr) \
	X(SET_BE16,	ipv4_frag,		v->fragment_offset,	rte_ipv4_hdr) \
	X(SET_BE16,	ipv4_len,		v->total_length,	rte_ipv4_hdr) \
	X(SET_BE16,	ipv4_identification,	v->packet_id,		rte_ipv4_hdr) \
	X(SET,		ip_fragmented,		!!v->fragment_offset,	rte_ipv4_hdr) \
	X(SET_BE16,	ipv6_payload_len,	v->hdr.payload_len,	rte_flow_item_ipv6) \
	X(SET,		ipv6_proto,		v->hdr.proto,		rte_flow_item_ipv6) \
	X(SET,		ipv6_routing_hdr,	IPPROTO_ROUTING,	rte_flow_item_ipv6) \
	X(SET,		ipv6_hop_limits,	v->hdr.hop_limits,	rte_flow_item_ipv6) \
	X(SET_BE32P,	ipv6_src_addr_127_96,	&v->hdr.src_addr.a[0],	rte_flow_item_ipv6) \
	X(SET_BE32P,	ipv6_src_addr_95_64,	&v->hdr.src_addr.a[4],	rte_flow_item_ipv6) \
	X(SET_BE32P,	ipv6_src_addr_63_32,	&v->hdr.src_addr.a[8],	rte_flow_item_ipv6) \
	X(SET_BE32P,	ipv6_src_addr_31_0,	&v->hdr.src_addr.a[12],	rte_flow_item_ipv6) \
	X(SET_BE32P,	ipv6_dst_addr_127_96,	&v->hdr.dst_addr.a[0],	rte_flow_item_ipv6) \
	X(SET_BE32P,	ipv6_dst_addr_95_64,	&v->hdr.dst_addr.a[4],	rte_flow_item_ipv6) \
	X(SET_BE32P,	ipv6_dst_addr_63_32,	&v->hdr.dst_addr.a[8],	rte_flow_item_ipv6) \
	X(SET_BE32P,	ipv6_dst_addr_31_0,	&v->hdr.dst_addr.a[12],	rte_flow_item_ipv6) \
	X(SET,		ipv6_version,		STE_IPV6,		rte_flow_item_ipv6) \
	X(SET,		ipv6_frag,		v->has_frag_ext,	rte_flow_item_ipv6) \
	X(SET,		icmp_protocol,		STE_ICMP,		rte_flow_item_icmp) \
	X(SET,		udp_protocol,		STE_UDP,		rte_flow_item_udp) \
	X(SET_BE16,	udp_src_port,		v->hdr.src_port,	rte_flow_item_udp) \
	X(SET_BE16,	udp_dst_port,		v->hdr.dst_port,	rte_flow_item_udp) \
	X(SET,		tcp_flags,		v->hdr.tcp_flags,	rte_flow_item_tcp) \
	X(SET,		tcp_protocol,		STE_TCP,		rte_flow_item_tcp) \
	X(SET_BE16,	tcp_src_port,		v->hdr.src_port,	rte_flow_item_tcp) \
	X(SET_BE16,	tcp_dst_port,		v->hdr.dst_port,	rte_flow_item_tcp) \
	X(SET,		gtp_udp_port,		UDP_GTPU_PORT,		rte_flow_item_gtp) \
	X(SET_BE32,	gtp_teid,		v->hdr.teid,		rte_flow_item_gtp) \
	X(SET,		gtp_msg_type,		v->hdr.msg_type,	rte_flow_item_gtp) \
	X(SET,		gtp_ext_flag,		!!v->hdr.gtp_hdr_info,	rte_flow_item_gtp) \
	X(SET,		gtp_next_ext_hdr,	GTP_PDU_SC,		rte_flow_item_gtp_psc) \
	X(SET,		gtp_ext_hdr_pdu,	v->hdr.type,		rte_flow_item_gtp_psc) \
	X(SET,		gtp_ext_hdr_qfi,	v->hdr.qfi,		rte_flow_item_gtp_psc) \
	X(SET_BE32,	vxlan_vx_flags,		v->hdr.vx_flags,	rte_flow_item_vxlan) \
	X(SET_BE32,	vxlan_vx_vni,		v->hdr.vx_vni,		rte_flow_item_vxlan) \
	X(SET,		vxlan_udp_port,		UDP_VXLAN_PORT,		rte_flow_item_vxlan) \
	X(SET,		vxlan_gpe_udp_port,	UDP_VXLAN_GPE_PORT,	rte_flow_item_vxlan_gpe) \
	X(SET,		vxlan_gpe_flags,	v->flags,		rte_flow_item_vxlan_gpe) \
	X(SET,		vxlan_gpe_protocol,	v->protocol,		rte_flow_item_vxlan_gpe) \
	X(SET,		vxlan_gpe_rsvd1,	v->rsvd1,		rte_flow_item_vxlan_gpe) \
	X(SET,		mpls_udp_port,		UDP_PORT_MPLS,		rte_flow_item_mpls) \
	X(SET,		source_qp,		v->queue,		mlx5_rte_flow_item_sq) \
	X(SET,		tag,			v->data,		rte_flow_item_tag) \
	X(SET,		metadata,		v->data,		rte_flow_item_meta) \
	X(SET_BE16,	geneve_protocol,	v->protocol,		rte_flow_item_geneve) \
	X(SET,		geneve_udp_port,	UDP_GENEVE_PORT,	rte_flow_item_geneve) \
	X(SET_BE16,	geneve_ctrl,		v->ver_opt_len_o_c_rsvd0,	rte_flow_item_geneve) \
	X(SET_BE16,	gre_c_ver,		v->c_rsvd0_ver,		rte_flow_item_gre) \
	X(SET_BE16,	gre_protocol_type,	v->protocol,		rte_flow_item_gre) \
	X(SET,		ipv4_protocol_gre,	IPPROTO_GRE,		rte_flow_item_gre) \
	X(SET_BE32,	gre_opt_key,		v->key.key,		rte_flow_item_gre_opt) \
	X(SET_BE32,	gre_opt_seq,		v->sequence.sequence,	rte_flow_item_gre_opt) \
	X(SET_BE16,	gre_opt_checksum,	v->checksum_rsvd.checksum,	rte_flow_item_gre_opt) \
	X(SET,		nvgre_def_c_rsvd0_ver,	NVGRE_C_RSVD0_VER,	rte_flow_item_nvgre) \
	X(SET,		nvgre_def_c_rsvd0_ver_mask,	NVGRE_C_RSVD0_VER_MASK,	rte_flow_item_nvgre) \
	X(SET,		nvgre_def_protocol,	NVGRE_PORT,		rte_flow_item_nvgre) \
	X(SET_BE16,	nvgre_c_rsvd0_ver,	v->c_k_s_rsvd0_ver,	rte_flow_item_nvgre) \
	X(SET_BE16,	nvgre_protocol,		v->protocol,		rte_flow_item_nvgre) \
	X(SET_BE32P,	nvgre_dw1,		&v->tni[0],		rte_flow_item_nvgre) \
	X(SET,		meter_color,		rte_col_2_mlx5_col(v->color),	rte_flow_item_meter_color) \
	X(SET_BE32,	ipsec_spi,		v->hdr.spi,		rte_flow_item_esp) \
	X(SET_BE32,	ipsec_sequence_number,	v->hdr.seq,		rte_flow_item_esp) \
	X(SET,		ib_l4_udp_port,		UDP_ROCEV2_PORT,	rte_flow_item_ib_bth) \
	X(SET,		ib_l4_opcode,		v->hdr.opcode,		rte_flow_item_ib_bth) \
	X(SET,		random_number,		v->value,		rte_flow_item_random) \
	X(SET,		ib_l4_bth_a,		v->hdr.a,		rte_flow_item_ib_bth) \
	X(SET,		cvlan,			STE_CVLAN,		rte_flow_item_vlan) \
	X(SET_BE16,	inner_type,		v->inner_type,		rte_flow_item_vlan) \

/* Item set function format */
#define X(set_type, func_name, value, item_type) \
static void mlx5dr_definer_##func_name##_set( \
	struct mlx5dr_definer_fc *fc, \
	const void *item_spec, \
	uint8_t *tag) \
{ \
	__rte_unused const struct item_type *v = item_spec; \
	DR_##set_type(tag, value, fc->byte_off, fc->bit_off, fc->bit_mask); \
}
LIST_OF_FIELDS_INFO
#undef X
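
/* For example, the eth_type entry in the list above expands to:
 *
 * static void mlx5dr_definer_eth_type_set(struct mlx5dr_definer_fc *fc,
 *					   const void *item_spec,
 *					   uint8_t *tag)
 * {
 *	__rte_unused const struct rte_flow_item_eth *v = item_spec;
 *	DR_SET_BE16(tag, v->hdr.ether_type, fc->byte_off, fc->bit_off,
 *		    fc->bit_mask);
 * }
 */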

static void
mlx5dr_definer_ones_set(struct mlx5dr_definer_fc *fc,
			__rte_unused const void *item_spec,
			__rte_unused uint8_t *tag)
{
	DR_SET(tag, -1, fc->byte_off, fc->bit_off, fc->bit_mask);
}

static void
mlx5dr_definer_eth_first_vlan_q_set(struct mlx5dr_definer_fc *fc,
				    const void *item_spec,
				    uint8_t *tag)
{
	const struct rte_flow_item_eth *v = item_spec;
	uint8_t vlan_type;

	vlan_type = v->has_vlan ? STE_CVLAN : STE_NO_VLAN;

	DR_SET(tag, vlan_type, fc->byte_off, fc->bit_off, fc->bit_mask);
}

static void
mlx5dr_definer_first_vlan_q_set(struct mlx5dr_definer_fc *fc,
				const void *item_spec,
				uint8_t *tag)
{
	const struct rte_flow_item_vlan *v = item_spec;
	uint8_t vlan_type;

	vlan_type = v->has_more_vlan ? STE_SVLAN : STE_CVLAN;

	DR_SET(tag, vlan_type, fc->byte_off, fc->bit_off, fc->bit_mask);
}

static void
mlx5dr_definer_conntrack_mask(struct mlx5dr_definer_fc *fc,
			      const void *item_spec,
			      uint8_t *tag)
{
	const struct rte_flow_item_conntrack *m = item_spec;
	uint32_t reg_mask = 0;

	if (m->flags & (RTE_FLOW_CONNTRACK_PKT_STATE_VALID |
			RTE_FLOW_CONNTRACK_PKT_STATE_INVALID |
			RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED))
		reg_mask |= (MLX5_CT_SYNDROME_VALID | MLX5_CT_SYNDROME_INVALID |
			     MLX5_CT_SYNDROME_TRAP);

	if (m->flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
		reg_mask |= MLX5_CT_SYNDROME_STATE_CHANGE;

	if (m->flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
		reg_mask |= MLX5_CT_SYNDROME_BAD_PACKET;

	DR_SET(tag, reg_mask, fc->byte_off, fc->bit_off, fc->bit_mask);
}

static void
mlx5dr_definer_conntrack_tag(struct mlx5dr_definer_fc *fc,
			     const void *item_spec,
			     uint8_t *tag)
{
	const struct rte_flow_item_conntrack *v = item_spec;
	uint32_t reg_value = 0;

	/* The conflict should be checked in the validation. */
	if (v->flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID)
		reg_value |= MLX5_CT_SYNDROME_VALID;

	if (v->flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
		reg_value |= MLX5_CT_SYNDROME_STATE_CHANGE;

	if (v->flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID)
		reg_value |= MLX5_CT_SYNDROME_INVALID;

	if (v->flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)
		reg_value |= MLX5_CT_SYNDROME_TRAP;

	if (v->flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
		reg_value |= MLX5_CT_SYNDROME_BAD_PACKET;

	DR_SET(tag, reg_value, fc->byte_off, fc->bit_off, fc->bit_mask);
}

static void
mlx5dr_definer_ptype_l2_set(struct mlx5dr_definer_fc *fc,
			    const void *item_spec,
			    uint8_t *tag)
{
	bool inner = (fc->fname == MLX5DR_DEFINER_FNAME_PTYPE_L2_I);
	const struct rte_flow_item_ptype *v = item_spec;
	uint32_t packet_type = v->packet_type &
		(inner ? RTE_PTYPE_INNER_L2_MASK : RTE_PTYPE_L2_MASK);
	uint8_t l2_type = STE_NO_VLAN;

	if (packet_type == (inner ? RTE_PTYPE_INNER_L2_ETHER : RTE_PTYPE_L2_ETHER))
		l2_type = STE_NO_VLAN;
	else if (packet_type == (inner ? RTE_PTYPE_INNER_L2_ETHER_VLAN : RTE_PTYPE_L2_ETHER_VLAN))
		l2_type = STE_CVLAN;
	else if (packet_type == (inner ? RTE_PTYPE_INNER_L2_ETHER_QINQ : RTE_PTYPE_L2_ETHER_QINQ))
		l2_type = STE_SVLAN;

	DR_SET(tag, l2_type, fc->byte_off, fc->bit_off, fc->bit_mask);
}

static void
mlx5dr_definer_ptype_l3_set(struct mlx5dr_definer_fc *fc,
			    const void *item_spec,
			    uint8_t *tag)
{
	bool inner = (fc->fname == MLX5DR_DEFINER_FNAME_PTYPE_L3_I);
	const struct rte_flow_item_ptype *v = item_spec;
	uint32_t packet_type = v->packet_type &
		(inner ? RTE_PTYPE_INNER_L3_MASK : RTE_PTYPE_L3_MASK);
	uint8_t l3_type = STE_NO_L3;

	if (packet_type == (inner ? RTE_PTYPE_INNER_L3_IPV4 : RTE_PTYPE_L3_IPV4))
		l3_type = STE_IPV4;
	else if (packet_type == (inner ? RTE_PTYPE_INNER_L3_IPV6 : RTE_PTYPE_L3_IPV6))
		l3_type = STE_IPV6;

	DR_SET(tag, l3_type, fc->byte_off, fc->bit_off, fc->bit_mask);
}

static void
mlx5dr_definer_ptype_l4_set(struct mlx5dr_definer_fc *fc,
			    const void *item_spec,
			    uint8_t *tag)
{
	bool inner = (fc->fname == MLX5DR_DEFINER_FNAME_PTYPE_L4_I);
	const struct rte_flow_item_ptype *v = item_spec;
	uint32_t packet_type = v->packet_type &
		(inner ? RTE_PTYPE_INNER_L4_MASK : RTE_PTYPE_L4_MASK);
	uint8_t l4_type = STE_NO_L4;

	if (packet_type == (inner ? RTE_PTYPE_INNER_L4_TCP : RTE_PTYPE_L4_TCP))
		l4_type = STE_TCP;
	else if (packet_type == (inner ? RTE_PTYPE_INNER_L4_UDP : RTE_PTYPE_L4_UDP))
		l4_type = STE_UDP;
	else if (packet_type == (inner ? RTE_PTYPE_INNER_L4_ESP : RTE_PTYPE_L4_ESP))
		l4_type = STE_ESP;

	DR_SET(tag, l4_type, fc->byte_off, fc->bit_off, fc->bit_mask);
}

static void
mlx5dr_definer_ptype_l4_ext_set(struct mlx5dr_definer_fc *fc,
				const void *item_spec,
				uint8_t *tag)
{
	bool inner = (fc->fname == MLX5DR_DEFINER_FNAME_PTYPE_L4_EXT_I);
	const struct rte_flow_item_ptype *v = item_spec;
	uint32_t packet_type = v->packet_type &
		(inner ? RTE_PTYPE_INNER_L4_MASK : RTE_PTYPE_L4_MASK);
	uint8_t l4_type = STE_NO_L4;

	if (packet_type == (inner ? RTE_PTYPE_INNER_L4_TCP : RTE_PTYPE_L4_TCP))
		l4_type = STE_TCP;
	else if (packet_type == (inner ? RTE_PTYPE_INNER_L4_UDP : RTE_PTYPE_L4_UDP))
		l4_type = STE_UDP;
	else if (packet_type == (inner ? RTE_PTYPE_INNER_L4_ICMP : RTE_PTYPE_L4_ICMP))
		l4_type = STE_ICMP;
	else if (packet_type == RTE_PTYPE_TUNNEL_ESP)
		l4_type = STE_ESP;

	DR_SET(tag, l4_type, fc->byte_off, fc->bit_off, fc->bit_mask);
}

static void
mlx5dr_definer_ptype_tunnel_set(struct mlx5dr_definer_fc *fc,
				const void *item_spec,
				uint8_t *tag)
{
	const struct rte_flow_item_ptype *v = item_spec;
	uint32_t packet_type = v->packet_type & RTE_PTYPE_TUNNEL_MASK;
	uint8_t tun_type = STE_NO_TUN;

	if (packet_type == RTE_PTYPE_TUNNEL_ESP)
		tun_type = STE_ESP;

	DR_SET(tag, tun_type, fc->byte_off, fc->bit_off, fc->bit_mask);
}

static void
mlx5dr_definer_ptype_frag_set(struct mlx5dr_definer_fc *fc,
			      const void *item_spec,
			      uint8_t *tag)
{
	bool inner = (fc->fname == MLX5DR_DEFINER_FNAME_PTYPE_FRAG_I);
	const struct rte_flow_item_ptype *v = item_spec;
	uint32_t packet_type = v->packet_type &
		(inner ? RTE_PTYPE_INNER_L4_FRAG : RTE_PTYPE_L4_FRAG);

	DR_SET(tag, !!packet_type, fc->byte_off, fc->bit_off, fc->bit_mask);
}

static void
mlx5dr_definer_compare_base_value_set(const void *item_spec,
				      uint8_t *tag)
{
	uint32_t *ctrl = &(((uint32_t *)tag)[MLX5DR_DEFINER_COMPARE_STE_ARGUMENT_1]);
	uint32_t *base = &(((uint32_t *)tag)[MLX5DR_DEFINER_COMPARE_STE_BASE_0]);
	const struct rte_flow_item_compare *v = item_spec;
	const struct rte_flow_field_data *a = &v->a;
	const struct rte_flow_field_data *b = &v->b;
	const uint32_t *value;

	value = (const uint32_t *)&b->value[0];

	switch (a->field) {
	case RTE_FLOW_FIELD_RANDOM:
		*base = htobe32(*value << 16);
		break;
	case RTE_FLOW_FIELD_TAG:
	case RTE_FLOW_FIELD_META:
		*base = htobe32(*value);
		break;
	case RTE_FLOW_FIELD_ESP_SEQ_NUM:
		*base = *value;
		break;
	default:
		break;
	}

	MLX5_SET(ste_match_4dw_range_ctrl_dw, ctrl, base0, 1);
}

static void
mlx5dr_definer_compare_op_translate(enum rte_flow_item_compare_op op,
				    uint8_t *tag)
{
	uint32_t *ctrl = &(((uint32_t *)tag)[MLX5DR_DEFINER_COMPARE_STE_ARGUMENT_1]);
	uint8_t operator = 0;
	uint8_t inverse = 0;

	switch (op) {
	case RTE_FLOW_ITEM_COMPARE_EQ:
		operator = 2;
		break;
	case RTE_FLOW_ITEM_COMPARE_NE:
		operator = 2;
		inverse = 1;
		break;
	case RTE_FLOW_ITEM_COMPARE_LT:
		inverse = 1;
		break;
	case RTE_FLOW_ITEM_COMPARE_LE:
		operator = 1;
		break;
	case RTE_FLOW_ITEM_COMPARE_GT:
		operator = 1;
		inverse = 1;
		break;
	case RTE_FLOW_ITEM_COMPARE_GE:
		break;
	default:
		DR_LOG(ERR, "Invalid operation type %d", op);
		assert(false);
	}

	MLX5_SET(ste_match_4dw_range_ctrl_dw, ctrl, inverse0, inverse);
	MLX5_SET(ste_match_4dw_range_ctrl_dw, ctrl, operator0, operator);
}
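
/* Note: from the mapping above, operator0 values 0/1/2 correspond to
 * GE/LE/EQ, and the remaining operations are derived by inversion, e.g.
 * RTE_FLOW_ITEM_COMPARE_GT is encoded as NOT(a <= b): operator0 = 1 with
 * inverse0 = 1.
 */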

static void
mlx5dr_definer_compare_arg_set(const void *item_spec,
			       uint8_t *tag)
{
	const struct rte_flow_item_compare *v = item_spec;
	enum rte_flow_item_compare_op op = v->operation;

	mlx5dr_definer_compare_op_translate(op, tag);
}

static void
mlx5dr_definer_compare_set(struct mlx5dr_definer_fc *fc,
			   const void *item_spec,
			   uint8_t *tag)
{
	if (fc->compare_idx == MLX5DR_DEFINER_COMPARE_ARGUMENT_0) {
		mlx5dr_definer_compare_arg_set(item_spec, tag);
		if (fc->compare_set_base)
			mlx5dr_definer_compare_base_value_set(item_spec, tag);
	}
}

static void
mlx5dr_definer_integrity_set(struct mlx5dr_definer_fc *fc,
			     const void *item_spec,
			     uint8_t *tag)
{
	bool inner = (fc->fname == MLX5DR_DEFINER_FNAME_INTEGRITY_I);
	const struct rte_flow_item_integrity *v = item_spec;
	uint32_t ok1_bits = DR_GET_32(tag, fc->byte_off, fc->bit_off, fc->bit_mask);

	if (v->l3_ok)
		ok1_bits |= inner ? BIT(MLX5DR_DEFINER_OKS1_SECOND_L3_OK) :
				    BIT(MLX5DR_DEFINER_OKS1_FIRST_L3_OK);

	if (v->ipv4_csum_ok)
		ok1_bits |= inner ? BIT(MLX5DR_DEFINER_OKS1_SECOND_IPV4_CSUM_OK) :
				    BIT(MLX5DR_DEFINER_OKS1_FIRST_IPV4_CSUM_OK);

	if (v->l4_ok)
		ok1_bits |= inner ? BIT(MLX5DR_DEFINER_OKS1_SECOND_L4_OK) |
				    BIT(MLX5DR_DEFINER_OKS1_SECOND_L4_CSUM_OK) :
				    BIT(MLX5DR_DEFINER_OKS1_FIRST_L4_OK) |
				    BIT(MLX5DR_DEFINER_OKS1_FIRST_L4_CSUM_OK);

	if (v->l4_csum_ok)
		ok1_bits |= inner ? BIT(MLX5DR_DEFINER_OKS1_SECOND_L4_CSUM_OK) :
				    BIT(MLX5DR_DEFINER_OKS1_FIRST_L4_CSUM_OK);

	DR_SET(tag, ok1_bits, fc->byte_off, fc->bit_off, fc->bit_mask);
}

static void
mlx5dr_definer_ipv6_routing_ext_set(struct mlx5dr_definer_fc *fc,
				    const void *item,
				    uint8_t *tag)
{
	const struct rte_flow_item_ipv6_routing_ext *v = item;
	uint32_t val;

	val = v->hdr.next_hdr << __mlx5_dw_bit_off(header_ipv6_routing_ext, next_hdr);
	val |= v->hdr.type << __mlx5_dw_bit_off(header_ipv6_routing_ext, type);
	val |= v->hdr.segments_left <<
		__mlx5_dw_bit_off(header_ipv6_routing_ext, segments_left);
	DR_SET(tag, val, fc->byte_off, 0, fc->bit_mask);
}

static void
mlx5dr_definer_flex_parser_set(struct mlx5dr_definer_fc *fc,
			       const void *item,
			       uint8_t *tag, bool is_inner)
{
	const struct rte_flow_item_flex *flex = item;
	uint32_t byte_off, val, idx;
	int ret;

	val = 0;
	byte_off = MLX5_BYTE_OFF(definer_hl, flex_parser.flex_parser_0);
	idx = fc->fname - MLX5DR_DEFINER_FNAME_FLEX_PARSER_0;
	byte_off -= idx * sizeof(uint32_t);
	ret = mlx5_flex_get_parser_value_per_byte_off(flex, flex->handle, byte_off,
						      is_inner, &val);
	if (ret == -1 || !val)
		return;

	DR_SET(tag, val, fc->byte_off, 0, fc->bit_mask);
}

static void
mlx5dr_definer_flex_parser_inner_set(struct mlx5dr_definer_fc *fc,
				     const void *item,
				     uint8_t *tag)
{
	mlx5dr_definer_flex_parser_set(fc, item, tag, true);
}

static void
mlx5dr_definer_flex_parser_outer_set(struct mlx5dr_definer_fc *fc,
				     const void *item,
				     uint8_t *tag)
{
	mlx5dr_definer_flex_parser_set(fc, item, tag, false);
}

static void
mlx5dr_definer_gre_key_set(struct mlx5dr_definer_fc *fc,
			   const void *item_spec,
			   uint8_t *tag)
{
	const rte_be32_t *v = item_spec;

	DR_SET_BE32(tag, *v, fc->byte_off, fc->bit_off, fc->bit_mask);
}

static void
mlx5dr_definer_ipv6_tos_set(struct mlx5dr_definer_fc *fc,
			    const void *item_spec,
			    uint8_t *tag)
{
	const struct rte_flow_item_ipv6 *v = item_spec;
	uint8_t tos = DR_GET(header_ipv6_vtc, &v->hdr.vtc_flow, tos);

	DR_SET(tag, tos, fc->byte_off, fc->bit_off, fc->bit_mask);
}

static void
mlx5dr_definer_icmp_dw1_set(struct mlx5dr_definer_fc *fc,
			    const void *item_spec,
			    uint8_t *tag)
{
	const struct rte_flow_item_icmp *v = item_spec;
	rte_be32_t icmp_dw1;

	icmp_dw1 = (v->hdr.icmp_type << __mlx5_dw_bit_off(header_icmp, type)) |
		   (v->hdr.icmp_code << __mlx5_dw_bit_off(header_icmp, code)) |
		   (rte_be_to_cpu_16(v->hdr.icmp_cksum) << __mlx5_dw_bit_off(header_icmp, cksum));

	DR_SET(tag, icmp_dw1, fc->byte_off, fc->bit_off, fc->bit_mask);
}

static void
mlx5dr_definer_icmp_dw2_set(struct mlx5dr_definer_fc *fc,
			    const void *item_spec,
			    uint8_t *tag)
{
	const struct rte_flow_item_icmp *v = item_spec;
	rte_be32_t icmp_dw2;

	icmp_dw2 = (rte_be_to_cpu_16(v->hdr.icmp_ident) << __mlx5_dw_bit_off(header_icmp, ident)) |
		   (rte_be_to_cpu_16(v->hdr.icmp_seq_nb) << __mlx5_dw_bit_off(header_icmp, seq_nb));

	DR_SET(tag, icmp_dw2, fc->byte_off, fc->bit_off, fc->bit_mask);
}

static void
mlx5dr_definer_icmp6_dw1_set(struct mlx5dr_definer_fc *fc,
			     const void *item_spec,
			     uint8_t *tag)
{
	const struct rte_flow_item_icmp6 *v = item_spec;
	rte_be32_t icmp_dw1;

	icmp_dw1 = (v->type << __mlx5_dw_bit_off(header_icmp, type)) |
		   (v->code << __mlx5_dw_bit_off(header_icmp, code)) |
		   (rte_be_to_cpu_16(v->checksum) << __mlx5_dw_bit_off(header_icmp, cksum));

	DR_SET(tag, icmp_dw1, fc->byte_off, fc->bit_off, fc->bit_mask);
}

static void
mlx5dr_definer_icmp6_echo_dw1_mask_set(struct mlx5dr_definer_fc *fc,
				       __rte_unused const void *item_spec,
				       uint8_t *tag)
{
	const struct rte_flow_item_icmp6 spec = {0xFF, 0xFF, 0x0};

	mlx5dr_definer_icmp6_dw1_set(fc, &spec, tag);
}

static void
mlx5dr_definer_icmp6_echo_request_dw1_set(struct mlx5dr_definer_fc *fc,
					  __rte_unused const void *item_spec,
					  uint8_t *tag)
{
	const struct rte_flow_item_icmp6 spec = {RTE_ICMP6_ECHO_REQUEST, 0, 0};

	mlx5dr_definer_icmp6_dw1_set(fc, &spec, tag);
}

static void
mlx5dr_definer_icmp6_echo_reply_dw1_set(struct mlx5dr_definer_fc *fc,
					__rte_unused const void *item_spec,
					uint8_t *tag)
{
	const struct rte_flow_item_icmp6 spec = {RTE_ICMP6_ECHO_REPLY, 0, 0};

	mlx5dr_definer_icmp6_dw1_set(fc, &spec, tag);
}

static void
mlx5dr_definer_icmp6_echo_dw2_set(struct mlx5dr_definer_fc *fc,
				  const void *item_spec,
				  uint8_t *tag)
{
	const struct rte_flow_item_icmp6_echo *v = item_spec;
	rte_be32_t dw2;

	dw2 = (rte_be_to_cpu_16(v->hdr.identifier) << __mlx5_dw_bit_off(header_icmp, ident)) |
	      (rte_be_to_cpu_16(v->hdr.sequence) << __mlx5_dw_bit_off(header_icmp, seq_nb));

	DR_SET(tag, dw2, fc->byte_off, fc->bit_off, fc->bit_mask);
}

static void
mlx5dr_definer_ipv6_flow_label_set(struct mlx5dr_definer_fc *fc,
				   const void *item_spec,
				   uint8_t *tag)
{
	const struct rte_flow_item_ipv6 *v = item_spec;
	uint32_t flow_label = DR_GET(header_ipv6_vtc, &v->hdr.vtc_flow, flow_label);

	DR_SET(tag, flow_label, fc->byte_off, fc->bit_off, fc->bit_mask);
}

static void
mlx5dr_definer_vport_set(struct mlx5dr_definer_fc *fc,
			 const void *item_spec,
			 uint8_t *tag)
{
	const struct rte_flow_item_ethdev *v = item_spec;
	const struct flow_hw_port_info *port_info;
	uint32_t regc_value;

	port_info = flow_hw_conv_port_id(fc->dr_ctx, v->port_id);
	if (unlikely(!port_info))
		regc_value = BAD_PORT;
	else
		regc_value = port_info->regc_value >> fc->bit_off;

	/* Bit offset is set to 0 since regc value is 32bit */
	DR_SET(tag, regc_value, fc->byte_off, fc->bit_off, fc->bit_mask);
}
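
/* The vport regc value is matched in register_c_0; see
 * mlx5dr_definer_conv_item_port() below, which derives fc->bit_off and
 * fc->bit_mask from caps->wire_regc_mask before this setter runs.
 */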

static struct mlx5dr_definer_fc *
mlx5dr_definer_get_mpls_fc(struct mlx5dr_definer_conv_data *cd, bool inner)
{
	uint8_t mpls_idx = cd->mpls_idx;
	struct mlx5dr_definer_fc *fc;

	switch (mpls_idx) {
	case 0:
		fc = &cd->fc[DR_CALC_FNAME(MPLS0, inner)];
		DR_CALC_SET_HDR(fc, mpls_inner, mpls0_label);
		break;
	case 1:
		fc = &cd->fc[DR_CALC_FNAME(MPLS1, inner)];
		DR_CALC_SET_HDR(fc, mpls_inner, mpls1_label);
		break;
	case 2:
		fc = &cd->fc[DR_CALC_FNAME(MPLS2, inner)];
		DR_CALC_SET_HDR(fc, mpls_inner, mpls2_label);
		break;
	case 3:
		fc = &cd->fc[DR_CALC_FNAME(MPLS3, inner)];
		DR_CALC_SET_HDR(fc, mpls_inner, mpls3_label);
		break;
	case 4:
		fc = &cd->fc[DR_CALC_FNAME(MPLS4, inner)];
		DR_CALC_SET_HDR(fc, mpls_inner, mpls4_label);
		break;
	default:
		rte_errno = ENOTSUP;
		DR_LOG(ERR, "MPLS index %d is not supported", mpls_idx);
		return NULL;
	}

	return fc;
}

static struct mlx5dr_definer_fc *
mlx5dr_definer_get_mpls_oks_fc(struct mlx5dr_definer_conv_data *cd, bool inner)
{
	uint8_t mpls_idx = cd->mpls_idx;
	struct mlx5dr_definer_fc *fc;

	switch (mpls_idx) {
	case 0:
		fc = &cd->fc[DR_CALC_FNAME(OKS2_MPLS0, inner)];
		DR_CALC_SET_HDR(fc, oks2, second_mpls0_qualifier);
		break;
	case 1:
		fc = &cd->fc[DR_CALC_FNAME(OKS2_MPLS1, inner)];
		DR_CALC_SET_HDR(fc, oks2, second_mpls1_qualifier);
		break;
	case 2:
		fc = &cd->fc[DR_CALC_FNAME(OKS2_MPLS2, inner)];
		DR_CALC_SET_HDR(fc, oks2, second_mpls2_qualifier);
		break;
	case 3:
		fc = &cd->fc[DR_CALC_FNAME(OKS2_MPLS3, inner)];
		DR_CALC_SET_HDR(fc, oks2, second_mpls3_qualifier);
		break;
	case 4:
		fc = &cd->fc[DR_CALC_FNAME(OKS2_MPLS4, inner)];
		DR_CALC_SET_HDR(fc, oks2, second_mpls4_qualifier);
		break;
	default:
		rte_errno = ENOTSUP;
		DR_LOG(ERR, "MPLS index %d is not supported", mpls_idx);
		return NULL;
	}

	return fc;
}

static void
mlx5dr_definer_mpls_label_set(struct mlx5dr_definer_fc *fc,
			      const void *item_spec,
			      uint8_t *tag)
{
	const struct rte_flow_item_mpls *v = item_spec;

	memcpy(tag + fc->byte_off, v->label_tc_s, sizeof(v->label_tc_s));
	memcpy(tag + fc->byte_off + sizeof(v->label_tc_s), &v->ttl, sizeof(v->ttl));
}

static void
mlx5dr_definer_geneve_vni_set(struct mlx5dr_definer_fc *fc,
			      const void *item_spec,
			      uint8_t *tag)
{
	const struct rte_flow_item_geneve *v = item_spec;

	memcpy(tag + fc->byte_off, v->vni, sizeof(v->vni));
}

static void
mlx5dr_definer_geneve_opt_ctrl_set(struct mlx5dr_definer_fc *fc,
				   const void *item_spec,
				   uint8_t *tag)
{
	const struct rte_flow_item_geneve_opt *v = item_spec;
	uint32_t dw0 = 0;

	dw0 |= v->option_type << __mlx5_dw_bit_off(header_geneve_opt, type);
	dw0 |= rte_cpu_to_be_16(v->option_class) << __mlx5_dw_bit_off(header_geneve_opt, class);
	DR_SET(tag, dw0, fc->byte_off, fc->bit_off, fc->bit_mask);
}

static void
mlx5dr_definer_geneve_opt_data_set(struct mlx5dr_definer_fc *fc,
				   const void *item_spec,
				   uint8_t *tag)
{
	const struct rte_flow_item_geneve_opt *v = item_spec;

	DR_SET_BE32(tag, v->data[fc->extra_data], fc->byte_off, fc->bit_off, fc->bit_mask);
}

static void
mlx5dr_definer_ib_l4_qp_set(struct mlx5dr_definer_fc *fc,
			    const void *item_spec,
			    uint8_t *tag)
{
	const struct rte_flow_item_ib_bth *v = item_spec;

	memcpy(tag + fc->byte_off, &v->hdr.dst_qp, sizeof(v->hdr.dst_qp));
}

static void
mlx5dr_definer_vxlan_gpe_vni_set(struct mlx5dr_definer_fc *fc,
				 const void *item_spec,
				 uint8_t *tag)
{
	const struct rte_flow_item_vxlan_gpe *v = item_spec;

	memcpy(tag + fc->byte_off, v->vni, sizeof(v->vni));
}

static void
mlx5dr_definer_vxlan_gpe_rsvd0_set(struct mlx5dr_definer_fc *fc,
				   const void *item_spec,
				   uint8_t *tag)
{
	const struct rte_flow_item_vxlan_gpe *v = item_spec;
	uint16_t rsvd0;

	rsvd0 = (v->rsvd0[0] << 8 | v->rsvd0[1]);
	DR_SET(tag, rsvd0, fc->byte_off, fc->bit_off, fc->bit_mask);
}

static void
mlx5dr_definer_tx_queue_set(struct mlx5dr_definer_fc *fc,
			    const void *item_spec,
			    uint8_t *tag)
{
	const struct rte_flow_item_tx_queue *v = item_spec;
	uint32_t sqn = 0;
	int ret;

	ret = flow_hw_conv_sqn(fc->extra_data, v->tx_queue, &sqn);
	if (unlikely(ret))
		sqn = BAD_SQN;

	DR_SET(tag, sqn, fc->byte_off, fc->bit_off, fc->bit_mask);
}

static int
mlx5dr_definer_conv_item_eth(struct mlx5dr_definer_conv_data *cd,
			     struct rte_flow_item *item,
			     int item_idx)
{
	const struct rte_flow_item_eth *m = item->mask;
	uint8_t empty_mac[RTE_ETHER_ADDR_LEN] = {0};
	struct mlx5dr_definer_fc *fc;
	bool inner = cd->tunnel;

	if (!m)
		return 0;

	if (m->reserved) {
		rte_errno = ENOTSUP;
		return rte_errno;
	}

	if (m->hdr.ether_type) {
		fc = &cd->fc[DR_CALC_FNAME(ETH_TYPE, inner)];
		fc->item_idx = item_idx;
		fc->tag_set = &mlx5dr_definer_eth_type_set;
		DR_CALC_SET(fc, eth_l2, l3_ethertype, inner);
	}

	/* Check SMAC 47_16 */
	if (memcmp(m->hdr.src_addr.addr_bytes, empty_mac, 4)) {
		fc = &cd->fc[DR_CALC_FNAME(ETH_SMAC_48_16, inner)];
		fc->item_idx = item_idx;
		fc->tag_set = &mlx5dr_definer_eth_smac_47_16_set;
		DR_CALC_SET(fc, eth_l2_src, smac_47_16, inner);
	}

	/* Check SMAC 15_0 */
	if (memcmp(m->hdr.src_addr.addr_bytes + 4, empty_mac + 4, 2)) {
		fc = &cd->fc[DR_CALC_FNAME(ETH_SMAC_15_0, inner)];
		fc->item_idx = item_idx;
		fc->tag_set = &mlx5dr_definer_eth_smac_15_0_set;
		DR_CALC_SET(fc, eth_l2_src, smac_15_0, inner);
	}

	/* Check DMAC 47_16 */
	if (memcmp(m->hdr.dst_addr.addr_bytes, empty_mac, 4)) {
		fc = &cd->fc[DR_CALC_FNAME(ETH_DMAC_48_16, inner)];
		fc->item_idx = item_idx;
		fc->tag_set = &mlx5dr_definer_eth_dmac_47_16_set;
		DR_CALC_SET(fc, eth_l2, dmac_47_16, inner);
	}

	/* Check DMAC 15_0 */
	if (memcmp(m->hdr.dst_addr.addr_bytes + 4, empty_mac + 4, 2)) {
		fc = &cd->fc[DR_CALC_FNAME(ETH_DMAC_15_0, inner)];
		fc->item_idx = item_idx;
		fc->tag_set = &mlx5dr_definer_eth_dmac_15_0_set;
		DR_CALC_SET(fc, eth_l2, dmac_15_0, inner);
	}

	if (m->has_vlan) {
		/* Mark packet as tagged (CVLAN) */
		fc = &cd->fc[DR_CALC_FNAME(VLAN_TYPE, inner)];
		fc->item_idx = item_idx;
		fc->tag_mask_set = &mlx5dr_definer_ones_set;
		fc->tag_set = &mlx5dr_definer_eth_first_vlan_q_set;
		DR_CALC_SET(fc, eth_l2, first_vlan_qualifier, inner);
	}

	return 0;
}

static int
mlx5dr_definer_conv_item_vlan(struct mlx5dr_definer_conv_data *cd,
			      struct rte_flow_item *item,
			      int item_idx)
{
	const struct rte_flow_item_vlan *m = item->mask;
	struct mlx5dr_definer_fc *fc;
	bool inner = cd->tunnel;

	if (!cd->relaxed) {
		/* Mark packet as tagged (CVLAN) */
		fc = &cd->fc[DR_CALC_FNAME(VLAN_TYPE, inner)];
		fc->item_idx = item_idx;
		fc->tag_mask_set = &mlx5dr_definer_ones_set;
		fc->tag_set = &mlx5dr_definer_cvlan_set;
		DR_CALC_SET(fc, eth_l2, first_vlan_qualifier, inner);
	}

	if (!m)
		return 0;

	if (m->reserved) {
		rte_errno = ENOTSUP;
		return rte_errno;
	}

	if (m->has_more_vlan) {
		fc = &cd->fc[DR_CALC_FNAME(VLAN_TYPE, inner)];
		fc->item_idx = item_idx;
		fc->tag_mask_set = &mlx5dr_definer_ones_set;
		fc->tag_set = &mlx5dr_definer_first_vlan_q_set;
		DR_CALC_SET(fc, eth_l2, first_vlan_qualifier, inner);
	}

	if (m->hdr.vlan_tci) {
		fc = &cd->fc[DR_CALC_FNAME(VLAN_TCI, inner)];
		fc->item_idx = item_idx;
		fc->tag_set = &mlx5dr_definer_tci_set;
		DR_CALC_SET(fc, eth_l2, tci, inner);
	}

	if (m->hdr.eth_proto) {
		fc = &cd->fc[DR_CALC_FNAME(ETH_TYPE, inner)];
		fc->item_idx = item_idx;
		fc->tag_set = &mlx5dr_definer_inner_type_set;
		DR_CALC_SET(fc, eth_l2, l3_ethertype, inner);
	}

	return 0;
}

static int
mlx5dr_definer_conv_item_ipv4(struct mlx5dr_definer_conv_data *cd,
			      struct rte_flow_item *item,
			      int item_idx)
{
	const struct rte_ipv4_hdr *m = item->mask;
	const struct rte_ipv4_hdr *l = item->last;
	struct mlx5dr_definer_fc *fc;
	bool inner = cd->tunnel;

	if (!cd->relaxed) {
		fc = &cd->fc[DR_CALC_FNAME(IP_VERSION, inner)];
		fc->item_idx = item_idx;
		fc->tag_set = &mlx5dr_definer_ipv4_version_set;
		fc->tag_mask_set = &mlx5dr_definer_ones_set;
		DR_CALC_SET(fc, eth_l2, l3_type, inner);

		/* Overwrite - Unset ethertype if present */
		memset(&cd->fc[DR_CALC_FNAME(ETH_TYPE, inner)], 0, sizeof(*fc));
	}

	if (!m)
		return 0;

	if (m->hdr_checksum ||
	    (l && (l->next_proto_id || l->type_of_service))) {
		rte_errno = ENOTSUP;
		return rte_errno;
	}

	if (m->version) {
		fc = &cd->fc[DR_CALC_FNAME(IP_VERSION, inner)];
		fc->item_idx = item_idx;
		fc->tag_set = &mlx5dr_definer_ipv4_version_set;
		fc->tag_mask_set = &mlx5dr_definer_ones_set;
		DR_CALC_SET(fc, eth_l2, l3_type, inner);
	}

	if (m->fragment_offset) {
		fc = &cd->fc[DR_CALC_FNAME(IP_FRAG, inner)];
		fc->item_idx = item_idx;
		if (rte_be_to_cpu_16(m->fragment_offset) == 0x3fff) {
			fc->tag_set = &mlx5dr_definer_ip_fragmented_set;
			DR_CALC_SET(fc, eth_l2, ip_fragmented, inner);
		} else {
			fc->is_range = l && l->fragment_offset;
			fc->tag_set = &mlx5dr_definer_ipv4_frag_set;
			DR_CALC_SET(fc, eth_l3, ipv4_frag, inner);
		}
	}

	if (m->next_proto_id) {
		fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)];
		fc->item_idx = item_idx;
		fc->tag_set = &mlx5dr_definer_ipv4_next_proto_set;
		DR_CALC_SET(fc, eth_l3, protocol_next_header, inner);
	}

	if (m->packet_id) {
		fc = &cd->fc[DR_CALC_FNAME(IP_ID, inner)];
		fc->item_idx = item_idx;
		fc->is_range = l && l->packet_id;
		fc->tag_set = &mlx5dr_definer_ipv4_identification_set;
		DR_CALC_SET(fc, eth_l3, identification, inner);
	}

	if (m->total_length) {
		fc = &cd->fc[DR_CALC_FNAME(IP_LEN, inner)];
		fc->item_idx = item_idx;
		fc->is_range = l && l->total_length;
		fc->tag_set = &mlx5dr_definer_ipv4_len_set;
		DR_CALC_SET(fc, eth_l3, ipv4_total_length, inner);
	}

	if (m->dst_addr) {
		fc = &cd->fc[DR_CALC_FNAME(IPV4_DST, inner)];
		fc->item_idx = item_idx;
		fc->is_range = l && l->dst_addr;
		fc->tag_set = &mlx5dr_definer_ipv4_dst_addr_set;
		DR_CALC_SET(fc, ipv4_src_dest, destination_address, inner);
	}

	if (m->src_addr) {
		fc = &cd->fc[DR_CALC_FNAME(IPV4_SRC, inner)];
		fc->item_idx = item_idx;
		fc->is_range = l && l->src_addr;
		fc->tag_set = &mlx5dr_definer_ipv4_src_addr_set;
		DR_CALC_SET(fc, ipv4_src_dest, source_address, inner);
	}

	if (m->ihl) {
		fc = &cd->fc[DR_CALC_FNAME(IPV4_IHL, inner)];
		fc->item_idx = item_idx;
		fc->is_range = l && l->ihl;
		fc->tag_set = &mlx5dr_definer_ipv4_ihl_set;
		DR_CALC_SET(fc, eth_l3, ihl, inner);
	}

	if (m->time_to_live) {
		fc = &cd->fc[DR_CALC_FNAME(IP_TTL, inner)];
		fc->item_idx = item_idx;
		fc->is_range = l && l->time_to_live;
		fc->tag_set = &mlx5dr_definer_ipv4_time_to_live_set;
		DR_CALC_SET(fc, eth_l3, time_to_live_hop_limit, inner);
	}

	if (m->type_of_service) {
		fc = &cd->fc[DR_CALC_FNAME(IP_TOS, inner)];
		fc->item_idx = item_idx;
		fc->tag_set = &mlx5dr_definer_ipv4_tos_set;
		DR_CALC_SET(fc, eth_l3, tos, inner);
	}

	return 0;
}

static int
mlx5dr_definer_conv_item_ipv6(struct mlx5dr_definer_conv_data *cd,
			      struct rte_flow_item *item,
			      int item_idx)
{
	const struct rte_flow_item_ipv6 *m = item->mask;
	const struct rte_flow_item_ipv6 *l = item->last;
	struct mlx5dr_definer_fc *fc;
	bool inner = cd->tunnel;

	if (!cd->relaxed) {
		fc = &cd->fc[DR_CALC_FNAME(IP_VERSION, inner)];
		fc->item_idx = item_idx;
		fc->tag_set = &mlx5dr_definer_ipv6_version_set;
		fc->tag_mask_set = &mlx5dr_definer_ones_set;
		DR_CALC_SET(fc, eth_l2, l3_type, inner);

		/* Overwrite - Unset ethertype if present */
		memset(&cd->fc[DR_CALC_FNAME(ETH_TYPE, inner)], 0, sizeof(*fc));
	}

	if (!m)
		return 0;

	if (m->has_hop_ext || m->has_route_ext || m->has_auth_ext ||
	    m->has_esp_ext || m->has_dest_ext || m->has_mobil_ext ||
	    m->has_hip_ext || m->has_shim6_ext ||
	    (l && (l->has_frag_ext || l->hdr.vtc_flow || l->hdr.proto ||
		   !is_mem_zero(l->hdr.src_addr.a, 16) ||
		   !is_mem_zero(l->hdr.dst_addr.a, 16)))) {
		rte_errno = ENOTSUP;
		return rte_errno;
	}

	if (m->has_frag_ext) {
		fc = &cd->fc[DR_CALC_FNAME(IP_FRAG, inner)];
		fc->item_idx = item_idx;
		fc->tag_set = &mlx5dr_definer_ipv6_frag_set;
		DR_CALC_SET(fc, eth_l4, ip_fragmented, inner);
	}

	if (DR_GET(header_ipv6_vtc, &m->hdr.vtc_flow, version)) {
		fc = &cd->fc[DR_CALC_FNAME(IP_VERSION, inner)];
		fc->item_idx = item_idx;
		fc->tag_set = &mlx5dr_definer_ipv6_version_set;
		fc->tag_mask_set = &mlx5dr_definer_ones_set;
		DR_CALC_SET(fc, eth_l2, l3_type, inner);
	}

	if (DR_GET(header_ipv6_vtc, &m->hdr.vtc_flow, tos)) {
		fc = &cd->fc[DR_CALC_FNAME(IP_TOS, inner)];
		fc->item_idx = item_idx;
		fc->tag_set = &mlx5dr_definer_ipv6_tos_set;
		DR_CALC_SET(fc, eth_l3, tos, inner);
	}

	if (DR_GET(header_ipv6_vtc, &m->hdr.vtc_flow, flow_label)) {
		fc = &cd->fc[DR_CALC_FNAME(IPV6_FLOW_LABEL, inner)];
		fc->item_idx = item_idx;
		fc->tag_set = &mlx5dr_definer_ipv6_flow_label_set;
		DR_CALC_SET(fc, eth_l3, flow_label, inner);
	}

	if (m->hdr.payload_len) {
		fc = &cd->fc[DR_CALC_FNAME(IP_LEN, inner)];
		fc->item_idx = item_idx;
		fc->is_range = l && l->hdr.payload_len;
		fc->tag_set = &mlx5dr_definer_ipv6_payload_len_set;
		DR_CALC_SET(fc, eth_l3, ipv6_payload_length, inner);
	}

	if (m->hdr.proto) {
		fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)];
		fc->item_idx = item_idx;
		fc->tag_set = &mlx5dr_definer_ipv6_proto_set;
		DR_CALC_SET(fc, eth_l3, protocol_next_header, inner);
	}

	if (m->hdr.hop_limits) {
		fc = &cd->fc[DR_CALC_FNAME(IP_TTL, inner)];
		fc->item_idx = item_idx;
		fc->is_range = l && l->hdr.hop_limits;
		fc->tag_set = &mlx5dr_definer_ipv6_hop_limits_set;
		DR_CALC_SET(fc, eth_l3, time_to_live_hop_limit, inner);
	}

	if (!is_mem_zero(m->hdr.src_addr.a, 4)) {
		fc = &cd->fc[DR_CALC_FNAME(IPV6_SRC_127_96, inner)];
		fc->item_idx = item_idx;
		fc->tag_set = &mlx5dr_definer_ipv6_src_addr_127_96_set;
		DR_CALC_SET(fc, ipv6_src, ipv6_address_127_96, inner);
	}

	if (!is_mem_zero(m->hdr.src_addr.a + 4, 4)) {
		fc = &cd->fc[DR_CALC_FNAME(IPV6_SRC_95_64, inner)];
		fc->item_idx = item_idx;
		fc->tag_set = &mlx5dr_definer_ipv6_src_addr_95_64_set;
		DR_CALC_SET(fc, ipv6_src, ipv6_address_95_64, inner);
	}

	if (!is_mem_zero(m->hdr.src_addr.a + 8, 4)) {
		fc = &cd->fc[DR_CALC_FNAME(IPV6_SRC_63_32, inner)];
		fc->item_idx = item_idx;
		fc->tag_set = &mlx5dr_definer_ipv6_src_addr_63_32_set;
		DR_CALC_SET(fc, ipv6_src, ipv6_address_63_32, inner);
	}

	if (!is_mem_zero(m->hdr.src_addr.a + 12, 4)) {
		fc = &cd->fc[DR_CALC_FNAME(IPV6_SRC_31_0, inner)];
		fc->item_idx = item_idx;
		fc->tag_set = &mlx5dr_definer_ipv6_src_addr_31_0_set;
		DR_CALC_SET(fc, ipv6_src, ipv6_address_31_0, inner);
	}

	if (!is_mem_zero(m->hdr.dst_addr.a, 4)) {
		fc = &cd->fc[DR_CALC_FNAME(IPV6_DST_127_96, inner)];
		fc->item_idx = item_idx;
		fc->tag_set = &mlx5dr_definer_ipv6_dst_addr_127_96_set;
		DR_CALC_SET(fc, ipv6_dst, ipv6_address_127_96, inner);
	}

	if (!is_mem_zero(m->hdr.dst_addr.a + 4, 4)) {
		fc = &cd->fc[DR_CALC_FNAME(IPV6_DST_95_64, inner)];
		fc->item_idx = item_idx;
		fc->tag_set = &mlx5dr_definer_ipv6_dst_addr_95_64_set;
		DR_CALC_SET(fc, ipv6_dst, ipv6_address_95_64, inner);
	}

	if (!is_mem_zero(m->hdr.dst_addr.a + 8, 4)) {
		fc = &cd->fc[DR_CALC_FNAME(IPV6_DST_63_32, inner)];
		fc->item_idx = item_idx;
		fc->tag_set = &mlx5dr_definer_ipv6_dst_addr_63_32_set;
		DR_CALC_SET(fc, ipv6_dst, ipv6_address_63_32, inner);
	}

	if (!is_mem_zero(m->hdr.dst_addr.a + 12, 4)) {
		fc = &cd->fc[DR_CALC_FNAME(IPV6_DST_31_0, inner)];
		fc->item_idx = item_idx;
		fc->tag_set = &mlx5dr_definer_ipv6_dst_addr_31_0_set;
		DR_CALC_SET(fc, ipv6_dst, ipv6_address_31_0, inner);
	}

	return 0;
}

static int
mlx5dr_definer_conv_item_udp(struct mlx5dr_definer_conv_data *cd,
			     struct rte_flow_item *item,
			     int item_idx)
{
	const struct rte_flow_item_udp *m = item->mask;
	const struct rte_flow_item_udp *l = item->last;
	struct mlx5dr_definer_fc *fc;
	bool inner = cd->tunnel;

	/* Set match on L4 type UDP */
	if (!cd->relaxed) {
		fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)];
		if (!fc->not_overwrite) {
			fc->item_idx = item_idx;
			fc->tag_set = &mlx5dr_definer_udp_protocol_set;
			fc->tag_mask_set = &mlx5dr_definer_ones_set;
			DR_CALC_SET(fc, eth_l2, l4_type_bwc, inner);
		}
	}

	if (!m)
		return 0;

	if (m->hdr.dgram_cksum || m->hdr.dgram_len) {
		rte_errno = ENOTSUP;
		return rte_errno;
	}

	if (m->hdr.src_port) {
		fc = &cd->fc[DR_CALC_FNAME(L4_SPORT, inner)];
		fc->item_idx = item_idx;
		fc->is_range = l && l->hdr.src_port;
		fc->tag_set = &mlx5dr_definer_udp_src_port_set;
		DR_CALC_SET(fc, eth_l4, source_port, inner);
	}

	if (m->hdr.dst_port) {
		fc = &cd->fc[DR_CALC_FNAME(L4_DPORT, inner)];
		fc->item_idx = item_idx;
		fc->is_range = l && l->hdr.dst_port;
		fc->tag_set = &mlx5dr_definer_udp_dst_port_set;
		DR_CALC_SET(fc, eth_l4, destination_port, inner);
	}

	return 0;
}

static int
mlx5dr_definer_conv_item_tcp(struct mlx5dr_definer_conv_data *cd,
			     struct rte_flow_item *item,
			     int item_idx)
{
	const struct rte_flow_item_tcp *m = item->mask;
	const struct rte_flow_item_tcp *l = item->last;
	struct mlx5dr_definer_fc *fc;
	bool inner = cd->tunnel;

	/* Overwrite match on L4 type TCP */
	if (!cd->relaxed) {
		fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)];
		if (!fc->not_overwrite) {
			fc->item_idx = item_idx;
			fc->tag_set = &mlx5dr_definer_tcp_protocol_set;
			fc->tag_mask_set = &mlx5dr_definer_ones_set;
			DR_CALC_SET(fc, eth_l2, l4_type_bwc, inner);
		}
	}

	if (!m)
		return 0;

	if (m->hdr.sent_seq || m->hdr.recv_ack || m->hdr.data_off ||
	    m->hdr.rx_win || m->hdr.cksum || m->hdr.tcp_urp) {
		rte_errno = ENOTSUP;
		return rte_errno;
	}

	if (m->hdr.tcp_flags) {
		fc = &cd->fc[DR_CALC_FNAME(TCP_FLAGS, inner)];
		fc->item_idx = item_idx;
		fc->is_range = l && l->hdr.tcp_flags;
		fc->tag_set = &mlx5dr_definer_tcp_flags_set;
		DR_CALC_SET(fc, eth_l4, tcp_flags, inner);
	}

	if (m->hdr.src_port) {
		fc = &cd->fc[DR_CALC_FNAME(L4_SPORT, inner)];
		fc->item_idx = item_idx;
		fc->is_range = l && l->hdr.src_port;
		fc->tag_set = &mlx5dr_definer_tcp_src_port_set;
		DR_CALC_SET(fc, eth_l4, source_port, inner);
	}

	if (m->hdr.dst_port) {
		fc = &cd->fc[DR_CALC_FNAME(L4_DPORT, inner)];
		fc->item_idx = item_idx;
		fc->is_range = l && l->hdr.dst_port;
		fc->tag_set = &mlx5dr_definer_tcp_dst_port_set;
		DR_CALC_SET(fc, eth_l4, destination_port, inner);
	}

	return 0;
}

static int
mlx5dr_definer_conv_item_gtp(struct mlx5dr_definer_conv_data *cd,
			     struct rte_flow_item *item,
			     int item_idx)
{
	struct mlx5dr_cmd_query_caps *caps = cd->ctx->caps;
	const struct rte_flow_item_gtp *m = item->mask;
	struct mlx5dr_definer_fc *fc;

	if (cd->tunnel) {
		DR_LOG(ERR, "Inner GTPU item not supported");
		rte_errno = ENOTSUP;
		return rte_errno;
	}

	/* Set match on GTPU UDP dest port if not already set */
	fc = &cd->fc[DR_CALC_FNAME(L4_DPORT, false)];
	if (!fc->tag_set && !cd->relaxed) {
		fc->item_idx = item_idx;
		fc->tag_set = &mlx5dr_definer_gtp_udp_port_set;
		fc->tag_mask_set = &mlx5dr_definer_ones_set;
		DR_CALC_SET(fc, eth_l4, destination_port, false);
	}

	if (!m)
		return 0;

	if (m->hdr.plen || (m->hdr.gtp_hdr_info & ~MLX5DR_DEFINER_GTP_EXT_HDR_BIT)) {
		rte_errno = ENOTSUP;
		return rte_errno;
	}

	if (m->hdr.teid) {
		if (!(caps->flex_protocols & MLX5_HCA_FLEX_GTPU_TEID_ENABLED)) {
			rte_errno = ENOTSUP;
			return rte_errno;
		}
		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GTP_TEID];
		fc->item_idx = item_idx;
		fc->tag_set = &mlx5dr_definer_gtp_teid_set;
		fc->bit_mask = __mlx5_mask(header_gtp, teid);
		fc->byte_off = caps->format_select_gtpu_dw_1 * DW_SIZE;
	}

	if (m->hdr.gtp_hdr_info) {
		if (!(caps->flex_protocols & MLX5_HCA_FLEX_GTPU_DW_0_ENABLED)) {
			rte_errno = ENOTSUP;
			return rte_errno;
		}
		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GTP_EXT_FLAG];
		fc->item_idx = item_idx;
		fc->tag_set = &mlx5dr_definer_gtp_ext_flag_set;
		fc->bit_mask = __mlx5_mask(header_gtp, ext_hdr_flag);
		fc->bit_off = __mlx5_dw_bit_off(header_gtp, ext_hdr_flag);
		fc->byte_off = caps->format_select_gtpu_dw_0 * DW_SIZE;
	}

	if (m->hdr.msg_type) {
		if (!(caps->flex_protocols & MLX5_HCA_FLEX_GTPU_DW_0_ENABLED)) {
			rte_errno = ENOTSUP;
			return rte_errno;
		}
		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GTP_MSG_TYPE];
		fc->item_idx = item_idx;
		fc->tag_set = &mlx5dr_definer_gtp_msg_type_set;
		fc->bit_mask = __mlx5_mask(header_gtp, msg_type);
		fc->bit_off = __mlx5_dw_bit_off(header_gtp, msg_type);
		fc->byte_off = caps->format_select_gtpu_dw_0 * DW_SIZE;
	}

	return 0;
}

static int
mlx5dr_definer_conv_item_gtp_psc(struct mlx5dr_definer_conv_data *cd,
				 struct rte_flow_item *item,
				 int item_idx)
{
	struct mlx5dr_cmd_query_caps *caps = cd->ctx->caps;
	const struct rte_flow_item_gtp_psc *m = item->mask;
	struct mlx5dr_definer_fc *fc;

	/* Overwrite GTP extension flag to be 1 */
	if (!cd->relaxed) {
		if (!(caps->flex_protocols & MLX5_HCA_FLEX_GTPU_DW_0_ENABLED)) {
			rte_errno = ENOTSUP;
			return rte_errno;
		}
		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GTP_EXT_FLAG];
		fc->item_idx = item_idx;
		fc->tag_set = &mlx5dr_definer_ones_set;
		fc->bit_mask = __mlx5_mask(header_gtp, ext_hdr_flag);
		fc->bit_off = __mlx5_dw_bit_off(header_gtp, ext_hdr_flag);
		fc->byte_off = caps->format_select_gtpu_dw_0 * DW_SIZE;
	}

	/* Overwrite next extension header type */
	if (!cd->relaxed) {
		if (!(caps->flex_protocols & MLX5_HCA_FLEX_GTPU_DW_2_ENABLED)) {
			rte_errno = ENOTSUP;
			return rte_errno;
		}
		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GTP_NEXT_EXT_HDR];
		fc->item_idx = item_idx;
		fc->tag_set = &mlx5dr_definer_gtp_next_ext_hdr_set;
		fc->tag_mask_set = &mlx5dr_definer_ones_set;
		fc->bit_mask = __mlx5_mask(header_opt_gtp, next_ext_hdr_type);
		fc->bit_off = __mlx5_dw_bit_off(header_opt_gtp, next_ext_hdr_type);
		fc->byte_off = caps->format_select_gtpu_dw_2 * DW_SIZE;
	}

	if (!m)
		return 0;

	if (m->hdr.type) {
		if (!(caps->flex_protocols & MLX5_HCA_FLEX_GTPU_FIRST_EXT_DW_0_ENABLED)) {
			rte_errno = ENOTSUP;
			return rte_errno;
		}
		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GTP_EXT_HDR_PDU];
		fc->item_idx = item_idx;
		fc->tag_set = &mlx5dr_definer_gtp_ext_hdr_pdu_set;
		fc->bit_mask = __mlx5_mask(header_gtp_psc, pdu_type);
		fc->bit_off = __mlx5_dw_bit_off(header_gtp_psc, pdu_type);
		fc->byte_off = caps->format_select_gtpu_ext_dw_0 * DW_SIZE;
	}

	if (m->hdr.qfi) {
		if (!(caps->flex_protocols & MLX5_HCA_FLEX_GTPU_FIRST_EXT_DW_0_ENABLED)) {
			rte_errno = ENOTSUP;
			return rte_errno;
		}
		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GTP_EXT_HDR_QFI];
		fc->item_idx = item_idx;
		fc->tag_set = &mlx5dr_definer_gtp_ext_hdr_qfi_set;
		fc->bit_mask = __mlx5_mask(header_gtp_psc, qfi);
		fc->bit_off = __mlx5_dw_bit_off(header_gtp_psc, qfi);
		fc->byte_off = caps->format_select_gtpu_ext_dw_0 * DW_SIZE;
	}

	return 0;
}

static int
mlx5dr_definer_conv_item_port(struct mlx5dr_definer_conv_data *cd,
			      struct rte_flow_item *item,
			      int item_idx)
{
	struct mlx5dr_cmd_query_caps *caps = cd->ctx->caps;
	const struct rte_flow_item_ethdev *m = item->mask;
	struct mlx5dr_definer_fc *fc;

	if (m->port_id) {
		if (!caps->wire_regc_mask) {
			DR_LOG(ERR, "Port ID item not supported, missing wire REGC mask");
			rte_errno = ENOTSUP;
			return rte_errno;
		}

		fc = &cd->fc[MLX5DR_DEFINER_FNAME_VPORT_REG_C_0];
		fc->item_idx = item_idx;
		fc->tag_set = &mlx5dr_definer_vport_set;
		fc->tag_mask_set = &mlx5dr_definer_ones_set;
		DR_CALC_SET_HDR(fc, registers, register_c_0);
		fc->bit_off = rte_ctz32(caps->wire_regc_mask);
		fc->bit_mask = caps->wire_regc_mask >> fc->bit_off;
		fc->dr_ctx = cd->ctx;
	} else {
1578 		DR_LOG(ERR, "Pord ID item mask must specify ID mask");
1579 		rte_errno = EINVAL;
1580 		return rte_errno;
1581 	}
1582 
1583 	return 0;
1584 }
1585 
1586 static int
1587 mlx5dr_definer_conv_item_vxlan(struct mlx5dr_definer_conv_data *cd,
1588 			       struct rte_flow_item *item,
1589 			       int item_idx)
1590 {
1591 	const struct rte_flow_item_vxlan *m = item->mask;
1592 	struct mlx5dr_definer_fc *fc;
1593 	bool inner = cd->tunnel;
1594 
1595 	if (inner) {
1596 		DR_LOG(ERR, "Inner VXLAN item not supported");
1597 		rte_errno = ENOTSUP;
1598 		return rte_errno;
1599 	}
1600 
1601 	/* In order to match on VXLAN we must match on ip_protocol and l4_dport */
1602 	if (!cd->relaxed) {
1603 		fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)];
1604 		if (!fc->tag_set) {
1605 			fc->item_idx = item_idx;
1606 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
1607 			fc->tag_set = &mlx5dr_definer_udp_protocol_set;
1608 			DR_CALC_SET(fc, eth_l2, l4_type_bwc, inner);
1609 		}
1610 
1611 		fc = &cd->fc[DR_CALC_FNAME(L4_DPORT, inner)];
1612 		if (!fc->tag_set) {
1613 			fc->item_idx = item_idx;
1614 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
1615 			fc->tag_set = &mlx5dr_definer_vxlan_udp_port_set;
1616 			DR_CALC_SET(fc, eth_l4, destination_port, inner);
1617 		}
1618 	}
1619 
1620 	if (!m)
1621 		return 0;
1622 
1623 	if (m->hdr.vx_flags) {
1624 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_VXLAN_DW0];
1625 		fc->item_idx = item_idx;
1626 		fc->tag_set = &mlx5dr_definer_vxlan_vx_flags_set;
1627 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_0);
1628 	}
1629 
1630 	if (m->hdr.vx_vni) {
1631 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_VXLAN_DW1];
1632 		fc->item_idx = item_idx;
1633 		fc->tag_set = &mlx5dr_definer_vxlan_vx_vni_set;
1634 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_1);
1635 	}
1636 
1637 	return 0;
1638 }
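
/*
 * In non-relaxed mode the VXLAN item implicitly constrains the outer
 * headers, so a template such as (illustrative rte_flow pattern):
 *
 *	eth / ipv4 / udp / vxlan vni is 100 / end
 *
 * also matches ip_protocol == UDP and l4_dport == 4789 (UDP_VXLAN_PORT)
 * even if the UDP item carries no mask. The "if (!fc->tag_set)" guards
 * keep an explicit earlier item from being overwritten.
 */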
1639 
1640 static int
1641 mlx5dr_definer_conv_item_mpls(struct mlx5dr_definer_conv_data *cd,
1642 			      struct rte_flow_item *item,
1643 			      int item_idx)
1644 {
1645 	const struct rte_flow_item_mpls *m = item->mask;
1646 	struct mlx5dr_definer_fc *fc;
1647 	bool inner = cd->tunnel;
1648 
1649 	if (inner) {
1650 		DR_LOG(ERR, "Inner MPLS item not supported");
1651 		rte_errno = ENOTSUP;
1652 		return rte_errno;
1653 	}
1654 
1655 	if (!cd->relaxed) {
1656 		/* In order to match on MPLS we must match on ip_protocol and l4_dport. */
1657 		fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, false)];
1658 		if (!fc->tag_set) {
1659 			fc->item_idx = item_idx;
1660 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
1661 			fc->tag_set = &mlx5dr_definer_udp_protocol_set;
1662 			DR_CALC_SET(fc, eth_l2, l4_type_bwc, false);
1663 		}
1664 
1665 		/* Currently support only MPLSoUDP */
1666 		fc = &cd->fc[DR_CALC_FNAME(L4_DPORT, false)];
1667 		if (!fc->tag_set) {
1668 			fc->item_idx = item_idx;
1669 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
1670 			fc->tag_set = &mlx5dr_definer_mpls_udp_port_set;
1671 			DR_CALC_SET(fc, eth_l4, destination_port, false);
1672 		}
1673 	}
1674 
1675 	if (m && (!is_mem_zero(m->label_tc_s, 3) || m->ttl)) {
1676 		/* According to HW, MPLSoUDP is handled as inner */
1677 		fc = mlx5dr_definer_get_mpls_fc(cd, true);
1678 		if (!fc)
1679 			return rte_errno;
1680 
1681 		fc->item_idx = item_idx;
1682 		fc->tag_set = &mlx5dr_definer_mpls_label_set;
1683 	} else { /* Mask the relevant oks2 bit, which indicates an MPLS label exists.
1684 		  * According to HW, MPLSoUDP is handled as inner.
1685 		  */
1686 		fc = mlx5dr_definer_get_mpls_oks_fc(cd, true);
1687 		if (!fc)
1688 			return rte_errno;
1689 
1690 		fc->item_idx = item_idx;
1691 		fc->tag_set = &mlx5dr_definer_ones_set;
1692 	}
1693 
1694 	return 0;
1695 }
1696 
1697 static struct mlx5dr_definer_fc *
1698 mlx5dr_definer_get_register_fc(struct mlx5dr_definer_conv_data *cd, int reg)
1699 {
1700 	struct mlx5dr_definer_fc *fc;
1701 
1702 	switch (reg) {
1703 	case REG_C_0:
1704 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_REG_0];
1705 		DR_CALC_SET_HDR(fc, registers, register_c_0);
1706 		break;
1707 	case REG_C_1:
1708 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_REG_1];
1709 		DR_CALC_SET_HDR(fc, registers, register_c_1);
1710 		break;
1711 	case REG_C_2:
1712 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_REG_2];
1713 		DR_CALC_SET_HDR(fc, registers, register_c_2);
1714 		break;
1715 	case REG_C_3:
1716 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_REG_3];
1717 		DR_CALC_SET_HDR(fc, registers, register_c_3);
1718 		break;
1719 	case REG_C_4:
1720 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_REG_4];
1721 		DR_CALC_SET_HDR(fc, registers, register_c_4);
1722 		break;
1723 	case REG_C_5:
1724 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_REG_5];
1725 		DR_CALC_SET_HDR(fc, registers, register_c_5);
1726 		break;
1727 	case REG_C_6:
1728 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_REG_6];
1729 		DR_CALC_SET_HDR(fc, registers, register_c_6);
1730 		break;
1731 	case REG_C_7:
1732 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_REG_7];
1733 		DR_CALC_SET_HDR(fc, registers, register_c_7);
1734 		break;
1735 	case REG_C_8:
1736 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_REG_8];
1737 		DR_CALC_SET_HDR(fc, registers, register_c_8);
1738 		break;
1739 	case REG_C_9:
1740 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_REG_9];
1741 		DR_CALC_SET_HDR(fc, registers, register_c_9);
1742 		break;
1743 	case REG_C_10:
1744 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_REG_10];
1745 		DR_CALC_SET_HDR(fc, registers, register_c_10);
1746 		break;
1747 	case REG_C_11:
1748 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_REG_11];
1749 		DR_CALC_SET_HDR(fc, registers, register_c_11);
1750 		break;
1751 	case REG_A:
1752 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_REG_A];
1753 		DR_CALC_SET_HDR(fc, metadata, general_purpose);
1754 		break;
1755 	case REG_B:
1756 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_REG_B];
1757 		DR_CALC_SET_HDR(fc, metadata, metadata_to_cqe);
1758 		break;
1759 	default:
1760 		rte_errno = ENOTSUP;
1761 		return NULL;
1762 	}
1763 
1764 	return fc;
1765 }
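
/*
 * Usage sketch for the helper above: callers resolve an abstract
 * register id to a field-copy slot and then fill in only the
 * item-specific parts, e.g.:
 *
 *	fc = mlx5dr_definer_get_register_fc(cd, REG_C_1);
 *	if (!fc)
 *		return rte_errno;	// rte_errno set to ENOTSUP
 *	fc->item_idx = item_idx;
 *	fc->tag_set = &mlx5dr_definer_tag_set;
 */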
1766 
1767 static int
1768 mlx5dr_definer_conv_item_tag(struct mlx5dr_definer_conv_data *cd,
1769 			     struct rte_flow_item *item,
1770 			     int item_idx)
1771 {
1772 	const struct rte_flow_item_tag *m = item->mask;
1773 	const struct rte_flow_item_tag *v = item->spec;
1774 	const struct rte_flow_item_tag *l = item->last;
1775 	struct mlx5dr_definer_fc *fc;
1776 	int reg;
1777 
1778 	if (!m || !v)
1779 		return 0;
1780 
1781 	if (item->type == RTE_FLOW_ITEM_TYPE_TAG)
1782 		reg = flow_hw_get_reg_id_from_ctx(cd->ctx,
1783 						  RTE_FLOW_ITEM_TYPE_TAG,
1784 						  cd->table_type,
1785 						  v->index);
1786 	else
1787 		reg = (int)v->index;
1788 
1789 	if (reg <= 0) {
1790 		DR_LOG(ERR, "Invalid register for item tag");
1791 		rte_errno = EINVAL;
1792 		return rte_errno;
1793 	}
1794 
1795 	fc = mlx5dr_definer_get_register_fc(cd, reg);
1796 	if (!fc)
1797 		return rte_errno;
1798 
1799 	fc->item_idx = item_idx;
1800 	fc->is_range = l && l->index;
1801 	fc->tag_set = &mlx5dr_definer_tag_set;
1802 
1803 	return 0;
1804 }
1805 
1806 static void
1807 mlx5dr_definer_quota_set(struct mlx5dr_definer_fc *fc,
1808 			 const void *item_data, uint8_t *tag)
1809 {
1810 	/**
1811 	 * MLX5 PMD implements QUOTA with Meter object.
1812 	 * PMD Quota action translation implicitly increments
1813 	 * Meter register value after HW assigns it.
1814 	 * Meter register values are:
1815 	 *            HW     QUOTA(HW+1)  QUOTA state
1816 	 * RED        0        1 (01b)       BLOCK
1817 	 * YELLOW     1        2 (10b)       PASS
1818 	 * GREEN      2        3 (11b)       PASS
1819 	 *
1820 	 * Quota item checks Meter register bit 1 value to determine state:
1821 	 *            SPEC       MASK
1822 	 * PASS     2 (10b)    2 (10b)
1823 	 * BLOCK    0 (00b)    2 (10b)
1824 	 *
1825 	 * item_data is NULL when template quota item is non-masked:
1826 	 * .. / quota / ..
1827 	 */
1828 
1829 	const struct rte_flow_item_quota *quota = item_data;
1830 	uint32_t val;
1831 
1832 	if (quota && quota->state == RTE_FLOW_QUOTA_STATE_BLOCK)
1833 		val = MLX5DR_DEFINER_QUOTA_BLOCK;
1834 	else
1835 		val = MLX5DR_DEFINER_QUOTA_PASS;
1836 
1837 	DR_SET(tag, val, fc->byte_off, fc->bit_off, fc->bit_mask);
1838 }
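
/*
 * Worked example for the table above: matching QUOTA state PASS writes
 * spec 2 (10b) under mask 2 (10b), i.e. only Meter register bit 1 is
 * tested, so both YELLOW (10b) and GREEN (11b) packets hit the rule,
 * while BLOCK (01b after the implicit increment) does not.
 */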
1839 
1840 static int
1841 mlx5dr_definer_conv_item_quota(struct mlx5dr_definer_conv_data *cd,
1842 			       __rte_unused struct rte_flow_item *item,
1843 			       int item_idx)
1844 {
1845 	int mtr_reg = flow_hw_get_reg_id_from_ctx(cd->ctx,
1846 						  RTE_FLOW_ITEM_TYPE_METER_COLOR,
1847 						  cd->table_type, 0);
1848 	struct mlx5dr_definer_fc *fc;
1849 
1850 	if (mtr_reg < 0) {
1851 		rte_errno = EINVAL;
1852 		return rte_errno;
1853 	}
1854 
1855 	fc = mlx5dr_definer_get_register_fc(cd, mtr_reg);
1856 	if (!fc)
1857 		return rte_errno;
1858 
1859 	fc->tag_set = &mlx5dr_definer_quota_set;
1860 	fc->item_idx = item_idx;
1861 	return 0;
1862 }
1863 
1864 static int
1865 mlx5dr_definer_conv_item_metadata(struct mlx5dr_definer_conv_data *cd,
1866 				  struct rte_flow_item *item,
1867 				  int item_idx)
1868 {
1869 	const struct rte_flow_item_meta *m = item->mask;
1870 	const struct rte_flow_item_meta *l = item->last;
1871 	struct mlx5dr_definer_fc *fc;
1872 	int reg;
1873 
1874 	if (!m)
1875 		return 0;
1876 
1877 	reg = flow_hw_get_reg_id_from_ctx(cd->ctx, RTE_FLOW_ITEM_TYPE_META,
1878 					  cd->table_type, -1);
1879 	if (reg <= 0) {
1880 		DR_LOG(ERR, "Invalid register for item metadata");
1881 		rte_errno = EINVAL;
1882 		return rte_errno;
1883 	}
1884 
1885 	fc = mlx5dr_definer_get_register_fc(cd, reg);
1886 	if (!fc)
1887 		return rte_errno;
1888 
1889 	fc->item_idx = item_idx;
1890 	fc->is_range = l && l->data;
1891 	fc->tag_set = &mlx5dr_definer_metadata_set;
1892 
1893 	return 0;
1894 }
1895 
1896 static int
1897 mlx5dr_definer_conv_item_tx_queue(struct mlx5dr_definer_conv_data *cd,
1898 				  struct rte_flow_item *item,
1899 				  int item_idx)
1900 {
1901 	const struct rte_flow_item_tx_queue *m = item->mask;
1902 	struct mlx5dr_definer_fc *fc;
1903 
1904 	if (!m)
1905 		return 0;
1906 
1907 	if (m->tx_queue) {
1908 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_SOURCE_QP];
1909 		fc->item_idx = item_idx;
1910 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
1911 		fc->tag_set = &mlx5dr_definer_tx_queue_set;
1912 		/* Use extra_data to save the DPDK port_id. */
1913 		fc->extra_data = flow_hw_get_port_id(cd->ctx);
1914 		if (fc->extra_data == UINT16_MAX) {
1915 			DR_LOG(ERR, "Invalid port for item tx_queue");
1916 			rte_errno = EINVAL;
1917 			return rte_errno;
1918 		}
1919 		DR_CALC_SET_HDR(fc, source_qp_gvmi, source_qp);
1920 	}
1921 
1922 	return 0;
1923 }
1924 
1925 static int
1926 mlx5dr_definer_conv_item_sq(struct mlx5dr_definer_conv_data *cd,
1927 			    struct rte_flow_item *item,
1928 			    int item_idx)
1929 {
1930 	const struct mlx5_rte_flow_item_sq *m = item->mask;
1931 	struct mlx5dr_definer_fc *fc;
1932 
1933 	if (!m)
1934 		return 0;
1935 
1936 	if (m->queue) {
1937 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_SOURCE_QP];
1938 		fc->item_idx = item_idx;
1939 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
1940 		fc->tag_set = &mlx5dr_definer_source_qp_set;
1941 		DR_CALC_SET_HDR(fc, source_qp_gvmi, source_qp);
1942 	}
1943 
1944 	return 0;
1945 }
1946 
1947 static int
1948 mlx5dr_definer_conv_item_gre(struct mlx5dr_definer_conv_data *cd,
1949 			     struct rte_flow_item *item,
1950 			     int item_idx)
1951 {
1952 	const struct rte_flow_item_gre *m = item->mask;
1953 	struct mlx5dr_definer_fc *fc;
1954 	bool inner = cd->tunnel;
1955 
1956 	if (inner) {
1957 		DR_LOG(ERR, "Inner GRE item not supported");
1958 		rte_errno = ENOTSUP;
1959 		return rte_errno;
1960 	}
1961 
1962 	if (!cd->relaxed) {
1963 		fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)];
1964 		fc->item_idx = item_idx;
1965 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
1966 		fc->tag_set = &mlx5dr_definer_ipv4_protocol_gre_set;
1967 		DR_CALC_SET(fc, eth_l3, protocol_next_header, inner);
1968 	}
1969 
1970 	if (!m)
1971 		return 0;
1972 
1973 	if (m->c_rsvd0_ver) {
1974 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GRE_C_VER];
1975 		fc->item_idx = item_idx;
1976 		fc->tag_set = &mlx5dr_definer_gre_c_ver_set;
1977 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_0);
1978 		fc->bit_mask = __mlx5_mask(header_gre, c_rsvd0_ver);
1979 		fc->bit_off = __mlx5_dw_bit_off(header_gre, c_rsvd0_ver);
1980 	}
1981 
1982 	if (m->protocol) {
1983 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GRE_PROTOCOL];
1984 		fc->item_idx = item_idx;
1985 		fc->tag_set = &mlx5dr_definer_gre_protocol_type_set;
1986 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_0);
1987 		fc->byte_off += MLX5_BYTE_OFF(header_gre, gre_protocol);
1988 		fc->bit_mask = __mlx5_mask(header_gre, gre_protocol);
1989 		fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_protocol);
1990 	}
1991 
1992 	return 0;
1993 }
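
/*
 * Layout note for the GRE matching above: c_rsvd0_ver and gre_protocol
 * are two 16-bit halves of the same tunnel_header_0 DW, which is why the
 * protocol field keeps the DW offset set by DR_CALC_SET_HDR() and only
 * adds the in-DW byte offset:
 *
 *	fc->byte_off += MLX5_BYTE_OFF(header_gre, gre_protocol);
 */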
1994 
1995 static int
1996 mlx5dr_definer_conv_item_gre_opt(struct mlx5dr_definer_conv_data *cd,
1997 				 struct rte_flow_item *item,
1998 				 int item_idx)
1999 {
2000 	const struct rte_flow_item_gre_opt *m = item->mask;
2001 	struct mlx5dr_definer_fc *fc;
2002 
2003 	if (!cd->relaxed) {
2004 		fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, false)];
2005 		if (!fc->tag_set) {
2006 			fc->item_idx = item_idx;
2007 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
2008 			fc->tag_set = &mlx5dr_definer_ipv4_protocol_gre_set;
2009 			DR_CALC_SET(fc, eth_l3, protocol_next_header, false);
2010 		}
2011 	}
2012 
2013 	if (!m)
2014 		return 0;
2015 
2016 	if (m->checksum_rsvd.checksum) {
2017 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GRE_OPT_CHECKSUM];
2018 		fc->item_idx = item_idx;
2019 		fc->tag_set = &mlx5dr_definer_gre_opt_checksum_set;
2020 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_1);
2021 	}
2022 
2023 	if (m->key.key) {
2024 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GRE_OPT_KEY];
2025 		fc->item_idx = item_idx;
2026 		fc->tag_set = &mlx5dr_definer_gre_opt_key_set;
2027 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_2);
2028 	}
2029 
2030 	if (m->sequence.sequence) {
2031 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GRE_OPT_SEQ];
2032 		fc->item_idx = item_idx;
2033 		fc->tag_set = &mlx5dr_definer_gre_opt_seq_set;
2034 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_3);
2035 	}
2036 
2037 	return 0;
2038 }
2039 
2040 static int
2041 mlx5dr_definer_conv_item_gre_key(struct mlx5dr_definer_conv_data *cd,
2042 				 struct rte_flow_item *item,
2043 				 int item_idx)
2044 {
2045 	const rte_be32_t *m = item->mask;
2046 	struct mlx5dr_definer_fc *fc;
2047 
2048 	if (!cd->relaxed) {
2049 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GRE_KEY_PRESENT];
2050 		fc->item_idx = item_idx;
2051 		fc->tag_set = &mlx5dr_definer_ones_set;
2052 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_0);
2053 		fc->bit_mask = __mlx5_mask(header_gre, gre_k_present);
2054 		fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_k_present);
2055 
2056 		fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, false)];
2057 		if (!fc->tag_set) {
2058 			fc->item_idx = item_idx;
2059 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
2060 			fc->tag_set = &mlx5dr_definer_ipv4_protocol_gre_set;
2061 			DR_CALC_SET(fc, eth_l3, protocol_next_header, false);
2062 		}
2063 	}
2064 
2065 	if (!m)
2066 		return 0;
2067 
2068 	if (*m) {
2069 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GRE_OPT_KEY];
2070 		fc->item_idx = item_idx;
2071 		fc->tag_set = &mlx5dr_definer_gre_key_set;
2072 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_2);
2073 	}
2074 
2075 	return 0;
2076 }
2077 
2078 static int
2079 mlx5dr_definer_conv_item_nvgre(struct mlx5dr_definer_conv_data *cd,
2080 				struct rte_flow_item *item,
2081 				int item_idx)
2082 {
2083 	const struct rte_flow_item_nvgre *m = item->mask;
2084 	struct mlx5dr_definer_fc *fc;
2085 	bool inner = cd->tunnel;
2086 
2087 	if (inner) {
2088 		DR_LOG(ERR, "Inner gre item not supported");
2089 		rte_errno = ENOTSUP;
2090 		return rte_errno;
2091 	}
2092 
2093 	if (!cd->relaxed) {
2094 		fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)];
2095 		if (!fc->tag_set) {
2097 			fc->item_idx = item_idx;
2098 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
2099 			fc->tag_set = &mlx5dr_definer_ipv4_protocol_gre_set;
2100 			DR_CALC_SET(fc, eth_l3, protocol_next_header, inner);
2101 		}
2102 
2103 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_NVGRE_C_K_S];
2104 		fc->item_idx = item_idx;
2105 		fc->tag_set = &mlx5dr_definer_nvgre_def_c_rsvd0_ver_set;
2106 		fc->tag_mask_set = &mlx5dr_definer_nvgre_def_c_rsvd0_ver_mask_set;
2107 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_0);
2108 		fc->bit_mask = __mlx5_mask(header_gre, c_rsvd0_ver);
2109 		fc->bit_off = __mlx5_dw_bit_off(header_gre, c_rsvd0_ver);
2110 
2111 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_NVGRE_PROTOCOL];
2112 		fc->item_idx = item_idx;
2113 		fc->tag_set = &mlx5dr_definer_nvgre_def_protocol_set;
2114 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
2115 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_0);
2116 		fc->byte_off += MLX5_BYTE_OFF(header_gre, gre_protocol);
2117 		fc->bit_mask = __mlx5_mask(header_gre, gre_protocol);
2118 		fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_protocol);
2119 	}
2120 
2121 	if (!m)
2122 		return 0;
2123 
2124 	if (m->c_k_s_rsvd0_ver) {
2125 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_NVGRE_C_K_S];
2126 		fc->item_idx = item_idx;
2127 		fc->tag_set = &mlx5dr_definer_nvgre_c_rsvd0_ver_set;
2128 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_0);
2129 		fc->bit_mask = __mlx5_mask(header_gre, c_rsvd0_ver);
2130 		fc->bit_off = __mlx5_dw_bit_off(header_gre, c_rsvd0_ver);
2131 	}
2132 
2133 	if (m->protocol) {
2134 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_NVGRE_PROTOCOL];
2135 		fc->item_idx = item_idx;
2136 		fc->tag_set = &mlx5dr_definer_nvgre_protocol_set;
2137 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_0);
2138 		fc->byte_off += MLX5_BYTE_OFF(header_gre, gre_protocol);
2139 		fc->bit_mask = __mlx5_mask(header_gre, gre_protocol);
2140 		fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_protocol);
2141 	}
2142 
2143 	if (!is_mem_zero(m->tni, 4)) {
2144 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_NVGRE_DW1];
2145 		fc->item_idx = item_idx;
2146 		fc->tag_set = &mlx5dr_definer_nvgre_dw1_set;
2147 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_2);
2148 	}
2149 	return 0;
2150 }
2151 
2152 static int
2153 mlx5dr_definer_conv_item_ptype(struct mlx5dr_definer_conv_data *cd,
2154 			       struct rte_flow_item *item,
2155 			       int item_idx)
2156 {
2157 	const struct rte_flow_item_ptype *m = item->mask;
2158 	struct mlx5dr_definer_fc *fc;
2159 
2160 	if (!m)
2161 		return 0;
2162 
2163 	if (!(m->packet_type &
2164 	      (RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK | RTE_PTYPE_L4_MASK | RTE_PTYPE_TUNNEL_MASK |
2165 	       RTE_PTYPE_INNER_L2_MASK | RTE_PTYPE_INNER_L3_MASK | RTE_PTYPE_INNER_L4_MASK))) {
2166 		rte_errno = ENOTSUP;
2167 		return rte_errno;
2168 	}
2169 
2170 	if (m->packet_type & RTE_PTYPE_L2_MASK) {
2171 		fc = &cd->fc[DR_CALC_FNAME(PTYPE_L2, false)];
2172 		fc->item_idx = item_idx;
2173 		fc->tag_set = &mlx5dr_definer_ptype_l2_set;
2174 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
2175 		DR_CALC_SET(fc, eth_l2, first_vlan_qualifier, false);
2176 	}
2177 
2178 	if (m->packet_type & RTE_PTYPE_INNER_L2_MASK) {
2179 		fc = &cd->fc[DR_CALC_FNAME(PTYPE_L2, true)];
2180 		fc->item_idx = item_idx;
2181 		fc->tag_set = &mlx5dr_definer_ptype_l2_set;
2182 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
2183 		DR_CALC_SET(fc, eth_l2, first_vlan_qualifier, true);
2184 	}
2185 
2186 	if (m->packet_type & RTE_PTYPE_L3_MASK) {
2187 		fc = &cd->fc[DR_CALC_FNAME(PTYPE_L3, false)];
2188 		fc->item_idx = item_idx;
2189 		fc->tag_set = &mlx5dr_definer_ptype_l3_set;
2190 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
2191 		DR_CALC_SET(fc, eth_l2, l3_type, false);
2192 	}
2193 
2194 	if (m->packet_type & RTE_PTYPE_INNER_L3_MASK) {
2195 		fc = &cd->fc[DR_CALC_FNAME(PTYPE_L3, true)];
2196 		fc->item_idx = item_idx;
2197 		fc->tag_set = &mlx5dr_definer_ptype_l3_set;
2198 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
2199 		DR_CALC_SET(fc, eth_l2, l3_type, true);
2200 	}
2201 
2202 	if (m->packet_type & RTE_PTYPE_L4_MASK) {
2203 		/*
2204 		 * Fragmented IP (Internet Protocol) packet type.
2205 		 * Cannot be combined with Layer 4 Types (TCP/UDP).
2206 		 * The exact value must be specified in the mask.
2207 		 */
2208 		if (m->packet_type == RTE_PTYPE_L4_FRAG) {
2209 			fc = &cd->fc[DR_CALC_FNAME(PTYPE_FRAG, false)];
2210 			fc->item_idx = item_idx;
2211 			fc->tag_set = &mlx5dr_definer_ptype_frag_set;
2212 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
2213 			DR_CALC_SET(fc, eth_l2, ip_fragmented, false);
2214 		} else {
2215 			fc = &cd->fc[DR_CALC_FNAME(PTYPE_L4, false)];
2216 			fc->item_idx = item_idx;
2217 			fc->tag_set = &mlx5dr_definer_ptype_l4_set;
2218 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
2219 			DR_CALC_SET(fc, eth_l2, l4_type_bwc, false);
2220 
2221 			fc = &cd->fc[DR_CALC_FNAME(PTYPE_L4_EXT, false)];
2222 			fc->item_idx = item_idx;
2223 			fc->tag_set = &mlx5dr_definer_ptype_l4_ext_set;
2224 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
2225 			DR_CALC_SET(fc, eth_l2, l4_type, false);
2226 		}
2227 	}
2228 
2229 	if (m->packet_type & RTE_PTYPE_INNER_L4_MASK) {
2230 		if (m->packet_type == RTE_PTYPE_INNER_L4_FRAG) {
2231 			fc = &cd->fc[DR_CALC_FNAME(PTYPE_FRAG, true)];
2232 			fc->item_idx = item_idx;
2233 			fc->tag_set = &mlx5dr_definer_ptype_frag_set;
2234 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
2235 			DR_CALC_SET(fc, eth_l2, ip_fragmented, true);
2236 		} else {
2237 			fc = &cd->fc[DR_CALC_FNAME(PTYPE_L4, true)];
2238 			fc->item_idx = item_idx;
2239 			fc->tag_set = &mlx5dr_definer_ptype_l4_set;
2240 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
2241 			DR_CALC_SET(fc, eth_l2, l4_type_bwc, true);
2242 
2243 			fc = &cd->fc[DR_CALC_FNAME(PTYPE_L4_EXT, true)];
2244 			fc->item_idx = item_idx;
2245 			fc->tag_set = &mlx5dr_definer_ptype_l4_ext_set;
2246 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
2247 			DR_CALC_SET(fc, eth_l2, l4_type, true);
2248 		}
2249 	}
2250 
2251 	if (m->packet_type & RTE_PTYPE_TUNNEL_MASK) {
2252 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_PTYPE_TUNNEL];
2253 		fc->item_idx = item_idx;
2254 		fc->tag_set = &mlx5dr_definer_ptype_tunnel_set;
2255 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
2256 		DR_CALC_SET(fc, eth_l2, l4_type_bwc, false);
2257 	}
2258 
2259 	return 0;
2260 }
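
/*
 * Example for the L4 handling above: a mask of exactly RTE_PTYPE_L4_FRAG
 * programs only the ip_fragmented bit, while any other L4 mask programs
 * both l4_type_bwc (coarse TCP/UDP) and l4_type (extended, e.g. ICMP) so
 * the two HW fields together disambiguate the L4 protocol.
 */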
2261 
2262 static int
2263 mlx5dr_definer_conv_item_integrity(struct mlx5dr_definer_conv_data *cd,
2264 				   struct rte_flow_item *item,
2265 				   int item_idx)
2266 {
2267 	const struct rte_flow_item_integrity *m = item->mask;
2268 	struct mlx5dr_definer_fc *fc;
2269 
2270 	if (!m)
2271 		return 0;
2272 
2273 	if (m->packet_ok || m->l2_ok || m->l2_crc_ok || m->l3_len_ok) {
2274 		rte_errno = ENOTSUP;
2275 		return rte_errno;
2276 	}
2277 
2278 	if (m->l3_ok || m->ipv4_csum_ok || m->l4_ok || m->l4_csum_ok) {
2279 		fc = &cd->fc[DR_CALC_FNAME(INTEGRITY, m->level)];
2280 		fc->item_idx = item_idx;
2281 		fc->tag_set = &mlx5dr_definer_integrity_set;
2282 		DR_CALC_SET_HDR(fc, oks1, oks1_bits);
2283 	}
2284 
2285 	return 0;
2286 }
2287 
2288 static int
2289 mlx5dr_definer_conv_item_conntrack(struct mlx5dr_definer_conv_data *cd,
2290 				   struct rte_flow_item *item,
2291 				   int item_idx)
2292 {
2293 	const struct rte_flow_item_conntrack *m = item->mask;
2294 	struct mlx5dr_definer_fc *fc;
2295 	int reg;
2296 
2297 	if (!m)
2298 		return 0;
2299 
2300 	reg = flow_hw_get_reg_id_from_ctx(cd->ctx,
2301 					  RTE_FLOW_ITEM_TYPE_CONNTRACK,
2302 					  cd->table_type, -1);
2303 	if (reg <= 0) {
2304 		DR_LOG(ERR, "Invalid register for item conntrack");
2305 		rte_errno = EINVAL;
2306 		return rte_errno;
2307 	}
2308 
2309 	fc = mlx5dr_definer_get_register_fc(cd, reg);
2310 	if (!fc)
2311 		return rte_errno;
2312 
2313 	fc->item_idx = item_idx;
2314 	fc->tag_mask_set = &mlx5dr_definer_conntrack_mask;
2315 	fc->tag_set = &mlx5dr_definer_conntrack_tag;
2316 
2317 	return 0;
2318 }
2319 
2320 static int
2321 mlx5dr_definer_conv_item_icmp(struct mlx5dr_definer_conv_data *cd,
2322 			      struct rte_flow_item *item,
2323 			      int item_idx)
2324 {
2325 	const struct rte_flow_item_icmp *m = item->mask;
2326 	struct mlx5dr_definer_fc *fc;
2327 	bool inner = cd->tunnel;
2328 
2329 	/* Overwrite match on L4 type ICMP */
2330 	if (!cd->relaxed) {
2331 		fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)];
2332 		fc->item_idx = item_idx;
2333 		fc->tag_set = &mlx5dr_definer_icmp_protocol_set;
2334 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
2335 		DR_CALC_SET(fc, eth_l2, l4_type, inner);
2336 	}
2337 
2338 	if (!m)
2339 		return 0;
2340 
2341 	if (m->hdr.icmp_type || m->hdr.icmp_code || m->hdr.icmp_cksum) {
2342 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_ICMP_DW1];
2343 		fc->item_idx = item_idx;
2344 		fc->tag_set = &mlx5dr_definer_icmp_dw1_set;
2345 		DR_CALC_SET_HDR(fc, tcp_icmp, icmp_dw1);
2346 	}
2347 
2348 	if (m->hdr.icmp_ident || m->hdr.icmp_seq_nb) {
2349 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_ICMP_DW2];
2350 		fc->item_idx = item_idx;
2351 		fc->tag_set = &mlx5dr_definer_icmp_dw2_set;
2352 		DR_CALC_SET_HDR(fc, tcp_icmp, icmp_dw2);
2353 	}
2354 
2355 	return 0;
2356 }
2357 
2358 static int
2359 mlx5dr_definer_conv_item_icmp6(struct mlx5dr_definer_conv_data *cd,
2360 			       struct rte_flow_item *item,
2361 			       int item_idx)
2362 {
2363 	const struct rte_flow_item_icmp6 *m = item->mask;
2364 	struct mlx5dr_definer_fc *fc;
2365 	bool inner = cd->tunnel;
2366 
2367 	/* Overwrite match on L4 type ICMP6 */
2368 	if (!cd->relaxed) {
2369 		fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)];
2370 		fc->item_idx = item_idx;
2371 		fc->tag_set = &mlx5dr_definer_icmp_protocol_set;
2372 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
2373 		DR_CALC_SET(fc, eth_l2, l4_type, inner);
2374 	}
2375 
2376 	if (!m)
2377 		return 0;
2378 
2379 	if (m->type || m->code || m->checksum) {
2380 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_ICMP_DW1];
2381 		fc->item_idx = item_idx;
2382 		fc->tag_set = &mlx5dr_definer_icmp6_dw1_set;
2383 		DR_CALC_SET_HDR(fc, tcp_icmp, icmp_dw1);
2384 	}
2385 
2386 	return 0;
2387 }
2388 
2389 static int
2390 mlx5dr_definer_conv_item_icmp6_echo(struct mlx5dr_definer_conv_data *cd,
2391 				    struct rte_flow_item *item,
2392 				    int item_idx)
2393 {
2394 	const struct rte_flow_item_icmp6_echo *m = item->mask;
2395 	struct mlx5dr_definer_fc *fc;
2396 	bool inner = cd->tunnel;
2397 
2398 	if (!cd->relaxed) {
2399 		/* Overwrite match on L4 type ICMP6 */
2400 		fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)];
2401 		fc->item_idx = item_idx;
2402 		fc->tag_set = &mlx5dr_definer_icmp_protocol_set;
2403 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
2404 		DR_CALC_SET(fc, eth_l2, l4_type, inner);
2405 
2406 		/* Set fixed type and code for icmp6 echo request/reply */
2407 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_ICMP_DW1];
2408 		fc->item_idx = item_idx;
2409 		fc->tag_mask_set = &mlx5dr_definer_icmp6_echo_dw1_mask_set;
2410 		if (item->type == RTE_FLOW_ITEM_TYPE_ICMP6_ECHO_REQUEST)
2411 			fc->tag_set = &mlx5dr_definer_icmp6_echo_request_dw1_set;
2412 		else /* RTE_FLOW_ITEM_TYPE_ICMP6_ECHO_REPLY */
2413 			fc->tag_set = &mlx5dr_definer_icmp6_echo_reply_dw1_set;
2414 		DR_CALC_SET_HDR(fc, tcp_icmp, icmp_dw1);
2415 	}
2416 
2417 	if (!m)
2418 		return 0;
2419 
2420 	/* Set identifier & sequence into icmp_dw2 */
2421 	if (m->hdr.identifier || m->hdr.sequence) {
2422 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_ICMP_DW2];
2423 		fc->item_idx = item_idx;
2424 		fc->tag_set = &mlx5dr_definer_icmp6_echo_dw2_set;
2425 		DR_CALC_SET_HDR(fc, tcp_icmp, icmp_dw2);
2426 	}
2427 
2428 	return 0;
2429 }
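
/*
 * For the echo items above the ICMPv6 type/code are implied by the item
 * type itself, so in non-relaxed mode DW1 is pre-set (type 128 for echo
 * request, 129 for echo reply, code 0) and only identifier/sequence are
 * taken from the user mask into DW2.
 */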
2430 
2431 static int
2432 mlx5dr_definer_conv_item_meter_color(struct mlx5dr_definer_conv_data *cd,
2433 			     struct rte_flow_item *item,
2434 			     int item_idx)
2435 {
2436 	const struct rte_flow_item_meter_color *m = item->mask;
2437 	struct mlx5dr_definer_fc *fc;
2438 	int reg;
2439 
2440 	if (!m)
2441 		return 0;
2442 
2443 	reg = flow_hw_get_reg_id_from_ctx(cd->ctx,
2444 					  RTE_FLOW_ITEM_TYPE_METER_COLOR,
2445 					  cd->table_type, 0);
2446 	MLX5_ASSERT(reg > 0);
2447 
2448 	fc = mlx5dr_definer_get_register_fc(cd, reg);
2449 	if (!fc)
2450 		return rte_errno;
2451 
2452 	fc->item_idx = item_idx;
2453 	fc->tag_set = &mlx5dr_definer_meter_color_set;
2454 	return 0;
2455 }
2456 
2457 static struct mlx5dr_definer_fc *
2458 mlx5dr_definer_get_flex_parser_fc(struct mlx5dr_definer_conv_data *cd, uint32_t byte_off)
2459 {
2460 	uint32_t byte_off_fp7 = MLX5_BYTE_OFF(definer_hl, flex_parser.flex_parser_7);
2461 	uint32_t byte_off_fp0 = MLX5_BYTE_OFF(definer_hl, flex_parser.flex_parser_0);
2462 	enum mlx5dr_definer_fname fname = MLX5DR_DEFINER_FNAME_FLEX_PARSER_0;
2463 	struct mlx5dr_definer_fc *fc;
2464 	uint32_t idx;
2465 
2466 	if (byte_off < byte_off_fp7 || byte_off > byte_off_fp0) {
2467 		rte_errno = EINVAL;
2468 		return NULL;
2469 	}
2470 	idx = (byte_off_fp0 - byte_off) / (sizeof(uint32_t));
2471 	fname += (enum mlx5dr_definer_fname)idx;
2472 	fc = &cd->fc[fname];
2473 	fc->byte_off = byte_off;
2474 	fc->bit_mask = UINT32_MAX;
2475 	return fc;
2476 }
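
/*
 * Layout note for the index math above: flex_parser_0 sits at the
 * highest byte offset in the header layout and flex_parser_7 at the
 * lowest, so the distance from flex_parser_0 selects the fname, e.g.
 * (offsets are illustrative only):
 *
 *	byte_off_fp0 = 0x70, byte_off = 0x6c
 *	idx = (0x70 - 0x6c) / 4 = 1  ->  ..._FNAME_FLEX_PARSER_1
 */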
2477 
2478 static int
2479 mlx5dr_definer_conv_item_ipv6_routing_ext(struct mlx5dr_definer_conv_data *cd,
2480 					  struct rte_flow_item *item,
2481 					  int item_idx)
2482 {
2483 	const struct rte_flow_item_ipv6_routing_ext *m = item->mask;
2484 	struct mlx5dr_definer_fc *fc;
2485 	bool inner = cd->tunnel;
2486 	uint32_t byte_off;
2487 
2488 	if (!cd->relaxed) {
2489 		fc = &cd->fc[DR_CALC_FNAME(IP_VERSION, inner)];
2490 		fc->item_idx = item_idx;
2491 		fc->tag_set = &mlx5dr_definer_ipv6_version_set;
2492 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
2493 		DR_CALC_SET(fc, eth_l2, l3_type, inner);
2494 
2495 		/* Overwrite - Unset ethertype if present */
2496 		memset(&cd->fc[DR_CALC_FNAME(ETH_TYPE, inner)], 0, sizeof(*fc));
2497 
2498 		fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)];
2499 		if (!fc->tag_set) {
2500 			fc->item_idx = item_idx;
2501 			fc->tag_set = &mlx5dr_definer_ipv6_routing_hdr_set;
2502 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
2503 			fc->not_overwrite = 1;
2504 			DR_CALC_SET(fc, eth_l3, protocol_next_header, inner);
2505 		}
2506 	} else {
2507 		rte_errno = ENOTSUP;
2508 		return rte_errno;
2509 	}
2510 
2511 	if (!m)
2512 		return 0;
2513 
2514 	if (m->hdr.hdr_len || m->hdr.flags) {
2515 		rte_errno = ENOTSUP;
2516 		return rte_errno;
2517 	}
2518 
2519 	if (m->hdr.next_hdr || m->hdr.type || m->hdr.segments_left) {
2520 		byte_off = flow_hw_get_srh_flex_parser_byte_off_from_ctx(cd->ctx);
2521 		fc = mlx5dr_definer_get_flex_parser_fc(cd, byte_off);
2522 		if (!fc)
2523 			return rte_errno;
2524 
2525 		fc->item_idx = item_idx;
2526 		fc->tag_set = &mlx5dr_definer_ipv6_routing_ext_set;
2527 	}
2528 	return 0;
2529 }
2530 
2531 static int
2532 mlx5dr_definer_conv_item_random(struct mlx5dr_definer_conv_data *cd,
2533 				struct rte_flow_item *item,
2534 				int item_idx)
2535 {
2536 	const struct rte_flow_item_random *m = item->mask;
2537 	const struct rte_flow_item_random *l = item->last;
2538 	struct mlx5dr_definer_fc *fc;
2539 
2540 	if (!m)
2541 		return 0;
2542 
2543 	if (m->value != (m->value & UINT16_MAX)) {
2544 		DR_LOG(ERR, "Random value is 16 bits only");
2545 		rte_errno = EINVAL;
2546 		return rte_errno;
2547 	}
2548 
2549 	fc = &cd->fc[MLX5DR_DEFINER_FNAME_RANDOM_NUM];
2550 	fc->item_idx = item_idx;
2551 	fc->tag_set = &mlx5dr_definer_random_number_set;
2552 	fc->is_range = l && l->value;
2553 	DR_CALC_SET_HDR(fc, random_number, random_number);
2554 
2555 	return 0;
2556 }
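
/*
 * The HW random number is only 16 bits wide, hence the check above:
 * a mask of 0x1ffff is rejected (0x1ffff != (0x1ffff & UINT16_MAX))
 * while 0xffff is accepted and matched via the random_number field.
 */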
2557 
2558 static int
2559 mlx5dr_definer_conv_item_geneve(struct mlx5dr_definer_conv_data *cd,
2560 				struct rte_flow_item *item,
2561 				int item_idx)
2562 {
2563 	const struct rte_flow_item_geneve *m = item->mask;
2564 	struct mlx5dr_definer_fc *fc;
2565 	bool inner = cd->tunnel;
2566 
2567 	if (inner) {
2568 		DR_LOG(ERR, "Inner GENEVE item not supported");
2569 		rte_errno = ENOTSUP;
2570 		return rte_errno;
2571 	}
2572 
2573 	/* In order to match on Geneve we must match on ip_protocol and l4_dport */
2574 	if (!cd->relaxed) {
2575 		fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)];
2576 		if (!fc->tag_set) {
2577 			fc->item_idx = item_idx;
2578 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
2579 			fc->tag_set = &mlx5dr_definer_udp_protocol_set;
2580 			DR_CALC_SET(fc, eth_l2, l4_type_bwc, inner);
2581 		}
2582 
2583 		fc = &cd->fc[DR_CALC_FNAME(L4_DPORT, inner)];
2584 		if (!fc->tag_set) {
2585 			fc->item_idx = item_idx;
2586 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
2587 			fc->tag_set = &mlx5dr_definer_geneve_udp_port_set;
2588 			DR_CALC_SET(fc, eth_l4, destination_port, inner);
2589 		}
2590 	}
2591 
2592 	if (!m)
2593 		return 0;
2594 
2595 	if (m->rsvd1) {
2596 		rte_errno = ENOTSUP;
2597 		return rte_errno;
2598 	}
2599 
2600 	if (m->ver_opt_len_o_c_rsvd0) {
2601 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GENEVE_CTRL];
2602 		fc->item_idx = item_idx;
2603 		fc->tag_set = &mlx5dr_definer_geneve_ctrl_set;
2604 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_0);
2605 		fc->bit_mask = __mlx5_mask(header_geneve, ver_opt_len_o_c_rsvd);
2606 		fc->bit_off = __mlx5_dw_bit_off(header_geneve, ver_opt_len_o_c_rsvd);
2607 	}
2608 
2609 	if (m->protocol) {
2610 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GENEVE_PROTO];
2611 		fc->item_idx = item_idx;
2612 		fc->tag_set = &mlx5dr_definer_geneve_protocol_set;
2613 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_0);
2614 		fc->byte_off += MLX5_BYTE_OFF(header_geneve, protocol_type);
2615 		fc->bit_mask = __mlx5_mask(header_geneve, protocol_type);
2616 		fc->bit_off = __mlx5_dw_bit_off(header_geneve, protocol_type);
2617 	}
2618 
2619 	if (!is_mem_zero(m->vni, 3)) {
2620 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GENEVE_VNI];
2621 		fc->item_idx = item_idx;
2622 		fc->tag_set = &mlx5dr_definer_geneve_vni_set;
2623 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_1);
2624 		fc->bit_mask = __mlx5_mask(header_geneve, vni);
2625 		fc->bit_off = __mlx5_dw_bit_off(header_geneve, vni);
2626 	}
2627 
2628 	return 0;
2629 }
2630 
2631 static int
2632 mlx5dr_definer_conv_item_geneve_opt(struct mlx5dr_definer_conv_data *cd,
2633 				    struct rte_flow_item *item,
2634 				    int item_idx)
2635 {
2636 	const struct rte_flow_item_geneve_opt *m = item->mask;
2637 	const struct rte_flow_item_geneve_opt *v = item->spec;
2638 	struct mlx5_hl_data *hl_ok_bit, *hl_dws;
2639 	struct mlx5dr_definer_fc *fc;
2640 	uint8_t num_of_dws, i;
2641 	bool ok_bit_on_class;
2642 	int ret;
2643 
2644 	if (!m || !(m->option_class || m->option_type || m->data))
2645 		return 0;
2646 
2647 	if (!v || m->option_type != 0xff) {
2648 		DR_LOG(ERR, "Cannot match geneve opt without valid opt type");
2649 		goto out_not_supp;
2650 	}
2651 
2652 	ret = mlx5_get_geneve_hl_data(cd->ctx,
2653 				      v->option_type,
2654 				      v->option_class,
2655 				      &hl_ok_bit,
2656 				      &num_of_dws,
2657 				      &hl_dws,
2658 				      &ok_bit_on_class);
2659 	if (ret) {
2660 		DR_LOG(ERR, "Geneve opt type and class %d not supported", v->option_type);
2661 		goto out_not_supp;
2662 	}
2663 
2664 	if (ok_bit_on_class && m->option_class != RTE_BE16(UINT16_MAX)) {
2665 		DR_LOG(ERR, "Geneve option class has invalid mask");
2666 		goto out_not_supp;
2667 	}
2668 
2669 	if (!ok_bit_on_class && m->option_class) {
2670 		/* DW0 is used, we will match on type and class */
2671 		if (!num_of_dws || hl_dws[0].dw_mask != UINT32_MAX) {
2672 			DR_LOG(ERR, "Geneve opt type %d DW0 not supported", v->option_type);
2673 			goto out_not_supp;
2674 		}
2675 
2676 		if (MLX5DR_DEFINER_FNAME_GENEVE_OPT_DW_0 + cd->geneve_opt_data_idx >
2677 		    MLX5DR_DEFINER_FNAME_GENEVE_OPT_DW_7) {
2678 			DR_LOG(ERR, "Max match geneve opt DWs reached");
2679 			goto out_not_supp;
2680 		}
2681 
2682 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GENEVE_OPT_DW_0 + cd->geneve_opt_data_idx++];
2683 		fc->item_idx = item_idx;
2684 		fc->tag_set = &mlx5dr_definer_geneve_opt_ctrl_set;
2685 		fc->byte_off = hl_dws[0].dw_offset * DW_SIZE;
2686 		fc->bit_mask = UINT32_MAX;
2687 	} else {
2688 		/* DW0 is not used, we must verify geneve opt type exists in packet */
2689 		if (!hl_ok_bit->dw_mask) {
2690 			DR_LOG(ERR, "Geneve opt OK bits not supported");
2691 			goto out_not_supp;
2692 		}
2693 
2694 		if (MLX5DR_DEFINER_FNAME_GENEVE_OPT_OK_0 + cd->geneve_opt_ok_idx >
2695 		    MLX5DR_DEFINER_FNAME_GENEVE_OPT_OK_7) {
2696 			DR_LOG(ERR, "Max match geneve opt reached");
2697 			goto out_not_supp;
2698 		}
2699 
2700 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GENEVE_OPT_OK_0 + cd->geneve_opt_ok_idx++];
2701 		fc->item_idx = item_idx;
2702 		fc->tag_set = &mlx5dr_definer_ones_set;
2703 		fc->byte_off = hl_ok_bit->dw_offset * DW_SIZE +
2704 				rte_clz32(hl_ok_bit->dw_mask) / 8;
2705 		fc->bit_off = rte_ctz32(hl_ok_bit->dw_mask);
2706 		fc->bit_mask = 0x1;
2707 	}
2708 
2709 	for (i = 1; i < num_of_dws; i++) {
2710 		/* Process each valid geneve option data DW1..N */
2711 		if (!m->data[i - 1])
2712 			continue;
2713 
2714 		if (hl_dws[i].dw_mask != UINT32_MAX) {
2715 			DR_LOG(ERR, "Matching Geneve opt data[%d] not supported", i - 1);
2716 			goto out_not_supp;
2717 		}
2718 
2719 		if (MLX5DR_DEFINER_FNAME_GENEVE_OPT_DW_0 + cd->geneve_opt_data_idx >
2720 		    MLX5DR_DEFINER_FNAME_GENEVE_OPT_DW_7) {
2721 			DR_LOG(ERR, "Max match geneve options DWs reached");
2722 			goto out_not_supp;
2723 		}
2724 
2725 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GENEVE_OPT_DW_0 + cd->geneve_opt_data_idx++];
2726 		fc->item_idx = item_idx;
2727 		fc->tag_set = &mlx5dr_definer_geneve_opt_data_set;
2728 		fc->byte_off = hl_dws[i].dw_offset * DW_SIZE;
2729 		fc->bit_mask = m->data[i - 1];
2730 		/* Use extra_data for data[] set offset */
2731 		fc->extra_data = i - 1;
2732 	}
2733 
2734 	return 0;
2735 
2736 out_not_supp:
2737 	rte_errno = ENOTSUP;
2738 	return rte_errno;
2739 }
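
/*
 * Summary of the two GENEVE option strategies above: when the option
 * class takes part in the match, DW0 of the option (class/type/len) is
 * matched as one full DW; otherwise only the parser "OK" bit is used to
 * assert the option exists. The OK bit position is derived from its DW
 * mask, e.g. a dw_mask of 0x00000080 yields bit_off = rte_ctz32(mask) = 7.
 */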
2740 
2741 static int
2742 mlx5dr_definer_mt_set_fc(struct mlx5dr_match_template *mt,
2743 			 struct mlx5dr_definer_fc *fc,
2744 			 uint8_t *hl)
2745 {
2746 	uint32_t fc_sz = 0, fcr_sz = 0;
2747 	int i;
2748 
2749 	for (i = 0; i < MLX5DR_DEFINER_FNAME_MAX; i++)
2750 		if (fc[i].tag_set)
2751 			fc[i].is_range ? fcr_sz++ : fc_sz++;
2752 
2753 	mt->fc = simple_calloc(fc_sz + fcr_sz, sizeof(*mt->fc));
2754 	if (!mt->fc) {
2755 		rte_errno = ENOMEM;
2756 		return rte_errno;
2757 	}
2758 
2759 	mt->fcr = mt->fc + fc_sz;
2760 
2761 	for (i = 0; i < MLX5DR_DEFINER_FNAME_MAX; i++) {
2762 		if (!fc[i].tag_set)
2763 			continue;
2764 
2765 		fc[i].fname = i;
2766 
2767 		if (fc[i].is_range) {
2768 			memcpy(&mt->fcr[mt->fcr_sz++], &fc[i], sizeof(*mt->fcr));
2769 		} else {
2770 			memcpy(&mt->fc[mt->fc_sz++], &fc[i], sizeof(*mt->fc));
2771 			DR_SET(hl, -1, fc[i].byte_off, fc[i].bit_off, fc[i].bit_mask);
2772 		}
2773 	}
2774 
2775 	return 0;
2776 }
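
/*
 * After conversion the sparse per-fname array is compacted into two
 * contiguous arrays on the template: mt->fc for exact-match fields and
 * mt->fcr for range fields (mt->fcr = mt->fc + fc_sz). Writing -1
 * through DR_SET() raises every masked bit in the header layout, which
 * lets later definer selection see exactly which DWs/bytes are in use.
 */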
2777 
2778 static int
2779 mlx5dr_definer_check_item_range_supp(struct rte_flow_item *item)
2780 {
2781 	if (!item->last)
2782 		return 0;
2783 
2784 	switch ((int)item->type) {
2785 	case RTE_FLOW_ITEM_TYPE_IPV4:
2786 	case RTE_FLOW_ITEM_TYPE_IPV6:
2787 	case RTE_FLOW_ITEM_TYPE_UDP:
2788 	case RTE_FLOW_ITEM_TYPE_TCP:
2789 	case RTE_FLOW_ITEM_TYPE_TAG:
2790 	case RTE_FLOW_ITEM_TYPE_META:
2791 	case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
2792 	case RTE_FLOW_ITEM_TYPE_RANDOM:
2793 		return 0;
2794 	default:
2795 		DR_LOG(ERR, "Range not supported over item type %d", item->type);
2796 		rte_errno = ENOTSUP;
2797 		return rte_errno;
2798 	}
2799 }
2800 
2801 static int
2802 mlx5dr_definer_conv_item_esp(struct mlx5dr_definer_conv_data *cd,
2803 			     struct rte_flow_item *item,
2804 			     int item_idx)
2805 {
2806 	const struct rte_flow_item_esp *m = item->mask;
2807 	struct mlx5dr_definer_fc *fc;
2808 
2809 	if (!m)
2810 		return 0;
2811 	if (m->hdr.spi) {
2812 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_ESP_SPI];
2813 		fc->item_idx = item_idx;
2814 		fc->tag_set = &mlx5dr_definer_ipsec_spi_set;
2815 		DR_CALC_SET_HDR(fc, ipsec, spi);
2816 	}
2817 	if (m->hdr.seq) {
2818 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_ESP_SEQUENCE_NUMBER];
2819 		fc->item_idx = item_idx;
2820 		fc->tag_set = &mlx5dr_definer_ipsec_sequence_number_set;
2821 		DR_CALC_SET_HDR(fc, ipsec, sequence_number);
2822 	}
2823 	return 0;
2824 }
2825 
2826 static void mlx5dr_definer_set_conv_tunnel(enum rte_flow_item_type cur_type,
2827 					   uint64_t item_flags,
2828 					   struct mlx5dr_definer_conv_data *cd)
2829 {
2830 	/* Already tunnel nothing to change */
2831 	if (cd->tunnel)
2832 		return;
2833 
2834 	/* We can have more than one MPLS label at each level (inner/outer), so
2835 	 * consider tunnel only when it is already under tunnel or if we moved to the
2836 	 * second MPLS level.
2837 	 */
2838 	if (cur_type != RTE_FLOW_ITEM_TYPE_MPLS)
2839 		cd->tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2840 	else
2841 		cd->tunnel = !!(item_flags & DR_FLOW_LAYER_TUNNEL_NO_MPLS);
2842 }
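
/*
 * Example for the MPLS rule above: in an eth / ipv4 / udp / mpls / mpls
 * pattern the MPLS items alone do not flip cd->tunnel; an MPLS item is
 * treated as inner only once a non-MPLS tunnel layer was already seen
 * (DR_FLOW_LAYER_TUNNEL_NO_MPLS masks MPLS out of the tunnel flags).
 */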
2843 
2844 static int
2845 mlx5dr_definer_conv_item_flex_parser(struct mlx5dr_definer_conv_data *cd,
2846 				     struct rte_flow_item *item,
2847 				     int item_idx)
2848 {
2849 	uint32_t base_off = MLX5_BYTE_OFF(definer_hl, flex_parser.flex_parser_0);
2850 	const struct rte_flow_item_flex *v, *m;
2851 	enum mlx5dr_definer_fname fname;
2852 	struct mlx5dr_definer_fc *fc;
2853 	uint32_t i, mask, byte_off;
2854 	bool is_inner = cd->tunnel;
2855 	int ret;
2856 
2857 	m = item->mask;
2858 	v = item->spec;
2859 	mask = 0;
2860 	for (i = 0; i < MLX5_GRAPH_NODE_SAMPLE_NUM; i++) {
2861 		byte_off = base_off - i * sizeof(uint32_t);
2862 		ret = mlx5_flex_get_parser_value_per_byte_off(m, v->handle, byte_off,
2863 							      is_inner, &mask);
2864 		if (ret == -1) {
2865 			rte_errno = EINVAL;
2866 			return rte_errno;
2867 		}
2868 
2869 		if (!mask)
2870 			continue;
2871 
2872 		fname = MLX5DR_DEFINER_FNAME_FLEX_PARSER_0;
2873 		fname += (enum mlx5dr_definer_fname)i;
2874 		fc = &cd->fc[fname];
2875 		fc->byte_off = byte_off;
2876 		fc->item_idx = item_idx;
2877 		fc->tag_set = cd->tunnel ? &mlx5dr_definer_flex_parser_inner_set :
2878 					   &mlx5dr_definer_flex_parser_outer_set;
2879 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
2880 		fc->bit_mask = mask;
2881 	}
2882 	return 0;
2883 }
2884 
2885 static int
2886 mlx5dr_definer_conv_item_ib_l4(struct mlx5dr_definer_conv_data *cd,
2887 			       struct rte_flow_item *item,
2888 			       int item_idx)
2889 {
2890 	const struct rte_flow_item_ib_bth *m = item->mask;
2891 	struct mlx5dr_definer_fc *fc;
2892 	bool inner = cd->tunnel;
2893 
2894 	/* In order to match on RoCEv2 (layer 4 IB), we must match
2895 	 * on ip_protocol and l4_dport.
2896 	 */
2897 	if (!cd->relaxed) {
2898 		fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)];
2899 		if (!fc->tag_set) {
2900 			fc->item_idx = item_idx;
2901 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
2902 			fc->tag_set = &mlx5dr_definer_udp_protocol_set;
2903 			DR_CALC_SET(fc, eth_l2, l4_type_bwc, inner);
2904 		}
2905 
2906 		fc = &cd->fc[DR_CALC_FNAME(L4_DPORT, inner)];
2907 		if (!fc->tag_set) {
2908 			fc->item_idx = item_idx;
2909 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
2910 			fc->tag_set = &mlx5dr_definer_ib_l4_udp_port_set;
2911 			DR_CALC_SET(fc, eth_l4, destination_port, inner);
2912 		}
2913 	}
2914 
2915 	if (!m)
2916 		return 0;
2917 
2918 	if (m->hdr.se || m->hdr.m || m->hdr.padcnt || m->hdr.tver ||
2919 		m->hdr.pkey || m->hdr.f || m->hdr.b || m->hdr.rsvd0 ||
2920 		m->hdr.rsvd1 || !is_mem_zero(m->hdr.psn, 3)) {
2921 		rte_errno = ENOTSUP;
2922 		return rte_errno;
2923 	}
2924 
2925 	if (m->hdr.opcode) {
2926 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_IB_L4_OPCODE];
2927 		fc->item_idx = item_idx;
2928 		fc->tag_set = &mlx5dr_definer_ib_l4_opcode_set;
2929 		DR_CALC_SET_HDR(fc, ib_l4, opcode);
2930 	}
2931 
2932 	if (!is_mem_zero(m->hdr.dst_qp, 3)) {
2933 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_IB_L4_QPN];
2934 		fc->item_idx = item_idx;
2935 		fc->tag_set = &mlx5dr_definer_ib_l4_qp_set;
2936 		DR_CALC_SET_HDR(fc, ib_l4, qp);
2937 	}
2938 
2939 	if (m->hdr.a) {
2940 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_IB_L4_A];
2941 		fc->item_idx = item_idx;
2942 		fc->tag_set = &mlx5dr_definer_ib_l4_bth_a_set;
2943 		DR_CALC_SET_HDR(fc, ib_l4, ackreq);
2944 	}
2945 
2946 	return 0;
2947 }
2948 
2949 static int
2950 mlx5dr_definer_conv_item_vxlan_gpe(struct mlx5dr_definer_conv_data *cd,
2951 				   struct rte_flow_item *item,
2952 				   int item_idx)
2953 {
2954 	const struct rte_flow_item_vxlan_gpe *m = item->mask;
2955 	struct mlx5dr_definer_fc *fc;
2956 	bool inner = cd->tunnel;
2957 
2958 	if (inner) {
2959 		DR_LOG(ERR, "Inner VXLAN GPE item not supported");
2960 		rte_errno = ENOTSUP;
2961 		return rte_errno;
2962 	}
2963 
2964 	/* In order to match on VXLAN GPE we must match on ip_protocol and l4_dport */
2965 	if (!cd->relaxed) {
2966 		fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)];
2967 		if (!fc->tag_set) {
2968 			fc->item_idx = item_idx;
2969 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
2970 			fc->tag_set = &mlx5dr_definer_udp_protocol_set;
2971 			DR_CALC_SET(fc, eth_l2, l4_type_bwc, inner);
2972 		}
2973 
2974 		fc = &cd->fc[DR_CALC_FNAME(L4_DPORT, inner)];
2975 		if (!fc->tag_set) {
2976 			fc->item_idx = item_idx;
2977 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
2978 			fc->tag_set = &mlx5dr_definer_vxlan_gpe_udp_port_set;
2979 			DR_CALC_SET(fc, eth_l4, destination_port, inner);
2980 		}
2981 	}
2982 
2983 	if (!m)
2984 		return 0;
2985 
2986 	if (m->flags) {
2987 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_VXLAN_GPE_FLAGS];
2988 		fc->item_idx = item_idx;
2989 		fc->tag_set = &mlx5dr_definer_vxlan_gpe_flags_set;
2990 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_0);
2991 		fc->bit_mask = __mlx5_mask(header_vxlan_gpe, flags);
2992 		fc->bit_off = __mlx5_dw_bit_off(header_vxlan_gpe, flags);
2993 	}
2994 
2995 	if (!is_mem_zero(m->rsvd0, 2)) {
2996 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_VXLAN_GPE_RSVD0];
2997 		fc->item_idx = item_idx;
2998 		fc->tag_set = &mlx5dr_definer_vxlan_gpe_rsvd0_set;
2999 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_0);
3000 		fc->bit_mask = __mlx5_mask(header_vxlan_gpe, rsvd0);
3001 		fc->bit_off = __mlx5_dw_bit_off(header_vxlan_gpe, rsvd0);
3002 	}
3003 
3004 	if (m->protocol) {
3005 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_VXLAN_GPE_PROTO];
3006 		fc->item_idx = item_idx;
3007 		fc->tag_set = &mlx5dr_definer_vxlan_gpe_protocol_set;
3008 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_0);
3009 		fc->byte_off += MLX5_BYTE_OFF(header_vxlan_gpe, protocol);
3010 		fc->bit_mask = __mlx5_mask(header_vxlan_gpe, protocol);
3011 		fc->bit_off = __mlx5_dw_bit_off(header_vxlan_gpe, protocol);
3012 	}
3013 
3014 	if (!is_mem_zero(m->vni, 3)) {
3015 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_VXLAN_GPE_VNI];
3016 		fc->item_idx = item_idx;
3017 		fc->tag_set = &mlx5dr_definer_vxlan_gpe_vni_set;
3018 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_1);
3019 		fc->bit_mask = __mlx5_mask(header_vxlan_gpe, vni);
3020 		fc->bit_off = __mlx5_dw_bit_off(header_vxlan_gpe, vni);
3021 	}
3022 
3023 	if (m->rsvd1) {
3024 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_VXLAN_GPE_RSVD1];
3025 		fc->item_idx = item_idx;
3026 		fc->tag_set = &mlx5dr_definer_vxlan_gpe_rsvd1_set;
3027 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_1);
3028 		fc->bit_mask = __mlx5_mask(header_vxlan_gpe, rsvd1);
3029 		fc->bit_off = __mlx5_dw_bit_off(header_vxlan_gpe, rsvd1);
3030 	}
3031 
3032 	return 0;
3033 }
3034 
3035 static int
3036 mlx5dr_definer_conv_item_compare_field(const struct rte_flow_field_data *f,
3037 				       const struct rte_flow_field_data *other_f,
3038 				       struct mlx5dr_definer_conv_data *cd,
3039 				       int item_idx,
3040 				       enum mlx5dr_definer_compare_dw_selectors dw_offset)
3041 {
3042 	struct mlx5dr_definer_fc *fc = NULL;
3043 	int reg;
3044 
3045 	if (f->offset) {
3046 		DR_LOG(ERR, "field offset %u is not supported, only offset zero supported",
3047 		       f->offset);
3048 		goto err_notsup;
3049 	}
3050 
3051 	switch (f->field) {
3052 	case RTE_FLOW_FIELD_META:
3053 		reg = flow_hw_get_reg_id_from_ctx(cd->ctx,
3054 						  RTE_FLOW_ITEM_TYPE_META,
3055 						  cd->table_type, -1);
3056 		if (reg <= 0) {
3057 			DR_LOG(ERR, "Invalid register for compare metadata field");
3058 			rte_errno = EINVAL;
3059 			return rte_errno;
3060 		}
3061 
3062 		fc = mlx5dr_definer_get_register_fc(cd, reg);
3063 		if (!fc)
3064 			return rte_errno;
3065 
3066 		fc->item_idx = item_idx;
3067 		fc->tag_set = &mlx5dr_definer_compare_set;
3068 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
3069 		fc->compare_idx = dw_offset;
3070 		break;
3071 	case RTE_FLOW_FIELD_TAG:
3072 		reg = flow_hw_get_reg_id_from_ctx(cd->ctx,
3073 						  RTE_FLOW_ITEM_TYPE_TAG,
3074 						  cd->table_type,
3075 						  f->tag_index);
3076 		if (reg <= 0) {
3077 			DR_LOG(ERR, "Invalid register for compare tag field");
3078 			rte_errno = EINVAL;
3079 			return rte_errno;
3080 		}
3081 
3082 		fc = mlx5dr_definer_get_register_fc(cd, reg);
3083 		if (!fc)
3084 			return rte_errno;
3085 
3086 		fc->item_idx = item_idx;
3087 		fc->tag_set = &mlx5dr_definer_compare_set;
3088 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
3089 		fc->compare_idx = dw_offset;
3090 		break;
3091 	case RTE_FLOW_FIELD_VALUE:
3092 		if (dw_offset == MLX5DR_DEFINER_COMPARE_ARGUMENT_0) {
3093 			DR_LOG(ERR, "Argument field does not support immediate value");
3094 			goto err_notsup;
3095 		}
3096 		break;
3097 	case RTE_FLOW_FIELD_RANDOM:
3098 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_RANDOM_NUM];
3099 		fc->item_idx = item_idx;
3100 		fc->tag_set = &mlx5dr_definer_compare_set;
3101 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
3102 		fc->compare_idx = dw_offset;
3103 		DR_CALC_SET_HDR(fc, random_number, random_number);
3104 		break;
3105 	case RTE_FLOW_FIELD_ESP_SEQ_NUM:
3106 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_ESP_SEQUENCE_NUMBER];
3107 		fc->item_idx = item_idx;
3108 		fc->tag_set = &mlx5dr_definer_compare_set;
3109 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
3110 		fc->compare_idx = dw_offset;
3111 		DR_CALC_SET_HDR(fc, ipsec, sequence_number);
3112 		break;
3113 	default:
3114 		DR_LOG(ERR, "%u field is not supported", f->field);
3115 		goto err_notsup;
3116 	}
3117 
3118 	if (fc && other_f && other_f->field == RTE_FLOW_FIELD_VALUE)
3119 		fc->compare_set_base = true;
3120 
3121 	return 0;
3122 
3123 err_notsup:
3124 	rte_errno = ENOTSUP;
3125 	return rte_errno;
3126 }
3127 
3128 static int
3129 mlx5dr_definer_conv_item_compare(struct mlx5dr_definer_conv_data *cd,
3130 				 struct rte_flow_item *item,
3131 				 int item_idx)
3132 {
3133 	const struct rte_flow_item_compare *m = item->mask;
3134 	const struct rte_flow_field_data *a = &m->a;
3135 	const struct rte_flow_field_data *b = &m->b;
3136 	int ret;
3137 
3138 	if (m->width != 0xffffffff) {
3139 		DR_LOG(ERR, "compare item width of 0x%x is not supported, only full DW supported",
3140 				m->width);
3141 		rte_errno = ENOTSUP;
3142 		return rte_errno;
3143 	}
3144 
3145 	ret = mlx5dr_definer_conv_item_compare_field(a, b, cd, item_idx,
3146 						     MLX5DR_DEFINER_COMPARE_ARGUMENT_0);
3147 	if (ret)
3148 		return ret;
3149 
3150 	ret = mlx5dr_definer_conv_item_compare_field(b, NULL, cd, item_idx,
3151 						     MLX5DR_DEFINER_COMPARE_BASE_0);
3152 	if (ret)
3153 		return ret;
3154 
3155 	return 0;
3156 }
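
/*
 * The compare item always splits into an argument (m->a, selector
 * MLX5DR_DEFINER_COMPARE_ARGUMENT_0) and a base (m->b, selector
 * MLX5DR_DEFINER_COMPARE_BASE_0). An immediate RTE_FLOW_FIELD_VALUE is
 * legal only on the base side, and only full-DW comparisons
 * (width == 0xffffffff) are programmable.
 */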
3157 
3158 static int
3159 mlx5dr_definer_conv_items_to_hl(struct mlx5dr_context *ctx,
3160 				struct mlx5dr_match_template *mt,
3161 				uint8_t *hl,
3162 				struct mlx5dr_matcher *matcher)
3163 {
3164 	struct mlx5dr_definer_fc fc[MLX5DR_DEFINER_FNAME_MAX] = {{0}};
3165 	struct mlx5dr_definer_conv_data cd = {0};
3166 	struct rte_flow_item *items = mt->items;
3167 	uint64_t item_flags = 0;
3168 	int i, ret;
3169 
3170 	cd.fc = fc;
3171 	cd.ctx = ctx;
3172 	cd.relaxed = mt->flags & MLX5DR_MATCH_TEMPLATE_FLAG_RELAXED_MATCH;
3173 	cd.table_type = matcher->tbl->type;
3174 
3175 	/* Collect all RTE fields into the field array and set the header layout */
3176 	for (i = 0; items->type != RTE_FLOW_ITEM_TYPE_END; i++, items++) {
3177 		mlx5dr_definer_set_conv_tunnel(items->type, item_flags, &cd);
3178 
3179 		ret = mlx5dr_definer_check_item_range_supp(items);
3180 		if (ret)
3181 			return ret;
3182 
3183 		if (mlx5dr_matcher_is_compare(matcher)) {
3184 			DR_LOG(ERR, "Compare matcher not supported for more than one item");
3185 			goto not_supp;
3186 		}
3187 
3188 		switch ((int)items->type) {
3189 		case RTE_FLOW_ITEM_TYPE_ETH:
3190 			ret = mlx5dr_definer_conv_item_eth(&cd, items, i);
3191 			item_flags |= cd.tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
3192 						  MLX5_FLOW_LAYER_OUTER_L2;
3193 			break;
3194 		case RTE_FLOW_ITEM_TYPE_VLAN:
3195 			ret = mlx5dr_definer_conv_item_vlan(&cd, items, i);
3196 			item_flags |= cd.tunnel ?
3197 				(MLX5_FLOW_LAYER_INNER_VLAN | MLX5_FLOW_LAYER_INNER_L2) :
3198 				(MLX5_FLOW_LAYER_OUTER_VLAN | MLX5_FLOW_LAYER_OUTER_L2);
3199 			break;
3200 		case RTE_FLOW_ITEM_TYPE_IPV4:
3201 			ret = mlx5dr_definer_conv_item_ipv4(&cd, items, i);
3202 			item_flags |= cd.tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
3203 						  MLX5_FLOW_LAYER_OUTER_L3_IPV4;
3204 			break;
3205 		case RTE_FLOW_ITEM_TYPE_IPV6:
3206 			ret = mlx5dr_definer_conv_item_ipv6(&cd, items, i);
3207 			item_flags |= cd.tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
3208 						  MLX5_FLOW_LAYER_OUTER_L3_IPV6;
3209 			break;
3210 		case RTE_FLOW_ITEM_TYPE_UDP:
3211 			ret = mlx5dr_definer_conv_item_udp(&cd, items, i);
3212 			item_flags |= cd.tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
3213 						  MLX5_FLOW_LAYER_OUTER_L4_UDP;
3214 			break;
3215 		case RTE_FLOW_ITEM_TYPE_TCP:
3216 			ret = mlx5dr_definer_conv_item_tcp(&cd, items, i);
3217 			item_flags |= cd.tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
3218 						  MLX5_FLOW_LAYER_OUTER_L4_TCP;
3219 			break;
3220 		case RTE_FLOW_ITEM_TYPE_GTP:
3221 			ret = mlx5dr_definer_conv_item_gtp(&cd, items, i);
3222 			item_flags |= MLX5_FLOW_LAYER_GTP;
3223 			break;
3224 		case RTE_FLOW_ITEM_TYPE_GTP_PSC:
3225 			ret = mlx5dr_definer_conv_item_gtp_psc(&cd, items, i);
3226 			item_flags |= MLX5_FLOW_LAYER_GTP_PSC;
3227 			break;
3228 		case RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT:
3229 			ret = mlx5dr_definer_conv_item_port(&cd, items, i);
3230 			item_flags |= MLX5_FLOW_ITEM_REPRESENTED_PORT;
3231 			mt->vport_item_id = i;
3232 			break;
3233 		case RTE_FLOW_ITEM_TYPE_VXLAN:
3234 			ret = mlx5dr_definer_conv_item_vxlan(&cd, items, i);
3235 			item_flags |= MLX5_FLOW_LAYER_VXLAN;
3236 			break;
3237 		case RTE_FLOW_ITEM_TYPE_TX_QUEUE:
3238 			ret = mlx5dr_definer_conv_item_tx_queue(&cd, items, i);
3239 			item_flags |= MLX5_FLOW_ITEM_SQ;
3240 			break;
3241 		case MLX5_RTE_FLOW_ITEM_TYPE_SQ:
3242 			ret = mlx5dr_definer_conv_item_sq(&cd, items, i);
3243 			item_flags |= MLX5_FLOW_ITEM_SQ;
3244 			break;
3245 		case RTE_FLOW_ITEM_TYPE_TAG:
3246 		case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
3247 			ret = mlx5dr_definer_conv_item_tag(&cd, items, i);
3248 			item_flags |= MLX5_FLOW_ITEM_TAG;
3249 			break;
3250 		case RTE_FLOW_ITEM_TYPE_META:
3251 			ret = mlx5dr_definer_conv_item_metadata(&cd, items, i);
3252 			item_flags |= MLX5_FLOW_ITEM_METADATA;
3253 			break;
3254 		case RTE_FLOW_ITEM_TYPE_GRE:
3255 			ret = mlx5dr_definer_conv_item_gre(&cd, items, i);
3256 			item_flags |= MLX5_FLOW_LAYER_GRE;
3257 			break;
3258 		case RTE_FLOW_ITEM_TYPE_GRE_OPTION:
3259 			ret = mlx5dr_definer_conv_item_gre_opt(&cd, items, i);
3260 			item_flags |= MLX5_FLOW_LAYER_GRE;
3261 			break;
3262 		case RTE_FLOW_ITEM_TYPE_GRE_KEY:
3263 			ret = mlx5dr_definer_conv_item_gre_key(&cd, items, i);
3264 			item_flags |= MLX5_FLOW_LAYER_GRE_KEY;
3265 			break;
3266 		case RTE_FLOW_ITEM_TYPE_INTEGRITY:
3267 			ret = mlx5dr_definer_conv_item_integrity(&cd, items, i);
3268 			item_flags |= MLX5_FLOW_ITEM_INTEGRITY;
3269 			break;
3270 		case RTE_FLOW_ITEM_TYPE_CONNTRACK:
3271 			ret = mlx5dr_definer_conv_item_conntrack(&cd, items, i);
3272 			break;
3273 		case RTE_FLOW_ITEM_TYPE_ICMP:
3274 			ret = mlx5dr_definer_conv_item_icmp(&cd, items, i);
3275 			item_flags |= MLX5_FLOW_LAYER_ICMP;
3276 			break;
3277 		case RTE_FLOW_ITEM_TYPE_ICMP6:
3278 			ret = mlx5dr_definer_conv_item_icmp6(&cd, items, i);
3279 			item_flags |= MLX5_FLOW_LAYER_ICMP6;
3280 			break;
3281 		case RTE_FLOW_ITEM_TYPE_ICMP6_ECHO_REQUEST:
3282 		case RTE_FLOW_ITEM_TYPE_ICMP6_ECHO_REPLY:
3283 			ret = mlx5dr_definer_conv_item_icmp6_echo(&cd, items, i);
3284 			item_flags |= MLX5_FLOW_LAYER_ICMP6;
3285 			break;
3286 		case RTE_FLOW_ITEM_TYPE_METER_COLOR:
3287 			ret = mlx5dr_definer_conv_item_meter_color(&cd, items, i);
3288 			item_flags |= MLX5_FLOW_ITEM_METER_COLOR;
3289 			break;
3290 		case RTE_FLOW_ITEM_TYPE_QUOTA:
3291 			ret = mlx5dr_definer_conv_item_quota(&cd, items, i);
3292 			item_flags |= MLX5_FLOW_ITEM_QUOTA;
3293 			break;
3294 		case RTE_FLOW_ITEM_TYPE_IPV6_ROUTING_EXT:
3295 			ret = mlx5dr_definer_conv_item_ipv6_routing_ext(&cd, items, i);
3296 			item_flags |= cd.tunnel ? MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT :
3297 						  MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT;
3298 			break;
3299 		case RTE_FLOW_ITEM_TYPE_ESP:
3300 			ret = mlx5dr_definer_conv_item_esp(&cd, items, i);
3301 			item_flags |= MLX5_FLOW_ITEM_ESP;
3302 			break;
3303 		case RTE_FLOW_ITEM_TYPE_FLEX:
3304 			ret = mlx5dr_definer_conv_item_flex_parser(&cd, items, i);
3305 			if (ret == 0) {
3306 				enum rte_flow_item_flex_tunnel_mode tunnel_mode =
3307 								FLEX_TUNNEL_MODE_SINGLE;
3308 
3309 				ret = mlx5_flex_get_tunnel_mode(items, &tunnel_mode);
3310 				if (tunnel_mode == FLEX_TUNNEL_MODE_TUNNEL)
3311 					item_flags |= MLX5_FLOW_ITEM_FLEX_TUNNEL;
3312 				else
3313 					item_flags |= cd.tunnel ? MLX5_FLOW_ITEM_INNER_FLEX :
3314 								  MLX5_FLOW_ITEM_OUTER_FLEX;
3315 			}
3316 			break;
3317 		case RTE_FLOW_ITEM_TYPE_MPLS:
3318 			ret = mlx5dr_definer_conv_item_mpls(&cd, items, i);
3319 			item_flags |= MLX5_FLOW_LAYER_MPLS;
3320 			cd.mpls_idx++;
3321 			break;
3322 		case RTE_FLOW_ITEM_TYPE_GENEVE:
3323 			ret = mlx5dr_definer_conv_item_geneve(&cd, items, i);
3324 			item_flags |= MLX5_FLOW_LAYER_GENEVE;
3325 			break;
3326 		case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
3327 			ret = mlx5dr_definer_conv_item_geneve_opt(&cd, items, i);
3328 			item_flags |= MLX5_FLOW_LAYER_GENEVE_OPT;
3329 			break;
3330 		case RTE_FLOW_ITEM_TYPE_IB_BTH:
3331 			ret = mlx5dr_definer_conv_item_ib_l4(&cd, items, i);
3332 			item_flags |= MLX5_FLOW_ITEM_IB_BTH;
3333 			break;
3334 		case RTE_FLOW_ITEM_TYPE_PTYPE:
3335 			ret = mlx5dr_definer_conv_item_ptype(&cd, items, i);
3336 			item_flags |= MLX5_FLOW_ITEM_PTYPE;
3337 			break;
3338 		case RTE_FLOW_ITEM_TYPE_RANDOM:
3339 			ret = mlx5dr_definer_conv_item_random(&cd, items, i);
3340 			item_flags |= MLX5_FLOW_ITEM_RANDOM;
3341 			break;
3342 		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
3343 			ret = mlx5dr_definer_conv_item_vxlan_gpe(&cd, items, i);
3344 			item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
3345 			break;
3346 		case RTE_FLOW_ITEM_TYPE_COMPARE:
3347 			if (i) {
3348 				DR_LOG(ERR, "Compare matcher not supported for more than one item");
3349 				goto not_supp;
3350 			}
3351 			ret = mlx5dr_definer_conv_item_compare(&cd, items, i);
3352 			item_flags |= MLX5_FLOW_ITEM_COMPARE;
3353 			matcher->flags |= MLX5DR_MATCHER_FLAGS_COMPARE;
3354 			break;
3355 		case RTE_FLOW_ITEM_TYPE_NSH:
3356 			item_flags |= MLX5_FLOW_ITEM_NSH;
3357 			break;
3358 		case RTE_FLOW_ITEM_TYPE_VOID:
3359 			break;
3360 		case RTE_FLOW_ITEM_TYPE_NVGRE:
3361 			ret = mlx5dr_definer_conv_item_nvgre(&cd, items, i);
3362 			item_flags |= MLX5_FLOW_LAYER_NVGRE;
3363 			break;
3364 		default:
3365 			DR_LOG(ERR, "Unsupported item type %d", items->type);
3366 			goto not_supp;
3367 		}
3368 
3369 		cd.last_item = items->type;
3370 
3371 		if (ret) {
3372 			DR_LOG(ERR, "Failed processing item type: %d", items->type);
3373 			return ret;
3374 		}
3375 	}
3376 
3377 	mt->item_flags = item_flags;
3378 
3379 	/* Fill in header layout and allocate the fc & fcr arrays on the mt */
3380 	ret = mlx5dr_definer_mt_set_fc(mt, fc, hl);
3381 	if (ret) {
3382 		DR_LOG(ERR, "Failed to set field copy to match template");
3383 		return ret;
3384 	}
3385 
3386 	return 0;
3387 
3388 not_supp:
3389 	rte_errno = ENOTSUP;
3390 	return rte_errno;
3391 }
3392 
3393 static int
3394 mlx5dr_definer_find_byte_in_tag(struct mlx5dr_definer *definer,
3395 				uint32_t hl_byte_off,
3396 				uint32_t *tag_byte_off)
3397 {
3398 	uint8_t byte_offset;
3399 	int i, dw_to_scan;
3400 
3401 	/* Avoid accessing unused DW selectors */
3402 	dw_to_scan = mlx5dr_definer_is_jumbo(definer) ?
3403 		DW_SELECTORS : DW_SELECTORS_MATCH;
3404 
3405 	/* Add offset since each DW covers multiple BYTEs */
3406 	byte_offset = hl_byte_off % DW_SIZE;
3407 	for (i = 0; i < dw_to_scan; i++) {
3408 		if (definer->dw_selector[i] == hl_byte_off / DW_SIZE) {
3409 			*tag_byte_off = byte_offset + DW_SIZE * (DW_SELECTORS - i - 1);
3410 			return 0;
3411 		}
3412 	}
3413 
3414 	/* Add offset to skip DWs in definer */
3415 	byte_offset = DW_SIZE * DW_SELECTORS;
3416 	/* Iterate in reverse since the code uses bytes from 7 -> 0 */
3417 	for (i = BYTE_SELECTORS; i-- > 0 ;) {
3418 		if (definer->byte_selector[i] == hl_byte_off) {
3419 			*tag_byte_off = byte_offset + (BYTE_SELECTORS - i - 1);
3420 			return 0;
3421 		}
3422 	}
3423 
3424 	/* The hl byte offset must be part of the definer */
3425 	DR_LOG(INFO, "Failed to map to definer, HL byte [%d] not found", hl_byte_off);
3426 	rte_errno = EINVAL;
3427 	return rte_errno;
3428 }
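
/* A minimal compile-guarded sketch (illustrative, not part of the driver) of
 * the DW-selector mapping above. Tag DWs are laid out in reverse selector
 * order, hence the (DW_SELECTORS - i - 1) term. The EX_* constants mirror
 * DW_SIZE/DW_SELECTORS; their concrete values are assumptions for the
 * example only. Enable the guard and add a main() to run the asserts.
 */
#if 0
#include <assert.h>
#include <stdint.h>

#define EX_DW_SIZE 4
#define EX_DW_SELECTORS 9

static uint32_t ex_tag_byte_off(uint32_t hl_byte_off, int selector_idx)
{
	/* The byte position inside the DW is preserved */
	uint32_t byte_in_dw = hl_byte_off % EX_DW_SIZE;

	/* Selector 0 occupies the highest tag offsets, the last the lowest */
	return byte_in_dw + EX_DW_SIZE * (EX_DW_SELECTORS - selector_idx - 1);
}

static void ex_check(void)
{
	/* HL byte 10 (DW 2, byte 2) held by selector 3 -> tag byte 22 */
	assert(ex_tag_byte_off(10, 3) == 22);
	/* Same HL byte held by the last selector -> lowest tag DW */
	assert(ex_tag_byte_off(10, EX_DW_SELECTORS - 1) == 2);
}
#endif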
3429 
3430 static int
3431 mlx5dr_definer_fc_bind(struct mlx5dr_definer *definer,
3432 		       struct mlx5dr_definer_fc *fc,
3433 		       uint32_t fc_sz)
3434 {
3435 	uint32_t tag_offset = 0;
3436 	int ret, byte_diff;
3437 	uint32_t i;
3438 
3439 	for (i = 0; i < fc_sz; i++) {
3440 		/* Map header layout byte offset to byte offset in tag */
3441 		ret = mlx5dr_definer_find_byte_in_tag(definer, fc->byte_off, &tag_offset);
3442 		if (ret)
3443 			return ret;
3444 
3445 		/* Move setter based on the location in the definer */
3446 		byte_diff = fc->byte_off % DW_SIZE - tag_offset % DW_SIZE;
3447 		fc->bit_off = fc->bit_off + byte_diff * BITS_IN_BYTE;
3448 
3449 		/* Update offset in headers layout to offset in tag */
3450 		fc->byte_off = tag_offset;
3451 		fc++;
3452 	}
3453 
3454 	return 0;
3455 }
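
/* A minimal sketch of the setter rebase above, assuming 4-byte DWs: when a
 * field's byte moves within its DW between the header layout and the tag,
 * its bit offset shifts by BITS_IN_BYTE per byte of difference. The ex_*
 * names are illustrative only.
 */
#if 0
#include <assert.h>
#include <stdint.h>

#define EX_DW_SIZE 4
#define EX_BITS_IN_BYTE 8

static int ex_rebased_bit_off(uint32_t hl_byte_off, int hl_bit_off,
			      uint32_t tag_byte_off)
{
	/* Positive diff means the byte moved towards the DW's low end */
	int byte_diff = (int)(hl_byte_off % EX_DW_SIZE) -
			(int)(tag_byte_off % EX_DW_SIZE);

	return hl_bit_off + byte_diff * EX_BITS_IN_BYTE;
}

static void ex_check(void)
{
	/* HL byte 6 (byte 2 of its DW) bound to tag byte 1 (byte 1 of its
	 * DW): the setter must shift one byte (8 bits) up.
	 */
	assert(ex_rebased_bit_off(6, 0, 1) == 8);
}
#endif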
3456 
3457 static bool
3458 mlx5dr_definer_best_hl_fit_recu(struct mlx5dr_definer_sel_ctrl *ctrl,
3459 				uint32_t cur_dw,
3460 				uint32_t *data)
3461 {
3462 	uint8_t bytes_set;
3463 	int byte_idx;
3464 	bool ret;
3465 	int i;
3466 
3467 	/* Reached end, nothing left to do */
3468 	if (cur_dw == MLX5_ST_SZ_DW(definer_hl))
3469 		return true;
3470 
3471 	/* No data set, can skip to next DW */
3472 	while (!*data) {
3473 		cur_dw++;
3474 		data++;
3475 
3476 		/* Reached end, nothing left to do */
3477 		if (cur_dw == MLX5_ST_SZ_DW(definer_hl))
3478 			return true;
3479 	}
3480 
3481 	/* Used all DW selectors and Byte selectors, no possible solution */
3482 	if (ctrl->allowed_full_dw == ctrl->used_full_dw &&
3483 	    ctrl->allowed_lim_dw == ctrl->used_lim_dw &&
3484 	    ctrl->allowed_bytes == ctrl->used_bytes)
3485 		return false;
3486 
3487 	/* Try to use limited DW selectors */
3488 	if (ctrl->allowed_lim_dw > ctrl->used_lim_dw && cur_dw < 64) {
3489 		ctrl->lim_dw_selector[ctrl->used_lim_dw++] = cur_dw;
3490 
3491 		ret = mlx5dr_definer_best_hl_fit_recu(ctrl, cur_dw + 1, data + 1);
3492 		if (ret)
3493 			return ret;
3494 
3495 		ctrl->lim_dw_selector[--ctrl->used_lim_dw] = 0;
3496 	}
3497 
3498 	/* Try to use DW selectors */
3499 	if (ctrl->allowed_full_dw > ctrl->used_full_dw) {
3500 		ctrl->full_dw_selector[ctrl->used_full_dw++] = cur_dw;
3501 
3502 		ret = mlx5dr_definer_best_hl_fit_recu(ctrl, cur_dw + 1, data + 1);
3503 		if (ret)
3504 			return ret;
3505 
3506 		ctrl->full_dw_selector[--ctrl->used_full_dw] = 0;
3507 	}
3508 
3509 	/* No byte selector for offsets larger than 255 */
3510 	if (cur_dw * DW_SIZE > 255)
3511 		return false;
3512 
3513 	bytes_set = !!(0x000000ff & *data) +
3514 		    !!(0x0000ff00 & *data) +
3515 		    !!(0x00ff0000 & *data) +
3516 		    !!(0xff000000 & *data);
3517 
3518 	/* Check if there are enough byte selectors left */
3519 	if (bytes_set + ctrl->used_bytes > ctrl->allowed_bytes)
3520 		return false;
3521 
3522 	/* Try to use Byte selectors */
3523 	for (i = 0; i < DW_SIZE; i++)
3524 		if ((0xff000000 >> (i * BITS_IN_BYTE)) & rte_be_to_cpu_32(*data)) {
3525 			/* Use byte selectors high to low */
3526 			byte_idx = ctrl->allowed_bytes - ctrl->used_bytes - 1;
3527 			ctrl->byte_selector[byte_idx] = cur_dw * DW_SIZE + i;
3528 			ctrl->used_bytes++;
3529 		}
3530 
3531 	ret = mlx5dr_definer_best_hl_fit_recu(ctrl, cur_dw + 1, data + 1);
3532 	if (ret)
3533 		return ret;
3534 
3535 	for (i = 0; i < DW_SIZE; i++)
3536 		if ((0xff << (i * BITS_IN_BYTE)) & rte_be_to_cpu_32(*data)) {
3537 			ctrl->used_bytes--;
3538 			byte_idx = ctrl->allowed_bytes - ctrl->used_bytes - 1;
3539 			ctrl->byte_selector[byte_idx] = 0;
3540 		}
3541 
3542 	return false;
3543 }
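
/* The recursion above is a depth-first search with backtracking: for each
 * non-zero HL DW it tries a limited DW selector, then a full DW selector,
 * then per-byte selectors, undoing each choice before trying the next. Below
 * is a standalone sketch of the same pattern, simplified to two resource
 * types; the struct and function names are illustrative assumptions.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

struct ex_budget {
	int dws_left;	/* Full DW selectors still available */
	int bytes_left;	/* Byte selectors still available */
};

/* Count the non-zero bytes of one DW */
static int ex_bytes_set(uint32_t dw)
{
	return !!(dw & 0x000000ff) + !!(dw & 0x0000ff00) +
	       !!(dw & 0x00ff0000) + !!(dw & 0xff000000);
}

static bool ex_fit(struct ex_budget *b, const uint32_t *data, int n)
{
	if (!n)
		return true;	/* Reached the end, a solution was found */

	if (!*data)		/* Nothing to match in this DW, skip it */
		return ex_fit(b, data + 1, n - 1);

	if (b->dws_left) {	/* Option 1: spend a full DW selector */
		b->dws_left--;
		if (ex_fit(b, data + 1, n - 1))
			return true;
		b->dws_left++;	/* Backtrack */
	}

	if (b->bytes_left >= ex_bytes_set(*data)) {	/* Option 2: bytes */
		b->bytes_left -= ex_bytes_set(*data);
		if (ex_fit(b, data + 1, n - 1))
			return true;
		b->bytes_left += ex_bytes_set(*data);	/* Backtrack */
	}

	return false;	/* Neither option covers this DW */
}
#endif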
3544 
3545 static void
3546 mlx5dr_definer_copy_sel_ctrl(struct mlx5dr_definer_sel_ctrl *ctrl,
3547 			     struct mlx5dr_definer *definer)
3548 {
3549 	memcpy(definer->byte_selector, ctrl->byte_selector, ctrl->allowed_bytes);
3550 	memcpy(definer->dw_selector, ctrl->full_dw_selector, ctrl->allowed_full_dw);
3551 	memcpy(definer->dw_selector + ctrl->allowed_full_dw,
3552 	       ctrl->lim_dw_selector, ctrl->allowed_lim_dw);
3553 }
3554 
3555 static int
3556 mlx5dr_definer_find_best_range_fit(struct mlx5dr_definer *definer,
3557 				   struct mlx5dr_matcher *matcher)
3558 {
3559 	uint8_t tag_byte_offset[MLX5DR_DEFINER_FNAME_MAX] = {0};
3560 	uint8_t field_select[MLX5DR_DEFINER_FNAME_MAX] = {0};
3561 	struct mlx5dr_definer_sel_ctrl ctrl = {0};
3562 	uint32_t byte_offset, algn_byte_off;
3563 	struct mlx5dr_definer_fc *fcr;
3564 	bool require_dw;
3565 	int idx, i, j;
3566 
3567 	/* Try to create a range definer */
3568 	ctrl.allowed_full_dw = DW_SELECTORS_RANGE;
3569 	ctrl.allowed_bytes = BYTE_SELECTORS_RANGE;
3570 
3571 	/* Multiple fields cannot share the same DW for a range match.
3572 	 * The HW doesn't recognize individual fields, it compares the full DW.
3573 	 * For example, assume a definer DW consists of FieldA_FieldB:
3574 	 * FieldA: Mask 0xFFFF range 0x1 to 0x2
3575 	 * FieldB: Mask 0xFFFF range 0x3 to 0x4
3576 	 * The STE DW range will be 0x00010003 - 0x00020004.
3577 	 * This causes a false match for FieldB when FieldA=1 and FieldB=8,
3578 	 * since 0x00010003 < 0x00010008 < 0x00020004.
3579 	 */
3580 	for (i = 0; i < matcher->num_of_mt; i++) {
3581 		for (j = 0; j < matcher->mt[i].fcr_sz; j++) {
3582 			fcr = &matcher->mt[i].fcr[j];
3583 
3584 			/* Found - Reuse previous mt binding */
3585 			if (field_select[fcr->fname]) {
3586 				fcr->byte_off = tag_byte_offset[fcr->fname];
3587 				continue;
3588 			}
3589 
3590 			/* Not found */
3591 			require_dw = fcr->byte_off >= (64 * DW_SIZE);
3592 			if (require_dw || ctrl.used_bytes == ctrl.allowed_bytes) {
3593 				/* Try to cover using DW selector */
3594 				if (ctrl.used_full_dw == ctrl.allowed_full_dw)
3595 					goto not_supported;
3596 
3597 				ctrl.full_dw_selector[ctrl.used_full_dw++] =
3598 					fcr->byte_off / DW_SIZE;
3599 
3600 				/* Bind DW */
3601 				idx = ctrl.used_full_dw - 1;
3602 				byte_offset = fcr->byte_off % DW_SIZE;
3603 				byte_offset += DW_SIZE * (DW_SELECTORS - idx - 1);
3604 			} else {
3605 				/* Try to cover using Bytes selectors */
3606 				if (ctrl.used_bytes == ctrl.allowed_bytes)
3607 					goto not_supported;
3608 
3609 				algn_byte_off = DW_SIZE * (fcr->byte_off / DW_SIZE);
3610 				ctrl.byte_selector[ctrl.used_bytes++] = algn_byte_off + 3;
3611 				ctrl.byte_selector[ctrl.used_bytes++] = algn_byte_off + 2;
3612 				ctrl.byte_selector[ctrl.used_bytes++] = algn_byte_off + 1;
3613 				ctrl.byte_selector[ctrl.used_bytes++] = algn_byte_off;
3614 
3615 				/* Bind BYTE */
3616 				byte_offset = DW_SIZE * DW_SELECTORS;
3617 				byte_offset += BYTE_SELECTORS - ctrl.used_bytes;
3618 				byte_offset += fcr->byte_off % DW_SIZE;
3619 			}
3620 
3621 			fcr->byte_off = byte_offset;
3622 			tag_byte_offset[fcr->fname] = byte_offset;
3623 			field_select[fcr->fname] = 1;
3624 		}
3625 	}
3626 
3627 	mlx5dr_definer_copy_sel_ctrl(&ctrl, definer);
3628 	definer->type = MLX5DR_DEFINER_TYPE_RANGE;
3629 
3630 	return 0;
3631 
3632 not_supported:
3633 	DR_LOG(ERR, "Unable to find supporting range definer combination");
3634 	rte_errno = ENOTSUP;
3635 	return rte_errno;
3636 }
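
/* A compile-guarded demonstration of the false-match hazard described in the
 * comment inside the function above: when two ranged 16-bit fields share one
 * DW, the HW's full-DW compare accepts values where only the concatenation
 * is in range. Names are illustrative.
 */
#if 0
#include <assert.h>
#include <stdint.h>

/* Pack two 16-bit fields into one DW as FieldA_FieldB */
static uint32_t ex_pack(uint16_t field_a, uint16_t field_b)
{
	return ((uint32_t)field_a << 16) | field_b;
}

static void ex_false_match(void)
{
	uint32_t lo = ex_pack(0x1, 0x3);	/* 0x00010003 */
	uint32_t hi = ex_pack(0x2, 0x4);	/* 0x00020004 */
	uint32_t pkt = ex_pack(0x1, 0x8);	/* FieldB out of range */

	/* The full-DW compare accepts the packet although FieldB == 8 */
	assert(lo < pkt && pkt < hi);
}
#endif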
3637 
3638 static void mlx5dr_definer_optimize_order(struct mlx5dr_definer *definer, int num_log)
3639 {
3640 	uint8_t hl_prio[MLX5DR_DEFINER_HL_OPT_MAX];
3641 	int dw = 0, i = 0, j;
3642 	int *dw_flag;
3643 	uint8_t tmp;
3644 
3645 	dw_flag = mlx5dr_optimal_dist_dw[num_log];
3646 	hl_prio[0] = __mlx5_dw_off(definer_hl, ipv4_src_dest_outer.source_address);
3647 	hl_prio[1] = __mlx5_dw_off(definer_hl, ipv4_src_dest_outer.destination_address);
3648 
3649 	while (i < MLX5DR_DEFINER_HL_OPT_MAX) {
3650 		j = 0;
3651 		/* Finding a candidate to improve its hash distribution */
3652 		while (j < DW_SELECTORS_MATCH && (hl_prio[i] != definer->dw_selector[j]))
3653 			j++;
3654 
3655 		/* Finding a DW location with good hash distribution */
3656 		while (dw < DW_SELECTORS_MATCH && dw_flag[dw] == 0)
3657 			dw++;
3658 
3659 		if (dw < DW_SELECTORS_MATCH && j < DW_SELECTORS_MATCH) {
3660 			tmp = definer->dw_selector[dw];
3661 			definer->dw_selector[dw] = definer->dw_selector[j];
3662 			definer->dw_selector[j] = tmp;
3663 			dw++;
3664 		}
3665 		i++;
3666 	}
3667 }
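
/* A sketch of the reordering above with illustrative names: each priority
 * selector is swapped into the next DW slot whose distribution flag is set;
 * the slot cursor is not reset between priorities, so each candidate gets a
 * distinct slot. The selector count is an assumption for the example.
 */
#if 0
#include <stdint.h>

#define EX_SELECTORS 6

static void ex_reorder(uint8_t *sel, const int *good_slot,
		       const uint8_t *prio, int n_prio)
{
	int dw = 0, i, j;
	uint8_t tmp;

	for (i = 0; i < n_prio; i++) {
		/* Find the candidate selector to relocate */
		for (j = 0; j < EX_SELECTORS && sel[j] != prio[i]; j++)
			;

		/* Find the next slot with good hash distribution */
		while (dw < EX_SELECTORS && !good_slot[dw])
			dw++;

		if (dw < EX_SELECTORS && j < EX_SELECTORS) {
			tmp = sel[dw];
			sel[dw] = sel[j];
			sel[j] = tmp;
			dw++;
		}
	}
}
#endif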
3668 
3669 static int
3670 mlx5dr_definer_find_best_match_fit(struct mlx5dr_context *ctx,
3671 				   struct mlx5dr_definer *definer,
3672 				   uint8_t *hl)
3673 {
3674 	struct mlx5dr_definer_sel_ctrl ctrl = {0};
3675 	bool found;
3676 
3677 	/* Try to create a match definer */
3678 	ctrl.allowed_full_dw = DW_SELECTORS_MATCH;
3679 	ctrl.allowed_lim_dw = 0;
3680 	ctrl.allowed_bytes = BYTE_SELECTORS;
3681 
3682 	found = mlx5dr_definer_best_hl_fit_recu(&ctrl, 0, (uint32_t *)hl);
3683 	if (found) {
3684 		mlx5dr_definer_copy_sel_ctrl(&ctrl, definer);
3685 		definer->type = MLX5DR_DEFINER_TYPE_MATCH;
3686 		return 0;
3687 	}
3688 
3689 	/* Try to create a full/limited jumbo definer */
3690 	ctrl.allowed_full_dw = ctx->caps->full_dw_jumbo_support ? DW_SELECTORS :
3691 								  DW_SELECTORS_MATCH;
3692 	ctrl.allowed_lim_dw = ctx->caps->full_dw_jumbo_support ? 0 :
3693 								 DW_SELECTORS_LIMITED;
3694 	ctrl.allowed_bytes = BYTE_SELECTORS;
3695 
3696 	found = mlx5dr_definer_best_hl_fit_recu(&ctrl, 0, (uint32_t *)hl);
3697 	if (found) {
3698 		mlx5dr_definer_copy_sel_ctrl(&ctrl, definer);
3699 		definer->type = MLX5DR_DEFINER_TYPE_JUMBO;
3700 		return 0;
3701 	}
3702 
3703 	DR_LOG(DEBUG, "Unable to find supporting match/jumbo definer combination");
3704 	rte_errno = E2BIG;
3705 	return rte_errno;
3706 }
3707 
3708 static void
3709 mlx5dr_definer_create_tag_mask(struct rte_flow_item *items,
3710 			       struct mlx5dr_definer_fc *fc,
3711 			       uint32_t fc_sz,
3712 			       uint8_t *tag)
3713 {
3714 	uint32_t i;
3715 
3716 	for (i = 0; i < fc_sz; i++) {
3717 		if (fc->tag_mask_set)
3718 			fc->tag_mask_set(fc, items[fc->item_idx].mask, tag);
3719 		else
3720 			fc->tag_set(fc, items[fc->item_idx].mask, tag);
3721 		fc++;
3722 	}
3723 }
3724 
3725 void mlx5dr_definer_create_tag(const struct rte_flow_item *items,
3726 			       struct mlx5dr_definer_fc *fc,
3727 			       uint32_t fc_sz,
3728 			       uint8_t *tag)
3729 {
3730 	uint32_t i;
3731 
3732 	for (i = 0; i < fc_sz; i++) {
3733 		fc->tag_set(fc, items[fc->item_idx].spec, tag);
3734 		fc++;
3735 	}
3736 }
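
/* A hedged sketch of the tag_set callback shape iterated above: each field
 * copy entry carries a setter that takes the entry, the item's spec or mask,
 * and the tag buffer. The struct below is a simplified stand-in, not the
 * driver's real mlx5dr_definer_fc.
 */
#if 0
#include <stdint.h>
#include <string.h>

struct ex_fc;
typedef void (*ex_tag_set_t)(const struct ex_fc *fc,
			     const void *item_data, uint8_t *tag);

struct ex_fc {
	uint32_t byte_off;	/* Destination offset inside the tag */
	ex_tag_set_t tag_set;	/* Field specific setter */
};

/* Setter example: copy a 32-bit field verbatim into the tag */
static void ex_set_u32(const struct ex_fc *fc,
		       const void *item_data, uint8_t *tag)
{
	memcpy(tag + fc->byte_off, item_data, sizeof(uint32_t));
}
#endif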
3737 
3738 static uint32_t mlx5dr_definer_get_range_byte_off(uint32_t match_byte_off)
3739 {
3740 	uint8_t curr_dw_idx = match_byte_off / DW_SIZE;
3741 	uint8_t new_dw_idx;
3742 
3743 	/* A range DW can be one of 7, 8, 9 or 10, mapped as follows:
3744 	 * - DW7 is mapped to DW9
3745 	 * - DW8 is mapped to DW7
3746 	 * - DW9 is mapped to DW5
3747 	 * - DW10 is mapped to DW3
3748 	 * To simplify the calculation the following linear formula is used:
3749 	 */
3750 	new_dw_idx = curr_dw_idx * (-2) + 23;
3751 
3752 	return new_dw_idx * DW_SIZE + match_byte_off % DW_SIZE;
3753 }
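
/* The linear map above can be checked against the documented pairs; a small
 * compile-guarded verification (ex_* names are illustrative):
 */
#if 0
#include <assert.h>

/* Same formula as above: DW 7,8,9,10 -> 9,7,5,3 */
static int ex_range_dw(int match_dw)
{
	return match_dw * -2 + 23;
}

static void ex_check(void)
{
	assert(ex_range_dw(7) == 9);
	assert(ex_range_dw(8) == 7);
	assert(ex_range_dw(9) == 5);
	assert(ex_range_dw(10) == 3);
}
#endif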
3754 
3755 void mlx5dr_definer_create_tag_range(const struct rte_flow_item *items,
3756 				     struct mlx5dr_definer_fc *fc,
3757 				     uint32_t fc_sz,
3758 				     uint8_t *tag)
3759 {
3760 	struct mlx5dr_definer_fc tmp_fc;
3761 	uint32_t i;
3762 
3763 	for (i = 0; i < fc_sz; i++) {
3764 		tmp_fc = *fc;
3765 		/* Set MAX value */
3766 		tmp_fc.byte_off = mlx5dr_definer_get_range_byte_off(fc->byte_off);
3767 		tmp_fc.tag_set(&tmp_fc, items[fc->item_idx].last, tag);
3768 		/* Set MIN value */
3769 		tmp_fc.byte_off += DW_SIZE;
3770 		tmp_fc.tag_set(&tmp_fc, items[fc->item_idx].spec, tag);
3771 		fc++;
3772 	}
3773 }
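
/* A minimal sketch of the MIN/MAX layout implied above, assuming 4-byte DWs:
 * for a ranged field the MAX bound (item->last) is written at the remapped
 * range offset and the MIN bound (item->spec) one DW after it. The names and
 * the verbatim copy are illustrative assumptions.
 */
#if 0
#include <stdint.h>
#include <string.h>

#define EX_DW_SIZE 4

static void ex_write_range(uint8_t *tag, uint32_t range_byte_off,
			   uint32_t min_be, uint32_t max_be)
{
	memcpy(tag + range_byte_off, &max_be, sizeof(max_be));
	memcpy(tag + range_byte_off + EX_DW_SIZE, &min_be, sizeof(min_be));
}
#endif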
3774 
3775 int mlx5dr_definer_get_id(struct mlx5dr_definer *definer)
3776 {
3777 	return definer->obj->id;
3778 }
3779 
3780 int mlx5dr_definer_compare(struct mlx5dr_definer *definer_a,
3781 			   struct mlx5dr_definer *definer_b)
3782 {
3783 	int i;
3784 
3785 	/* Future: Optimize by comparing selectors with valid mask only */
3786 	for (i = 0; i < BYTE_SELECTORS; i++)
3787 		if (definer_a->byte_selector[i] != definer_b->byte_selector[i])
3788 			return 1;
3789 
3790 	for (i = 0; i < DW_SELECTORS; i++)
3791 		if (definer_a->dw_selector[i] != definer_b->dw_selector[i])
3792 			return 1;
3793 
3794 	for (i = 0; i < MLX5DR_JUMBO_TAG_SZ; i++)
3795 		if (definer_a->mask.jumbo[i] != definer_b->mask.jumbo[i])
3796 			return 1;
3797 
3798 	return 0;
3799 }
3800 
3801 static int
3802 mlx5dr_definer_optimize_order_supported(struct mlx5dr_definer *match_definer,
3803 					struct mlx5dr_matcher *matcher)
3804 {
3805 	return !mlx5dr_definer_is_jumbo(match_definer) &&
3806 	       !mlx5dr_matcher_req_fw_wqe(matcher) &&
3807 	       !mlx5dr_matcher_is_resizable(matcher) &&
3808 	       !mlx5dr_matcher_is_insert_by_idx(matcher);
3809 }
3810 
3811 static int
3812 mlx5dr_definer_calc_layout(struct mlx5dr_matcher *matcher,
3813 			   struct mlx5dr_definer *match_definer,
3814 			   struct mlx5dr_definer *range_definer)
3815 {
3816 	struct mlx5dr_context *ctx = matcher->tbl->ctx;
3817 	struct mlx5dr_match_template *mt = matcher->mt;
3818 	struct mlx5dr_definer_fc *fc;
3819 	uint8_t *match_hl;
3820 	int i, ret;
3821 
3822 	/* Union header-layout (hl) is used for creating a single definer
3823 	 * field layout used with different bitmasks for hash and match.
3824 	 */
3825 	match_hl = simple_calloc(1, MLX5_ST_SZ_BYTES(definer_hl));
3826 	if (!match_hl) {
3827 		DR_LOG(ERR, "Failed to allocate memory for header layout");
3828 		rte_errno = ENOMEM;
3829 		return rte_errno;
3830 	}
3831 
3832 	/* Convert all mt items to header layout (hl)
3833 	 * and allocate the match and range field copy array (fc & fcr).
3834 	 */
3835 	for (i = 0; i < matcher->num_of_mt; i++) {
3836 		ret = mlx5dr_definer_conv_items_to_hl(ctx, &mt[i], match_hl, matcher);
3837 		if (ret) {
3838 			DR_LOG(ERR, "Failed to convert items to header layout");
3839 			goto free_fc;
3840 		}
3841 	}
3842 
3843 	if (mlx5dr_matcher_is_compare(matcher)) {
3844 		ret = mlx5dr_matcher_validate_compare_attr(matcher);
3845 		if (ret)
3846 			goto free_fc;
3847 
3848 		/* Due to a HW limitation, the unused DW selectors
3849 		 * 0-5 and byte selectors must be filled with 0xff.
3850 		 */
3851 		for (i = 0; i < DW_SELECTORS_MATCH; i++)
3852 			match_definer->dw_selector[i] = 0xff;
3853 
3854 		for (i = 0; i < BYTE_SELECTORS; i++)
3855 			match_definer->byte_selector[i] = 0xff;
3856 
3857 		for (i = 0; i < mt[0].fc_sz; i++) {
3858 			fc = &mt[0].fc[i];
3859 			match_definer->dw_selector[fc->compare_idx] = fc->byte_off / DW_SIZE;
3860 		}
3861 
3862 		goto out;
3863 	}
3864 
3865 	/* Find the match definer layout for header layout match union */
3866 	ret = mlx5dr_definer_find_best_match_fit(ctx, match_definer, match_hl);
3867 	if (ret) {
3868 		DR_LOG(DEBUG, "Failed to create match definer from header layout");
3869 		goto free_fc;
3870 	}
3871 
3872 	if (mlx5dr_definer_optimize_order_supported(match_definer, matcher))
3873 		mlx5dr_definer_optimize_order(match_definer, matcher->attr.rule.num_log);
3874 
3875 	/* Find the range definer layout for match templates fcrs */
3876 	ret = mlx5dr_definer_find_best_range_fit(range_definer, matcher);
3877 	if (ret) {
3878 		DR_LOG(ERR, "Failed to create range definer from header layout");
3879 		goto free_fc;
3880 	}
3881 
3882 out:
3883 	simple_free(match_hl);
3884 	return 0;
3885 
3886 free_fc:
3887 	for (i = 0; i < matcher->num_of_mt; i++)
3888 		if (mt[i].fc)
3889 			simple_free(mt[i].fc);
3890 
3891 	simple_free(match_hl);
3892 	return rte_errno;
3893 }
3894 
3895 int mlx5dr_definer_init_cache(struct mlx5dr_definer_cache **cache)
3896 {
3897 	struct mlx5dr_definer_cache *new_cache;
3898 
3899 	new_cache = simple_calloc(1, sizeof(*new_cache));
3900 	if (!new_cache) {
3901 		rte_errno = ENOMEM;
3902 		return rte_errno;
3903 	}
3904 	LIST_INIT(&new_cache->head);
3905 	*cache = new_cache;
3906 
3907 	return 0;
3908 }
3909 
3910 void mlx5dr_definer_uninit_cache(struct mlx5dr_definer_cache *cache)
3911 {
3912 	simple_free(cache);
3913 }
3914 
3915 static struct mlx5dr_devx_obj *
3916 mlx5dr_definer_get_obj(struct mlx5dr_context *ctx,
3917 		       struct mlx5dr_definer *definer)
3918 {
3919 	struct mlx5dr_definer_cache *cache = ctx->definer_cache;
3920 	struct mlx5dr_cmd_definer_create_attr def_attr = {0};
3921 	struct mlx5dr_definer_cache_item *cached_definer;
3922 	struct mlx5dr_devx_obj *obj;
3923 
3924 	/* Search definer cache for requested definer */
3925 	LIST_FOREACH(cached_definer, &cache->head, next) {
3926 		if (mlx5dr_definer_compare(&cached_definer->definer, definer))
3927 			continue;
3928 
3929 		/* Reuse definer and set LRU (move to be first in the list) */
3930 		LIST_REMOVE(cached_definer, next);
3931 		LIST_INSERT_HEAD(&cache->head, cached_definer, next);
3932 		cached_definer->refcount++;
3933 		return cached_definer->definer.obj;
3934 	}
3935 
3936 	/* Allocate and create definer based on the bitmask tag */
3937 	def_attr.match_mask = definer->mask.jumbo;
3938 	def_attr.dw_selector = definer->dw_selector;
3939 	def_attr.byte_selector = definer->byte_selector;
3940 
3941 	obj = mlx5dr_cmd_definer_create(ctx->ibv_ctx, &def_attr);
3942 	if (!obj)
3943 		return NULL;
3944 
3945 	cached_definer = simple_calloc(1, sizeof(*cached_definer));
3946 	if (!cached_definer) {
3947 		rte_errno = ENOMEM;
3948 		goto free_definer_obj;
3949 	}
3950 
3951 	memcpy(&cached_definer->definer, definer, sizeof(*definer));
3952 	cached_definer->definer.obj = obj;
3953 	cached_definer->refcount = 1;
3954 	LIST_INSERT_HEAD(&cache->head, cached_definer, next);
3955 
3956 	return obj;
3957 
3958 free_definer_obj:
3959 	mlx5dr_cmd_destroy_obj(obj);
3960 	return NULL;
3961 }
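
/* The cache above is a refcounted move-to-front list: a hit bumps the
 * refcount and moves the entry to the head, a miss creates the object and
 * inserts it at the head. A standalone sketch of the same pattern using
 * sys/queue.h (entry layout and key type are illustrative):
 */
#if 0
#include <stdint.h>
#include <sys/queue.h>

struct ex_entry {
	LIST_ENTRY(ex_entry) next;
	uint32_t key;
	int refcount;
};

LIST_HEAD(ex_head, ex_entry);

static struct ex_entry *ex_cache_get(struct ex_head *head, uint32_t key)
{
	struct ex_entry *entry;

	LIST_FOREACH(entry, head, next) {
		if (entry->key != key)
			continue;

		/* Hit: move to front so hot entries are found first */
		LIST_REMOVE(entry, next);
		LIST_INSERT_HEAD(head, entry, next);
		entry->refcount++;
		return entry;
	}

	return NULL;	/* Miss: the caller allocates and inserts at head */
}
#endif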
3962 
3963 static void
3964 mlx5dr_definer_put_obj(struct mlx5dr_context *ctx,
3965 		       struct mlx5dr_devx_obj *obj)
3966 {
3967 	struct mlx5dr_definer_cache_item *cached_definer;
3968 
3969 	LIST_FOREACH(cached_definer, &ctx->definer_cache->head, next) {
3970 		if (cached_definer->definer.obj != obj)
3971 			continue;
3972 
3973 		/* Object found */
3974 		if (--cached_definer->refcount)
3975 			return;
3976 
3977 		LIST_REMOVE(cached_definer, next);
3978 		mlx5dr_cmd_destroy_obj(cached_definer->definer.obj);
3979 		simple_free(cached_definer);
3980 		return;
3981 	}
3982 
3983 	/* Programming error, object must be part of cache */
3984 	assert(false);
3985 }
3986 
3987 static struct mlx5dr_definer *
3988 mlx5dr_definer_alloc(struct mlx5dr_context *ctx,
3989 		     struct mlx5dr_definer_fc *fc,
3990 		     int fc_sz,
3991 		     struct rte_flow_item *items,
3992 		     struct mlx5dr_definer *layout,
3993 		     bool bind_fc)
3994 {
3995 	struct mlx5dr_definer *definer;
3996 	int ret;
3997 
3998 	definer = simple_calloc(1, sizeof(*definer));
3999 	if (!definer) {
4000 		DR_LOG(ERR, "Failed to allocate memory for definer");
4001 		rte_errno = ENOMEM;
4002 		return NULL;
4003 	}
4004 
4005 	memcpy(definer, layout, sizeof(*definer));
4006 
4007 	/* Align field copy array based on given layout */
4008 	if (bind_fc) {
4009 		ret = mlx5dr_definer_fc_bind(definer, fc, fc_sz);
4010 		if (ret) {
4011 			DR_LOG(ERR, "Failed to bind field copy to definer");
4012 			goto free_definer;
4013 		}
4014 	}
4015 
4016 	/* Create the tag mask used for definer creation */
4017 	mlx5dr_definer_create_tag_mask(items, fc, fc_sz, definer->mask.jumbo);
4018 
4019 	definer->obj = mlx5dr_definer_get_obj(ctx, definer);
4020 	if (!definer->obj)
4021 		goto free_definer;
4022 
4023 	return definer;
4024 
4025 free_definer:
4026 	simple_free(definer);
4027 	return NULL;
4028 }
4029 
4030 static void
4031 mlx5dr_definer_free(struct mlx5dr_context *ctx,
4032 		    struct mlx5dr_definer *definer)
4033 {
4034 	mlx5dr_definer_put_obj(ctx, definer->obj);
4035 	simple_free(definer);
4036 }
4037 
4038 static int
4039 mlx5dr_definer_matcher_match_init(struct mlx5dr_context *ctx,
4040 				  struct mlx5dr_matcher *matcher,
4041 				  struct mlx5dr_definer *match_layout)
4042 {
4043 	struct mlx5dr_match_template *mt = matcher->mt;
4044 	int i;
4045 
4046 	/* Create mandatory match definer */
4047 	for (i = 0; i < matcher->num_of_mt; i++) {
4048 		mt[i].definer = mlx5dr_definer_alloc(ctx,
4049 						     mt[i].fc,
4050 						     mt[i].fc_sz,
4051 						     mt[i].items,
4052 						     match_layout,
4053 						     true);
4054 		if (!mt[i].definer) {
4055 			DR_LOG(ERR, "Failed to create match definer");
4056 			goto free_definers;
4057 		}
4058 	}
4059 	return 0;
4060 
4061 free_definers:
4062 	while (i--)
4063 		mlx5dr_definer_free(ctx, mt[i].definer);
4064 
4065 	return rte_errno;
4066 }
4067 
4068 static void
4069 mlx5dr_definer_matcher_match_uninit(struct mlx5dr_matcher *matcher)
4070 {
4071 	struct mlx5dr_context *ctx = matcher->tbl->ctx;
4072 	int i;
4073 
4074 	for (i = 0; i < matcher->num_of_mt; i++)
4075 		mlx5dr_definer_free(ctx, matcher->mt[i].definer);
4076 }
4077 
4078 static int
4079 mlx5dr_definer_matcher_range_init(struct mlx5dr_context *ctx,
4080 				  struct mlx5dr_matcher *matcher,
4081 				  struct mlx5dr_definer *range_layout)
4082 {
4083 	struct mlx5dr_match_template *mt = matcher->mt;
4084 	int i;
4085 
4086 	/* Create optional range definers */
4087 	for (i = 0; i < matcher->num_of_mt; i++) {
4088 		/* Range must be used by all templates or by none */
4089 		bool is_range = !!mt[i].fcr_sz;
4090 		bool has_range = matcher->flags & MLX5DR_MATCHER_FLAGS_RANGE_DEFINER;
4091 
4092 		if (i && ((is_range && !has_range) || (!is_range && has_range))) {
4093 			DR_LOG(ERR, "Using range and non range templates is not allowed");
4094 			rte_errno = EINVAL;
4095 			goto free_definers;
4096 		}
4097 
4098 		if (!mt[i].fcr_sz)
4099 			continue;
4100 
4101 		matcher->flags |= MLX5DR_MATCHER_FLAGS_RANGE_DEFINER;
4102 		/* Create definer without fcr binding, the fcr is already bound */
4103 		mt[i].range_definer = mlx5dr_definer_alloc(ctx,
4104 							   mt[i].fcr,
4105 							   mt[i].fcr_sz,
4106 							   mt[i].items,
4107 							   range_layout,
4108 							   false);
4109 		if (!mt[i].range_definer) {
4110 			DR_LOG(ERR, "Failed to create range definer");
4111 			goto free_definers;
4112 		}
4113 	}
4114 	return 0;
4115 
4116 free_definers:
4117 	while (i--)
4118 		if (mt[i].range_definer)
4119 			mlx5dr_definer_free(ctx, mt[i].range_definer);
4120 
4121 	return rte_errno;
4122 }
4123 
4124 static void
4125 mlx5dr_definer_matcher_range_uninit(struct mlx5dr_matcher *matcher)
4126 {
4127 	struct mlx5dr_context *ctx = matcher->tbl->ctx;
4128 	int i;
4129 
4130 	for (i = 0; i < matcher->num_of_mt; i++)
4131 		if (matcher->mt[i].range_definer)
4132 			mlx5dr_definer_free(ctx, matcher->mt[i].range_definer);
4133 }
4134 
4135 static int
4136 mlx5dr_definer_matcher_hash_init(struct mlx5dr_context *ctx,
4137 				 struct mlx5dr_matcher *matcher)
4138 {
4139 	struct mlx5dr_cmd_definer_create_attr def_attr = {0};
4140 	struct mlx5dr_match_template *mt = matcher->mt;
4141 	struct ibv_context *ibv_ctx = ctx->ibv_ctx;
4142 	uint8_t *bit_mask;
4143 	int i, j;
4144 
4145 	for (i = 1; i < matcher->num_of_mt; i++)
4146 		if (mlx5dr_definer_compare(mt[i].definer, mt[i - 1].definer))
4147 			matcher->flags |= MLX5DR_MATCHER_FLAGS_HASH_DEFINER;
4148 
4149 	if (!(matcher->flags & MLX5DR_MATCHER_FLAGS_HASH_DEFINER))
4150 		return 0;
4151 
4152 	/* Insert by index requires all MTs to use the same definer */
4153 	if (matcher->attr.insert_mode == MLX5DR_MATCHER_INSERT_BY_INDEX) {
4154 		DR_LOG(ERR, "Insert by index not supported with MT combination");
4155 		rte_errno = EOPNOTSUPP;
4156 		return rte_errno;
4157 	}
4158 
4159 	matcher->hash_definer = simple_calloc(1, sizeof(*matcher->hash_definer));
4160 	if (!matcher->hash_definer) {
4161 		DR_LOG(ERR, "Failed to allocate memory for hash definer");
4162 		rte_errno = ENOMEM;
4163 		return rte_errno;
4164 	}
4165 
4166 	/* Calculate the intersection of all match template bitmasks.
4167 	 * We will use mt[0] as a reference and intersect it with mt[1..n].
4168 	 * From this we will get:
4169 	 * hash_definer.selectors = mt[0].selectors
4170 	 * hash_definer.mask = mt[0].mask & mt[1].mask & ... & mt[n].mask
4171 	 */
4172 
4173 	/* Use first definer which should also contain intersection fields */
4174 	memcpy(matcher->hash_definer, mt->definer, sizeof(struct mlx5dr_definer));
4175 
4176 	/* Calculate intersection between first to all match templates bitmasks */
4177 	for (i = 1; i < matcher->num_of_mt; i++) {
4178 		bit_mask = (uint8_t *)&mt[i].definer->mask;
4179 		for (j = 0; j < MLX5DR_JUMBO_TAG_SZ; j++)
4180 			((uint8_t *)&matcher->hash_definer->mask)[j] &= bit_mask[j];
4181 	}
4182 
4183 	def_attr.match_mask = matcher->hash_definer->mask.jumbo;
4184 	def_attr.dw_selector = matcher->hash_definer->dw_selector;
4185 	def_attr.byte_selector = matcher->hash_definer->byte_selector;
4186 	matcher->hash_definer->obj = mlx5dr_cmd_definer_create(ibv_ctx, &def_attr);
4187 	if (!matcher->hash_definer->obj) {
4188 		DR_LOG(ERR, "Failed to create hash definer");
4189 		goto free_hash_definer;
4190 	}
4191 
4192 	return 0;
4193 
4194 free_hash_definer:
4195 	simple_free(matcher->hash_definer);
4196 	return rte_errno;
4197 }
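
/* A small sketch of the byte-wise mask intersection computed above: dst
 * starts as template 0's mask and is ANDed with every other template's mask.
 * The names and the array-of-pointers shape are illustrative assumptions.
 */
#if 0
#include <stddef.h>
#include <stdint.h>

static void ex_intersect_masks(uint8_t *dst, uint8_t * const *masks,
			       int num_masks, size_t mask_sz)
{
	size_t j;
	int i;

	for (i = 1; i < num_masks; i++)
		for (j = 0; j < mask_sz; j++)
			dst[j] &= masks[i][j];
}
#endif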
4198 
4199 static void
4200 mlx5dr_definer_matcher_hash_uninit(struct mlx5dr_matcher *matcher)
4201 {
4202 	if (!matcher->hash_definer)
4203 		return;
4204 
4205 	mlx5dr_cmd_destroy_obj(matcher->hash_definer->obj);
4206 	simple_free(matcher->hash_definer);
4207 }
4208 
4209 int mlx5dr_definer_matcher_init(struct mlx5dr_context *ctx,
4210 				struct mlx5dr_matcher *matcher)
4211 {
4212 	struct mlx5dr_definer match_layout = {0};
4213 	struct mlx5dr_definer range_layout = {0};
4214 	int ret, i;
4215 
4216 	if (matcher->flags & MLX5DR_MATCHER_FLAGS_COLLISION)
4217 		return 0;
4218 
4219 	ret = mlx5dr_definer_calc_layout(matcher, &match_layout, &range_layout);
4220 	if (ret) {
4221 		DR_LOG(DEBUG, "Failed to calculate matcher definer layout");
4222 		return ret;
4223 	}
4224 
4225 	/* Calculate definers needed for exact match */
4226 	ret = mlx5dr_definer_matcher_match_init(ctx, matcher, &match_layout);
4227 	if (ret) {
4228 		DR_LOG(ERR, "Failed to init match definers");
4229 		goto free_fc;
4230 	}
4231 
4232 	/* Calculate definers needed for range */
4233 	ret = mlx5dr_definer_matcher_range_init(ctx, matcher, &range_layout);
4234 	if (ret) {
4235 		DR_LOG(ERR, "Failed to init range definers");
4236 		goto uninit_match_definer;
4237 	}
4238 
4239 	/* Calculate partial hash definer */
4240 	ret = mlx5dr_definer_matcher_hash_init(ctx, matcher);
4241 	if (ret) {
4242 		DR_LOG(ERR, "Failed to init hash definer");
4243 		goto uninit_range_definer;
4244 	}
4245 
4246 	return 0;
4247 
4248 uninit_range_definer:
4249 	mlx5dr_definer_matcher_range_uninit(matcher);
4250 uninit_match_definer:
4251 	mlx5dr_definer_matcher_match_uninit(matcher);
4252 free_fc:
4253 	for (i = 0; i < matcher->num_of_mt; i++)
4254 		simple_free(matcher->mt[i].fc);
4255 
4256 	return ret;
4257 }
4258 
4259 void mlx5dr_definer_matcher_uninit(struct mlx5dr_matcher *matcher)
4260 {
4261 	int i;
4262 
4263 	if (matcher->flags & MLX5DR_MATCHER_FLAGS_COLLISION)
4264 		return;
4265 
4266 	mlx5dr_definer_matcher_hash_uninit(matcher);
4267 	mlx5dr_definer_matcher_range_uninit(matcher);
4268 	mlx5dr_definer_matcher_match_uninit(matcher);
4269 
4270 	for (i = 0; i < matcher->num_of_mt; i++)
4271 		simple_free(matcher->mt[i].fc);
4272 }
4273