xref: /dpdk/drivers/net/mlx5/hws/mlx5dr_definer.c (revision e9fd1ebf981f361844aea9ec94e17f4bda5e1479)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright (c) 2022 NVIDIA Corporation & Affiliates
3  */
4 
5 #include "mlx5dr_internal.h"
6 
7 #define GTP_PDU_SC	0x85
8 #define BAD_PORT	0xBAD
9 #define ETH_TYPE_IPV4_VXLAN	0x0800
10 #define ETH_TYPE_IPV6_VXLAN	0x86DD
11 #define UDP_VXLAN_PORT	4789
12 #define UDP_VXLAN_GPE_PORT	4790
13 #define UDP_GTPU_PORT	2152
14 #define UDP_PORT_MPLS	6635
15 #define UDP_GENEVE_PORT 6081
16 #define UDP_ROCEV2_PORT	4791
17 #define DR_FLOW_LAYER_TUNNEL_NO_MPLS (MLX5_FLOW_LAYER_TUNNEL & ~MLX5_FLOW_LAYER_MPLS)
18 
19 #define STE_NO_VLAN	0x0
20 #define STE_SVLAN	0x1
21 #define STE_CVLAN	0x2
22 #define STE_NO_L3	0x0
23 #define STE_IPV4	0x1
24 #define STE_IPV6	0x2
25 #define STE_NO_L4	0x0
26 #define STE_TCP		0x1
27 #define STE_UDP		0x2
28 #define STE_ICMP	0x3
29 #define STE_NO_TUN	0x0
30 #define STE_ESP		0x3
31 
32 #define MLX5DR_DEFINER_QUOTA_BLOCK 0
33 #define MLX5DR_DEFINER_QUOTA_PASS  2
34 
35 /* Setter function based on bit offset and mask, for 32bit DW */
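/* Read-modify-write of one big-endian DW: the DW at byte_off is loaded,
 * the (mask << bit_off) bits are cleared, (v & mask) << bit_off is OR-ed in,
 * and the result is stored back in big-endian order. For example, byte_off 4,
 * bit_off 8 and mask 0xff replace bits 15..8 of the second DW with v & 0xff.
 */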
36 #define _DR_SET_32(p, v, byte_off, bit_off, mask) \
37 	do { \
38 		u32 _v = v; \
39 		*((rte_be32_t *)(p) + ((byte_off) / 4)) = \
40 		rte_cpu_to_be_32((rte_be_to_cpu_32(*((u32 *)(p) + \
41 				  ((byte_off) / 4))) & \
42 				  (~((mask) << (bit_off)))) | \
43 				 (((_v) & (mask)) << \
44 				  (bit_off))); \
45 	} while (0)
46 
47 /* Setter function based on bit offset and mask */
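/* A negative bit_off means the field straddles two consecutive DWs: the high
 * (mask >> -bit_off) part of the value is written at bit 0 of the DW at
 * byte_off, and the remaining low -bit_off bits go into the following DW.
 */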
48 #define DR_SET(p, v, byte_off, bit_off, mask) \
49 	do { \
50 		if (unlikely((bit_off) < 0)) { \
51 			u32 _bit_off = -1 * (bit_off); \
52 			u32 second_dw_mask = (mask) & ((1 << _bit_off) - 1); \
53 			_DR_SET_32(p, (v) >> _bit_off, byte_off, 0, (mask) >> _bit_off); \
54 			_DR_SET_32(p, (v) & second_dw_mask, (byte_off) + DW_SIZE, \
55 				   (bit_off) % BITS_IN_DW, second_dw_mask); \
56 		} else { \
57 			_DR_SET_32(p, v, byte_off, (bit_off), (mask)); \
58 		} \
59 	} while (0)
60 
61 /* Setter function based on byte offset to directly set FULL BE32 value  */
62 #define DR_SET_BE32(p, v, byte_off, bit_off, mask) \
63 	(*((rte_be32_t *)((uint8_t *)(p) + (byte_off))) = (v))
64 
65 /* Setter function based on byte offset to directly set FULL BE32 value from ptr  */
66 #define DR_SET_BE32P(p, v_ptr, byte_off, bit_off, mask) \
67 	memcpy((uint8_t *)(p) + (byte_off), v_ptr, 4)
68 
69 /* Setter function based on byte offset to directly set FULL BE16 value  */
70 #define DR_SET_BE16(p, v, byte_off, bit_off, mask) \
71 	(*((rte_be16_t *)((uint8_t *)(p) + (byte_off))) = (v))
72 
73 /* Setter function based on byte offset to directly set FULL BE16 value from ptr  */
74 #define DR_SET_BE16P(p, v_ptr, byte_off, bit_off, mask) \
75 	memcpy((uint8_t *)(p) + (byte_off), v_ptr, 2)
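/* The DR_SET_BE* and DR_SET_BE*P variants ignore bit_off and mask; the unused
 * parameters only keep the signature identical to DR_SET so the X-macro below
 * can generate setters that dispatch to any of these helpers uniformly.
 */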
76 
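/* Pick the inner or outer variant of a definer field name enum value,
 * e.g. DR_CALC_FNAME(ETH_TYPE, true) yields MLX5DR_DEFINER_FNAME_ETH_TYPE_I.
 */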
77 #define DR_CALC_FNAME(field, inner) \
78 	((inner) ? MLX5DR_DEFINER_FNAME_##field##_I : \
79 		   MLX5DR_DEFINER_FNAME_##field##_O)
80 
81 #define DR_CALC_SET_HDR(fc, hdr, field) \
82 	do { \
83 		(fc)->bit_mask = __mlx5_mask(definer_hl, hdr.field); \
84 		(fc)->bit_off = __mlx5_dw_bit_off(definer_hl, hdr.field); \
85 		(fc)->byte_off = MLX5_BYTE_OFF(definer_hl, hdr.field); \
86 	} while (0)
87 
88 /* Helper to calculate data used by DR_SET */
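/* For example, DR_CALC_SET(fc, eth_l4, source_port, inner) fills fc->byte_off,
 * fc->bit_off and fc->bit_mask from definer_hl.eth_l4_inner.source_port or
 * definer_hl.eth_l4_outer.source_port, depending on is_inner.
 */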
89 #define DR_CALC_SET(fc, hdr, field, is_inner) \
90 	do { \
91 		if (is_inner) { \
92 			DR_CALC_SET_HDR(fc, hdr##_inner, field); \
93 		} else { \
94 			DR_CALC_SET_HDR(fc, hdr##_outer, field); \
95 		} \
96 	} while (0)
97 
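/* Getter mirroring DR_SET: extracts a field from big-endian data. For example,
 * DR_GET(header_ipv6_vtc, &v->hdr.vtc_flow, tos) returns the traffic class
 * from the IPv6 version/TC/flow-label word.
 */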
98 #define DR_GET(typ, p, fld) \
99 	((rte_be_to_cpu_32(*((const rte_be32_t *)(p) + \
100 	__mlx5_dw_off(typ, fld))) >> __mlx5_dw_bit_off(typ, fld)) & \
101 	__mlx5_mask(typ, fld))
102 
103 struct mlx5dr_definer_sel_ctrl {
104 	uint8_t allowed_full_dw; /* Full DW selectors cover all offsets */
105 	uint8_t allowed_lim_dw;  /* Limited DW selectors cover offset < 64 */
106 	uint8_t allowed_bytes;   /* Bytes selectors, up to offset 255 */
107 	uint8_t used_full_dw;
108 	uint8_t used_lim_dw;
109 	uint8_t used_bytes;
110 	uint8_t full_dw_selector[DW_SELECTORS];
111 	uint8_t lim_dw_selector[DW_SELECTORS_LIMITED];
112 	uint8_t byte_selector[BYTE_SELECTORS];
113 };
114 
115 struct mlx5dr_definer_conv_data {
116 	struct mlx5dr_context *ctx;
117 	struct mlx5dr_definer_fc *fc;
118 	uint8_t relaxed;
119 	uint8_t tunnel;
120 	uint8_t mpls_idx;
121 	uint8_t geneve_opt_ok_idx;
122 	uint8_t geneve_opt_data_idx;
123 	enum rte_flow_item_type last_item;
124 };
125 
126 /* X-macro used to create generic item setters from flow items */
127 #define LIST_OF_FIELDS_INFO \
128 	X(SET_BE16,	eth_type,		v->hdr.ether_type,		rte_flow_item_eth) \
129 	X(SET_BE32P,	eth_smac_47_16,		&v->hdr.src_addr.addr_bytes[0],	rte_flow_item_eth) \
130 	X(SET_BE16P,	eth_smac_15_0,		&v->hdr.src_addr.addr_bytes[4],	rte_flow_item_eth) \
131 	X(SET_BE32P,	eth_dmac_47_16,		&v->hdr.dst_addr.addr_bytes[0],	rte_flow_item_eth) \
132 	X(SET_BE16P,	eth_dmac_15_0,		&v->hdr.dst_addr.addr_bytes[4],	rte_flow_item_eth) \
133 	X(SET_BE16,	tci,			v->hdr.vlan_tci,		rte_flow_item_vlan) \
134 	X(SET,		ipv4_ihl,		v->ihl,			rte_ipv4_hdr) \
135 	X(SET,		ipv4_tos,		v->type_of_service,	rte_ipv4_hdr) \
136 	X(SET,		ipv4_time_to_live,	v->time_to_live,	rte_ipv4_hdr) \
137 	X(SET_BE32,	ipv4_dst_addr,		v->dst_addr,		rte_ipv4_hdr) \
138 	X(SET_BE32,	ipv4_src_addr,		v->src_addr,		rte_ipv4_hdr) \
139 	X(SET,		ipv4_next_proto,	v->next_proto_id,	rte_ipv4_hdr) \
140 	X(SET,		ipv4_version,		STE_IPV4,		rte_ipv4_hdr) \
141 	X(SET_BE16,	ipv4_frag,		v->fragment_offset,	rte_ipv4_hdr) \
142 	X(SET_BE16,	ipv4_len,		v->total_length,	rte_ipv4_hdr) \
143 	X(SET,          ip_fragmented,          !!v->fragment_offset,   rte_ipv4_hdr) \
144 	X(SET_BE16,	ipv6_payload_len,	v->hdr.payload_len,	rte_flow_item_ipv6) \
145 	X(SET,		ipv6_proto,		v->hdr.proto,		rte_flow_item_ipv6) \
146 	X(SET,		ipv6_routing_hdr,	IPPROTO_ROUTING,	rte_flow_item_ipv6) \
147 	X(SET,		ipv6_hop_limits,	v->hdr.hop_limits,	rte_flow_item_ipv6) \
148 	X(SET_BE32P,	ipv6_src_addr_127_96,	&v->hdr.src_addr[0],	rte_flow_item_ipv6) \
149 	X(SET_BE32P,	ipv6_src_addr_95_64,	&v->hdr.src_addr[4],	rte_flow_item_ipv6) \
150 	X(SET_BE32P,	ipv6_src_addr_63_32,	&v->hdr.src_addr[8],	rte_flow_item_ipv6) \
151 	X(SET_BE32P,	ipv6_src_addr_31_0,	&v->hdr.src_addr[12],	rte_flow_item_ipv6) \
152 	X(SET_BE32P,	ipv6_dst_addr_127_96,	&v->hdr.dst_addr[0],	rte_flow_item_ipv6) \
153 	X(SET_BE32P,	ipv6_dst_addr_95_64,	&v->hdr.dst_addr[4],	rte_flow_item_ipv6) \
154 	X(SET_BE32P,	ipv6_dst_addr_63_32,	&v->hdr.dst_addr[8],	rte_flow_item_ipv6) \
155 	X(SET_BE32P,	ipv6_dst_addr_31_0,	&v->hdr.dst_addr[12],	rte_flow_item_ipv6) \
156 	X(SET,		ipv6_version,		STE_IPV6,		rte_flow_item_ipv6) \
157 	X(SET,		ipv6_frag,		v->has_frag_ext,	rte_flow_item_ipv6) \
158 	X(SET,		icmp_protocol,		STE_ICMP,		rte_flow_item_icmp) \
159 	X(SET,		udp_protocol,		STE_UDP,		rte_flow_item_udp) \
160 	X(SET_BE16,	udp_src_port,		v->hdr.src_port,	rte_flow_item_udp) \
161 	X(SET_BE16,	udp_dst_port,		v->hdr.dst_port,	rte_flow_item_udp) \
162 	X(SET,		tcp_flags,		v->hdr.tcp_flags,	rte_flow_item_tcp) \
163 	X(SET,		tcp_protocol,		STE_TCP,		rte_flow_item_tcp) \
164 	X(SET_BE16,	tcp_src_port,		v->hdr.src_port,	rte_flow_item_tcp) \
165 	X(SET_BE16,	tcp_dst_port,		v->hdr.dst_port,	rte_flow_item_tcp) \
166 	X(SET,		gtp_udp_port,		UDP_GTPU_PORT,		rte_flow_item_gtp) \
167 	X(SET_BE32,	gtp_teid,		v->hdr.teid,		rte_flow_item_gtp) \
168 	X(SET,		gtp_msg_type,		v->hdr.msg_type,	rte_flow_item_gtp) \
169 	X(SET,		gtp_ext_flag,		!!v->hdr.gtp_hdr_info,	rte_flow_item_gtp) \
170 	X(SET,		gtp_next_ext_hdr,	GTP_PDU_SC,		rte_flow_item_gtp_psc) \
171 	X(SET,		gtp_ext_hdr_pdu,	v->hdr.type,		rte_flow_item_gtp_psc) \
172 	X(SET,		gtp_ext_hdr_qfi,	v->hdr.qfi,		rte_flow_item_gtp_psc) \
173 	X(SET,		vxlan_flags,		v->flags,		rte_flow_item_vxlan) \
174 	X(SET,		vxlan_udp_port,		UDP_VXLAN_PORT,		rte_flow_item_vxlan) \
175 	X(SET,		vxlan_gpe_udp_port,	UDP_VXLAN_GPE_PORT,	rte_flow_item_vxlan_gpe) \
176 	X(SET,		vxlan_gpe_flags,	v->flags,		rte_flow_item_vxlan_gpe) \
177 	X(SET,		vxlan_gpe_protocol,	v->protocol,		rte_flow_item_vxlan_gpe) \
178 	X(SET,		vxlan_gpe_rsvd1,	v->rsvd1,		rte_flow_item_vxlan_gpe) \
179 	X(SET,		mpls_udp_port,		UDP_PORT_MPLS,		rte_flow_item_mpls) \
180 	X(SET,		source_qp,		v->queue,		mlx5_rte_flow_item_sq) \
181 	X(SET,		tag,			v->data,		rte_flow_item_tag) \
182 	X(SET,		metadata,		v->data,		rte_flow_item_meta) \
183 	X(SET_BE16,	geneve_protocol,	v->protocol,		rte_flow_item_geneve) \
184 	X(SET,		geneve_udp_port,	UDP_GENEVE_PORT,	rte_flow_item_geneve) \
185 	X(SET_BE16,	geneve_ctrl,		v->ver_opt_len_o_c_rsvd0,	rte_flow_item_geneve) \
186 	X(SET_BE16,	gre_c_ver,		v->c_rsvd0_ver,		rte_flow_item_gre) \
187 	X(SET_BE16,	gre_protocol_type,	v->protocol,		rte_flow_item_gre) \
188 	X(SET,		ipv4_protocol_gre,	IPPROTO_GRE,		rte_flow_item_gre) \
189 	X(SET_BE32,	gre_opt_key,		v->key.key,		rte_flow_item_gre_opt) \
190 	X(SET_BE32,	gre_opt_seq,		v->sequence.sequence,	rte_flow_item_gre_opt) \
191 	X(SET_BE16,	gre_opt_checksum,	v->checksum_rsvd.checksum,	rte_flow_item_gre_opt) \
192 	X(SET,		meter_color,		rte_col_2_mlx5_col(v->color),	rte_flow_item_meter_color) \
193 	X(SET_BE32,     ipsec_spi,              v->hdr.spi,             rte_flow_item_esp) \
194 	X(SET_BE32,     ipsec_sequence_number,  v->hdr.seq,             rte_flow_item_esp) \
195 	X(SET,		ib_l4_udp_port,		UDP_ROCEV2_PORT,	rte_flow_item_ib_bth) \
196 	X(SET,		ib_l4_opcode,		v->hdr.opcode,		rte_flow_item_ib_bth) \
197 	X(SET,		random_number,		v->value,		rte_flow_item_random) \
198 	X(SET,		ib_l4_bth_a,		v->hdr.a,		rte_flow_item_ib_bth) \
199 
200 /* Item set function format */
201 #define X(set_type, func_name, value, item_type) \
202 static void mlx5dr_definer_##func_name##_set( \
203 	struct mlx5dr_definer_fc *fc, \
204 	const void *item_spec, \
205 	uint8_t *tag) \
206 { \
207 	__rte_unused const struct item_type *v = item_spec; \
208 	DR_##set_type(tag, value, fc->byte_off, fc->bit_off, fc->bit_mask); \
209 }
210 LIST_OF_FIELDS_INFO
211 #undef X
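
/* Illustration of one expansion from the list above: the entry
 * X(SET_BE16, eth_type, v->hdr.ether_type, rte_flow_item_eth) produces
 *
 *	static void mlx5dr_definer_eth_type_set(struct mlx5dr_definer_fc *fc,
 *						const void *item_spec,
 *						uint8_t *tag)
 *	{
 *		__rte_unused const struct rte_flow_item_eth *v = item_spec;
 *		DR_SET_BE16(tag, v->hdr.ether_type, fc->byte_off,
 *			    fc->bit_off, fc->bit_mask);
 *	}
 */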
212 
213 static void
214 mlx5dr_definer_ones_set(struct mlx5dr_definer_fc *fc,
215 			__rte_unused const void *item_spec,
216 			__rte_unused uint8_t *tag)
217 {
218 	DR_SET(tag, -1, fc->byte_off, fc->bit_off, fc->bit_mask);
219 }
220 
221 static void
222 mlx5dr_definer_eth_first_vlan_q_set(struct mlx5dr_definer_fc *fc,
223 				    const void *item_spec,
224 				    uint8_t *tag)
225 {
226 	const struct rte_flow_item_eth *v = item_spec;
227 	uint8_t vlan_type;
228 
229 	vlan_type = v->has_vlan ? STE_CVLAN : STE_NO_VLAN;
230 
231 	DR_SET(tag, vlan_type, fc->byte_off, fc->bit_off, fc->bit_mask);
232 }
233 
234 static void
235 mlx5dr_definer_first_vlan_q_set(struct mlx5dr_definer_fc *fc,
236 				const void *item_spec,
237 				uint8_t *tag)
238 {
239 	const struct rte_flow_item_vlan *v = item_spec;
240 	uint8_t vlan_type;
241 
242 	vlan_type = v->has_more_vlan ? STE_SVLAN : STE_CVLAN;
243 
244 	DR_SET(tag, vlan_type, fc->byte_off, fc->bit_off, fc->bit_mask);
245 }
246 
247 static void
248 mlx5dr_definer_conntrack_mask(struct mlx5dr_definer_fc *fc,
249 			      const void *item_spec,
250 			      uint8_t *tag)
251 {
252 	const struct rte_flow_item_conntrack *m = item_spec;
253 	uint32_t reg_mask = 0;
254 
255 	if (m->flags & (RTE_FLOW_CONNTRACK_PKT_STATE_VALID |
256 			RTE_FLOW_CONNTRACK_PKT_STATE_INVALID |
257 			RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED))
258 		reg_mask |= (MLX5_CT_SYNDROME_VALID | MLX5_CT_SYNDROME_INVALID |
259 			     MLX5_CT_SYNDROME_TRAP);
260 
261 	if (m->flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
262 		reg_mask |= MLX5_CT_SYNDROME_STATE_CHANGE;
263 
264 	if (m->flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
265 		reg_mask |= MLX5_CT_SYNDROME_BAD_PACKET;
266 
267 	DR_SET(tag, reg_mask, fc->byte_off, fc->bit_off, fc->bit_mask);
268 }
269 
270 static void
271 mlx5dr_definer_conntrack_tag(struct mlx5dr_definer_fc *fc,
272 			     const void *item_spec,
273 			     uint8_t *tag)
274 {
275 	const struct rte_flow_item_conntrack *v = item_spec;
276 	uint32_t reg_value = 0;
277 
278 	/* The conflict should be checked in the validation. */
279 	if (v->flags & RTE_FLOW_CONNTRACK_PKT_STATE_VALID)
280 		reg_value |= MLX5_CT_SYNDROME_VALID;
281 
282 	if (v->flags & RTE_FLOW_CONNTRACK_PKT_STATE_CHANGED)
283 		reg_value |= MLX5_CT_SYNDROME_STATE_CHANGE;
284 
285 	if (v->flags & RTE_FLOW_CONNTRACK_PKT_STATE_INVALID)
286 		reg_value |= MLX5_CT_SYNDROME_INVALID;
287 
288 	if (v->flags & RTE_FLOW_CONNTRACK_PKT_STATE_DISABLED)
289 		reg_value |= MLX5_CT_SYNDROME_TRAP;
290 
291 	if (v->flags & RTE_FLOW_CONNTRACK_PKT_STATE_BAD)
292 		reg_value |= MLX5_CT_SYNDROME_BAD_PACKET;
293 
294 	DR_SET(tag, reg_value, fc->byte_off, fc->bit_off, fc->bit_mask);
295 }
296 
297 static void
298 mlx5dr_definer_ptype_l2_set(struct mlx5dr_definer_fc *fc,
299 			    const void *item_spec,
300 			    uint8_t *tag)
301 {
302 	bool inner = (fc->fname == MLX5DR_DEFINER_FNAME_PTYPE_L2_I);
303 	const struct rte_flow_item_ptype *v = item_spec;
304 	uint32_t packet_type = v->packet_type &
305 		(inner ? RTE_PTYPE_INNER_L2_MASK : RTE_PTYPE_L2_MASK);
306 	uint8_t l2_type = STE_NO_VLAN;
307 
308 	if (packet_type == (inner ? RTE_PTYPE_INNER_L2_ETHER : RTE_PTYPE_L2_ETHER))
309 		l2_type = STE_NO_VLAN;
310 	else if (packet_type == (inner ? RTE_PTYPE_INNER_L2_ETHER_VLAN : RTE_PTYPE_L2_ETHER_VLAN))
311 		l2_type = STE_CVLAN;
312 	else if (packet_type == (inner ? RTE_PTYPE_INNER_L2_ETHER_QINQ : RTE_PTYPE_L2_ETHER_QINQ))
313 		l2_type = STE_SVLAN;
314 
315 	DR_SET(tag, l2_type, fc->byte_off, fc->bit_off, fc->bit_mask);
316 }
317 
318 static void
319 mlx5dr_definer_ptype_l3_set(struct mlx5dr_definer_fc *fc,
320 			    const void *item_spec,
321 			    uint8_t *tag)
322 {
323 	bool inner = (fc->fname == MLX5DR_DEFINER_FNAME_PTYPE_L3_I);
324 	const struct rte_flow_item_ptype *v = item_spec;
325 	uint32_t packet_type = v->packet_type &
326 		(inner ? RTE_PTYPE_INNER_L3_MASK : RTE_PTYPE_L3_MASK);
327 	uint8_t l3_type = STE_NO_L3;
328 
329 	if (packet_type == (inner ? RTE_PTYPE_INNER_L3_IPV4 : RTE_PTYPE_L3_IPV4))
330 		l3_type = STE_IPV4;
331 	else if (packet_type == (inner ? RTE_PTYPE_INNER_L3_IPV6 : RTE_PTYPE_L3_IPV6))
332 		l3_type = STE_IPV6;
333 
334 	DR_SET(tag, l3_type, fc->byte_off, fc->bit_off, fc->bit_mask);
335 }
336 
337 static void
338 mlx5dr_definer_ptype_l4_set(struct mlx5dr_definer_fc *fc,
339 			    const void *item_spec,
340 			    uint8_t *tag)
341 {
342 	bool inner = (fc->fname == MLX5DR_DEFINER_FNAME_PTYPE_L4_I);
343 	const struct rte_flow_item_ptype *v = item_spec;
344 	uint32_t packet_type = v->packet_type &
345 		(inner ? RTE_PTYPE_INNER_L4_MASK : RTE_PTYPE_L4_MASK);
346 	uint8_t l4_type = STE_NO_L4;
347 
348 	if (packet_type == (inner ? RTE_PTYPE_INNER_L4_TCP : RTE_PTYPE_L4_TCP))
349 		l4_type = STE_TCP;
350 	else if (packet_type == (inner ? RTE_PTYPE_INNER_L4_UDP : RTE_PTYPE_L4_UDP))
351 		l4_type = STE_UDP;
352 	else if (packet_type == (inner ? RTE_PTYPE_INNER_L4_ICMP : RTE_PTYPE_L4_ICMP))
353 		l4_type = STE_ICMP;
354 
355 	DR_SET(tag, l4_type, fc->byte_off, fc->bit_off, fc->bit_mask);
356 }
357 
358 static void
359 mlx5dr_definer_ptype_tunnel_set(struct mlx5dr_definer_fc *fc,
360 				const void *item_spec,
361 				uint8_t *tag)
362 {
363 	const struct rte_flow_item_ptype *v = item_spec;
364 	uint32_t packet_type = v->packet_type & RTE_PTYPE_TUNNEL_MASK;
365 	uint8_t tun_type = STE_NO_TUN;
366 
367 	if (packet_type == RTE_PTYPE_TUNNEL_ESP)
368 		tun_type = STE_ESP;
369 
370 	DR_SET(tag, tun_type, fc->byte_off, fc->bit_off, fc->bit_mask);
371 }
372 
373 static void
374 mlx5dr_definer_ptype_frag_set(struct mlx5dr_definer_fc *fc,
375 			      const void *item_spec,
376 			      uint8_t *tag)
377 {
378 	bool inner = (fc->fname == MLX5DR_DEFINER_FNAME_PTYPE_FRAG_I);
379 	const struct rte_flow_item_ptype *v = item_spec;
380 	uint32_t packet_type = v->packet_type &
381 		(inner ? RTE_PTYPE_INNER_L4_FRAG : RTE_PTYPE_L4_FRAG);
382 
383 	DR_SET(tag, !!packet_type, fc->byte_off, fc->bit_off, fc->bit_mask);
384 }
385 
386 static void
387 mlx5dr_definer_integrity_set(struct mlx5dr_definer_fc *fc,
388 			     const void *item_spec,
389 			     uint8_t *tag)
390 {
391 	bool inner = (fc->fname == MLX5DR_DEFINER_FNAME_INTEGRITY_I);
392 	const struct rte_flow_item_integrity *v = item_spec;
393 	uint32_t ok1_bits = 0;
394 
395 	if (v->l3_ok)
396 		ok1_bits |= inner ? BIT(MLX5DR_DEFINER_OKS1_SECOND_L3_OK) :
397 				    BIT(MLX5DR_DEFINER_OKS1_FIRST_L3_OK);
398 
399 	if (v->ipv4_csum_ok)
400 		ok1_bits |= inner ? BIT(MLX5DR_DEFINER_OKS1_SECOND_IPV4_CSUM_OK) :
401 				    BIT(MLX5DR_DEFINER_OKS1_FIRST_IPV4_CSUM_OK);
402 
403 	if (v->l4_ok)
404 		ok1_bits |= inner ? BIT(MLX5DR_DEFINER_OKS1_SECOND_L4_OK) |
405 				    BIT(MLX5DR_DEFINER_OKS1_SECOND_L4_CSUM_OK) :
406 				    BIT(MLX5DR_DEFINER_OKS1_FIRST_L4_OK) |
407 				    BIT(MLX5DR_DEFINER_OKS1_FIRST_L4_CSUM_OK);
408 
409 	if (v->l4_csum_ok)
410 		ok1_bits |= inner ? BIT(MLX5DR_DEFINER_OKS1_SECOND_L4_CSUM_OK) :
411 				    BIT(MLX5DR_DEFINER_OKS1_FIRST_L4_CSUM_OK);
412 
413 	DR_SET(tag, ok1_bits, fc->byte_off, fc->bit_off, fc->bit_mask);
414 }
415 
416 static void
417 mlx5dr_definer_ipv6_routing_ext_set(struct mlx5dr_definer_fc *fc,
418 				    const void *item,
419 				    uint8_t *tag)
420 {
421 	const struct rte_flow_item_ipv6_routing_ext *v = item;
422 	uint32_t val;
423 
424 	val = v->hdr.next_hdr << __mlx5_dw_bit_off(header_ipv6_routing_ext, next_hdr);
425 	val |= v->hdr.type << __mlx5_dw_bit_off(header_ipv6_routing_ext, type);
426 	val |= v->hdr.segments_left <<
427 		__mlx5_dw_bit_off(header_ipv6_routing_ext, segments_left);
428 	DR_SET(tag, val, fc->byte_off, 0, fc->bit_mask);
429 }
430 
431 static void
432 mlx5dr_definer_flex_parser_set(struct mlx5dr_definer_fc *fc,
433 			       const void *item,
434 			       uint8_t *tag, bool is_inner)
435 {
436 	const struct rte_flow_item_flex *flex = item;
437 	uint32_t byte_off, val, idx;
438 	int ret;
439 
440 	val = 0;
441 	byte_off = MLX5_BYTE_OFF(definer_hl, flex_parser.flex_parser_0);
442 	idx = fc->fname - MLX5DR_DEFINER_FNAME_FLEX_PARSER_0;
443 	byte_off -= idx * sizeof(uint32_t);
444 	ret = mlx5_flex_get_parser_value_per_byte_off(flex, flex->handle, byte_off,
445 						      false, is_inner, &val);
446 	if (ret == -1 || !val)
447 		return;
448 
449 	DR_SET(tag, val, fc->byte_off, 0, fc->bit_mask);
450 }
451 
452 static void
453 mlx5dr_definer_flex_parser_inner_set(struct mlx5dr_definer_fc *fc,
454 				     const void *item,
455 				     uint8_t *tag)
456 {
457 	mlx5dr_definer_flex_parser_set(fc, item, tag, true);
458 }
459 
460 static void
461 mlx5dr_definer_flex_parser_outer_set(struct mlx5dr_definer_fc *fc,
462 				     const void *item,
463 				     uint8_t *tag)
464 {
465 	mlx5dr_definer_flex_parser_set(fc, item, tag, false);
466 }
467 
468 static void
469 mlx5dr_definer_gre_key_set(struct mlx5dr_definer_fc *fc,
470 			   const void *item_spec,
471 			   uint8_t *tag)
472 {
473 	const rte_be32_t *v = item_spec;
474 
475 	DR_SET_BE32(tag, *v, fc->byte_off, fc->bit_off, fc->bit_mask);
476 }
477 
478 static void
479 mlx5dr_definer_vxlan_vni_set(struct mlx5dr_definer_fc *fc,
480 			     const void *item_spec,
481 			     uint8_t *tag)
482 {
483 	const struct rte_flow_item_vxlan *v = item_spec;
484 
485 	memcpy(tag + fc->byte_off, v->vni, sizeof(v->vni));
486 }
487 
488 static void
489 mlx5dr_definer_ipv6_tos_set(struct mlx5dr_definer_fc *fc,
490 			    const void *item_spec,
491 			    uint8_t *tag)
492 {
493 	const struct rte_flow_item_ipv6 *v = item_spec;
494 	uint8_t tos = DR_GET(header_ipv6_vtc, &v->hdr.vtc_flow, tos);
495 
496 	DR_SET(tag, tos, fc->byte_off, fc->bit_off, fc->bit_mask);
497 }
498 
499 static void
500 mlx5dr_definer_icmp_dw1_set(struct mlx5dr_definer_fc *fc,
501 			    const void *item_spec,
502 			    uint8_t *tag)
503 {
504 	const struct rte_flow_item_icmp *v = item_spec;
505 	rte_be32_t icmp_dw1;
506 
507 	icmp_dw1 = (v->hdr.icmp_type << __mlx5_dw_bit_off(header_icmp, type)) |
508 		   (v->hdr.icmp_code << __mlx5_dw_bit_off(header_icmp, code)) |
509 		   (rte_be_to_cpu_16(v->hdr.icmp_cksum) << __mlx5_dw_bit_off(header_icmp, cksum));
510 
511 	DR_SET(tag, icmp_dw1, fc->byte_off, fc->bit_off, fc->bit_mask);
512 }
513 
514 static void
515 mlx5dr_definer_icmp_dw2_set(struct mlx5dr_definer_fc *fc,
516 			    const void *item_spec,
517 			    uint8_t *tag)
518 {
519 	const struct rte_flow_item_icmp *v = item_spec;
520 	rte_be32_t icmp_dw2;
521 
522 	icmp_dw2 = (rte_be_to_cpu_16(v->hdr.icmp_ident) << __mlx5_dw_bit_off(header_icmp, ident)) |
523 		   (rte_be_to_cpu_16(v->hdr.icmp_seq_nb) << __mlx5_dw_bit_off(header_icmp, seq_nb));
524 
525 	DR_SET(tag, icmp_dw2, fc->byte_off, fc->bit_off, fc->bit_mask);
526 }
527 
528 static void
529 mlx5dr_definer_icmp6_dw1_set(struct mlx5dr_definer_fc *fc,
530 			    const void *item_spec,
531 			    uint8_t *tag)
532 {
533 	const struct rte_flow_item_icmp6 *v = item_spec;
534 	rte_be32_t icmp_dw1;
535 
536 	icmp_dw1 = (v->type << __mlx5_dw_bit_off(header_icmp, type)) |
537 		   (v->code << __mlx5_dw_bit_off(header_icmp, code)) |
538 		   (rte_be_to_cpu_16(v->checksum) << __mlx5_dw_bit_off(header_icmp, cksum));
539 
540 	DR_SET(tag, icmp_dw1, fc->byte_off, fc->bit_off, fc->bit_mask);
541 }
542 
543 static void
544 mlx5dr_definer_icmp6_echo_dw1_mask_set(struct mlx5dr_definer_fc *fc,
545 				       __rte_unused const void *item_spec,
546 				       uint8_t *tag)
547 {
548 	const struct rte_flow_item_icmp6 spec = {0xFF, 0xFF, 0x0};
549 	mlx5dr_definer_icmp6_dw1_set(fc, &spec, tag);
550 }
551 
552 static void
553 mlx5dr_definer_icmp6_echo_request_dw1_set(struct mlx5dr_definer_fc *fc,
554 					  __rte_unused const void *item_spec,
555 					  uint8_t *tag)
556 {
557 	const struct rte_flow_item_icmp6 spec = {RTE_ICMP6_ECHO_REQUEST, 0, 0};
558 	mlx5dr_definer_icmp6_dw1_set(fc, &spec, tag);
559 }
560 
561 static void
562 mlx5dr_definer_icmp6_echo_reply_dw1_set(struct mlx5dr_definer_fc *fc,
563 					__rte_unused const void *item_spec,
564 					uint8_t *tag)
565 {
566 	const struct rte_flow_item_icmp6 spec = {RTE_ICMP6_ECHO_REPLY, 0, 0};
567 	mlx5dr_definer_icmp6_dw1_set(fc, &spec, tag);
568 }
569 
570 static void
571 mlx5dr_definer_icmp6_echo_dw2_set(struct mlx5dr_definer_fc *fc,
572 				  const void *item_spec,
573 				  uint8_t *tag)
574 {
575 	const struct rte_flow_item_icmp6_echo *v = item_spec;
576 	rte_be32_t dw2;
577 
578 	dw2 = (rte_be_to_cpu_16(v->hdr.identifier) << __mlx5_dw_bit_off(header_icmp, ident)) |
579 	      (rte_be_to_cpu_16(v->hdr.sequence) << __mlx5_dw_bit_off(header_icmp, seq_nb));
580 
581 	DR_SET(tag, dw2, fc->byte_off, fc->bit_off, fc->bit_mask);
582 }
583 
584 static void
585 mlx5dr_definer_ipv6_flow_label_set(struct mlx5dr_definer_fc *fc,
586 				   const void *item_spec,
587 				   uint8_t *tag)
588 {
589 	const struct rte_flow_item_ipv6 *v = item_spec;
590 	uint32_t flow_label = DR_GET(header_ipv6_vtc, &v->hdr.vtc_flow, flow_label);
591 
592 	DR_SET(tag, flow_label, fc->byte_off, fc->bit_off, fc->bit_mask);
593 }
594 
595 static void
596 mlx5dr_definer_vport_set(struct mlx5dr_definer_fc *fc,
597 			 const void *item_spec,
598 			 uint8_t *tag)
599 {
600 	const struct rte_flow_item_ethdev *v = item_spec;
601 	const struct flow_hw_port_info *port_info;
602 	uint32_t regc_value;
603 
604 	port_info = flow_hw_conv_port_id(v->port_id);
605 	if (unlikely(!port_info))
606 		regc_value = BAD_PORT;
607 	else
608 		regc_value = port_info->regc_value >> fc->bit_off;
609 
610 	/* Bit offset is set to 0 since regc value is 32bit */
611 	DR_SET(tag, regc_value, fc->byte_off, fc->bit_off, fc->bit_mask);
612 }
613 
614 static struct mlx5dr_definer_fc *
615 mlx5dr_definer_get_mpls_fc(struct mlx5dr_definer_conv_data *cd, bool inner)
616 {
617 	uint8_t mpls_idx = cd->mpls_idx;
618 	struct mlx5dr_definer_fc *fc;
619 
620 	switch (mpls_idx) {
621 	case 0:
622 		fc = &cd->fc[DR_CALC_FNAME(MPLS0, inner)];
623 		DR_CALC_SET_HDR(fc, mpls_inner, mpls0_label);
624 		break;
625 	case 1:
626 		fc = &cd->fc[DR_CALC_FNAME(MPLS1, inner)];
627 		DR_CALC_SET_HDR(fc, mpls_inner, mpls1_label);
628 		break;
629 	case 2:
630 		fc = &cd->fc[DR_CALC_FNAME(MPLS2, inner)];
631 		DR_CALC_SET_HDR(fc, mpls_inner, mpls2_label);
632 		break;
633 	case 3:
634 		fc = &cd->fc[DR_CALC_FNAME(MPLS3, inner)];
635 		DR_CALC_SET_HDR(fc, mpls_inner, mpls3_label);
636 		break;
637 	case 4:
638 		fc = &cd->fc[DR_CALC_FNAME(MPLS4, inner)];
639 		DR_CALC_SET_HDR(fc, mpls_inner, mpls4_label);
640 		break;
641 	default:
642 		rte_errno = ENOTSUP;
643 		DR_LOG(ERR, "MPLS index %d is not supported", mpls_idx);
644 		return NULL;
645 	}
646 
647 	return fc;
648 }
649 
650 static struct mlx5dr_definer_fc *
651 mlx5dr_definer_get_mpls_oks_fc(struct mlx5dr_definer_conv_data *cd, bool inner)
652 {
653 	uint8_t mpls_idx = cd->mpls_idx;
654 	struct mlx5dr_definer_fc *fc;
655 
656 	switch (mpls_idx) {
657 	case 0:
658 		fc = &cd->fc[DR_CALC_FNAME(OKS2_MPLS0, inner)];
659 		DR_CALC_SET_HDR(fc, oks2, second_mpls0_qualifier);
660 		break;
661 	case 1:
662 		fc = &cd->fc[DR_CALC_FNAME(OKS2_MPLS1, inner)];
663 		DR_CALC_SET_HDR(fc, oks2, second_mpls1_qualifier);
664 		break;
665 	case 2:
666 		fc = &cd->fc[DR_CALC_FNAME(OKS2_MPLS2, inner)];
667 		DR_CALC_SET_HDR(fc, oks2, second_mpls2_qualifier);
668 		break;
669 	case 3:
670 		fc = &cd->fc[DR_CALC_FNAME(OKS2_MPLS3, inner)];
671 		DR_CALC_SET_HDR(fc, oks2, second_mpls3_qualifier);
672 		break;
673 	case 4:
674 		fc = &cd->fc[DR_CALC_FNAME(OKS2_MPLS4, inner)];
675 		DR_CALC_SET_HDR(fc, oks2, second_mpls4_qualifier);
676 		break;
677 	default:
678 		rte_errno = ENOTSUP;
679 		DR_LOG(ERR, "MPLS index %d is not supported", mpls_idx);
680 		return NULL;
681 	}
682 
683 	return fc;
684 }
685 
686 static void
687 mlx5dr_definer_mpls_label_set(struct mlx5dr_definer_fc *fc,
688 			      const void *item_spec,
689 			      uint8_t *tag)
690 {
691 	const struct rte_flow_item_mpls *v = item_spec;
692 
693 	memcpy(tag + fc->byte_off, v->label_tc_s, sizeof(v->label_tc_s));
694 	memcpy(tag + fc->byte_off + sizeof(v->label_tc_s), &v->ttl, sizeof(v->ttl));
695 }
696 
697 static void
698 mlx5dr_definer_geneve_vni_set(struct mlx5dr_definer_fc *fc,
699 			      const void *item_spec,
700 			      uint8_t *tag)
701 {
702 	const struct rte_flow_item_geneve *v = item_spec;
703 
704 	memcpy(tag + fc->byte_off, v->vni, sizeof(v->vni));
705 }
706 
707 static void
708 mlx5dr_definer_geneve_opt_ctrl_set(struct mlx5dr_definer_fc *fc,
709 				   const void *item_spec,
710 				   uint8_t *tag)
711 {
712 	const struct rte_flow_item_geneve_opt *v = item_spec;
713 	uint32_t dw0 = 0;
714 
715 	dw0 |= v->option_type << __mlx5_dw_bit_off(header_geneve_opt, type);
716 	dw0 |= rte_cpu_to_be_16(v->option_class) << __mlx5_dw_bit_off(header_geneve_opt, class);
717 	DR_SET(tag, dw0, fc->byte_off, fc->bit_off, fc->bit_mask);
718 }
719 
720 static void
721 mlx5dr_definer_geneve_opt_data_set(struct mlx5dr_definer_fc *fc,
722 				   const void *item_spec,
723 				   uint8_t *tag)
724 {
725 	const struct rte_flow_item_geneve_opt *v = item_spec;
726 
727 	DR_SET_BE32(tag, v->data[fc->extra_data], fc->byte_off, fc->bit_off, fc->bit_mask);
728 }
729 
730 static void
731 mlx5dr_definer_ib_l4_qp_set(struct mlx5dr_definer_fc *fc,
732 			    const void *item_spec,
733 			    uint8_t *tag)
734 {
735 	const struct rte_flow_item_ib_bth *v = item_spec;
736 
737 	memcpy(tag + fc->byte_off, &v->hdr.dst_qp, sizeof(v->hdr.dst_qp));
738 }
739 
740 static void
741 mlx5dr_definer_vxlan_gpe_vni_set(struct mlx5dr_definer_fc *fc,
742 				 const void *item_spec,
743 				 uint8_t *tag)
744 {
745 	const struct rte_flow_item_vxlan_gpe *v = item_spec;
746 
747 	memcpy(tag + fc->byte_off, v->vni, sizeof(v->vni));
748 }
749 
750 static void
751 mlx5dr_definer_vxlan_gpe_rsvd0_set(struct mlx5dr_definer_fc *fc,
752 				   const void *item_spec,
753 				   uint8_t *tag)
754 {
755 	const struct rte_flow_item_vxlan_gpe *v = item_spec;
756 	uint16_t rsvd0;
757 
758 	rsvd0 = (v->rsvd0[0] << 8 | v->rsvd0[1]);
759 	DR_SET(tag, rsvd0, fc->byte_off, fc->bit_off, fc->bit_mask);
760 }
761 
762 static int
763 mlx5dr_definer_conv_item_eth(struct mlx5dr_definer_conv_data *cd,
764 			     struct rte_flow_item *item,
765 			     int item_idx)
766 {
767 	const struct rte_flow_item_eth *m = item->mask;
768 	uint8_t empty_mac[RTE_ETHER_ADDR_LEN] = {0};
769 	struct mlx5dr_definer_fc *fc;
770 	bool inner = cd->tunnel;
771 
772 	if (!m)
773 		return 0;
774 
775 	if (m->reserved) {
776 		rte_errno = ENOTSUP;
777 		return rte_errno;
778 	}
779 
780 	if (m->hdr.ether_type) {
781 		fc = &cd->fc[DR_CALC_FNAME(ETH_TYPE, inner)];
782 		fc->item_idx = item_idx;
783 		fc->tag_set = &mlx5dr_definer_eth_type_set;
784 		DR_CALC_SET(fc, eth_l2, l3_ethertype, inner);
785 	}
786 
787 	/* Check SMAC 47_16 */
788 	if (memcmp(m->hdr.src_addr.addr_bytes, empty_mac, 4)) {
789 		fc = &cd->fc[DR_CALC_FNAME(ETH_SMAC_48_16, inner)];
790 		fc->item_idx = item_idx;
791 		fc->tag_set = &mlx5dr_definer_eth_smac_47_16_set;
792 		DR_CALC_SET(fc, eth_l2_src, smac_47_16, inner);
793 	}
794 
795 	/* Check SMAC 15_0 */
796 	if (memcmp(m->hdr.src_addr.addr_bytes + 4, empty_mac + 4, 2)) {
797 		fc = &cd->fc[DR_CALC_FNAME(ETH_SMAC_15_0, inner)];
798 		fc->item_idx = item_idx;
799 		fc->tag_set = &mlx5dr_definer_eth_smac_15_0_set;
800 		DR_CALC_SET(fc, eth_l2_src, smac_15_0, inner);
801 	}
802 
803 	/* Check DMAC 47_16 */
804 	if (memcmp(m->hdr.dst_addr.addr_bytes, empty_mac, 4)) {
805 		fc = &cd->fc[DR_CALC_FNAME(ETH_DMAC_48_16, inner)];
806 		fc->item_idx = item_idx;
807 		fc->tag_set = &mlx5dr_definer_eth_dmac_47_16_set;
808 		DR_CALC_SET(fc, eth_l2, dmac_47_16, inner);
809 	}
810 
811 	/* Check DMAC 15_0 */
812 	if (memcmp(m->hdr.dst_addr.addr_bytes + 4, empty_mac + 4, 2)) {
813 		fc = &cd->fc[DR_CALC_FNAME(ETH_DMAC_15_0, inner)];
814 		fc->item_idx = item_idx;
815 		fc->tag_set = &mlx5dr_definer_eth_dmac_15_0_set;
816 		DR_CALC_SET(fc, eth_l2, dmac_15_0, inner);
817 	}
818 
819 	if (m->has_vlan) {
820 		/* Mark packet as tagged (CVLAN) */
821 		fc = &cd->fc[DR_CALC_FNAME(VLAN_TYPE, inner)];
822 		fc->item_idx = item_idx;
823 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
824 		fc->tag_set = &mlx5dr_definer_eth_first_vlan_q_set;
825 		DR_CALC_SET(fc, eth_l2, first_vlan_qualifier, inner);
826 	}
827 
828 	return 0;
829 }
830 
831 static int
832 mlx5dr_definer_conv_item_vlan(struct mlx5dr_definer_conv_data *cd,
833 			      struct rte_flow_item *item,
834 			      int item_idx)
835 {
836 	const struct rte_flow_item_vlan *m = item->mask;
837 	struct mlx5dr_definer_fc *fc;
838 	bool inner = cd->tunnel;
839 
840 	if (!m)
841 		return 0;
842 
843 	if (m->reserved) {
844 		rte_errno = ENOTSUP;
845 		return rte_errno;
846 	}
847 
848 	if (!cd->relaxed || m->has_more_vlan) {
849 		/* Mark packet as tagged (CVLAN or SVLAN) even if TCI is not specified. */
850 		fc = &cd->fc[DR_CALC_FNAME(VLAN_TYPE, inner)];
851 		fc->item_idx = item_idx;
852 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
853 		fc->tag_set = &mlx5dr_definer_first_vlan_q_set;
854 		DR_CALC_SET(fc, eth_l2, first_vlan_qualifier, inner);
855 	}
856 
857 	if (m->hdr.vlan_tci) {
858 		fc = &cd->fc[DR_CALC_FNAME(VLAN_TCI, inner)];
859 		fc->item_idx = item_idx;
860 		fc->tag_set = &mlx5dr_definer_tci_set;
861 		DR_CALC_SET(fc, eth_l2, tci, inner);
862 	}
863 
864 	if (m->hdr.eth_proto) {
865 		fc = &cd->fc[DR_CALC_FNAME(ETH_TYPE, inner)];
866 		fc->item_idx = item_idx;
867 		fc->tag_set = &mlx5dr_definer_eth_type_set;
868 		DR_CALC_SET(fc, eth_l2, l3_ethertype, inner);
869 	}
870 
871 	return 0;
872 }
873 
874 static int
875 mlx5dr_definer_conv_item_ipv4(struct mlx5dr_definer_conv_data *cd,
876 			      struct rte_flow_item *item,
877 			      int item_idx)
878 {
879 	const struct rte_ipv4_hdr *m = item->mask;
880 	const struct rte_ipv4_hdr *l = item->last;
881 	struct mlx5dr_definer_fc *fc;
882 	bool inner = cd->tunnel;
883 
884 	if (!cd->relaxed) {
885 		fc = &cd->fc[DR_CALC_FNAME(IP_VERSION, inner)];
886 		fc->item_idx = item_idx;
887 		fc->tag_set = &mlx5dr_definer_ipv4_version_set;
888 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
889 		DR_CALC_SET(fc, eth_l2, l3_type, inner);
890 
891 		/* Overwrite - Unset ethertype if present */
892 		memset(&cd->fc[DR_CALC_FNAME(ETH_TYPE, inner)], 0, sizeof(*fc));
893 	}
894 
895 	if (!m)
896 		return 0;
897 
898 	if (m->packet_id || m->hdr_checksum ||
899 	    (l && (l->next_proto_id || l->type_of_service))) {
900 		rte_errno = ENOTSUP;
901 		return rte_errno;
902 	}
903 
904 	if (m->version) {
905 		fc = &cd->fc[DR_CALC_FNAME(IP_VERSION, inner)];
906 		fc->item_idx = item_idx;
907 		fc->tag_set = &mlx5dr_definer_ipv4_version_set;
908 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
909 		DR_CALC_SET(fc, eth_l2, l3_type, inner);
910 	}
911 
912 	if (m->fragment_offset) {
913 		fc = &cd->fc[DR_CALC_FNAME(IP_FRAG, inner)];
914 		fc->item_idx = item_idx;
915 		if (rte_be_to_cpu_16(m->fragment_offset) == 0x3fff) {
916 			fc->tag_set = &mlx5dr_definer_ip_fragmented_set;
917 			DR_CALC_SET(fc, eth_l2, ip_fragmented, inner);
918 		} else {
919 			fc->is_range = l && l->fragment_offset;
920 			fc->tag_set = &mlx5dr_definer_ipv4_frag_set;
921 			DR_CALC_SET(fc, eth_l3, ipv4_frag, inner);
922 		}
923 	}
924 
925 	if (m->next_proto_id) {
926 		fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)];
927 		fc->item_idx = item_idx;
928 		fc->tag_set = &mlx5dr_definer_ipv4_next_proto_set;
929 		DR_CALC_SET(fc, eth_l3, protocol_next_header, inner);
930 	}
931 
932 	if (m->total_length) {
933 		fc = &cd->fc[DR_CALC_FNAME(IP_LEN, inner)];
934 		fc->item_idx = item_idx;
935 		fc->is_range = l && l->total_length;
936 		fc->tag_set = &mlx5dr_definer_ipv4_len_set;
937 		DR_CALC_SET(fc, eth_l3, ipv4_total_length, inner);
938 	}
939 
940 	if (m->dst_addr) {
941 		fc = &cd->fc[DR_CALC_FNAME(IPV4_DST, inner)];
942 		fc->item_idx = item_idx;
943 		fc->is_range = l && l->dst_addr;
944 		fc->tag_set = &mlx5dr_definer_ipv4_dst_addr_set;
945 		DR_CALC_SET(fc, ipv4_src_dest, destination_address, inner);
946 	}
947 
948 	if (m->src_addr) {
949 		fc = &cd->fc[DR_CALC_FNAME(IPV4_SRC, inner)];
950 		fc->item_idx = item_idx;
951 		fc->is_range = l && l->src_addr;
952 		fc->tag_set = &mlx5dr_definer_ipv4_src_addr_set;
953 		DR_CALC_SET(fc, ipv4_src_dest, source_address, inner);
954 	}
955 
956 	if (m->ihl) {
957 		fc = &cd->fc[DR_CALC_FNAME(IPV4_IHL, inner)];
958 		fc->item_idx = item_idx;
959 		fc->is_range = l && l->ihl;
960 		fc->tag_set = &mlx5dr_definer_ipv4_ihl_set;
961 		DR_CALC_SET(fc, eth_l3, ihl, inner);
962 	}
963 
964 	if (m->time_to_live) {
965 		fc = &cd->fc[DR_CALC_FNAME(IP_TTL, inner)];
966 		fc->item_idx = item_idx;
967 		fc->is_range = l && l->time_to_live;
968 		fc->tag_set = &mlx5dr_definer_ipv4_time_to_live_set;
969 		DR_CALC_SET(fc, eth_l3, time_to_live_hop_limit, inner);
970 	}
971 
972 	if (m->type_of_service) {
973 		fc = &cd->fc[DR_CALC_FNAME(IP_TOS, inner)];
974 		fc->item_idx = item_idx;
975 		fc->tag_set = &mlx5dr_definer_ipv4_tos_set;
976 		DR_CALC_SET(fc, eth_l3, tos, inner);
977 	}
978 
979 	return 0;
980 }
981 
982 static int
983 mlx5dr_definer_conv_item_ipv6(struct mlx5dr_definer_conv_data *cd,
984 			      struct rte_flow_item *item,
985 			      int item_idx)
986 {
987 	const struct rte_flow_item_ipv6 *m = item->mask;
988 	const struct rte_flow_item_ipv6 *l = item->last;
989 	struct mlx5dr_definer_fc *fc;
990 	bool inner = cd->tunnel;
991 
992 	if (!cd->relaxed) {
993 		fc = &cd->fc[DR_CALC_FNAME(IP_VERSION, inner)];
994 		fc->item_idx = item_idx;
995 		fc->tag_set = &mlx5dr_definer_ipv6_version_set;
996 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
997 		DR_CALC_SET(fc, eth_l2, l3_type, inner);
998 
999 		/* Overwrite - Unset ethertype if present */
1000 		memset(&cd->fc[DR_CALC_FNAME(ETH_TYPE, inner)], 0, sizeof(*fc));
1001 	}
1002 
1003 	if (!m)
1004 		return 0;
1005 
1006 	if (m->has_hop_ext || m->has_route_ext || m->has_auth_ext ||
1007 	    m->has_esp_ext || m->has_dest_ext || m->has_mobil_ext ||
1008 	    m->has_hip_ext || m->has_shim6_ext ||
1009 	    (l && (l->has_frag_ext || l->hdr.vtc_flow || l->hdr.proto ||
1010 		   !is_mem_zero(l->hdr.src_addr, 16) ||
1011 		   !is_mem_zero(l->hdr.dst_addr, 16)))) {
1012 		rte_errno = ENOTSUP;
1013 		return rte_errno;
1014 	}
1015 
1016 	if (m->has_frag_ext) {
1017 		fc = &cd->fc[DR_CALC_FNAME(IP_FRAG, inner)];
1018 		fc->item_idx = item_idx;
1019 		fc->tag_set = &mlx5dr_definer_ipv6_frag_set;
1020 		DR_CALC_SET(fc, eth_l4, ip_fragmented, inner);
1021 	}
1022 
1023 	if (DR_GET(header_ipv6_vtc, &m->hdr.vtc_flow, version)) {
1024 		fc = &cd->fc[DR_CALC_FNAME(IP_VERSION, inner)];
1025 		fc->item_idx = item_idx;
1026 		fc->tag_set = &mlx5dr_definer_ipv6_version_set;
1027 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
1028 		DR_CALC_SET(fc, eth_l2, l3_type, inner);
1029 	}
1030 
1031 	if (DR_GET(header_ipv6_vtc, &m->hdr.vtc_flow, tos)) {
1032 		fc = &cd->fc[DR_CALC_FNAME(IP_TOS, inner)];
1033 		fc->item_idx = item_idx;
1034 		fc->tag_set = &mlx5dr_definer_ipv6_tos_set;
1035 		DR_CALC_SET(fc, eth_l3, tos, inner);
1036 	}
1037 
1038 	if (DR_GET(header_ipv6_vtc, &m->hdr.vtc_flow, flow_label)) {
1039 		fc = &cd->fc[DR_CALC_FNAME(IPV6_FLOW_LABEL, inner)];
1040 		fc->item_idx = item_idx;
1041 		fc->tag_set = &mlx5dr_definer_ipv6_flow_label_set;
1042 		DR_CALC_SET(fc, eth_l3, flow_label, inner);
1043 	}
1044 
1045 	if (m->hdr.payload_len) {
1046 		fc = &cd->fc[DR_CALC_FNAME(IP_LEN, inner)];
1047 		fc->item_idx = item_idx;
1048 		fc->is_range = l && l->hdr.payload_len;
1049 		fc->tag_set = &mlx5dr_definer_ipv6_payload_len_set;
1050 		DR_CALC_SET(fc, eth_l3, ipv6_payload_length, inner);
1051 	}
1052 
1053 	if (m->hdr.proto) {
1054 		fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)];
1055 		fc->item_idx = item_idx;
1056 		fc->tag_set = &mlx5dr_definer_ipv6_proto_set;
1057 		DR_CALC_SET(fc, eth_l3, protocol_next_header, inner);
1058 	}
1059 
1060 	if (m->hdr.hop_limits) {
1061 		fc = &cd->fc[DR_CALC_FNAME(IP_TTL, inner)];
1062 		fc->item_idx = item_idx;
1063 		fc->is_range = l && l->hdr.hop_limits;
1064 		fc->tag_set = &mlx5dr_definer_ipv6_hop_limits_set;
1065 		DR_CALC_SET(fc, eth_l3, time_to_live_hop_limit, inner);
1066 	}
1067 
1068 	if (!is_mem_zero(m->hdr.src_addr, 4)) {
1069 		fc = &cd->fc[DR_CALC_FNAME(IPV6_SRC_127_96, inner)];
1070 		fc->item_idx = item_idx;
1071 		fc->tag_set = &mlx5dr_definer_ipv6_src_addr_127_96_set;
1072 		DR_CALC_SET(fc, ipv6_src, ipv6_address_127_96, inner);
1073 	}
1074 
1075 	if (!is_mem_zero(m->hdr.src_addr + 4, 4)) {
1076 		fc = &cd->fc[DR_CALC_FNAME(IPV6_SRC_95_64, inner)];
1077 		fc->item_idx = item_idx;
1078 		fc->tag_set = &mlx5dr_definer_ipv6_src_addr_95_64_set;
1079 		DR_CALC_SET(fc, ipv6_src, ipv6_address_95_64, inner);
1080 	}
1081 
1082 	if (!is_mem_zero(m->hdr.src_addr + 8, 4)) {
1083 		fc = &cd->fc[DR_CALC_FNAME(IPV6_SRC_63_32, inner)];
1084 		fc->item_idx = item_idx;
1085 		fc->tag_set = &mlx5dr_definer_ipv6_src_addr_63_32_set;
1086 		DR_CALC_SET(fc, ipv6_src, ipv6_address_63_32, inner);
1087 	}
1088 
1089 	if (!is_mem_zero(m->hdr.src_addr + 12, 4)) {
1090 		fc = &cd->fc[DR_CALC_FNAME(IPV6_SRC_31_0, inner)];
1091 		fc->item_idx = item_idx;
1092 		fc->tag_set = &mlx5dr_definer_ipv6_src_addr_31_0_set;
1093 		DR_CALC_SET(fc, ipv6_src, ipv6_address_31_0, inner);
1094 	}
1095 
1096 	if (!is_mem_zero(m->hdr.dst_addr, 4)) {
1097 		fc = &cd->fc[DR_CALC_FNAME(IPV6_DST_127_96, inner)];
1098 		fc->item_idx = item_idx;
1099 		fc->tag_set = &mlx5dr_definer_ipv6_dst_addr_127_96_set;
1100 		DR_CALC_SET(fc, ipv6_dst, ipv6_address_127_96, inner);
1101 	}
1102 
1103 	if (!is_mem_zero(m->hdr.dst_addr + 4, 4)) {
1104 		fc = &cd->fc[DR_CALC_FNAME(IPV6_DST_95_64, inner)];
1105 		fc->item_idx = item_idx;
1106 		fc->tag_set = &mlx5dr_definer_ipv6_dst_addr_95_64_set;
1107 		DR_CALC_SET(fc, ipv6_dst, ipv6_address_95_64, inner);
1108 	}
1109 
1110 	if (!is_mem_zero(m->hdr.dst_addr + 8, 4)) {
1111 		fc = &cd->fc[DR_CALC_FNAME(IPV6_DST_63_32, inner)];
1112 		fc->item_idx = item_idx;
1113 		fc->tag_set = &mlx5dr_definer_ipv6_dst_addr_63_32_set;
1114 		DR_CALC_SET(fc, ipv6_dst, ipv6_address_63_32, inner);
1115 	}
1116 
1117 	if (!is_mem_zero(m->hdr.dst_addr + 12, 4)) {
1118 		fc = &cd->fc[DR_CALC_FNAME(IPV6_DST_31_0, inner)];
1119 		fc->item_idx = item_idx;
1120 		fc->tag_set = &mlx5dr_definer_ipv6_dst_addr_31_0_set;
1121 		DR_CALC_SET(fc, ipv6_dst, ipv6_address_31_0, inner);
1122 	}
1123 
1124 	return 0;
1125 }
1126 
1127 static int
1128 mlx5dr_definer_conv_item_udp(struct mlx5dr_definer_conv_data *cd,
1129 			     struct rte_flow_item *item,
1130 			     int item_idx)
1131 {
1132 	const struct rte_flow_item_udp *m = item->mask;
1133 	const struct rte_flow_item_udp *l = item->last;
1134 	struct mlx5dr_definer_fc *fc;
1135 	bool inner = cd->tunnel;
1136 
1137 	/* Set match on L4 type UDP */
1138 	if (!cd->relaxed) {
1139 		fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)];
1140 		if (!fc->not_overwrite) {
1141 			fc->item_idx = item_idx;
1142 			fc->tag_set = &mlx5dr_definer_udp_protocol_set;
1143 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
1144 			DR_CALC_SET(fc, eth_l2, l4_type_bwc, inner);
1145 		}
1146 	}
1147 
1148 	if (!m)
1149 		return 0;
1150 
1151 	if (m->hdr.dgram_cksum || m->hdr.dgram_len) {
1152 		rte_errno = ENOTSUP;
1153 		return rte_errno;
1154 	}
1155 
1156 	if (m->hdr.src_port) {
1157 		fc = &cd->fc[DR_CALC_FNAME(L4_SPORT, inner)];
1158 		fc->item_idx = item_idx;
1159 		fc->is_range = l && l->hdr.src_port;
1160 		fc->tag_set = &mlx5dr_definer_udp_src_port_set;
1161 		DR_CALC_SET(fc, eth_l4, source_port, inner);
1162 	}
1163 
1164 	if (m->hdr.dst_port) {
1165 		fc = &cd->fc[DR_CALC_FNAME(L4_DPORT, inner)];
1166 		fc->item_idx = item_idx;
1167 		fc->is_range = l && l->hdr.dst_port;
1168 		fc->tag_set = &mlx5dr_definer_udp_dst_port_set;
1169 		DR_CALC_SET(fc, eth_l4, destination_port, inner);
1170 	}
1171 
1172 	return 0;
1173 }
1174 
1175 static int
1176 mlx5dr_definer_conv_item_tcp(struct mlx5dr_definer_conv_data *cd,
1177 			     struct rte_flow_item *item,
1178 			     int item_idx)
1179 {
1180 	const struct rte_flow_item_tcp *m = item->mask;
1181 	const struct rte_flow_item_tcp *l = item->last;
1182 	struct mlx5dr_definer_fc *fc;
1183 	bool inner = cd->tunnel;
1184 
1185 	/* Overwrite match on L4 type TCP */
1186 	if (!cd->relaxed) {
1187 		fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)];
1188 		if (!fc->not_overwrite) {
1189 			fc->item_idx = item_idx;
1190 			fc->tag_set = &mlx5dr_definer_tcp_protocol_set;
1191 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
1192 			DR_CALC_SET(fc, eth_l2, l4_type_bwc, inner);
1193 		}
1194 	}
1195 
1196 	if (!m)
1197 		return 0;
1198 
1199 	if (m->hdr.sent_seq || m->hdr.recv_ack || m->hdr.data_off ||
1200 	    m->hdr.rx_win || m->hdr.cksum || m->hdr.tcp_urp) {
1201 		rte_errno = ENOTSUP;
1202 		return rte_errno;
1203 	}
1204 
1205 	if (m->hdr.tcp_flags) {
1206 		fc = &cd->fc[DR_CALC_FNAME(TCP_FLAGS, inner)];
1207 		fc->item_idx = item_idx;
1208 		fc->is_range = l && l->hdr.tcp_flags;
1209 		fc->tag_set = &mlx5dr_definer_tcp_flags_set;
1210 		DR_CALC_SET(fc, eth_l4, tcp_flags, inner);
1211 	}
1212 
1213 	if (m->hdr.src_port) {
1214 		fc = &cd->fc[DR_CALC_FNAME(L4_SPORT, inner)];
1215 		fc->item_idx = item_idx;
1216 		fc->is_range = l && l->hdr.src_port;
1217 		fc->tag_set = &mlx5dr_definer_tcp_src_port_set;
1218 		DR_CALC_SET(fc, eth_l4, source_port, inner);
1219 	}
1220 
1221 	if (m->hdr.dst_port) {
1222 		fc = &cd->fc[DR_CALC_FNAME(L4_DPORT, inner)];
1223 		fc->item_idx = item_idx;
1224 		fc->is_range = l && l->hdr.dst_port;
1225 		fc->tag_set = &mlx5dr_definer_tcp_dst_port_set;
1226 		DR_CALC_SET(fc, eth_l4, destination_port, inner);
1227 	}
1228 
1229 	return 0;
1230 }
1231 
1232 static int
1233 mlx5dr_definer_conv_item_gtp(struct mlx5dr_definer_conv_data *cd,
1234 			     struct rte_flow_item *item,
1235 			     int item_idx)
1236 {
1237 	struct mlx5dr_cmd_query_caps *caps = cd->ctx->caps;
1238 	const struct rte_flow_item_gtp *m = item->mask;
1239 	struct mlx5dr_definer_fc *fc;
1240 
1241 	if (cd->tunnel) {
1242 		DR_LOG(ERR, "Inner GTPU item not supported");
1243 		rte_errno = ENOTSUP;
1244 		return rte_errno;
1245 	}
1246 
1247 	/* Set match on GTPU UDP dest port if dport was not already matched */
1248 	fc = &cd->fc[DR_CALC_FNAME(L4_DPORT, false)];
1249 	if (!fc->tag_set && !cd->relaxed) {
1250 		fc->item_idx = item_idx;
1251 		fc->tag_set = &mlx5dr_definer_gtp_udp_port_set;
1252 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
1253 		DR_CALC_SET(fc, eth_l4, destination_port, false);
1254 	}
1255 
1256 	if (!m)
1257 		return 0;
1258 
1259 	if (m->hdr.plen || m->hdr.gtp_hdr_info & ~MLX5DR_DEFINER_GTP_EXT_HDR_BIT) {
1260 		rte_errno = ENOTSUP;
1261 		return rte_errno;
1262 	}
1263 
1264 	if (m->hdr.teid) {
1265 		if (!(caps->flex_protocols & MLX5_HCA_FLEX_GTPU_TEID_ENABLED)) {
1266 			rte_errno = ENOTSUP;
1267 			return rte_errno;
1268 		}
1269 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GTP_TEID];
1270 		fc->item_idx = item_idx;
1271 		fc->tag_set = &mlx5dr_definer_gtp_teid_set;
1272 		fc->bit_mask = __mlx5_mask(header_gtp, teid);
1273 		fc->byte_off = caps->format_select_gtpu_dw_1 * DW_SIZE;
1274 	}
1275 
1276 	if (m->hdr.gtp_hdr_info) {
1277 		if (!(caps->flex_protocols & MLX5_HCA_FLEX_GTPU_DW_0_ENABLED)) {
1278 			rte_errno = ENOTSUP;
1279 			return rte_errno;
1280 		}
1281 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GTP_EXT_FLAG];
1282 		fc->item_idx = item_idx;
1283 		fc->tag_set = &mlx5dr_definer_gtp_ext_flag_set;
1284 		fc->bit_mask = __mlx5_mask(header_gtp, ext_hdr_flag);
1285 		fc->bit_off = __mlx5_dw_bit_off(header_gtp, ext_hdr_flag);
1286 		fc->byte_off = caps->format_select_gtpu_dw_0 * DW_SIZE;
1287 	}
1288 
1289 
1290 	if (m->hdr.msg_type) {
1291 		if (!(caps->flex_protocols & MLX5_HCA_FLEX_GTPU_DW_0_ENABLED)) {
1292 			rte_errno = ENOTSUP;
1293 			return rte_errno;
1294 		}
1295 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GTP_MSG_TYPE];
1296 		fc->item_idx = item_idx;
1297 		fc->tag_set = &mlx5dr_definer_gtp_msg_type_set;
1298 		fc->bit_mask = __mlx5_mask(header_gtp, msg_type);
1299 		fc->bit_off = __mlx5_dw_bit_off(header_gtp, msg_type);
1300 		fc->byte_off = caps->format_select_gtpu_dw_0 * DW_SIZE;
1301 	}
1302 
1303 	return 0;
1304 }
1305 
1306 static int
1307 mlx5dr_definer_conv_item_gtp_psc(struct mlx5dr_definer_conv_data *cd,
1308 				 struct rte_flow_item *item,
1309 				 int item_idx)
1310 {
1311 	struct mlx5dr_cmd_query_caps *caps = cd->ctx->caps;
1312 	const struct rte_flow_item_gtp_psc *m = item->mask;
1313 	struct mlx5dr_definer_fc *fc;
1314 
1315 	/* Overwrite GTP extension flag to be 1 */
1316 	if (!cd->relaxed) {
1317 		if (!(caps->flex_protocols & MLX5_HCA_FLEX_GTPU_DW_0_ENABLED)) {
1318 			rte_errno = ENOTSUP;
1319 			return rte_errno;
1320 		}
1321 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GTP_EXT_FLAG];
1322 		fc->item_idx = item_idx;
1323 		fc->tag_set = &mlx5dr_definer_ones_set;
1324 		fc->bit_mask = __mlx5_mask(header_gtp, ext_hdr_flag);
1325 		fc->bit_off = __mlx5_dw_bit_off(header_gtp, ext_hdr_flag);
1326 		fc->byte_off = caps->format_select_gtpu_dw_0 * DW_SIZE;
1327 	}
1328 
1329 	/* Overwrite next extension header type */
1330 	if (!cd->relaxed) {
1331 		if (!(caps->flex_protocols & MLX5_HCA_FLEX_GTPU_DW_2_ENABLED)) {
1332 			rte_errno = ENOTSUP;
1333 			return rte_errno;
1334 		}
1335 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GTP_NEXT_EXT_HDR];
1336 		fc->item_idx = item_idx;
1337 		fc->tag_set = &mlx5dr_definer_gtp_next_ext_hdr_set;
1338 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
1339 		fc->bit_mask = __mlx5_mask(header_opt_gtp, next_ext_hdr_type);
1340 		fc->bit_off = __mlx5_dw_bit_off(header_opt_gtp, next_ext_hdr_type);
1341 		fc->byte_off = caps->format_select_gtpu_dw_2 * DW_SIZE;
1342 	}
1343 
1344 	if (!m)
1345 		return 0;
1346 
1347 	if (m->hdr.type) {
1348 		if (!(caps->flex_protocols & MLX5_HCA_FLEX_GTPU_FIRST_EXT_DW_0_ENABLED)) {
1349 			rte_errno = ENOTSUP;
1350 			return rte_errno;
1351 		}
1352 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GTP_EXT_HDR_PDU];
1353 		fc->item_idx = item_idx;
1354 		fc->tag_set = &mlx5dr_definer_gtp_ext_hdr_pdu_set;
1355 		fc->bit_mask = __mlx5_mask(header_gtp_psc, pdu_type);
1356 		fc->bit_off = __mlx5_dw_bit_off(header_gtp_psc, pdu_type);
1357 		fc->byte_off = caps->format_select_gtpu_ext_dw_0 * DW_SIZE;
1358 	}
1359 
1360 	if (m->hdr.qfi) {
1361 		if (!(caps->flex_protocols & MLX5_HCA_FLEX_GTPU_FIRST_EXT_DW_0_ENABLED)) {
1362 			rte_errno = ENOTSUP;
1363 			return rte_errno;
1364 		}
1365 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GTP_EXT_HDR_QFI];
1366 		fc->item_idx = item_idx;
1367 		fc->tag_set = &mlx5dr_definer_gtp_ext_hdr_qfi_set;
1368 		fc->bit_mask = __mlx5_mask(header_gtp_psc, qfi);
1369 		fc->bit_off = __mlx5_dw_bit_off(header_gtp_psc, qfi);
1370 		fc->byte_off = caps->format_select_gtpu_ext_dw_0 * DW_SIZE;
1371 	}
1372 
1373 	return 0;
1374 }
1375 
1376 static int
1377 mlx5dr_definer_conv_item_port(struct mlx5dr_definer_conv_data *cd,
1378 			      struct rte_flow_item *item,
1379 			      int item_idx)
1380 {
1381 	struct mlx5dr_cmd_query_caps *caps = cd->ctx->caps;
1382 	const struct rte_flow_item_ethdev *m = item->mask;
1383 	struct mlx5dr_definer_fc *fc;
1384 
1385 	if (m->port_id) {
1386 		if (!caps->wire_regc_mask) {
1387 			DR_LOG(ERR, "Port ID item not supported, missing wire REGC mask");
1388 			rte_errno = ENOTSUP;
1389 			return rte_errno;
1390 		}
1391 
1392 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_VPORT_REG_C_0];
1393 		fc->item_idx = item_idx;
1394 		fc->tag_set = &mlx5dr_definer_vport_set;
1395 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
1396 		DR_CALC_SET_HDR(fc, registers, register_c_0);
1397 		fc->bit_off = __builtin_ctz(caps->wire_regc_mask);
1398 		fc->bit_mask = caps->wire_regc_mask >> fc->bit_off;
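		/* For example (illustrative values), a wire_regc_mask of 0xffff0000
		 * gives bit_off 16 and bit_mask 0xffff, so the match uses the upper
		 * 16 bits of register_c_0 that carry the vport regc value.
		 */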
1399 	} else {
1400 		DR_LOG(ERR, "Port ID item mask must specify ID mask");
1401 		rte_errno = EINVAL;
1402 		return rte_errno;
1403 	}
1404 
1405 	return 0;
1406 }
1407 
1408 static int
1409 mlx5dr_definer_conv_item_vxlan(struct mlx5dr_definer_conv_data *cd,
1410 			       struct rte_flow_item *item,
1411 			       int item_idx)
1412 {
1413 	const struct rte_flow_item_vxlan *m = item->mask;
1414 	struct mlx5dr_definer_fc *fc;
1415 	bool inner = cd->tunnel;
1416 
1417 	if (inner) {
1418 		DR_LOG(ERR, "Inner VXLAN item not supported");
1419 		rte_errno = ENOTSUP;
1420 		return rte_errno;
1421 	}
1422 
1423 	/* In order to match on VXLAN we must match on ip_protocol and l4_dport */
1424 	if (!cd->relaxed) {
1425 		fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)];
1426 		if (!fc->tag_set) {
1427 			fc->item_idx = item_idx;
1428 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
1429 			fc->tag_set = &mlx5dr_definer_udp_protocol_set;
1430 			DR_CALC_SET(fc, eth_l2, l4_type_bwc, inner);
1431 		}
1432 
1433 		fc = &cd->fc[DR_CALC_FNAME(L4_DPORT, inner)];
1434 		if (!fc->tag_set) {
1435 			fc->item_idx = item_idx;
1436 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
1437 			fc->tag_set = &mlx5dr_definer_vxlan_udp_port_set;
1438 			DR_CALC_SET(fc, eth_l4, destination_port, inner);
1439 		}
1440 	}
1441 
1442 	if (!m)
1443 		return 0;
1444 
1445 	if (m->flags) {
1446 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_VXLAN_FLAGS];
1447 		fc->item_idx = item_idx;
1448 		fc->tag_set = &mlx5dr_definer_vxlan_flags_set;
1449 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_0);
1450 		fc->bit_mask = __mlx5_mask(header_vxlan, flags);
1451 		fc->bit_off = __mlx5_dw_bit_off(header_vxlan, flags);
1452 	}
1453 
1454 	if (!is_mem_zero(m->vni, 3)) {
1455 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_VXLAN_VNI];
1456 		fc->item_idx = item_idx;
1457 		fc->tag_set = &mlx5dr_definer_vxlan_vni_set;
1458 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_1);
1459 		fc->bit_mask = __mlx5_mask(header_vxlan, vni);
1460 		fc->bit_off = __mlx5_dw_bit_off(header_vxlan, vni);
1461 	}
1462 
1463 	return 0;
1464 }
1465 
1466 static int
1467 mlx5dr_definer_conv_item_mpls(struct mlx5dr_definer_conv_data *cd,
1468 			      struct rte_flow_item *item,
1469 			      int item_idx)
1470 {
1471 	const struct rte_flow_item_mpls *m = item->mask;
1472 	struct mlx5dr_definer_fc *fc;
1473 	bool inner = cd->tunnel;
1474 
1475 	if (inner) {
1476 		DR_LOG(ERR, "Inner MPLS item not supported");
1477 		rte_errno = ENOTSUP;
1478 		return rte_errno;
1479 	}
1480 
1481 	if (!cd->relaxed) {
1482 		/* In order to match on MPLS we must match on ip_protocol and l4_dport. */
1483 		fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, false)];
1484 		if (!fc->tag_set) {
1485 			fc->item_idx = item_idx;
1486 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
1487 			fc->tag_set = &mlx5dr_definer_udp_protocol_set;
1488 			DR_CALC_SET(fc, eth_l2, l4_type_bwc, false);
1489 		}
1490 
1491 		/* Currently support only MPLSoUDP */
1492 		fc = &cd->fc[DR_CALC_FNAME(L4_DPORT, false)];
1493 		if (!fc->tag_set) {
1494 			fc->item_idx = item_idx;
1495 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
1496 			fc->tag_set = &mlx5dr_definer_mpls_udp_port_set;
1497 			DR_CALC_SET(fc, eth_l4, destination_port, false);
1498 		}
1499 	}
1500 
1501 	if (m && (!is_mem_zero(m->label_tc_s, 3) || m->ttl)) {
1502 		/* According to HW MPLSoUDP is handled as inner */
1503 		fc = mlx5dr_definer_get_mpls_fc(cd, true);
1504 		if (!fc)
1505 			return rte_errno;
1506 
1507 		fc->item_idx = item_idx;
1508 		fc->tag_set = &mlx5dr_definer_mpls_label_set;
1509 	} else { /* Mask relevant oks2 bit, indicates MPLS label exists.
1510 		  * According to HW MPLSoUDP is handled as inner
1511 		  */
1512 		fc = mlx5dr_definer_get_mpls_oks_fc(cd, true);
1513 		if (!fc)
1514 			return rte_errno;
1515 
1516 		fc->item_idx = item_idx;
1517 		fc->tag_set = mlx5dr_definer_ones_set;
1518 	}
1519 
1520 	return 0;
1521 }
1522 
1523 static struct mlx5dr_definer_fc *
1524 mlx5dr_definer_get_register_fc(struct mlx5dr_definer_conv_data *cd, int reg)
1525 {
1526 	struct mlx5dr_definer_fc *fc;
1527 
1528 	switch (reg) {
1529 	case REG_C_0:
1530 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_REG_0];
1531 		DR_CALC_SET_HDR(fc, registers, register_c_0);
1532 		break;
1533 	case REG_C_1:
1534 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_REG_1];
1535 		DR_CALC_SET_HDR(fc, registers, register_c_1);
1536 		break;
1537 	case REG_C_2:
1538 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_REG_2];
1539 		DR_CALC_SET_HDR(fc, registers, register_c_2);
1540 		break;
1541 	case REG_C_3:
1542 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_REG_3];
1543 		DR_CALC_SET_HDR(fc, registers, register_c_3);
1544 		break;
1545 	case REG_C_4:
1546 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_REG_4];
1547 		DR_CALC_SET_HDR(fc, registers, register_c_4);
1548 		break;
1549 	case REG_C_5:
1550 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_REG_5];
1551 		DR_CALC_SET_HDR(fc, registers, register_c_5);
1552 		break;
1553 	case REG_C_6:
1554 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_REG_6];
1555 		DR_CALC_SET_HDR(fc, registers, register_c_6);
1556 		break;
1557 	case REG_C_7:
1558 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_REG_7];
1559 		DR_CALC_SET_HDR(fc, registers, register_c_7);
1560 		break;
1561 	case REG_C_8:
1562 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_REG_8];
1563 		DR_CALC_SET_HDR(fc, registers, register_c_8);
1564 		break;
1565 	case REG_C_9:
1566 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_REG_9];
1567 		DR_CALC_SET_HDR(fc, registers, register_c_9);
1568 		break;
1569 	case REG_C_10:
1570 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_REG_10];
1571 		DR_CALC_SET_HDR(fc, registers, register_c_10);
1572 		break;
1573 	case REG_C_11:
1574 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_REG_11];
1575 		DR_CALC_SET_HDR(fc, registers, register_c_11);
1576 		break;
1577 	case REG_A:
1578 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_REG_A];
1579 		DR_CALC_SET_HDR(fc, metadata, general_purpose);
1580 		break;
1581 	case REG_B:
1582 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_REG_B];
1583 		DR_CALC_SET_HDR(fc, metadata, metadata_to_cqe);
1584 		break;
1585 	default:
1586 		rte_errno = ENOTSUP;
1587 		return NULL;
1588 	}
1589 
1590 	return fc;
1591 }
1592 
1593 static int
1594 mlx5dr_definer_conv_item_tag(struct mlx5dr_definer_conv_data *cd,
1595 			     struct rte_flow_item *item,
1596 			     int item_idx)
1597 {
1598 	const struct rte_flow_item_tag *m = item->mask;
1599 	const struct rte_flow_item_tag *v = item->spec;
1600 	const struct rte_flow_item_tag *l = item->last;
1601 	struct mlx5dr_definer_fc *fc;
1602 	int reg;
1603 
1604 	if (!m || !v)
1605 		return 0;
1606 
1607 	if (item->type == RTE_FLOW_ITEM_TYPE_TAG)
1608 		reg = flow_hw_get_reg_id_from_ctx(cd->ctx,
1609 						  RTE_FLOW_ITEM_TYPE_TAG,
1610 						  v->index);
1611 	else
1612 		reg = (int)v->index;
1613 
1614 	if (reg <= 0) {
1615 		DR_LOG(ERR, "Invalid register for item tag");
1616 		rte_errno = EINVAL;
1617 		return rte_errno;
1618 	}
1619 
1620 	fc = mlx5dr_definer_get_register_fc(cd, reg);
1621 	if (!fc)
1622 		return rte_errno;
1623 
1624 	fc->item_idx = item_idx;
1625 	fc->is_range = l && l->index;
1626 	fc->tag_set = &mlx5dr_definer_tag_set;
1627 
1628 	return 0;
1629 }
1630 
1631 static void
1632 mlx5dr_definer_quota_set(struct mlx5dr_definer_fc *fc,
1633 			 const void *item_data, uint8_t *tag)
1634 {
1635 	/**
1636 	 * MLX5 PMD implements QUOTA with Meter object.
1637 	 * PMD Quota action translation implicitly increments
1638 	 * Meter register value after HW assigns it.
1639 	 * Meter register values are:
1640 	 *            HW     QUOTA(HW+1)  QUOTA state
1641 	 * RED        0        1 (01b)       BLOCK
1642 	 * YELLOW     1        2 (10b)       PASS
1643 	 * GREEN      2        3 (11b)       PASS
1644 	 *
1645 	 * Quota item checks Meter register bit 1 value to determine state:
1646 	 *            SPEC       MASK
1647 	 * PASS     2 (10b)    2 (10b)
1648 	 * BLOCK    0 (00b)    2 (10b)
1649 	 *
1650 	 * item_data is NULL when template quota item is non-masked:
1651 	 * .. / quota / ..
1652 	 */
1653 
1654 	const struct rte_flow_item_quota *quota = item_data;
1655 	uint32_t val;
1656 
1657 	if (quota && quota->state == RTE_FLOW_QUOTA_STATE_BLOCK)
1658 		val = MLX5DR_DEFINER_QUOTA_BLOCK;
1659 	else
1660 		val = MLX5DR_DEFINER_QUOTA_PASS;
1661 
1662 	DR_SET(tag, val, fc->byte_off, fc->bit_off, fc->bit_mask);
1663 }
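/*
 * Illustrative note (not driver logic): for a non-masked ".. / quota / .."
 * template item the setter above receives item_data == NULL and therefore
 * writes the PASS pattern. With the meter register mapped by
 * mlx5dr_definer_conv_item_quota() below, the resulting tag write is roughly:
 *
 *	val = MLX5DR_DEFINER_QUOTA_PASS;	// 2 == 10b
 *	DR_SET(tag, val, fc->byte_off, fc->bit_off, fc->bit_mask);
 *
 * i.e. only bit 1 of the meter register is meaningful, matching the
 * SPEC/MASK table in the comment above.
 */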
1664 
1665 static int
1666 mlx5dr_definer_conv_item_quota(struct mlx5dr_definer_conv_data *cd,
1667 			       __rte_unused struct rte_flow_item *item,
1668 			       int item_idx)
1669 {
1670 	int mtr_reg =
1671 	flow_hw_get_reg_id_from_ctx(cd->ctx, RTE_FLOW_ITEM_TYPE_METER_COLOR,
1672 				    0);
1673 	struct mlx5dr_definer_fc *fc;
1674 
1675 	if (mtr_reg < 0) {
1676 		rte_errno = EINVAL;
1677 		return rte_errno;
1678 	}
1679 
1680 	fc = mlx5dr_definer_get_register_fc(cd, mtr_reg);
1681 	if (!fc)
1682 		return rte_errno;
1683 
1684 	fc->tag_set = &mlx5dr_definer_quota_set;
1685 	fc->item_idx = item_idx;
1686 	return 0;
1687 }
1688 
1689 static int
1690 mlx5dr_definer_conv_item_metadata(struct mlx5dr_definer_conv_data *cd,
1691 				  struct rte_flow_item *item,
1692 				  int item_idx)
1693 {
1694 	const struct rte_flow_item_meta *m = item->mask;
1695 	const struct rte_flow_item_meta *l = item->last;
1696 	struct mlx5dr_definer_fc *fc;
1697 	int reg;
1698 
1699 	if (!m)
1700 		return 0;
1701 
1702 	reg = flow_hw_get_reg_id_from_ctx(cd->ctx, RTE_FLOW_ITEM_TYPE_META, -1);
1703 	if (reg <= 0) {
1704 		DR_LOG(ERR, "Invalid register for item metadata");
1705 		rte_errno = EINVAL;
1706 		return rte_errno;
1707 	}
1708 
1709 	fc = mlx5dr_definer_get_register_fc(cd, reg);
1710 	if (!fc)
1711 		return rte_errno;
1712 
1713 	fc->item_idx = item_idx;
1714 	fc->is_range = l && l->data;
1715 	fc->tag_set = &mlx5dr_definer_metadata_set;
1716 
1717 	return 0;
1718 }
1719 
1720 static int
1721 mlx5dr_definer_conv_item_sq(struct mlx5dr_definer_conv_data *cd,
1722 			    struct rte_flow_item *item,
1723 			    int item_idx)
1724 {
1725 	const struct mlx5_rte_flow_item_sq *m = item->mask;
1726 	struct mlx5dr_definer_fc *fc;
1727 
1728 	if (!m)
1729 		return 0;
1730 
1731 	if (m->queue) {
1732 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_SOURCE_QP];
1733 		fc->item_idx = item_idx;
1734 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
1735 		fc->tag_set = &mlx5dr_definer_source_qp_set;
1736 		DR_CALC_SET_HDR(fc, source_qp_gvmi, source_qp);
1737 	}
1738 
1739 	return 0;
1740 }
1741 
1742 static int
1743 mlx5dr_definer_conv_item_gre(struct mlx5dr_definer_conv_data *cd,
1744 			     struct rte_flow_item *item,
1745 			     int item_idx)
1746 {
1747 	const struct rte_flow_item_gre *m = item->mask;
1748 	struct mlx5dr_definer_fc *fc;
1749 	bool inner = cd->tunnel;
1750 
1751 	if (inner) {
1752 		DR_LOG(ERR, "Inner GRE item not supported");
1753 		rte_errno = ENOTSUP;
1754 		return rte_errno;
1755 	}
1756 
1757 	if (!cd->relaxed) {
1758 		fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)];
1759 		fc->item_idx = item_idx;
1760 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
1761 		fc->tag_set = &mlx5dr_definer_ipv4_protocol_gre_set;
1762 		DR_CALC_SET(fc, eth_l3, protocol_next_header, inner);
1763 	}
1764 
1765 	if (!m)
1766 		return 0;
1767 
1768 	if (m->c_rsvd0_ver) {
1769 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GRE_C_VER];
1770 		fc->item_idx = item_idx;
1771 		fc->tag_set = &mlx5dr_definer_gre_c_ver_set;
1772 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_0);
1773 		fc->bit_mask = __mlx5_mask(header_gre, c_rsvd0_ver);
1774 		fc->bit_off = __mlx5_dw_bit_off(header_gre, c_rsvd0_ver);
1775 	}
1776 
1777 	if (m->protocol) {
1778 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GRE_PROTOCOL];
1779 		fc->item_idx = item_idx;
1780 		fc->tag_set = &mlx5dr_definer_gre_protocol_type_set;
1781 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_0);
1782 		fc->byte_off += MLX5_BYTE_OFF(header_gre, gre_protocol);
1783 		fc->bit_mask = __mlx5_mask(header_gre, gre_protocol);
1784 		fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_protocol);
1785 	}
1786 
1787 	return 0;
1788 }
1789 
1790 static int
1791 mlx5dr_definer_conv_item_gre_opt(struct mlx5dr_definer_conv_data *cd,
1792 				 struct rte_flow_item *item,
1793 				 int item_idx)
1794 {
1795 	const struct rte_flow_item_gre_opt *m = item->mask;
1796 	struct mlx5dr_definer_fc *fc;
1797 
1798 	if (!cd->relaxed) {
1799 		fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, false)];
1800 		if (!fc->tag_set) {
1801 			fc->item_idx = item_idx;
1802 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
1803 			fc->tag_set = &mlx5dr_definer_ipv4_protocol_gre_set;
1804 			DR_CALC_SET(fc, eth_l3, protocol_next_header, false);
1805 		}
1806 	}
1807 
1808 	if (!m)
1809 		return 0;
1810 
1811 	if (m->checksum_rsvd.checksum) {
1812 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GRE_OPT_CHECKSUM];
1813 		fc->item_idx = item_idx;
1814 		fc->tag_set = &mlx5dr_definer_gre_opt_checksum_set;
1815 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_1);
1816 	}
1817 
1818 	if (m->key.key) {
1819 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GRE_OPT_KEY];
1820 		fc->item_idx = item_idx;
1821 		fc->tag_set = &mlx5dr_definer_gre_opt_key_set;
1822 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_2);
1823 	}
1824 
1825 	if (m->sequence.sequence) {
1826 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GRE_OPT_SEQ];
1827 		fc->item_idx = item_idx;
1828 		fc->tag_set = &mlx5dr_definer_gre_opt_seq_set;
1829 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_3);
1830 	}
1831 
1832 	return 0;
1833 }
1834 
1835 static int
1836 mlx5dr_definer_conv_item_gre_key(struct mlx5dr_definer_conv_data *cd,
1837 				 struct rte_flow_item *item,
1838 				 int item_idx)
1839 {
1840 	const rte_be32_t *m = item->mask;
1841 	struct mlx5dr_definer_fc *fc;
1842 
1843 	if (!cd->relaxed) {
1844 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GRE_KEY_PRESENT];
1845 		fc->item_idx = item_idx;
1846 		fc->tag_set = &mlx5dr_definer_ones_set;
1847 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_0);
1848 		fc->bit_mask = __mlx5_mask(header_gre, gre_k_present);
1849 		fc->bit_off = __mlx5_dw_bit_off(header_gre, gre_k_present);
1850 
1851 		fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, false)];
1852 		if (!fc->tag_set) {
1853 			fc->item_idx = item_idx;
1854 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
1855 			fc->tag_set = &mlx5dr_definer_ipv4_protocol_gre_set;
1856 			DR_CALC_SET(fc, eth_l3, protocol_next_header, false);
1857 		}
1858 	}
1859 
1860 	if (!m)
1861 		return 0;
1862 
1863 	if (*m) {
1864 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GRE_OPT_KEY];
1865 		fc->item_idx = item_idx;
1866 		fc->tag_set = &mlx5dr_definer_gre_key_set;
1867 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_2);
1868 	}
1869 
1870 	return 0;
1871 }
1872 
1873 static int
1874 mlx5dr_definer_conv_item_ptype(struct mlx5dr_definer_conv_data *cd,
1875 			       struct rte_flow_item *item,
1876 			       int item_idx)
1877 {
1878 	const struct rte_flow_item_ptype *m = item->mask;
1879 	struct mlx5dr_definer_fc *fc;
1880 
1881 	if (!m)
1882 		return 0;
1883 
1884 	if (!(m->packet_type &
1885 	      (RTE_PTYPE_L2_MASK | RTE_PTYPE_L3_MASK | RTE_PTYPE_L4_MASK | RTE_PTYPE_TUNNEL_MASK |
1886 	       RTE_PTYPE_INNER_L2_MASK | RTE_PTYPE_INNER_L3_MASK | RTE_PTYPE_INNER_L4_MASK))) {
1887 		rte_errno = ENOTSUP;
1888 		return rte_errno;
1889 	}
1890 
1891 	if (m->packet_type & RTE_PTYPE_L2_MASK) {
1892 		fc = &cd->fc[DR_CALC_FNAME(PTYPE_L2, false)];
1893 		fc->item_idx = item_idx;
1894 		fc->tag_set = &mlx5dr_definer_ptype_l2_set;
1895 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
1896 		DR_CALC_SET(fc, eth_l2, first_vlan_qualifier, false);
1897 	}
1898 
1899 	if (m->packet_type & RTE_PTYPE_INNER_L2_MASK) {
1900 		fc = &cd->fc[DR_CALC_FNAME(PTYPE_L2, true)];
1901 		fc->item_idx = item_idx;
1902 		fc->tag_set = &mlx5dr_definer_ptype_l2_set;
1903 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
1904 		DR_CALC_SET(fc, eth_l2, first_vlan_qualifier, true);
1905 	}
1906 
1907 	if (m->packet_type & RTE_PTYPE_L3_MASK) {
1908 		fc = &cd->fc[DR_CALC_FNAME(PTYPE_L3, false)];
1909 		fc->item_idx = item_idx;
1910 		fc->tag_set = &mlx5dr_definer_ptype_l3_set;
1911 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
1912 		DR_CALC_SET(fc, eth_l2, l3_type, false);
1913 	}
1914 
1915 	if (m->packet_type & RTE_PTYPE_INNER_L3_MASK) {
1916 		fc = &cd->fc[DR_CALC_FNAME(PTYPE_L3, true)];
1917 		fc->item_idx = item_idx;
1918 		fc->tag_set = &mlx5dr_definer_ptype_l3_set;
1919 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
1920 		DR_CALC_SET(fc, eth_l2, l3_type, true);
1921 	}
1922 
1923 	if (m->packet_type & RTE_PTYPE_L4_MASK) {
1924 		/*
1925 		 * Fragmented IP (Internet Protocol) packet type.
1926 		 * Cannot be combined with Layer 4 Types (TCP/UDP).
1927 		 * The exact value must be specified in the mask.
1928 		 */
1929 		if (m->packet_type == RTE_PTYPE_L4_FRAG) {
1930 			fc = &cd->fc[DR_CALC_FNAME(PTYPE_FRAG, false)];
1931 			fc->item_idx = item_idx;
1932 			fc->tag_set = &mlx5dr_definer_ptype_frag_set;
1933 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
1934 			DR_CALC_SET(fc, eth_l2, ip_fragmented, false);
1935 		} else {
1936 			fc = &cd->fc[DR_CALC_FNAME(PTYPE_L4, false)];
1937 			fc->item_idx = item_idx;
1938 			fc->tag_set = &mlx5dr_definer_ptype_l4_set;
1939 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
1940 			DR_CALC_SET(fc, eth_l2, l4_type, false);
1941 		}
1942 	}
1943 
1944 	if (m->packet_type & RTE_PTYPE_INNER_L4_MASK) {
1945 		if (m->packet_type == RTE_PTYPE_INNER_L4_FRAG) {
1946 			fc = &cd->fc[DR_CALC_FNAME(PTYPE_FRAG, true)];
1947 			fc->item_idx = item_idx;
1948 			fc->tag_set = &mlx5dr_definer_ptype_frag_set;
1949 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
1950 			DR_CALC_SET(fc, eth_l2, ip_fragmented, true);
1951 		} else {
1952 			fc = &cd->fc[DR_CALC_FNAME(PTYPE_L4, true)];
1953 			fc->item_idx = item_idx;
1954 			fc->tag_set = &mlx5dr_definer_ptype_l4_set;
1955 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
1956 			DR_CALC_SET(fc, eth_l2, l4_type, true);
1957 		}
1958 	}
1959 
1960 	if (m->packet_type & RTE_PTYPE_TUNNEL_MASK) {
1961 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_PTYPE_TUNNEL];
1962 		fc->item_idx = item_idx;
1963 		fc->tag_set = &mlx5dr_definer_ptype_tunnel_set;
1964 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
1965 		DR_CALC_SET(fc, eth_l2, l4_type_bwc, false);
1966 	}
1967 
1968 	return 0;
1969 }
1970 
1971 static int
1972 mlx5dr_definer_conv_item_integrity(struct mlx5dr_definer_conv_data *cd,
1973 				   struct rte_flow_item *item,
1974 				   int item_idx)
1975 {
1976 	const struct rte_flow_item_integrity *m = item->mask;
1977 	struct mlx5dr_definer_fc *fc;
1978 
1979 	if (!m)
1980 		return 0;
1981 
1982 	if (m->packet_ok || m->l2_ok || m->l2_crc_ok || m->l3_len_ok) {
1983 		rte_errno = ENOTSUP;
1984 		return rte_errno;
1985 	}
1986 
1987 	if (m->l3_ok || m->ipv4_csum_ok || m->l4_ok || m->l4_csum_ok) {
1988 		fc = &cd->fc[DR_CALC_FNAME(INTEGRITY, m->level)];
1989 		fc->item_idx = item_idx;
1990 		fc->tag_set = &mlx5dr_definer_integrity_set;
1991 		DR_CALC_SET_HDR(fc, oks1, oks1_bits);
1992 	}
1993 
1994 	return 0;
1995 }
1996 
1997 static int
1998 mlx5dr_definer_conv_item_conntrack(struct mlx5dr_definer_conv_data *cd,
1999 				   struct rte_flow_item *item,
2000 				   int item_idx)
2001 {
2002 	const struct rte_flow_item_conntrack *m = item->mask;
2003 	struct mlx5dr_definer_fc *fc;
2004 	int reg;
2005 
2006 	if (!m)
2007 		return 0;
2008 
2009 	reg = flow_hw_get_reg_id_from_ctx(cd->ctx, RTE_FLOW_ITEM_TYPE_CONNTRACK,
2010 					  -1);
2011 	if (reg <= 0) {
2012 		DR_LOG(ERR, "Invalid register for item conntrack");
2013 		rte_errno = EINVAL;
2014 		return rte_errno;
2015 	}
2016 
2017 	fc = mlx5dr_definer_get_register_fc(cd, reg);
2018 	if (!fc)
2019 		return rte_errno;
2020 
2021 	fc->item_idx = item_idx;
2022 	fc->tag_mask_set = &mlx5dr_definer_conntrack_mask;
2023 	fc->tag_set = &mlx5dr_definer_conntrack_tag;
2024 
2025 	return 0;
2026 }
2027 
2028 static int
2029 mlx5dr_definer_conv_item_icmp(struct mlx5dr_definer_conv_data *cd,
2030 			      struct rte_flow_item *item,
2031 			      int item_idx)
2032 {
2033 	const struct rte_flow_item_icmp *m = item->mask;
2034 	struct mlx5dr_definer_fc *fc;
2035 	bool inner = cd->tunnel;
2036 
2037 	/* Overwrite match on L4 type ICMP */
2038 	if (!cd->relaxed) {
2039 		fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)];
2040 		fc->item_idx = item_idx;
2041 		fc->tag_set = &mlx5dr_definer_icmp_protocol_set;
2042 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
2043 		DR_CALC_SET(fc, eth_l2, l4_type, inner);
2044 	}
2045 
2046 	if (!m)
2047 		return 0;
2048 
2049 	if (m->hdr.icmp_type || m->hdr.icmp_code || m->hdr.icmp_cksum) {
2050 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_ICMP_DW1];
2051 		fc->item_idx = item_idx;
2052 		fc->tag_set = &mlx5dr_definer_icmp_dw1_set;
2053 		DR_CALC_SET_HDR(fc, tcp_icmp, icmp_dw1);
2054 	}
2055 
2056 	if (m->hdr.icmp_ident || m->hdr.icmp_seq_nb) {
2057 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_ICMP_DW2];
2058 		fc->item_idx = item_idx;
2059 		fc->tag_set = &mlx5dr_definer_icmp_dw2_set;
2060 		DR_CALC_SET_HDR(fc, tcp_icmp, icmp_dw2);
2061 	}
2062 
2063 	return 0;
2064 }
2065 
2066 static int
2067 mlx5dr_definer_conv_item_icmp6(struct mlx5dr_definer_conv_data *cd,
2068 			       struct rte_flow_item *item,
2069 			       int item_idx)
2070 {
2071 	const struct rte_flow_item_icmp6 *m = item->mask;
2072 	struct mlx5dr_definer_fc *fc;
2073 	bool inner = cd->tunnel;
2074 
2075 	/* Overwrite match on L4 type ICMP6 */
2076 	if (!cd->relaxed) {
2077 		fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)];
2078 		fc->item_idx = item_idx;
2079 		fc->tag_set = &mlx5dr_definer_icmp_protocol_set;
2080 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
2081 		DR_CALC_SET(fc, eth_l2, l4_type, inner);
2082 	}
2083 
2084 	if (!m)
2085 		return 0;
2086 
2087 	if (m->type || m->code || m->checksum) {
2088 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_ICMP_DW1];
2089 		fc->item_idx = item_idx;
2090 		fc->tag_set = &mlx5dr_definer_icmp6_dw1_set;
2091 		DR_CALC_SET_HDR(fc, tcp_icmp, icmp_dw1);
2092 	}
2093 
2094 	return 0;
2095 }
2096 
2097 static int
2098 mlx5dr_definer_conv_item_icmp6_echo(struct mlx5dr_definer_conv_data *cd,
2099 				    struct rte_flow_item *item,
2100 				    int item_idx)
2101 {
2102 	const struct rte_flow_item_icmp6_echo *m = item->mask;
2103 	struct mlx5dr_definer_fc *fc;
2104 	bool inner = cd->tunnel;
2105 
2106 	if (!cd->relaxed) {
2107 		/* Overwrite match on L4 type ICMP6 */
2108 		fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)];
2109 		fc->item_idx = item_idx;
2110 		fc->tag_set = &mlx5dr_definer_icmp_protocol_set;
2111 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
2112 		DR_CALC_SET(fc, eth_l2, l4_type, inner);
2113 
2114 		/* Set fixed type and code for icmp6 echo request/reply */
2115 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_ICMP_DW1];
2116 		fc->item_idx = item_idx;
2117 		fc->tag_mask_set = &mlx5dr_definer_icmp6_echo_dw1_mask_set;
2118 		if (item->type == RTE_FLOW_ITEM_TYPE_ICMP6_ECHO_REQUEST)
2119 			fc->tag_set = &mlx5dr_definer_icmp6_echo_request_dw1_set;
2120 		else /* RTE_FLOW_ITEM_TYPE_ICMP6_ECHO_REPLY */
2121 			fc->tag_set = &mlx5dr_definer_icmp6_echo_reply_dw1_set;
2122 		DR_CALC_SET_HDR(fc, tcp_icmp, icmp_dw1);
2123 	}
2124 
2125 	if (!m)
2126 		return 0;
2127 
2128 	/* Set identifier & sequence into icmp_dw2 */
2129 	if (m->hdr.identifier || m->hdr.sequence) {
2130 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_ICMP_DW2];
2131 		fc->item_idx = item_idx;
2132 		fc->tag_set = &mlx5dr_definer_icmp6_echo_dw2_set;
2133 		DR_CALC_SET_HDR(fc, tcp_icmp, icmp_dw2);
2134 	}
2135 
2136 	return 0;
2137 }
2138 
2139 static int
2140 mlx5dr_definer_conv_item_meter_color(struct mlx5dr_definer_conv_data *cd,
2141 			     struct rte_flow_item *item,
2142 			     int item_idx)
2143 {
2144 	const struct rte_flow_item_meter_color *m = item->mask;
2145 	struct mlx5dr_definer_fc *fc;
2146 	int reg;
2147 
2148 	if (!m)
2149 		return 0;
2150 
2151 	reg = flow_hw_get_reg_id_from_ctx(cd->ctx,
2152 					  RTE_FLOW_ITEM_TYPE_METER_COLOR, 0);
2153 	MLX5_ASSERT(reg > 0);
2154 
2155 	fc = mlx5dr_definer_get_register_fc(cd, reg);
2156 	if (!fc)
2157 		return rte_errno;
2158 
2159 	fc->item_idx = item_idx;
2160 	fc->tag_set = &mlx5dr_definer_meter_color_set;
2161 	return 0;
2162 }
2163 
2164 static struct mlx5dr_definer_fc *
2165 mlx5dr_definer_get_flex_parser_fc(struct mlx5dr_definer_conv_data *cd, uint32_t byte_off)
2166 {
2167 	uint32_t byte_off_fp7 = MLX5_BYTE_OFF(definer_hl, flex_parser.flex_parser_7);
2168 	uint32_t byte_off_fp0 = MLX5_BYTE_OFF(definer_hl, flex_parser.flex_parser_0);
2169 	enum mlx5dr_definer_fname fname = MLX5DR_DEFINER_FNAME_FLEX_PARSER_0;
2170 	struct mlx5dr_definer_fc *fc;
2171 	uint32_t idx;
2172 
2173 	if (byte_off < byte_off_fp7 || byte_off > byte_off_fp0) {
2174 		rte_errno = EINVAL;
2175 		return NULL;
2176 	}
2177 	idx = (byte_off_fp0 - byte_off) / (sizeof(uint32_t));
2178 	fname += (enum mlx5dr_definer_fname)idx;
2179 	fc = &cd->fc[fname];
2180 	fc->byte_off = byte_off;
2181 	fc->bit_mask = UINT32_MAX;
2182 	return fc;
2183 }
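/*
 * Illustrative example (offsets assumed, relying on the flex_parser_* DWs
 * being contiguous as the bounds check above implies): flex_parser_7 has the
 * lowest byte offset and flex_parser_0 the highest, so the index counts down
 * from flex_parser_0:
 *
 *	byte_off == byte_off_fp0     -> idx 0 -> MLX5DR_DEFINER_FNAME_FLEX_PARSER_0
 *	byte_off == byte_off_fp0 - 8 -> idx 2 -> MLX5DR_DEFINER_FNAME_FLEX_PARSER_2
 *	byte_off == byte_off_fp7     -> idx 7 -> MLX5DR_DEFINER_FNAME_FLEX_PARSER_7
 */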
2184 
2185 static int
2186 mlx5dr_definer_conv_item_ipv6_routing_ext(struct mlx5dr_definer_conv_data *cd,
2187 					  struct rte_flow_item *item,
2188 					  int item_idx)
2189 {
2190 	const struct rte_flow_item_ipv6_routing_ext *m = item->mask;
2191 	struct mlx5dr_definer_fc *fc;
2192 	bool inner = cd->tunnel;
2193 	uint32_t byte_off;
2194 
2195 	if (!cd->relaxed) {
2196 		fc = &cd->fc[DR_CALC_FNAME(IP_VERSION, inner)];
2197 		fc->item_idx = item_idx;
2198 		fc->tag_set = &mlx5dr_definer_ipv6_version_set;
2199 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
2200 		DR_CALC_SET(fc, eth_l2, l3_type, inner);
2201 
2202 		/* Overwrite - Unset ethertype if present */
2203 		memset(&cd->fc[DR_CALC_FNAME(ETH_TYPE, inner)], 0, sizeof(*fc));
2204 
2205 		fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)];
2206 		if (!fc->tag_set) {
2207 			fc->item_idx = item_idx;
2208 			fc->tag_set = &mlx5dr_definer_ipv6_routing_hdr_set;
2209 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
2210 			fc->not_overwrite = 1;
2211 			DR_CALC_SET(fc, eth_l3, protocol_next_header, inner);
2212 		}
2213 	} else {
2214 		rte_errno = ENOTSUP;
2215 		return rte_errno;
2216 	}
2217 
2218 	if (!m)
2219 		return 0;
2220 
2221 	if (m->hdr.hdr_len || m->hdr.flags) {
2222 		rte_errno = ENOTSUP;
2223 		return rte_errno;
2224 	}
2225 
2226 	if (m->hdr.next_hdr || m->hdr.type || m->hdr.segments_left) {
2227 		byte_off = flow_hw_get_srh_flex_parser_byte_off_from_ctx(cd->ctx);
2228 		fc = mlx5dr_definer_get_flex_parser_fc(cd, byte_off);
2229 		if (!fc)
2230 			return rte_errno;
2231 
2232 		fc->item_idx = item_idx;
2233 		fc->tag_set = &mlx5dr_definer_ipv6_routing_ext_set;
2234 	}
2235 	return 0;
2236 }
2237 
2238 static int
2239 mlx5dr_definer_conv_item_random(struct mlx5dr_definer_conv_data *cd,
2240 				struct rte_flow_item *item,
2241 				int item_idx)
2242 {
2243 	const struct rte_flow_item_random *m = item->mask;
2244 	const struct rte_flow_item_random *l = item->last;
2245 	struct mlx5dr_definer_fc *fc;
2246 
2247 	if (!m)
2248 		return 0;
2249 
2250 	if (m->value != (m->value & UINT16_MAX)) {
2251 		DR_LOG(ERR, "Random value is 16 bits only");
2252 		rte_errno = EINVAL;
2253 		return rte_errno;
2254 	}
2255 
2256 	fc = &cd->fc[MLX5DR_DEFINER_FNAME_RANDOM_NUM];
2257 	fc->item_idx = item_idx;
2258 	fc->tag_set = &mlx5dr_definer_random_number_set;
2259 	fc->is_range = l && l->value;
2260 	DR_CALC_SET_HDR(fc, random_number, random_number);
2261 
2262 	return 0;
2263 }
2264 
2265 static int
2266 mlx5dr_definer_conv_item_geneve(struct mlx5dr_definer_conv_data *cd,
2267 				struct rte_flow_item *item,
2268 				int item_idx)
2269 {
2270 	const struct rte_flow_item_geneve *m = item->mask;
2271 	struct mlx5dr_definer_fc *fc;
2272 	bool inner = cd->tunnel;
2273 
2274 	if (inner) {
2275 		DR_LOG(ERR, "Inner GENEVE item not supported");
2276 		rte_errno = ENOTSUP;
2277 		return rte_errno;
2278 	}
2279 
2280 	/* In order to match on Geneve we must match on ip_protocol and l4_dport */
2281 	if (!cd->relaxed) {
2282 		fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)];
2283 		if (!fc->tag_set) {
2284 			fc->item_idx = item_idx;
2285 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
2286 			fc->tag_set = &mlx5dr_definer_udp_protocol_set;
2287 			DR_CALC_SET(fc, eth_l2, l4_type_bwc, inner);
2288 		}
2289 
2290 		fc = &cd->fc[DR_CALC_FNAME(L4_DPORT, inner)];
2291 		if (!fc->tag_set) {
2292 			fc->item_idx = item_idx;
2293 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
2294 			fc->tag_set = &mlx5dr_definer_geneve_udp_port_set;
2295 			DR_CALC_SET(fc, eth_l4, destination_port, inner);
2296 		}
2297 	}
2298 
2299 	if (!m)
2300 		return 0;
2301 
2302 	if (m->rsvd1) {
2303 		rte_errno = ENOTSUP;
2304 		return rte_errno;
2305 	}
2306 
2307 	if (m->ver_opt_len_o_c_rsvd0) {
2308 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GENEVE_CTRL];
2309 		fc->item_idx = item_idx;
2310 		fc->tag_set = &mlx5dr_definer_geneve_ctrl_set;
2311 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_0);
2312 		fc->bit_mask = __mlx5_mask(header_geneve, ver_opt_len_o_c_rsvd);
2313 		fc->bit_off = __mlx5_dw_bit_off(header_geneve, ver_opt_len_o_c_rsvd);
2314 	}
2315 
2316 	if (m->protocol) {
2317 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GENEVE_PROTO];
2318 		fc->item_idx = item_idx;
2319 		fc->tag_set = &mlx5dr_definer_geneve_protocol_set;
2320 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_0);
2321 		fc->byte_off += MLX5_BYTE_OFF(header_geneve, protocol_type);
2322 		fc->bit_mask = __mlx5_mask(header_geneve, protocol_type);
2323 		fc->bit_off = __mlx5_dw_bit_off(header_geneve, protocol_type);
2324 	}
2325 
2326 	if (!is_mem_zero(m->vni, 3)) {
2327 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GENEVE_VNI];
2328 		fc->item_idx = item_idx;
2329 		fc->tag_set = &mlx5dr_definer_geneve_vni_set;
2330 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_1);
2331 		fc->bit_mask = __mlx5_mask(header_geneve, vni);
2332 		fc->bit_off = __mlx5_dw_bit_off(header_geneve, vni);
2333 	}
2334 
2335 	return 0;
2336 }
2337 
2338 static int
2339 mlx5dr_definer_conv_item_geneve_opt(struct mlx5dr_definer_conv_data *cd,
2340 				    struct rte_flow_item *item,
2341 				    int item_idx)
2342 {
2343 	const struct rte_flow_item_geneve_opt *m = item->mask;
2344 	const struct rte_flow_item_geneve_opt *v = item->spec;
2345 	struct mlx5_hl_data *hl_ok_bit, *hl_dws;
2346 	struct mlx5dr_definer_fc *fc;
2347 	uint8_t num_of_dws, i;
2348 	bool ok_bit_on_class;
2349 	int ret;
2350 
2351 	if (!m || !(m->option_class || m->option_type || m->data))
2352 		return 0;
2353 
2354 	if (!v || m->option_type != 0xff) {
2355 		DR_LOG(ERR, "Cannot match geneve opt without valid opt type");
2356 		goto out_not_supp;
2357 	}
2358 
2359 	if (m->option_class && m->option_class != RTE_BE16(UINT16_MAX)) {
2360 		DR_LOG(ERR, "Geneve option class has invalid mask");
2361 		goto out_not_supp;
2362 	}
2363 
2364 	ret = mlx5_get_geneve_hl_data(cd->ctx,
2365 				      v->option_type,
2366 				      v->option_class,
2367 				      &hl_ok_bit,
2368 				      &num_of_dws,
2369 				      &hl_dws,
2370 				      &ok_bit_on_class);
2371 	if (ret) {
2372 		DR_LOG(ERR, "Geneve opt type and class %d not supported", v->option_type);
2373 		goto out_not_supp;
2374 	}
2375 
2376 	if (!ok_bit_on_class && m->option_class) {
2377 		/* DW0 is used, we will match on type and class */
2378 		if (!num_of_dws || hl_dws[0].dw_mask != UINT32_MAX) {
2379 			DR_LOG(ERR, "Geneve opt type %d DW0 not supported", v->option_type);
2380 			goto out_not_supp;
2381 		}
2382 
2383 		if (MLX5DR_DEFINER_FNAME_GENEVE_OPT_DW_0 + cd->geneve_opt_data_idx >
2384 		    MLX5DR_DEFINER_FNAME_GENEVE_OPT_DW_7) {
2385 			DR_LOG(ERR, "Max match geneve opt DWs reached");
2386 			goto out_not_supp;
2387 		}
2388 
2389 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GENEVE_OPT_DW_0 + cd->geneve_opt_data_idx++];
2390 		fc->item_idx = item_idx;
2391 		fc->tag_set = &mlx5dr_definer_geneve_opt_ctrl_set;
2392 		fc->byte_off = hl_dws[0].dw_offset * DW_SIZE;
2393 		fc->bit_mask = UINT32_MAX;
2394 	} else {
2395 		/* DW0 is not used, we must verify geneve opt type exists in packet */
2396 		if (!hl_ok_bit->dw_mask) {
2397 			DR_LOG(ERR, "Geneve opt OK bits not supported");
2398 			goto out_not_supp;
2399 		}
2400 
2401 		if (MLX5DR_DEFINER_FNAME_GENEVE_OPT_OK_0 + cd->geneve_opt_ok_idx >
2402 		    MLX5DR_DEFINER_FNAME_GENEVE_OPT_OK_7) {
2403 			DR_LOG(ERR, "Max match geneve opt reached");
2404 			goto out_not_supp;
2405 		}
2406 
2407 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GENEVE_OPT_OK_0 + cd->geneve_opt_ok_idx++];
2408 		fc->item_idx = item_idx;
2409 		fc->tag_set = &mlx5dr_definer_ones_set;
2410 		fc->byte_off = hl_ok_bit->dw_offset * DW_SIZE +
2411 				__builtin_clz(hl_ok_bit->dw_mask) / 8;
2412 		fc->bit_off = __builtin_ctz(hl_ok_bit->dw_mask);
2413 		fc->bit_mask = 0x1;
2414 	}
2415 
2416 	for (i = 1; i < num_of_dws; i++) {
2417 		/* Process each valid geneve option data DW1..N */
2418 		if (!m->data[i - 1])
2419 			continue;
2420 
2421 		if (hl_dws[i].dw_mask != UINT32_MAX) {
2422 			DR_LOG(ERR, "Matching Geneve opt data[%d] not supported", i - 1);
2423 			goto out_not_supp;
2424 		}
2425 
2426 		if (MLX5DR_DEFINER_FNAME_GENEVE_OPT_DW_0 + cd->geneve_opt_data_idx >
2427 		    MLX5DR_DEFINER_FNAME_GENEVE_OPT_DW_7) {
2428 			DR_LOG(ERR, "Max match geneve options DWs reached");
2429 			goto out_not_supp;
2430 		}
2431 
2432 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_GENEVE_OPT_DW_0 + cd->geneve_opt_data_idx++];
2433 		fc->item_idx = item_idx;
2434 		fc->tag_set = &mlx5dr_definer_geneve_opt_data_set;
2435 		fc->byte_off = hl_dws[i].dw_offset * DW_SIZE;
2436 		fc->bit_mask = m->data[i - 1];
2437 		/* Use extra_data for data[] set offset */
2438 		fc->extra_data = i - 1;
2439 	}
2440 
2441 	return 0;
2442 
2443 out_not_supp:
2444 	rte_errno = ENOTSUP;
2445 	return rte_errno;
2446 }
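/*
 * Illustrative summary of the geneve option conversion above (assumed
 * example, not taken from a real parser configuration):
 * - If the option class participates in the match and is not covered by the
 *   OK bit, DW0 (type/class) is matched through a GENEVE_OPT_DW_x slot.
 * - Otherwise the per-option OK bit is matched to verify the option is
 *   present in the packet.
 * - Each non-zero m->data[i] consumes one more GENEVE_OPT_DW_x slot, so a
 *   template matching class plus two data DWs uses 3 of the 8 DW slots.
 */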
2447 
2448 static int
2449 mlx5dr_definer_mt_set_fc(struct mlx5dr_match_template *mt,
2450 			 struct mlx5dr_definer_fc *fc,
2451 			 uint8_t *hl)
2452 {
2453 	uint32_t fc_sz = 0, fcr_sz = 0;
2454 	int i;
2455 
2456 	for (i = 0; i < MLX5DR_DEFINER_FNAME_MAX; i++)
2457 		if (fc[i].tag_set)
2458 			fc[i].is_range ? fcr_sz++ : fc_sz++;
2459 
2460 	mt->fc = simple_calloc(fc_sz + fcr_sz, sizeof(*mt->fc));
2461 	if (!mt->fc) {
2462 		rte_errno = ENOMEM;
2463 		return rte_errno;
2464 	}
2465 
2466 	mt->fcr = mt->fc + fc_sz;
2467 
2468 	for (i = 0; i < MLX5DR_DEFINER_FNAME_MAX; i++) {
2469 		if (!fc[i].tag_set)
2470 			continue;
2471 
2472 		fc[i].fname = i;
2473 
2474 		if (fc[i].is_range) {
2475 			memcpy(&mt->fcr[mt->fcr_sz++], &fc[i], sizeof(*mt->fcr));
2476 		} else {
2477 			memcpy(&mt->fc[mt->fc_sz++], &fc[i], sizeof(*mt->fc));
2478 			DR_SET(hl, -1, fc[i].byte_off, fc[i].bit_off, fc[i].bit_mask);
2479 		}
2480 	}
2481 
2482 	return 0;
2483 }
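/*
 * Illustrative note: the single allocation above is split in two, exact
 * match field copies first (mt->fc) and range field copies after them
 * (mt->fcr = mt->fc + fc_sz). Only exact match fields are also OR-ed into
 * the header layout mask via DR_SET(hl, -1, ...); range fields get their
 * definer bytes assigned later by mlx5dr_definer_find_best_range_fit().
 */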
2484 
2485 static int
2486 mlx5dr_definer_check_item_range_supp(struct rte_flow_item *item)
2487 {
2488 	if (!item->last)
2489 		return 0;
2490 
2491 	switch ((int)item->type) {
2492 	case RTE_FLOW_ITEM_TYPE_IPV4:
2493 	case RTE_FLOW_ITEM_TYPE_IPV6:
2494 	case RTE_FLOW_ITEM_TYPE_UDP:
2495 	case RTE_FLOW_ITEM_TYPE_TCP:
2496 	case RTE_FLOW_ITEM_TYPE_TAG:
2497 	case RTE_FLOW_ITEM_TYPE_META:
2498 	case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
2499 	case RTE_FLOW_ITEM_TYPE_RANDOM:
2500 		return 0;
2501 	default:
2502 		DR_LOG(ERR, "Range not supported over item type %d", item->type);
2503 		rte_errno = ENOTSUP;
2504 		return rte_errno;
2505 	}
2506 }
2507 
2508 static int
2509 mlx5dr_definer_conv_item_esp(struct mlx5dr_definer_conv_data *cd,
2510 			     struct rte_flow_item *item,
2511 			     int item_idx)
2512 {
2513 	const struct rte_flow_item_esp *m = item->mask;
2514 	struct mlx5dr_definer_fc *fc;
2515 
2516 	if (!m)
2517 		return 0;
2518 	if (m->hdr.spi) {
2519 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_ESP_SPI];
2520 		fc->item_idx = item_idx;
2521 		fc->tag_set = &mlx5dr_definer_ipsec_spi_set;
2522 		DR_CALC_SET_HDR(fc, ipsec, spi);
2523 	}
2524 	if (m->hdr.seq) {
2525 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_ESP_SEQUENCE_NUMBER];
2526 		fc->item_idx = item_idx;
2527 		fc->tag_set = &mlx5dr_definer_ipsec_sequence_number_set;
2528 		DR_CALC_SET_HDR(fc, ipsec, sequence_number);
2529 	}
2530 	return 0;
2531 }
2532 
2533 static void mlx5dr_definer_set_conv_tunnel(enum rte_flow_item_type cur_type,
2534 					   uint64_t item_flags,
2535 					   struct mlx5dr_definer_conv_data *cd)
2536 {
2537 	/* Already a tunnel, nothing to change */
2538 	if (cd->tunnel)
2539 		return;
2540 
2541 	/* We can have more than one MPLS label at each level (inner/outer), so
2542 	 * consider tunnel only when it is already under tunnel or if we moved to the
2543 	 * second MPLS level.
2544 	 */
2545 	if (cur_type != RTE_FLOW_ITEM_TYPE_MPLS)
2546 		cd->tunnel = !!(item_flags & MLX5_FLOW_LAYER_TUNNEL);
2547 	else
2548 		cd->tunnel = !!(item_flags & DR_FLOW_LAYER_TUNNEL_NO_MPLS);
2549 }
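/*
 * Illustrative example (hypothetical patterns): for "eth / mpls / mpls / .."
 * both MPLS items are still treated as outer, since MPLS alone is masked out
 * of the tunnel flags checked above, while for
 * "eth / ipv4 / udp / vxlan / eth / .." everything after the VXLAN item is
 * treated as inner through MLX5_FLOW_LAYER_TUNNEL.
 */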
2550 
2551 static int
2552 mlx5dr_definer_conv_item_flex_parser(struct mlx5dr_definer_conv_data *cd,
2553 				     struct rte_flow_item *item,
2554 				     int item_idx)
2555 {
2556 	uint32_t base_off = MLX5_BYTE_OFF(definer_hl, flex_parser.flex_parser_0);
2557 	const struct rte_flow_item_flex *v, *m;
2558 	enum mlx5dr_definer_fname fname;
2559 	struct mlx5dr_definer_fc *fc;
2560 	uint32_t i, mask, byte_off;
2561 	bool is_inner = cd->tunnel;
2562 	int ret;
2563 
2564 	m = item->mask;
2565 	v = item->spec;
2566 	mask = 0;
2567 	for (i = 0; i < MLX5_GRAPH_NODE_SAMPLE_NUM; i++) {
2568 		byte_off = base_off - i * sizeof(uint32_t);
2569 		ret = mlx5_flex_get_parser_value_per_byte_off(m, v->handle, byte_off,
2570 							      true, is_inner, &mask);
2571 		if (ret == -1) {
2572 			rte_errno = EINVAL;
2573 			return rte_errno;
2574 		}
2575 
2576 		if (!mask)
2577 			continue;
2578 
2579 		fname = MLX5DR_DEFINER_FNAME_FLEX_PARSER_0;
2580 		fname += (enum mlx5dr_definer_fname)i;
2581 		fc = &cd->fc[fname];
2582 		fc->byte_off = byte_off;
2583 		fc->item_idx = item_idx;
2584 		fc->tag_set = cd->tunnel ? &mlx5dr_definer_flex_parser_inner_set :
2585 					   &mlx5dr_definer_flex_parser_outer_set;
2586 		fc->tag_mask_set = &mlx5dr_definer_ones_set;
2587 		fc->bit_mask = mask;
2588 	}
2589 	return 0;
2590 }
2591 
2592 static int
2593 mlx5dr_definer_conv_item_ib_l4(struct mlx5dr_definer_conv_data *cd,
2594 			       struct rte_flow_item *item,
2595 			       int item_idx)
2596 {
2597 	const struct rte_flow_item_ib_bth *m = item->mask;
2598 	struct mlx5dr_definer_fc *fc;
2599 	bool inner = cd->tunnel;
2600 
2601 	/* In order to match on RoCEv2 (layer 4 IB), we must match
2602 	 * on ip_protocol and l4_dport.
2603 	 */
2604 	if (!cd->relaxed) {
2605 		fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)];
2606 		if (!fc->tag_set) {
2607 			fc->item_idx = item_idx;
2608 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
2609 			fc->tag_set = &mlx5dr_definer_udp_protocol_set;
2610 			DR_CALC_SET(fc, eth_l2, l4_type_bwc, inner);
2611 		}
2612 
2613 		fc = &cd->fc[DR_CALC_FNAME(L4_DPORT, inner)];
2614 		if (!fc->tag_set) {
2615 			fc->item_idx = item_idx;
2616 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
2617 			fc->tag_set = &mlx5dr_definer_ib_l4_udp_port_set;
2618 			DR_CALC_SET(fc, eth_l4, destination_port, inner);
2619 		}
2620 	}
2621 
2622 	if (!m)
2623 		return 0;
2624 
2625 	if (m->hdr.se || m->hdr.m || m->hdr.padcnt || m->hdr.tver ||
2626 		m->hdr.pkey || m->hdr.f || m->hdr.b || m->hdr.rsvd0 ||
2627 		m->hdr.rsvd1 || !is_mem_zero(m->hdr.psn, 3)) {
2628 		rte_errno = ENOTSUP;
2629 		return rte_errno;
2630 	}
2631 
2632 	if (m->hdr.opcode) {
2633 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_IB_L4_OPCODE];
2634 		fc->item_idx = item_idx;
2635 		fc->tag_set = &mlx5dr_definer_ib_l4_opcode_set;
2636 		DR_CALC_SET_HDR(fc, ib_l4, opcode);
2637 	}
2638 
2639 	if (!is_mem_zero(m->hdr.dst_qp, 3)) {
2640 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_IB_L4_QPN];
2641 		fc->item_idx = item_idx;
2642 		fc->tag_set = &mlx5dr_definer_ib_l4_qp_set;
2643 		DR_CALC_SET_HDR(fc, ib_l4, qp);
2644 	}
2645 
2646 	if (m->hdr.a) {
2647 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_IB_L4_A];
2648 		fc->item_idx = item_idx;
2649 		fc->tag_set = &mlx5dr_definer_ib_l4_bth_a_set;
2650 		DR_CALC_SET_HDR(fc, ib_l4, ackreq);
2651 	}
2652 
2653 	return 0;
2654 }
2655 
2656 static int
2657 mlx5dr_definer_conv_item_vxlan_gpe(struct mlx5dr_definer_conv_data *cd,
2658 				   struct rte_flow_item *item,
2659 				   int item_idx)
2660 {
2661 	const struct rte_flow_item_vxlan_gpe *m = item->mask;
2662 	struct mlx5dr_definer_fc *fc;
2663 	bool inner = cd->tunnel;
2664 
2665 	if (inner) {
2666 		DR_LOG(ERR, "Inner VXLAN GPE item not supported");
2667 		rte_errno = ENOTSUP;
2668 		return rte_errno;
2669 	}
2670 
2671 	/* In order to match on VXLAN GPE we must match on ip_protocol and l4_dport */
2672 	if (!cd->relaxed) {
2673 		fc = &cd->fc[DR_CALC_FNAME(IP_PROTOCOL, inner)];
2674 		if (!fc->tag_set) {
2675 			fc->item_idx = item_idx;
2676 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
2677 			fc->tag_set = &mlx5dr_definer_udp_protocol_set;
2678 			DR_CALC_SET(fc, eth_l2, l4_type_bwc, inner);
2679 		}
2680 
2681 		fc = &cd->fc[DR_CALC_FNAME(L4_DPORT, inner)];
2682 		if (!fc->tag_set) {
2683 			fc->item_idx = item_idx;
2684 			fc->tag_mask_set = &mlx5dr_definer_ones_set;
2685 			fc->tag_set = &mlx5dr_definer_vxlan_gpe_udp_port_set;
2686 			DR_CALC_SET(fc, eth_l4, destination_port, inner);
2687 		}
2688 	}
2689 
2690 	if (!m)
2691 		return 0;
2692 
2693 	if (m->flags) {
2694 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_VXLAN_GPE_FLAGS];
2695 		fc->item_idx = item_idx;
2696 		fc->tag_set = &mlx5dr_definer_vxlan_gpe_flags_set;
2697 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_0);
2698 		fc->bit_mask = __mlx5_mask(header_vxlan_gpe, flags);
2699 		fc->bit_off = __mlx5_dw_bit_off(header_vxlan_gpe, flags);
2700 	}
2701 
2702 	if (!is_mem_zero(m->rsvd0, 2)) {
2703 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_VXLAN_GPE_RSVD0];
2704 		fc->item_idx = item_idx;
2705 		fc->tag_set = &mlx5dr_definer_vxlan_gpe_rsvd0_set;
2706 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_0);
2707 		fc->bit_mask = __mlx5_mask(header_vxlan_gpe, rsvd0);
2708 		fc->bit_off = __mlx5_dw_bit_off(header_vxlan_gpe, rsvd0);
2709 	}
2710 
2711 	if (m->protocol) {
2712 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_VXLAN_GPE_PROTO];
2713 		fc->item_idx = item_idx;
2714 		fc->tag_set = &mlx5dr_definer_vxlan_gpe_protocol_set;
2715 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_0);
2716 		fc->byte_off += MLX5_BYTE_OFF(header_vxlan_gpe, protocol);
2717 		fc->bit_mask = __mlx5_mask(header_vxlan_gpe, protocol);
2718 		fc->bit_off = __mlx5_dw_bit_off(header_vxlan_gpe, protocol);
2719 	}
2720 
2721 	if (!is_mem_zero(m->vni, 3)) {
2722 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_VXLAN_GPE_VNI];
2723 		fc->item_idx = item_idx;
2724 		fc->tag_set = &mlx5dr_definer_vxlan_gpe_vni_set;
2725 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_1);
2726 		fc->bit_mask = __mlx5_mask(header_vxlan_gpe, vni);
2727 		fc->bit_off = __mlx5_dw_bit_off(header_vxlan_gpe, vni);
2728 	}
2729 
2730 	if (m->rsvd1) {
2731 		fc = &cd->fc[MLX5DR_DEFINER_FNAME_VXLAN_GPE_RSVD1];
2732 		fc->item_idx = item_idx;
2733 		fc->tag_set = &mlx5dr_definer_vxlan_gpe_rsvd1_set;
2734 		DR_CALC_SET_HDR(fc, tunnel_header, tunnel_header_1);
2735 		fc->bit_mask = __mlx5_mask(header_vxlan_gpe, rsvd1);
2736 		fc->bit_off = __mlx5_dw_bit_off(header_vxlan_gpe, rsvd1);
2737 	}
2738 
2739 	return 0;
2740 }
2741 
2742 static int
2743 mlx5dr_definer_conv_items_to_hl(struct mlx5dr_context *ctx,
2744 				struct mlx5dr_match_template *mt,
2745 				uint8_t *hl)
2746 {
2747 	struct mlx5dr_definer_fc fc[MLX5DR_DEFINER_FNAME_MAX] = {{0}};
2748 	struct mlx5dr_definer_conv_data cd = {0};
2749 	struct rte_flow_item *items = mt->items;
2750 	uint64_t item_flags = 0;
2751 	int i, ret;
2752 
2753 	cd.fc = fc;
2754 	cd.ctx = ctx;
2755 	cd.relaxed = mt->flags & MLX5DR_MATCH_TEMPLATE_FLAG_RELAXED_MATCH;
2756 
2757 	/* Collect all RTE fields into the field array and set the header layout */
2758 	for (i = 0; items->type != RTE_FLOW_ITEM_TYPE_END; i++, items++) {
2759 		mlx5dr_definer_set_conv_tunnel(items->type, item_flags, &cd);
2760 
2761 		ret = mlx5dr_definer_check_item_range_supp(items);
2762 		if (ret)
2763 			return ret;
2764 
2765 		switch ((int)items->type) {
2766 		case RTE_FLOW_ITEM_TYPE_ETH:
2767 			ret = mlx5dr_definer_conv_item_eth(&cd, items, i);
2768 			item_flags |= cd.tunnel ? MLX5_FLOW_LAYER_INNER_L2 :
2769 						  MLX5_FLOW_LAYER_OUTER_L2;
2770 			break;
2771 		case RTE_FLOW_ITEM_TYPE_VLAN:
2772 			ret = mlx5dr_definer_conv_item_vlan(&cd, items, i);
2773 			item_flags |= cd.tunnel ?
2774 				(MLX5_FLOW_LAYER_INNER_VLAN | MLX5_FLOW_LAYER_INNER_L2) :
2775 				(MLX5_FLOW_LAYER_OUTER_VLAN | MLX5_FLOW_LAYER_OUTER_L2);
2776 			break;
2777 		case RTE_FLOW_ITEM_TYPE_IPV4:
2778 			ret = mlx5dr_definer_conv_item_ipv4(&cd, items, i);
2779 			item_flags |= cd.tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV4 :
2780 						  MLX5_FLOW_LAYER_OUTER_L3_IPV4;
2781 			break;
2782 		case RTE_FLOW_ITEM_TYPE_IPV6:
2783 			ret = mlx5dr_definer_conv_item_ipv6(&cd, items, i);
2784 			item_flags |= cd.tunnel ? MLX5_FLOW_LAYER_INNER_L3_IPV6 :
2785 						  MLX5_FLOW_LAYER_OUTER_L3_IPV6;
2786 			break;
2787 		case RTE_FLOW_ITEM_TYPE_UDP:
2788 			ret = mlx5dr_definer_conv_item_udp(&cd, items, i);
2789 			item_flags |= cd.tunnel ? MLX5_FLOW_LAYER_INNER_L4_UDP :
2790 						  MLX5_FLOW_LAYER_OUTER_L4_UDP;
2791 			break;
2792 		case RTE_FLOW_ITEM_TYPE_TCP:
2793 			ret = mlx5dr_definer_conv_item_tcp(&cd, items, i);
2794 			item_flags |= cd.tunnel ? MLX5_FLOW_LAYER_INNER_L4_TCP :
2795 						  MLX5_FLOW_LAYER_OUTER_L4_TCP;
2796 			break;
2797 		case RTE_FLOW_ITEM_TYPE_GTP:
2798 			ret = mlx5dr_definer_conv_item_gtp(&cd, items, i);
2799 			item_flags |= MLX5_FLOW_LAYER_GTP;
2800 			break;
2801 		case RTE_FLOW_ITEM_TYPE_GTP_PSC:
2802 			ret = mlx5dr_definer_conv_item_gtp_psc(&cd, items, i);
2803 			item_flags |= MLX5_FLOW_LAYER_GTP_PSC;
2804 			break;
2805 		case RTE_FLOW_ITEM_TYPE_REPRESENTED_PORT:
2806 			ret = mlx5dr_definer_conv_item_port(&cd, items, i);
2807 			item_flags |= MLX5_FLOW_ITEM_REPRESENTED_PORT;
2808 			mt->vport_item_id = i;
2809 			break;
2810 		case RTE_FLOW_ITEM_TYPE_VXLAN:
2811 			ret = mlx5dr_definer_conv_item_vxlan(&cd, items, i);
2812 			item_flags |= MLX5_FLOW_LAYER_VXLAN;
2813 			break;
2814 		case MLX5_RTE_FLOW_ITEM_TYPE_SQ:
2815 			ret = mlx5dr_definer_conv_item_sq(&cd, items, i);
2816 			item_flags |= MLX5_FLOW_ITEM_SQ;
2817 			break;
2818 		case RTE_FLOW_ITEM_TYPE_TAG:
2819 		case MLX5_RTE_FLOW_ITEM_TYPE_TAG:
2820 			ret = mlx5dr_definer_conv_item_tag(&cd, items, i);
2821 			item_flags |= MLX5_FLOW_ITEM_TAG;
2822 			break;
2823 		case RTE_FLOW_ITEM_TYPE_META:
2824 			ret = mlx5dr_definer_conv_item_metadata(&cd, items, i);
2825 			item_flags |= MLX5_FLOW_ITEM_METADATA;
2826 			break;
2827 		case RTE_FLOW_ITEM_TYPE_GRE:
2828 			ret = mlx5dr_definer_conv_item_gre(&cd, items, i);
2829 			item_flags |= MLX5_FLOW_LAYER_GRE;
2830 			break;
2831 		case RTE_FLOW_ITEM_TYPE_GRE_OPTION:
2832 			ret = mlx5dr_definer_conv_item_gre_opt(&cd, items, i);
2833 			item_flags |= MLX5_FLOW_LAYER_GRE;
2834 			break;
2835 		case RTE_FLOW_ITEM_TYPE_GRE_KEY:
2836 			ret = mlx5dr_definer_conv_item_gre_key(&cd, items, i);
2837 			item_flags |= MLX5_FLOW_LAYER_GRE_KEY;
2838 			break;
2839 		case RTE_FLOW_ITEM_TYPE_INTEGRITY:
2840 			ret = mlx5dr_definer_conv_item_integrity(&cd, items, i);
2841 			item_flags |= MLX5_FLOW_ITEM_INTEGRITY;
2842 			break;
2843 		case RTE_FLOW_ITEM_TYPE_CONNTRACK:
2844 			ret = mlx5dr_definer_conv_item_conntrack(&cd, items, i);
2845 			break;
2846 		case RTE_FLOW_ITEM_TYPE_ICMP:
2847 			ret = mlx5dr_definer_conv_item_icmp(&cd, items, i);
2848 			item_flags |= MLX5_FLOW_LAYER_ICMP;
2849 			break;
2850 		case RTE_FLOW_ITEM_TYPE_ICMP6:
2851 			ret = mlx5dr_definer_conv_item_icmp6(&cd, items, i);
2852 			item_flags |= MLX5_FLOW_LAYER_ICMP6;
2853 			break;
2854 		case RTE_FLOW_ITEM_TYPE_ICMP6_ECHO_REQUEST:
2855 		case RTE_FLOW_ITEM_TYPE_ICMP6_ECHO_REPLY:
2856 			ret = mlx5dr_definer_conv_item_icmp6_echo(&cd, items, i);
2857 			item_flags |= MLX5_FLOW_LAYER_ICMP6;
2858 			break;
2859 		case RTE_FLOW_ITEM_TYPE_METER_COLOR:
2860 			ret = mlx5dr_definer_conv_item_meter_color(&cd, items, i);
2861 			item_flags |= MLX5_FLOW_ITEM_METER_COLOR;
2862 			break;
2863 		case RTE_FLOW_ITEM_TYPE_QUOTA:
2864 			ret = mlx5dr_definer_conv_item_quota(&cd, items, i);
2865 			item_flags |= MLX5_FLOW_ITEM_QUOTA;
2866 			break;
2867 		case RTE_FLOW_ITEM_TYPE_IPV6_ROUTING_EXT:
2868 			ret = mlx5dr_definer_conv_item_ipv6_routing_ext(&cd, items, i);
2869 			item_flags |= cd.tunnel ? MLX5_FLOW_ITEM_INNER_IPV6_ROUTING_EXT :
2870 						  MLX5_FLOW_ITEM_OUTER_IPV6_ROUTING_EXT;
2871 			break;
2872 		case RTE_FLOW_ITEM_TYPE_ESP:
2873 			ret = mlx5dr_definer_conv_item_esp(&cd, items, i);
2874 			item_flags |= MLX5_FLOW_ITEM_ESP;
2875 			break;
2876 		case RTE_FLOW_ITEM_TYPE_FLEX:
2877 			ret = mlx5dr_definer_conv_item_flex_parser(&cd, items, i);
2878 			item_flags |= cd.tunnel ? MLX5_FLOW_ITEM_INNER_FLEX :
2879 						  MLX5_FLOW_ITEM_OUTER_FLEX;
2880 			break;
2881 		case RTE_FLOW_ITEM_TYPE_MPLS:
2882 			ret = mlx5dr_definer_conv_item_mpls(&cd, items, i);
2883 			item_flags |= MLX5_FLOW_LAYER_MPLS;
2884 			cd.mpls_idx++;
2885 			break;
2886 		case RTE_FLOW_ITEM_TYPE_GENEVE:
2887 			ret = mlx5dr_definer_conv_item_geneve(&cd, items, i);
2888 			item_flags |= MLX5_FLOW_LAYER_GENEVE;
2889 			break;
2890 		case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
2891 			ret = mlx5dr_definer_conv_item_geneve_opt(&cd, items, i);
2892 			item_flags |= MLX5_FLOW_LAYER_GENEVE_OPT;
2893 			break;
2894 		case RTE_FLOW_ITEM_TYPE_IB_BTH:
2895 			ret = mlx5dr_definer_conv_item_ib_l4(&cd, items, i);
2896 			item_flags |= MLX5_FLOW_ITEM_IB_BTH;
2897 			break;
2898 		case RTE_FLOW_ITEM_TYPE_PTYPE:
2899 			ret = mlx5dr_definer_conv_item_ptype(&cd, items, i);
2900 			item_flags |= MLX5_FLOW_ITEM_PTYPE;
2901 			break;
2902 		case RTE_FLOW_ITEM_TYPE_RANDOM:
2903 			ret = mlx5dr_definer_conv_item_random(&cd, items, i);
2904 			item_flags |= MLX5_FLOW_ITEM_RANDOM;
2905 			break;
2906 		case RTE_FLOW_ITEM_TYPE_VXLAN_GPE:
2907 			ret = mlx5dr_definer_conv_item_vxlan_gpe(&cd, items, i);
2908 			item_flags |= MLX5_FLOW_LAYER_VXLAN_GPE;
2909 			break;
2910 		default:
2911 			DR_LOG(ERR, "Unsupported item type %d", items->type);
2912 			rte_errno = ENOTSUP;
2913 			return rte_errno;
2914 		}
2915 
2916 		cd.last_item = items->type;
2917 
2918 		if (ret) {
2919 			DR_LOG(ERR, "Failed processing item type: %d", items->type);
2920 			return ret;
2921 		}
2922 	}
2923 
2924 	mt->item_flags = item_flags;
2925 
2926 	/* Fill in headers layout and allocate fc & fcr array on mt */
2927 	ret = mlx5dr_definer_mt_set_fc(mt, fc, hl);
2928 	if (ret) {
2929 		DR_LOG(ERR, "Failed to set field copy to match template");
2930 		return ret;
2931 	}
2932 
2933 	return 0;
2934 }
2935 
2936 static int
2937 mlx5dr_definer_find_byte_in_tag(struct mlx5dr_definer *definer,
2938 				uint32_t hl_byte_off,
2939 				uint32_t *tag_byte_off)
2940 {
2941 	uint8_t byte_offset;
2942 	int i, dw_to_scan;
2943 
2944 	/* Avoid accessing unused DW selectors */
2945 	dw_to_scan = mlx5dr_definer_is_jumbo(definer) ?
2946 		DW_SELECTORS : DW_SELECTORS_MATCH;
2947 
2948 	/* Add offset since each DW covers multiple BYTEs */
2949 	byte_offset = hl_byte_off % DW_SIZE;
2950 	for (i = 0; i < dw_to_scan; i++) {
2951 		if (definer->dw_selector[i] == hl_byte_off / DW_SIZE) {
2952 			*tag_byte_off = byte_offset + DW_SIZE * (DW_SELECTORS - i - 1);
2953 			return 0;
2954 		}
2955 	}
2956 
2957 	/* Add offset to skip DWs in definer */
2958 	byte_offset = DW_SIZE * DW_SELECTORS;
2959 	/* Iterate in reverse since the code uses bytes from 7 -> 0 */
2960 	for (i = BYTE_SELECTORS; i-- > 0 ;) {
2961 		if (definer->byte_selector[i] == hl_byte_off) {
2962 			*tag_byte_off = byte_offset + (BYTE_SELECTORS - i - 1);
2963 			return 0;
2964 		}
2965 	}
2966 
2967 	/* The hl byte offset must be part of the definer */
2968 	DR_LOG(INFO, "Failed to map to definer, HL byte [%d] not found", hl_byte_off);
2969 	rte_errno = EINVAL;
2970 	return rte_errno;
2971 }
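/*
 * Illustrative example (selector values assumed): if hl_byte_off is 70 it
 * belongs to HL DW 17, byte 2 within the DW. If dw_selector[3] == 17 the
 * tag byte becomes 2 + DW_SIZE * (DW_SELECTORS - 3 - 1); if no DW selector
 * holds DW 17, the byte selectors are scanned and the byte lands after all
 * the DW selectors, at DW_SIZE * DW_SELECTORS + (BYTE_SELECTORS - i - 1).
 */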
2972 
2973 static int
2974 mlx5dr_definer_fc_bind(struct mlx5dr_definer *definer,
2975 		       struct mlx5dr_definer_fc *fc,
2976 		       uint32_t fc_sz)
2977 {
2978 	uint32_t tag_offset = 0;
2979 	int ret, byte_diff;
2980 	uint32_t i;
2981 
2982 	for (i = 0; i < fc_sz; i++) {
2983 		/* Map header layout byte offset to byte offset in tag */
2984 		ret = mlx5dr_definer_find_byte_in_tag(definer, fc->byte_off, &tag_offset);
2985 		if (ret)
2986 			return ret;
2987 
2988 		/* Move setter based on the location in the definer */
2989 		byte_diff = fc->byte_off % DW_SIZE - tag_offset % DW_SIZE;
2990 		fc->bit_off = fc->bit_off + byte_diff * BITS_IN_BYTE;
2991 
2992 		/* Update offset in headers layout to offset in tag */
2993 		fc->byte_off = tag_offset;
2994 		fc++;
2995 	}
2996 
2997 	return 0;
2998 }
2999 
3000 static bool
3001 mlx5dr_definer_best_hl_fit_recu(struct mlx5dr_definer_sel_ctrl *ctrl,
3002 				uint32_t cur_dw,
3003 				uint32_t *data)
3004 {
3005 	uint8_t bytes_set;
3006 	int byte_idx;
3007 	bool ret;
3008 	int i;
3009 
3010 	/* Reached end, nothing left to do */
3011 	if (cur_dw == MLX5_ST_SZ_DW(definer_hl))
3012 		return true;
3013 
3014 	/* No data set, can skip to next DW */
3015 	while (!*data) {
3016 		cur_dw++;
3017 		data++;
3018 
3019 		/* Reached end, nothing left to do */
3020 		if (cur_dw == MLX5_ST_SZ_DW(definer_hl))
3021 			return true;
3022 	}
3023 
3024 	/* Used all DW selectors and Byte selectors, no possible solution */
3025 	if (ctrl->allowed_full_dw == ctrl->used_full_dw &&
3026 	    ctrl->allowed_lim_dw == ctrl->used_lim_dw &&
3027 	    ctrl->allowed_bytes == ctrl->used_bytes)
3028 		return false;
3029 
3030 	/* Try to use limited DW selectors */
3031 	if (ctrl->allowed_lim_dw > ctrl->used_lim_dw && cur_dw < 64) {
3032 		ctrl->lim_dw_selector[ctrl->used_lim_dw++] = cur_dw;
3033 
3034 		ret = mlx5dr_definer_best_hl_fit_recu(ctrl, cur_dw + 1, data + 1);
3035 		if (ret)
3036 			return ret;
3037 
3038 		ctrl->lim_dw_selector[--ctrl->used_lim_dw] = 0;
3039 	}
3040 
3041 	/* Try to use DW selectors */
3042 	if (ctrl->allowed_full_dw > ctrl->used_full_dw) {
3043 		ctrl->full_dw_selector[ctrl->used_full_dw++] = cur_dw;
3044 
3045 		ret = mlx5dr_definer_best_hl_fit_recu(ctrl, cur_dw + 1, data + 1);
3046 		if (ret)
3047 			return ret;
3048 
3049 		ctrl->full_dw_selector[--ctrl->used_full_dw] = 0;
3050 	}
3051 
3052 	/* No byte selector for offset bigger than 255 */
3053 	if (cur_dw * DW_SIZE > 255)
3054 		return false;
3055 
3056 	bytes_set = !!(0x000000ff & *data) +
3057 		    !!(0x0000ff00 & *data) +
3058 		    !!(0x00ff0000 & *data) +
3059 		    !!(0xff000000 & *data);
3060 
3061 	/* Check if there are enough byte selectors left */
3062 	if (bytes_set + ctrl->used_bytes > ctrl->allowed_bytes)
3063 		return false;
3064 
3065 	/* Try to use Byte selectors */
3066 	for (i = 0; i < DW_SIZE; i++)
3067 		if ((0xff000000 >> (i * BITS_IN_BYTE)) & rte_be_to_cpu_32(*data)) {
3068 			/* Use byte selectors high to low */
3069 			byte_idx = ctrl->allowed_bytes - ctrl->used_bytes - 1;
3070 			ctrl->byte_selector[byte_idx] = cur_dw * DW_SIZE + i;
3071 			ctrl->used_bytes++;
3072 		}
3073 
3074 	ret = mlx5dr_definer_best_hl_fit_recu(ctrl, cur_dw + 1, data + 1);
3075 	if (ret)
3076 		return ret;
3077 
3078 	for (i = 0; i < DW_SIZE; i++)
3079 		if ((0xff000000 >> (i * BITS_IN_BYTE)) & rte_be_to_cpu_32(*data)) {
3080 			ctrl->used_bytes--;
3081 			byte_idx = ctrl->allowed_bytes - ctrl->used_bytes - 1;
3082 			ctrl->byte_selector[byte_idx] = 0;
3083 		}
3084 
3085 	return false;
3086 }
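/*
 * Illustrative note: the recursion above is a simple backtracking search.
 * For every non-zero HL DW it tries, in order, a limited DW selector, a
 * full DW selector and finally byte selectors, undoing each choice before
 * trying the next one. A DW such as 0x0000ff00 costs one byte selector,
 * while 0xffff0001 costs three, so sparse masks are cheaper to place.
 */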
3087 
3088 static void
3089 mlx5dr_definer_copy_sel_ctrl(struct mlx5dr_definer_sel_ctrl *ctrl,
3090 			     struct mlx5dr_definer *definer)
3091 {
3092 	memcpy(definer->byte_selector, ctrl->byte_selector, ctrl->allowed_bytes);
3093 	memcpy(definer->dw_selector, ctrl->full_dw_selector, ctrl->allowed_full_dw);
3094 	memcpy(definer->dw_selector + ctrl->allowed_full_dw,
3095 	       ctrl->lim_dw_selector, ctrl->allowed_lim_dw);
3096 }
3097 
3098 static int
3099 mlx5dr_definer_find_best_range_fit(struct mlx5dr_definer *definer,
3100 				   struct mlx5dr_matcher *matcher)
3101 {
3102 	uint8_t tag_byte_offset[MLX5DR_DEFINER_FNAME_MAX] = {0};
3103 	uint8_t field_select[MLX5DR_DEFINER_FNAME_MAX] = {0};
3104 	struct mlx5dr_definer_sel_ctrl ctrl = {0};
3105 	uint32_t byte_offset, algn_byte_off;
3106 	struct mlx5dr_definer_fc *fcr;
3107 	bool require_dw;
3108 	int idx, i, j;
3109 
3110 	/* Try to create a range definer */
3111 	ctrl.allowed_full_dw = DW_SELECTORS_RANGE;
3112 	ctrl.allowed_bytes = BYTE_SELECTORS_RANGE;
3113 
3114 	/* Multiple fields cannot share the same DW for a range match.
3115 	 * The HW does not recognize individual fields but compares the full DW.
3116 	 * For example, assume a definer DW consists of FieldA_FieldB:
3117 	 * FieldA: Mask 0xFFFF, range 0x1 to 0x2
3118 	 * FieldB: Mask 0xFFFF, range 0x3 to 0x4
3119 	 * The STE DW range will be 0x00010003 - 0x00020004.
3120 	 * This would wrongly match FieldB when FieldA=1 and FieldB=8,
3121 	 * since 0x00010003 < 0x00010008 < 0x00020004.
3122 	 */
3123 	for (i = 0; i < matcher->num_of_mt; i++) {
3124 		for (j = 0; j < matcher->mt[i].fcr_sz; j++) {
3125 			fcr = &matcher->mt[i].fcr[j];
3126 
3127 			/* Found - Reuse previous mt binding */
3128 			if (field_select[fcr->fname]) {
3129 				fcr->byte_off = tag_byte_offset[fcr->fname];
3130 				continue;
3131 			}
3132 
3133 			/* Not found */
3134 			require_dw = fcr->byte_off >= (64 * DW_SIZE);
3135 			if (require_dw || ctrl.used_bytes == ctrl.allowed_bytes) {
3136 				/* Try to cover using DW selector */
3137 				if (ctrl.used_full_dw == ctrl.allowed_full_dw)
3138 					goto not_supported;
3139 
3140 				ctrl.full_dw_selector[ctrl.used_full_dw++] =
3141 					fcr->byte_off / DW_SIZE;
3142 
3143 				/* Bind DW */
3144 				idx = ctrl.used_full_dw - 1;
3145 				byte_offset = fcr->byte_off % DW_SIZE;
3146 				byte_offset += DW_SIZE * (DW_SELECTORS - idx - 1);
3147 			} else {
3148 				/* Try to cover using byte selectors */
3149 				if (ctrl.used_bytes == ctrl.allowed_bytes)
3150 					goto not_supported;
3151 
3152 				algn_byte_off = DW_SIZE * (fcr->byte_off / DW_SIZE);
3153 				ctrl.byte_selector[ctrl.used_bytes++] = algn_byte_off + 3;
3154 				ctrl.byte_selector[ctrl.used_bytes++] = algn_byte_off + 2;
3155 				ctrl.byte_selector[ctrl.used_bytes++] = algn_byte_off + 1;
3156 				ctrl.byte_selector[ctrl.used_bytes++] = algn_byte_off;
3157 
3158 				/* Bind BYTE */
3159 				byte_offset = DW_SIZE * DW_SELECTORS;
3160 				byte_offset += BYTE_SELECTORS - ctrl.used_bytes;
3161 				byte_offset += fcr->byte_off % DW_SIZE;
3162 			}
3163 
3164 			fcr->byte_off = byte_offset;
3165 			tag_byte_offset[fcr->fname] = byte_offset;
3166 			field_select[fcr->fname] = 1;
3167 		}
3168 	}
3169 
3170 	mlx5dr_definer_copy_sel_ctrl(&ctrl, definer);
3171 	definer->type = MLX5DR_DEFINER_TYPE_RANGE;
3172 
3173 	return 0;
3174 
3175 not_supported:
3176 	DR_LOG(ERR, "Unable to find supporting range definer combination");
3177 	rte_errno = ENOTSUP;
3178 	return rte_errno;
3179 }
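/*
 * Illustrative note on the binding above: a range field that can be covered
 * by byte selectors consumes four byte selector entries (the whole aligned
 * DW), while a field beyond byte selector reach (byte_off >= 64 * DW_SIZE),
 * or one hit after the byte selectors are exhausted, must take a full DW
 * selector. A field repeated across match templates reuses its first
 * binding through the field_select[]/tag_byte_offset[] caches.
 */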
3180 
3181 static int
3182 mlx5dr_definer_find_best_match_fit(struct mlx5dr_context *ctx,
3183 				   struct mlx5dr_definer *definer,
3184 				   uint8_t *hl)
3185 {
3186 	struct mlx5dr_definer_sel_ctrl ctrl = {0};
3187 	bool found;
3188 
3189 	/* Try to create a match definer */
3190 	ctrl.allowed_full_dw = DW_SELECTORS_MATCH;
3191 	ctrl.allowed_lim_dw = 0;
3192 	ctrl.allowed_bytes = BYTE_SELECTORS;
3193 
3194 	found = mlx5dr_definer_best_hl_fit_recu(&ctrl, 0, (uint32_t *)hl);
3195 	if (found) {
3196 		mlx5dr_definer_copy_sel_ctrl(&ctrl, definer);
3197 		definer->type = MLX5DR_DEFINER_TYPE_MATCH;
3198 		return 0;
3199 	}
3200 
3201 	/* Try to create a full/limited jumbo definer */
3202 	ctrl.allowed_full_dw = ctx->caps->full_dw_jumbo_support ? DW_SELECTORS :
3203 								  DW_SELECTORS_MATCH;
3204 	ctrl.allowed_lim_dw = ctx->caps->full_dw_jumbo_support ? 0 :
3205 								 DW_SELECTORS_LIMITED;
3206 	ctrl.allowed_bytes = BYTE_SELECTORS;
3207 
3208 	found = mlx5dr_definer_best_hl_fit_recu(&ctrl, 0, (uint32_t *)hl);
3209 	if (found) {
3210 		mlx5dr_definer_copy_sel_ctrl(&ctrl, definer);
3211 		definer->type = MLX5DR_DEFINER_TYPE_JUMBO;
3212 		return 0;
3213 	}
3214 
3215 	DR_LOG(ERR, "Unable to find supporting match/jumbo definer combination");
3216 	rte_errno = ENOTSUP;
3217 	return rte_errno;
3218 }
3219 
3220 static void
3221 mlx5dr_definer_create_tag_mask(struct rte_flow_item *items,
3222 			       struct mlx5dr_definer_fc *fc,
3223 			       uint32_t fc_sz,
3224 			       uint8_t *tag)
3225 {
3226 	uint32_t i;
3227 
3228 	for (i = 0; i < fc_sz; i++) {
3229 		if (fc->tag_mask_set)
3230 			fc->tag_mask_set(fc, items[fc->item_idx].mask, tag);
3231 		else
3232 			fc->tag_set(fc, items[fc->item_idx].mask, tag);
3233 		fc++;
3234 	}
3235 }
3236 
3237 void mlx5dr_definer_create_tag(const struct rte_flow_item *items,
3238 			       struct mlx5dr_definer_fc *fc,
3239 			       uint32_t fc_sz,
3240 			       uint8_t *tag)
3241 {
3242 	uint32_t i;
3243 
3244 	for (i = 0; i < fc_sz; i++) {
3245 		fc->tag_set(fc, items[fc->item_idx].spec, tag);
3246 		fc++;
3247 	}
3248 }
3249 
3250 static uint32_t mlx5dr_definer_get_range_byte_off(uint32_t match_byte_off)
3251 {
3252 	uint8_t curr_dw_idx = match_byte_off / DW_SIZE;
3253 	uint8_t new_dw_idx;
3254 
3255 	/* A range match DW can be one of DW7, DW8, DW9 or DW10:
3256 	 * -DW7 is mapped to DW9
3257 	 * -DW8 is mapped to DW7
3258 	 * -DW9 is mapped to DW5
3259 	 * -DW10 is mapped to DW3
3260 	 * To avoid a lookup table, the linear formula new = 23 - 2 * cur is used:
3261 	 */
3262 	new_dw_idx = curr_dw_idx * (-2) + 23;
3263 
3264 	return new_dw_idx * DW_SIZE + match_byte_off % DW_SIZE;
3265 }
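/*
 * Quick check of the mapping above (illustrative only):
 *	DW7:  -2 * 7  + 23 = 9
 *	DW8:  -2 * 8  + 23 = 7
 *	DW9:  -2 * 9  + 23 = 5
 *	DW10: -2 * 10 + 23 = 3
 * matching the table in the comment, while the byte within the DW is kept.
 */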
3266 
3267 void mlx5dr_definer_create_tag_range(const struct rte_flow_item *items,
3268 				     struct mlx5dr_definer_fc *fc,
3269 				     uint32_t fc_sz,
3270 				     uint8_t *tag)
3271 {
3272 	struct mlx5dr_definer_fc tmp_fc;
3273 	uint32_t i;
3274 
3275 	for (i = 0; i < fc_sz; i++) {
3276 		tmp_fc = *fc;
3277 		/* Set MAX value */
3278 		tmp_fc.byte_off = mlx5dr_definer_get_range_byte_off(fc->byte_off);
3279 		tmp_fc.tag_set(&tmp_fc, items[fc->item_idx].last, tag);
3280 		/* Set MIN value */
3281 		tmp_fc.byte_off += DW_SIZE;
3282 		tmp_fc.tag_set(&tmp_fc, items[fc->item_idx].spec, tag);
3283 		fc++;
3284 	}
3285 }
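/*
 * Illustrative note: for every range field the MAX bound (item->last) is
 * written at the remapped range byte offset and the MIN bound (item->spec)
 * one DW after it, so a single field produces a MAX/MIN pair in the range
 * STE. The per-field copy (tmp_fc) avoids modifying the cached byte offset,
 * keeping mlx5dr_definer_create_tag() usable for the exact match part of
 * the same rule.
 */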
3286 
3287 int mlx5dr_definer_get_id(struct mlx5dr_definer *definer)
3288 {
3289 	return definer->obj->id;
3290 }
3291 
3292 static int
3293 mlx5dr_definer_compare(struct mlx5dr_definer *definer_a,
3294 		       struct mlx5dr_definer *definer_b)
3295 {
3296 	int i;
3297 
3298 	/* Future: Optimize by comparing selectors with valid mask only */
3299 	for (i = 0; i < BYTE_SELECTORS; i++)
3300 		if (definer_a->byte_selector[i] != definer_b->byte_selector[i])
3301 			return 1;
3302 
3303 	for (i = 0; i < DW_SELECTORS; i++)
3304 		if (definer_a->dw_selector[i] != definer_b->dw_selector[i])
3305 			return 1;
3306 
3307 	for (i = 0; i < MLX5DR_JUMBO_TAG_SZ; i++)
3308 		if (definer_a->mask.jumbo[i] != definer_b->mask.jumbo[i])
3309 			return 1;
3310 
3311 	return 0;
3312 }
3313 
3314 static int
3315 mlx5dr_definer_calc_layout(struct mlx5dr_matcher *matcher,
3316 			   struct mlx5dr_definer *match_definer,
3317 			   struct mlx5dr_definer *range_definer)
3318 {
3319 	struct mlx5dr_context *ctx = matcher->tbl->ctx;
3320 	struct mlx5dr_match_template *mt = matcher->mt;
3321 	uint8_t *match_hl;
3322 	int i, ret;
3323 
3324 	/* The union header layout (hl) is used to create a single definer
3325 	 * field layout, which is then used with different bitmasks for hash and match.
3326 	 */
3327 	match_hl = simple_calloc(1, MLX5_ST_SZ_BYTES(definer_hl));
3328 	if (!match_hl) {
3329 		DR_LOG(ERR, "Failed to allocate memory for header layout");
3330 		rte_errno = ENOMEM;
3331 		return rte_errno;
3332 	}
3333 
3334 	/* Convert all mt items to header layout (hl)
3335 	 * and allocate the match and range field copy array (fc & fcr).
3336 	 */
3337 	for (i = 0; i < matcher->num_of_mt; i++) {
3338 		ret = mlx5dr_definer_conv_items_to_hl(ctx, &mt[i], match_hl);
3339 		if (ret) {
3340 			DR_LOG(ERR, "Failed to convert items to header layout");
3341 			goto free_fc;
3342 		}
3343 	}
3344 
3345 	/* Find the best-fit match definer layout for the header layout match union */
3346 	ret = mlx5dr_definer_find_best_match_fit(ctx, match_definer, match_hl);
3347 	if (ret) {
3348 		DR_LOG(ERR, "Failed to create match definer from header layout");
3349 		goto free_fc;
3350 	}
3351 
3352 	/* Find the range definer layout for the match templates' fcrs */
3353 	ret = mlx5dr_definer_find_best_range_fit(range_definer, matcher);
3354 	if (ret) {
3355 		DR_LOG(ERR, "Failed to create range definer from header layout");
3356 		goto free_fc;
3357 	}
3358 
3359 	simple_free(match_hl);
3360 	return 0;
3361 
3362 free_fc:
3363 	for (i = 0; i < matcher->num_of_mt; i++)
3364 		if (mt[i].fc)
3365 			simple_free(mt[i].fc);
3366 
3367 	simple_free(match_hl);
3368 	return rte_errno;
3369 }
3370 
3371 int mlx5dr_definer_init_cache(struct mlx5dr_definer_cache **cache)
3372 {
3373 	struct mlx5dr_definer_cache *new_cache;
3374 
3375 	new_cache = simple_calloc(1, sizeof(*new_cache));
3376 	if (!new_cache) {
3377 		rte_errno = ENOMEM;
3378 		return rte_errno;
3379 	}
3380 	LIST_INIT(&new_cache->head);
3381 	*cache = new_cache;
3382 
3383 	return 0;
3384 }
3385 
3386 void mlx5dr_definer_uninit_cache(struct mlx5dr_definer_cache *cache)
3387 {
3388 	simple_free(cache);
3389 }
3390 
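/* Get a DevX definer object for the given definer. A cache hit moves the
 * entry to the list head (LRU) and increments its refcount; on a miss a new
 * object is created and inserted into the cache.
 */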
3391 static struct mlx5dr_devx_obj *
3392 mlx5dr_definer_get_obj(struct mlx5dr_context *ctx,
3393 		       struct mlx5dr_definer *definer)
3394 {
3395 	struct mlx5dr_definer_cache *cache = ctx->definer_cache;
3396 	struct mlx5dr_cmd_definer_create_attr def_attr = {0};
3397 	struct mlx5dr_definer_cache_item *cached_definer;
3398 	struct mlx5dr_devx_obj *obj;
3399 
3400 	/* Search definer cache for requested definer */
3401 	LIST_FOREACH(cached_definer, &cache->head, next) {
3402 		if (mlx5dr_definer_compare(&cached_definer->definer, definer))
3403 			continue;
3404 
3405 		/* Reuse the definer and refresh LRU order (move it to the head of the list) */
3406 		LIST_REMOVE(cached_definer, next);
3407 		LIST_INSERT_HEAD(&cache->head, cached_definer, next);
3408 		cached_definer->refcount++;
3409 		return cached_definer->definer.obj;
3410 	}
3411 
3412 	/* Allocate and create definer based on the bitmask tag */
3413 	def_attr.match_mask = definer->mask.jumbo;
3414 	def_attr.dw_selector = definer->dw_selector;
3415 	def_attr.byte_selector = definer->byte_selector;
3416 
3417 	obj = mlx5dr_cmd_definer_create(ctx->ibv_ctx, &def_attr);
3418 	if (!obj)
3419 		return NULL;
3420 
3421 	cached_definer = simple_calloc(1, sizeof(*cached_definer));
3422 	if (!cached_definer) {
3423 		rte_errno = ENOMEM;
3424 		goto free_definer_obj;
3425 	}
3426 
3427 	memcpy(&cached_definer->definer, definer, sizeof(*definer));
3428 	cached_definer->definer.obj = obj;
3429 	cached_definer->refcount = 1;
3430 	LIST_INSERT_HEAD(&cache->head, cached_definer, next);
3431 
3432 	return obj;
3433 
3434 free_definer_obj:
3435 	mlx5dr_cmd_destroy_obj(obj);
3436 	return NULL;
3437 }
3438 
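/* Release an object taken via mlx5dr_definer_get_obj(): decrement the cache
 * entry refcount and destroy the object once it reaches zero.
 */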
3439 static void
3440 mlx5dr_definer_put_obj(struct mlx5dr_context *ctx,
3441 		       struct mlx5dr_devx_obj *obj)
3442 {
3443 	struct mlx5dr_definer_cache_item *cached_definer;
3444 
3445 	LIST_FOREACH(cached_definer, &ctx->definer_cache->head, next) {
3446 		if (cached_definer->definer.obj != obj)
3447 			continue;
3448 
3449 		/* Object found */
3450 		if (--cached_definer->refcount)
3451 			return;
3452 
3453 		LIST_REMOVE(cached_definer, next);
3454 		mlx5dr_cmd_destroy_obj(cached_definer->definer.obj);
3455 		simple_free(cached_definer);
3456 		return;
3457 	}
3458 
3459 	/* Programming error, object must be part of cache */
3460 	assert(false);
3461 }
3462 
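/* Allocate a definer from the given layout: optionally bind the field copy
 * array to it, build the tag mask and take a (possibly cached) DevX object.
 */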
3463 static struct mlx5dr_definer *
3464 mlx5dr_definer_alloc(struct mlx5dr_context *ctx,
3465 		     struct mlx5dr_definer_fc *fc,
3466 		     int fc_sz,
3467 		     struct rte_flow_item *items,
3468 		     struct mlx5dr_definer *layout,
3469 		     bool bind_fc)
3470 {
3471 	struct mlx5dr_definer *definer;
3472 	int ret;
3473 
3474 	definer = simple_calloc(1, sizeof(*definer));
3475 	if (!definer) {
3476 		DR_LOG(ERR, "Failed to allocate memory for definer");
3477 		rte_errno = ENOMEM;
3478 		return NULL;
3479 	}
3480 
3481 	memcpy(definer, layout, sizeof(*definer));
3482 
3483 	/* Bind the field copy array to the given definer layout */
3484 	if (bind_fc) {
3485 		ret = mlx5dr_definer_fc_bind(definer, fc, fc_sz);
3486 		if (ret) {
3487 			DR_LOG(ERR, "Failed to bind field copy to definer");
3488 			goto free_definer;
3489 		}
3490 	}
3491 
3492 	/* Create the tag mask used for definer creation */
3493 	mlx5dr_definer_create_tag_mask(items, fc, fc_sz, definer->mask.jumbo);
3494 
3495 	definer->obj = mlx5dr_definer_get_obj(ctx, definer);
3496 	if (!definer->obj)
3497 		goto free_definer;
3498 
3499 	return definer;
3500 
3501 free_definer:
3502 	simple_free(definer);
3503 	return NULL;
3504 }
3505 
3506 static void
3507 mlx5dr_definer_free(struct mlx5dr_context *ctx,
3508 		    struct mlx5dr_definer *definer)
3509 {
3510 	mlx5dr_definer_put_obj(ctx, definer->obj);
3511 	simple_free(definer);
3512 }
3513 
3514 static int
3515 mlx5dr_definer_matcher_match_init(struct mlx5dr_context *ctx,
3516 				  struct mlx5dr_matcher *matcher,
3517 				  struct mlx5dr_definer *match_layout)
3518 {
3519 	struct mlx5dr_match_template *mt = matcher->mt;
3520 	int i;
3521 
3522 	/* Create mandatory match definer */
3523 	for (i = 0; i < matcher->num_of_mt; i++) {
3524 		mt[i].definer = mlx5dr_definer_alloc(ctx,
3525 						     mt[i].fc,
3526 						     mt[i].fc_sz,
3527 						     mt[i].items,
3528 						     match_layout,
3529 						     true);
3530 		if (!mt[i].definer) {
3531 			DR_LOG(ERR, "Failed to create match definer");
3532 			goto free_definers;
3533 		}
3534 	}
3535 	return 0;
3536 
3537 free_definers:
3538 	while (i--)
3539 		mlx5dr_definer_free(ctx, mt[i].definer);
3540 
3541 	return rte_errno;
3542 }
3543 
3544 static void
3545 mlx5dr_definer_matcher_match_uninit(struct mlx5dr_matcher *matcher)
3546 {
3547 	struct mlx5dr_context *ctx = matcher->tbl->ctx;
3548 	int i;
3549 
3550 	for (i = 0; i < matcher->num_of_mt; i++)
3551 		mlx5dr_definer_free(ctx, matcher->mt[i].definer);
3552 }
3553 
3554 static int
3555 mlx5dr_definer_matcher_range_init(struct mlx5dr_context *ctx,
3556 				  struct mlx5dr_matcher *matcher,
3557 				  struct mlx5dr_definer *range_layout)
3558 {
3559 	struct mlx5dr_match_template *mt = matcher->mt;
3560 	int i;
3561 
3562 	/* Create optional range definers */
3563 	for (i = 0; i < matcher->num_of_mt; i++) {
3564 		if (!mt[i].fcr_sz)
3565 			continue;
3566 
3567 		/* If any template requests range matching, all of them must use it */
3568 		if (i && !mt[i - 1].range_definer) {
3569 			DR_LOG(ERR, "Mixing range and non-range templates is not allowed");
3570 			goto free_definers;
3571 		}
3572 
3573 		matcher->flags |= MLX5DR_MATCHER_FLAGS_RANGE_DEFINER;
3574 		/* Create definer without fcr binding, it is already bound */
3575 		mt[i].range_definer = mlx5dr_definer_alloc(ctx,
3576 							   mt[i].fcr,
3577 							   mt[i].fcr_sz,
3578 							   mt[i].items,
3579 							   range_layout,
3580 							   false);
3581 		if (!mt[i].range_definer) {
3582 			DR_LOG(ERR, "Failed to create range definer");
3583 			goto free_definers;
3584 		}
3585 	}
3586 	return 0;
3587 
3588 free_definers:
3589 	while (i--)
3590 		if (mt[i].range_definer)
3591 			mlx5dr_definer_free(ctx, mt[i].range_definer);
3592 
3593 	return rte_errno;
3594 }
3595 
3596 static void
3597 mlx5dr_definer_matcher_range_uninit(struct mlx5dr_matcher *matcher)
3598 {
3599 	struct mlx5dr_context *ctx = matcher->tbl->ctx;
3600 	int i;
3601 
3602 	for (i = 0; i < matcher->num_of_mt; i++)
3603 		if (matcher->mt[i].range_definer)
3604 			mlx5dr_definer_free(ctx, matcher->mt[i].range_definer);
3605 }
3606 
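/* A dedicated hash definer is only needed when the match templates use
 * different definers. It is built from the intersection of all template
 * masks so that every template hashes on a common set of fields.
 */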
3607 static int
3608 mlx5dr_definer_matcher_hash_init(struct mlx5dr_context *ctx,
3609 				 struct mlx5dr_matcher *matcher)
3610 {
3611 	struct mlx5dr_cmd_definer_create_attr def_attr = {0};
3612 	struct mlx5dr_match_template *mt = matcher->mt;
3613 	struct ibv_context *ibv_ctx = ctx->ibv_ctx;
3614 	uint8_t *bit_mask;
3615 	int i, j;
3616 
3617 	for (i = 1; i < matcher->num_of_mt; i++)
3618 		if (mlx5dr_definer_compare(mt[i].definer, mt[i - 1].definer))
3619 			matcher->flags |= MLX5DR_MATCHER_FLAGS_HASH_DEFINER;
3620 
3621 	if (!(matcher->flags & MLX5DR_MATCHER_FLAGS_HASH_DEFINER))
3622 		return 0;
3623 
3624 	/* Insert by index requires all MTs to use the same definer */
3625 	if (matcher->attr.insert_mode == MLX5DR_MATCHER_INSERT_BY_INDEX) {
3626 		DR_LOG(ERR, "Insert by index not supported with MT combination");
3627 		rte_errno = EOPNOTSUPP;
3628 		return rte_errno;
3629 	}
3630 
3631 	matcher->hash_definer = simple_calloc(1, sizeof(*matcher->hash_definer));
3632 	if (!matcher->hash_definer) {
3633 		DR_LOG(ERR, "Failed to allocate memory for hash definer");
3634 		rte_errno = ENOMEM;
3635 		return rte_errno;
3636 	}
3637 
3638 	/* Calculate intersection between all match templates bitmasks.
3639 	 * We will use mt[0] as reference and intersect it with mt[1..n].
3640 	 * From this we will get:
3641 	 * hash_definer.selectors = mt[0].selectors
3642 	 * hash_definer.mask = mt[0].mask & mt[1].mask & ... & mt[n].mask
3643 	 */
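	/* Illustrative example (assumed byte values): if mt[0] masks a byte as
	 * 0xF0 and mt[1] masks it as 0x3C, the hash definer keeps
	 * 0xF0 & 0x3C = 0x30, i.e. only bits matched by every template
	 * take part in the hash.
	 */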
3644 
3645 	/* Use the first definer, which should also contain the intersection fields */
3646 	memcpy(matcher->hash_definer, mt->definer, sizeof(struct mlx5dr_definer));
3647 
3648 	/* Calculate the intersection between the first and all other match templates' bitmasks */
3649 	for (i = 1; i < matcher->num_of_mt; i++) {
3650 		bit_mask = (uint8_t *)&mt[i].definer->mask;
3651 		for (j = 0; j < MLX5DR_JUMBO_TAG_SZ; j++)
3652 			((uint8_t *)&matcher->hash_definer->mask)[j] &= bit_mask[j];
3653 	}
3654 
3655 	def_attr.match_mask = matcher->hash_definer->mask.jumbo;
3656 	def_attr.dw_selector = matcher->hash_definer->dw_selector;
3657 	def_attr.byte_selector = matcher->hash_definer->byte_selector;
3658 	matcher->hash_definer->obj = mlx5dr_cmd_definer_create(ibv_ctx, &def_attr);
3659 	if (!matcher->hash_definer->obj) {
3660 		DR_LOG(ERR, "Failed to create hash definer");
3661 		goto free_hash_definer;
3662 	}
3663 
3664 	return 0;
3665 
3666 free_hash_definer:
3667 	simple_free(matcher->hash_definer);
3668 	return rte_errno;
3669 }
3670 
3671 static void
3672 mlx5dr_definer_matcher_hash_uninit(struct mlx5dr_matcher *matcher)
3673 {
3674 	if (!matcher->hash_definer)
3675 		return;
3676 
3677 	mlx5dr_cmd_destroy_obj(matcher->hash_definer->obj);
3678 	simple_free(matcher->hash_definer);
3679 }
3680 
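/* Build all definers required by a non-collision matcher: per-template match
 * definers, optional range definers and, when templates differ, a shared
 * hash definer.
 */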
3681 int mlx5dr_definer_matcher_init(struct mlx5dr_context *ctx,
3682 				struct mlx5dr_matcher *matcher)
3683 {
3684 	struct mlx5dr_definer match_layout = {0};
3685 	struct mlx5dr_definer range_layout = {0};
3686 	int ret, i;
3687 
3688 	if (matcher->flags & MLX5DR_MATCHER_FLAGS_COLLISION)
3689 		return 0;
3690 
3691 	ret = mlx5dr_definer_calc_layout(matcher, &match_layout, &range_layout);
3692 	if (ret) {
3693 		DR_LOG(ERR, "Failed to calculate matcher definer layout");
3694 		return ret;
3695 	}
3696 
3697 	/* Calculate definers needed for exact match */
3698 	ret = mlx5dr_definer_matcher_match_init(ctx, matcher, &match_layout);
3699 	if (ret) {
3700 		DR_LOG(ERR, "Failed to init match definers");
3701 		goto free_fc;
3702 	}
3703 
3704 	/* Calculate definers needed for range */
3705 	ret = mlx5dr_definer_matcher_range_init(ctx, matcher, &range_layout);
3706 	if (ret) {
3707 		DR_LOG(ERR, "Failed to init range definers");
3708 		goto uninit_match_definer;
3709 	}
3710 
3711 	/* Calculate partial hash definer */
3712 	ret = mlx5dr_definer_matcher_hash_init(ctx, matcher);
3713 	if (ret) {
3714 		DR_LOG(ERR, "Failed to init hash definer");
3715 		goto uninit_range_definer;
3716 	}
3717 
3718 	return 0;
3719 
3720 uninit_range_definer:
3721 	mlx5dr_definer_matcher_range_uninit(matcher);
3722 uninit_match_definer:
3723 	mlx5dr_definer_matcher_match_uninit(matcher);
3724 free_fc:
3725 	for (i = 0; i < matcher->num_of_mt; i++)
3726 		simple_free(matcher->mt[i].fc);
3727 
3728 	return ret;
3729 }
3730 
3731 void mlx5dr_definer_matcher_uninit(struct mlx5dr_matcher *matcher)
3732 {
3733 	int i;
3734 
3735 	if (matcher->flags & MLX5DR_MATCHER_FLAGS_COLLISION)
3736 		return;
3737 
3738 	mlx5dr_definer_matcher_hash_uninit(matcher);
3739 	mlx5dr_definer_matcher_range_uninit(matcher);
3740 	mlx5dr_definer_matcher_match_uninit(matcher);
3741 
3742 	for (i = 0; i < matcher->num_of_mt; i++)
3743 		simple_free(matcher->mt[i].fc);
3744 }
3745