/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <stdalign.h>
#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <pthread.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_branch_prediction.h>
#include <rte_string_fns.h>
#include <rte_mbuf_dyn.h>
#include "rte_flow_driver.h"
#include "rte_flow.h"

#include "ethdev_trace.h"

#define FLOW_LOG RTE_ETHDEV_LOG_LINE

/* Mbuf dynamic field name for metadata. */
int32_t rte_flow_dynf_metadata_offs = -1;

/* Mbuf dynamic field flag bit number for metadata. */
uint64_t rte_flow_dynf_metadata_mask;

/**
 * Flow elements description tables.
 */
struct rte_flow_desc_data {
	const char *name;
	size_t size;
	size_t (*desc_fn)(void *dst, const void *src);
};

/**
 * Copy a flow element (item or action) specification into a buffer.
 *
 * @param buf
 *   Destination memory.
 * @param data
 *   Source memory.
 * @param size
 *   Requested copy size.
 * @param desc
 *   rte_flow_desc_item - for flow item conversion.
 *   rte_flow_desc_action - for flow action conversion.
 * @param type
 *   Offset into the desc param or negative value for private flow elements.
 *
 * @return
 *   Number of bytes needed to store the element regardless of @p size.
 */
static inline size_t
rte_flow_conv_copy(void *buf, const void *data, const size_t size,
		   const struct rte_flow_desc_data *desc, int type)
{
	/* Allow PMD private flow item. */
	bool rte_type = type >= 0;

	size_t sz = rte_type ? desc[type].size : sizeof(void *);
	if (data == NULL)
		return 0;
	if (buf != NULL)
		rte_memcpy(buf, data, (size > sz ? sz : size));
	if (rte_type && desc[type].desc_fn)
		sz += desc[type].desc_fn(size > 0 ? buf : NULL, data);
	return sz;
}

static size_t
rte_flow_item_flex_conv(void *buf, const void *data)
{
	struct rte_flow_item_flex *dst = buf;
	const struct rte_flow_item_flex *src = data;
	if (buf) {
		dst->pattern = rte_memcpy
			((void *)((uintptr_t)(dst + 1)), src->pattern,
			 src->length);
	}
	return src->length;
}

/** Generate flow_item[] entry. */
#define MK_FLOW_ITEM(t, s) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
		.desc_fn = NULL, \
	}

#define MK_FLOW_ITEM_FN(t, s, fn) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
		.desc_fn = fn, \
	}

/** Information about known flow pattern items. */
static const struct rte_flow_desc_data rte_flow_desc_item[] = {
	MK_FLOW_ITEM(END, 0),
	MK_FLOW_ITEM(VOID, 0),
	MK_FLOW_ITEM(INVERT, 0),
	MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
	MK_FLOW_ITEM(PORT_ID, sizeof(struct rte_flow_item_port_id)),
	MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)),
	MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
	MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
	MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
	MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
	MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
	MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
	MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
	MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
	MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
	MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
	MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
	MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
	MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
	MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)),
	MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(ESP, sizeof(struct rte_flow_item_esp)),
	MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
	MK_FLOW_ITEM(VXLAN_GPE, sizeof(struct rte_flow_item_vxlan_gpe)),
	MK_FLOW_ITEM(ARP_ETH_IPV4, sizeof(struct rte_flow_item_arp_eth_ipv4)),
	MK_FLOW_ITEM(IPV6_EXT, sizeof(struct rte_flow_item_ipv6_ext)),
	MK_FLOW_ITEM(IPV6_FRAG_EXT, sizeof(struct rte_flow_item_ipv6_frag_ext)),
	MK_FLOW_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
	MK_FLOW_ITEM(ICMP6_ECHO_REQUEST, sizeof(struct rte_flow_item_icmp6_echo)),
	MK_FLOW_ITEM(ICMP6_ECHO_REPLY, sizeof(struct rte_flow_item_icmp6_echo)),
	MK_FLOW_ITEM(ICMP6_ND_NS, sizeof(struct rte_flow_item_icmp6_nd_ns)),
	MK_FLOW_ITEM(ICMP6_ND_NA, sizeof(struct rte_flow_item_icmp6_nd_na)),
	MK_FLOW_ITEM(ICMP6_ND_OPT, sizeof(struct rte_flow_item_icmp6_nd_opt)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_SLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_TLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
	MK_FLOW_ITEM(MARK, sizeof(struct rte_flow_item_mark)),
	MK_FLOW_ITEM(META, sizeof(struct rte_flow_item_meta)),
	MK_FLOW_ITEM(RANDOM, sizeof(struct rte_flow_item_random)),
	MK_FLOW_ITEM(TAG, sizeof(struct rte_flow_item_tag)),
	MK_FLOW_ITEM(GRE_KEY, sizeof(rte_be32_t)),
	MK_FLOW_ITEM(GRE_OPTION, sizeof(struct rte_flow_item_gre_opt)),
	MK_FLOW_ITEM(GTP_PSC, sizeof(struct rte_flow_item_gtp_psc)),
	MK_FLOW_ITEM(PPPOES, sizeof(struct rte_flow_item_pppoe)),
	MK_FLOW_ITEM(PPPOED, sizeof(struct rte_flow_item_pppoe)),
	MK_FLOW_ITEM(PPPOE_PROTO_ID,
		     sizeof(struct rte_flow_item_pppoe_proto_id)),
	MK_FLOW_ITEM(NSH, sizeof(struct rte_flow_item_nsh)),
	MK_FLOW_ITEM(IGMP, sizeof(struct rte_flow_item_igmp)),
	MK_FLOW_ITEM(AH, sizeof(struct rte_flow_item_ah)),
	MK_FLOW_ITEM(HIGIG2, sizeof(struct rte_flow_item_higig2_hdr)),
	MK_FLOW_ITEM(L2TPV3OIP, sizeof(struct rte_flow_item_l2tpv3oip)),
	MK_FLOW_ITEM(PFCP, sizeof(struct rte_flow_item_pfcp)),
	MK_FLOW_ITEM(ECPRI, sizeof(struct rte_flow_item_ecpri)),
	MK_FLOW_ITEM(GENEVE_OPT, sizeof(struct rte_flow_item_geneve_opt)),
	MK_FLOW_ITEM(INTEGRITY, sizeof(struct rte_flow_item_integrity)),
	MK_FLOW_ITEM(CONNTRACK, sizeof(uint32_t)),
	MK_FLOW_ITEM(PORT_REPRESENTOR, sizeof(struct rte_flow_item_ethdev)),
	MK_FLOW_ITEM(REPRESENTED_PORT, sizeof(struct rte_flow_item_ethdev)),
	MK_FLOW_ITEM_FN(FLEX, sizeof(struct rte_flow_item_flex),
			rte_flow_item_flex_conv),
	MK_FLOW_ITEM(L2TPV2, sizeof(struct rte_flow_item_l2tpv2)),
	MK_FLOW_ITEM(PPP, sizeof(struct rte_flow_item_ppp)),
	MK_FLOW_ITEM(METER_COLOR, sizeof(struct rte_flow_item_meter_color)),
	MK_FLOW_ITEM(IPV6_ROUTING_EXT, sizeof(struct rte_flow_item_ipv6_routing_ext)),
	MK_FLOW_ITEM(QUOTA, sizeof(struct rte_flow_item_quota)),
	MK_FLOW_ITEM(AGGR_AFFINITY, sizeof(struct rte_flow_item_aggr_affinity)),
	MK_FLOW_ITEM(TX_QUEUE, sizeof(struct rte_flow_item_tx_queue)),
	MK_FLOW_ITEM(IB_BTH, sizeof(struct rte_flow_item_ib_bth)),
	MK_FLOW_ITEM(PTYPE, sizeof(struct rte_flow_item_ptype)),
	MK_FLOW_ITEM(COMPARE, sizeof(struct rte_flow_item_compare)),
};

/** Generate flow_action[] entry. */
#define MK_FLOW_ACTION(t, s) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
		.desc_fn = NULL, \
	}

#define MK_FLOW_ACTION_FN(t, fn) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = 0, \
		.desc_fn = fn, \
	}

/** Information about known flow actions. */
static const struct rte_flow_desc_data rte_flow_desc_action[] = {
	MK_FLOW_ACTION(END, 0),
	MK_FLOW_ACTION(VOID, 0),
	MK_FLOW_ACTION(PASSTHRU, 0),
	MK_FLOW_ACTION(JUMP, sizeof(struct rte_flow_action_jump)),
	MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
	MK_FLOW_ACTION(FLAG, 0),
	MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
	MK_FLOW_ACTION(DROP, 0),
	MK_FLOW_ACTION(COUNT, sizeof(struct rte_flow_action_count)),
	MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)),
	MK_FLOW_ACTION(PF, 0),
	MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
	MK_FLOW_ACTION(PORT_ID, sizeof(struct rte_flow_action_port_id)),
	MK_FLOW_ACTION(METER, sizeof(struct rte_flow_action_meter)),
	MK_FLOW_ACTION(SECURITY, sizeof(struct rte_flow_action_security)),
	MK_FLOW_ACTION(OF_DEC_NW_TTL, 0),
	MK_FLOW_ACTION(OF_POP_VLAN, 0),
	MK_FLOW_ACTION(OF_PUSH_VLAN,
		       sizeof(struct rte_flow_action_of_push_vlan)),
	MK_FLOW_ACTION(OF_SET_VLAN_VID,
		       sizeof(struct rte_flow_action_of_set_vlan_vid)),
	MK_FLOW_ACTION(OF_SET_VLAN_PCP,
		       sizeof(struct rte_flow_action_of_set_vlan_pcp)),
	MK_FLOW_ACTION(OF_POP_MPLS,
		       sizeof(struct rte_flow_action_of_pop_mpls)),
	MK_FLOW_ACTION(OF_PUSH_MPLS,
		       sizeof(struct rte_flow_action_of_push_mpls)),
	MK_FLOW_ACTION(VXLAN_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
	MK_FLOW_ACTION(VXLAN_DECAP, 0),
	MK_FLOW_ACTION(NVGRE_ENCAP, sizeof(struct rte_flow_action_nvgre_encap)),
	MK_FLOW_ACTION(NVGRE_DECAP, 0),
	MK_FLOW_ACTION(RAW_ENCAP, sizeof(struct rte_flow_action_raw_encap)),
	MK_FLOW_ACTION(RAW_DECAP, sizeof(struct rte_flow_action_raw_decap)),
	MK_FLOW_ACTION(SET_IPV4_SRC,
		       sizeof(struct rte_flow_action_set_ipv4)),
	MK_FLOW_ACTION(SET_IPV4_DST,
		       sizeof(struct rte_flow_action_set_ipv4)),
	MK_FLOW_ACTION(SET_IPV6_SRC,
		       sizeof(struct rte_flow_action_set_ipv6)),
	MK_FLOW_ACTION(SET_IPV6_DST,
		       sizeof(struct rte_flow_action_set_ipv6)),
	MK_FLOW_ACTION(SET_TP_SRC,
		       sizeof(struct rte_flow_action_set_tp)),
	MK_FLOW_ACTION(SET_TP_DST,
		       sizeof(struct rte_flow_action_set_tp)),
	MK_FLOW_ACTION(MAC_SWAP, 0),
	MK_FLOW_ACTION(DEC_TTL, 0),
	MK_FLOW_ACTION(SET_TTL, sizeof(struct rte_flow_action_set_ttl)),
	MK_FLOW_ACTION(SET_MAC_SRC, sizeof(struct rte_flow_action_set_mac)),
	MK_FLOW_ACTION(SET_MAC_DST, sizeof(struct rte_flow_action_set_mac)),
	MK_FLOW_ACTION(INC_TCP_SEQ, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(DEC_TCP_SEQ, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(INC_TCP_ACK, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(DEC_TCP_ACK, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(SET_TAG, sizeof(struct rte_flow_action_set_tag)),
	MK_FLOW_ACTION(SET_META, sizeof(struct rte_flow_action_set_meta)),
	MK_FLOW_ACTION(SET_IPV4_DSCP, sizeof(struct rte_flow_action_set_dscp)),
	MK_FLOW_ACTION(SET_IPV6_DSCP, sizeof(struct rte_flow_action_set_dscp)),
	MK_FLOW_ACTION(AGE, sizeof(struct rte_flow_action_age)),
	MK_FLOW_ACTION(SAMPLE, sizeof(struct rte_flow_action_sample)),
	MK_FLOW_ACTION(MODIFY_FIELD,
		       sizeof(struct rte_flow_action_modify_field)),
	/**
	 * Indirect action represented as handle of type
	 * (struct rte_flow_action_handle *) stored in conf field (see
	 * struct rte_flow_action); no need for additional structure to
	 * store indirect action handle.
	 */
	MK_FLOW_ACTION(INDIRECT, 0),
	MK_FLOW_ACTION(CONNTRACK, sizeof(struct rte_flow_action_conntrack)),
	MK_FLOW_ACTION(PORT_REPRESENTOR, sizeof(struct rte_flow_action_ethdev)),
	MK_FLOW_ACTION(REPRESENTED_PORT, sizeof(struct rte_flow_action_ethdev)),
	MK_FLOW_ACTION(METER_MARK, sizeof(struct rte_flow_action_meter_mark)),
	MK_FLOW_ACTION(SEND_TO_KERNEL, 0),
	MK_FLOW_ACTION(QUOTA, sizeof(struct rte_flow_action_quota)),
	MK_FLOW_ACTION(IPV6_EXT_PUSH, sizeof(struct rte_flow_action_ipv6_ext_push)),
	MK_FLOW_ACTION(IPV6_EXT_REMOVE, sizeof(struct rte_flow_action_ipv6_ext_remove)),
	MK_FLOW_ACTION(INDIRECT_LIST,
		       sizeof(struct rte_flow_action_indirect_list)),
	MK_FLOW_ACTION(PROG,
		       sizeof(struct rte_flow_action_prog)),
	MK_FLOW_ACTION(NAT64, sizeof(struct rte_flow_action_nat64)),
};

int
rte_flow_dynf_metadata_register(void)
{
	int offset;
	int flag;

	static const struct rte_mbuf_dynfield desc_offs = {
		.name = RTE_MBUF_DYNFIELD_METADATA_NAME,
		.size = sizeof(uint32_t),
		.align = alignof(uint32_t),
	};
	static const struct rte_mbuf_dynflag desc_flag = {
		.name = RTE_MBUF_DYNFLAG_METADATA_NAME,
	};

	offset = rte_mbuf_dynfield_register(&desc_offs);
	if (offset < 0)
		goto error;
	flag = rte_mbuf_dynflag_register(&desc_flag);
	if (flag < 0)
		goto error;
	rte_flow_dynf_metadata_offs = offset;
	rte_flow_dynf_metadata_mask = RTE_BIT64(flag);

	rte_flow_trace_dynf_metadata_register(offset, RTE_BIT64(flag));

	return 0;

error:
	rte_flow_dynf_metadata_offs = -1;
	rte_flow_dynf_metadata_mask = UINT64_C(0);
	return -rte_errno;
}
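
/*
 * Usage sketch (illustrative only, assuming a PMD that can deliver
 * metadata): register the dynamic field once at startup, then read it
 * from received mbufs with the RTE_FLOW_DYNF_METADATA() accessor
 * declared in rte_flow.h:
 *
 *	uint32_t meta = 0;
 *
 *	if (rte_flow_dynf_metadata_register() < 0)
 *		rte_exit(EXIT_FAILURE, "cannot register metadata dynfield");
 *	...
 *	if (m->ol_flags & rte_flow_dynf_metadata_mask)
 *		meta = *RTE_FLOW_DYNF_METADATA(m);
 */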

static inline void
fts_enter(struct rte_eth_dev *dev)
{
	if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
		pthread_mutex_lock(&dev->data->flow_ops_mutex);
}

static inline void
fts_exit(struct rte_eth_dev *dev)
{
	if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
		pthread_mutex_unlock(&dev->data->flow_ops_mutex);
}

static int
flow_err(uint16_t port_id, int ret, struct rte_flow_error *error)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return rte_flow_error_set(error, EIO,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(EIO));
	return ret;
}

/* Get generic flow operations structure from a port. */
const struct rte_flow_ops *
rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops;
	int code;

	if (unlikely(!rte_eth_dev_is_valid_port(port_id)))
		code = ENODEV;
	else if (unlikely(dev->dev_ops->flow_ops_get == NULL))
		/* Flow API not supported with this driver dev_ops. */
		code = ENOSYS;
	else
		code = dev->dev_ops->flow_ops_get(dev, &ops);
	if (code == 0 && ops == NULL)
		/* Flow API not supported with this device. */
		code = ENOSYS;

	if (code != 0) {
		rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, rte_strerror(code));
		return NULL;
	}
	return ops;
}

/* Check whether a flow rule can be created on a given port. */
int
rte_flow_validate(uint16_t port_id,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;

	if (likely(!!attr) && attr->transfer &&
	    (attr->ingress || attr->egress)) {
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ATTR,
					  attr, "cannot use attr ingress/egress with attr transfer");
	}

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->validate)) {
		fts_enter(dev);
		ret = ops->validate(dev, attr, pattern, actions, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_validate(port_id, attr, pattern, actions, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Create a flow rule on a given port. */
struct rte_flow *
rte_flow_create(uint16_t port_id,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct rte_flow *flow;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return NULL;
	if (likely(!!ops->create)) {
		fts_enter(dev);
		flow = ops->create(dev, attr, pattern, actions, error);
		fts_exit(dev);
		if (flow == NULL)
			flow_err(port_id, -rte_errno, error);

		rte_flow_trace_create(port_id, attr, pattern, actions, flow);

		return flow;
	}
	rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(ENOSYS));
	return NULL;
}
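
/*
 * Usage sketch (illustrative only): the usual calling sequence is to
 * validate a rule and, if the port accepts it, create it. This example
 * steers all IPv4 traffic to Rx queue 1; port_id is assumed valid.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *flow = NULL;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 */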

/* Destroy a flow rule on a given port. */
int
rte_flow_destroy(uint16_t port_id,
		 struct rte_flow *flow,
		 struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->destroy)) {
		fts_enter(dev);
		ret = ops->destroy(dev, flow, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_destroy(port_id, flow, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

int
rte_flow_actions_update(uint16_t port_id,
			struct rte_flow *flow,
			const struct rte_flow_action actions[],
			struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->actions_update)) {
		fts_enter(dev);
		ret = ops->actions_update(dev, flow, actions, error);
		fts_exit(dev);

		rte_flow_trace_actions_update(port_id, flow, actions, ret);

		return flow_err(port_id, ret, error);
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Destroy all flow rules associated with a port. */
int
rte_flow_flush(uint16_t port_id,
	       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->flush)) {
		fts_enter(dev);
		ret = ops->flush(dev, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_flush(port_id, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Query an existing flow rule. */
int
rte_flow_query(uint16_t port_id,
	       struct rte_flow *flow,
	       const struct rte_flow_action *action,
	       void *data,
	       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (!ops)
		return -rte_errno;
	if (likely(!!ops->query)) {
		fts_enter(dev);
		ret = ops->query(dev, flow, action, data, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_query(port_id, flow, action, data, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Restrict ingress traffic to the defined flow rules. */
int
rte_flow_isolate(uint16_t port_id,
		 int set,
		 struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (!ops)
		return -rte_errno;
	if (likely(!!ops->isolate)) {
		fts_enter(dev);
		ret = ops->isolate(dev, set, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_isolate(port_id, set, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Initialize flow error structure. */
int
rte_flow_error_set(struct rte_flow_error *error,
		   int code,
		   enum rte_flow_error_type type,
		   const void *cause,
		   const char *message)
{
	if (error) {
		*error = (struct rte_flow_error){
			.type = type,
			.cause = cause,
			.message = message,
		};
	}
	rte_errno = code;
	return -code;
}
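
/*
 * Usage sketch (illustrative only): drivers funnel all failures through
 * rte_flow_error_set(), which fills the optional error object, sets
 * rte_errno and returns the negated code, so it can be used directly in
 * a return statement:
 *
 *	if (attr->priority)
 *		return rte_flow_error_set(error, ENOTSUP,
 *					  RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
 *					  attr, "priorities are not supported");
 */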

/** Pattern item specification types. */
enum rte_flow_conv_item_spec_type {
	RTE_FLOW_CONV_ITEM_SPEC,
	RTE_FLOW_CONV_ITEM_LAST,
	RTE_FLOW_CONV_ITEM_MASK,
};

/**
 * Copy pattern item specification.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] item
 *   Pattern item to copy specification from.
 * @param type
 *   Specification selector for either @p spec, @p last or @p mask.
 *
 * @return
 *   Number of bytes needed to store pattern item specification regardless
 *   of @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_item_spec(void *buf, const size_t size,
			const struct rte_flow_item *item,
			enum rte_flow_conv_item_spec_type type)
{
	size_t off;
	const void *data =
		type == RTE_FLOW_CONV_ITEM_SPEC ? item->spec :
		type == RTE_FLOW_CONV_ITEM_LAST ? item->last :
		type == RTE_FLOW_CONV_ITEM_MASK ? item->mask :
		NULL;

	switch (item->type) {
		union {
			const struct rte_flow_item_raw *raw;
		} spec;
		union {
			const struct rte_flow_item_raw *raw;
		} last;
		union {
			const struct rte_flow_item_raw *raw;
		} mask;
		union {
			const struct rte_flow_item_raw *raw;
		} src;
		union {
			struct rte_flow_item_raw *raw;
		} dst;
		size_t tmp;

	case RTE_FLOW_ITEM_TYPE_RAW:
		spec.raw = item->spec;
		last.raw = item->last ? item->last : item->spec;
		mask.raw = item->mask ? item->mask : &rte_flow_item_raw_mask;
		src.raw = data;
		dst.raw = buf;
		rte_memcpy(dst.raw,
			   (&(struct rte_flow_item_raw){
				.relative = src.raw->relative,
				.search = src.raw->search,
				.reserved = src.raw->reserved,
				.offset = src.raw->offset,
				.limit = src.raw->limit,
				.length = src.raw->length,
			   }),
			   size > sizeof(*dst.raw) ? sizeof(*dst.raw) : size);
		off = sizeof(*dst.raw);
		if (type == RTE_FLOW_CONV_ITEM_SPEC ||
		    (type == RTE_FLOW_CONV_ITEM_MASK &&
		     ((spec.raw->length & mask.raw->length) >=
		      (last.raw->length & mask.raw->length))))
			tmp = spec.raw->length & mask.raw->length;
		else
			tmp = last.raw->length & mask.raw->length;
		if (tmp) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.raw->pattern));
			if (size >= off + tmp)
				dst.raw->pattern = rte_memcpy
					((void *)((uintptr_t)dst.raw + off),
					 src.raw->pattern, tmp);
			off += tmp;
		}
		break;
	default:
		off = rte_flow_conv_copy(buf, data, size,
					 rte_flow_desc_item, item->type);
		break;
	}
	return off;
}

/**
 * Copy action configuration.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] action
 *   Action to copy configuration from.
 *
 * @return
 *   Number of bytes needed to store the action configuration regardless
 *   of @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_action_conf(void *buf, const size_t size,
			  const struct rte_flow_action *action)
{
	size_t off;

	switch (action->type) {
		union {
			const struct rte_flow_action_rss *rss;
			const struct rte_flow_action_vxlan_encap *vxlan_encap;
			const struct rte_flow_action_nvgre_encap *nvgre_encap;
		} src;
		union {
			struct rte_flow_action_rss *rss;
			struct rte_flow_action_vxlan_encap *vxlan_encap;
			struct rte_flow_action_nvgre_encap *nvgre_encap;
		} dst;
		size_t tmp;
		int ret;

	case RTE_FLOW_ACTION_TYPE_RSS:
		src.rss = action->conf;
		dst.rss = buf;
		rte_memcpy(dst.rss,
			   (&(struct rte_flow_action_rss){
				.func = src.rss->func,
				.level = src.rss->level,
				.types = src.rss->types,
				.key_len = src.rss->key_len,
				.queue_num = src.rss->queue_num,
			   }),
			   size > sizeof(*dst.rss) ? sizeof(*dst.rss) : size);
		off = sizeof(*dst.rss);
		if (src.rss->key_len && src.rss->key) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->key));
			tmp = sizeof(*src.rss->key) * src.rss->key_len;
			if (size >= (uint64_t)off + (uint64_t)tmp)
				dst.rss->key = rte_memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->key, tmp);
			off += tmp;
		}
		if (src.rss->queue_num) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->queue));
			tmp = sizeof(*src.rss->queue) * src.rss->queue_num;
			if (size >= (uint64_t)off + (uint64_t)tmp)
				dst.rss->queue = rte_memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->queue, tmp);
			off += tmp;
		}
		break;
	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
	case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
		src.vxlan_encap = action->conf;
		dst.vxlan_encap = buf;
		RTE_BUILD_BUG_ON(sizeof(*src.vxlan_encap) !=
				 sizeof(*src.nvgre_encap) ||
				 offsetof(struct rte_flow_action_vxlan_encap,
					  definition) !=
				 offsetof(struct rte_flow_action_nvgre_encap,
					  definition));
		off = sizeof(*dst.vxlan_encap);
		if (src.vxlan_encap->definition) {
			off = RTE_ALIGN_CEIL
				(off, sizeof(*dst.vxlan_encap->definition));
			ret = rte_flow_conv
				(RTE_FLOW_CONV_OP_PATTERN,
				 (void *)((uintptr_t)dst.vxlan_encap + off),
				 size > off ? size - off : 0,
				 src.vxlan_encap->definition, NULL);
			if (ret < 0)
				return 0;
			if (size >= off + ret)
				dst.vxlan_encap->definition =
					(void *)((uintptr_t)dst.vxlan_encap +
						 off);
			off += ret;
		}
		break;
	default:
		off = rte_flow_conv_copy(buf, action->conf, size,
					 rte_flow_desc_action, action->type);
		break;
	}
	return off;
}

/**
 * Copy a list of pattern items.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source pattern items.
 * @param num
 *   Maximum number of pattern items to process from @p src or 0 to process
 *   the entire list. In both cases, processing stops after
 *   RTE_FLOW_ITEM_TYPE_END is encountered.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store
 *   pattern items regardless of @p size on success (@p dst contents are
 *   truncated to @p size if not large enough), a negative errno value
 *   otherwise and rte_errno is set.
 */
static int
rte_flow_conv_pattern(struct rte_flow_item *dst,
		      const size_t size,
		      const struct rte_flow_item *src,
		      unsigned int num,
		      struct rte_flow_error *error)
{
	uintptr_t data = (uintptr_t)dst;
	size_t off;
	size_t ret;
	unsigned int i;

	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
		/* Allow PMD private flow item. */
		if (((int)src->type >= 0) &&
		    ((size_t)src->type >= RTE_DIM(rte_flow_desc_item) ||
		     !rte_flow_desc_item[src->type].name))
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, src,
				 "cannot convert unknown item type");
		if (size >= off + sizeof(*dst))
			*dst = (struct rte_flow_item){
				.type = src->type,
			};
		off += sizeof(*dst);
		if (!src->type)
			num = i + 1;
	}
	num = i;
	src -= num;
	dst -= num;
	do {
		if (src->spec) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_SPEC);
			if (size && size >= off + ret)
				dst->spec = (void *)(data + off);
			off += ret;
		}
		if (src->last) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_LAST);
			if (size && size >= off + ret)
				dst->last = (void *)(data + off);
			off += ret;
		}
		if (src->mask) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_MASK);
			if (size && size >= off + ret)
				dst->mask = (void *)(data + off);
			off += ret;
		}
		++src;
		++dst;
	} while (--num);
	return off;
}

/**
 * Copy a list of actions.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source actions.
 * @param num
 *   Maximum number of actions to process from @p src or 0 to process the
 *   entire list. In both cases, processing stops after
 *   RTE_FLOW_ACTION_TYPE_END is encountered.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store
 *   actions regardless of @p size on success (@p dst contents are truncated
 *   to @p size if not large enough), a negative errno value otherwise and
 *   rte_errno is set.
 */
static int
rte_flow_conv_actions(struct rte_flow_action *dst,
		      const size_t size,
		      const struct rte_flow_action *src,
		      unsigned int num,
		      struct rte_flow_error *error)
{
	uintptr_t data = (uintptr_t)dst;
	size_t off;
	size_t ret;
	unsigned int i;

	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
		/* Allow PMD private flow action. */
		if (((int)src->type >= 0) &&
		    ((size_t)src->type >= RTE_DIM(rte_flow_desc_action) ||
		     !rte_flow_desc_action[src->type].name))
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
				 src, "cannot convert unknown action type");
		if (size >= off + sizeof(*dst))
			*dst = (struct rte_flow_action){
				.type = src->type,
			};
		off += sizeof(*dst);
		if (!src->type)
			num = i + 1;
	}
	num = i;
	src -= num;
	dst -= num;
	do {
		if (src->type == RTE_FLOW_ACTION_TYPE_INDIRECT) {
			/*
			 * Indirect action conf fills the indirect action
			 * handle. Copy the action handle directly instead
			 * of duplicating the pointer memory.
			 */
			if (size)
				dst->conf = src->conf;
		} else if (src->conf) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_action_conf
				((void *)(data + off),
				 size > off ? size - off : 0, src);
			if (size && size >= off + ret)
				dst->conf = (void *)(data + off);
			off += ret;
		}
		++src;
		++dst;
	} while (--num);
	return off;
}

/**
 * Copy flow rule components.
 *
 * This comprises the flow rule descriptor itself, attributes, pattern and
 * actions list. NULL components in @p src are skipped.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source flow rule descriptor.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store all
 *   components including the descriptor regardless of @p size on success
 *   (@p dst contents are truncated to @p size if not large enough), a
 *   negative errno value otherwise and rte_errno is set.
 */
static int
rte_flow_conv_rule(struct rte_flow_conv_rule *dst,
		   const size_t size,
		   const struct rte_flow_conv_rule *src,
		   struct rte_flow_error *error)
{
	size_t off;
	int ret;

	rte_memcpy(dst,
		   (&(struct rte_flow_conv_rule){
			.attr = NULL,
			.pattern = NULL,
			.actions = NULL,
		   }),
		   size > sizeof(*dst) ? sizeof(*dst) : size);
	off = sizeof(*dst);
	if (src->attr_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		if (size && size >= off + sizeof(*dst->attr))
			dst->attr = rte_memcpy
				((void *)((uintptr_t)dst + off),
				 src->attr_ro, sizeof(*dst->attr));
		off += sizeof(*dst->attr);
	}
	if (src->pattern_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		ret = rte_flow_conv_pattern((void *)((uintptr_t)dst + off),
					    size > off ? size - off : 0,
					    src->pattern_ro, 0, error);
		if (ret < 0)
			return ret;
		if (size && size >= off + (size_t)ret)
			dst->pattern = (void *)((uintptr_t)dst + off);
		off += ret;
	}
	if (src->actions_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		ret = rte_flow_conv_actions((void *)((uintptr_t)dst + off),
					    size > off ? size - off : 0,
					    src->actions_ro, 0, error);
		if (ret < 0)
			return ret;
		if (size >= off + (size_t)ret)
			dst->actions = (void *)((uintptr_t)dst + off);
		off += ret;
	}
	return off;
}

/**
 * Retrieve the name of a pattern item/action type.
 *
 * @param is_action
 *   Nonzero when @p src represents an action type instead of a pattern item
 *   type.
 * @param is_ptr
 *   Nonzero to write string address instead of contents into @p dst.
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Depending on @p is_action, source pattern item or action type cast as a
 *   pointer.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store the
 *   name or its address regardless of @p size on success (@p dst contents
 *   are truncated to @p size if not large enough), a negative errno value
 *   otherwise and rte_errno is set.
 */
static int
rte_flow_conv_name(int is_action,
		   int is_ptr,
		   char *dst,
		   const size_t size,
		   const void *src,
		   struct rte_flow_error *error)
{
	struct desc_info {
		const struct rte_flow_desc_data *data;
		size_t num;
	};
	static const struct desc_info info_rep[2] = {
		{ rte_flow_desc_item, RTE_DIM(rte_flow_desc_item), },
		{ rte_flow_desc_action, RTE_DIM(rte_flow_desc_action), },
	};
	const struct desc_info *const info = &info_rep[!!is_action];
	unsigned int type = (uintptr_t)src;

	if (type >= info->num)
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			 "unknown object type to retrieve the name of");
	if (!is_ptr)
		return strlcpy(dst, info->data[type].name, size);
	if (size >= sizeof(const char **))
		*((const char **)dst) = info->data[type].name;
	return sizeof(const char **);
}

/** Helper function to convert flow API objects. */
int
rte_flow_conv(enum rte_flow_conv_op op,
	      void *dst,
	      size_t size,
	      const void *src,
	      struct rte_flow_error *error)
{
	int ret;

	switch (op) {
		const struct rte_flow_attr *attr;
		const struct rte_flow_item *item;

	case RTE_FLOW_CONV_OP_NONE:
		ret = 0;
		break;
	case RTE_FLOW_CONV_OP_ATTR:
		attr = src;
		if (size > sizeof(*attr))
			size = sizeof(*attr);
		rte_memcpy(dst, attr, size);
		ret = sizeof(*attr);
		break;
	case RTE_FLOW_CONV_OP_ITEM:
		ret = rte_flow_conv_pattern(dst, size, src, 1, error);
		break;
	case RTE_FLOW_CONV_OP_ITEM_MASK:
		item = src;
		if (item->mask == NULL) {
			ret = rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_MASK,
						 item, "Mask not provided");
			break;
		}
		ret = rte_flow_conv_item_spec(dst, size, src, RTE_FLOW_CONV_ITEM_MASK);
		break;
	case RTE_FLOW_CONV_OP_ACTION:
		ret = rte_flow_conv_actions(dst, size, src, 1, error);
		break;
	case RTE_FLOW_CONV_OP_PATTERN:
		ret = rte_flow_conv_pattern(dst, size, src, 0, error);
		break;
	case RTE_FLOW_CONV_OP_ACTIONS:
		ret = rte_flow_conv_actions(dst, size, src, 0, error);
		break;
	case RTE_FLOW_CONV_OP_RULE:
		ret = rte_flow_conv_rule(dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ITEM_NAME:
		ret = rte_flow_conv_name(0, 0, dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ACTION_NAME:
		ret = rte_flow_conv_name(1, 0, dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ITEM_NAME_PTR:
		ret = rte_flow_conv_name(0, 1, dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ACTION_NAME_PTR:
		ret = rte_flow_conv_name(1, 1, dst, size, src, error);
		break;
	default:
		ret = rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			 "unknown object conversion operation");
	}

	rte_flow_trace_conv(op, dst, size, src, ret);

	return ret;
}
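
/*
 * Usage sketch (illustrative only): since rte_flow_conv() always returns
 * the number of bytes needed regardless of the buffer size, conversions
 * are typically done in two passes, one to size the buffer and one to
 * fill it:
 *
 *	struct rte_flow_conv_rule rule = {
 *		.attr_ro = attr,
 *		.pattern_ro = pattern,
 *		.actions_ro = actions,
 *	};
 *	int len = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, &err);
 *
 *	if (len > 0) {
 *		struct rte_flow_conv_rule *copy = malloc(len);
 *
 *		if (copy != NULL)
 *			rte_flow_conv(RTE_FLOW_CONV_OP_RULE, copy, len,
 *				      &rule, &err);
 *	}
 */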

/** Store a full rte_flow description. */
size_t
rte_flow_copy(struct rte_flow_desc *desc, size_t len,
	      const struct rte_flow_attr *attr,
	      const struct rte_flow_item *items,
	      const struct rte_flow_action *actions)
{
	/*
	 * Overlap struct rte_flow_conv_rule with struct rte_flow_desc in
	 * order to convert the former to the latter without wasting space.
	 */
	struct rte_flow_conv_rule *dst =
		len ?
		(void *)((uintptr_t)desc +
			 (offsetof(struct rte_flow_desc, actions) -
			  offsetof(struct rte_flow_conv_rule, actions))) :
		NULL;
	size_t dst_size =
		len > sizeof(*desc) - sizeof(*dst) ?
		len - (sizeof(*desc) - sizeof(*dst)) :
		0;
	struct rte_flow_conv_rule src = {
		.attr_ro = NULL,
		.pattern_ro = items,
		.actions_ro = actions,
	};
	int ret;

	RTE_BUILD_BUG_ON(sizeof(struct rte_flow_desc) <
			 sizeof(struct rte_flow_conv_rule));
	if (dst_size &&
	    (&dst->pattern != &desc->items ||
	     &dst->actions != &desc->actions ||
	     (uintptr_t)(dst + 1) != (uintptr_t)(desc + 1))) {
		rte_errno = EINVAL;
		return 0;
	}
	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, dst, dst_size, &src, NULL);
	if (ret < 0)
		return 0;
	ret += sizeof(*desc) - sizeof(*dst);
	rte_memcpy(desc,
		   (&(struct rte_flow_desc){
			.size = ret,
			.attr = *attr,
			.items = dst_size ? dst->pattern : NULL,
			.actions = dst_size ? dst->actions : NULL,
		   }),
		   len > sizeof(*desc) ? sizeof(*desc) : len);

	rte_flow_trace_copy(desc, len, attr, items, actions, ret);

	return ret;
}

int
rte_flow_dev_dump(uint16_t port_id, struct rte_flow *flow,
		  FILE *file, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->dev_dump)) {
		fts_enter(dev);
		ret = ops->dev_dump(dev, flow, file, error);
		fts_exit(dev);
		return flow_err(port_id, ret, error);
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

int
rte_flow_get_aged_flows(uint16_t port_id, void **contexts,
			uint32_t nb_contexts, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->get_aged_flows)) {
		fts_enter(dev);
		ret = ops->get_aged_flows(dev, contexts, nb_contexts, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_get_aged_flows(port_id, contexts, nb_contexts, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}
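
/*
 * Usage sketch (illustrative only): on an RTE_ETH_EVENT_FLOW_AGED event,
 * a first call with nb_contexts == 0 returns how many contexts aged out;
 * a second call collects the context pointers supplied earlier through
 * struct rte_flow_action_age:
 *
 *	int n = rte_flow_get_aged_flows(port_id, NULL, 0, &err);
 *
 *	if (n > 0) {
 *		void **ctx = calloc(n, sizeof(*ctx));
 *
 *		if (ctx != NULL)
 *			n = rte_flow_get_aged_flows(port_id, ctx, n, &err);
 *	}
 */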

int
rte_flow_get_q_aged_flows(uint16_t port_id, uint32_t queue_id, void **contexts,
			  uint32_t nb_contexts, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->get_q_aged_flows)) {
		fts_enter(dev);
		ret = ops->get_q_aged_flows(dev, queue_id, contexts,
					    nb_contexts, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_get_q_aged_flows(port_id, queue_id, contexts,
						nb_contexts, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

struct rte_flow_action_handle *
rte_flow_action_handle_create(uint16_t port_id,
			      const struct rte_flow_indir_action_conf *conf,
			      const struct rte_flow_action *action,
			      struct rte_flow_error *error)
{
	struct rte_flow_action_handle *handle;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return NULL;
	if (unlikely(!ops->action_handle_create)) {
		rte_flow_error_set(error, ENOSYS,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   rte_strerror(ENOSYS));
		return NULL;
	}
	handle = ops->action_handle_create(&rte_eth_devices[port_id],
					   conf, action, error);
	if (handle == NULL)
		flow_err(port_id, -rte_errno, error);

	rte_flow_trace_action_handle_create(port_id, conf, action, handle);

	return handle;
}

int
rte_flow_action_handle_destroy(uint16_t port_id,
			       struct rte_flow_action_handle *handle,
			       struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(!ops->action_handle_destroy))
		return rte_flow_error_set(error, ENOSYS,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOSYS));
	ret = ops->action_handle_destroy(&rte_eth_devices[port_id],
					 handle, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_action_handle_destroy(port_id, handle, ret);

	return ret;
}

int
rte_flow_action_handle_update(uint16_t port_id,
			      struct rte_flow_action_handle *handle,
			      const void *update,
			      struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(!ops->action_handle_update))
		return rte_flow_error_set(error, ENOSYS,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOSYS));
	ret = ops->action_handle_update(&rte_eth_devices[port_id], handle,
					update, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_action_handle_update(port_id, handle, update, ret);

	return ret;
}

int
rte_flow_action_handle_query(uint16_t port_id,
			     const struct rte_flow_action_handle *handle,
			     void *data,
			     struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(!ops->action_handle_query))
		return rte_flow_error_set(error, ENOSYS,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOSYS));
	ret = ops->action_handle_query(&rte_eth_devices[port_id], handle,
				       data, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_action_handle_query(port_id, handle, data, ret);

	return ret;
}
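
/*
 * Usage sketch (illustrative only): an indirect (shared) counter created
 * once, referenced from rules through RTE_FLOW_ACTION_TYPE_INDIRECT and
 * queried through its handle:
 *
 *	struct rte_flow_indir_action_conf iconf = { .ingress = 1 };
 *	struct rte_flow_action count = { .type = RTE_FLOW_ACTION_TYPE_COUNT };
 *	struct rte_flow_action_handle *h =
 *		rte_flow_action_handle_create(port_id, &iconf, &count, &err);
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_INDIRECT, .conf = h },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_query_count stats = { .reset = 0 };
 *
 *	rte_flow_action_handle_query(port_id, h, &stats, &err);
 */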

int
rte_flow_tunnel_decap_set(uint16_t port_id,
			  struct rte_flow_tunnel *tunnel,
			  struct rte_flow_action **actions,
			  uint32_t *num_of_actions,
			  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_decap_set)) {
		ret = flow_err(port_id,
			       ops->tunnel_decap_set(dev, tunnel, actions,
						     num_of_actions, error),
			       error);

		rte_flow_trace_tunnel_decap_set(port_id, tunnel, actions,
						num_of_actions, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_tunnel_match(uint16_t port_id,
		      struct rte_flow_tunnel *tunnel,
		      struct rte_flow_item **items,
		      uint32_t *num_of_items,
		      struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_match)) {
		ret = flow_err(port_id,
			       ops->tunnel_match(dev, tunnel, items,
						 num_of_items, error),
			       error);

		rte_flow_trace_tunnel_match(port_id, tunnel, items, num_of_items,
					    ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_get_restore_info(uint16_t port_id,
			  struct rte_mbuf *m,
			  struct rte_flow_restore_info *restore_info,
			  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->get_restore_info)) {
		ret = flow_err(port_id,
			       ops->get_restore_info(dev, m, restore_info,
						     error),
			       error);

		rte_flow_trace_get_restore_info(port_id, m, restore_info, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

static struct {
	const struct rte_mbuf_dynflag desc;
	uint64_t value;
} flow_restore_info_dynflag = {
	.desc = { .name = "RTE_MBUF_F_RX_RESTORE_INFO", },
};

uint64_t
rte_flow_restore_info_dynflag(void)
{
	return flow_restore_info_dynflag.value;
}

int
rte_flow_restore_info_dynflag_register(void)
{
	if (flow_restore_info_dynflag.value == 0) {
		int offset = rte_mbuf_dynflag_register(&flow_restore_info_dynflag.desc);

		if (offset < 0)
			return -1;
		flow_restore_info_dynflag.value = RTE_BIT64(offset);
	}

	return 0;
}
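
/*
 * Usage sketch (illustrative only): on the datapath, test the dynamic
 * flag before calling the comparatively expensive
 * rte_flow_get_restore_info(). The restore_mask variable is assumed to
 * have been fetched once with rte_flow_restore_info_dynflag() after
 * registration:
 *
 *	if (m->ol_flags & restore_mask) {
 *		struct rte_flow_restore_info info;
 *
 *		if (rte_flow_get_restore_info(port_id, m, &info, &err) == 0 &&
 *		    (info.flags & RTE_FLOW_RESTORE_INFO_TUNNEL))
 *			process_tunnel_metadata(&info.tunnel);
 *	}
 *
 * process_tunnel_metadata() is a placeholder for application logic.
 */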

int
rte_flow_tunnel_action_decap_release(uint16_t port_id,
				     struct rte_flow_action *actions,
				     uint32_t num_of_actions,
				     struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_action_decap_release)) {
		ret = flow_err(port_id,
			       ops->tunnel_action_decap_release(dev, actions,
								num_of_actions,
								error),
			       error);

		rte_flow_trace_tunnel_action_decap_release(port_id, actions,
							   num_of_actions, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_tunnel_item_release(uint16_t port_id,
			     struct rte_flow_item *items,
			     uint32_t num_of_items,
			     struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_item_release)) {
		ret = flow_err(port_id,
			       ops->tunnel_item_release(dev, items,
							num_of_items, error),
			       error);

		rte_flow_trace_tunnel_item_release(port_id, items, num_of_items, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_pick_transfer_proxy(uint16_t port_id, uint16_t *proxy_port_id,
			     struct rte_flow_error *error)
{
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_eth_dev *dev;
	int ret;

	if (unlikely(ops == NULL))
		return -rte_errno;

	if (ops->pick_transfer_proxy == NULL) {
		*proxy_port_id = port_id;
		return 0;
	}

	dev = &rte_eth_devices[port_id];

	ret = flow_err(port_id,
		       ops->pick_transfer_proxy(dev, proxy_port_id, error),
		       error);

	rte_flow_trace_pick_transfer_proxy(port_id, proxy_port_id, ret);

	return ret;
}
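
/*
 * Usage sketch (illustrative only): transfer rules must be managed
 * through the proxy port that owns the underlying embedded switch, so
 * applications resolve it first:
 *
 *	uint16_t proxy_id;
 *	struct rte_flow_attr attr = { .transfer = 1 };
 *
 *	if (rte_flow_pick_transfer_proxy(port_id, &proxy_id, &err) == 0)
 *		flow = rte_flow_create(proxy_id, &attr, pattern, actions,
 *				       &err);
 */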

struct rte_flow_item_flex_handle *
rte_flow_flex_item_create(uint16_t port_id,
			  const struct rte_flow_item_flex_conf *conf,
			  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow_item_flex_handle *handle;

	if (unlikely(!ops))
		return NULL;
	if (unlikely(!ops->flex_item_create)) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, rte_strerror(ENOTSUP));
		return NULL;
	}
	handle = ops->flex_item_create(dev, conf, error);
	if (handle == NULL)
		flow_err(port_id, -rte_errno, error);

	rte_flow_trace_flex_item_create(port_id, conf, handle);

	return handle;
}

int
rte_flow_flex_item_release(uint16_t port_id,
			   const struct rte_flow_item_flex_handle *handle,
			   struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops || !ops->flex_item_release))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOTSUP));
	ret = ops->flex_item_release(dev, handle, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_flex_item_release(port_id, handle, ret);

	return ret;
}

int
rte_flow_info_get(uint16_t port_id,
		  struct rte_flow_port_info *port_info,
		  struct rte_flow_queue_info *queue_info,
		  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (dev->data->dev_configured == 0) {
		FLOW_LOG(INFO,
			"Device with port_id=%"PRIu16" is not configured.",
			port_id);
		return -EINVAL;
	}
	if (port_info == NULL) {
		FLOW_LOG(ERR, "Port %"PRIu16" info is NULL.", port_id);
		return -EINVAL;
	}
	if (likely(!!ops->info_get)) {
		ret = flow_err(port_id,
			       ops->info_get(dev, port_info, queue_info, error),
			       error);

		rte_flow_trace_info_get(port_id, port_info, queue_info, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_configure(uint16_t port_id,
		   const struct rte_flow_port_attr *port_attr,
		   uint16_t nb_queue,
		   const struct rte_flow_queue_attr *queue_attr[],
		   struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (dev->data->dev_configured == 0) {
		FLOW_LOG(INFO,
			"Device with port_id=%"PRIu16" is not configured.",
			port_id);
		return -EINVAL;
	}
	if (dev->data->dev_started != 0) {
		FLOW_LOG(INFO,
			"Device with port_id=%"PRIu16" already started.",
			port_id);
		return -EINVAL;
	}
	if (port_attr == NULL) {
		FLOW_LOG(ERR, "Port %"PRIu16" port attributes are NULL.", port_id);
		return -EINVAL;
	}
	if (queue_attr == NULL) {
		FLOW_LOG(ERR, "Port %"PRIu16" queue attributes are NULL.", port_id);
		return -EINVAL;
	}
	if ((port_attr->flags & RTE_FLOW_PORT_FLAG_SHARE_INDIRECT) &&
	    !rte_eth_dev_is_valid_port(port_attr->host_port_id)) {
		return rte_flow_error_set(error, ENODEV,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENODEV));
	}
	if (likely(!!ops->configure)) {
		ret = ops->configure(dev, port_attr, nb_queue, queue_attr, error);
		if (ret == 0)
			dev->data->flow_configured = 1;
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_configure(port_id, port_attr, nb_queue, queue_attr, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}
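
/*
 * Usage sketch (illustrative only): the flow engine is configured after
 * rte_eth_dev_configure() and before rte_eth_dev_start(); here with 128
 * pre-allocated counters and a single rule queue of depth 64:
 *
 *	struct rte_flow_port_attr port_attr = { .nb_counters = 128 };
 *	struct rte_flow_queue_attr qattr = { .size = 64 };
 *	const struct rte_flow_queue_attr *qattrs[] = { &qattr };
 *
 *	if (rte_flow_configure(port_id, &port_attr, 1, qattrs, &err) != 0)
 *		rte_exit(EXIT_FAILURE, "flow engine configuration failed");
 */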

struct rte_flow_pattern_template *
rte_flow_pattern_template_create(uint16_t port_id,
		const struct rte_flow_pattern_template_attr *template_attr,
		const struct rte_flow_item pattern[],
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow_pattern_template *template;

	if (unlikely(!ops))
		return NULL;
	if (dev->data->flow_configured == 0) {
		FLOW_LOG(INFO,
			"Flow engine on port_id=%"PRIu16" is not configured.",
			port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_STATE,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (template_attr == NULL) {
		FLOW_LOG(ERR,
			 "Port %"PRIu16" template attr is NULL.",
			 port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (pattern == NULL) {
		FLOW_LOG(ERR,
			 "Port %"PRIu16" pattern is NULL.",
			 port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (likely(!!ops->pattern_template_create)) {
		template = ops->pattern_template_create(dev, template_attr,
							pattern, error);
		if (template == NULL)
			flow_err(port_id, -rte_errno, error);

		rte_flow_trace_pattern_template_create(port_id, template_attr,
						       pattern, template);

		return template;
	}
	rte_flow_error_set(error, ENOTSUP,
			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(ENOTSUP));
	return NULL;
}
1766 
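/*
 * A minimal usage sketch (illustrative only): a pattern template that
 * declares matching on the whole Ethernet header, using the default
 * mask exported by the library. In template mode the item masks select
 * which fields the rules enqueued later are allowed to match on.
 *
 *	static const struct rte_flow_pattern_template_attr pt_attr = {
 *		.ingress = 1,
 *	};
 *	const struct rte_flow_item pt_pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH,
 *		  .mask = &rte_flow_item_eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_pattern_template *pt =
 *		rte_flow_pattern_template_create(port_id, &pt_attr,
 *						 pt_pattern, &flow_error);
 */
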
int
rte_flow_pattern_template_destroy(uint16_t port_id,
		struct rte_flow_pattern_template *pattern_template,
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(pattern_template == NULL))
		return 0;
	if (likely(!!ops->pattern_template_destroy)) {
		ret = flow_err(port_id,
			       ops->pattern_template_destroy(dev,
							     pattern_template,
							     error),
			       error);

		rte_flow_trace_pattern_template_destroy(port_id, pattern_template,
							ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

struct rte_flow_actions_template *
rte_flow_actions_template_create(uint16_t port_id,
			const struct rte_flow_actions_template_attr *template_attr,
			const struct rte_flow_action actions[],
			const struct rte_flow_action masks[],
			struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow_actions_template *template;

	if (unlikely(!ops))
		return NULL;
	if (dev->data->flow_configured == 0) {
		FLOW_LOG(INFO,
			"Flow engine on port_id=%"PRIu16" is not configured.",
			port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_STATE,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (template_attr == NULL) {
		FLOW_LOG(ERR,
			     "Port %"PRIu16" template attr is NULL.",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (actions == NULL) {
		FLOW_LOG(ERR,
			     "Port %"PRIu16" actions is NULL.",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (masks == NULL) {
		FLOW_LOG(ERR,
			     "Port %"PRIu16" masks is NULL.",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (likely(!!ops->actions_template_create)) {
		template = ops->actions_template_create(dev, template_attr,
							actions, masks, error);
		if (template == NULL)
			flow_err(port_id, -rte_errno, error);

		rte_flow_trace_actions_template_create(port_id, template_attr, actions,
						       masks, template);

		return template;
	}
	rte_flow_error_set(error, ENOTSUP,
			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(ENOTSUP));
	return NULL;
}

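/*
 * A minimal usage sketch (illustrative only): an actions template whose
 * mask leaves the queue index unset (zero), meaning the index becomes a
 * per-rule parameter supplied at enqueue time; a non-zero field in
 * masks[] would instead pin that field as a template constant.
 *
 *	static const struct rte_flow_actions_template_attr at_attr = {
 *		.ingress = 1,
 *	};
 *	const struct rte_flow_action_queue queue_spec = { .index = 0 };
 *	const struct rte_flow_action_queue queue_mask = { .index = 0 };
 *	const struct rte_flow_action at_actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue_spec },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	const struct rte_flow_action at_masks[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue_mask },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_actions_template *at =
 *		rte_flow_actions_template_create(port_id, &at_attr,
 *						 at_actions, at_masks,
 *						 &flow_error);
 */
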
int
rte_flow_actions_template_destroy(uint16_t port_id,
			struct rte_flow_actions_template *actions_template,
			struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(actions_template == NULL))
		return 0;
	if (likely(!!ops->actions_template_destroy)) {
		ret = flow_err(port_id,
			       ops->actions_template_destroy(dev,
							     actions_template,
							     error),
			       error);

		rte_flow_trace_actions_template_destroy(port_id, actions_template,
							ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

struct rte_flow_template_table *
rte_flow_template_table_create(uint16_t port_id,
			const struct rte_flow_template_table_attr *table_attr,
			struct rte_flow_pattern_template *pattern_templates[],
			uint8_t nb_pattern_templates,
			struct rte_flow_actions_template *actions_templates[],
			uint8_t nb_actions_templates,
			struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow_template_table *table;

	if (unlikely(!ops))
		return NULL;
	if (dev->data->flow_configured == 0) {
		FLOW_LOG(INFO,
			"Flow engine on port_id=%"PRIu16" is not configured.",
			port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_STATE,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (table_attr == NULL) {
		FLOW_LOG(ERR,
			     "Port %"PRIu16" table attr is NULL.",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (pattern_templates == NULL) {
		FLOW_LOG(ERR,
			     "Port %"PRIu16" pattern templates is NULL.",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (actions_templates == NULL) {
		FLOW_LOG(ERR,
			     "Port %"PRIu16" actions templates is NULL.",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (likely(!!ops->template_table_create)) {
		table = ops->template_table_create(dev, table_attr,
					pattern_templates, nb_pattern_templates,
					actions_templates, nb_actions_templates,
					error);
		if (table == NULL)
			flow_err(port_id, -rte_errno, error);

		rte_flow_trace_template_table_create(port_id, table_attr,
						     pattern_templates,
						     nb_pattern_templates,
						     actions_templates,
						     nb_actions_templates, table);

		return table;
	}
	rte_flow_error_set(error, ENOTSUP,
			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(ENOTSUP));
	return NULL;
}

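/*
 * A minimal usage sketch (illustrative only): combine one pattern
 * template and one actions template from the sketches above into a
 * table sized for 1024 ingress rules.
 *
 *	const struct rte_flow_template_table_attr tbl_attr = {
 *		.flow_attr = { .ingress = 1 },
 *		.nb_flows = 1024,
 *	};
 *	struct rte_flow_pattern_template *pts[] = { pt };
 *	struct rte_flow_actions_template *ats[] = { at };
 *	struct rte_flow_template_table *tbl =
 *		rte_flow_template_table_create(port_id, &tbl_attr,
 *					       pts, 1, ats, 1, &flow_error);
 */
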
int
rte_flow_template_table_destroy(uint16_t port_id,
				struct rte_flow_template_table *template_table,
				struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(template_table == NULL))
		return 0;
	if (likely(!!ops->template_table_destroy)) {
		ret = flow_err(port_id,
			       ops->template_table_destroy(dev,
							   template_table,
							   error),
			       error);

		rte_flow_trace_template_table_destroy(port_id, template_table,
						      ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_group_set_miss_actions(uint16_t port_id,
				uint32_t group_id,
				const struct rte_flow_group_attr *attr,
				const struct rte_flow_action actions[],
				struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->group_set_miss_actions)) {
		return flow_err(port_id,
				ops->group_set_miss_actions(dev, group_id, attr, actions, error),
				error);
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

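/*
 * A minimal usage sketch (illustrative only): make packets that match
 * nothing in group 1 jump back to group 0 instead of taking the port
 * default behavior.
 *
 *	const struct rte_flow_group_attr grp_attr = { .ingress = 1 };
 *	const struct rte_flow_action_jump jump = { .group = 0 };
 *	const struct rte_flow_action miss_actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	rte_flow_group_set_miss_actions(port_id, 1, &grp_attr,
 *					miss_actions, &flow_error);
 */
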
struct rte_flow *
rte_flow_async_create(uint16_t port_id,
		      uint32_t queue_id,
		      const struct rte_flow_op_attr *op_attr,
		      struct rte_flow_template_table *template_table,
		      const struct rte_flow_item pattern[],
		      uint8_t pattern_template_index,
		      const struct rte_flow_action actions[],
		      uint8_t actions_template_index,
		      void *user_data,
		      struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct rte_flow *flow;

#ifdef RTE_FLOW_DEBUG
	if (!rte_eth_dev_is_valid_port(port_id)) {
		rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   rte_strerror(ENODEV));
		return NULL;
	}
	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->async_create == NULL) {
		rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   rte_strerror(ENOSYS));
		return NULL;
	}
#endif

	flow = dev->flow_fp_ops->async_create(dev, queue_id,
					      op_attr, template_table,
					      pattern, pattern_template_index,
					      actions, actions_template_index,
					      user_data, error);

	rte_flow_trace_async_create(port_id, queue_id, op_attr, template_table,
				    pattern, pattern_template_index, actions,
				    actions_template_index, user_data, flow);

	return flow;
}

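/*
 * A minimal usage sketch (illustrative only): enqueue a rule on queue 0
 * of the table from the sketch above, using pattern template 0 and
 * actions template 0, and postponing the doorbell so several operations
 * can be flushed together by rte_flow_push() below.
 *
 *	const struct rte_flow_op_attr op_attr = { .postpone = 1 };
 *	const struct rte_flow_item_eth eth_spec = {
 *		.hdr.ether_type = RTE_BE16(RTE_ETHER_TYPE_IPV4),
 *	};
 *	const struct rte_flow_item rule_pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH, .spec = &eth_spec },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	const struct rte_flow_action_queue rule_queue = { .index = 3 };
 *	const struct rte_flow_action rule_actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &rule_queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow *flow =
 *		rte_flow_async_create(port_id, 0, &op_attr, tbl,
 *				      rule_pattern, 0, rule_actions, 0,
 *				      NULL, &flow_error);
 */
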
struct rte_flow *
rte_flow_async_create_by_index(uint16_t port_id,
			       uint32_t queue_id,
			       const struct rte_flow_op_attr *op_attr,
			       struct rte_flow_template_table *template_table,
			       uint32_t rule_index,
			       const struct rte_flow_action actions[],
			       uint8_t actions_template_index,
			       void *user_data,
			       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];

#ifdef RTE_FLOW_DEBUG
	if (!rte_eth_dev_is_valid_port(port_id)) {
		rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   rte_strerror(ENODEV));
		return NULL;
	}
	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->async_create_by_index == NULL) {
		rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   rte_strerror(ENOSYS));
		return NULL;
	}
#endif

	return dev->flow_fp_ops->async_create_by_index(dev, queue_id,
						       op_attr, template_table, rule_index,
						       actions, actions_template_index,
						       user_data, error);
}

int
rte_flow_async_destroy(uint16_t port_id,
		       uint32_t queue_id,
		       const struct rte_flow_op_attr *op_attr,
		       struct rte_flow *flow,
		       void *user_data,
		       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;

#ifdef RTE_FLOW_DEBUG
	if (!rte_eth_dev_is_valid_port(port_id))
		return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  rte_strerror(ENODEV));
	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->async_destroy == NULL)
		return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  rte_strerror(ENOSYS));
#endif

	ret = dev->flow_fp_ops->async_destroy(dev, queue_id,
					      op_attr, flow,
					      user_data, error);

	rte_flow_trace_async_destroy(port_id, queue_id, op_attr, flow,
				     user_data, ret);

	return ret;
}

int
rte_flow_async_actions_update(uint16_t port_id,
			      uint32_t queue_id,
			      const struct rte_flow_op_attr *op_attr,
			      struct rte_flow *flow,
			      const struct rte_flow_action actions[],
			      uint8_t actions_template_index,
			      void *user_data,
			      struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;

#ifdef RTE_FLOW_DEBUG
	if (!rte_eth_dev_is_valid_port(port_id))
		return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  rte_strerror(ENODEV));
	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->async_actions_update == NULL)
		return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  rte_strerror(ENOSYS));
#endif

	ret = dev->flow_fp_ops->async_actions_update(dev, queue_id, op_attr,
						     flow, actions,
						     actions_template_index,
						     user_data, error);

	rte_flow_trace_async_actions_update(port_id, queue_id, op_attr, flow,
					    actions, actions_template_index,
					    user_data, ret);

	return ret;
}

int
rte_flow_push(uint16_t port_id,
	      uint32_t queue_id,
	      struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;

#ifdef RTE_FLOW_DEBUG
	if (!rte_eth_dev_is_valid_port(port_id))
		return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  rte_strerror(ENODEV));
	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->push == NULL)
		return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  rte_strerror(ENOSYS));
#endif

	ret = dev->flow_fp_ops->push(dev, queue_id, error);

	rte_flow_trace_push(port_id, queue_id, ret);

	return ret;
}

int
rte_flow_pull(uint16_t port_id,
	      uint32_t queue_id,
	      struct rte_flow_op_result res[],
	      uint16_t n_res,
	      struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;

#ifdef RTE_FLOW_DEBUG
	if (!rte_eth_dev_is_valid_port(port_id))
		return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  rte_strerror(ENODEV));
	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->pull == NULL)
		return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  rte_strerror(ENOSYS));
#endif

	ret = dev->flow_fp_ops->pull(dev, queue_id, res, n_res, error);

	rte_flow_trace_pull(port_id, queue_id, res, n_res, ret);

	return ret;
}

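/*
 * A minimal usage sketch (illustrative only): ring the doorbell for the
 * postponed operations on queue 0, then poll until all completions have
 * been drained. handle_failure() is a stand-in for application error
 * handling; each result carries back the user_data given at enqueue.
 *
 *	struct rte_flow_op_result results[32];
 *	int n, i;
 *
 *	rte_flow_push(port_id, 0, &flow_error);
 *	do {
 *		n = rte_flow_pull(port_id, 0, results, RTE_DIM(results),
 *				  &flow_error);
 *		for (i = 0; i < n; i++)
 *			if (results[i].status != RTE_FLOW_OP_SUCCESS)
 *				handle_failure(results[i].user_data);
 *	} while (n > 0);
 */
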
struct rte_flow_action_handle *
rte_flow_async_action_handle_create(uint16_t port_id,
		uint32_t queue_id,
		const struct rte_flow_op_attr *op_attr,
		const struct rte_flow_indir_action_conf *indir_action_conf,
		const struct rte_flow_action *action,
		void *user_data,
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct rte_flow_action_handle *handle;

#ifdef RTE_FLOW_DEBUG
	if (!rte_eth_dev_is_valid_port(port_id)) {
		rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   rte_strerror(ENODEV));
		return NULL;
	}
	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->async_action_handle_create == NULL) {
		rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   rte_strerror(ENOSYS));
		return NULL;
	}
#endif

	handle = dev->flow_fp_ops->async_action_handle_create(dev, queue_id, op_attr,
							      indir_action_conf, action,
							      user_data, error);

	rte_flow_trace_async_action_handle_create(port_id, queue_id, op_attr,
						  indir_action_conf, action,
						  user_data, handle);

	return handle;
}

int
rte_flow_async_action_handle_destroy(uint16_t port_id,
		uint32_t queue_id,
		const struct rte_flow_op_attr *op_attr,
		struct rte_flow_action_handle *action_handle,
		void *user_data,
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;

#ifdef RTE_FLOW_DEBUG
	if (!rte_eth_dev_is_valid_port(port_id))
		return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  rte_strerror(ENODEV));
	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->async_action_handle_destroy == NULL)
		return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  rte_strerror(ENOSYS));
#endif

	ret = dev->flow_fp_ops->async_action_handle_destroy(dev, queue_id, op_attr,
							    action_handle, user_data, error);

	rte_flow_trace_async_action_handle_destroy(port_id, queue_id, op_attr,
						   action_handle, user_data, ret);

	return ret;
}

int
rte_flow_async_action_handle_update(uint16_t port_id,
		uint32_t queue_id,
		const struct rte_flow_op_attr *op_attr,
		struct rte_flow_action_handle *action_handle,
		const void *update,
		void *user_data,
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;

#ifdef RTE_FLOW_DEBUG
	if (!rte_eth_dev_is_valid_port(port_id))
		return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  rte_strerror(ENODEV));
	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->async_action_handle_update == NULL)
		return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  rte_strerror(ENOSYS));
#endif

	ret = dev->flow_fp_ops->async_action_handle_update(dev, queue_id, op_attr,
							   action_handle, update, user_data, error);

	rte_flow_trace_async_action_handle_update(port_id, queue_id, op_attr,
						  action_handle, update,
						  user_data, ret);

	return ret;
}

int
rte_flow_async_action_handle_query(uint16_t port_id,
		uint32_t queue_id,
		const struct rte_flow_op_attr *op_attr,
		const struct rte_flow_action_handle *action_handle,
		void *data,
		void *user_data,
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;

#ifdef RTE_FLOW_DEBUG
	if (!rte_eth_dev_is_valid_port(port_id))
		return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  rte_strerror(ENODEV));
	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->async_action_handle_query == NULL)
		return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  rte_strerror(ENOSYS));
#endif

	ret = dev->flow_fp_ops->async_action_handle_query(dev, queue_id, op_attr,
							  action_handle, data, user_data, error);

	rte_flow_trace_async_action_handle_query(port_id, queue_id, op_attr,
						 action_handle, data, user_data,
						 ret);

	return ret;
}

int
rte_flow_action_handle_query_update(uint16_t port_id,
				    struct rte_flow_action_handle *handle,
				    const void *update, void *query,
				    enum rte_flow_query_update_mode mode,
				    struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_dev *dev;
	const struct rte_flow_ops *ops;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if (!handle)
		return -EINVAL;
	if (!update && !query)
		return -EINVAL;
	dev = &rte_eth_devices[port_id];
	ops = rte_flow_ops_get(port_id, error);
	if (!ops || !ops->action_handle_query_update)
		return -ENOTSUP;
	ret = ops->action_handle_query_update(dev, handle, update,
					      query, mode, error);
	return flow_err(port_id, ret, error);
}

int
rte_flow_async_action_handle_query_update(uint16_t port_id, uint32_t queue_id,
					  const struct rte_flow_op_attr *attr,
					  struct rte_flow_action_handle *handle,
					  const void *update, void *query,
					  enum rte_flow_query_update_mode mode,
					  void *user_data,
					  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];

#ifdef RTE_FLOW_DEBUG
	if (!rte_eth_dev_is_valid_port(port_id))
		return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  rte_strerror(ENODEV));
	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->async_action_handle_query_update == NULL)
		return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  rte_strerror(ENOSYS));
#endif

	return dev->flow_fp_ops->async_action_handle_query_update(dev, queue_id, attr,
								  handle, update,
								  query, mode,
								  user_data, error);
}

struct rte_flow_action_list_handle *
rte_flow_action_list_handle_create(uint16_t port_id,
				   const struct rte_flow_indir_action_conf *conf,
				   const struct rte_flow_action *actions,
				   struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_dev *dev;
	const struct rte_flow_ops *ops;
	struct rte_flow_action_list_handle *handle;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
	ops = rte_flow_ops_get(port_id, error);
	if (!ops || !ops->action_list_handle_create) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "action_list handle not supported");
		return NULL;
	}
	dev = &rte_eth_devices[port_id];
	handle = ops->action_list_handle_create(dev, conf, actions, error);
	ret = handle == NULL ? flow_err(port_id, -rte_errno, error) : 0;
	rte_flow_trace_action_list_handle_create(port_id, conf, actions, ret);
	return handle;
}

int
rte_flow_action_list_handle_destroy(uint16_t port_id,
				    struct rte_flow_action_list_handle *handle,
				    struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_dev *dev;
	const struct rte_flow_ops *ops;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	ops = rte_flow_ops_get(port_id, error);
	if (!ops || !ops->action_list_handle_destroy)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "action_list handle not supported");
	dev = &rte_eth_devices[port_id];
	ret = ops->action_list_handle_destroy(dev, handle, error);
	ret = flow_err(port_id, ret, error);
	rte_flow_trace_action_list_handle_destroy(port_id, handle, ret);
	return ret;
}

struct rte_flow_action_list_handle *
rte_flow_async_action_list_handle_create(uint16_t port_id, uint32_t queue_id,
					 const struct rte_flow_op_attr *attr,
					 const struct rte_flow_indir_action_conf *conf,
					 const struct rte_flow_action *actions,
					 void *user_data,
					 struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct rte_flow_action_list_handle *handle;
	int ret;

#ifdef RTE_FLOW_DEBUG
	if (!rte_eth_dev_is_valid_port(port_id)) {
		rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   rte_strerror(ENODEV));
		return NULL;
	}
	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->async_action_list_handle_create == NULL) {
		rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   rte_strerror(ENOSYS));
		return NULL;
	}
#endif

	handle = dev->flow_fp_ops->async_action_list_handle_create(dev, queue_id, attr, conf,
								   actions, user_data,
								   error);
	ret = handle == NULL ? flow_err(port_id, -rte_errno, error) : 0;

	rte_flow_trace_async_action_list_handle_create(port_id, queue_id, attr,
						       conf, actions, user_data,
						       ret);
	return handle;
}

int
rte_flow_async_action_list_handle_destroy(uint16_t port_id, uint32_t queue_id,
				 const struct rte_flow_op_attr *op_attr,
				 struct rte_flow_action_list_handle *handle,
				 void *user_data, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;

#ifdef RTE_FLOW_DEBUG
	if (!rte_eth_dev_is_valid_port(port_id))
		return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  rte_strerror(ENODEV));
	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->async_action_list_handle_destroy == NULL)
		return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  rte_strerror(ENOSYS));
#endif

	ret = dev->flow_fp_ops->async_action_list_handle_destroy(dev, queue_id, op_attr,
								 handle, user_data, error);

	rte_flow_trace_async_action_list_handle_destroy(port_id, queue_id,
							op_attr, handle,
							user_data, ret);
	return ret;
}

int
rte_flow_action_list_handle_query_update(uint16_t port_id,
			 const struct rte_flow_action_list_handle *handle,
			 const void **update, void **query,
			 enum rte_flow_query_update_mode mode,
			 struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_dev *dev;
	const struct rte_flow_ops *ops;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	ops = rte_flow_ops_get(port_id, error);
	if (!ops || !ops->action_list_handle_query_update)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "action_list query_update not supported");
	dev = &rte_eth_devices[port_id];
	ret = ops->action_list_handle_query_update(dev, handle, update, query,
						   mode, error);
	ret = flow_err(port_id, ret, error);
	rte_flow_trace_action_list_handle_query_update(port_id, handle, update,
						       query, mode, ret);
	return ret;
}

int
rte_flow_async_action_list_handle_query_update(uint16_t port_id, uint32_t queue_id,
			 const struct rte_flow_op_attr *attr,
			 const struct rte_flow_action_list_handle *handle,
			 const void **update, void **query,
			 enum rte_flow_query_update_mode mode,
			 void *user_data, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;

#ifdef RTE_FLOW_DEBUG
	if (!rte_eth_dev_is_valid_port(port_id))
		return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  rte_strerror(ENODEV));
	if (dev->flow_fp_ops == NULL ||
	    dev->flow_fp_ops->async_action_list_handle_query_update == NULL)
		return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  rte_strerror(ENOSYS));
#endif

	ret = dev->flow_fp_ops->async_action_list_handle_query_update(dev, queue_id, attr,
								      handle, update, query,
								      mode, user_data,
								      error);

	rte_flow_trace_async_action_list_handle_query_update(port_id, queue_id,
							     attr, handle,
							     update, query,
							     mode, user_data,
							     ret);
	return ret;
}

int
rte_flow_calc_table_hash(uint16_t port_id, const struct rte_flow_template_table *table,
			 const struct rte_flow_item pattern[], uint8_t pattern_template_index,
			 uint32_t *hash, struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_dev *dev;
	const struct rte_flow_ops *ops;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	ops = rte_flow_ops_get(port_id, error);
	if (!ops || !ops->flow_calc_table_hash)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "calc table hash not supported");
	dev = &rte_eth_devices[port_id];
	ret = ops->flow_calc_table_hash(dev, table, pattern, pattern_template_index,
					hash, error);
	return flow_err(port_id, ret, error);
}

int
rte_flow_calc_encap_hash(uint16_t port_id, const struct rte_flow_item pattern[],
			 enum rte_flow_encap_hash_field dest_field, uint8_t hash_len,
			 uint8_t *hash, struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_dev *dev;
	const struct rte_flow_ops *ops;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	ops = rte_flow_ops_get(port_id, error);
	if (!ops || !ops->flow_calc_encap_hash)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "calc encap hash is not supported");
	if (dest_field > RTE_FLOW_ENCAP_HASH_FIELD_NVGRE_FLOW_ID)
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "hash dest field is not defined");
	if ((dest_field == RTE_FLOW_ENCAP_HASH_FIELD_SRC_PORT && hash_len != 2) ||
	    (dest_field == RTE_FLOW_ENCAP_HASH_FIELD_NVGRE_FLOW_ID && hash_len != 1))
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "hash len doesn't match the requested field len");
	dev = &rte_eth_devices[port_id];
	ret = ops->flow_calc_encap_hash(dev, pattern, dest_field, hash, error);
	return flow_err(port_id, ret, error);
}

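/*
 * A minimal usage sketch (illustrative only): the destination field
 * determines the width enforced above, e.g. a source port takes a
 * 2-byte hash while an NVGRE flow ID takes a single byte.
 *
 *	uint8_t sport_hash[2];
 *	rte_flow_calc_encap_hash(port_id, pattern,
 *				 RTE_FLOW_ENCAP_HASH_FIELD_SRC_PORT,
 *				 sizeof(sport_hash), sport_hash,
 *				 &flow_error);
 */
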
bool
rte_flow_template_table_resizable(__rte_unused uint16_t port_id,
				  const struct rte_flow_template_table_attr *tbl_attr)
{
	return (tbl_attr->specialize &
		RTE_FLOW_TABLE_SPECIALIZE_RESIZABLE) != 0;
}

int
rte_flow_template_table_resize(uint16_t port_id,
			       struct rte_flow_template_table *table,
			       uint32_t nb_rules,
			       struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_dev *dev;
	const struct rte_flow_ops *ops;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	ops = rte_flow_ops_get(port_id, error);
	if (!ops || !ops->flow_template_table_resize)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "flow_template_table_resize not supported");
	dev = &rte_eth_devices[port_id];
	ret = ops->flow_template_table_resize(dev, table, nb_rules, error);
	ret = flow_err(port_id, ret, error);
	rte_flow_trace_template_table_resize(port_id, table, nb_rules, ret);
	return ret;
}

int
rte_flow_async_update_resized(uint16_t port_id, uint32_t queue,
			      const struct rte_flow_op_attr *attr,
			      struct rte_flow *rule, void *user_data,
			      struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_dev *dev;
	const struct rte_flow_ops *ops;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	ops = rte_flow_ops_get(port_id, error);
	if (!ops || !ops->flow_update_resized)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "flow_update_resized not supported");
	dev = &rte_eth_devices[port_id];
	ret = ops->flow_update_resized(dev, queue, attr, rule, user_data, error);
	ret = flow_err(port_id, ret, error);
	rte_flow_trace_async_update_resized(port_id, queue, attr,
					    rule, user_data, ret);
	return ret;
}

int
rte_flow_template_table_resize_complete(uint16_t port_id,
					struct rte_flow_template_table *table,
					struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_dev *dev;
	const struct rte_flow_ops *ops;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	ops = rte_flow_ops_get(port_id, error);
	if (!ops || !ops->flow_template_table_resize_complete)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "flow_template_table_resize_complete not supported");
	dev = &rte_eth_devices[port_id];
	ret = ops->flow_template_table_resize_complete(dev, table, error);
	ret = flow_err(port_id, ret, error);
	rte_flow_trace_table_resize_complete(port_id, table, ret);
	return ret;
}

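/*
 * The three calls above form a resize protocol. A sketch of the
 * expected order (illustrative only; tbl, op_attr and new_capacity are
 * placeholders from the earlier sketches), assuming the table was
 * created with RTE_FLOW_TABLE_SPECIALIZE_RESIZABLE set in its
 * specialize attribute:
 *
 *	if (rte_flow_template_table_resizable(port_id, &tbl_attr)) {
 *		rte_flow_template_table_resize(port_id, tbl, new_capacity,
 *					       &flow_error);
 *		// ... for each rule created before the resize:
 *		rte_flow_async_update_resized(port_id, 0, &op_attr, flow,
 *					      NULL, &flow_error);
 *		// ... once all updates have completed:
 *		rte_flow_template_table_resize_complete(port_id, tbl,
 *							&flow_error);
 *	}
 */
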
static struct rte_flow *
rte_flow_dummy_async_create(struct rte_eth_dev *dev __rte_unused,
			    uint32_t queue __rte_unused,
			    const struct rte_flow_op_attr *attr __rte_unused,
			    struct rte_flow_template_table *table __rte_unused,
			    const struct rte_flow_item items[] __rte_unused,
			    uint8_t pattern_template_index __rte_unused,
			    const struct rte_flow_action actions[] __rte_unused,
			    uint8_t action_template_index __rte_unused,
			    void *user_data __rte_unused,
			    struct rte_flow_error *error)
{
	rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			   rte_strerror(ENOSYS));
	return NULL;
}

static struct rte_flow *
rte_flow_dummy_async_create_by_index(struct rte_eth_dev *dev __rte_unused,
				     uint32_t queue __rte_unused,
				     const struct rte_flow_op_attr *attr __rte_unused,
				     struct rte_flow_template_table *table __rte_unused,
				     uint32_t rule_index __rte_unused,
				     const struct rte_flow_action actions[] __rte_unused,
				     uint8_t action_template_index __rte_unused,
				     void *user_data __rte_unused,
				     struct rte_flow_error *error)
{
	rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			   rte_strerror(ENOSYS));
	return NULL;
}

static int
rte_flow_dummy_async_actions_update(struct rte_eth_dev *dev __rte_unused,
				    uint32_t queue_id __rte_unused,
				    const struct rte_flow_op_attr *op_attr __rte_unused,
				    struct rte_flow *flow __rte_unused,
				    const struct rte_flow_action actions[] __rte_unused,
				    uint8_t actions_template_index __rte_unused,
				    void *user_data __rte_unused,
				    struct rte_flow_error *error)
{
	return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				  rte_strerror(ENOSYS));
}

static int
rte_flow_dummy_async_destroy(struct rte_eth_dev *dev __rte_unused,
			     uint32_t queue_id __rte_unused,
			     const struct rte_flow_op_attr *op_attr __rte_unused,
			     struct rte_flow *flow __rte_unused,
			     void *user_data __rte_unused,
			     struct rte_flow_error *error)
{
	return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				  rte_strerror(ENOSYS));
}

static int
rte_flow_dummy_push(struct rte_eth_dev *dev __rte_unused,
		    uint32_t queue_id __rte_unused,
		    struct rte_flow_error *error)
{
	return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				  rte_strerror(ENOSYS));
}

static int
rte_flow_dummy_pull(struct rte_eth_dev *dev __rte_unused,
		    uint32_t queue_id __rte_unused,
		    struct rte_flow_op_result res[] __rte_unused,
		    uint16_t n_res __rte_unused,
		    struct rte_flow_error *error)
{
	return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				  rte_strerror(ENOSYS));
}

static struct rte_flow_action_handle *
rte_flow_dummy_async_action_handle_create(
	struct rte_eth_dev *dev __rte_unused,
	uint32_t queue_id __rte_unused,
	const struct rte_flow_op_attr *op_attr __rte_unused,
	const struct rte_flow_indir_action_conf *indir_action_conf __rte_unused,
	const struct rte_flow_action *action __rte_unused,
	void *user_data __rte_unused,
	struct rte_flow_error *error)
{
	rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			   rte_strerror(ENOSYS));
	return NULL;
}

static int
rte_flow_dummy_async_action_handle_destroy(
	struct rte_eth_dev *dev __rte_unused,
	uint32_t queue_id __rte_unused,
	const struct rte_flow_op_attr *op_attr __rte_unused,
	struct rte_flow_action_handle *action_handle __rte_unused,
	void *user_data __rte_unused,
	struct rte_flow_error *error)
{
	return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				  rte_strerror(ENOSYS));
}

static int
rte_flow_dummy_async_action_handle_update(
	struct rte_eth_dev *dev __rte_unused,
	uint32_t queue_id __rte_unused,
	const struct rte_flow_op_attr *op_attr __rte_unused,
	struct rte_flow_action_handle *action_handle __rte_unused,
	const void *update __rte_unused,
	void *user_data __rte_unused,
	struct rte_flow_error *error)
{
	return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				  rte_strerror(ENOSYS));
}

static int
rte_flow_dummy_async_action_handle_query(
	struct rte_eth_dev *dev __rte_unused,
	uint32_t queue_id __rte_unused,
	const struct rte_flow_op_attr *op_attr __rte_unused,
	const struct rte_flow_action_handle *action_handle __rte_unused,
	void *data __rte_unused,
	void *user_data __rte_unused,
	struct rte_flow_error *error)
{
	return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				  rte_strerror(ENOSYS));
}

static int
rte_flow_dummy_async_action_handle_query_update(
	struct rte_eth_dev *dev __rte_unused,
	uint32_t queue_id __rte_unused,
	const struct rte_flow_op_attr *attr __rte_unused,
	struct rte_flow_action_handle *handle __rte_unused,
	const void *update __rte_unused,
	void *query __rte_unused,
	enum rte_flow_query_update_mode mode __rte_unused,
	void *user_data __rte_unused,
	struct rte_flow_error *error)
{
	return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				  rte_strerror(ENOSYS));
}

static struct rte_flow_action_list_handle *
rte_flow_dummy_async_action_list_handle_create(
	struct rte_eth_dev *dev __rte_unused,
	uint32_t queue_id __rte_unused,
	const struct rte_flow_op_attr *attr __rte_unused,
	const struct rte_flow_indir_action_conf *conf __rte_unused,
	const struct rte_flow_action *actions __rte_unused,
	void *user_data __rte_unused,
	struct rte_flow_error *error)
{
	rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			   rte_strerror(ENOSYS));
	return NULL;
}

static int
rte_flow_dummy_async_action_list_handle_destroy(
	struct rte_eth_dev *dev __rte_unused,
	uint32_t queue_id __rte_unused,
	const struct rte_flow_op_attr *op_attr __rte_unused,
	struct rte_flow_action_list_handle *handle __rte_unused,
	void *user_data __rte_unused,
	struct rte_flow_error *error)
{
	return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				  rte_strerror(ENOSYS));
}

static int
rte_flow_dummy_async_action_list_handle_query_update(
	struct rte_eth_dev *dev __rte_unused,
	uint32_t queue_id __rte_unused,
	const struct rte_flow_op_attr *attr __rte_unused,
	const struct rte_flow_action_list_handle *handle __rte_unused,
	const void **update __rte_unused,
	void **query __rte_unused,
	enum rte_flow_query_update_mode mode __rte_unused,
	void *user_data __rte_unused,
	struct rte_flow_error *error)
{
	return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				  rte_strerror(ENOSYS));
}

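/*
 * Default fast-path ops. Every callback points at one of the dummy
 * functions above, so each async entry point reports ENOSYS until a
 * driver installs its own ops table.
 */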
struct rte_flow_fp_ops rte_flow_fp_default_ops = {
	.async_create = rte_flow_dummy_async_create,
	.async_create_by_index = rte_flow_dummy_async_create_by_index,
	.async_actions_update = rte_flow_dummy_async_actions_update,
	.async_destroy = rte_flow_dummy_async_destroy,
	.push = rte_flow_dummy_push,
	.pull = rte_flow_dummy_pull,
	.async_action_handle_create = rte_flow_dummy_async_action_handle_create,
	.async_action_handle_destroy = rte_flow_dummy_async_action_handle_destroy,
	.async_action_handle_update = rte_flow_dummy_async_action_handle_update,
	.async_action_handle_query = rte_flow_dummy_async_action_handle_query,
	.async_action_handle_query_update = rte_flow_dummy_async_action_handle_query_update,
	.async_action_list_handle_create = rte_flow_dummy_async_action_list_handle_create,
	.async_action_list_handle_destroy = rte_flow_dummy_async_action_list_handle_destroy,
	.async_action_list_handle_query_update =
		rte_flow_dummy_async_action_list_handle_query_update,
};