/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <stdalign.h>
#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <pthread.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_branch_prediction.h>
#include <rte_string_fns.h>
#include <rte_mbuf_dyn.h>
#include "rte_flow_driver.h"
#include "rte_flow.h"

#include "ethdev_trace.h"

#define FLOW_LOG RTE_ETHDEV_LOG_LINE

/* Mbuf dynamic field name for metadata. */
int32_t rte_flow_dynf_metadata_offs = -1;

/* Mbuf dynamic field flag bit number for metadata. */
uint64_t rte_flow_dynf_metadata_mask;

/**
 * Flow element description tables.
 */
struct rte_flow_desc_data {
	const char *name;
	size_t size;
	size_t (*desc_fn)(void *dst, const void *src);
};

/**
 * Copy flow element configuration and report the size needed to store it.
 *
 * @param buf
 *   Destination memory.
 * @param data
 *   Source memory.
 * @param size
 *   Requested copy size.
 * @param desc
 *   rte_flow_desc_item - for flow item conversion.
 *   rte_flow_desc_action - for flow action conversion.
 * @param type
 *   Offset into the desc param or negative value for private flow elements.
 */
static inline size_t
rte_flow_conv_copy(void *buf, const void *data, const size_t size,
		   const struct rte_flow_desc_data *desc, int type)
{
	/* Negative types are PMD private flow elements. */
	bool rte_type = type >= 0;

	size_t sz = rte_type ? desc[type].size : sizeof(void *);
	if (data == NULL)
		return 0;
	if (buf != NULL)
		rte_memcpy(buf, data, (size > sz ? sz : size));
	if (rte_type && desc[type].desc_fn)
		sz += desc[type].desc_fn(size > 0 ? buf : NULL, data);
	return sz;
}

static size_t
rte_flow_item_flex_conv(void *buf, const void *data)
{
	struct rte_flow_item_flex *dst = buf;
	const struct rte_flow_item_flex *src = data;
	if (buf) {
		dst->pattern = rte_memcpy
			((void *)((uintptr_t)(dst + 1)), src->pattern,
			 src->length);
	}
	return src->length;
}

/** Generate flow_item[] entry. */
#define MK_FLOW_ITEM(t, s) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
		.desc_fn = NULL, \
	}

#define MK_FLOW_ITEM_FN(t, s, fn) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
		.desc_fn = fn, \
	}

/** Information about known flow pattern items. */
static const struct rte_flow_desc_data rte_flow_desc_item[] = {
	MK_FLOW_ITEM(END, 0),
	MK_FLOW_ITEM(VOID, 0),
	MK_FLOW_ITEM(INVERT, 0),
	MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
	MK_FLOW_ITEM(PORT_ID, sizeof(struct rte_flow_item_port_id)),
	MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)),
	MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
	MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
	MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
	MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
	MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
	MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
	MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
	MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
	MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
	MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
	MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
	MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
	MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
	MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)),
	MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(ESP, sizeof(struct rte_flow_item_esp)),
	MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
	MK_FLOW_ITEM(VXLAN_GPE, sizeof(struct rte_flow_item_vxlan_gpe)),
	MK_FLOW_ITEM(ARP_ETH_IPV4, sizeof(struct rte_flow_item_arp_eth_ipv4)),
	MK_FLOW_ITEM(IPV6_EXT, sizeof(struct rte_flow_item_ipv6_ext)),
	MK_FLOW_ITEM(IPV6_FRAG_EXT, sizeof(struct rte_flow_item_ipv6_frag_ext)),
	MK_FLOW_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
	MK_FLOW_ITEM(ICMP6_ECHO_REQUEST, sizeof(struct rte_flow_item_icmp6_echo)),
	MK_FLOW_ITEM(ICMP6_ECHO_REPLY, sizeof(struct rte_flow_item_icmp6_echo)),
	MK_FLOW_ITEM(ICMP6_ND_NS, sizeof(struct rte_flow_item_icmp6_nd_ns)),
	MK_FLOW_ITEM(ICMP6_ND_NA, sizeof(struct rte_flow_item_icmp6_nd_na)),
	MK_FLOW_ITEM(ICMP6_ND_OPT, sizeof(struct rte_flow_item_icmp6_nd_opt)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_SLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_TLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
	MK_FLOW_ITEM(MARK, sizeof(struct rte_flow_item_mark)),
	MK_FLOW_ITEM(META, sizeof(struct rte_flow_item_meta)),
	MK_FLOW_ITEM(RANDOM, sizeof(struct rte_flow_item_random)),
	MK_FLOW_ITEM(TAG, sizeof(struct rte_flow_item_tag)),
	MK_FLOW_ITEM(GRE_KEY, sizeof(rte_be32_t)),
	MK_FLOW_ITEM(GRE_OPTION, sizeof(struct rte_flow_item_gre_opt)),
	MK_FLOW_ITEM(GTP_PSC, sizeof(struct rte_flow_item_gtp_psc)),
	MK_FLOW_ITEM(PPPOES, sizeof(struct rte_flow_item_pppoe)),
	MK_FLOW_ITEM(PPPOED, sizeof(struct rte_flow_item_pppoe)),
	MK_FLOW_ITEM(PPPOE_PROTO_ID,
			sizeof(struct rte_flow_item_pppoe_proto_id)),
	MK_FLOW_ITEM(NSH, sizeof(struct rte_flow_item_nsh)),
	MK_FLOW_ITEM(IGMP, sizeof(struct rte_flow_item_igmp)),
	MK_FLOW_ITEM(AH, sizeof(struct rte_flow_item_ah)),
	MK_FLOW_ITEM(HIGIG2, sizeof(struct rte_flow_item_higig2_hdr)),
	MK_FLOW_ITEM(L2TPV3OIP, sizeof(struct rte_flow_item_l2tpv3oip)),
	MK_FLOW_ITEM(PFCP, sizeof(struct rte_flow_item_pfcp)),
	MK_FLOW_ITEM(ECPRI, sizeof(struct rte_flow_item_ecpri)),
	MK_FLOW_ITEM(GENEVE_OPT, sizeof(struct rte_flow_item_geneve_opt)),
	MK_FLOW_ITEM(INTEGRITY, sizeof(struct rte_flow_item_integrity)),
	MK_FLOW_ITEM(CONNTRACK, sizeof(uint32_t)),
	MK_FLOW_ITEM(PORT_REPRESENTOR, sizeof(struct rte_flow_item_ethdev)),
	MK_FLOW_ITEM(REPRESENTED_PORT, sizeof(struct rte_flow_item_ethdev)),
	MK_FLOW_ITEM_FN(FLEX, sizeof(struct rte_flow_item_flex),
			rte_flow_item_flex_conv),
	MK_FLOW_ITEM(L2TPV2, sizeof(struct rte_flow_item_l2tpv2)),
	MK_FLOW_ITEM(PPP, sizeof(struct rte_flow_item_ppp)),
	MK_FLOW_ITEM(METER_COLOR, sizeof(struct rte_flow_item_meter_color)),
	MK_FLOW_ITEM(IPV6_ROUTING_EXT, sizeof(struct rte_flow_item_ipv6_routing_ext)),
	MK_FLOW_ITEM(QUOTA, sizeof(struct rte_flow_item_quota)),
	MK_FLOW_ITEM(AGGR_AFFINITY, sizeof(struct rte_flow_item_aggr_affinity)),
	MK_FLOW_ITEM(TX_QUEUE, sizeof(struct rte_flow_item_tx_queue)),
	MK_FLOW_ITEM(IB_BTH, sizeof(struct rte_flow_item_ib_bth)),
	MK_FLOW_ITEM(PTYPE, sizeof(struct rte_flow_item_ptype)),
	MK_FLOW_ITEM(COMPARE, sizeof(struct rte_flow_item_compare)),
};

/** Generate flow_action[] entry. */
#define MK_FLOW_ACTION(t, s) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
		.desc_fn = NULL, \
	}

#define MK_FLOW_ACTION_FN(t, fn) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = 0, \
		.desc_fn = fn, \
	}

/** Information about known flow actions. */
static const struct rte_flow_desc_data rte_flow_desc_action[] = {
	MK_FLOW_ACTION(END, 0),
	MK_FLOW_ACTION(VOID, 0),
	MK_FLOW_ACTION(PASSTHRU, 0),
	MK_FLOW_ACTION(JUMP, sizeof(struct rte_flow_action_jump)),
	MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
	MK_FLOW_ACTION(FLAG, 0),
	MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
	MK_FLOW_ACTION(DROP, 0),
	MK_FLOW_ACTION(COUNT, sizeof(struct rte_flow_action_count)),
	MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)),
	MK_FLOW_ACTION(PF, 0),
	MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
	MK_FLOW_ACTION(PORT_ID, sizeof(struct rte_flow_action_port_id)),
	MK_FLOW_ACTION(METER, sizeof(struct rte_flow_action_meter)),
	MK_FLOW_ACTION(SECURITY, sizeof(struct rte_flow_action_security)),
	MK_FLOW_ACTION(OF_DEC_NW_TTL, 0),
	MK_FLOW_ACTION(OF_POP_VLAN, 0),
	MK_FLOW_ACTION(OF_PUSH_VLAN,
		       sizeof(struct rte_flow_action_of_push_vlan)),
	MK_FLOW_ACTION(OF_SET_VLAN_VID,
		       sizeof(struct rte_flow_action_of_set_vlan_vid)),
	MK_FLOW_ACTION(OF_SET_VLAN_PCP,
		       sizeof(struct rte_flow_action_of_set_vlan_pcp)),
	MK_FLOW_ACTION(OF_POP_MPLS,
		       sizeof(struct rte_flow_action_of_pop_mpls)),
	MK_FLOW_ACTION(OF_PUSH_MPLS,
		       sizeof(struct rte_flow_action_of_push_mpls)),
	MK_FLOW_ACTION(VXLAN_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
	MK_FLOW_ACTION(VXLAN_DECAP, 0),
	MK_FLOW_ACTION(NVGRE_ENCAP, sizeof(struct rte_flow_action_nvgre_encap)),
	MK_FLOW_ACTION(NVGRE_DECAP, 0),
	MK_FLOW_ACTION(RAW_ENCAP, sizeof(struct rte_flow_action_raw_encap)),
	MK_FLOW_ACTION(RAW_DECAP, sizeof(struct rte_flow_action_raw_decap)),
	MK_FLOW_ACTION(SET_IPV4_SRC,
		       sizeof(struct rte_flow_action_set_ipv4)),
	MK_FLOW_ACTION(SET_IPV4_DST,
		       sizeof(struct rte_flow_action_set_ipv4)),
	MK_FLOW_ACTION(SET_IPV6_SRC,
		       sizeof(struct rte_flow_action_set_ipv6)),
	MK_FLOW_ACTION(SET_IPV6_DST,
		       sizeof(struct rte_flow_action_set_ipv6)),
	MK_FLOW_ACTION(SET_TP_SRC,
		       sizeof(struct rte_flow_action_set_tp)),
	MK_FLOW_ACTION(SET_TP_DST,
		       sizeof(struct rte_flow_action_set_tp)),
	MK_FLOW_ACTION(MAC_SWAP, 0),
	MK_FLOW_ACTION(DEC_TTL, 0),
	MK_FLOW_ACTION(SET_TTL, sizeof(struct rte_flow_action_set_ttl)),
	MK_FLOW_ACTION(SET_MAC_SRC, sizeof(struct rte_flow_action_set_mac)),
	MK_FLOW_ACTION(SET_MAC_DST, sizeof(struct rte_flow_action_set_mac)),
	MK_FLOW_ACTION(INC_TCP_SEQ, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(DEC_TCP_SEQ, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(INC_TCP_ACK, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(DEC_TCP_ACK, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(SET_TAG, sizeof(struct rte_flow_action_set_tag)),
	MK_FLOW_ACTION(SET_META, sizeof(struct rte_flow_action_set_meta)),
	MK_FLOW_ACTION(SET_IPV4_DSCP, sizeof(struct rte_flow_action_set_dscp)),
	MK_FLOW_ACTION(SET_IPV6_DSCP, sizeof(struct rte_flow_action_set_dscp)),
	MK_FLOW_ACTION(AGE, sizeof(struct rte_flow_action_age)),
	MK_FLOW_ACTION(SAMPLE, sizeof(struct rte_flow_action_sample)),
	MK_FLOW_ACTION(MODIFY_FIELD,
		       sizeof(struct rte_flow_action_modify_field)),
	/*
	 * Indirect action represented as a handle of type
	 * (struct rte_flow_action_handle *) stored in the conf field (see
	 * struct rte_flow_action); no additional structure is needed to
	 * store the indirect action handle.
	 */
	MK_FLOW_ACTION(INDIRECT, 0),
	MK_FLOW_ACTION(CONNTRACK, sizeof(struct rte_flow_action_conntrack)),
	MK_FLOW_ACTION(PORT_REPRESENTOR, sizeof(struct rte_flow_action_ethdev)),
	MK_FLOW_ACTION(REPRESENTED_PORT, sizeof(struct rte_flow_action_ethdev)),
	MK_FLOW_ACTION(METER_MARK, sizeof(struct rte_flow_action_meter_mark)),
	MK_FLOW_ACTION(SEND_TO_KERNEL, 0),
	MK_FLOW_ACTION(QUOTA, sizeof(struct rte_flow_action_quota)),
	MK_FLOW_ACTION(IPV6_EXT_PUSH, sizeof(struct rte_flow_action_ipv6_ext_push)),
	MK_FLOW_ACTION(IPV6_EXT_REMOVE, sizeof(struct rte_flow_action_ipv6_ext_remove)),
	MK_FLOW_ACTION(INDIRECT_LIST,
		       sizeof(struct rte_flow_action_indirect_list)),
	MK_FLOW_ACTION(PROG,
		       sizeof(struct rte_flow_action_prog)),
	MK_FLOW_ACTION(NAT64, sizeof(struct rte_flow_action_nat64)),
};

int
rte_flow_dynf_metadata_register(void)
{
	int offset;
	int flag;

	static const struct rte_mbuf_dynfield desc_offs = {
		.name = RTE_MBUF_DYNFIELD_METADATA_NAME,
		.size = sizeof(uint32_t),
		.align = alignof(uint32_t),
	};
	static const struct rte_mbuf_dynflag desc_flag = {
		.name = RTE_MBUF_DYNFLAG_METADATA_NAME,
	};

	offset = rte_mbuf_dynfield_register(&desc_offs);
	if (offset < 0)
		goto error;
	flag = rte_mbuf_dynflag_register(&desc_flag);
	if (flag < 0)
		goto error;
	rte_flow_dynf_metadata_offs = offset;
	rte_flow_dynf_metadata_mask = RTE_BIT64(flag);

	rte_flow_trace_dynf_metadata_register(offset, RTE_BIT64(flag));

	return 0;

error:
	rte_flow_dynf_metadata_offs = -1;
	rte_flow_dynf_metadata_mask = UINT64_C(0);
	return -rte_errno;
}
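
/*
 * Usage sketch (illustrative, not part of this file): an application
 * typically registers the metadata dynamic field once at startup, then
 * reads the value from received mbufs with the RTE_FLOW_DYNF_METADATA()
 * helper from rte_flow.h once a rule carrying SET_META has matched.
 * handle_meta() is a hypothetical application hook.
 *
 *	if (rte_flow_dynf_metadata_register() < 0)
 *		rte_exit(EXIT_FAILURE, "cannot register metadata field");
 *	...
 *	if (m->ol_flags & rte_flow_dynf_metadata_mask)
 *		handle_meta(m, *RTE_FLOW_DYNF_METADATA(m));
 */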

static inline void
fts_enter(struct rte_eth_dev *dev)
{
	if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
		pthread_mutex_lock(&dev->data->flow_ops_mutex);
}

static inline void
fts_exit(struct rte_eth_dev *dev)
{
	if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
		pthread_mutex_unlock(&dev->data->flow_ops_mutex);
}

static int
flow_err(uint16_t port_id, int ret, struct rte_flow_error *error)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return rte_flow_error_set(error, EIO,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(EIO));
	return ret;
}

/* Get generic flow operations structure from a port. */
const struct rte_flow_ops *
rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops;
	int code;

	if (unlikely(!rte_eth_dev_is_valid_port(port_id)))
		code = ENODEV;
	else if (unlikely(dev->dev_ops->flow_ops_get == NULL))
		/* flow API not supported with this driver dev_ops */
		code = ENOSYS;
	else
		code = dev->dev_ops->flow_ops_get(dev, &ops);
	if (code == 0 && ops == NULL)
		/* flow API not supported with this device */
		code = ENOSYS;

	if (code != 0) {
		rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, rte_strerror(code));
		return NULL;
	}
	return ops;
}

/* Check whether a flow rule can be created on a given port. */
int
rte_flow_validate(uint16_t port_id,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;

	if (likely(!!attr) && attr->transfer &&
	    (attr->ingress || attr->egress)) {
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ATTR,
					  attr, "cannot use attr ingress/egress with attr transfer");
	}

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->validate)) {
		fts_enter(dev);
		ret = ops->validate(dev, attr, pattern, actions, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_validate(port_id, attr, pattern, actions, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Create a flow rule on a given port. */
struct rte_flow *
rte_flow_create(uint16_t port_id,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct rte_flow *flow;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return NULL;
	if (likely(!!ops->create)) {
		fts_enter(dev);
		flow = ops->create(dev, attr, pattern, actions, error);
		fts_exit(dev);
		if (flow == NULL)
			flow_err(port_id, -rte_errno, error);

		rte_flow_trace_create(port_id, attr, pattern, actions, flow);

		return flow;
	}
	rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(ENOSYS));
	return NULL;
}
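
/*
 * Usage sketch (illustrative, not part of this file): the canonical rule
 * lifecycle validates first, then creates, and eventually destroys; error
 * details, if any, land in the rte_flow_error supplied by the caller.
 *
 *	struct rte_flow_error err = { 0 };
 *	struct rte_flow *f;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0) {
 *		f = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *		if (f == NULL)
 *			printf("create failed: %s\n",
 *			       err.message ? err.message : "(no message)");
 *		else
 *			rte_flow_destroy(port_id, f, &err);
 *	}
 */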

/* Destroy a flow rule on a given port. */
int
rte_flow_destroy(uint16_t port_id,
		 struct rte_flow *flow,
		 struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->destroy)) {
		fts_enter(dev);
		ret = ops->destroy(dev, flow, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_destroy(port_id, flow, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

int
rte_flow_actions_update(uint16_t port_id,
			struct rte_flow *flow,
			const struct rte_flow_action actions[],
			struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->actions_update)) {
		fts_enter(dev);
		ret = ops->actions_update(dev, flow, actions, error);
		fts_exit(dev);

		rte_flow_trace_actions_update(port_id, flow, actions, ret);

		return flow_err(port_id, ret, error);
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Destroy all flow rules associated with a port. */
int
rte_flow_flush(uint16_t port_id,
	       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->flush)) {
		fts_enter(dev);
		ret = ops->flush(dev, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_flush(port_id, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Query an existing flow rule. */
int
rte_flow_query(uint16_t port_id,
	       struct rte_flow *flow,
	       const struct rte_flow_action *action,
	       void *data,
	       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (!ops)
		return -rte_errno;
	if (likely(!!ops->query)) {
		fts_enter(dev);
		ret = ops->query(dev, flow, action, data, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_query(port_id, flow, action, data, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Restrict ingress traffic to the defined flow rules. */
int
rte_flow_isolate(uint16_t port_id,
		 int set,
		 struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (!ops)
		return -rte_errno;
	if (likely(!!ops->isolate)) {
		fts_enter(dev);
		ret = ops->isolate(dev, set, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_isolate(port_id, set, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Initialize flow error structure. */
int
rte_flow_error_set(struct rte_flow_error *error,
		   int code,
		   enum rte_flow_error_type type,
		   const void *cause,
		   const char *message)
{
	if (error) {
		*error = (struct rte_flow_error){
			.type = type,
			.cause = cause,
			.message = message,
		};
	}
	rte_errno = code;
	return -code;
}
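
/*
 * Usage sketch (illustrative, not part of this file): driver callbacks
 * report failures through this helper, which fills the caller's error
 * structure, sets rte_errno and returns a negative errno value in a
 * single statement. The validate callback below is hypothetical.
 *
 *	static int
 *	pmd_flow_validate(struct rte_eth_dev *dev,
 *			  const struct rte_flow_attr *attr,
 *			  const struct rte_flow_item pattern[],
 *			  const struct rte_flow_action actions[],
 *			  struct rte_flow_error *error)
 *	{
 *		if (attr->priority > 0)
 *			return rte_flow_error_set(error, ENOTSUP,
 *					RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
 *					attr, "priorities are not supported");
 *		return 0;
 *	}
 */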

/** Pattern item specification types. */
enum rte_flow_conv_item_spec_type {
	RTE_FLOW_CONV_ITEM_SPEC,
	RTE_FLOW_CONV_ITEM_LAST,
	RTE_FLOW_CONV_ITEM_MASK,
};

/**
 * Copy pattern item specification.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] item
 *   Pattern item to copy specification from.
 * @param type
 *   Specification selector for either @p spec, @p last or @p mask.
 *
 * @return
 *   Number of bytes needed to store pattern item specification regardless
 *   of @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_item_spec(void *buf, const size_t size,
			const struct rte_flow_item *item,
			enum rte_flow_conv_item_spec_type type)
{
	size_t off;
	const void *data =
		type == RTE_FLOW_CONV_ITEM_SPEC ? item->spec :
		type == RTE_FLOW_CONV_ITEM_LAST ? item->last :
		type == RTE_FLOW_CONV_ITEM_MASK ? item->mask :
		NULL;

	switch (item->type) {
		union {
			const struct rte_flow_item_raw *raw;
			const struct rte_flow_item_geneve_opt *geneve_opt;
		} spec;
		union {
			const struct rte_flow_item_raw *raw;
		} last;
		union {
			const struct rte_flow_item_raw *raw;
		} mask;
		union {
			const struct rte_flow_item_raw *raw;
			const struct rte_flow_item_geneve_opt *geneve_opt;
		} src;
		union {
			struct rte_flow_item_raw *raw;
			struct rte_flow_item_geneve_opt *geneve_opt;
		} dst;
		void *deep_src;
		size_t tmp;

	case RTE_FLOW_ITEM_TYPE_RAW:
		spec.raw = item->spec;
		last.raw = item->last ? item->last : item->spec;
		mask.raw = item->mask ? item->mask : &rte_flow_item_raw_mask;
		src.raw = data;
		dst.raw = buf;
		rte_memcpy(dst.raw,
			   (&(struct rte_flow_item_raw){
				.relative = src.raw->relative,
				.search = src.raw->search,
				.reserved = src.raw->reserved,
				.offset = src.raw->offset,
				.limit = src.raw->limit,
				.length = src.raw->length,
			   }),
			   size > sizeof(*dst.raw) ? sizeof(*dst.raw) : size);
		off = sizeof(*dst.raw);
		if (type == RTE_FLOW_CONV_ITEM_SPEC ||
		    (type == RTE_FLOW_CONV_ITEM_MASK &&
		     ((spec.raw->length & mask.raw->length) >=
		      (last.raw->length & mask.raw->length))))
			tmp = spec.raw->length & mask.raw->length;
		else
			tmp = last.raw->length & mask.raw->length;
		if (tmp) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.raw->pattern));
			if (size >= off + tmp) {
				deep_src = (void *)((uintptr_t)dst.raw + off);
				dst.raw->pattern = rte_memcpy(deep_src,
							      src.raw->pattern,
							      tmp);
			}
			off += tmp;
		}
		break;
	case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
		off = rte_flow_conv_copy(buf, data, size,
					 rte_flow_desc_item, item->type);
		spec.geneve_opt = item->spec;
		src.geneve_opt = data;
		dst.geneve_opt = buf;
		tmp = spec.geneve_opt->option_len << 2;
		if (size > 0 && src.geneve_opt->data) {
			deep_src = (void *)((uintptr_t)(dst.geneve_opt + 1));
			dst.geneve_opt->data = rte_memcpy(deep_src,
							  src.geneve_opt->data,
							  tmp);
		}
		off += tmp;
		break;
	default:
		off = rte_flow_conv_copy(buf, data, size,
					 rte_flow_desc_item, item->type);
		break;
	}
	return off;
}
/**
 * Copy action configuration.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] action
 *   Action to copy configuration from.
 *
 * @return
 *   Number of bytes needed to store the action configuration regardless
 *   of @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_action_conf(void *buf, const size_t size,
			  const struct rte_flow_action *action)
{
	size_t off;

	switch (action->type) {
		union {
			const struct rte_flow_action_rss *rss;
			const struct rte_flow_action_vxlan_encap *vxlan_encap;
			const struct rte_flow_action_nvgre_encap *nvgre_encap;
		} src;
		union {
			struct rte_flow_action_rss *rss;
			struct rte_flow_action_vxlan_encap *vxlan_encap;
			struct rte_flow_action_nvgre_encap *nvgre_encap;
		} dst;
		size_t tmp;
		int ret;

	case RTE_FLOW_ACTION_TYPE_RSS:
		src.rss = action->conf;
		dst.rss = buf;
		rte_memcpy(dst.rss,
			   (&(struct rte_flow_action_rss){
				.func = src.rss->func,
				.level = src.rss->level,
				.types = src.rss->types,
				.key_len = src.rss->key_len,
				.queue_num = src.rss->queue_num,
			   }),
			   size > sizeof(*dst.rss) ? sizeof(*dst.rss) : size);
		off = sizeof(*dst.rss);
		if (src.rss->key_len && src.rss->key) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->key));
			tmp = sizeof(*src.rss->key) * src.rss->key_len;
			if (size >= (uint64_t)off + (uint64_t)tmp)
				dst.rss->key = rte_memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->key, tmp);
			off += tmp;
		}
		if (src.rss->queue_num) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->queue));
			tmp = sizeof(*src.rss->queue) * src.rss->queue_num;
			if (size >= (uint64_t)off + (uint64_t)tmp)
				dst.rss->queue = rte_memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->queue, tmp);
			off += tmp;
		}
		break;
	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
	case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
		src.vxlan_encap = action->conf;
		dst.vxlan_encap = buf;
		RTE_BUILD_BUG_ON(sizeof(*src.vxlan_encap) !=
				 sizeof(*src.nvgre_encap) ||
				 offsetof(struct rte_flow_action_vxlan_encap,
					  definition) !=
				 offsetof(struct rte_flow_action_nvgre_encap,
					  definition));
		off = sizeof(*dst.vxlan_encap);
		if (src.vxlan_encap->definition) {
			off = RTE_ALIGN_CEIL
				(off, sizeof(*dst.vxlan_encap->definition));
			ret = rte_flow_conv
				(RTE_FLOW_CONV_OP_PATTERN,
				 (void *)((uintptr_t)dst.vxlan_encap + off),
				 size > off ? size - off : 0,
				 src.vxlan_encap->definition, NULL);
			if (ret < 0)
				return 0;
			if (size >= off + ret)
				dst.vxlan_encap->definition =
					(void *)((uintptr_t)dst.vxlan_encap +
						 off);
			off += ret;
		}
		break;
	default:
		off = rte_flow_conv_copy(buf, action->conf, size,
					 rte_flow_desc_action, action->type);
		break;
	}
	return off;
}

/**
 * Copy a list of pattern items.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source pattern items.
 * @param num
 *   Maximum number of pattern items to process from @p src or 0 to process
 *   the entire list. In both cases, processing stops after
 *   RTE_FLOW_ITEM_TYPE_END is encountered.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store
 *   pattern items regardless of @p size on success (@p buf contents are
 *   truncated to @p size if not large enough), a negative errno value
 *   otherwise and rte_errno is set.
 */
static int
rte_flow_conv_pattern(struct rte_flow_item *dst,
		      const size_t size,
		      const struct rte_flow_item *src,
		      unsigned int num,
		      struct rte_flow_error *error)
{
	uintptr_t data = (uintptr_t)dst;
	size_t off;
	size_t ret;
	unsigned int i;

	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
		/* Allow PMD private flow item. */
		if (((int)src->type >= 0) &&
		    ((size_t)src->type >= RTE_DIM(rte_flow_desc_item) ||
		     !rte_flow_desc_item[src->type].name))
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, src,
				 "cannot convert unknown item type");
		if (size >= off + sizeof(*dst))
			*dst = (struct rte_flow_item){
				.type = src->type,
			};
		off += sizeof(*dst);
		if (!src->type)
			num = i + 1;
	}
	num = i;
	src -= num;
	dst -= num;
	do {
		if (src->spec) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_SPEC);
			if (size && size >= off + ret)
				dst->spec = (void *)(data + off);
			off += ret;
		}
		if (src->last) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_LAST);
			if (size && size >= off + ret)
				dst->last = (void *)(data + off);
			off += ret;
		}
		if (src->mask) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_MASK);
			if (size && size >= off + ret)
				dst->mask = (void *)(data + off);
			off += ret;
		}
		++src;
		++dst;
	} while (--num);
	return off;
}

/**
 * Copy a list of actions.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source actions.
 * @param num
 *   Maximum number of actions to process from @p src or 0 to process the
 *   entire list. In both cases, processing stops after
 *   RTE_FLOW_ACTION_TYPE_END is encountered.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store
 *   actions regardless of @p size on success (@p buf contents are truncated
 *   to @p size if not large enough), a negative errno value otherwise and
 *   rte_errno is set.
 */
static int
rte_flow_conv_actions(struct rte_flow_action *dst,
		      const size_t size,
		      const struct rte_flow_action *src,
		      unsigned int num,
		      struct rte_flow_error *error)
{
	uintptr_t data = (uintptr_t)dst;
	size_t off;
	size_t ret;
	unsigned int i;

	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
		/* Allow PMD private flow action. */
		if (((int)src->type >= 0) &&
		    ((size_t)src->type >= RTE_DIM(rte_flow_desc_action) ||
		     !rte_flow_desc_action[src->type].name))
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
				 src, "cannot convert unknown action type");
		if (size >= off + sizeof(*dst))
			*dst = (struct rte_flow_action){
				.type = src->type,
			};
		off += sizeof(*dst);
		if (!src->type)
			num = i + 1;
	}
	num = i;
	src -= num;
	dst -= num;
	do {
		if (src->type == RTE_FLOW_ACTION_TYPE_INDIRECT) {
			/*
			 * The conf field holds the indirect action handle
			 * itself. Copy the handle directly instead of
			 * duplicating the pointed-to memory.
			 */
			if (size)
				dst->conf = src->conf;
		} else if (src->conf) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_action_conf
				((void *)(data + off),
				 size > off ? size - off : 0, src);
			if (size && size >= off + ret)
				dst->conf = (void *)(data + off);
			off += ret;
		}
		++src;
		++dst;
	} while (--num);
	return off;
}

/**
 * Copy flow rule components.
 *
 * This comprises the flow rule descriptor itself, attributes, pattern and
 * actions list. NULL components in @p src are skipped.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source flow rule descriptor.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store all
 *   components including the descriptor regardless of @p size on success
 *   (@p buf contents are truncated to @p size if not large enough), a
 *   negative errno value otherwise and rte_errno is set.
 */
static int
rte_flow_conv_rule(struct rte_flow_conv_rule *dst,
		   const size_t size,
		   const struct rte_flow_conv_rule *src,
		   struct rte_flow_error *error)
{
	size_t off;
	int ret;

	rte_memcpy(dst,
		   (&(struct rte_flow_conv_rule){
			.attr = NULL,
			.pattern = NULL,
			.actions = NULL,
		   }),
		   size > sizeof(*dst) ? sizeof(*dst) : size);
	off = sizeof(*dst);
	if (src->attr_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		if (size && size >= off + sizeof(*dst->attr))
			dst->attr = rte_memcpy
				((void *)((uintptr_t)dst + off),
				 src->attr_ro, sizeof(*dst->attr));
		off += sizeof(*dst->attr);
	}
	if (src->pattern_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		ret = rte_flow_conv_pattern((void *)((uintptr_t)dst + off),
					    size > off ? size - off : 0,
					    src->pattern_ro, 0, error);
		if (ret < 0)
			return ret;
		if (size && size >= off + (size_t)ret)
			dst->pattern = (void *)((uintptr_t)dst + off);
		off += ret;
	}
	if (src->actions_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		ret = rte_flow_conv_actions((void *)((uintptr_t)dst + off),
					    size > off ? size - off : 0,
					    src->actions_ro, 0, error);
		if (ret < 0)
			return ret;
		if (size >= off + (size_t)ret)
			dst->actions = (void *)((uintptr_t)dst + off);
		off += ret;
	}
	return off;
}

/**
 * Retrieve the name of a pattern item/action type.
 *
 * @param is_action
 *   Nonzero when @p src represents an action type instead of a pattern item
 *   type.
 * @param is_ptr
 *   Nonzero to write string address instead of contents into @p dst.
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Depending on @p is_action, source pattern item or action type cast as a
 *   pointer.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store the
 *   name or its address regardless of @p size on success (@p buf contents
 *   are truncated to @p size if not large enough), a negative errno value
 *   otherwise and rte_errno is set.
 */
static int
rte_flow_conv_name(int is_action,
		   int is_ptr,
		   char *dst,
		   const size_t size,
		   const void *src,
		   struct rte_flow_error *error)
{
	struct desc_info {
		const struct rte_flow_desc_data *data;
		size_t num;
	};
	static const struct desc_info info_rep[2] = {
		{ rte_flow_desc_item, RTE_DIM(rte_flow_desc_item), },
		{ rte_flow_desc_action, RTE_DIM(rte_flow_desc_action), },
	};
	const struct desc_info *const info = &info_rep[!!is_action];
	unsigned int type = (uintptr_t)src;

	if (type >= info->num)
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			 "unknown object type to retrieve the name of");
	if (!is_ptr)
		return strlcpy(dst, info->data[type].name, size);
	if (size >= sizeof(const char **))
		*((const char **)dst) = info->data[type].name;
	return sizeof(const char **);
}

/** Helper function to convert flow API objects. */
int
rte_flow_conv(enum rte_flow_conv_op op,
	      void *dst,
	      size_t size,
	      const void *src,
	      struct rte_flow_error *error)
{
	int ret;

	switch (op) {
		const struct rte_flow_attr *attr;
		const struct rte_flow_item *item;

	case RTE_FLOW_CONV_OP_NONE:
		ret = 0;
		break;
	case RTE_FLOW_CONV_OP_ATTR:
		attr = src;
		if (size > sizeof(*attr))
			size = sizeof(*attr);
		rte_memcpy(dst, attr, size);
		ret = sizeof(*attr);
		break;
	case RTE_FLOW_CONV_OP_ITEM:
		ret = rte_flow_conv_pattern(dst, size, src, 1, error);
		break;
	case RTE_FLOW_CONV_OP_ITEM_MASK:
		item = src;
		if (item->mask == NULL) {
			ret = rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_MASK,
						 item, "Mask not provided");
			break;
		}
		ret = rte_flow_conv_item_spec(dst, size, src, RTE_FLOW_CONV_ITEM_MASK);
		break;
	case RTE_FLOW_CONV_OP_ACTION:
		ret = rte_flow_conv_actions(dst, size, src, 1, error);
		break;
	case RTE_FLOW_CONV_OP_PATTERN:
		ret = rte_flow_conv_pattern(dst, size, src, 0, error);
		break;
	case RTE_FLOW_CONV_OP_ACTIONS:
		ret = rte_flow_conv_actions(dst, size, src, 0, error);
		break;
	case RTE_FLOW_CONV_OP_RULE:
		ret = rte_flow_conv_rule(dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ITEM_NAME:
		ret = rte_flow_conv_name(0, 0, dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ACTION_NAME:
		ret = rte_flow_conv_name(1, 0, dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ITEM_NAME_PTR:
		ret = rte_flow_conv_name(0, 1, dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ACTION_NAME_PTR:
		ret = rte_flow_conv_name(1, 1, dst, size, src, error);
		break;
	default:
		ret = rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			 "unknown object conversion operation");
	}

	rte_flow_trace_conv(op, dst, size, src, ret);

	return ret;
}
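
/*
 * Usage sketch (illustrative, not part of this file): the *_NAME
 * operations turn a type enum into a human-readable string, e.g. for
 * logging; the source argument is the type value itself cast as a
 * pointer.
 *
 *	char name[32];
 *
 *	if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME, name, sizeof(name),
 *			  (void *)(uintptr_t)RTE_FLOW_ACTION_TYPE_RSS,
 *			  NULL) > 0)
 *		printf("action: %s\n", name); // prints "RSS"
 */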

/** Store a full rte_flow description. */
size_t
rte_flow_copy(struct rte_flow_desc *desc, size_t len,
	      const struct rte_flow_attr *attr,
	      const struct rte_flow_item *items,
	      const struct rte_flow_action *actions)
{
	/*
	 * Overlap struct rte_flow_conv with struct rte_flow_desc in order
	 * to convert the former to the latter without wasting space.
	 */
	struct rte_flow_conv_rule *dst =
		len ?
		(void *)((uintptr_t)desc +
			 (offsetof(struct rte_flow_desc, actions) -
			  offsetof(struct rte_flow_conv_rule, actions))) :
		NULL;
	size_t dst_size =
		len > sizeof(*desc) - sizeof(*dst) ?
		len - (sizeof(*desc) - sizeof(*dst)) :
		0;
	struct rte_flow_conv_rule src = {
		.attr_ro = NULL,
		.pattern_ro = items,
		.actions_ro = actions,
	};
	int ret;

	RTE_BUILD_BUG_ON(sizeof(struct rte_flow_desc) <
			 sizeof(struct rte_flow_conv_rule));
	if (dst_size &&
	    (&dst->pattern != &desc->items ||
	     &dst->actions != &desc->actions ||
	     (uintptr_t)(dst + 1) != (uintptr_t)(desc + 1))) {
		rte_errno = EINVAL;
		return 0;
	}
	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, dst, dst_size, &src, NULL);
	if (ret < 0)
		return 0;
	ret += sizeof(*desc) - sizeof(*dst);
	rte_memcpy(desc,
		   (&(struct rte_flow_desc){
			.size = ret,
			.attr = *attr,
			.items = dst_size ? dst->pattern : NULL,
			.actions = dst_size ? dst->actions : NULL,
		   }),
		   len > sizeof(*desc) ? sizeof(*desc) : len);

	rte_flow_trace_copy(desc, len, attr, items, actions, ret);

	return ret;
}
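
/*
 * Usage sketch (illustrative, not part of this file), assuming the same
 * two-pass convention as rte_flow_conv(): a first call with a zero
 * length returns the required size, a second call fills the buffer.
 * use_desc() is a hypothetical consumer.
 *
 *	size_t need = rte_flow_copy(NULL, 0, &attr, pattern, actions);
 *	struct rte_flow_desc *desc = need ? malloc(need) : NULL;
 *
 *	if (desc != NULL &&
 *	    rte_flow_copy(desc, need, &attr, pattern, actions) == need)
 *		use_desc(desc);
 *	free(desc);
 */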

int
rte_flow_dev_dump(uint16_t port_id, struct rte_flow *flow,
			FILE *file, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->dev_dump)) {
		fts_enter(dev);
		ret = ops->dev_dump(dev, flow, file, error);
		fts_exit(dev);
		return flow_err(port_id, ret, error);
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

int
rte_flow_get_aged_flows(uint16_t port_id, void **contexts,
		    uint32_t nb_contexts, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->get_aged_flows)) {
		fts_enter(dev);
		ret = ops->get_aged_flows(dev, contexts, nb_contexts, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_get_aged_flows(port_id, contexts, nb_contexts, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}
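
/*
 * Usage sketch (illustrative, not part of this file): with rules
 * carrying an AGE action whose context points back at application
 * state, aged-out entries can be polled here, typically from the
 * RTE_ETH_EVENT_FLOW_AGED callback. expire_rule() is a hypothetical
 * application hook.
 *
 *	void *ctx[64];
 *	int n = rte_flow_get_aged_flows(port_id, ctx, RTE_DIM(ctx), &err);
 *
 *	for (int i = 0; i < n; i++)
 *		expire_rule(ctx[i]);
 */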

int
rte_flow_get_q_aged_flows(uint16_t port_id, uint32_t queue_id, void **contexts,
			  uint32_t nb_contexts, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->get_q_aged_flows)) {
		fts_enter(dev);
		ret = ops->get_q_aged_flows(dev, queue_id, contexts,
					    nb_contexts, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_get_q_aged_flows(port_id, queue_id, contexts,
						nb_contexts, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

struct rte_flow_action_handle *
rte_flow_action_handle_create(uint16_t port_id,
			      const struct rte_flow_indir_action_conf *conf,
			      const struct rte_flow_action *action,
			      struct rte_flow_error *error)
{
	struct rte_flow_action_handle *handle;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return NULL;
	if (unlikely(!ops->action_handle_create)) {
		rte_flow_error_set(error, ENOSYS,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   rte_strerror(ENOSYS));
		return NULL;
	}
	handle = ops->action_handle_create(&rte_eth_devices[port_id],
					   conf, action, error);
	if (handle == NULL)
		flow_err(port_id, -rte_errno, error);

	rte_flow_trace_action_handle_create(port_id, conf, action, handle);

	return handle;
}

int
rte_flow_action_handle_destroy(uint16_t port_id,
			       struct rte_flow_action_handle *handle,
			       struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(!ops->action_handle_destroy))
		return rte_flow_error_set(error, ENOSYS,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOSYS));
	ret = ops->action_handle_destroy(&rte_eth_devices[port_id],
					 handle, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_action_handle_destroy(port_id, handle, ret);

	return ret;
}

int
rte_flow_action_handle_update(uint16_t port_id,
			      struct rte_flow_action_handle *handle,
			      const void *update,
			      struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(!ops->action_handle_update))
		return rte_flow_error_set(error, ENOSYS,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOSYS));
	ret = ops->action_handle_update(&rte_eth_devices[port_id], handle,
					update, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_action_handle_update(port_id, handle, update, ret);

	return ret;
}

int
rte_flow_action_handle_query(uint16_t port_id,
			     const struct rte_flow_action_handle *handle,
			     void *data,
			     struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(!ops->action_handle_query))
		return rte_flow_error_set(error, ENOSYS,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOSYS));
	ret = ops->action_handle_query(&rte_eth_devices[port_id], handle,
				       data, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_action_handle_query(port_id, handle, data, ret);

	return ret;
}
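
/*
 * Usage sketch (illustrative, not part of this file): an indirect
 * (shared) counter is created once, referenced from any number of rules
 * through RTE_FLOW_ACTION_TYPE_INDIRECT, then queried and destroyed via
 * its handle.
 *
 *	const struct rte_flow_indir_action_conf conf = { .ingress = 1 };
 *	const struct rte_flow_action count = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *	};
 *	struct rte_flow_query_count stats = { 0 };
 *	struct rte_flow_action_handle *h;
 *
 *	h = rte_flow_action_handle_create(port_id, &conf, &count, &err);
 *	if (h != NULL) {
 *		if (rte_flow_action_handle_query(port_id, h, &stats, &err) == 0)
 *			printf("hits: %" PRIu64 "\n", stats.hits);
 *		rte_flow_action_handle_destroy(port_id, h, &err);
 *	}
 */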

int
rte_flow_tunnel_decap_set(uint16_t port_id,
			  struct rte_flow_tunnel *tunnel,
			  struct rte_flow_action **actions,
			  uint32_t *num_of_actions,
			  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_decap_set)) {
		ret = flow_err(port_id,
			       ops->tunnel_decap_set(dev, tunnel, actions,
						     num_of_actions, error),
			       error);

		rte_flow_trace_tunnel_decap_set(port_id, tunnel, actions,
						num_of_actions, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_tunnel_match(uint16_t port_id,
		      struct rte_flow_tunnel *tunnel,
		      struct rte_flow_item **items,
		      uint32_t *num_of_items,
		      struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_match)) {
		ret = flow_err(port_id,
			       ops->tunnel_match(dev, tunnel, items,
						 num_of_items, error),
			       error);

		rte_flow_trace_tunnel_match(port_id, tunnel, items, num_of_items,
					    ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_get_restore_info(uint16_t port_id,
			  struct rte_mbuf *m,
			  struct rte_flow_restore_info *restore_info,
			  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->get_restore_info)) {
		ret = flow_err(port_id,
			       ops->get_restore_info(dev, m, restore_info,
						     error),
			       error);

		rte_flow_trace_get_restore_info(port_id, m, restore_info, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

static struct {
	const struct rte_mbuf_dynflag desc;
	uint64_t value;
} flow_restore_info_dynflag = {
	.desc = { .name = "RTE_MBUF_F_RX_RESTORE_INFO", },
};

uint64_t
rte_flow_restore_info_dynflag(void)
{
	return flow_restore_info_dynflag.value;
}

int
rte_flow_restore_info_dynflag_register(void)
{
	if (flow_restore_info_dynflag.value == 0) {
		int offset = rte_mbuf_dynflag_register(&flow_restore_info_dynflag.desc);

		if (offset < 0)
			return -1;
		flow_restore_info_dynflag.value = RTE_BIT64(offset);
	}

	return 0;
}
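
/*
 * Usage sketch (illustrative, not part of this file): once the flag is
 * registered, the RX path can test it before paying for a call to
 * rte_flow_get_restore_info(). handle_tunnel_packet() is a hypothetical
 * application hook.
 *
 *	uint64_t restore_mask = rte_flow_restore_info_dynflag();
 *	struct rte_flow_restore_info info;
 *
 *	if ((m->ol_flags & restore_mask) != 0 &&
 *	    rte_flow_get_restore_info(port_id, m, &info, &err) == 0 &&
 *	    (info.flags & RTE_FLOW_RESTORE_INFO_TUNNEL) != 0)
 *		handle_tunnel_packet(m, &info.tunnel);
 */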

int
rte_flow_tunnel_action_decap_release(uint16_t port_id,
				     struct rte_flow_action *actions,
				     uint32_t num_of_actions,
				     struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_action_decap_release)) {
		ret = flow_err(port_id,
			       ops->tunnel_action_decap_release(dev, actions,
								num_of_actions,
								error),
			       error);

		rte_flow_trace_tunnel_action_decap_release(port_id, actions,
							   num_of_actions, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_tunnel_item_release(uint16_t port_id,
			     struct rte_flow_item *items,
			     uint32_t num_of_items,
			     struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_item_release)) {
		ret = flow_err(port_id,
			       ops->tunnel_item_release(dev, items,
							num_of_items, error),
			       error);

		rte_flow_trace_tunnel_item_release(port_id, items, num_of_items, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_pick_transfer_proxy(uint16_t port_id, uint16_t *proxy_port_id,
			     struct rte_flow_error *error)
{
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_eth_dev *dev;
	int ret;

	if (unlikely(ops == NULL))
		return -rte_errno;

	if (ops->pick_transfer_proxy == NULL) {
		*proxy_port_id = port_id;
		return 0;
	}

	dev = &rte_eth_devices[port_id];

	ret = flow_err(port_id,
		       ops->pick_transfer_proxy(dev, proxy_port_id, error),
		       error);

	rte_flow_trace_pick_transfer_proxy(port_id, proxy_port_id, ret);

	return ret;
}
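
/*
 * Usage sketch (illustrative, not part of this file): transfer rules
 * must be created through the proxy port, which may differ from the
 * port the traffic is related to.
 *
 *	uint16_t proxy_id;
 *
 *	if (rte_flow_pick_transfer_proxy(port_id, &proxy_id, &err) == 0)
 *		flow = rte_flow_create(proxy_id, &transfer_attr,
 *				       pattern, actions, &err);
 */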

struct rte_flow_item_flex_handle *
rte_flow_flex_item_create(uint16_t port_id,
			  const struct rte_flow_item_flex_conf *conf,
			  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow_item_flex_handle *handle;

	if (unlikely(!ops))
		return NULL;
	if (unlikely(!ops->flex_item_create)) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, rte_strerror(ENOTSUP));
		return NULL;
	}
	handle = ops->flex_item_create(dev, conf, error);
	if (handle == NULL)
		flow_err(port_id, -rte_errno, error);

	rte_flow_trace_flex_item_create(port_id, conf, handle);

	return handle;
}

int
rte_flow_flex_item_release(uint16_t port_id,
			   const struct rte_flow_item_flex_handle *handle,
			   struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops || !ops->flex_item_release))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOTSUP));
	ret = ops->flex_item_release(dev, handle, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_flex_item_release(port_id, handle, ret);

	return ret;
}

int
rte_flow_info_get(uint16_t port_id,
		  struct rte_flow_port_info *port_info,
		  struct rte_flow_queue_info *queue_info,
		  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (dev->data->dev_configured == 0) {
		FLOW_LOG(INFO,
			"Device with port_id=%"PRIu16" is not configured.",
			port_id);
		return -EINVAL;
	}
	if (port_info == NULL) {
		FLOW_LOG(ERR, "Port %"PRIu16" info is NULL.", port_id);
		return -EINVAL;
	}
	if (likely(!!ops->info_get)) {
		ret = flow_err(port_id,
			       ops->info_get(dev, port_info, queue_info, error),
			       error);

		rte_flow_trace_info_get(port_id, port_info, queue_info, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_configure(uint16_t port_id,
		   const struct rte_flow_port_attr *port_attr,
		   uint16_t nb_queue,
		   const struct rte_flow_queue_attr *queue_attr[],
		   struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (dev->data->dev_configured == 0) {
		FLOW_LOG(INFO,
			"Device with port_id=%"PRIu16" is not configured.",
			port_id);
		return -EINVAL;
	}
	if (dev->data->dev_started != 0) {
		FLOW_LOG(INFO,
			"Device with port_id=%"PRIu16" already started.",
			port_id);
		return -EINVAL;
	}
	if (port_attr == NULL) {
		FLOW_LOG(ERR, "Port %"PRIu16" port attributes are NULL.", port_id);
		return -EINVAL;
	}
1707 	if (queue_attr == NULL) {
1708 		FLOW_LOG(ERR, "Port %"PRIu16" queue info is NULL.", port_id);
1709 		return -EINVAL;
1710 	}
1711 	if ((port_attr->flags & RTE_FLOW_PORT_FLAG_SHARE_INDIRECT) &&
1712 	     !rte_eth_dev_is_valid_port(port_attr->host_port_id)) {
1713 		return rte_flow_error_set(error, ENODEV,
1714 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1715 					  NULL, rte_strerror(ENODEV));
1716 	}
1717 	if (likely(!!ops->configure)) {
1718 		ret = ops->configure(dev, port_attr, nb_queue, queue_attr, error);
1719 		if (ret == 0)
1720 			dev->data->flow_configured = 1;
1721 		ret = flow_err(port_id, ret, error);
1722 
1723 		rte_flow_trace_configure(port_id, port_attr, nb_queue, queue_attr, ret);
1724 
1725 		return ret;
1726 	}
1727 	return rte_flow_error_set(error, ENOTSUP,
1728 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1729 				  NULL, rte_strerror(ENOTSUP));
1730 }
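
/*
 * Minimal configuration sketch (all values illustrative): the flow engine
 * must be configured after rte_eth_dev_configure() and before
 * rte_eth_dev_start(), matching the checks above.
 *
 *	struct rte_flow_port_attr port_attr = { .nb_counters = 1024 };
 *	struct rte_flow_queue_attr queue_attr = { .size = 64 };
 *	const struct rte_flow_queue_attr *attr_list[] = { &queue_attr };
 *	struct rte_flow_error error;
 *
 *	int ret = rte_flow_configure(0, &port_attr, 1, attr_list, &error);
 */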
1731 
1732 struct rte_flow_pattern_template *
1733 rte_flow_pattern_template_create(uint16_t port_id,
1734 		const struct rte_flow_pattern_template_attr *template_attr,
1735 		const struct rte_flow_item pattern[],
1736 		struct rte_flow_error *error)
1737 {
1738 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1739 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1740 	struct rte_flow_pattern_template *template;
1741 
1742 	if (unlikely(!ops))
1743 		return NULL;
1744 	if (dev->data->flow_configured == 0) {
1745 		FLOW_LOG(INFO,
1746 			"Flow engine on port_id=%"PRIu16" is not configured.",
1747 			port_id);
1748 		rte_flow_error_set(error, EINVAL,
1749 				RTE_FLOW_ERROR_TYPE_STATE,
1750 				NULL, rte_strerror(EINVAL));
1751 		return NULL;
1752 	}
1753 	if (template_attr == NULL) {
1754 		FLOW_LOG(ERR,
1755 			     "Port %"PRIu16" template attr is NULL.",
1756 			     port_id);
1757 		rte_flow_error_set(error, EINVAL,
1758 				   RTE_FLOW_ERROR_TYPE_ATTR,
1759 				   NULL, rte_strerror(EINVAL));
1760 		return NULL;
1761 	}
1762 	if (pattern == NULL) {
1763 		FLOW_LOG(ERR,
1764 			     "Port %"PRIu16" pattern is NULL.",
1765 			     port_id);
1766 		rte_flow_error_set(error, EINVAL,
1767 				   RTE_FLOW_ERROR_TYPE_ATTR,
1768 				   NULL, rte_strerror(EINVAL));
1769 		return NULL;
1770 	}
1771 	if (likely(!!ops->pattern_template_create)) {
1772 		template = ops->pattern_template_create(dev, template_attr,
1773 							pattern, error);
1774 		if (template == NULL)
1775 			flow_err(port_id, -rte_errno, error);
1776 
1777 		rte_flow_trace_pattern_template_create(port_id, template_attr,
1778 						       pattern, template);
1779 
1780 		return template;
1781 	}
1782 	rte_flow_error_set(error, ENOTSUP,
1783 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1784 			   NULL, rte_strerror(ENOTSUP));
1785 	return NULL;
1786 }
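
/*
 * Pattern template sketch (illustrative): in template mode the item masks,
 * not the spec values, declare which header fields later rules may match.
 *
 *	struct rte_flow_pattern_template_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_eth eth_mask = {
 *		.hdr.ether_type = RTE_BE16(0xffff),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH, .mask = &eth_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_error error;
 *	struct rte_flow_pattern_template *pt =
 *		rte_flow_pattern_template_create(0, &attr, pattern, &error);
 */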
1787 
1788 int
1789 rte_flow_pattern_template_destroy(uint16_t port_id,
1790 		struct rte_flow_pattern_template *pattern_template,
1791 		struct rte_flow_error *error)
1792 {
1793 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1794 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1795 	int ret;
1796 
1797 	if (unlikely(!ops))
1798 		return -rte_errno;
1799 	if (unlikely(pattern_template == NULL))
1800 		return 0;
1801 	if (likely(!!ops->pattern_template_destroy)) {
1802 		ret = flow_err(port_id,
1803 			       ops->pattern_template_destroy(dev,
1804 							     pattern_template,
1805 							     error),
1806 			       error);
1807 
1808 		rte_flow_trace_pattern_template_destroy(port_id, pattern_template,
1809 							ret);
1810 
1811 		return ret;
1812 	}
1813 	return rte_flow_error_set(error, ENOTSUP,
1814 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1815 				  NULL, rte_strerror(ENOTSUP));
1816 }
1817 
1818 struct rte_flow_actions_template *
1819 rte_flow_actions_template_create(uint16_t port_id,
1820 			const struct rte_flow_actions_template_attr *template_attr,
1821 			const struct rte_flow_action actions[],
1822 			const struct rte_flow_action masks[],
1823 			struct rte_flow_error *error)
1824 {
1825 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1826 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1827 	struct rte_flow_actions_template *template;
1828 
1829 	if (unlikely(!ops))
1830 		return NULL;
1831 	if (dev->data->flow_configured == 0) {
1832 		FLOW_LOG(INFO,
1833 			"Flow engine on port_id=%"PRIu16" is not configured.",
1834 			port_id);
1835 		rte_flow_error_set(error, EINVAL,
1836 				   RTE_FLOW_ERROR_TYPE_STATE,
1837 				   NULL, rte_strerror(EINVAL));
1838 		return NULL;
1839 	}
1840 	if (template_attr == NULL) {
1841 		FLOW_LOG(ERR,
1842 			     "Port %"PRIu16" template attr is NULL.",
1843 			     port_id);
1844 		rte_flow_error_set(error, EINVAL,
1845 				   RTE_FLOW_ERROR_TYPE_ATTR,
1846 				   NULL, rte_strerror(EINVAL));
1847 		return NULL;
1848 	}
1849 	if (actions == NULL) {
1850 		FLOW_LOG(ERR,
1851 			     "Port %"PRIu16" actions is NULL.",
1852 			     port_id);
1853 		rte_flow_error_set(error, EINVAL,
1854 				   RTE_FLOW_ERROR_TYPE_ATTR,
1855 				   NULL, rte_strerror(EINVAL));
1856 		return NULL;
1857 	}
	if (masks == NULL) {
		FLOW_LOG(ERR,
			     "Port %"PRIu16" masks is NULL.",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
1867 	if (likely(!!ops->actions_template_create)) {
1868 		template = ops->actions_template_create(dev, template_attr,
1869 							actions, masks, error);
1870 		if (template == NULL)
1871 			flow_err(port_id, -rte_errno, error);
1872 
1873 		rte_flow_trace_actions_template_create(port_id, template_attr, actions,
1874 						       masks, template);
1875 
1876 		return template;
1877 	}
1878 	rte_flow_error_set(error, ENOTSUP,
1879 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1880 			   NULL, rte_strerror(ENOTSUP));
1881 	return NULL;
1882 }
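
/*
 * Actions template sketch (illustrative): "actions" lists the action types
 * with optional default values, while "masks" marks which configuration
 * fields are fixed in the template and which are supplied per rule.
 * A NULL conf in the QUEUE mask entry below leaves the queue index to be
 * given at rule creation time.
 *
 *	struct rte_flow_actions_template_attr attr = { .ingress = 1 };
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_action masks[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error error;
 *	struct rte_flow_actions_template *at =
 *		rte_flow_actions_template_create(0, &attr, actions, masks,
 *						 &error);
 */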
1883 
1884 int
1885 rte_flow_actions_template_destroy(uint16_t port_id,
1886 			struct rte_flow_actions_template *actions_template,
1887 			struct rte_flow_error *error)
1888 {
1889 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1890 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1891 	int ret;
1892 
1893 	if (unlikely(!ops))
1894 		return -rte_errno;
1895 	if (unlikely(actions_template == NULL))
1896 		return 0;
1897 	if (likely(!!ops->actions_template_destroy)) {
1898 		ret = flow_err(port_id,
1899 			       ops->actions_template_destroy(dev,
1900 							     actions_template,
1901 							     error),
1902 			       error);
1903 
1904 		rte_flow_trace_actions_template_destroy(port_id, actions_template,
1905 							ret);
1906 
1907 		return ret;
1908 	}
1909 	return rte_flow_error_set(error, ENOTSUP,
1910 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1911 				  NULL, rte_strerror(ENOTSUP));
1912 }
1913 
1914 struct rte_flow_template_table *
1915 rte_flow_template_table_create(uint16_t port_id,
1916 			const struct rte_flow_template_table_attr *table_attr,
1917 			struct rte_flow_pattern_template *pattern_templates[],
1918 			uint8_t nb_pattern_templates,
1919 			struct rte_flow_actions_template *actions_templates[],
1920 			uint8_t nb_actions_templates,
1921 			struct rte_flow_error *error)
1922 {
1923 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1924 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1925 	struct rte_flow_template_table *table;
1926 
1927 	if (unlikely(!ops))
1928 		return NULL;
1929 	if (dev->data->flow_configured == 0) {
1930 		FLOW_LOG(INFO,
1931 			"Flow engine on port_id=%"PRIu16" is not configured.",
1932 			port_id);
1933 		rte_flow_error_set(error, EINVAL,
1934 				   RTE_FLOW_ERROR_TYPE_STATE,
1935 				   NULL, rte_strerror(EINVAL));
1936 		return NULL;
1937 	}
1938 	if (table_attr == NULL) {
1939 		FLOW_LOG(ERR,
1940 			     "Port %"PRIu16" table attr is NULL.",
1941 			     port_id);
1942 		rte_flow_error_set(error, EINVAL,
1943 				   RTE_FLOW_ERROR_TYPE_ATTR,
1944 				   NULL, rte_strerror(EINVAL));
1945 		return NULL;
1946 	}
1947 	if (pattern_templates == NULL) {
1948 		FLOW_LOG(ERR,
1949 			     "Port %"PRIu16" pattern templates is NULL.",
1950 			     port_id);
1951 		rte_flow_error_set(error, EINVAL,
1952 				   RTE_FLOW_ERROR_TYPE_ATTR,
1953 				   NULL, rte_strerror(EINVAL));
1954 		return NULL;
1955 	}
1956 	if (actions_templates == NULL) {
1957 		FLOW_LOG(ERR,
1958 			     "Port %"PRIu16" actions templates is NULL.",
1959 			     port_id);
1960 		rte_flow_error_set(error, EINVAL,
1961 				   RTE_FLOW_ERROR_TYPE_ATTR,
1962 				   NULL, rte_strerror(EINVAL));
1963 		return NULL;
1964 	}
1965 	if (likely(!!ops->template_table_create)) {
1966 		table = ops->template_table_create(dev, table_attr,
1967 					pattern_templates, nb_pattern_templates,
1968 					actions_templates, nb_actions_templates,
1969 					error);
1970 		if (table == NULL)
1971 			flow_err(port_id, -rte_errno, error);
1972 
1973 		rte_flow_trace_template_table_create(port_id, table_attr,
1974 						     pattern_templates,
1975 						     nb_pattern_templates,
1976 						     actions_templates,
1977 						     nb_actions_templates, table);
1978 
1979 		return table;
1980 	}
1981 	rte_flow_error_set(error, ENOTSUP,
1982 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1983 			   NULL, rte_strerror(ENOTSUP));
1984 	return NULL;
1985 }
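
/*
 * Table creation sketch (illustrative), combining one pattern template "pt"
 * and one actions template "at" as created in the sketches above:
 *
 *	struct rte_flow_template_table_attr table_attr = {
 *		.flow_attr = { .ingress = 1 },
 *		.nb_flows = 1024,
 *	};
 *	struct rte_flow_error error;
 *	struct rte_flow_template_table *table =
 *		rte_flow_template_table_create(0, &table_attr,
 *					       &pt, 1, &at, 1, &error);
 */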
1986 
1987 int
1988 rte_flow_template_table_destroy(uint16_t port_id,
1989 				struct rte_flow_template_table *template_table,
1990 				struct rte_flow_error *error)
1991 {
1992 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1993 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1994 	int ret;
1995 
1996 	if (unlikely(!ops))
1997 		return -rte_errno;
1998 	if (unlikely(template_table == NULL))
1999 		return 0;
2000 	if (likely(!!ops->template_table_destroy)) {
2001 		ret = flow_err(port_id,
2002 			       ops->template_table_destroy(dev,
2003 							   template_table,
2004 							   error),
2005 			       error);
2006 
2007 		rte_flow_trace_template_table_destroy(port_id, template_table,
2008 						      ret);
2009 
2010 		return ret;
2011 	}
2012 	return rte_flow_error_set(error, ENOTSUP,
2013 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2014 				  NULL, rte_strerror(ENOTSUP));
2015 }
2016 
2017 int
2018 rte_flow_group_set_miss_actions(uint16_t port_id,
2019 				uint32_t group_id,
2020 				const struct rte_flow_group_attr *attr,
2021 				const struct rte_flow_action actions[],
2022 				struct rte_flow_error *error)
2023 {
2024 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2025 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2026 
2027 	if (unlikely(!ops))
2028 		return -rte_errno;
2029 	if (likely(!!ops->group_set_miss_actions)) {
2030 		return flow_err(port_id,
2031 				ops->group_set_miss_actions(dev, group_id, attr, actions, error),
2032 				error);
2033 	}
2034 	return rte_flow_error_set(error, ENOTSUP,
2035 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2036 				  NULL, rte_strerror(ENOTSUP));
2037 }
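
/*
 * Group miss actions sketch (illustrative): packets that miss every rule in
 * group 1 are handled by the listed actions.
 *
 *	struct rte_flow_group_attr gattr = { .ingress = 1 };
 *	struct rte_flow_action miss[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error error;
 *	int ret = rte_flow_group_set_miss_actions(0, 1, &gattr, miss, &error);
 */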
2038 
2039 struct rte_flow *
2040 rte_flow_async_create(uint16_t port_id,
2041 		      uint32_t queue_id,
2042 		      const struct rte_flow_op_attr *op_attr,
2043 		      struct rte_flow_template_table *template_table,
2044 		      const struct rte_flow_item pattern[],
2045 		      uint8_t pattern_template_index,
2046 		      const struct rte_flow_action actions[],
2047 		      uint8_t actions_template_index,
2048 		      void *user_data,
2049 		      struct rte_flow_error *error)
2050 {
2051 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2052 	struct rte_flow *flow;
2053 
2054 #ifdef RTE_FLOW_DEBUG
2055 	if (!rte_eth_dev_is_valid_port(port_id)) {
2056 		rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2057 				   rte_strerror(ENODEV));
2058 		return NULL;
2059 	}
2060 	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->async_create == NULL) {
2061 		rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2062 				   rte_strerror(ENOSYS));
2063 		return NULL;
2064 	}
2065 #endif
2066 
2067 	flow = dev->flow_fp_ops->async_create(dev, queue_id,
2068 					      op_attr, template_table,
2069 					      pattern, pattern_template_index,
2070 					      actions, actions_template_index,
2071 					      user_data, error);
2072 
2073 	rte_flow_trace_async_create(port_id, queue_id, op_attr, template_table,
2074 				    pattern, pattern_template_index, actions,
2075 				    actions_template_index, user_data, flow);
2076 
2077 	return flow;
2078 }
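
/*
 * Asynchronous insertion sketch (illustrative, reusing "table", "pattern"
 * and "actions" from the sketches above): the rule is only enqueued on flow
 * queue 0 here; completion must be collected later through rte_flow_pull()
 * after an rte_flow_push().
 *
 *	struct rte_flow_op_attr op_attr = { .postpone = 1 };
 *	struct rte_flow_error error;
 *	struct rte_flow *flow =
 *		rte_flow_async_create(0, 0, &op_attr, table,
 *				      pattern, 0, actions, 0,
 *				      NULL, &error);
 */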
2079 
2080 struct rte_flow *
2081 rte_flow_async_create_by_index(uint16_t port_id,
2082 			       uint32_t queue_id,
2083 			       const struct rte_flow_op_attr *op_attr,
2084 			       struct rte_flow_template_table *template_table,
2085 			       uint32_t rule_index,
2086 			       const struct rte_flow_action actions[],
2087 			       uint8_t actions_template_index,
2088 			       void *user_data,
2089 			       struct rte_flow_error *error)
2090 {
2091 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2092 
2093 #ifdef RTE_FLOW_DEBUG
2094 	if (!rte_eth_dev_is_valid_port(port_id)) {
2095 		rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2096 				   rte_strerror(ENODEV));
2097 		return NULL;
2098 	}
2099 	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->async_create_by_index == NULL) {
2100 		rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2101 				   rte_strerror(ENOSYS));
2102 		return NULL;
2103 	}
2104 #endif
2105 
2106 	return dev->flow_fp_ops->async_create_by_index(dev, queue_id,
2107 						       op_attr, template_table, rule_index,
2108 						       actions, actions_template_index,
2109 						       user_data, error);
2110 }
2111 
2112 int
2113 rte_flow_async_destroy(uint16_t port_id,
2114 		       uint32_t queue_id,
2115 		       const struct rte_flow_op_attr *op_attr,
2116 		       struct rte_flow *flow,
2117 		       void *user_data,
2118 		       struct rte_flow_error *error)
2119 {
2120 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2121 	int ret;
2122 
2123 #ifdef RTE_FLOW_DEBUG
2124 	if (!rte_eth_dev_is_valid_port(port_id))
2125 		return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2126 					  rte_strerror(ENODEV));
2127 	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->async_destroy == NULL)
2128 		return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2129 					  rte_strerror(ENOSYS));
2130 #endif
2131 
2132 	ret = dev->flow_fp_ops->async_destroy(dev, queue_id,
2133 					      op_attr, flow,
2134 					      user_data, error);
2135 
2136 	rte_flow_trace_async_destroy(port_id, queue_id, op_attr, flow,
2137 				     user_data, ret);
2138 
2139 	return ret;
2140 }
2141 
2142 int
2143 rte_flow_async_actions_update(uint16_t port_id,
2144 			      uint32_t queue_id,
2145 			      const struct rte_flow_op_attr *op_attr,
2146 			      struct rte_flow *flow,
2147 			      const struct rte_flow_action actions[],
2148 			      uint8_t actions_template_index,
2149 			      void *user_data,
2150 			      struct rte_flow_error *error)
2151 {
2152 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2153 	int ret;
2154 
2155 #ifdef RTE_FLOW_DEBUG
2156 	if (!rte_eth_dev_is_valid_port(port_id))
2157 		return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2158 					  rte_strerror(ENODEV));
2159 	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->async_actions_update == NULL)
2160 		return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2161 					  rte_strerror(ENOSYS));
2162 #endif
2163 
2164 	ret = dev->flow_fp_ops->async_actions_update(dev, queue_id, op_attr,
2165 						     flow, actions,
2166 						     actions_template_index,
2167 						     user_data, error);
2168 
2169 	rte_flow_trace_async_actions_update(port_id, queue_id, op_attr, flow,
2170 					    actions, actions_template_index,
2171 					    user_data, ret);
2172 
2173 	return ret;
2174 }
2175 
2176 int
2177 rte_flow_push(uint16_t port_id,
2178 	      uint32_t queue_id,
2179 	      struct rte_flow_error *error)
2180 {
2181 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2182 	int ret;
2183 
2184 #ifdef RTE_FLOW_DEBUG
2185 	if (!rte_eth_dev_is_valid_port(port_id))
2186 		return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2187 					  rte_strerror(ENODEV));
2188 	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->push == NULL)
2189 		return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2190 					  rte_strerror(ENOSYS));
2191 #endif
2192 
2193 	ret = dev->flow_fp_ops->push(dev, queue_id, error);
2194 
2195 	rte_flow_trace_push(port_id, queue_id, ret);
2196 
2197 	return ret;
2198 }
2199 
2200 int
2201 rte_flow_pull(uint16_t port_id,
2202 	      uint32_t queue_id,
2203 	      struct rte_flow_op_result res[],
2204 	      uint16_t n_res,
2205 	      struct rte_flow_error *error)
2206 {
2207 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2208 	int ret;
2209 
2210 #ifdef RTE_FLOW_DEBUG
2211 	if (!rte_eth_dev_is_valid_port(port_id))
2212 		return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2213 					  rte_strerror(ENODEV));
2214 	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->pull == NULL)
2215 		return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2216 					  rte_strerror(ENOSYS));
2217 #endif
2218 
2219 	ret = dev->flow_fp_ops->pull(dev, queue_id, res, n_res, error);
2220 
2221 	rte_flow_trace_pull(port_id, queue_id, res, n_res, ret);
2222 
2223 	return ret;
2224 }
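
/*
 * Completion polling sketch (illustrative): push flushes the postponed
 * operations on a queue, pull then drains their results.
 *
 *	struct rte_flow_op_result results[8];
 *	struct rte_flow_error error;
 *	int n;
 *
 *	rte_flow_push(0, 0, &error);
 *	do {
 *		n = rte_flow_pull(0, 0, results, RTE_DIM(results), &error);
 *	} while (n == 0);
 */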
2225 
2226 struct rte_flow_action_handle *
2227 rte_flow_async_action_handle_create(uint16_t port_id,
2228 		uint32_t queue_id,
2229 		const struct rte_flow_op_attr *op_attr,
2230 		const struct rte_flow_indir_action_conf *indir_action_conf,
2231 		const struct rte_flow_action *action,
2232 		void *user_data,
2233 		struct rte_flow_error *error)
2234 {
2235 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2236 	struct rte_flow_action_handle *handle;
2237 
2238 #ifdef RTE_FLOW_DEBUG
2239 	if (!rte_eth_dev_is_valid_port(port_id)) {
2240 		rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2241 				   rte_strerror(ENODEV));
2242 		return NULL;
2243 	}
2244 	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->async_action_handle_create == NULL) {
2245 		rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2246 				   rte_strerror(ENOSYS));
2247 		return NULL;
2248 	}
2249 #endif
2250 
2251 	handle = dev->flow_fp_ops->async_action_handle_create(dev, queue_id, op_attr,
2252 							      indir_action_conf, action,
2253 							      user_data, error);
2254 
2255 	rte_flow_trace_async_action_handle_create(port_id, queue_id, op_attr,
2256 						  indir_action_conf, action,
2257 						  user_data, handle);
2258 
2259 	return handle;
2260 }
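
/*
 * Indirect action sketch (illustrative): a shared counter created
 * asynchronously, to be referenced later by any number of flow rules.
 *
 *	struct rte_flow_op_attr op_attr = { .postpone = 0 };
 *	struct rte_flow_indir_action_conf conf = { .ingress = 1 };
 *	struct rte_flow_action count = { .type = RTE_FLOW_ACTION_TYPE_COUNT };
 *	struct rte_flow_error error;
 *	struct rte_flow_action_handle *handle =
 *		rte_flow_async_action_handle_create(0, 0, &op_attr, &conf,
 *						    &count, NULL, &error);
 */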
2261 
2262 int
2263 rte_flow_async_action_handle_destroy(uint16_t port_id,
2264 		uint32_t queue_id,
2265 		const struct rte_flow_op_attr *op_attr,
2266 		struct rte_flow_action_handle *action_handle,
2267 		void *user_data,
2268 		struct rte_flow_error *error)
2269 {
2270 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2271 	int ret;
2272 
2273 #ifdef RTE_FLOW_DEBUG
2274 	if (!rte_eth_dev_is_valid_port(port_id))
2275 		return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2276 					  rte_strerror(ENODEV));
2277 	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->async_action_handle_destroy == NULL)
2278 		return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2279 					  rte_strerror(ENOSYS));
2280 #endif
2281 
2282 	ret = dev->flow_fp_ops->async_action_handle_destroy(dev, queue_id, op_attr,
2283 							    action_handle, user_data, error);
2284 
2285 	rte_flow_trace_async_action_handle_destroy(port_id, queue_id, op_attr,
2286 						   action_handle, user_data, ret);
2287 
2288 	return ret;
2289 }
2290 
2291 int
2292 rte_flow_async_action_handle_update(uint16_t port_id,
2293 		uint32_t queue_id,
2294 		const struct rte_flow_op_attr *op_attr,
2295 		struct rte_flow_action_handle *action_handle,
2296 		const void *update,
2297 		void *user_data,
2298 		struct rte_flow_error *error)
2299 {
2300 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2301 	int ret;
2302 
2303 #ifdef RTE_FLOW_DEBUG
2304 	if (!rte_eth_dev_is_valid_port(port_id))
2305 		return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2306 					  rte_strerror(ENODEV));
2307 	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->async_action_handle_update == NULL)
2308 		return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2309 					  rte_strerror(ENOSYS));
2310 #endif
2311 
2312 	ret = dev->flow_fp_ops->async_action_handle_update(dev, queue_id, op_attr,
2313 							   action_handle, update, user_data, error);
2314 
2315 	rte_flow_trace_async_action_handle_update(port_id, queue_id, op_attr,
2316 						  action_handle, update,
2317 						  user_data, ret);
2318 
2319 	return ret;
2320 }
2321 
2322 int
2323 rte_flow_async_action_handle_query(uint16_t port_id,
2324 		uint32_t queue_id,
2325 		const struct rte_flow_op_attr *op_attr,
2326 		const struct rte_flow_action_handle *action_handle,
2327 		void *data,
2328 		void *user_data,
2329 		struct rte_flow_error *error)
2330 {
2331 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2332 	int ret;
2333 
2334 #ifdef RTE_FLOW_DEBUG
2335 	if (!rte_eth_dev_is_valid_port(port_id))
2336 		return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2337 					  rte_strerror(ENODEV));
2338 	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->async_action_handle_query == NULL)
2339 		return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2340 					  rte_strerror(ENOSYS));
2341 #endif
2342 
2343 	ret = dev->flow_fp_ops->async_action_handle_query(dev, queue_id, op_attr,
2344 							  action_handle, data, user_data, error);
2345 
2346 	rte_flow_trace_async_action_handle_query(port_id, queue_id, op_attr,
2347 						 action_handle, data, user_data,
2348 						 ret);
2349 
2350 	return ret;
2351 }
2352 
2353 int
2354 rte_flow_action_handle_query_update(uint16_t port_id,
2355 				    struct rte_flow_action_handle *handle,
2356 				    const void *update, void *query,
2357 				    enum rte_flow_query_update_mode mode,
2358 				    struct rte_flow_error *error)
2359 {
2360 	int ret;
2361 	struct rte_eth_dev *dev;
2362 	const struct rte_flow_ops *ops;
2363 
2364 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2365 	if (!handle)
2366 		return -EINVAL;
2367 	if (!update && !query)
2368 		return -EINVAL;
2369 	dev = &rte_eth_devices[port_id];
2370 	ops = rte_flow_ops_get(port_id, error);
2371 	if (!ops || !ops->action_handle_query_update)
2372 		return -ENOTSUP;
2373 	ret = ops->action_handle_query_update(dev, handle, update,
2374 					      query, mode, error);
2375 	return flow_err(port_id, ret, error);
2376 }
2377 
2378 int
2379 rte_flow_async_action_handle_query_update(uint16_t port_id, uint32_t queue_id,
2380 					  const struct rte_flow_op_attr *attr,
2381 					  struct rte_flow_action_handle *handle,
2382 					  const void *update, void *query,
2383 					  enum rte_flow_query_update_mode mode,
2384 					  void *user_data,
2385 					  struct rte_flow_error *error)
2386 {
2387 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2388 
2389 #ifdef RTE_FLOW_DEBUG
2390 	if (!rte_eth_dev_is_valid_port(port_id))
2391 		return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2392 					  rte_strerror(ENODEV));
2393 	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->async_action_handle_query_update == NULL)
2394 		return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2395 					  rte_strerror(ENOSYS));
2396 #endif
2397 
2398 	return dev->flow_fp_ops->async_action_handle_query_update(dev, queue_id, attr,
2399 								  handle, update,
2400 								  query, mode,
2401 								  user_data, error);
2402 }
2403 
2404 struct rte_flow_action_list_handle *
2405 rte_flow_action_list_handle_create(uint16_t port_id,
				   const struct rte_flow_indir_action_conf *conf,
2408 				   const struct rte_flow_action *actions,
2409 				   struct rte_flow_error *error)
2410 {
2411 	int ret;
2412 	struct rte_eth_dev *dev;
2413 	const struct rte_flow_ops *ops;
2414 	struct rte_flow_action_list_handle *handle;
2415 
2416 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
2417 	ops = rte_flow_ops_get(port_id, error);
2418 	if (!ops || !ops->action_list_handle_create) {
2419 		rte_flow_error_set(error, ENOTSUP,
2420 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2421 				   "action_list handle not supported");
2422 		return NULL;
2423 	}
2424 	dev = &rte_eth_devices[port_id];
	handle = ops->action_list_handle_create(dev, conf, actions, error);
	ret = handle == NULL ? flow_err(port_id, -rte_errno, error) : 0;
	rte_flow_trace_action_list_handle_create(port_id, conf, actions, ret);
2428 	return handle;
2429 }
2430 
2431 int
2432 rte_flow_action_list_handle_destroy(uint16_t port_id,
2433 				    struct rte_flow_action_list_handle *handle,
2434 				    struct rte_flow_error *error)
2435 {
2436 	int ret;
2437 	struct rte_eth_dev *dev;
2438 	const struct rte_flow_ops *ops;
2439 
2440 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2441 	ops = rte_flow_ops_get(port_id, error);
2442 	if (!ops || !ops->action_list_handle_destroy)
2443 		return rte_flow_error_set(error, ENOTSUP,
2444 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2445 					  "action_list handle not supported");
2446 	dev = &rte_eth_devices[port_id];
2447 	ret = ops->action_list_handle_destroy(dev, handle, error);
2448 	ret = flow_err(port_id, ret, error);
2449 	rte_flow_trace_action_list_handle_destroy(port_id, handle, ret);
2450 	return ret;
2451 }
2452 
2453 struct rte_flow_action_list_handle *
2454 rte_flow_async_action_list_handle_create(uint16_t port_id, uint32_t queue_id,
2455 					 const struct rte_flow_op_attr *attr,
2456 					 const struct rte_flow_indir_action_conf *conf,
2457 					 const struct rte_flow_action *actions,
2458 					 void *user_data,
2459 					 struct rte_flow_error *error)
2460 {
2461 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2462 	struct rte_flow_action_list_handle *handle;
2463 	int ret;
2464 
2465 #ifdef RTE_FLOW_DEBUG
2466 	if (!rte_eth_dev_is_valid_port(port_id)) {
2467 		rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2468 				   rte_strerror(ENODEV));
2469 		return NULL;
2470 	}
2471 	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->async_action_list_handle_create == NULL) {
2472 		rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2473 				   rte_strerror(ENOSYS));
2474 		return NULL;
2475 	}
2476 #endif
2477 
2478 	handle = dev->flow_fp_ops->async_action_list_handle_create(dev, queue_id, attr, conf,
2479 								   actions, user_data,
2480 								   error);
	ret = handle == NULL ? flow_err(port_id, -rte_errno, error) : 0;
2482 
2483 	rte_flow_trace_async_action_list_handle_create(port_id, queue_id, attr,
2484 						       conf, actions, user_data,
2485 						       ret);
2486 	return handle;
2487 }
2488 
2489 int
2490 rte_flow_async_action_list_handle_destroy(uint16_t port_id, uint32_t queue_id,
2491 				 const struct rte_flow_op_attr *op_attr,
2492 				 struct rte_flow_action_list_handle *handle,
2493 				 void *user_data, struct rte_flow_error *error)
2494 {
2495 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2496 	int ret;
2497 
2498 #ifdef RTE_FLOW_DEBUG
2499 	if (!rte_eth_dev_is_valid_port(port_id))
2500 		return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2501 					  rte_strerror(ENODEV));
2502 	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->async_action_list_handle_destroy == NULL)
2503 		return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2504 					  rte_strerror(ENOSYS));
2505 #endif
2506 
2507 	ret = dev->flow_fp_ops->async_action_list_handle_destroy(dev, queue_id, op_attr,
2508 								 handle, user_data, error);
2509 
2510 	rte_flow_trace_async_action_list_handle_destroy(port_id, queue_id,
2511 							op_attr, handle,
2512 							user_data, ret);
2513 	return ret;
2514 }
2515 
2516 int
2517 rte_flow_action_list_handle_query_update(uint16_t port_id,
2518 			 const struct rte_flow_action_list_handle *handle,
2519 			 const void **update, void **query,
2520 			 enum rte_flow_query_update_mode mode,
2521 			 struct rte_flow_error *error)
2522 {
2523 	int ret;
2524 	struct rte_eth_dev *dev;
2525 	const struct rte_flow_ops *ops;
2526 
2527 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2528 	ops = rte_flow_ops_get(port_id, error);
2529 	if (!ops || !ops->action_list_handle_query_update)
2530 		return rte_flow_error_set(error, ENOTSUP,
2531 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2532 					  "action_list query_update not supported");
2533 	dev = &rte_eth_devices[port_id];
2534 	ret = ops->action_list_handle_query_update(dev, handle, update, query,
2535 						   mode, error);
2536 	ret = flow_err(port_id, ret, error);
2537 	rte_flow_trace_action_list_handle_query_update(port_id, handle, update,
2538 						       query, mode, ret);
2539 	return ret;
2540 }
2541 
2542 int
2543 rte_flow_async_action_list_handle_query_update(uint16_t port_id, uint32_t queue_id,
2544 			 const struct rte_flow_op_attr *attr,
2545 			 const struct rte_flow_action_list_handle *handle,
2546 			 const void **update, void **query,
2547 			 enum rte_flow_query_update_mode mode,
2548 			 void *user_data, struct rte_flow_error *error)
2549 {
2550 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2551 	int ret;
2552 
2553 #ifdef RTE_FLOW_DEBUG
2554 	if (!rte_eth_dev_is_valid_port(port_id))
2555 		return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2556 					  rte_strerror(ENODEV));
2557 	if (dev->flow_fp_ops == NULL ||
2558 	    dev->flow_fp_ops->async_action_list_handle_query_update == NULL)
2559 		return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2560 					  rte_strerror(ENOSYS));
2561 #endif
2562 
2563 	ret = dev->flow_fp_ops->async_action_list_handle_query_update(dev, queue_id, attr,
2564 								      handle, update, query,
2565 								      mode, user_data,
2566 								      error);
2567 
2568 	rte_flow_trace_async_action_list_handle_query_update(port_id, queue_id,
2569 							     attr, handle,
2570 							     update, query,
2571 							     mode, user_data,
2572 							     ret);
2573 	return ret;
2574 }
2575 
2576 int
2577 rte_flow_calc_table_hash(uint16_t port_id, const struct rte_flow_template_table *table,
2578 			 const struct rte_flow_item pattern[], uint8_t pattern_template_index,
2579 			 uint32_t *hash, struct rte_flow_error *error)
2580 {
2581 	int ret;
2582 	struct rte_eth_dev *dev;
2583 	const struct rte_flow_ops *ops;
2584 
2585 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2586 	ops = rte_flow_ops_get(port_id, error);
	if (!ops || !ops->flow_calc_table_hash)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "calc table hash not supported");
2591 	dev = &rte_eth_devices[port_id];
2592 	ret = ops->flow_calc_table_hash(dev, table, pattern, pattern_template_index,
2593 					hash, error);
2594 	return flow_err(port_id, ret, error);
2595 }
2596 
2597 int
2598 rte_flow_calc_encap_hash(uint16_t port_id, const struct rte_flow_item pattern[],
2599 			 enum rte_flow_encap_hash_field dest_field, uint8_t hash_len,
2600 			 uint8_t *hash, struct rte_flow_error *error)
2601 {
2602 	int ret;
2603 	struct rte_eth_dev *dev;
2604 	const struct rte_flow_ops *ops;
2605 
2606 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2607 	ops = rte_flow_ops_get(port_id, error);
2608 	if (!ops || !ops->flow_calc_encap_hash)
2609 		return rte_flow_error_set(error, ENOTSUP,
2610 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2611 					  "calc encap hash is not supported");
2612 	if (dest_field > RTE_FLOW_ENCAP_HASH_FIELD_NVGRE_FLOW_ID)
2613 		return rte_flow_error_set(error, EINVAL,
2614 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2615 					  "hash dest field is not defined");
2616 	if ((dest_field == RTE_FLOW_ENCAP_HASH_FIELD_SRC_PORT && hash_len != 2) ||
2617 	    (dest_field == RTE_FLOW_ENCAP_HASH_FIELD_NVGRE_FLOW_ID && hash_len != 1))
2618 		return rte_flow_error_set(error, EINVAL,
2619 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2620 					  "hash len doesn't match the requested field len");
2621 	dev = &rte_eth_devices[port_id];
2622 	ret = ops->flow_calc_encap_hash(dev, pattern, dest_field, hash, error);
2623 	return flow_err(port_id, ret, error);
2624 }
2625 
2626 bool
2627 rte_flow_template_table_resizable(__rte_unused uint16_t port_id,
2628 				  const struct rte_flow_template_table_attr *tbl_attr)
2629 {
2630 	return (tbl_attr->specialize &
2631 		RTE_FLOW_TABLE_SPECIALIZE_RESIZABLE) != 0;
2632 }
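
/*
 * Resize workflow sketch (illustrative): only tables whose attributes carry
 * RTE_FLOW_TABLE_SPECIALIZE_RESIZABLE can be resized. Rules created before
 * the resize are then migrated with rte_flow_async_update_resized() and the
 * operation is finalized with rte_flow_template_table_resize_complete().
 *
 *	if (rte_flow_template_table_resizable(0, &table_attr)) {
 *		rte_flow_template_table_resize(0, table, 4096, &error);
 *		... rte_flow_async_update_resized() for each existing rule ...
 *		rte_flow_template_table_resize_complete(0, table, &error);
 *	}
 */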
2633 
2634 int
2635 rte_flow_template_table_resize(uint16_t port_id,
2636 			       struct rte_flow_template_table *table,
2637 			       uint32_t nb_rules,
2638 			       struct rte_flow_error *error)
2639 {
2640 	int ret;
2641 	struct rte_eth_dev *dev;
2642 	const struct rte_flow_ops *ops;
2643 
2644 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2645 	ops = rte_flow_ops_get(port_id, error);
2646 	if (!ops || !ops->flow_template_table_resize)
2647 		return rte_flow_error_set(error, ENOTSUP,
2648 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2649 					  "flow_template_table_resize not supported");
2650 	dev = &rte_eth_devices[port_id];
2651 	ret = ops->flow_template_table_resize(dev, table, nb_rules, error);
2652 	ret = flow_err(port_id, ret, error);
2653 	rte_flow_trace_template_table_resize(port_id, table, nb_rules, ret);
2654 	return ret;
2655 }
2656 
2657 int
2658 rte_flow_async_update_resized(uint16_t port_id, uint32_t queue,
2659 			      const struct rte_flow_op_attr *attr,
2660 			      struct rte_flow *rule, void *user_data,
2661 			      struct rte_flow_error *error)
2662 {
2663 	int ret;
2664 	struct rte_eth_dev *dev;
2665 	const struct rte_flow_ops *ops;
2666 
2667 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2668 	ops = rte_flow_ops_get(port_id, error);
	if (!ops || !ops->flow_update_resized)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "flow_update_resized not supported");
2673 	dev = &rte_eth_devices[port_id];
2674 	ret = ops->flow_update_resized(dev, queue, attr, rule, user_data, error);
2675 	ret = flow_err(port_id, ret, error);
2676 	rte_flow_trace_async_update_resized(port_id, queue, attr,
2677 					    rule, user_data, ret);
2678 	return ret;
2679 }
2680 
2681 int
2682 rte_flow_template_table_resize_complete(uint16_t port_id,
2683 					struct rte_flow_template_table *table,
2684 					struct rte_flow_error *error)
2685 {
2686 	int ret;
2687 	struct rte_eth_dev *dev;
2688 	const struct rte_flow_ops *ops;
2689 
2690 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2691 	ops = rte_flow_ops_get(port_id, error);
	if (!ops || !ops->flow_template_table_resize_complete)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "flow_template_table_resize_complete not supported");
2696 	dev = &rte_eth_devices[port_id];
2697 	ret = ops->flow_template_table_resize_complete(dev, table, error);
2698 	ret = flow_err(port_id, ret, error);
2699 	rte_flow_trace_table_resize_complete(port_id, table, ret);
2700 	return ret;
2701 }
2702 
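/*
 * Dummy fast-path callbacks: every stub below only reports ENOSYS through
 * rte_flow_error_set(), so that fast-path calls fail gracefully on ports
 * whose driver did not install real callbacks.
 */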
2703 static struct rte_flow *
2704 rte_flow_dummy_async_create(struct rte_eth_dev *dev __rte_unused,
2705 			    uint32_t queue __rte_unused,
2706 			    const struct rte_flow_op_attr *attr __rte_unused,
2707 			    struct rte_flow_template_table *table __rte_unused,
2708 			    const struct rte_flow_item items[] __rte_unused,
2709 			    uint8_t pattern_template_index __rte_unused,
2710 			    const struct rte_flow_action actions[] __rte_unused,
2711 			    uint8_t action_template_index __rte_unused,
2712 			    void *user_data __rte_unused,
2713 			    struct rte_flow_error *error)
2714 {
2715 	rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2716 			   rte_strerror(ENOSYS));
2717 	return NULL;
2718 }
2719 
2720 static struct rte_flow *
2721 rte_flow_dummy_async_create_by_index(struct rte_eth_dev *dev __rte_unused,
2722 				     uint32_t queue __rte_unused,
2723 				     const struct rte_flow_op_attr *attr __rte_unused,
2724 				     struct rte_flow_template_table *table __rte_unused,
2725 				     uint32_t rule_index __rte_unused,
2726 				     const struct rte_flow_action actions[] __rte_unused,
2727 				     uint8_t action_template_index __rte_unused,
2728 				     void *user_data __rte_unused,
2729 				     struct rte_flow_error *error)
2730 {
2731 	rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2732 			   rte_strerror(ENOSYS));
2733 	return NULL;
2734 }
2735 
2736 static int
2737 rte_flow_dummy_async_actions_update(struct rte_eth_dev *dev __rte_unused,
2738 				    uint32_t queue_id __rte_unused,
2739 				    const struct rte_flow_op_attr *op_attr __rte_unused,
2740 				    struct rte_flow *flow __rte_unused,
2741 				    const struct rte_flow_action actions[] __rte_unused,
2742 				    uint8_t actions_template_index __rte_unused,
2743 				    void *user_data __rte_unused,
2744 				    struct rte_flow_error *error)
2745 {
2746 	return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2747 				  rte_strerror(ENOSYS));
2748 }
2749 
2750 static int
2751 rte_flow_dummy_async_destroy(struct rte_eth_dev *dev __rte_unused,
2752 			     uint32_t queue_id __rte_unused,
2753 			     const struct rte_flow_op_attr *op_attr __rte_unused,
2754 			     struct rte_flow *flow __rte_unused,
2755 			     void *user_data __rte_unused,
2756 			     struct rte_flow_error *error)
2757 {
2758 	return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2759 				  rte_strerror(ENOSYS));
2760 }
2761 
2762 static int
2763 rte_flow_dummy_push(struct rte_eth_dev *dev __rte_unused,
2764 		    uint32_t queue_id __rte_unused,
2765 		    struct rte_flow_error *error)
2766 {
2767 	return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2768 				  rte_strerror(ENOSYS));
2769 }
2770 
2771 static int
2772 rte_flow_dummy_pull(struct rte_eth_dev *dev __rte_unused,
2773 		    uint32_t queue_id __rte_unused,
2774 		    struct rte_flow_op_result res[] __rte_unused,
2775 		    uint16_t n_res __rte_unused,
2776 		    struct rte_flow_error *error)
2777 {
2778 	return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2779 				  rte_strerror(ENOSYS));
2780 }
2781 
2782 static struct rte_flow_action_handle *
2783 rte_flow_dummy_async_action_handle_create(
2784 	struct rte_eth_dev *dev __rte_unused,
2785 	uint32_t queue_id __rte_unused,
2786 	const struct rte_flow_op_attr *op_attr __rte_unused,
2787 	const struct rte_flow_indir_action_conf *indir_action_conf __rte_unused,
2788 	const struct rte_flow_action *action __rte_unused,
2789 	void *user_data __rte_unused,
2790 	struct rte_flow_error *error)
2791 {
2792 	rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2793 			   rte_strerror(ENOSYS));
2794 	return NULL;
2795 }
2796 
2797 static int
2798 rte_flow_dummy_async_action_handle_destroy(
2799 	struct rte_eth_dev *dev __rte_unused,
2800 	uint32_t queue_id __rte_unused,
2801 	const struct rte_flow_op_attr *op_attr __rte_unused,
2802 	struct rte_flow_action_handle *action_handle __rte_unused,
2803 	void *user_data __rte_unused,
2804 	struct rte_flow_error *error)
2805 {
2806 	return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2807 				  rte_strerror(ENOSYS));
2808 }
2809 
2810 static int
2811 rte_flow_dummy_async_action_handle_update(
2812 	struct rte_eth_dev *dev __rte_unused,
2813 	uint32_t queue_id __rte_unused,
2814 	const struct rte_flow_op_attr *op_attr __rte_unused,
2815 	struct rte_flow_action_handle *action_handle __rte_unused,
2816 	const void *update __rte_unused,
2817 	void *user_data __rte_unused,
2818 	struct rte_flow_error *error)
2819 {
2820 	return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2821 				  rte_strerror(ENOSYS));
2822 }
2823 
2824 static int
2825 rte_flow_dummy_async_action_handle_query(
2826 	struct rte_eth_dev *dev __rte_unused,
2827 	uint32_t queue_id __rte_unused,
2828 	const struct rte_flow_op_attr *op_attr __rte_unused,
2829 	const struct rte_flow_action_handle *action_handle __rte_unused,
2830 	void *data __rte_unused,
2831 	void *user_data __rte_unused,
2832 	struct rte_flow_error *error)
2833 {
2834 	return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2835 				  rte_strerror(ENOSYS));
2836 }
2837 
2838 static int
2839 rte_flow_dummy_async_action_handle_query_update(
2840 	struct rte_eth_dev *dev __rte_unused,
2841 	uint32_t queue_id __rte_unused,
2842 	const struct rte_flow_op_attr *attr __rte_unused,
2843 	struct rte_flow_action_handle *handle __rte_unused,
2844 	const void *update __rte_unused,
2845 	void *query __rte_unused,
2846 	enum rte_flow_query_update_mode mode __rte_unused,
2847 	void *user_data __rte_unused,
2848 	struct rte_flow_error *error)
2849 {
2850 	return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2851 				  rte_strerror(ENOSYS));
2852 }
2853 
2854 static struct rte_flow_action_list_handle *
2855 rte_flow_dummy_async_action_list_handle_create(
2856 	struct rte_eth_dev *dev __rte_unused,
2857 	uint32_t queue_id __rte_unused,
2858 	const struct rte_flow_op_attr *attr __rte_unused,
2859 	const struct rte_flow_indir_action_conf *conf __rte_unused,
2860 	const struct rte_flow_action *actions __rte_unused,
2861 	void *user_data __rte_unused,
2862 	struct rte_flow_error *error)
2863 {
2864 	rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2865 			   rte_strerror(ENOSYS));
2866 	return NULL;
2867 }
2868 
2869 static int
2870 rte_flow_dummy_async_action_list_handle_destroy(
2871 	struct rte_eth_dev *dev __rte_unused,
2872 	uint32_t queue_id __rte_unused,
2873 	const struct rte_flow_op_attr *op_attr __rte_unused,
2874 	struct rte_flow_action_list_handle *handle __rte_unused,
2875 	void *user_data __rte_unused,
2876 	struct rte_flow_error *error)
2877 {
2878 	return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2879 				  rte_strerror(ENOSYS));
2880 }
2881 
2882 static int
2883 rte_flow_dummy_async_action_list_handle_query_update(
2884 	struct rte_eth_dev *dev __rte_unused,
2885 	uint32_t queue_id __rte_unused,
2886 	const struct rte_flow_op_attr *attr __rte_unused,
2887 	const struct rte_flow_action_list_handle *handle __rte_unused,
2888 	const void **update __rte_unused,
2889 	void **query __rte_unused,
2890 	enum rte_flow_query_update_mode mode __rte_unused,
2891 	void *user_data __rte_unused,
2892 	struct rte_flow_error *error)
2893 {
2894 	return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2895 				  rte_strerror(ENOSYS));
2896 }
2897 
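/* Default fast-path callback table, pointing at the dummy stubs above. */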
2898 struct rte_flow_fp_ops rte_flow_fp_default_ops = {
2899 	.async_create = rte_flow_dummy_async_create,
2900 	.async_create_by_index = rte_flow_dummy_async_create_by_index,
2901 	.async_actions_update = rte_flow_dummy_async_actions_update,
2902 	.async_destroy = rte_flow_dummy_async_destroy,
2903 	.push = rte_flow_dummy_push,
2904 	.pull = rte_flow_dummy_pull,
2905 	.async_action_handle_create = rte_flow_dummy_async_action_handle_create,
2906 	.async_action_handle_destroy = rte_flow_dummy_async_action_handle_destroy,
2907 	.async_action_handle_update = rte_flow_dummy_async_action_handle_update,
2908 	.async_action_handle_query = rte_flow_dummy_async_action_handle_query,
2909 	.async_action_handle_query_update = rte_flow_dummy_async_action_handle_query_update,
2910 	.async_action_list_handle_create = rte_flow_dummy_async_action_list_handle_create,
2911 	.async_action_list_handle_destroy = rte_flow_dummy_async_action_list_handle_destroy,
2912 	.async_action_list_handle_query_update =
2913 		rte_flow_dummy_async_action_list_handle_query_update,
2914 };
2915