xref: /dpdk/lib/ethdev/rte_flow.c (revision 62774b78a84e9fa5df56d04cffed69bef8c901f1)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2016 6WIND S.A.
3  * Copyright 2016 Mellanox Technologies, Ltd
4  */
5 
6 #include <errno.h>
7 #include <stddef.h>
8 #include <stdint.h>
9 
10 #include <rte_common.h>
11 #include <rte_errno.h>
12 #include <rte_branch_prediction.h>
13 #include <rte_string_fns.h>
14 #include <rte_mbuf_dyn.h>
15 #include "rte_ethdev.h"
16 #include "rte_flow_driver.h"
17 #include "rte_flow.h"
18 
19 #include "ethdev_trace.h"
20 
21 /* Mbuf dynamic field name for metadata. */
22 int32_t rte_flow_dynf_metadata_offs = -1;
23 
24 /* Mbuf dynamic field flag bit number for metadata. */
25 uint64_t rte_flow_dynf_metadata_mask;
26 
27 /**
28  * Flow elements description tables.
29  */
30 struct rte_flow_desc_data {
31 	const char *name;
32 	size_t size;
33 	size_t (*desc_fn)(void *dst, const void *src);
34 };
35 
36 /**
37  * Copy the description of a flow element (item or action) into a buffer.
38  * @param buf
39  * Destination memory.
40  * @param data
41  * Source memory.
42  * @param size
43  * Requested copy size in bytes.
44  * @param desc
45  * rte_flow_desc_item - for flow item conversion.
46  * rte_flow_desc_action - for flow action conversion.
47  * @param type
48  * Offset into the desc table, or a negative value for PMD-private flow elements.
49  */
50 static inline size_t
51 rte_flow_conv_copy(void *buf, const void *data, const size_t size,
52 		   const struct rte_flow_desc_data *desc, int type)
53 {
54 	/**
55 	 * A negative type denotes a PMD-private flow element, copied as an opaque pointer.
56 	 */
57 	bool rte_type = type >= 0;
58 
59 	size_t sz = rte_type ? desc[type].size : sizeof(void *);
60 	if (buf == NULL || data == NULL)
61 		return 0;
62 	rte_memcpy(buf, data, (size > sz ? sz : size));
63 	if (rte_type && desc[type].desc_fn)
64 		sz += desc[type].desc_fn(size > 0 ? buf : NULL, data);
65 	return sz;
66 }
67 
68 static size_t
69 rte_flow_item_flex_conv(void *buf, const void *data)
70 {
71 	struct rte_flow_item_flex *dst = buf;
72 	const struct rte_flow_item_flex *src = data;
73 	if (buf) {
74 		dst->pattern = rte_memcpy
75 			((void *)((uintptr_t)(dst + 1)), src->pattern,
76 			 src->length);
77 	}
78 	return src->length;
79 }
80 
81 /** Generate flow_item[] entry. */
82 #define MK_FLOW_ITEM(t, s) \
83 	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
84 		.name = # t, \
85 		.size = s,               \
86 		.desc_fn = NULL,\
87 	}
88 
89 #define MK_FLOW_ITEM_FN(t, s, fn) \
90 	[RTE_FLOW_ITEM_TYPE_ ## t] = {\
91 		.name = # t,                 \
92 		.size = s,                   \
93 		.desc_fn = fn,               \
94 	}
95 
96 /** Information about known flow pattern items. */
97 static const struct rte_flow_desc_data rte_flow_desc_item[] = {
98 	MK_FLOW_ITEM(END, 0),
99 	MK_FLOW_ITEM(VOID, 0),
100 	MK_FLOW_ITEM(INVERT, 0),
101 	MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
102 	MK_FLOW_ITEM(PORT_ID, sizeof(struct rte_flow_item_port_id)),
103 	MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)),
104 	MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
105 	MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
106 	MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
107 	MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
108 	MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
109 	MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
110 	MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
111 	MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
112 	MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
113 	MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
114 	MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
115 	MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
116 	MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
117 	MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)),
118 	MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
119 	MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
120 	MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
121 	MK_FLOW_ITEM(ESP, sizeof(struct rte_flow_item_esp)),
122 	MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
123 	MK_FLOW_ITEM(VXLAN_GPE, sizeof(struct rte_flow_item_vxlan_gpe)),
124 	MK_FLOW_ITEM(ARP_ETH_IPV4, sizeof(struct rte_flow_item_arp_eth_ipv4)),
125 	MK_FLOW_ITEM(IPV6_EXT, sizeof(struct rte_flow_item_ipv6_ext)),
126 	MK_FLOW_ITEM(IPV6_FRAG_EXT, sizeof(struct rte_flow_item_ipv6_frag_ext)),
127 	MK_FLOW_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
128 	MK_FLOW_ITEM(ICMP6_ECHO_REQUEST, sizeof(struct rte_flow_item_icmp6_echo)),
129 	MK_FLOW_ITEM(ICMP6_ECHO_REPLY, sizeof(struct rte_flow_item_icmp6_echo)),
130 	MK_FLOW_ITEM(ICMP6_ND_NS, sizeof(struct rte_flow_item_icmp6_nd_ns)),
131 	MK_FLOW_ITEM(ICMP6_ND_NA, sizeof(struct rte_flow_item_icmp6_nd_na)),
132 	MK_FLOW_ITEM(ICMP6_ND_OPT, sizeof(struct rte_flow_item_icmp6_nd_opt)),
133 	MK_FLOW_ITEM(ICMP6_ND_OPT_SLA_ETH,
134 		     sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
135 	MK_FLOW_ITEM(ICMP6_ND_OPT_TLA_ETH,
136 		     sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
137 	MK_FLOW_ITEM(MARK, sizeof(struct rte_flow_item_mark)),
138 	MK_FLOW_ITEM(META, sizeof(struct rte_flow_item_meta)),
139 	MK_FLOW_ITEM(TAG, sizeof(struct rte_flow_item_tag)),
140 	MK_FLOW_ITEM(GRE_KEY, sizeof(rte_be32_t)),
141 	MK_FLOW_ITEM(GRE_OPTION, sizeof(struct rte_flow_item_gre_opt)),
142 	MK_FLOW_ITEM(GTP_PSC, sizeof(struct rte_flow_item_gtp_psc)),
143 	MK_FLOW_ITEM(PPPOES, sizeof(struct rte_flow_item_pppoe)),
144 	MK_FLOW_ITEM(PPPOED, sizeof(struct rte_flow_item_pppoe)),
145 	MK_FLOW_ITEM(PPPOE_PROTO_ID,
146 			sizeof(struct rte_flow_item_pppoe_proto_id)),
147 	MK_FLOW_ITEM(NSH, sizeof(struct rte_flow_item_nsh)),
148 	MK_FLOW_ITEM(IGMP, sizeof(struct rte_flow_item_igmp)),
149 	MK_FLOW_ITEM(AH, sizeof(struct rte_flow_item_ah)),
150 	MK_FLOW_ITEM(HIGIG2, sizeof(struct rte_flow_item_higig2_hdr)),
151 	MK_FLOW_ITEM(L2TPV3OIP, sizeof(struct rte_flow_item_l2tpv3oip)),
152 	MK_FLOW_ITEM(PFCP, sizeof(struct rte_flow_item_pfcp)),
153 	MK_FLOW_ITEM(ECPRI, sizeof(struct rte_flow_item_ecpri)),
154 	MK_FLOW_ITEM(GENEVE_OPT, sizeof(struct rte_flow_item_geneve_opt)),
155 	MK_FLOW_ITEM(INTEGRITY, sizeof(struct rte_flow_item_integrity)),
156 	MK_FLOW_ITEM(CONNTRACK, sizeof(uint32_t)),
157 	MK_FLOW_ITEM(PORT_REPRESENTOR, sizeof(struct rte_flow_item_ethdev)),
158 	MK_FLOW_ITEM(REPRESENTED_PORT, sizeof(struct rte_flow_item_ethdev)),
159 	MK_FLOW_ITEM_FN(FLEX, sizeof(struct rte_flow_item_flex),
160 			rte_flow_item_flex_conv),
161 	MK_FLOW_ITEM(L2TPV2, sizeof(struct rte_flow_item_l2tpv2)),
162 	MK_FLOW_ITEM(PPP, sizeof(struct rte_flow_item_ppp)),
163 	MK_FLOW_ITEM(METER_COLOR, sizeof(struct rte_flow_item_meter_color)),
164 	MK_FLOW_ITEM(IPV6_ROUTING_EXT, sizeof(struct rte_flow_item_ipv6_routing_ext)),
165 	MK_FLOW_ITEM(QUOTA, sizeof(struct rte_flow_item_quota)),
166 	MK_FLOW_ITEM(AGGR_AFFINITY, sizeof(struct rte_flow_item_aggr_affinity)),
167 	MK_FLOW_ITEM(TX_QUEUE, sizeof(struct rte_flow_item_tx_queue)),
168 	MK_FLOW_ITEM(IB_BTH, sizeof(struct rte_flow_item_ib_bth)),
169 };
170 
171 /** Generate flow_action[] entry. */
172 #define MK_FLOW_ACTION(t, s) \
173 	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
174 		.name = # t, \
175 		.size = s, \
176 		.desc_fn = NULL,\
177 	}
178 
179 #define MK_FLOW_ACTION_FN(t, fn) \
180 	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
181 		.name = # t, \
182 		.size = 0, \
183 		.desc_fn = fn,\
184 	}
185 
186 
187 /** Information about known flow actions. */
188 static const struct rte_flow_desc_data rte_flow_desc_action[] = {
189 	MK_FLOW_ACTION(END, 0),
190 	MK_FLOW_ACTION(VOID, 0),
191 	MK_FLOW_ACTION(PASSTHRU, 0),
192 	MK_FLOW_ACTION(JUMP, sizeof(struct rte_flow_action_jump)),
193 	MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
194 	MK_FLOW_ACTION(FLAG, 0),
195 	MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
196 	MK_FLOW_ACTION(DROP, 0),
197 	MK_FLOW_ACTION(COUNT, sizeof(struct rte_flow_action_count)),
198 	MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)),
199 	MK_FLOW_ACTION(PF, 0),
200 	MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
201 	MK_FLOW_ACTION(PORT_ID, sizeof(struct rte_flow_action_port_id)),
202 	MK_FLOW_ACTION(METER, sizeof(struct rte_flow_action_meter)),
203 	MK_FLOW_ACTION(SECURITY, sizeof(struct rte_flow_action_security)),
204 	MK_FLOW_ACTION(OF_DEC_NW_TTL, 0),
205 	MK_FLOW_ACTION(OF_POP_VLAN, 0),
206 	MK_FLOW_ACTION(OF_PUSH_VLAN,
207 		       sizeof(struct rte_flow_action_of_push_vlan)),
208 	MK_FLOW_ACTION(OF_SET_VLAN_VID,
209 		       sizeof(struct rte_flow_action_of_set_vlan_vid)),
210 	MK_FLOW_ACTION(OF_SET_VLAN_PCP,
211 		       sizeof(struct rte_flow_action_of_set_vlan_pcp)),
212 	MK_FLOW_ACTION(OF_POP_MPLS,
213 		       sizeof(struct rte_flow_action_of_pop_mpls)),
214 	MK_FLOW_ACTION(OF_PUSH_MPLS,
215 		       sizeof(struct rte_flow_action_of_push_mpls)),
216 	MK_FLOW_ACTION(VXLAN_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
217 	MK_FLOW_ACTION(VXLAN_DECAP, 0),
218 	MK_FLOW_ACTION(NVGRE_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
219 	MK_FLOW_ACTION(NVGRE_DECAP, 0),
220 	MK_FLOW_ACTION(RAW_ENCAP, sizeof(struct rte_flow_action_raw_encap)),
221 	MK_FLOW_ACTION(RAW_DECAP, sizeof(struct rte_flow_action_raw_decap)),
222 	MK_FLOW_ACTION(SET_IPV4_SRC,
223 		       sizeof(struct rte_flow_action_set_ipv4)),
224 	MK_FLOW_ACTION(SET_IPV4_DST,
225 		       sizeof(struct rte_flow_action_set_ipv4)),
226 	MK_FLOW_ACTION(SET_IPV6_SRC,
227 		       sizeof(struct rte_flow_action_set_ipv6)),
228 	MK_FLOW_ACTION(SET_IPV6_DST,
229 		       sizeof(struct rte_flow_action_set_ipv6)),
230 	MK_FLOW_ACTION(SET_TP_SRC,
231 		       sizeof(struct rte_flow_action_set_tp)),
232 	MK_FLOW_ACTION(SET_TP_DST,
233 		       sizeof(struct rte_flow_action_set_tp)),
234 	MK_FLOW_ACTION(MAC_SWAP, 0),
235 	MK_FLOW_ACTION(DEC_TTL, 0),
236 	MK_FLOW_ACTION(SET_TTL, sizeof(struct rte_flow_action_set_ttl)),
237 	MK_FLOW_ACTION(SET_MAC_SRC, sizeof(struct rte_flow_action_set_mac)),
238 	MK_FLOW_ACTION(SET_MAC_DST, sizeof(struct rte_flow_action_set_mac)),
239 	MK_FLOW_ACTION(INC_TCP_SEQ, sizeof(rte_be32_t)),
240 	MK_FLOW_ACTION(DEC_TCP_SEQ, sizeof(rte_be32_t)),
241 	MK_FLOW_ACTION(INC_TCP_ACK, sizeof(rte_be32_t)),
242 	MK_FLOW_ACTION(DEC_TCP_ACK, sizeof(rte_be32_t)),
243 	MK_FLOW_ACTION(SET_TAG, sizeof(struct rte_flow_action_set_tag)),
244 	MK_FLOW_ACTION(SET_META, sizeof(struct rte_flow_action_set_meta)),
245 	MK_FLOW_ACTION(SET_IPV4_DSCP, sizeof(struct rte_flow_action_set_dscp)),
246 	MK_FLOW_ACTION(SET_IPV6_DSCP, sizeof(struct rte_flow_action_set_dscp)),
247 	MK_FLOW_ACTION(AGE, sizeof(struct rte_flow_action_age)),
248 	MK_FLOW_ACTION(SAMPLE, sizeof(struct rte_flow_action_sample)),
249 	MK_FLOW_ACTION(MODIFY_FIELD,
250 		       sizeof(struct rte_flow_action_modify_field)),
251 	/**
252 	 * Indirect action represented as handle of type
253 	 * (struct rte_flow_action_handle *) stored in conf field (see
254 	 * struct rte_flow_action); no need for additional structure to store
255 	 * indirect action handle.
256 	 */
257 	MK_FLOW_ACTION(INDIRECT, 0),
258 	MK_FLOW_ACTION(CONNTRACK, sizeof(struct rte_flow_action_conntrack)),
259 	MK_FLOW_ACTION(PORT_REPRESENTOR, sizeof(struct rte_flow_action_ethdev)),
260 	MK_FLOW_ACTION(REPRESENTED_PORT, sizeof(struct rte_flow_action_ethdev)),
261 	MK_FLOW_ACTION(METER_MARK, sizeof(struct rte_flow_action_meter_mark)),
262 	MK_FLOW_ACTION(SEND_TO_KERNEL, 0),
263 	MK_FLOW_ACTION(QUOTA, sizeof(struct rte_flow_action_quota)),
264 	MK_FLOW_ACTION(IPV6_EXT_PUSH, sizeof(struct rte_flow_action_ipv6_ext_push)),
265 	MK_FLOW_ACTION(IPV6_EXT_REMOVE, sizeof(struct rte_flow_action_ipv6_ext_remove)),
266 	MK_FLOW_ACTION(INDIRECT_LIST,
267 		       sizeof(struct rte_flow_action_indirect_list)),
268 };
269 
270 int
271 rte_flow_dynf_metadata_register(void)
272 {
273 	int offset;
274 	int flag;
275 
276 	static const struct rte_mbuf_dynfield desc_offs = {
277 		.name = RTE_MBUF_DYNFIELD_METADATA_NAME,
278 		.size = sizeof(uint32_t),
279 		.align = __alignof__(uint32_t),
280 	};
281 	static const struct rte_mbuf_dynflag desc_flag = {
282 		.name = RTE_MBUF_DYNFLAG_METADATA_NAME,
283 	};
284 
285 	offset = rte_mbuf_dynfield_register(&desc_offs);
286 	if (offset < 0)
287 		goto error;
288 	flag = rte_mbuf_dynflag_register(&desc_flag);
289 	if (flag < 0)
290 		goto error;
291 	rte_flow_dynf_metadata_offs = offset;
292 	rte_flow_dynf_metadata_mask = RTE_BIT64(flag);
293 
294 	rte_flow_trace_dynf_metadata_register(offset, RTE_BIT64(flag));
295 
296 	return 0;
297 
298 error:
299 	rte_flow_dynf_metadata_offs = -1;
300 	rte_flow_dynf_metadata_mask = UINT64_C(0);
301 	return -rte_errno;
302 }
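
/*
 * Illustrative usage sketch; not part of the original file. An application
 * calls rte_flow_dynf_metadata_register() once, before creating flows that
 * match or modify metadata. The example function name is hypothetical;
 * RTE_FLOW_DYNF_METADATA() comes from rte_flow.h.
 */
static __rte_unused int
example_set_tx_metadata(struct rte_mbuf *m, uint32_t value)
{
	/* The mask stays zero until registration succeeds. */
	if (rte_flow_dynf_metadata_mask == 0)
		return -1;
	/*
	 * Write the dynamic field. The caller must also raise
	 * rte_flow_dynf_metadata_mask in m->ol_flags so the PMD
	 * knows the field is valid.
	 */
	*RTE_FLOW_DYNF_METADATA(m) = value;
	return 0;
}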
303 
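/*
 * Drivers advertising RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE serialize flow
 * operations themselves; for all other drivers, the per-port
 * flow_ops_mutex below makes the generic flow API thread-safe.
 */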
304 static inline void
305 fts_enter(struct rte_eth_dev *dev)
306 {
307 	if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
308 		pthread_mutex_lock(&dev->data->flow_ops_mutex);
309 }
310 
311 static inline void
312 fts_exit(struct rte_eth_dev *dev)
313 {
314 	if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
315 		pthread_mutex_unlock(&dev->data->flow_ops_mutex);
316 }
317 
318 static int
319 flow_err(uint16_t port_id, int ret, struct rte_flow_error *error)
320 {
321 	if (ret == 0)
322 		return 0;
323 	if (rte_eth_dev_is_removed(port_id))
324 		return rte_flow_error_set(error, EIO,
325 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
326 					  NULL, rte_strerror(EIO));
327 	return ret;
328 }
329 
330 /* Get generic flow operations structure from a port. */
331 const struct rte_flow_ops *
332 rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error)
333 {
334 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
335 	const struct rte_flow_ops *ops;
336 	int code;
337 
338 	if (unlikely(!rte_eth_dev_is_valid_port(port_id)))
339 		code = ENODEV;
340 	else if (unlikely(dev->dev_ops->flow_ops_get == NULL))
341 		/* flow API not supported with this driver dev_ops */
342 		code = ENOSYS;
343 	else
344 		code = dev->dev_ops->flow_ops_get(dev, &ops);
345 	if (code == 0 && ops == NULL)
346 		/* flow API not supported with this device */
347 		code = ENOSYS;
348 
349 	if (code != 0) {
350 		rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
351 				   NULL, rte_strerror(code));
352 		return NULL;
353 	}
354 	return ops;
355 }
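
/*
 * Illustrative sketch; not part of the original file. A driver exposes its
 * flow callbacks through the flow_ops_get dev_op that rte_flow_ops_get()
 * invokes above. All example_* names are hypothetical.
 */
static __rte_unused const struct rte_flow_ops example_pmd_flow_ops = {
	/* Each field points to a driver callback; NULL means unsupported. */
	.validate = NULL,
	.create = NULL,
	.destroy = NULL,
};

static __rte_unused int
example_pmd_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
			 const struct rte_flow_ops **ops)
{
	*ops = &example_pmd_flow_ops;
	return 0;
}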
356 
357 /* Check whether a flow rule can be created on a given port. */
358 int
359 rte_flow_validate(uint16_t port_id,
360 		  const struct rte_flow_attr *attr,
361 		  const struct rte_flow_item pattern[],
362 		  const struct rte_flow_action actions[],
363 		  struct rte_flow_error *error)
364 {
365 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
366 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
367 	int ret;
368 
369 	if (likely(!!attr) && attr->transfer &&
370 	    (attr->ingress || attr->egress)) {
371 		return rte_flow_error_set(error, EINVAL,
372 					  RTE_FLOW_ERROR_TYPE_ATTR,
373 					  attr, "cannot use attr ingress/egress with attr transfer");
374 	}
375 
376 	if (unlikely(!ops))
377 		return -rte_errno;
378 	if (likely(!!ops->validate)) {
379 		fts_enter(dev);
380 		ret = ops->validate(dev, attr, pattern, actions, error);
381 		fts_exit(dev);
382 		ret = flow_err(port_id, ret, error);
383 
384 		rte_flow_trace_validate(port_id, attr, pattern, actions, ret);
385 
386 		return ret;
387 	}
388 	return rte_flow_error_set(error, ENOSYS,
389 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
390 				  NULL, rte_strerror(ENOSYS));
391 }
392 
393 /* Create a flow rule on a given port. */
394 struct rte_flow *
395 rte_flow_create(uint16_t port_id,
396 		const struct rte_flow_attr *attr,
397 		const struct rte_flow_item pattern[],
398 		const struct rte_flow_action actions[],
399 		struct rte_flow_error *error)
400 {
401 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
402 	struct rte_flow *flow;
403 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
404 
405 	if (unlikely(!ops))
406 		return NULL;
407 	if (likely(!!ops->create)) {
408 		fts_enter(dev);
409 		flow = ops->create(dev, attr, pattern, actions, error);
410 		fts_exit(dev);
411 		if (flow == NULL)
412 			flow_err(port_id, -rte_errno, error);
413 
414 		rte_flow_trace_create(port_id, attr, pattern, actions, flow);
415 
416 		return flow;
417 	}
418 	rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
419 			   NULL, rte_strerror(ENOSYS));
420 	return NULL;
421 }
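
/*
 * Illustrative sketch; not part of the original file. Typical application
 * usage: validate a rule first, then create it. The function name is
 * hypothetical; the rule steers IPv4 traffic to a given Rx queue.
 */
static __rte_unused struct rte_flow *
example_create_queue_flow(uint16_t port_id, uint16_t rx_queue)
{
	const struct rte_flow_attr attr = { .ingress = 1 };
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action_queue queue = { .index = rx_queue };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error err;

	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) != 0)
		return NULL;
	return rte_flow_create(port_id, &attr, pattern, actions, &err);
}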
422 
423 /* Destroy a flow rule on a given port. */
424 int
425 rte_flow_destroy(uint16_t port_id,
426 		 struct rte_flow *flow,
427 		 struct rte_flow_error *error)
428 {
429 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
430 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
431 	int ret;
432 
433 	if (unlikely(!ops))
434 		return -rte_errno;
435 	if (likely(!!ops->destroy)) {
436 		fts_enter(dev);
437 		ret = ops->destroy(dev, flow, error);
438 		fts_exit(dev);
439 		ret = flow_err(port_id, ret, error);
440 
441 		rte_flow_trace_destroy(port_id, flow, ret);
442 
443 		return ret;
444 	}
445 	return rte_flow_error_set(error, ENOSYS,
446 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
447 				  NULL, rte_strerror(ENOSYS));
448 }
449 
450 int
451 rte_flow_actions_update(uint16_t port_id,
452 			struct rte_flow *flow,
453 			const struct rte_flow_action actions[],
454 			struct rte_flow_error *error)
455 {
456 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
457 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
458 	int ret;
459 
460 	if (unlikely(!ops))
461 		return -rte_errno;
462 	if (likely(!!ops->actions_update)) {
463 		fts_enter(dev);
464 		ret = ops->actions_update(dev, flow, actions, error);
465 		fts_exit(dev);
466 
467 		rte_flow_trace_actions_update(port_id, flow, actions, ret);
468 
469 		return flow_err(port_id, ret, error);
470 	}
471 	return rte_flow_error_set(error, ENOSYS,
472 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
473 				  NULL, rte_strerror(ENOSYS));
474 }
475 
476 /* Destroy all flow rules associated with a port. */
477 int
478 rte_flow_flush(uint16_t port_id,
479 	       struct rte_flow_error *error)
480 {
481 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
482 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
483 	int ret;
484 
485 	if (unlikely(!ops))
486 		return -rte_errno;
487 	if (likely(!!ops->flush)) {
488 		fts_enter(dev);
489 		ret = ops->flush(dev, error);
490 		fts_exit(dev);
491 		ret = flow_err(port_id, ret, error);
492 
493 		rte_flow_trace_flush(port_id, ret);
494 
495 		return ret;
496 	}
497 	return rte_flow_error_set(error, ENOSYS,
498 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
499 				  NULL, rte_strerror(ENOSYS));
500 }
501 
502 /* Query an existing flow rule. */
503 int
504 rte_flow_query(uint16_t port_id,
505 	       struct rte_flow *flow,
506 	       const struct rte_flow_action *action,
507 	       void *data,
508 	       struct rte_flow_error *error)
509 {
510 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
511 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
512 	int ret;
513 
514 	if (!ops)
515 		return -rte_errno;
516 	if (likely(!!ops->query)) {
517 		fts_enter(dev);
518 		ret = ops->query(dev, flow, action, data, error);
519 		fts_exit(dev);
520 		ret = flow_err(port_id, ret, error);
521 
522 		rte_flow_trace_query(port_id, flow, action, data, ret);
523 
524 		return ret;
525 	}
526 	return rte_flow_error_set(error, ENOSYS,
527 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
528 				  NULL, rte_strerror(ENOSYS));
529 }
530 
531 /* Restrict ingress traffic to the defined flow rules. */
532 int
533 rte_flow_isolate(uint16_t port_id,
534 		 int set,
535 		 struct rte_flow_error *error)
536 {
537 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
538 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
539 	int ret;
540 
541 	if (!ops)
542 		return -rte_errno;
543 	if (likely(!!ops->isolate)) {
544 		fts_enter(dev);
545 		ret = ops->isolate(dev, set, error);
546 		fts_exit(dev);
547 		ret = flow_err(port_id, ret, error);
548 
549 		rte_flow_trace_isolate(port_id, set, ret);
550 
551 		return ret;
552 	}
553 	return rte_flow_error_set(error, ENOSYS,
554 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
555 				  NULL, rte_strerror(ENOSYS));
556 }
557 
558 /* Initialize flow error structure. */
559 int
560 rte_flow_error_set(struct rte_flow_error *error,
561 		   int code,
562 		   enum rte_flow_error_type type,
563 		   const void *cause,
564 		   const char *message)
565 {
566 	if (error) {
567 		*error = (struct rte_flow_error){
568 			.type = type,
569 			.cause = cause,
570 			.message = message,
571 		};
572 	}
573 	rte_errno = code;
574 	return -code;
575 }
576 
577 /** Pattern item specification types. */
578 enum rte_flow_conv_item_spec_type {
579 	RTE_FLOW_CONV_ITEM_SPEC,
580 	RTE_FLOW_CONV_ITEM_LAST,
581 	RTE_FLOW_CONV_ITEM_MASK,
582 };
583 
584 /**
585  * Copy pattern item specification.
586  *
587  * @param[out] buf
588  *   Output buffer. Can be NULL if @p size is zero.
589  * @param size
590  *   Size of @p buf in bytes.
591  * @param[in] item
592  *   Pattern item to copy specification from.
593  * @param type
594  *   Specification selector for either @p spec, @p last or @p mask.
595  *
596  * @return
597  *   Number of bytes needed to store pattern item specification regardless
598  *   of @p size. @p buf contents are truncated to @p size if not large
599  *   enough.
600  */
601 static size_t
602 rte_flow_conv_item_spec(void *buf, const size_t size,
603 			const struct rte_flow_item *item,
604 			enum rte_flow_conv_item_spec_type type)
605 {
606 	size_t off;
607 	const void *data =
608 		type == RTE_FLOW_CONV_ITEM_SPEC ? item->spec :
609 		type == RTE_FLOW_CONV_ITEM_LAST ? item->last :
610 		type == RTE_FLOW_CONV_ITEM_MASK ? item->mask :
611 		NULL;
612 
613 	switch (item->type) {
614 		union {
615 			const struct rte_flow_item_raw *raw;
616 		} spec;
617 		union {
618 			const struct rte_flow_item_raw *raw;
619 		} last;
620 		union {
621 			const struct rte_flow_item_raw *raw;
622 		} mask;
623 		union {
624 			const struct rte_flow_item_raw *raw;
625 		} src;
626 		union {
627 			struct rte_flow_item_raw *raw;
628 		} dst;
629 		size_t tmp;
630 
631 	case RTE_FLOW_ITEM_TYPE_RAW:
632 		spec.raw = item->spec;
633 		last.raw = item->last ? item->last : item->spec;
634 		mask.raw = item->mask ? item->mask : &rte_flow_item_raw_mask;
635 		src.raw = data;
636 		dst.raw = buf;
637 		rte_memcpy(dst.raw,
638 			   (&(struct rte_flow_item_raw){
639 				.relative = src.raw->relative,
640 				.search = src.raw->search,
641 				.reserved = src.raw->reserved,
642 				.offset = src.raw->offset,
643 				.limit = src.raw->limit,
644 				.length = src.raw->length,
645 			   }),
646 			   size > sizeof(*dst.raw) ? sizeof(*dst.raw) : size);
647 		off = sizeof(*dst.raw);
648 		if (type == RTE_FLOW_CONV_ITEM_SPEC ||
649 		    (type == RTE_FLOW_CONV_ITEM_MASK &&
650 		     ((spec.raw->length & mask.raw->length) >=
651 		      (last.raw->length & mask.raw->length))))
652 			tmp = spec.raw->length & mask.raw->length;
653 		else
654 			tmp = last.raw->length & mask.raw->length;
655 		if (tmp) {
656 			off = RTE_ALIGN_CEIL(off, sizeof(*dst.raw->pattern));
657 			if (size >= off + tmp)
658 				dst.raw->pattern = rte_memcpy
659 					((void *)((uintptr_t)dst.raw + off),
660 					 src.raw->pattern, tmp);
661 			off += tmp;
662 		}
663 		break;
664 	default:
665 		off = rte_flow_conv_copy(buf, data, size,
666 					 rte_flow_desc_item, item->type);
667 		break;
668 	}
669 	return off;
670 }
671 
672 /**
673  * Copy action configuration.
674  *
675  * @param[out] buf
676  *   Output buffer. Can be NULL if @p size is zero.
677  * @param size
678  *   Size of @p buf in bytes.
679  * @param[in] action
680  *   Action to copy configuration from.
681  *
682  * @return
683  *   Number of bytes needed to store the action configuration regardless
684  *   of @p size. @p buf contents are truncated to @p size if not large
685  *   enough.
686  */
687 static size_t
688 rte_flow_conv_action_conf(void *buf, const size_t size,
689 			  const struct rte_flow_action *action)
690 {
691 	size_t off;
692 
693 	switch (action->type) {
694 		union {
695 			const struct rte_flow_action_rss *rss;
696 			const struct rte_flow_action_vxlan_encap *vxlan_encap;
697 			const struct rte_flow_action_nvgre_encap *nvgre_encap;
698 		} src;
699 		union {
700 			struct rte_flow_action_rss *rss;
701 			struct rte_flow_action_vxlan_encap *vxlan_encap;
702 			struct rte_flow_action_nvgre_encap *nvgre_encap;
703 		} dst;
704 		size_t tmp;
705 		int ret;
706 
707 	case RTE_FLOW_ACTION_TYPE_RSS:
708 		src.rss = action->conf;
709 		dst.rss = buf;
710 		rte_memcpy(dst.rss,
711 			   (&(struct rte_flow_action_rss){
712 				.func = src.rss->func,
713 				.level = src.rss->level,
714 				.types = src.rss->types,
715 				.key_len = src.rss->key_len,
716 				.queue_num = src.rss->queue_num,
717 			   }),
718 			   size > sizeof(*dst.rss) ? sizeof(*dst.rss) : size);
719 		off = sizeof(*dst.rss);
720 		if (src.rss->key_len && src.rss->key) {
721 			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->key));
722 			tmp = sizeof(*src.rss->key) * src.rss->key_len;
723 			if (size >= off + tmp)
724 				dst.rss->key = rte_memcpy
725 					((void *)((uintptr_t)dst.rss + off),
726 					 src.rss->key, tmp);
727 			off += tmp;
728 		}
729 		if (src.rss->queue_num) {
730 			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->queue));
731 			tmp = sizeof(*src.rss->queue) * src.rss->queue_num;
732 			if (size >= off + tmp)
733 				dst.rss->queue = rte_memcpy
734 					((void *)((uintptr_t)dst.rss + off),
735 					 src.rss->queue, tmp);
736 			off += tmp;
737 		}
738 		break;
739 	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
740 	case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
741 		src.vxlan_encap = action->conf;
742 		dst.vxlan_encap = buf;
743 		RTE_BUILD_BUG_ON(sizeof(*src.vxlan_encap) !=
744 				 sizeof(*src.nvgre_encap) ||
745 				 offsetof(struct rte_flow_action_vxlan_encap,
746 					  definition) !=
747 				 offsetof(struct rte_flow_action_nvgre_encap,
748 					  definition));
749 		off = sizeof(*dst.vxlan_encap);
750 		if (src.vxlan_encap->definition) {
751 			off = RTE_ALIGN_CEIL
752 				(off, sizeof(*dst.vxlan_encap->definition));
753 			ret = rte_flow_conv
754 				(RTE_FLOW_CONV_OP_PATTERN,
755 				 (void *)((uintptr_t)dst.vxlan_encap + off),
756 				 size > off ? size - off : 0,
757 				 src.vxlan_encap->definition, NULL);
758 			if (ret < 0)
759 				return 0;
760 			if (size >= off + ret)
761 				dst.vxlan_encap->definition =
762 					(void *)((uintptr_t)dst.vxlan_encap +
763 						 off);
764 			off += ret;
765 		}
766 		break;
767 	default:
768 		off = rte_flow_conv_copy(buf, action->conf, size,
769 					 rte_flow_desc_action, action->type);
770 		break;
771 	}
772 	return off;
773 }
774 
775 /**
776  * Copy a list of pattern items.
777  *
778  * @param[out] dst
779  *   Destination buffer. Can be NULL if @p size is zero.
780  * @param size
781  *   Size of @p dst in bytes.
782  * @param[in] src
783  *   Source pattern items.
784  * @param num
785  *   Maximum number of pattern items to process from @p src or 0 to process
786  *   the entire list. In both cases, processing stops after
787  *   RTE_FLOW_ITEM_TYPE_END is encountered.
788  * @param[out] error
789  *   Perform verbose error reporting if not NULL.
790  *
791  * @return
792  *   A positive value representing the number of bytes needed to store
793  *   pattern items regardless of @p size on success (@p buf contents are
794  *   pattern items regardless of @p size on success (@p dst contents are
795  *   otherwise and rte_errno is set.
796  */
797 static int
798 rte_flow_conv_pattern(struct rte_flow_item *dst,
799 		      const size_t size,
800 		      const struct rte_flow_item *src,
801 		      unsigned int num,
802 		      struct rte_flow_error *error)
803 {
804 	uintptr_t data = (uintptr_t)dst;
805 	size_t off;
806 	size_t ret;
807 	unsigned int i;
808 
809 	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
810 		/*
811 		 * Allow PMD-private (negative) flow item types.
812 		 */
813 		if (((int)src->type >= 0) &&
814 			((size_t)src->type >= RTE_DIM(rte_flow_desc_item) ||
815 		    !rte_flow_desc_item[src->type].name))
816 			return rte_flow_error_set
817 				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, src,
818 				 "cannot convert unknown item type");
819 		if (size >= off + sizeof(*dst))
820 			*dst = (struct rte_flow_item){
821 				.type = src->type,
822 			};
823 		off += sizeof(*dst);
824 		if (!src->type)
825 			num = i + 1;
826 	}
827 	num = i;
828 	src -= num;
829 	dst -= num;
830 	do {
831 		if (src->spec) {
832 			off = RTE_ALIGN_CEIL(off, sizeof(double));
833 			ret = rte_flow_conv_item_spec
834 				((void *)(data + off),
835 				 size > off ? size - off : 0, src,
836 				 RTE_FLOW_CONV_ITEM_SPEC);
837 			if (size && size >= off + ret)
838 				dst->spec = (void *)(data + off);
839 			off += ret;
840 
841 		}
842 		if (src->last) {
843 			off = RTE_ALIGN_CEIL(off, sizeof(double));
844 			ret = rte_flow_conv_item_spec
845 				((void *)(data + off),
846 				 size > off ? size - off : 0, src,
847 				 RTE_FLOW_CONV_ITEM_LAST);
848 			if (size && size >= off + ret)
849 				dst->last = (void *)(data + off);
850 			off += ret;
851 		}
852 		if (src->mask) {
853 			off = RTE_ALIGN_CEIL(off, sizeof(double));
854 			ret = rte_flow_conv_item_spec
855 				((void *)(data + off),
856 				 size > off ? size - off : 0, src,
857 				 RTE_FLOW_CONV_ITEM_MASK);
858 			if (size && size >= off + ret)
859 				dst->mask = (void *)(data + off);
860 			off += ret;
861 		}
862 		++src;
863 		++dst;
864 	} while (--num);
865 	return off;
866 }
867 
868 /**
869  * Copy a list of actions.
870  *
871  * @param[out] dst
872  *   Destination buffer. Can be NULL if @p size is zero.
873  * @param size
874  *   Size of @p dst in bytes.
875  * @param[in] src
876  *   Source actions.
877  * @param num
878  *   Maximum number of actions to process from @p src or 0 to process the
879  *   entire list. In both cases, processing stops after
880  *   RTE_FLOW_ACTION_TYPE_END is encountered.
881  * @param[out] error
882  *   Perform verbose error reporting if not NULL.
883  *
884  * @return
885  *   A positive value representing the number of bytes needed to store
886  *   actions regardless of @p size on success (@p dst contents are truncated
887  *   to @p size if not large enough), a negative errno value otherwise and
888  *   rte_errno is set.
889  */
890 static int
891 rte_flow_conv_actions(struct rte_flow_action *dst,
892 		      const size_t size,
893 		      const struct rte_flow_action *src,
894 		      unsigned int num,
895 		      struct rte_flow_error *error)
896 {
897 	uintptr_t data = (uintptr_t)dst;
898 	size_t off;
899 	size_t ret;
900 	unsigned int i;
901 
902 	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
903 		/*
904 		 * Allow PMD-private (negative) flow action types.
905 		 */
906 		if (((int)src->type >= 0) &&
907 		    ((size_t)src->type >= RTE_DIM(rte_flow_desc_action) ||
908 		    !rte_flow_desc_action[src->type].name))
909 			return rte_flow_error_set
910 				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
911 				 src, "cannot convert unknown action type");
912 		if (size >= off + sizeof(*dst))
913 			*dst = (struct rte_flow_action){
914 				.type = src->type,
915 			};
916 		off += sizeof(*dst);
917 		if (!src->type)
918 			num = i + 1;
919 	}
920 	num = i;
921 	src -= num;
922 	dst -= num;
923 	do {
924 		if (src->type == RTE_FLOW_ACTION_TYPE_INDIRECT) {
925 			/*
926 			 * Indirect action conf fills the indirect action
927 			 * handler. Copy the action handle directly instead
928 			 * of duplicating the pointer memory.
929 			 */
930 			if (size)
931 				dst->conf = src->conf;
932 		} else if (src->conf) {
933 			off = RTE_ALIGN_CEIL(off, sizeof(double));
934 			ret = rte_flow_conv_action_conf
935 				((void *)(data + off),
936 				 size > off ? size - off : 0, src);
937 			if (size && size >= off + ret)
938 				dst->conf = (void *)(data + off);
939 			off += ret;
940 		}
941 		++src;
942 		++dst;
943 	} while (--num);
944 	return off;
945 }
946 
947 /**
948  * Copy flow rule components.
949  *
950  * This comprises the flow rule descriptor itself, attributes, pattern and
951  * actions list. NULL components in @p src are skipped.
952  *
953  * @param[out] dst
954  *   Destination buffer. Can be NULL if @p size is zero.
955  * @param size
956  *   Size of @p dst in bytes.
957  * @param[in] src
958  *   Source flow rule descriptor.
959  * @param[out] error
960  *   Perform verbose error reporting if not NULL.
961  *
962  * @return
963  *   A positive value representing the number of bytes needed to store all
964  *   components including the descriptor regardless of @p size on success
965  *   (@p dst contents are truncated to @p size if not large enough), a
966  *   negative errno value otherwise and rte_errno is set.
967  */
968 static int
969 rte_flow_conv_rule(struct rte_flow_conv_rule *dst,
970 		   const size_t size,
971 		   const struct rte_flow_conv_rule *src,
972 		   struct rte_flow_error *error)
973 {
974 	size_t off;
975 	int ret;
976 
977 	rte_memcpy(dst,
978 		   (&(struct rte_flow_conv_rule){
979 			.attr = NULL,
980 			.pattern = NULL,
981 			.actions = NULL,
982 		   }),
983 		   size > sizeof(*dst) ? sizeof(*dst) : size);
984 	off = sizeof(*dst);
985 	if (src->attr_ro) {
986 		off = RTE_ALIGN_CEIL(off, sizeof(double));
987 		if (size && size >= off + sizeof(*dst->attr))
988 			dst->attr = rte_memcpy
989 				((void *)((uintptr_t)dst + off),
990 				 src->attr_ro, sizeof(*dst->attr));
991 		off += sizeof(*dst->attr);
992 	}
993 	if (src->pattern_ro) {
994 		off = RTE_ALIGN_CEIL(off, sizeof(double));
995 		ret = rte_flow_conv_pattern((void *)((uintptr_t)dst + off),
996 					    size > off ? size - off : 0,
997 					    src->pattern_ro, 0, error);
998 		if (ret < 0)
999 			return ret;
1000 		if (size && size >= off + (size_t)ret)
1001 			dst->pattern = (void *)((uintptr_t)dst + off);
1002 		off += ret;
1003 	}
1004 	if (src->actions_ro) {
1005 		off = RTE_ALIGN_CEIL(off, sizeof(double));
1006 		ret = rte_flow_conv_actions((void *)((uintptr_t)dst + off),
1007 					    size > off ? size - off : 0,
1008 					    src->actions_ro, 0, error);
1009 		if (ret < 0)
1010 			return ret;
1011 		if (size >= off + (size_t)ret)
1012 			dst->actions = (void *)((uintptr_t)dst + off);
1013 		off += ret;
1014 	}
1015 	return off;
1016 }
1017 
1018 /**
1019  * Retrieve the name of a pattern item/action type.
1020  *
1021  * @param is_action
1022  *   Nonzero when @p src represents an action type instead of a pattern item
1023  *   type.
1024  * @param is_ptr
1025  *   Nonzero to write string address instead of contents into @p dst.
1026  * @param[out] dst
1027  *   Destination buffer. Can be NULL if @p size is zero.
1028  * @param size
1029  *   Size of @p dst in bytes.
1030  * @param[in] src
1031  *   Depending on @p is_action, source pattern item or action type cast as a
1032  *   pointer.
1033  * @param[out] error
1034  *   Perform verbose error reporting if not NULL.
1035  *
1036  * @return
1037  *   A positive value representing the number of bytes needed to store the
1038  *   name or its address regardless of @p size on success (@p dst contents
1039  *   are truncated to @p size if not large enough), a negative errno value
1040  *   otherwise and rte_errno is set.
1041  */
1042 static int
1043 rte_flow_conv_name(int is_action,
1044 		   int is_ptr,
1045 		   char *dst,
1046 		   const size_t size,
1047 		   const void *src,
1048 		   struct rte_flow_error *error)
1049 {
1050 	struct desc_info {
1051 		const struct rte_flow_desc_data *data;
1052 		size_t num;
1053 	};
1054 	static const struct desc_info info_rep[2] = {
1055 		{ rte_flow_desc_item, RTE_DIM(rte_flow_desc_item), },
1056 		{ rte_flow_desc_action, RTE_DIM(rte_flow_desc_action), },
1057 	};
1058 	const struct desc_info *const info = &info_rep[!!is_action];
1059 	unsigned int type = (uintptr_t)src;
1060 
1061 	if (type >= info->num)
1062 		return rte_flow_error_set
1063 			(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1064 			 "unknown object type to retrieve the name of");
1065 	if (!is_ptr)
1066 		return strlcpy(dst, info->data[type].name, size);
1067 	if (size >= sizeof(const char **))
1068 		*((const char **)dst) = info->data[type].name;
1069 	return sizeof(const char **);
1070 }
1071 
1072 /** Helper function to convert flow API objects. */
1073 int
1074 rte_flow_conv(enum rte_flow_conv_op op,
1075 	      void *dst,
1076 	      size_t size,
1077 	      const void *src,
1078 	      struct rte_flow_error *error)
1079 {
1080 	int ret;
1081 
1082 	switch (op) {
1083 		const struct rte_flow_attr *attr;
1084 
1085 	case RTE_FLOW_CONV_OP_NONE:
1086 		ret = 0;
1087 		break;
1088 	case RTE_FLOW_CONV_OP_ATTR:
1089 		attr = src;
1090 		if (size > sizeof(*attr))
1091 			size = sizeof(*attr);
1092 		rte_memcpy(dst, attr, size);
1093 		ret = sizeof(*attr);
1094 		break;
1095 	case RTE_FLOW_CONV_OP_ITEM:
1096 		ret = rte_flow_conv_pattern(dst, size, src, 1, error);
1097 		break;
1098 	case RTE_FLOW_CONV_OP_ACTION:
1099 		ret = rte_flow_conv_actions(dst, size, src, 1, error);
1100 		break;
1101 	case RTE_FLOW_CONV_OP_PATTERN:
1102 		ret = rte_flow_conv_pattern(dst, size, src, 0, error);
1103 		break;
1104 	case RTE_FLOW_CONV_OP_ACTIONS:
1105 		ret = rte_flow_conv_actions(dst, size, src, 0, error);
1106 		break;
1107 	case RTE_FLOW_CONV_OP_RULE:
1108 		ret = rte_flow_conv_rule(dst, size, src, error);
1109 		break;
1110 	case RTE_FLOW_CONV_OP_ITEM_NAME:
1111 		ret = rte_flow_conv_name(0, 0, dst, size, src, error);
1112 		break;
1113 	case RTE_FLOW_CONV_OP_ACTION_NAME:
1114 		ret = rte_flow_conv_name(1, 0, dst, size, src, error);
1115 		break;
1116 	case RTE_FLOW_CONV_OP_ITEM_NAME_PTR:
1117 		ret = rte_flow_conv_name(0, 1, dst, size, src, error);
1118 		break;
1119 	case RTE_FLOW_CONV_OP_ACTION_NAME_PTR:
1120 		ret = rte_flow_conv_name(1, 1, dst, size, src, error);
1121 		break;
1122 	default:
1123 		ret = rte_flow_error_set
1124 		(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1125 		 "unknown object conversion operation");
1126 	}
1127 
1128 	rte_flow_trace_conv(op, dst, size, src, ret);
1129 
1130 	return ret;
1131 }
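
/*
 * Illustrative sketch; not part of the original file. rte_flow_conv() is
 * typically used in two passes: a sizing pass with a zero-length buffer,
 * then the actual deep copy. The function name is hypothetical.
 */
static __rte_unused int
example_deep_copy_actions(void *buf, size_t size,
			  const struct rte_flow_action *actions)
{
	/* Sizing pass: returns the number of bytes required. */
	int need = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, NULL, 0,
				 actions, NULL);

	if (need < 0 || (size_t)need > size)
		return -1;
	/* Copy pass: duplicates the list and every conf it points to. */
	return rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, buf, size,
			     actions, NULL);
}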
1132 
1133 /** Store a full rte_flow description. */
1134 size_t
1135 rte_flow_copy(struct rte_flow_desc *desc, size_t len,
1136 	      const struct rte_flow_attr *attr,
1137 	      const struct rte_flow_item *items,
1138 	      const struct rte_flow_action *actions)
1139 {
1140 	/*
1141 	 * Overlap struct rte_flow_conv_rule with struct rte_flow_desc in order
1142 	 * to convert the former to the latter without wasting space.
1143 	 */
1144 	struct rte_flow_conv_rule *dst =
1145 		len ?
1146 		(void *)((uintptr_t)desc +
1147 			 (offsetof(struct rte_flow_desc, actions) -
1148 			  offsetof(struct rte_flow_conv_rule, actions))) :
1149 		NULL;
1150 	size_t dst_size =
1151 		len > sizeof(*desc) - sizeof(*dst) ?
1152 		len - (sizeof(*desc) - sizeof(*dst)) :
1153 		0;
1154 	struct rte_flow_conv_rule src = {
1155 		.attr_ro = NULL,
1156 		.pattern_ro = items,
1157 		.actions_ro = actions,
1158 	};
1159 	int ret;
1160 
1161 	RTE_BUILD_BUG_ON(sizeof(struct rte_flow_desc) <
1162 			 sizeof(struct rte_flow_conv_rule));
1163 	if (dst_size &&
1164 	    (&dst->pattern != &desc->items ||
1165 	     &dst->actions != &desc->actions ||
1166 	     (uintptr_t)(dst + 1) != (uintptr_t)(desc + 1))) {
1167 		rte_errno = EINVAL;
1168 		return 0;
1169 	}
1170 	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, dst, dst_size, &src, NULL);
1171 	if (ret < 0)
1172 		return 0;
1173 	ret += sizeof(*desc) - sizeof(*dst);
1174 	rte_memcpy(desc,
1175 		   (&(struct rte_flow_desc){
1176 			.size = ret,
1177 			.attr = *attr,
1178 			.items = dst_size ? dst->pattern : NULL,
1179 			.actions = dst_size ? dst->actions : NULL,
1180 		   }),
1181 		   len > sizeof(*desc) ? sizeof(*desc) : len);
1182 
1183 	rte_flow_trace_copy(desc, len, attr, items, actions, ret);
1184 
1185 	return ret;
1186 }
1187 
1188 int
1189 rte_flow_dev_dump(uint16_t port_id, struct rte_flow *flow,
1190 			FILE *file, struct rte_flow_error *error)
1191 {
1192 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1193 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1194 	int ret;
1195 
1196 	if (unlikely(!ops))
1197 		return -rte_errno;
1198 	if (likely(!!ops->dev_dump)) {
1199 		fts_enter(dev);
1200 		ret = ops->dev_dump(dev, flow, file, error);
1201 		fts_exit(dev);
1202 		return flow_err(port_id, ret, error);
1203 	}
1204 	return rte_flow_error_set(error, ENOSYS,
1205 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1206 				  NULL, rte_strerror(ENOSYS));
1207 }
1208 
1209 int
1210 rte_flow_get_aged_flows(uint16_t port_id, void **contexts,
1211 		    uint32_t nb_contexts, struct rte_flow_error *error)
1212 {
1213 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1214 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1215 	int ret;
1216 
1217 	if (unlikely(!ops))
1218 		return -rte_errno;
1219 	if (likely(!!ops->get_aged_flows)) {
1220 		fts_enter(dev);
1221 		ret = ops->get_aged_flows(dev, contexts, nb_contexts, error);
1222 		fts_exit(dev);
1223 		ret = flow_err(port_id, ret, error);
1224 
1225 		rte_flow_trace_get_aged_flows(port_id, contexts, nb_contexts, ret);
1226 
1227 		return ret;
1228 	}
1229 	return rte_flow_error_set(error, ENOTSUP,
1230 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1231 				  NULL, rte_strerror(ENOTSUP));
1232 }
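
/*
 * Illustrative sketch; not part of the original file. With nb_contexts == 0
 * the call returns how many flows aged out, which sizes the second call.
 * The function name and the fixed array size are hypothetical.
 */
static __rte_unused int
example_drain_aged_flows(uint16_t port_id)
{
	void *contexts[32];
	struct rte_flow_error err;
	int n = rte_flow_get_aged_flows(port_id, NULL, 0, &err);

	if (n <= 0)
		return n;
	if (n > (int)RTE_DIM(contexts))
		n = RTE_DIM(contexts);
	/* Each context is the one supplied in rte_flow_action_age.context. */
	return rte_flow_get_aged_flows(port_id, contexts, n, &err);
}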
1233 
1234 int
1235 rte_flow_get_q_aged_flows(uint16_t port_id, uint32_t queue_id, void **contexts,
1236 			  uint32_t nb_contexts, struct rte_flow_error *error)
1237 {
1238 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1239 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1240 	int ret;
1241 
1242 	if (unlikely(!ops))
1243 		return -rte_errno;
1244 	if (likely(!!ops->get_q_aged_flows)) {
1245 		fts_enter(dev);
1246 		ret = ops->get_q_aged_flows(dev, queue_id, contexts,
1247 					    nb_contexts, error);
1248 		fts_exit(dev);
1249 		ret = flow_err(port_id, ret, error);
1250 
1251 		rte_flow_trace_get_q_aged_flows(port_id, queue_id, contexts,
1252 						nb_contexts, ret);
1253 
1254 		return ret;
1255 	}
1256 	return rte_flow_error_set(error, ENOTSUP,
1257 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1258 				  NULL, rte_strerror(ENOTSUP));
1259 }
1260 
1261 struct rte_flow_action_handle *
1262 rte_flow_action_handle_create(uint16_t port_id,
1263 			      const struct rte_flow_indir_action_conf *conf,
1264 			      const struct rte_flow_action *action,
1265 			      struct rte_flow_error *error)
1266 {
1267 	struct rte_flow_action_handle *handle;
1268 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1269 
1270 	if (unlikely(!ops))
1271 		return NULL;
1272 	if (unlikely(!ops->action_handle_create)) {
1273 		rte_flow_error_set(error, ENOSYS,
1274 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1275 				   rte_strerror(ENOSYS));
1276 		return NULL;
1277 	}
1278 	handle = ops->action_handle_create(&rte_eth_devices[port_id],
1279 					   conf, action, error);
1280 	if (handle == NULL)
1281 		flow_err(port_id, -rte_errno, error);
1282 
1283 	rte_flow_trace_action_handle_create(port_id, conf, action, handle);
1284 
1285 	return handle;
1286 }
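
/*
 * Illustrative sketch; not part of the original file. An indirect COUNT
 * action created once can then be referenced from many flow rules via
 * RTE_FLOW_ACTION_TYPE_INDIRECT. The function name is hypothetical.
 */
static __rte_unused struct rte_flow_action_handle *
example_shared_counter(uint16_t port_id)
{
	const struct rte_flow_indir_action_conf conf = {
		.ingress = 1,
	};
	const struct rte_flow_action_count count = { .id = 0 };
	const struct rte_flow_action action = {
		.type = RTE_FLOW_ACTION_TYPE_COUNT,
		.conf = &count,
	};
	struct rte_flow_error err;

	return rte_flow_action_handle_create(port_id, &conf, &action, &err);
}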
1287 
1288 int
1289 rte_flow_action_handle_destroy(uint16_t port_id,
1290 			       struct rte_flow_action_handle *handle,
1291 			       struct rte_flow_error *error)
1292 {
1293 	int ret;
1294 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1295 
1296 	if (unlikely(!ops))
1297 		return -rte_errno;
1298 	if (unlikely(!ops->action_handle_destroy))
1299 		return rte_flow_error_set(error, ENOSYS,
1300 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1301 					  NULL, rte_strerror(ENOSYS));
1302 	ret = ops->action_handle_destroy(&rte_eth_devices[port_id],
1303 					 handle, error);
1304 	ret = flow_err(port_id, ret, error);
1305 
1306 	rte_flow_trace_action_handle_destroy(port_id, handle, ret);
1307 
1308 	return ret;
1309 }
1310 
1311 int
1312 rte_flow_action_handle_update(uint16_t port_id,
1313 			      struct rte_flow_action_handle *handle,
1314 			      const void *update,
1315 			      struct rte_flow_error *error)
1316 {
1317 	int ret;
1318 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1319 
1320 	if (unlikely(!ops))
1321 		return -rte_errno;
1322 	if (unlikely(!ops->action_handle_update))
1323 		return rte_flow_error_set(error, ENOSYS,
1324 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1325 					  NULL, rte_strerror(ENOSYS));
1326 	ret = ops->action_handle_update(&rte_eth_devices[port_id], handle,
1327 					update, error);
1328 	ret = flow_err(port_id, ret, error);
1329 
1330 	rte_flow_trace_action_handle_update(port_id, handle, update, ret);
1331 
1332 	return ret;
1333 }
1334 
1335 int
1336 rte_flow_action_handle_query(uint16_t port_id,
1337 			     const struct rte_flow_action_handle *handle,
1338 			     void *data,
1339 			     struct rte_flow_error *error)
1340 {
1341 	int ret;
1342 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1343 
1344 	if (unlikely(!ops))
1345 		return -rte_errno;
1346 	if (unlikely(!ops->action_handle_query))
1347 		return rte_flow_error_set(error, ENOSYS,
1348 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1349 					  NULL, rte_strerror(ENOSYS));
1350 	ret = ops->action_handle_query(&rte_eth_devices[port_id], handle,
1351 				       data, error);
1352 	ret = flow_err(port_id, ret, error);
1353 
1354 	rte_flow_trace_action_handle_query(port_id, handle, data, ret);
1355 
1356 	return ret;
1357 }
1358 
1359 int
1360 rte_flow_tunnel_decap_set(uint16_t port_id,
1361 			  struct rte_flow_tunnel *tunnel,
1362 			  struct rte_flow_action **actions,
1363 			  uint32_t *num_of_actions,
1364 			  struct rte_flow_error *error)
1365 {
1366 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1367 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1368 	int ret;
1369 
1370 	if (unlikely(!ops))
1371 		return -rte_errno;
1372 	if (likely(!!ops->tunnel_decap_set)) {
1373 		ret = flow_err(port_id,
1374 			       ops->tunnel_decap_set(dev, tunnel, actions,
1375 						     num_of_actions, error),
1376 			       error);
1377 
1378 		rte_flow_trace_tunnel_decap_set(port_id, tunnel, actions,
1379 						num_of_actions, ret);
1380 
1381 		return ret;
1382 	}
1383 	return rte_flow_error_set(error, ENOTSUP,
1384 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1385 				  NULL, rte_strerror(ENOTSUP));
1386 }
1387 
1388 int
1389 rte_flow_tunnel_match(uint16_t port_id,
1390 		      struct rte_flow_tunnel *tunnel,
1391 		      struct rte_flow_item **items,
1392 		      uint32_t *num_of_items,
1393 		      struct rte_flow_error *error)
1394 {
1395 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1396 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1397 	int ret;
1398 
1399 	if (unlikely(!ops))
1400 		return -rte_errno;
1401 	if (likely(!!ops->tunnel_match)) {
1402 		ret = flow_err(port_id,
1403 			       ops->tunnel_match(dev, tunnel, items,
1404 						 num_of_items, error),
1405 			       error);
1406 
1407 		rte_flow_trace_tunnel_match(port_id, tunnel, items, num_of_items,
1408 					    ret);
1409 
1410 		return ret;
1411 	}
1412 	return rte_flow_error_set(error, ENOTSUP,
1413 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1414 				  NULL, rte_strerror(ENOTSUP));
1415 }
1416 
1417 int
1418 rte_flow_get_restore_info(uint16_t port_id,
1419 			  struct rte_mbuf *m,
1420 			  struct rte_flow_restore_info *restore_info,
1421 			  struct rte_flow_error *error)
1422 {
1423 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1424 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1425 	int ret;
1426 
1427 	if (unlikely(!ops))
1428 		return -rte_errno;
1429 	if (likely(!!ops->get_restore_info)) {
1430 		ret = flow_err(port_id,
1431 			       ops->get_restore_info(dev, m, restore_info,
1432 						     error),
1433 			       error);
1434 
1435 		rte_flow_trace_get_restore_info(port_id, m, restore_info, ret);
1436 
1437 		return ret;
1438 	}
1439 	return rte_flow_error_set(error, ENOTSUP,
1440 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1441 				  NULL, rte_strerror(ENOTSUP));
1442 }
1443 
1444 static struct {
1445 	const struct rte_mbuf_dynflag desc;
1446 	uint64_t value;
1447 } flow_restore_info_dynflag = {
1448 	.desc = { .name = "RTE_MBUF_F_RX_RESTORE_INFO", },
1449 };
1450 
1451 uint64_t
1452 rte_flow_restore_info_dynflag(void)
1453 {
1454 	return flow_restore_info_dynflag.value;
1455 }
1456 
1457 int
1458 rte_flow_restore_info_dynflag_register(void)
1459 {
1460 	if (flow_restore_info_dynflag.value == 0) {
1461 		int offset = rte_mbuf_dynflag_register(&flow_restore_info_dynflag.desc);
1462 
1463 		if (offset < 0)
1464 			return -1;
1465 		flow_restore_info_dynflag.value = RTE_BIT64(offset);
1466 	}
1467 
1468 	return 0;
1469 }
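
/*
 * Illustrative sketch; not part of the original file. The datapath tests the
 * registered flag against an mbuf's ol_flags before asking the driver for
 * restore info. The function name is hypothetical.
 */
static __rte_unused int
example_mbuf_has_restore_info(uint64_t ol_flags)
{
	uint64_t flag = rte_flow_restore_info_dynflag();

	/* Zero until rte_flow_restore_info_dynflag_register() succeeds. */
	return flag != 0 && (ol_flags & flag) != 0;
}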
1470 
1471 int
1472 rte_flow_tunnel_action_decap_release(uint16_t port_id,
1473 				     struct rte_flow_action *actions,
1474 				     uint32_t num_of_actions,
1475 				     struct rte_flow_error *error)
1476 {
1477 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1478 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1479 	int ret;
1480 
1481 	if (unlikely(!ops))
1482 		return -rte_errno;
1483 	if (likely(!!ops->tunnel_action_decap_release)) {
1484 		ret = flow_err(port_id,
1485 			       ops->tunnel_action_decap_release(dev, actions,
1486 								num_of_actions,
1487 								error),
1488 			       error);
1489 
1490 		rte_flow_trace_tunnel_action_decap_release(port_id, actions,
1491 							   num_of_actions, ret);
1492 
1493 		return ret;
1494 	}
1495 	return rte_flow_error_set(error, ENOTSUP,
1496 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1497 				  NULL, rte_strerror(ENOTSUP));
1498 }
1499 
1500 int
1501 rte_flow_tunnel_item_release(uint16_t port_id,
1502 			     struct rte_flow_item *items,
1503 			     uint32_t num_of_items,
1504 			     struct rte_flow_error *error)
1505 {
1506 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1507 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1508 	int ret;
1509 
1510 	if (unlikely(!ops))
1511 		return -rte_errno;
1512 	if (likely(!!ops->tunnel_item_release)) {
1513 		ret = flow_err(port_id,
1514 			       ops->tunnel_item_release(dev, items,
1515 							num_of_items, error),
1516 			       error);
1517 
1518 		rte_flow_trace_tunnel_item_release(port_id, items, num_of_items, ret);
1519 
1520 		return ret;
1521 	}
1522 	return rte_flow_error_set(error, ENOTSUP,
1523 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1524 				  NULL, rte_strerror(ENOTSUP));
1525 }
1526 
1527 int
1528 rte_flow_pick_transfer_proxy(uint16_t port_id, uint16_t *proxy_port_id,
1529 			     struct rte_flow_error *error)
1530 {
1531 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1532 	struct rte_eth_dev *dev;
1533 	int ret;
1534 
1535 	if (unlikely(ops == NULL))
1536 		return -rte_errno;
1537 
1538 	if (ops->pick_transfer_proxy == NULL) {
1539 		*proxy_port_id = port_id;
1540 		return 0;
1541 	}
1542 
1543 	dev = &rte_eth_devices[port_id];
1544 
1545 	ret = flow_err(port_id,
1546 		       ops->pick_transfer_proxy(dev, proxy_port_id, error),
1547 		       error);
1548 
1549 	rte_flow_trace_pick_transfer_proxy(port_id, proxy_port_id, ret);
1550 
1551 	return ret;
1552 }
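
/*
 * Illustrative sketch; not part of the original file. Transfer rules must be
 * created through the proxy port, which may differ from the port the traffic
 * relates to (e.g. for port representors). The function name is hypothetical.
 */
static __rte_unused struct rte_flow *
example_create_transfer_flow(uint16_t port_id,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[])
{
	const struct rte_flow_attr attr = { .transfer = 1 };
	uint16_t proxy_id;
	struct rte_flow_error err;

	if (rte_flow_pick_transfer_proxy(port_id, &proxy_id, &err) != 0)
		return NULL;
	return rte_flow_create(proxy_id, &attr, pattern, actions, &err);
}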
1553 
1554 struct rte_flow_item_flex_handle *
1555 rte_flow_flex_item_create(uint16_t port_id,
1556 			  const struct rte_flow_item_flex_conf *conf,
1557 			  struct rte_flow_error *error)
1558 {
1559 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1560 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1561 	struct rte_flow_item_flex_handle *handle;
1562 
1563 	if (unlikely(!ops))
1564 		return NULL;
1565 	if (unlikely(!ops->flex_item_create)) {
1566 		rte_flow_error_set(error, ENOTSUP,
1567 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1568 				   NULL, rte_strerror(ENOTSUP));
1569 		return NULL;
1570 	}
1571 	handle = ops->flex_item_create(dev, conf, error);
1572 	if (handle == NULL)
1573 		flow_err(port_id, -rte_errno, error);
1574 
1575 	rte_flow_trace_flex_item_create(port_id, conf, handle);
1576 
1577 	return handle;
1578 }
1579 
1580 int
1581 rte_flow_flex_item_release(uint16_t port_id,
1582 			   const struct rte_flow_item_flex_handle *handle,
1583 			   struct rte_flow_error *error)
1584 {
1585 	int ret;
1586 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1587 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1588 
1589 	if (unlikely(!ops || !ops->flex_item_release))
1590 		return rte_flow_error_set(error, ENOTSUP,
1591 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1592 					  NULL, rte_strerror(ENOTSUP));
1593 	ret = ops->flex_item_release(dev, handle, error);
1594 	ret = flow_err(port_id, ret, error);
1595 
1596 	rte_flow_trace_flex_item_release(port_id, handle, ret);
1597 
1598 	return ret;
1599 }
1600 
1601 int
1602 rte_flow_info_get(uint16_t port_id,
1603 		  struct rte_flow_port_info *port_info,
1604 		  struct rte_flow_queue_info *queue_info,
1605 		  struct rte_flow_error *error)
1606 {
1607 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1608 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1609 	int ret;
1610 
1611 	if (unlikely(!ops))
1612 		return -rte_errno;
1613 	if (dev->data->dev_configured == 0) {
1614 		RTE_FLOW_LOG(INFO,
1615 			"Device with port_id=%"PRIu16" is not configured.\n",
1616 			port_id);
1617 		return -EINVAL;
1618 	}
1619 	if (port_info == NULL) {
1620 		RTE_FLOW_LOG(ERR, "Port %"PRIu16" info is NULL.\n", port_id);
1621 		return -EINVAL;
1622 	}
1623 	if (likely(!!ops->info_get)) {
1624 		ret = flow_err(port_id,
1625 			       ops->info_get(dev, port_info, queue_info, error),
1626 			       error);
1627 
1628 		rte_flow_trace_info_get(port_id, port_info, queue_info, ret);
1629 
1630 		return ret;
1631 	}
1632 	return rte_flow_error_set(error, ENOTSUP,
1633 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1634 				  NULL, rte_strerror(ENOTSUP));
1635 }
1636 
1637 int
1638 rte_flow_configure(uint16_t port_id,
1639 		   const struct rte_flow_port_attr *port_attr,
1640 		   uint16_t nb_queue,
1641 		   const struct rte_flow_queue_attr *queue_attr[],
1642 		   struct rte_flow_error *error)
1643 {
1644 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1645 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1646 	int ret;
1647 
1648 	if (unlikely(!ops))
1649 		return -rte_errno;
1650 	if (dev->data->dev_configured == 0) {
1651 		RTE_FLOW_LOG(INFO,
1652 			"Device with port_id=%"PRIu16" is not configured.\n",
1653 			port_id);
1654 		return -EINVAL;
1655 	}
1656 	if (dev->data->dev_started != 0) {
1657 		RTE_FLOW_LOG(INFO,
1658 			"Device with port_id=%"PRIu16" already started.\n",
1659 			port_id);
1660 		return -EINVAL;
1661 	}
1662 	if (port_attr == NULL) {
1663 		RTE_FLOW_LOG(ERR, "Port %"PRIu16" port attributes is NULL.\n", port_id);
1664 		return -EINVAL;
1665 	}
1666 	if (queue_attr == NULL) {
1667 		RTE_FLOW_LOG(ERR, "Port %"PRIu16" queue attributes is NULL.\n", port_id);
1668 		return -EINVAL;
1669 	}
1670 	if ((port_attr->flags & RTE_FLOW_PORT_FLAG_SHARE_INDIRECT) &&
1671 	     !rte_eth_dev_is_valid_port(port_attr->host_port_id)) {
1672 		return rte_flow_error_set(error, ENODEV,
1673 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1674 					  NULL, rte_strerror(ENODEV));
1675 	}
1676 	if (likely(!!ops->configure)) {
1677 		ret = ops->configure(dev, port_attr, nb_queue, queue_attr, error);
1678 		if (ret == 0)
1679 			dev->data->flow_configured = 1;
1680 		ret = flow_err(port_id, ret, error);
1681 
1682 		rte_flow_trace_configure(port_id, port_attr, nb_queue, queue_attr, ret);
1683 
1684 		return ret;
1685 	}
1686 	return rte_flow_error_set(error, ENOTSUP,
1687 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1688 				  NULL, rte_strerror(ENOTSUP));
1689 }
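
/*
 * Illustrative sketch; not part of the original file. Bootstrapping the
 * template-based flow API: query limits, then configure one flow queue
 * between rte_eth_dev_configure() and rte_eth_dev_start(). The function
 * name and the resource numbers are hypothetical.
 */
static __rte_unused int
example_configure_flow_engine(uint16_t port_id)
{
	struct rte_flow_port_info port_info;
	struct rte_flow_queue_info queue_info;
	const struct rte_flow_port_attr port_attr = {
		.nb_counters = 1024,
	};
	const struct rte_flow_queue_attr queue_attr = { .size = 64 };
	const struct rte_flow_queue_attr *queue_attr_list[] = { &queue_attr };
	struct rte_flow_error err;

	if (rte_flow_info_get(port_id, &port_info, &queue_info, &err) != 0)
		return -1;
	return rte_flow_configure(port_id, &port_attr, 1, queue_attr_list,
				  &err);
}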
1690 
1691 struct rte_flow_pattern_template *
1692 rte_flow_pattern_template_create(uint16_t port_id,
1693 		const struct rte_flow_pattern_template_attr *template_attr,
1694 		const struct rte_flow_item pattern[],
1695 		struct rte_flow_error *error)
1696 {
1697 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1698 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1699 	struct rte_flow_pattern_template *template;
1700 
1701 	if (unlikely(!ops))
1702 		return NULL;
1703 	if (dev->data->flow_configured == 0) {
1704 		RTE_FLOW_LOG(INFO,
1705 			"Flow engine on port_id=%"PRIu16" is not configured.\n",
1706 			port_id);
1707 		rte_flow_error_set(error, EINVAL,
1708 				RTE_FLOW_ERROR_TYPE_STATE,
1709 				NULL, rte_strerror(EINVAL));
1710 		return NULL;
1711 	}
1712 	if (template_attr == NULL) {
1713 		RTE_FLOW_LOG(ERR,
1714 			     "Port %"PRIu16" template attr is NULL.\n",
1715 			     port_id);
1716 		rte_flow_error_set(error, EINVAL,
1717 				   RTE_FLOW_ERROR_TYPE_ATTR,
1718 				   NULL, rte_strerror(EINVAL));
1719 		return NULL;
1720 	}
1721 	if (pattern == NULL) {
1722 		RTE_FLOW_LOG(ERR,
1723 			     "Port %"PRIu16" pattern is NULL.\n",
1724 			     port_id);
1725 		rte_flow_error_set(error, EINVAL,
1726 				   RTE_FLOW_ERROR_TYPE_ATTR,
1727 				   NULL, rte_strerror(EINVAL));
1728 		return NULL;
1729 	}
1730 	if (likely(!!ops->pattern_template_create)) {
1731 		template = ops->pattern_template_create(dev, template_attr,
1732 							pattern, error);
1733 		if (template == NULL)
1734 			flow_err(port_id, -rte_errno, error);
1735 
1736 		rte_flow_trace_pattern_template_create(port_id, template_attr,
1737 						       pattern, template);
1738 
1739 		return template;
1740 	}
1741 	rte_flow_error_set(error, ENOTSUP,
1742 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1743 			   NULL, rte_strerror(ENOTSUP));
1744 	return NULL;
1745 }
1746 
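/*
 * Illustrative sketch (editor's note, not part of the library): a pattern
 * template matching any Ethernet frame plus a fully masked IPv4 destination
 * address. Only the masks matter at template creation time; the per-rule
 * spec values are supplied later to rte_flow_async_create().
 *
 *	static const struct rte_flow_item_ipv4 ipv4_mask = {
 *		.hdr.dst_addr = RTE_BE32(UINT32_MAX),
 *	};
 *	const struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .mask = &ipv4_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	const struct rte_flow_pattern_template_attr attr = { .ingress = 1 };
 *	struct rte_flow_error error;
 *	struct rte_flow_pattern_template *pt =
 *		rte_flow_pattern_template_create(port_id, &attr, pattern,
 *						 &error);
 */
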
1747 int
1748 rte_flow_pattern_template_destroy(uint16_t port_id,
1749 		struct rte_flow_pattern_template *pattern_template,
1750 		struct rte_flow_error *error)
1751 {
1752 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1753 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1754 	int ret;
1755 
1756 	if (unlikely(!ops))
1757 		return -rte_errno;
1758 	if (unlikely(pattern_template == NULL))
1759 		return 0;
1760 	if (likely(!!ops->pattern_template_destroy)) {
1761 		ret = flow_err(port_id,
1762 			       ops->pattern_template_destroy(dev,
1763 							     pattern_template,
1764 							     error),
1765 			       error);
1766 
1767 		rte_flow_trace_pattern_template_destroy(port_id, pattern_template,
1768 							ret);
1769 
1770 		return ret;
1771 	}
1772 	return rte_flow_error_set(error, ENOTSUP,
1773 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1774 				  NULL, rte_strerror(ENOTSUP));
1775 }
1776 
1777 struct rte_flow_actions_template *
1778 rte_flow_actions_template_create(uint16_t port_id,
1779 			const struct rte_flow_actions_template_attr *template_attr,
1780 			const struct rte_flow_action actions[],
1781 			const struct rte_flow_action masks[],
1782 			struct rte_flow_error *error)
1783 {
1784 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1785 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1786 	struct rte_flow_actions_template *template;
1787 
1788 	if (unlikely(!ops))
1789 		return NULL;
1790 	if (dev->data->flow_configured == 0) {
1791 		RTE_FLOW_LOG(INFO,
1792 			"Flow engine on port_id=%"PRIu16" is not configured.\n",
1793 			port_id);
1794 		rte_flow_error_set(error, EINVAL,
1795 				   RTE_FLOW_ERROR_TYPE_STATE,
1796 				   NULL, rte_strerror(EINVAL));
1797 		return NULL;
1798 	}
1799 	if (template_attr == NULL) {
1800 		RTE_FLOW_LOG(ERR,
1801 			     "Port %"PRIu16" template attr is NULL.\n",
1802 			     port_id);
1803 		rte_flow_error_set(error, EINVAL,
1804 				   RTE_FLOW_ERROR_TYPE_ATTR,
1805 				   NULL, rte_strerror(EINVAL));
1806 		return NULL;
1807 	}
1808 	if (actions == NULL) {
1809 		RTE_FLOW_LOG(ERR,
1810 			     "Port %"PRIu16" actions is NULL.\n",
1811 			     port_id);
1812 		rte_flow_error_set(error, EINVAL,
1813 				   RTE_FLOW_ERROR_TYPE_ATTR,
1814 				   NULL, rte_strerror(EINVAL));
1815 		return NULL;
1816 	}
1817 	if (masks == NULL) {
1818 		RTE_FLOW_LOG(ERR,
1819 			     "Port %"PRIu16" masks is NULL.\n",
1820 			     port_id);
1821 		rte_flow_error_set(error, EINVAL,
1822 				   RTE_FLOW_ERROR_TYPE_ATTR,
1823 				   NULL, rte_strerror(EINVAL));
1824 		return NULL;
1825 	}
1826 	if (likely(!!ops->actions_template_create)) {
1827 		template = ops->actions_template_create(dev, template_attr,
1828 							actions, masks, error);
1829 		if (template == NULL)
1830 			flow_err(port_id, -rte_errno, error);
1831 
1832 		rte_flow_trace_actions_template_create(port_id, template_attr, actions,
1833 						       masks, template);
1834 
1835 		return template;
1836 	}
1837 	rte_flow_error_set(error, ENOTSUP,
1838 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1839 			   NULL, rte_strerror(ENOTSUP));
1840 	return NULL;
1841 }
1842 
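/*
 * Illustrative sketch (editor's note, not part of the library): an actions
 * template that steers packets to an Rx queue. Leaving the mask entry's
 * conf zeroed marks the queue index as a per-rule value; a non-zero mask
 * would freeze it in the template.
 *
 *	const struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	const struct rte_flow_action masks[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	const struct rte_flow_actions_template_attr attr = { .ingress = 1 };
 *	struct rte_flow_error error;
 *	struct rte_flow_actions_template *at =
 *		rte_flow_actions_template_create(port_id, &attr, actions,
 *						 masks, &error);
 */
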
1843 int
1844 rte_flow_actions_template_destroy(uint16_t port_id,
1845 			struct rte_flow_actions_template *actions_template,
1846 			struct rte_flow_error *error)
1847 {
1848 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1849 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1850 	int ret;
1851 
1852 	if (unlikely(!ops))
1853 		return -rte_errno;
1854 	if (unlikely(actions_template == NULL))
1855 		return 0;
1856 	if (likely(!!ops->actions_template_destroy)) {
1857 		ret = flow_err(port_id,
1858 			       ops->actions_template_destroy(dev,
1859 							     actions_template,
1860 							     error),
1861 			       error);
1862 
1863 		rte_flow_trace_actions_template_destroy(port_id, actions_template,
1864 							ret);
1865 
1866 		return ret;
1867 	}
1868 	return rte_flow_error_set(error, ENOTSUP,
1869 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1870 				  NULL, rte_strerror(ENOTSUP));
1871 }
1872 
1873 struct rte_flow_template_table *
1874 rte_flow_template_table_create(uint16_t port_id,
1875 			const struct rte_flow_template_table_attr *table_attr,
1876 			struct rte_flow_pattern_template *pattern_templates[],
1877 			uint8_t nb_pattern_templates,
1878 			struct rte_flow_actions_template *actions_templates[],
1879 			uint8_t nb_actions_templates,
1880 			struct rte_flow_error *error)
1881 {
1882 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1883 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1884 	struct rte_flow_template_table *table;
1885 
1886 	if (unlikely(!ops))
1887 		return NULL;
1888 	if (dev->data->flow_configured == 0) {
1889 		RTE_FLOW_LOG(INFO,
1890 			"Flow engine on port_id=%"PRIu16" is not configured.\n",
1891 			port_id);
1892 		rte_flow_error_set(error, EINVAL,
1893 				   RTE_FLOW_ERROR_TYPE_STATE,
1894 				   NULL, rte_strerror(EINVAL));
1895 		return NULL;
1896 	}
1897 	if (table_attr == NULL) {
1898 		RTE_FLOW_LOG(ERR,
1899 			     "Port %"PRIu16" table attr is NULL.\n",
1900 			     port_id);
1901 		rte_flow_error_set(error, EINVAL,
1902 				   RTE_FLOW_ERROR_TYPE_ATTR,
1903 				   NULL, rte_strerror(EINVAL));
1904 		return NULL;
1905 	}
1906 	if (pattern_templates == NULL) {
1907 		RTE_FLOW_LOG(ERR,
1908 			     "Port %"PRIu16" pattern templates is NULL.\n",
1909 			     port_id);
1910 		rte_flow_error_set(error, EINVAL,
1911 				   RTE_FLOW_ERROR_TYPE_ATTR,
1912 				   NULL, rte_strerror(EINVAL));
1913 		return NULL;
1914 	}
1915 	if (actions_templates == NULL) {
1916 		RTE_FLOW_LOG(ERR,
1917 			     "Port %"PRIu16" actions templates is NULL.\n",
1918 			     port_id);
1919 		rte_flow_error_set(error, EINVAL,
1920 				   RTE_FLOW_ERROR_TYPE_ATTR,
1921 				   NULL, rte_strerror(EINVAL));
1922 		return NULL;
1923 	}
1924 	if (likely(!!ops->template_table_create)) {
1925 		table = ops->template_table_create(dev, table_attr,
1926 					pattern_templates, nb_pattern_templates,
1927 					actions_templates, nb_actions_templates,
1928 					error);
1929 		if (table == NULL)
1930 			flow_err(port_id, -rte_errno, error);
1931 
1932 		rte_flow_trace_template_table_create(port_id, table_attr,
1933 						     pattern_templates,
1934 						     nb_pattern_templates,
1935 						     actions_templates,
1936 						     nb_actions_templates, table);
1937 
1938 		return table;
1939 	}
1940 	rte_flow_error_set(error, ENOTSUP,
1941 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1942 			   NULL, rte_strerror(ENOTSUP));
1943 	return NULL;
1944 }
1945 
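/*
 * Illustrative sketch (editor's note, not part of the library): combine one
 * pattern template and one actions template ("pt" and "at" from the
 * sketches above) into a table pre-sized for 1024 rules.
 *
 *	const struct rte_flow_template_table_attr table_attr = {
 *		.flow_attr = { .ingress = 1 },
 *		.nb_flows = 1024,
 *	};
 *	struct rte_flow_error error;
 *	struct rte_flow_template_table *table =
 *		rte_flow_template_table_create(port_id, &table_attr,
 *					       &pt, 1, &at, 1, &error);
 */
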
1946 int
1947 rte_flow_template_table_destroy(uint16_t port_id,
1948 				struct rte_flow_template_table *template_table,
1949 				struct rte_flow_error *error)
1950 {
1951 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1952 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1953 	int ret;
1954 
1955 	if (unlikely(!ops))
1956 		return -rte_errno;
1957 	if (unlikely(template_table == NULL))
1958 		return 0;
1959 	if (likely(!!ops->template_table_destroy)) {
1960 		ret = flow_err(port_id,
1961 			       ops->template_table_destroy(dev,
1962 							   template_table,
1963 							   error),
1964 			       error);
1965 
1966 		rte_flow_trace_template_table_destroy(port_id, template_table,
1967 						      ret);
1968 
1969 		return ret;
1970 	}
1971 	return rte_flow_error_set(error, ENOTSUP,
1972 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1973 				  NULL, rte_strerror(ENOTSUP));
1974 }
1975 
1976 struct rte_flow *
1977 rte_flow_async_create(uint16_t port_id,
1978 		      uint32_t queue_id,
1979 		      const struct rte_flow_op_attr *op_attr,
1980 		      struct rte_flow_template_table *template_table,
1981 		      const struct rte_flow_item pattern[],
1982 		      uint8_t pattern_template_index,
1983 		      const struct rte_flow_action actions[],
1984 		      uint8_t actions_template_index,
1985 		      void *user_data,
1986 		      struct rte_flow_error *error)
1987 {
1988 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1989 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1990 	struct rte_flow *flow;
1991 
1992 	flow = ops->async_create(dev, queue_id,
1993 				 op_attr, template_table,
1994 				 pattern, pattern_template_index,
1995 				 actions, actions_template_index,
1996 				 user_data, error);
1997 	if (flow == NULL)
1998 		flow_err(port_id, -rte_errno, error);
1999 
2000 	rte_flow_trace_async_create(port_id, queue_id, op_attr, template_table,
2001 				    pattern, pattern_template_index, actions,
2002 				    actions_template_index, user_data, flow);
2003 
2004 	return flow;
2005 }
2006 
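/*
 * Illustrative sketch (editor's note, not part of the library): enqueue a
 * rule on flow queue 0 using template index 0 of each kind, filling in the
 * fields left open by the template masks ("table" comes from the sketch
 * above). Being a fast-path call, this performs none of the parameter
 * checks done by the slow-path wrappers.
 *
 *	const struct rte_flow_item_ipv4 ipv4_spec = {
 *		.hdr.dst_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 1)),
 *	};
 *	const struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ipv4_spec },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	const struct rte_flow_action_queue queue = { .index = 3 };
 *	const struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	const struct rte_flow_op_attr op_attr = { .postpone = 1 };
 *	struct rte_flow_error error;
 *	struct rte_flow *flow = rte_flow_async_create(port_id, 0, &op_attr,
 *			table, pattern, 0, actions, 0, NULL, &error);
 */
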
2007 struct rte_flow *
2008 rte_flow_async_create_by_index(uint16_t port_id,
2009 			       uint32_t queue_id,
2010 			       const struct rte_flow_op_attr *op_attr,
2011 			       struct rte_flow_template_table *template_table,
2012 			       uint32_t rule_index,
2013 			       const struct rte_flow_action actions[],
2014 			       uint8_t actions_template_index,
2015 			       void *user_data,
2016 			       struct rte_flow_error *error)
2017 {
2018 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2019 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2020 	struct rte_flow *flow;
2021 
2022 	flow = ops->async_create_by_index(dev, queue_id,
2023 					  op_attr, template_table, rule_index,
2024 					  actions, actions_template_index,
2025 					  user_data, error);
2026 	if (flow == NULL)
2027 		flow_err(port_id, -rte_errno, error);
2028 	return flow;
2029 }
2030 
2031 int
2032 rte_flow_async_destroy(uint16_t port_id,
2033 		       uint32_t queue_id,
2034 		       const struct rte_flow_op_attr *op_attr,
2035 		       struct rte_flow *flow,
2036 		       void *user_data,
2037 		       struct rte_flow_error *error)
2038 {
2039 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2040 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2041 	int ret;
2042 
2043 	ret = flow_err(port_id,
2044 		       ops->async_destroy(dev, queue_id,
2045 					  op_attr, flow,
2046 					  user_data, error),
2047 		       error);
2048 
2049 	rte_flow_trace_async_destroy(port_id, queue_id, op_attr, flow,
2050 				     user_data, ret);
2051 
2052 	return ret;
2053 }
2054 
2055 int
2056 rte_flow_async_actions_update(uint16_t port_id,
2057 			      uint32_t queue_id,
2058 			      const struct rte_flow_op_attr *op_attr,
2059 			      struct rte_flow *flow,
2060 			      const struct rte_flow_action actions[],
2061 			      uint8_t actions_template_index,
2062 			      void *user_data,
2063 			      struct rte_flow_error *error)
2064 {
2065 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2066 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2067 	int ret;
2068 
2069 	ret = flow_err(port_id,
2070 		       ops->async_actions_update(dev, queue_id, op_attr,
2071 						 flow, actions,
2072 						 actions_template_index,
2073 						 user_data, error),
2074 		       error);
2075 
2076 	rte_flow_trace_async_actions_update(port_id, queue_id, op_attr, flow,
2077 					    actions, actions_template_index,
2078 					    user_data, ret);
2079 
2080 	return ret;
2081 }
2082 
2083 int
2084 rte_flow_push(uint16_t port_id,
2085 	      uint32_t queue_id,
2086 	      struct rte_flow_error *error)
2087 {
2088 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2089 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2090 	int ret;
2091 
2092 	ret = flow_err(port_id,
2093 		       ops->push(dev, queue_id, error),
2094 		       error);
2095 
2096 	rte_flow_trace_push(port_id, queue_id, ret);
2097 
2098 	return ret;
2099 }
2100 
2101 int
2102 rte_flow_pull(uint16_t port_id,
2103 	      uint32_t queue_id,
2104 	      struct rte_flow_op_result res[],
2105 	      uint16_t n_res,
2106 	      struct rte_flow_error *error)
2107 {
2108 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2109 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2110 	int ret;
2111 	int rc;
2112 
2113 	ret = ops->pull(dev, queue_id, res, n_res, error);
2114 	rc = ret ? ret : flow_err(port_id, ret, error);
2115 
2116 	rte_flow_trace_pull(port_id, queue_id, res, n_res, rc);
2117 
2118 	return rc;
2119 }
2120 
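/*
 * Illustrative sketch (editor's note, not part of the library): flush
 * postponed operations on flow queue 0 and drain their completions;
 * handle_failure() stands in for a hypothetical application callback.
 * Enqueued operations only take effect once a pull reports them.
 *
 *	struct rte_flow_op_result results[64];
 *	struct rte_flow_error error;
 *	int i, n;
 *
 *	rte_flow_push(port_id, 0, &error);
 *	do {
 *		n = rte_flow_pull(port_id, 0, results, RTE_DIM(results),
 *				  &error);
 *	} while (n == 0);
 *	for (i = 0; i < n; i++)
 *		if (results[i].status != RTE_FLOW_OP_SUCCESS)
 *			handle_failure(results[i].user_data);
 */
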
2121 struct rte_flow_action_handle *
2122 rte_flow_async_action_handle_create(uint16_t port_id,
2123 		uint32_t queue_id,
2124 		const struct rte_flow_op_attr *op_attr,
2125 		const struct rte_flow_indir_action_conf *indir_action_conf,
2126 		const struct rte_flow_action *action,
2127 		void *user_data,
2128 		struct rte_flow_error *error)
2129 {
2130 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2131 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2132 	struct rte_flow_action_handle *handle;
2133 
2134 	handle = ops->async_action_handle_create(dev, queue_id, op_attr,
2135 					     indir_action_conf, action, user_data, error);
2136 	if (handle == NULL)
2137 		flow_err(port_id, -rte_errno, error);
2138 
2139 	rte_flow_trace_async_action_handle_create(port_id, queue_id, op_attr,
2140 						  indir_action_conf, action,
2141 						  user_data, handle);
2142 
2143 	return handle;
2144 }
2145 
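/*
 * Illustrative sketch (editor's note, not part of the library): enqueue
 * creation of an indirect counter for ingress rules. The returned handle
 * must not be used until the operation completes via rte_flow_pull().
 *
 *	const struct rte_flow_indir_action_conf conf = { .ingress = 1 };
 *	const struct rte_flow_action action = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *	};
 *	const struct rte_flow_op_attr op_attr = { 0 };
 *	struct rte_flow_error error;
 *	struct rte_flow_action_handle *handle =
 *		rte_flow_async_action_handle_create(port_id, 0, &op_attr,
 *						    &conf, &action, NULL,
 *						    &error);
 */
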
2146 int
2147 rte_flow_async_action_handle_destroy(uint16_t port_id,
2148 		uint32_t queue_id,
2149 		const struct rte_flow_op_attr *op_attr,
2150 		struct rte_flow_action_handle *action_handle,
2151 		void *user_data,
2152 		struct rte_flow_error *error)
2153 {
2154 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2155 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2156 	int ret;
2157 
2158 	ret = ops->async_action_handle_destroy(dev, queue_id, op_attr,
2159 					   action_handle, user_data, error);
2160 	ret = flow_err(port_id, ret, error);
2161 
2162 	rte_flow_trace_async_action_handle_destroy(port_id, queue_id, op_attr,
2163 						   action_handle, user_data, ret);
2164 
2165 	return ret;
2166 }
2167 
2168 int
2169 rte_flow_async_action_handle_update(uint16_t port_id,
2170 		uint32_t queue_id,
2171 		const struct rte_flow_op_attr *op_attr,
2172 		struct rte_flow_action_handle *action_handle,
2173 		const void *update,
2174 		void *user_data,
2175 		struct rte_flow_error *error)
2176 {
2177 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2178 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2179 	int ret;
2180 
2181 	ret = ops->async_action_handle_update(dev, queue_id, op_attr,
2182 					  action_handle, update, user_data, error);
2183 	ret = flow_err(port_id, ret, error);
2184 
2185 	rte_flow_trace_async_action_handle_update(port_id, queue_id, op_attr,
2186 						  action_handle, update,
2187 						  user_data, ret);
2188 
2189 	return ret;
2190 }
2191 
2192 int
2193 rte_flow_async_action_handle_query(uint16_t port_id,
2194 		uint32_t queue_id,
2195 		const struct rte_flow_op_attr *op_attr,
2196 		const struct rte_flow_action_handle *action_handle,
2197 		void *data,
2198 		void *user_data,
2199 		struct rte_flow_error *error)
2200 {
2201 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2202 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2203 	int ret;
2204 
2205 	ret = ops->async_action_handle_query(dev, queue_id, op_attr,
2206 					  action_handle, data, user_data, error);
2207 	ret = flow_err(port_id, ret, error);
2208 
2209 	rte_flow_trace_async_action_handle_query(port_id, queue_id, op_attr,
2210 						 action_handle, data, user_data,
2211 						 ret);
2212 
2213 	return ret;
2214 }
2215 
2216 int
2217 rte_flow_action_handle_query_update(uint16_t port_id,
2218 				    struct rte_flow_action_handle *handle,
2219 				    const void *update, void *query,
2220 				    enum rte_flow_query_update_mode mode,
2221 				    struct rte_flow_error *error)
2222 {
2223 	int ret;
2224 	struct rte_eth_dev *dev;
2225 	const struct rte_flow_ops *ops;
2226 
2227 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2228 	if (!handle)
2229 		return -EINVAL;
2230 	if (!update && !query)
2231 		return -EINVAL;
2232 	dev = &rte_eth_devices[port_id];
2233 	ops = rte_flow_ops_get(port_id, error);
2234 	if (!ops || !ops->action_handle_query_update)
2235 		return -ENOTSUP;
2236 	ret = ops->action_handle_query_update(dev, handle, update,
2237 					      query, mode, error);
2238 	return flow_err(port_id, ret, error);
2239 }
2240 
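/*
 * Illustrative sketch (editor's note, not part of the library): read an
 * indirect counter ("handle" as created in the sketch above) through the
 * combined call. With "update" left NULL the mode choice is moot; drivers
 * implementing both halves apply them in the order the mode selects.
 *
 *	struct rte_flow_query_count counters = { 0 };
 *	struct rte_flow_error error;
 *	int ret = rte_flow_action_handle_query_update(port_id, handle, NULL,
 *			&counters, RTE_FLOW_QU_QUERY_FIRST, &error);
 *	if (ret == 0 && counters.hits_set)
 *		printf("hits: %" PRIu64 "\n", counters.hits);
 */
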
2241 int
2242 rte_flow_async_action_handle_query_update(uint16_t port_id, uint32_t queue_id,
2243 					  const struct rte_flow_op_attr *attr,
2244 					  struct rte_flow_action_handle *handle,
2245 					  const void *update, void *query,
2246 					  enum rte_flow_query_update_mode mode,
2247 					  void *user_data,
2248 					  struct rte_flow_error *error)
2249 {
2250 	int ret;
2251 	struct rte_eth_dev *dev;
2252 	const struct rte_flow_ops *ops;
2253 
2254 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2255 	if (!handle)
2256 		return -EINVAL;
2257 	if (!update && !query)
2258 		return -EINVAL;
2259 	dev = &rte_eth_devices[port_id];
2260 	ops = rte_flow_ops_get(port_id, error);
2261 	if (!ops || !ops->async_action_handle_query_update)
2262 		return -ENOTSUP;
2263 	ret = ops->async_action_handle_query_update(dev, queue_id, attr,
2264 						    handle, update,
2265 						    query, mode,
2266 						    user_data, error);
2267 	return flow_err(port_id, ret, error);
2268 }
2269 
2270 struct rte_flow_action_list_handle *
2271 rte_flow_action_list_handle_create(uint16_t port_id,
2272 				   const
2273 				   struct rte_flow_indir_action_conf *conf,
2274 				   const struct rte_flow_action *actions,
2275 				   struct rte_flow_error *error)
2276 {
2277 	int ret;
2278 	struct rte_eth_dev *dev;
2279 	const struct rte_flow_ops *ops;
2280 	struct rte_flow_action_list_handle *handle;
2281 
2282 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
2283 	ops = rte_flow_ops_get(port_id, error);
2284 	if (!ops || !ops->action_list_handle_create) {
2285 		rte_flow_error_set(error, ENOTSUP,
2286 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2287 				   "action_list handle not supported");
2288 		return NULL;
2289 	}
2290 	dev = &rte_eth_devices[port_id];
2291 	handle = ops->action_list_handle_create(dev, conf, actions, error);
2292 	ret = handle ? 0 : flow_err(port_id, -rte_errno, error);
2293 	rte_flow_trace_action_list_handle_create(port_id, conf, actions, ret);
2294 	return handle;
2295 }
2296 
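/*
 * Illustrative sketch (editor's note, not part of the library): a shared
 * count-then-drop action list, later referenced from rules through
 * RTE_FLOW_ACTION_TYPE_INDIRECT_LIST. Whether a particular combination is
 * accepted is driver specific.
 *
 *	const struct rte_flow_indir_action_conf conf = { .ingress = 1 };
 *	const struct rte_flow_action list[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error error;
 *	struct rte_flow_action_list_handle *h =
 *		rte_flow_action_list_handle_create(port_id, &conf, list,
 *						   &error);
 */
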
2297 int
2298 rte_flow_action_list_handle_destroy(uint16_t port_id,
2299 				    struct rte_flow_action_list_handle *handle,
2300 				    struct rte_flow_error *error)
2301 {
2302 	int ret;
2303 	struct rte_eth_dev *dev;
2304 	const struct rte_flow_ops *ops;
2305 
2306 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2307 	ops = rte_flow_ops_get(port_id, error);
2308 	if (!ops || !ops->action_list_handle_destroy)
2309 		return rte_flow_error_set(error, ENOTSUP,
2310 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2311 					  "action_list handle not supported");
2312 	dev = &rte_eth_devices[port_id];
2313 	ret = ops->action_list_handle_destroy(dev, handle, error);
2314 	ret = flow_err(port_id, ret, error);
2315 	rte_flow_trace_action_list_handle_destroy(port_id, handle, ret);
2316 	return ret;
2317 }
2318 
2319 struct rte_flow_action_list_handle *
2320 rte_flow_async_action_list_handle_create(uint16_t port_id, uint32_t queue_id,
2321 					 const struct rte_flow_op_attr *attr,
2322 					 const struct rte_flow_indir_action_conf *conf,
2323 					 const struct rte_flow_action *actions,
2324 					 void *user_data,
2325 					 struct rte_flow_error *error)
2326 {
2327 	int ret;
2328 	struct rte_eth_dev *dev;
2329 	const struct rte_flow_ops *ops;
2330 	struct rte_flow_action_list_handle *handle;
2331 
2332 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
2333 	ops = rte_flow_ops_get(port_id, error);
2334 	if (!ops || !ops->async_action_list_handle_create) {
2335 		rte_flow_error_set(error, ENOTSUP,
2336 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2337 				   "action_list handle not supported");
2338 		return NULL;
2339 	}
2340 	dev = &rte_eth_devices[port_id];
2341 	handle = ops->async_action_list_handle_create(dev, queue_id, attr, conf,
2342 						      actions, user_data,
2343 						      error);
2344 	ret = handle ? 0 : flow_err(port_id, -rte_errno, error);
2345 	rte_flow_trace_async_action_list_handle_create(port_id, queue_id, attr,
2346 						       conf, actions, user_data,
2347 						       ret);
2348 	return handle;
2349 }
2350 
2351 int
2352 rte_flow_async_action_list_handle_destroy(uint16_t port_id, uint32_t queue_id,
2353 				 const struct rte_flow_op_attr *op_attr,
2354 				 struct rte_flow_action_list_handle *handle,
2355 				 void *user_data, struct rte_flow_error *error)
2356 {
2357 	int ret;
2358 	struct rte_eth_dev *dev;
2359 	const struct rte_flow_ops *ops;
2360 
2361 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2362 	ops = rte_flow_ops_get(port_id, error);
2363 	if (!ops || !ops->async_action_list_handle_destroy)
2364 		return rte_flow_error_set(error, ENOTSUP,
2365 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2366 					  "async action_list handle not supported");
2367 	dev = &rte_eth_devices[port_id];
2368 	ret = ops->async_action_list_handle_destroy(dev, queue_id, op_attr,
2369 						    handle, user_data, error);
2370 	ret = flow_err(port_id, ret, error);
2371 	rte_flow_trace_async_action_list_handle_destroy(port_id, queue_id,
2372 							op_attr, handle,
2373 							user_data, ret);
2374 	return ret;
2375 }
2376 
2377 int
2378 rte_flow_action_list_handle_query_update(uint16_t port_id,
2379 			 const struct rte_flow_action_list_handle *handle,
2380 			 const void **update, void **query,
2381 			 enum rte_flow_query_update_mode mode,
2382 			 struct rte_flow_error *error)
2383 {
2384 	int ret;
2385 	struct rte_eth_dev *dev;
2386 	const struct rte_flow_ops *ops;
2387 
2388 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2389 	ops = rte_flow_ops_get(port_id, error);
2390 	if (!ops || !ops->action_list_handle_query_update)
2391 		return rte_flow_error_set(error, ENOTSUP,
2392 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2393 					  "action_list query_update not supported");
2394 	dev = &rte_eth_devices[port_id];
2395 	ret = ops->action_list_handle_query_update(dev, handle, update, query,
2396 						   mode, error);
2397 	ret = flow_err(port_id, ret, error);
2398 	rte_flow_trace_action_list_handle_query_update(port_id, handle, update,
2399 						       query, mode, ret);
2400 	return ret;
2401 }
2402 
2403 int
2404 rte_flow_async_action_list_handle_query_update(uint16_t port_id, uint32_t queue_id,
2405 			 const struct rte_flow_op_attr *attr,
2406 			 const struct rte_flow_action_list_handle *handle,
2407 			 const void **update, void **query,
2408 			 enum rte_flow_query_update_mode mode,
2409 			 void *user_data, struct rte_flow_error *error)
2410 {
2411 	int ret;
2412 	struct rte_eth_dev *dev;
2413 	const struct rte_flow_ops *ops;
2414 
2415 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2416 	ops = rte_flow_ops_get(port_id, error);
2417 	if (!ops || !ops->async_action_list_handle_query_update)
2418 		return rte_flow_error_set(error, ENOTSUP,
2419 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2420 					  "action_list async query_update not supported");
2421 	dev = &rte_eth_devices[port_id];
2422 	ret = ops->async_action_list_handle_query_update(dev, queue_id, attr,
2423 							 handle, update, query,
2424 							 mode, user_data,
2425 							 error);
2426 	ret = flow_err(port_id, ret, error);
2427 	rte_flow_trace_async_action_list_handle_query_update(port_id, queue_id,
2428 							     attr, handle,
2429 							     update, query,
2430 							     mode, user_data,
2431 							     ret);
2432 	return ret;
2433 }
2434