/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <pthread.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_branch_prediction.h>
#include <rte_string_fns.h>
#include <rte_mbuf_dyn.h>
#include "rte_ethdev.h"
#include "rte_flow_driver.h"
#include "rte_flow.h"

#include "ethdev_trace.h"

/* Mbuf dynamic field offset for metadata. */
int32_t rte_flow_dynf_metadata_offs = -1;

/* Mbuf dynamic flag mask for metadata. */
uint64_t rte_flow_dynf_metadata_mask;

/**
 * Flow elements description tables.
 */
struct rte_flow_desc_data {
	const char *name;
	size_t size;
	size_t (*desc_fn)(void *dst, const void *src);
};

/**
 * Copy flow element description data, truncating the copy to fit @p buf.
 *
 * @param buf
 *   Destination memory.
 * @param data
 *   Source memory.
 * @param size
 *   Requested copy size.
 * @param desc
 *   rte_flow_desc_item - for flow item conversion.
 *   rte_flow_desc_action - for flow action conversion.
 * @param type
 *   Offset into the @p desc table or negative value for private flow
 *   elements.
 *
 * @return
 *   Number of bytes needed to store the element regardless of @p size.
 */
static inline size_t
rte_flow_conv_copy(void *buf, const void *data, const size_t size,
		   const struct rte_flow_desc_data *desc, int type)
{
	/* Allow PMD private flow item. */
	bool rte_type = type >= 0;

	size_t sz = rte_type ? desc[type].size : sizeof(void *);
	if (buf == NULL || data == NULL)
		return 0;
	rte_memcpy(buf, data, (size > sz ? sz : size));
	if (rte_type && desc[type].desc_fn)
		sz += desc[type].desc_fn(size > 0 ? buf : NULL, data);
	return sz;
}

static size_t
rte_flow_item_flex_conv(void *buf, const void *data)
{
	struct rte_flow_item_flex *dst = buf;
	const struct rte_flow_item_flex *src = data;
	if (buf) {
		dst->pattern = rte_memcpy
			((void *)((uintptr_t)(dst + 1)), src->pattern,
			 src->length);
	}
	return src->length;
}

/** Generate flow_item[] entry. */
#define MK_FLOW_ITEM(t, s) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
		.desc_fn = NULL, \
	}

#define MK_FLOW_ITEM_FN(t, s, fn) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
		.desc_fn = fn, \
	}

/** Information about known flow pattern items. */
static const struct rte_flow_desc_data rte_flow_desc_item[] = {
	MK_FLOW_ITEM(END, 0),
	MK_FLOW_ITEM(VOID, 0),
	MK_FLOW_ITEM(INVERT, 0),
	MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
	MK_FLOW_ITEM(PORT_ID, sizeof(struct rte_flow_item_port_id)),
	MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)),
	MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
	MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
	MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
	MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
	MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
	MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
	MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
	MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
	MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
	MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
	MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
	MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
	MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
	MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)),
	MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(ESP, sizeof(struct rte_flow_item_esp)),
	MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
	MK_FLOW_ITEM(VXLAN_GPE, sizeof(struct rte_flow_item_vxlan_gpe)),
	MK_FLOW_ITEM(ARP_ETH_IPV4, sizeof(struct rte_flow_item_arp_eth_ipv4)),
	MK_FLOW_ITEM(IPV6_EXT, sizeof(struct rte_flow_item_ipv6_ext)),
	MK_FLOW_ITEM(IPV6_FRAG_EXT, sizeof(struct rte_flow_item_ipv6_frag_ext)),
	MK_FLOW_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
	MK_FLOW_ITEM(ICMP6_ECHO_REQUEST, sizeof(struct rte_flow_item_icmp6_echo)),
	MK_FLOW_ITEM(ICMP6_ECHO_REPLY, sizeof(struct rte_flow_item_icmp6_echo)),
	MK_FLOW_ITEM(ICMP6_ND_NS, sizeof(struct rte_flow_item_icmp6_nd_ns)),
	MK_FLOW_ITEM(ICMP6_ND_NA, sizeof(struct rte_flow_item_icmp6_nd_na)),
	MK_FLOW_ITEM(ICMP6_ND_OPT, sizeof(struct rte_flow_item_icmp6_nd_opt)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_SLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_TLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
	MK_FLOW_ITEM(MARK, sizeof(struct rte_flow_item_mark)),
	MK_FLOW_ITEM(META, sizeof(struct rte_flow_item_meta)),
	MK_FLOW_ITEM(TAG, sizeof(struct rte_flow_item_tag)),
	MK_FLOW_ITEM(GRE_KEY, sizeof(rte_be32_t)),
	MK_FLOW_ITEM(GRE_OPTION, sizeof(struct rte_flow_item_gre_opt)),
	MK_FLOW_ITEM(GTP_PSC, sizeof(struct rte_flow_item_gtp_psc)),
	MK_FLOW_ITEM(PPPOES, sizeof(struct rte_flow_item_pppoe)),
	MK_FLOW_ITEM(PPPOED, sizeof(struct rte_flow_item_pppoe)),
	MK_FLOW_ITEM(PPPOE_PROTO_ID,
		     sizeof(struct rte_flow_item_pppoe_proto_id)),
	MK_FLOW_ITEM(NSH, sizeof(struct rte_flow_item_nsh)),
	MK_FLOW_ITEM(IGMP, sizeof(struct rte_flow_item_igmp)),
	MK_FLOW_ITEM(AH, sizeof(struct rte_flow_item_ah)),
	MK_FLOW_ITEM(HIGIG2, sizeof(struct rte_flow_item_higig2_hdr)),
	MK_FLOW_ITEM(L2TPV3OIP, sizeof(struct rte_flow_item_l2tpv3oip)),
	MK_FLOW_ITEM(PFCP, sizeof(struct rte_flow_item_pfcp)),
	MK_FLOW_ITEM(ECPRI, sizeof(struct rte_flow_item_ecpri)),
	MK_FLOW_ITEM(GENEVE_OPT, sizeof(struct rte_flow_item_geneve_opt)),
	MK_FLOW_ITEM(INTEGRITY, sizeof(struct rte_flow_item_integrity)),
	MK_FLOW_ITEM(CONNTRACK, sizeof(uint32_t)),
	MK_FLOW_ITEM(PORT_REPRESENTOR, sizeof(struct rte_flow_item_ethdev)),
	MK_FLOW_ITEM(REPRESENTED_PORT, sizeof(struct rte_flow_item_ethdev)),
	MK_FLOW_ITEM_FN(FLEX, sizeof(struct rte_flow_item_flex),
			rte_flow_item_flex_conv),
	MK_FLOW_ITEM(L2TPV2, sizeof(struct rte_flow_item_l2tpv2)),
	MK_FLOW_ITEM(PPP, sizeof(struct rte_flow_item_ppp)),
	MK_FLOW_ITEM(METER_COLOR, sizeof(struct rte_flow_item_meter_color)),
	MK_FLOW_ITEM(IPV6_ROUTING_EXT, sizeof(struct rte_flow_item_ipv6_routing_ext)),
	MK_FLOW_ITEM(QUOTA, sizeof(struct rte_flow_item_quota)),
	MK_FLOW_ITEM(AGGR_AFFINITY, sizeof(struct rte_flow_item_aggr_affinity)),
	MK_FLOW_ITEM(TX_QUEUE, sizeof(struct rte_flow_item_tx_queue)),
	MK_FLOW_ITEM(IB_BTH, sizeof(struct rte_flow_item_ib_bth)),
};

/** Generate flow_action[] entry. */
#define MK_FLOW_ACTION(t, s) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
		.desc_fn = NULL, \
	}

#define MK_FLOW_ACTION_FN(t, fn) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = 0, \
		.desc_fn = fn, \
	}

/** Information about known flow actions. */
static const struct rte_flow_desc_data rte_flow_desc_action[] = {
	MK_FLOW_ACTION(END, 0),
	MK_FLOW_ACTION(VOID, 0),
	MK_FLOW_ACTION(PASSTHRU, 0),
	MK_FLOW_ACTION(JUMP, sizeof(struct rte_flow_action_jump)),
	MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
	MK_FLOW_ACTION(FLAG, 0),
	MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
	MK_FLOW_ACTION(DROP, 0),
	MK_FLOW_ACTION(COUNT, sizeof(struct rte_flow_action_count)),
	MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)),
	MK_FLOW_ACTION(PF, 0),
	MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
	MK_FLOW_ACTION(PORT_ID, sizeof(struct rte_flow_action_port_id)),
	MK_FLOW_ACTION(METER, sizeof(struct rte_flow_action_meter)),
	MK_FLOW_ACTION(SECURITY, sizeof(struct rte_flow_action_security)),
	MK_FLOW_ACTION(OF_DEC_NW_TTL, 0),
	MK_FLOW_ACTION(OF_POP_VLAN, 0),
	MK_FLOW_ACTION(OF_PUSH_VLAN,
		       sizeof(struct rte_flow_action_of_push_vlan)),
	MK_FLOW_ACTION(OF_SET_VLAN_VID,
		       sizeof(struct rte_flow_action_of_set_vlan_vid)),
	MK_FLOW_ACTION(OF_SET_VLAN_PCP,
		       sizeof(struct rte_flow_action_of_set_vlan_pcp)),
	MK_FLOW_ACTION(OF_POP_MPLS,
		       sizeof(struct rte_flow_action_of_pop_mpls)),
	MK_FLOW_ACTION(OF_PUSH_MPLS,
		       sizeof(struct rte_flow_action_of_push_mpls)),
	MK_FLOW_ACTION(VXLAN_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
	MK_FLOW_ACTION(VXLAN_DECAP, 0),
	MK_FLOW_ACTION(NVGRE_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
	MK_FLOW_ACTION(NVGRE_DECAP, 0),
	MK_FLOW_ACTION(RAW_ENCAP, sizeof(struct rte_flow_action_raw_encap)),
	MK_FLOW_ACTION(RAW_DECAP, sizeof(struct rte_flow_action_raw_decap)),
	MK_FLOW_ACTION(SET_IPV4_SRC,
		       sizeof(struct rte_flow_action_set_ipv4)),
	MK_FLOW_ACTION(SET_IPV4_DST,
		       sizeof(struct rte_flow_action_set_ipv4)),
	MK_FLOW_ACTION(SET_IPV6_SRC,
		       sizeof(struct rte_flow_action_set_ipv6)),
	MK_FLOW_ACTION(SET_IPV6_DST,
		       sizeof(struct rte_flow_action_set_ipv6)),
	MK_FLOW_ACTION(SET_TP_SRC,
		       sizeof(struct rte_flow_action_set_tp)),
	MK_FLOW_ACTION(SET_TP_DST,
		       sizeof(struct rte_flow_action_set_tp)),
	MK_FLOW_ACTION(MAC_SWAP, 0),
	MK_FLOW_ACTION(DEC_TTL, 0),
	MK_FLOW_ACTION(SET_TTL, sizeof(struct rte_flow_action_set_ttl)),
	MK_FLOW_ACTION(SET_MAC_SRC, sizeof(struct rte_flow_action_set_mac)),
	MK_FLOW_ACTION(SET_MAC_DST, sizeof(struct rte_flow_action_set_mac)),
	MK_FLOW_ACTION(INC_TCP_SEQ, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(DEC_TCP_SEQ, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(INC_TCP_ACK, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(DEC_TCP_ACK, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(SET_TAG, sizeof(struct rte_flow_action_set_tag)),
	MK_FLOW_ACTION(SET_META, sizeof(struct rte_flow_action_set_meta)),
	MK_FLOW_ACTION(SET_IPV4_DSCP, sizeof(struct rte_flow_action_set_dscp)),
	MK_FLOW_ACTION(SET_IPV6_DSCP, sizeof(struct rte_flow_action_set_dscp)),
	MK_FLOW_ACTION(AGE, sizeof(struct rte_flow_action_age)),
	MK_FLOW_ACTION(SAMPLE, sizeof(struct rte_flow_action_sample)),
	MK_FLOW_ACTION(MODIFY_FIELD,
		       sizeof(struct rte_flow_action_modify_field)),
	/**
	 * Indirect action represented as handle of type
	 * (struct rte_flow_action_handle *) stored in conf field (see
	 * struct rte_flow_action); no need for additional structure to
	 * store indirect action handle.
	 */
	MK_FLOW_ACTION(INDIRECT, 0),
	MK_FLOW_ACTION(CONNTRACK, sizeof(struct rte_flow_action_conntrack)),
	MK_FLOW_ACTION(PORT_REPRESENTOR, sizeof(struct rte_flow_action_ethdev)),
	MK_FLOW_ACTION(REPRESENTED_PORT, sizeof(struct rte_flow_action_ethdev)),
	MK_FLOW_ACTION(METER_MARK, sizeof(struct rte_flow_action_meter_mark)),
	MK_FLOW_ACTION(SEND_TO_KERNEL, 0),
	MK_FLOW_ACTION(QUOTA, sizeof(struct rte_flow_action_quota)),
	MK_FLOW_ACTION(IPV6_EXT_PUSH, sizeof(struct rte_flow_action_ipv6_ext_push)),
	MK_FLOW_ACTION(IPV6_EXT_REMOVE, sizeof(struct rte_flow_action_ipv6_ext_remove)),
	MK_FLOW_ACTION(INDIRECT_LIST,
		       sizeof(struct rte_flow_action_indirect_list)),
};

int
rte_flow_dynf_metadata_register(void)
{
	int offset;
	int flag;

	static const struct rte_mbuf_dynfield desc_offs = {
		.name = RTE_MBUF_DYNFIELD_METADATA_NAME,
		.size = sizeof(uint32_t),
		.align = __alignof__(uint32_t),
	};
	static const struct rte_mbuf_dynflag desc_flag = {
		.name = RTE_MBUF_DYNFLAG_METADATA_NAME,
	};

	offset = rte_mbuf_dynfield_register(&desc_offs);
	if (offset < 0)
		goto error;
	flag = rte_mbuf_dynflag_register(&desc_flag);
	if (flag < 0)
		goto error;
	rte_flow_dynf_metadata_offs = offset;
	rte_flow_dynf_metadata_mask = RTE_BIT64(flag);

	rte_flow_trace_dynf_metadata_register(offset, RTE_BIT64(flag));

	return 0;

error:
	rte_flow_dynf_metadata_offs = -1;
	rte_flow_dynf_metadata_mask = UINT64_C(0);
	return -rte_errno;
}
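
/*
 * Editor's usage sketch (illustrative, not part of the original file): an
 * application registers the metadata dynfield once, before creating flow
 * rules that use it, then reads the field from received mbufs. The helper
 * macro RTE_FLOW_DYNF_METADATA() comes from rte_flow.h; "m" stands for any
 * received mbuf.
 *
 *	if (rte_flow_dynf_metadata_register() < 0)
 *		rte_panic("cannot register metadata dynfield\n");
 *	...
 *	if (m->ol_flags & rte_flow_dynf_metadata_mask)
 *		printf("metadata: %u\n", *RTE_FLOW_DYNF_METADATA(m));
 */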

static inline void
fts_enter(struct rte_eth_dev *dev)
{
	if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
		pthread_mutex_lock(&dev->data->flow_ops_mutex);
}

static inline void
fts_exit(struct rte_eth_dev *dev)
{
	if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
		pthread_mutex_unlock(&dev->data->flow_ops_mutex);
}

static int
flow_err(uint16_t port_id, int ret, struct rte_flow_error *error)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return rte_flow_error_set(error, EIO,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(EIO));
	return ret;
}

/* Get generic flow operations structure from a port. */
const struct rte_flow_ops *
rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops;
	int code;

	if (unlikely(!rte_eth_dev_is_valid_port(port_id)))
		code = ENODEV;
	else if (unlikely(dev->dev_ops->flow_ops_get == NULL))
		/* flow API not supported with this driver dev_ops */
		code = ENOSYS;
	else
		code = dev->dev_ops->flow_ops_get(dev, &ops);
	if (code == 0 && ops == NULL)
		/* flow API not supported with this device */
		code = ENOSYS;

	if (code != 0) {
		rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, rte_strerror(code));
		return NULL;
	}
	return ops;
}

/* Check whether a flow rule can be created on a given port. */
int
rte_flow_validate(uint16_t port_id,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;

	if (likely(!!attr) && attr->transfer &&
	    (attr->ingress || attr->egress)) {
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ATTR,
					  attr, "cannot use attr ingress/egress with attr transfer");
	}

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->validate)) {
		fts_enter(dev);
		ret = ops->validate(dev, attr, pattern, actions, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_validate(port_id, attr, pattern, actions, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Create a flow rule on a given port. */
struct rte_flow *
rte_flow_create(uint16_t port_id,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct rte_flow *flow;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return NULL;
	if (likely(!!ops->create)) {
		fts_enter(dev);
		flow = ops->create(dev, attr, pattern, actions, error);
		fts_exit(dev);
		if (flow == NULL)
			flow_err(port_id, -rte_errno, error);

		rte_flow_trace_create(port_id, attr, pattern, actions, flow);

		return flow;
	}
	rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(ENOSYS));
	return NULL;
}

/* Destroy a flow rule on a given port. */
int
rte_flow_destroy(uint16_t port_id,
		 struct rte_flow *flow,
		 struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->destroy)) {
		fts_enter(dev);
		ret = ops->destroy(dev, flow, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_destroy(port_id, flow, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}
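
/*
 * Editor's usage sketch (illustrative, not part of the original file): the
 * typical validate/create/destroy sequence for a simple rule steering IPv4
 * traffic to Rx queue 1. All values here are examples only.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *flow;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *	...
 *	rte_flow_destroy(port_id, flow, &err);
 */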

int
rte_flow_actions_update(uint16_t port_id,
			struct rte_flow *flow,
			const struct rte_flow_action actions[],
			struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->actions_update)) {
		fts_enter(dev);
		ret = ops->actions_update(dev, flow, actions, error);
		fts_exit(dev);

		rte_flow_trace_actions_update(port_id, flow, actions, ret);

		return flow_err(port_id, ret, error);
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Destroy all flow rules associated with a port. */
int
rte_flow_flush(uint16_t port_id,
	       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->flush)) {
		fts_enter(dev);
		ret = ops->flush(dev, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_flush(port_id, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Query an existing flow rule. */
int
rte_flow_query(uint16_t port_id,
	       struct rte_flow *flow,
	       const struct rte_flow_action *action,
	       void *data,
	       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (!ops)
		return -rte_errno;
	if (likely(!!ops->query)) {
		fts_enter(dev);
		ret = ops->query(dev, flow, action, data, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_query(port_id, flow, action, data, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Restrict ingress traffic to the defined flow rules. */
int
rte_flow_isolate(uint16_t port_id,
		 int set,
		 struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (!ops)
		return -rte_errno;
	if (likely(!!ops->isolate)) {
		fts_enter(dev);
		ret = ops->isolate(dev, set, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_isolate(port_id, set, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Initialize flow error structure. */
int
rte_flow_error_set(struct rte_flow_error *error,
		   int code,
		   enum rte_flow_error_type type,
		   const void *cause,
		   const char *message)
{
	if (error) {
		*error = (struct rte_flow_error){
			.type = type,
			.cause = cause,
			.message = message,
		};
	}
	rte_errno = code;
	return -code;
}
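
/*
 * Editor's usage sketch (illustrative, not part of the original file):
 * drivers and callers report failures through rte_flow_error_set() so that
 * rte_errno and the user-provided error structure stay consistent. The
 * bounds check below is hypothetical.
 *
 *	if (queue->index >= dev->data->nb_rx_queues)
 *		return rte_flow_error_set(error, EINVAL,
 *					  RTE_FLOW_ERROR_TYPE_ACTION_CONF,
 *					  queue, "queue index out of range");
 */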

/** Pattern item specification types. */
enum rte_flow_conv_item_spec_type {
	RTE_FLOW_CONV_ITEM_SPEC,
	RTE_FLOW_CONV_ITEM_LAST,
	RTE_FLOW_CONV_ITEM_MASK,
};

/**
 * Copy pattern item specification.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] item
 *   Pattern item to copy specification from.
 * @param type
 *   Specification selector for either @p spec, @p last or @p mask.
 *
 * @return
 *   Number of bytes needed to store pattern item specification regardless
 *   of @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_item_spec(void *buf, const size_t size,
			const struct rte_flow_item *item,
			enum rte_flow_conv_item_spec_type type)
{
	size_t off;
	const void *data =
		type == RTE_FLOW_CONV_ITEM_SPEC ? item->spec :
		type == RTE_FLOW_CONV_ITEM_LAST ? item->last :
		type == RTE_FLOW_CONV_ITEM_MASK ? item->mask :
		NULL;

	switch (item->type) {
		union {
			const struct rte_flow_item_raw *raw;
		} spec;
		union {
			const struct rte_flow_item_raw *raw;
		} last;
		union {
			const struct rte_flow_item_raw *raw;
		} mask;
		union {
			const struct rte_flow_item_raw *raw;
		} src;
		union {
			struct rte_flow_item_raw *raw;
		} dst;
		size_t tmp;

	case RTE_FLOW_ITEM_TYPE_RAW:
		spec.raw = item->spec;
		last.raw = item->last ? item->last : item->spec;
		mask.raw = item->mask ? item->mask : &rte_flow_item_raw_mask;
		src.raw = data;
		dst.raw = buf;
		rte_memcpy(dst.raw,
			   (&(struct rte_flow_item_raw){
				.relative = src.raw->relative,
				.search = src.raw->search,
				.reserved = src.raw->reserved,
				.offset = src.raw->offset,
				.limit = src.raw->limit,
				.length = src.raw->length,
			   }),
			   size > sizeof(*dst.raw) ? sizeof(*dst.raw) : size);
		off = sizeof(*dst.raw);
		if (type == RTE_FLOW_CONV_ITEM_SPEC ||
		    (type == RTE_FLOW_CONV_ITEM_MASK &&
		     ((spec.raw->length & mask.raw->length) >=
		      (last.raw->length & mask.raw->length))))
			tmp = spec.raw->length & mask.raw->length;
		else
			tmp = last.raw->length & mask.raw->length;
		if (tmp) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.raw->pattern));
			if (size >= off + tmp)
				dst.raw->pattern = rte_memcpy
					((void *)((uintptr_t)dst.raw + off),
					 src.raw->pattern, tmp);
			off += tmp;
		}
		break;
	default:
		off = rte_flow_conv_copy(buf, data, size,
					 rte_flow_desc_item, item->type);
		break;
	}
	return off;
}

/**
 * Copy action configuration.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] action
 *   Action to copy configuration from.
 *
 * @return
 *   Number of bytes needed to store action configuration regardless of
 *   @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_action_conf(void *buf, const size_t size,
			  const struct rte_flow_action *action)
{
	size_t off;

	switch (action->type) {
		union {
			const struct rte_flow_action_rss *rss;
			const struct rte_flow_action_vxlan_encap *vxlan_encap;
			const struct rte_flow_action_nvgre_encap *nvgre_encap;
		} src;
		union {
			struct rte_flow_action_rss *rss;
			struct rte_flow_action_vxlan_encap *vxlan_encap;
			struct rte_flow_action_nvgre_encap *nvgre_encap;
		} dst;
		size_t tmp;
		int ret;

	case RTE_FLOW_ACTION_TYPE_RSS:
		src.rss = action->conf;
		dst.rss = buf;
		rte_memcpy(dst.rss,
			   (&(struct rte_flow_action_rss){
				.func = src.rss->func,
				.level = src.rss->level,
				.types = src.rss->types,
				.key_len = src.rss->key_len,
				.queue_num = src.rss->queue_num,
			   }),
			   size > sizeof(*dst.rss) ? sizeof(*dst.rss) : size);
		off = sizeof(*dst.rss);
		if (src.rss->key_len && src.rss->key) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->key));
			tmp = sizeof(*src.rss->key) * src.rss->key_len;
			if (size >= off + tmp)
				dst.rss->key = rte_memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->key, tmp);
			off += tmp;
		}
		if (src.rss->queue_num) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->queue));
			tmp = sizeof(*src.rss->queue) * src.rss->queue_num;
			if (size >= off + tmp)
				dst.rss->queue = rte_memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->queue, tmp);
			off += tmp;
		}
		break;
	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
	case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
		src.vxlan_encap = action->conf;
		dst.vxlan_encap = buf;
		RTE_BUILD_BUG_ON(sizeof(*src.vxlan_encap) !=
				 sizeof(*src.nvgre_encap) ||
				 offsetof(struct rte_flow_action_vxlan_encap,
					  definition) !=
				 offsetof(struct rte_flow_action_nvgre_encap,
					  definition));
		off = sizeof(*dst.vxlan_encap);
		if (src.vxlan_encap->definition) {
			off = RTE_ALIGN_CEIL
				(off, sizeof(*dst.vxlan_encap->definition));
			ret = rte_flow_conv
				(RTE_FLOW_CONV_OP_PATTERN,
				 (void *)((uintptr_t)dst.vxlan_encap + off),
				 size > off ? size - off : 0,
				 src.vxlan_encap->definition, NULL);
			if (ret < 0)
				return 0;
			if (size >= off + ret)
				dst.vxlan_encap->definition =
					(void *)((uintptr_t)dst.vxlan_encap +
						 off);
			off += ret;
		}
		break;
	default:
		off = rte_flow_conv_copy(buf, action->conf, size,
					 rte_flow_desc_action, action->type);
		break;
	}
	return off;
}

/**
 * Copy a list of pattern items.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source pattern items.
 * @param num
 *   Maximum number of pattern items to process from @p src or 0 to process
 *   the entire list. In both cases, processing stops after
 *   RTE_FLOW_ITEM_TYPE_END is encountered.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store
 *   pattern items regardless of @p size on success (@p dst contents are
 *   truncated to @p size if not large enough), a negative errno value
 *   otherwise and rte_errno is set.
 */
static int
rte_flow_conv_pattern(struct rte_flow_item *dst,
		      const size_t size,
		      const struct rte_flow_item *src,
		      unsigned int num,
		      struct rte_flow_error *error)
{
	uintptr_t data = (uintptr_t)dst;
	size_t off;
	size_t ret;
	unsigned int i;

	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
		/* Allow PMD private flow item. */
		if (((int)src->type >= 0) &&
		    ((size_t)src->type >= RTE_DIM(rte_flow_desc_item) ||
		     !rte_flow_desc_item[src->type].name))
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, src,
				 "cannot convert unknown item type");
		if (size >= off + sizeof(*dst))
			*dst = (struct rte_flow_item){
				.type = src->type,
			};
		off += sizeof(*dst);
		if (!src->type)
			num = i + 1;
	}
	num = i;
	src -= num;
	dst -= num;
	do {
		if (src->spec) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_SPEC);
			if (size && size >= off + ret)
				dst->spec = (void *)(data + off);
			off += ret;
		}
		if (src->last) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_LAST);
			if (size && size >= off + ret)
				dst->last = (void *)(data + off);
			off += ret;
		}
		if (src->mask) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_MASK);
			if (size && size >= off + ret)
				dst->mask = (void *)(data + off);
			off += ret;
		}
		++src;
		++dst;
	} while (--num);
	return off;
}

/**
 * Copy a list of actions.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source actions.
 * @param num
 *   Maximum number of actions to process from @p src or 0 to process the
 *   entire list. In both cases, processing stops after
 *   RTE_FLOW_ACTION_TYPE_END is encountered.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store
 *   actions regardless of @p size on success (@p dst contents are truncated
 *   to @p size if not large enough), a negative errno value otherwise and
 *   rte_errno is set.
 */
static int
rte_flow_conv_actions(struct rte_flow_action *dst,
		      const size_t size,
		      const struct rte_flow_action *src,
		      unsigned int num,
		      struct rte_flow_error *error)
{
	uintptr_t data = (uintptr_t)dst;
	size_t off;
	size_t ret;
	unsigned int i;

	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
		/* Allow PMD private flow action. */
		if (((int)src->type >= 0) &&
		    ((size_t)src->type >= RTE_DIM(rte_flow_desc_action) ||
		     !rte_flow_desc_action[src->type].name))
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
				 src, "cannot convert unknown action type");
		if (size >= off + sizeof(*dst))
			*dst = (struct rte_flow_action){
				.type = src->type,
			};
		off += sizeof(*dst);
		if (!src->type)
			num = i + 1;
	}
	num = i;
	src -= num;
	dst -= num;
	do {
		if (src->type == RTE_FLOW_ACTION_TYPE_INDIRECT) {
			/*
			 * Indirect action conf holds the indirect action
			 * handle. Copy the handle directly instead of
			 * duplicating the pointed-to memory.
			 */
			if (size)
				dst->conf = src->conf;
		} else if (src->conf) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_action_conf
				((void *)(data + off),
				 size > off ? size - off : 0, src);
			if (size && size >= off + ret)
				dst->conf = (void *)(data + off);
			off += ret;
		}
		++src;
		++dst;
	} while (--num);
	return off;
}

/**
 * Copy flow rule components.
 *
 * This comprises the flow rule descriptor itself, attributes, pattern and
 * actions list. NULL components in @p src are skipped.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source flow rule descriptor.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store all
 *   components including the descriptor regardless of @p size on success
 *   (@p dst contents are truncated to @p size if not large enough), a
 *   negative errno value otherwise and rte_errno is set.
 */
static int
rte_flow_conv_rule(struct rte_flow_conv_rule *dst,
		   const size_t size,
		   const struct rte_flow_conv_rule *src,
		   struct rte_flow_error *error)
{
	size_t off;
	int ret;

	rte_memcpy(dst,
		   (&(struct rte_flow_conv_rule){
			.attr = NULL,
			.pattern = NULL,
			.actions = NULL,
		   }),
		   size > sizeof(*dst) ? sizeof(*dst) : size);
	off = sizeof(*dst);
	if (src->attr_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		if (size && size >= off + sizeof(*dst->attr))
			dst->attr = rte_memcpy
				((void *)((uintptr_t)dst + off),
				 src->attr_ro, sizeof(*dst->attr));
		off += sizeof(*dst->attr);
	}
	if (src->pattern_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		ret = rte_flow_conv_pattern((void *)((uintptr_t)dst + off),
					    size > off ? size - off : 0,
					    src->pattern_ro, 0, error);
		if (ret < 0)
			return ret;
		if (size && size >= off + (size_t)ret)
			dst->pattern = (void *)((uintptr_t)dst + off);
		off += ret;
	}
	if (src->actions_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		ret = rte_flow_conv_actions((void *)((uintptr_t)dst + off),
					    size > off ? size - off : 0,
					    src->actions_ro, 0, error);
		if (ret < 0)
			return ret;
		if (size >= off + (size_t)ret)
			dst->actions = (void *)((uintptr_t)dst + off);
		off += ret;
	}
	return off;
}

/**
 * Retrieve the name of a pattern item/action type.
 *
 * @param is_action
 *   Nonzero when @p src represents an action type instead of a pattern item
 *   type.
 * @param is_ptr
 *   Nonzero to write string address instead of contents into @p dst.
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Depending on @p is_action, source pattern item or action type cast as a
 *   pointer.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store the
 *   name or its address regardless of @p size on success (@p dst contents
 *   are truncated to @p size if not large enough), a negative errno value
 *   otherwise and rte_errno is set.
 */
static int
rte_flow_conv_name(int is_action,
		   int is_ptr,
		   char *dst,
		   const size_t size,
		   const void *src,
		   struct rte_flow_error *error)
{
	struct desc_info {
		const struct rte_flow_desc_data *data;
		size_t num;
	};
	static const struct desc_info info_rep[2] = {
		{ rte_flow_desc_item, RTE_DIM(rte_flow_desc_item), },
		{ rte_flow_desc_action, RTE_DIM(rte_flow_desc_action), },
	};
	const struct desc_info *const info = &info_rep[!!is_action];
	unsigned int type = (uintptr_t)src;

	if (type >= info->num)
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			 "unknown object type to retrieve the name of");
	if (!is_ptr)
		return strlcpy(dst, info->data[type].name, size);
	if (size >= sizeof(const char **))
		*((const char **)dst) = info->data[type].name;
	return sizeof(const char **);
}

/** Helper function to convert flow API objects. */
int
rte_flow_conv(enum rte_flow_conv_op op,
	      void *dst,
	      size_t size,
	      const void *src,
	      struct rte_flow_error *error)
{
	int ret;

	switch (op) {
		const struct rte_flow_attr *attr;

	case RTE_FLOW_CONV_OP_NONE:
		ret = 0;
		break;
	case RTE_FLOW_CONV_OP_ATTR:
		attr = src;
		if (size > sizeof(*attr))
			size = sizeof(*attr);
		rte_memcpy(dst, attr, size);
		ret = sizeof(*attr);
		break;
	case RTE_FLOW_CONV_OP_ITEM:
		ret = rte_flow_conv_pattern(dst, size, src, 1, error);
		break;
	case RTE_FLOW_CONV_OP_ACTION:
		ret = rte_flow_conv_actions(dst, size, src, 1, error);
		break;
	case RTE_FLOW_CONV_OP_PATTERN:
		ret = rte_flow_conv_pattern(dst, size, src, 0, error);
		break;
	case RTE_FLOW_CONV_OP_ACTIONS:
		ret = rte_flow_conv_actions(dst, size, src, 0, error);
		break;
	case RTE_FLOW_CONV_OP_RULE:
		ret = rte_flow_conv_rule(dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ITEM_NAME:
		ret = rte_flow_conv_name(0, 0, dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ACTION_NAME:
		ret = rte_flow_conv_name(1, 0, dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ITEM_NAME_PTR:
		ret = rte_flow_conv_name(0, 1, dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ACTION_NAME_PTR:
		ret = rte_flow_conv_name(1, 1, dst, size, src, error);
		break;
	default:
		ret = rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			 "unknown object conversion operation");
	}

	rte_flow_trace_conv(op, dst, size, src, ret);

	return ret;
}
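
/*
 * Editor's usage sketch (illustrative, not part of the original file):
 * rte_flow_conv() supports two-pass sizing, since the return value is the
 * number of bytes needed regardless of the buffer size. "rule" stands for
 * an already-populated struct rte_flow_conv_rule.
 *
 *	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, &err);
 *	if (ret < 0)
 *		return ret;
 *	buf = malloc(ret);
 *	if (buf == NULL)
 *		return -ENOMEM;
 *	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, buf, ret, &rule, &err);
 */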

/** Store a full rte_flow description. */
size_t
rte_flow_copy(struct rte_flow_desc *desc, size_t len,
	      const struct rte_flow_attr *attr,
	      const struct rte_flow_item *items,
	      const struct rte_flow_action *actions)
{
	/*
	 * Overlap struct rte_flow_conv_rule with struct rte_flow_desc in
	 * order to convert the former to the latter without wasting space.
	 */
	struct rte_flow_conv_rule *dst =
		len ?
		(void *)((uintptr_t)desc +
			 (offsetof(struct rte_flow_desc, actions) -
			  offsetof(struct rte_flow_conv_rule, actions))) :
		NULL;
	size_t dst_size =
		len > sizeof(*desc) - sizeof(*dst) ?
		len - (sizeof(*desc) - sizeof(*dst)) :
		0;
	struct rte_flow_conv_rule src = {
		.attr_ro = NULL,
		.pattern_ro = items,
		.actions_ro = actions,
	};
	int ret;

	RTE_BUILD_BUG_ON(sizeof(struct rte_flow_desc) <
			 sizeof(struct rte_flow_conv_rule));
	if (dst_size &&
	    (&dst->pattern != &desc->items ||
	     &dst->actions != &desc->actions ||
	     (uintptr_t)(dst + 1) != (uintptr_t)(desc + 1))) {
		rte_errno = EINVAL;
		return 0;
	}
	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, dst, dst_size, &src, NULL);
	if (ret < 0)
		return 0;
	ret += sizeof(*desc) - sizeof(*dst);
	rte_memcpy(desc,
		   (&(struct rte_flow_desc){
			.size = ret,
			.attr = *attr,
			.items = dst_size ? dst->pattern : NULL,
			.actions = dst_size ? dst->actions : NULL,
		   }),
		   len > sizeof(*desc) ? sizeof(*desc) : len);

	rte_flow_trace_copy(desc, len, attr, items, actions, ret);

	return ret;
}

int
rte_flow_dev_dump(uint16_t port_id, struct rte_flow *flow,
		  FILE *file, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->dev_dump)) {
		fts_enter(dev);
		ret = ops->dev_dump(dev, flow, file, error);
		fts_exit(dev);
		return flow_err(port_id, ret, error);
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

int
rte_flow_get_aged_flows(uint16_t port_id, void **contexts,
			uint32_t nb_contexts, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->get_aged_flows)) {
		fts_enter(dev);
		ret = ops->get_aged_flows(dev, contexts, nb_contexts, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_get_aged_flows(port_id, contexts, nb_contexts, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}
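
/*
 * Editor's usage sketch (illustrative, not part of the original file):
 * after receiving an RTE_ETH_EVENT_FLOW_AGED event, the application
 * collects the contexts attached to expired AGE actions. MAX_AGED and the
 * loop body are application-defined.
 *
 *	void *contexts[MAX_AGED];
 *	int n = rte_flow_get_aged_flows(port_id, contexts, MAX_AGED, &err);
 *	for (int i = 0; i < n; i++)
 *		... destroy or refresh the rule behind contexts[i] ...
 */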

int
rte_flow_get_q_aged_flows(uint16_t port_id, uint32_t queue_id, void **contexts,
			  uint32_t nb_contexts, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->get_q_aged_flows)) {
		fts_enter(dev);
		ret = ops->get_q_aged_flows(dev, queue_id, contexts,
					    nb_contexts, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_get_q_aged_flows(port_id, queue_id, contexts,
						nb_contexts, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

struct rte_flow_action_handle *
rte_flow_action_handle_create(uint16_t port_id,
			      const struct rte_flow_indir_action_conf *conf,
			      const struct rte_flow_action *action,
			      struct rte_flow_error *error)
{
	struct rte_flow_action_handle *handle;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return NULL;
	if (unlikely(!ops->action_handle_create)) {
		rte_flow_error_set(error, ENOSYS,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   rte_strerror(ENOSYS));
		return NULL;
	}
	handle = ops->action_handle_create(&rte_eth_devices[port_id],
					   conf, action, error);
	if (handle == NULL)
		flow_err(port_id, -rte_errno, error);

	rte_flow_trace_action_handle_create(port_id, conf, action, handle);

	return handle;
}

int
rte_flow_action_handle_destroy(uint16_t port_id,
			       struct rte_flow_action_handle *handle,
			       struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(!ops->action_handle_destroy))
		return rte_flow_error_set(error, ENOSYS,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOSYS));
	ret = ops->action_handle_destroy(&rte_eth_devices[port_id],
					 handle, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_action_handle_destroy(port_id, handle, ret);

	return ret;
}

int
rte_flow_action_handle_update(uint16_t port_id,
			      struct rte_flow_action_handle *handle,
			      const void *update,
			      struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(!ops->action_handle_update))
		return rte_flow_error_set(error, ENOSYS,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOSYS));
	ret = ops->action_handle_update(&rte_eth_devices[port_id], handle,
					update, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_action_handle_update(port_id, handle, update, ret);

	return ret;
}

int
rte_flow_action_handle_query(uint16_t port_id,
			     const struct rte_flow_action_handle *handle,
			     void *data,
			     struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(!ops->action_handle_query))
		return rte_flow_error_set(error, ENOSYS,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOSYS));
	ret = ops->action_handle_query(&rte_eth_devices[port_id], handle,
				       data, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_action_handle_query(port_id, handle, data, ret);

	return ret;
}
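
/*
 * Editor's usage sketch (illustrative, not part of the original file): a
 * shared counter created once as an indirect action, referenced from rules
 * through RTE_FLOW_ACTION_TYPE_INDIRECT (whose conf is the handle itself,
 * per the descriptor table above), and queried by handle.
 *
 *	struct rte_flow_indir_action_conf conf = { .ingress = 1 };
 *	struct rte_flow_action count = { .type = RTE_FLOW_ACTION_TYPE_COUNT };
 *	struct rte_flow_action_handle *handle =
 *		rte_flow_action_handle_create(port_id, &conf, &count, &err);
 *	...
 *	struct rte_flow_action use = {
 *		.type = RTE_FLOW_ACTION_TYPE_INDIRECT, .conf = handle,
 *	};
 *	...
 *	struct rte_flow_query_count stats = { .reset = 0 };
 *	rte_flow_action_handle_query(port_id, handle, &stats, &err);
 */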

int
rte_flow_tunnel_decap_set(uint16_t port_id,
			  struct rte_flow_tunnel *tunnel,
			  struct rte_flow_action **actions,
			  uint32_t *num_of_actions,
			  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_decap_set)) {
		ret = flow_err(port_id,
			       ops->tunnel_decap_set(dev, tunnel, actions,
						     num_of_actions, error),
			       error);

		rte_flow_trace_tunnel_decap_set(port_id, tunnel, actions,
						num_of_actions, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_tunnel_match(uint16_t port_id,
		      struct rte_flow_tunnel *tunnel,
		      struct rte_flow_item **items,
		      uint32_t *num_of_items,
		      struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_match)) {
		ret = flow_err(port_id,
			       ops->tunnel_match(dev, tunnel, items,
						 num_of_items, error),
			       error);

		rte_flow_trace_tunnel_match(port_id, tunnel, items, num_of_items,
					    ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_get_restore_info(uint16_t port_id,
			  struct rte_mbuf *m,
			  struct rte_flow_restore_info *restore_info,
			  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->get_restore_info)) {
		ret = flow_err(port_id,
			       ops->get_restore_info(dev, m, restore_info,
						     error),
			       error);

		rte_flow_trace_get_restore_info(port_id, m, restore_info, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

static struct {
	const struct rte_mbuf_dynflag desc;
	uint64_t value;
} flow_restore_info_dynflag = {
	.desc = { .name = "RTE_MBUF_F_RX_RESTORE_INFO", },
};

uint64_t
rte_flow_restore_info_dynflag(void)
{
	return flow_restore_info_dynflag.value;
}

int
rte_flow_restore_info_dynflag_register(void)
{
	if (flow_restore_info_dynflag.value == 0) {
		int offset = rte_mbuf_dynflag_register(&flow_restore_info_dynflag.desc);

		if (offset < 0)
			return -1;
		flow_restore_info_dynflag.value = RTE_BIT64(offset);
	}

	return 0;
}
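
/*
 * Editor's usage sketch (illustrative, not part of the original file):
 * register the restore-info flag before enabling tunnel offloads, then
 * test it on received packets to decide whether restore info must be
 * fetched. Error handling is elided.
 *
 *	if (rte_flow_restore_info_dynflag_register() < 0)
 *		... tunnel offload restore info not available ...
 *	...
 *	if (m->ol_flags & rte_flow_restore_info_dynflag())
 *		rte_flow_get_restore_info(port_id, m, &info, &err);
 */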

int
rte_flow_tunnel_action_decap_release(uint16_t port_id,
				     struct rte_flow_action *actions,
				     uint32_t num_of_actions,
				     struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_action_decap_release)) {
		ret = flow_err(port_id,
			       ops->tunnel_action_decap_release(dev, actions,
								num_of_actions,
								error),
			       error);

		rte_flow_trace_tunnel_action_decap_release(port_id, actions,
							   num_of_actions, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_tunnel_item_release(uint16_t port_id,
			     struct rte_flow_item *items,
			     uint32_t num_of_items,
			     struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_item_release)) {
		ret = flow_err(port_id,
			       ops->tunnel_item_release(dev, items,
							num_of_items, error),
			       error);

		rte_flow_trace_tunnel_item_release(port_id, items, num_of_items, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_pick_transfer_proxy(uint16_t port_id, uint16_t *proxy_port_id,
			     struct rte_flow_error *error)
{
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_eth_dev *dev;
	int ret;

	if (unlikely(ops == NULL))
		return -rte_errno;

	if (ops->pick_transfer_proxy == NULL) {
		*proxy_port_id = port_id;
		return 0;
	}

	dev = &rte_eth_devices[port_id];

	ret = flow_err(port_id,
		       ops->pick_transfer_proxy(dev, proxy_port_id, error),
		       error);

	rte_flow_trace_pick_transfer_proxy(port_id, proxy_port_id, ret);

	return ret;
}
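
/*
 * Editor's usage sketch (illustrative, not part of the original file):
 * transfer rules must be created through the proxy port, which may differ
 * from the port the traffic is observed on. attr/pattern/actions are
 * assumed to describe a transfer rule.
 *
 *	uint16_t proxy_id;
 *
 *	if (rte_flow_pick_transfer_proxy(port_id, &proxy_id, &err) == 0)
 *		flow = rte_flow_create(proxy_id, &attr, pattern, actions, &err);
 */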

struct rte_flow_item_flex_handle *
rte_flow_flex_item_create(uint16_t port_id,
			  const struct rte_flow_item_flex_conf *conf,
			  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow_item_flex_handle *handle;

	if (unlikely(!ops))
		return NULL;
	if (unlikely(!ops->flex_item_create)) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, rte_strerror(ENOTSUP));
		return NULL;
	}
	handle = ops->flex_item_create(dev, conf, error);
	if (handle == NULL)
		flow_err(port_id, -rte_errno, error);

	rte_flow_trace_flex_item_create(port_id, conf, handle);

	return handle;
}

int
rte_flow_flex_item_release(uint16_t port_id,
			   const struct rte_flow_item_flex_handle *handle,
			   struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops || !ops->flex_item_release))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOTSUP));
	ret = ops->flex_item_release(dev, handle, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_flex_item_release(port_id, handle, ret);

	return ret;
}

int
rte_flow_info_get(uint16_t port_id,
		  struct rte_flow_port_info *port_info,
		  struct rte_flow_queue_info *queue_info,
		  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (dev->data->dev_configured == 0) {
		RTE_FLOW_LOG(INFO,
			"Device with port_id=%"PRIu16" is not configured.\n",
			port_id);
		return -EINVAL;
	}
	if (port_info == NULL) {
		RTE_FLOW_LOG(ERR, "Port %"PRIu16" info is NULL.\n", port_id);
		return -EINVAL;
	}
	if (likely(!!ops->info_get)) {
		ret = flow_err(port_id,
			       ops->info_get(dev, port_info, queue_info, error),
			       error);

		rte_flow_trace_info_get(port_id, port_info, queue_info, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_configure(uint16_t port_id,
		   const struct rte_flow_port_attr *port_attr,
		   uint16_t nb_queue,
		   const struct rte_flow_queue_attr *queue_attr[],
		   struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (dev->data->dev_configured == 0) {
		RTE_FLOW_LOG(INFO,
			"Device with port_id=%"PRIu16" is not configured.\n",
			port_id);
		return -EINVAL;
	}
	if (dev->data->dev_started != 0) {
		RTE_FLOW_LOG(INFO,
			"Device with port_id=%"PRIu16" already started.\n",
			port_id);
		return -EINVAL;
	}
	if (port_attr == NULL) {
		RTE_FLOW_LOG(ERR, "Port %"PRIu16" info is NULL.\n", port_id);
		return -EINVAL;
	}
	if (queue_attr == NULL) {
		RTE_FLOW_LOG(ERR, "Port %"PRIu16" queue info is NULL.\n", port_id);
		return -EINVAL;
	}
	if ((port_attr->flags & RTE_FLOW_PORT_FLAG_SHARE_INDIRECT) &&
	    !rte_eth_dev_is_valid_port(port_attr->host_port_id)) {
		return rte_flow_error_set(error, ENODEV,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENODEV));
	}
	if (likely(!!ops->configure)) {
		ret = ops->configure(dev, port_attr, nb_queue, queue_attr, error);
		if (ret == 0)
			dev->data->flow_configured = 1;
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_configure(port_id, port_attr, nb_queue, queue_attr, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}
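
/*
 * Editor's usage sketch (illustrative, not part of the original file):
 * query the flow engine limits, then pre-allocate resources before
 * starting the port. The attribute values below are examples only.
 *
 *	struct rte_flow_port_info pinfo;
 *	struct rte_flow_queue_info qinfo;
 *	struct rte_flow_port_attr pattr = { .nb_counters = 1024 };
 *	struct rte_flow_queue_attr qattr = { .size = 64 };
 *	const struct rte_flow_queue_attr *qattrs[] = { &qattr };
 *
 *	if (rte_flow_info_get(port_id, &pinfo, &qinfo, &err) == 0)
 *		rte_flow_configure(port_id, &pattr, 1, qattrs, &err);
 */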
1691 
1692 struct rte_flow_pattern_template *
1693 rte_flow_pattern_template_create(uint16_t port_id,
1694 		const struct rte_flow_pattern_template_attr *template_attr,
1695 		const struct rte_flow_item pattern[],
1696 		struct rte_flow_error *error)
1697 {
1698 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1699 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1700 	struct rte_flow_pattern_template *template;
1701 
1702 	if (unlikely(!ops))
1703 		return NULL;
1704 	if (dev->data->flow_configured == 0) {
1705 		RTE_FLOW_LOG(INFO,
1706 			"Flow engine on port_id=%"PRIu16" is not configured.\n",
1707 			port_id);
1708 		rte_flow_error_set(error, EINVAL,
1709 				RTE_FLOW_ERROR_TYPE_STATE,
1710 				NULL, rte_strerror(EINVAL));
1711 		return NULL;
1712 	}
1713 	if (template_attr == NULL) {
1714 		RTE_FLOW_LOG(ERR,
1715 			     "Port %"PRIu16" template attr is NULL.\n",
1716 			     port_id);
1717 		rte_flow_error_set(error, EINVAL,
1718 				   RTE_FLOW_ERROR_TYPE_ATTR,
1719 				   NULL, rte_strerror(EINVAL));
1720 		return NULL;
1721 	}
1722 	if (pattern == NULL) {
1723 		RTE_FLOW_LOG(ERR,
1724 			     "Port %"PRIu16" pattern is NULL.\n",
1725 			     port_id);
1726 		rte_flow_error_set(error, EINVAL,
1727 				   RTE_FLOW_ERROR_TYPE_ATTR,
1728 				   NULL, rte_strerror(EINVAL));
1729 		return NULL;
1730 	}
1731 	if (likely(!!ops->pattern_template_create)) {
1732 		template = ops->pattern_template_create(dev, template_attr,
1733 							pattern, error);
1734 		if (template == NULL)
1735 			flow_err(port_id, -rte_errno, error);
1736 
1737 		rte_flow_trace_pattern_template_create(port_id, template_attr,
1738 						       pattern, template);
1739 
1740 		return template;
1741 	}
1742 	rte_flow_error_set(error, ENOTSUP,
1743 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1744 			   NULL, rte_strerror(ENOTSUP));
1745 	return NULL;
1746 }
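
/*
 * Usage sketch for rte_flow_pattern_template_create() (illustrative):
 * per the template API convention, item masks select the fields that
 * rules created from this template will match on; here, the IPv4
 * destination address.
 *
 *	struct rte_flow_pattern_template_attr attr = { .ingress = 1 };
 *	struct rte_flow_item_ipv4 ipv4_mask = {
 *		.hdr.dst_addr = RTE_BE32(UINT32_MAX),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .mask = &ipv4_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_pattern_template *pt =
 *		rte_flow_pattern_template_create(0, &attr, pattern, &error);
 */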
1747 
1748 int
1749 rte_flow_pattern_template_destroy(uint16_t port_id,
1750 		struct rte_flow_pattern_template *pattern_template,
1751 		struct rte_flow_error *error)
1752 {
1753 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1754 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1755 	int ret;
1756 
1757 	if (unlikely(!ops))
1758 		return -rte_errno;
1759 	if (unlikely(pattern_template == NULL))
1760 		return 0;
1761 	if (likely(!!ops->pattern_template_destroy)) {
1762 		ret = flow_err(port_id,
1763 			       ops->pattern_template_destroy(dev,
1764 							     pattern_template,
1765 							     error),
1766 			       error);
1767 
1768 		rte_flow_trace_pattern_template_destroy(port_id, pattern_template,
1769 							ret);
1770 
1771 		return ret;
1772 	}
1773 	return rte_flow_error_set(error, ENOTSUP,
1774 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1775 				  NULL, rte_strerror(ENOTSUP));
1776 }
1777 
1778 struct rte_flow_actions_template *
1779 rte_flow_actions_template_create(uint16_t port_id,
1780 			const struct rte_flow_actions_template_attr *template_attr,
1781 			const struct rte_flow_action actions[],
1782 			const struct rte_flow_action masks[],
1783 			struct rte_flow_error *error)
1784 {
1785 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1786 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1787 	struct rte_flow_actions_template *template;
1788 
1789 	if (unlikely(!ops))
1790 		return NULL;
1791 	if (dev->data->flow_configured == 0) {
1792 		RTE_FLOW_LOG(INFO,
1793 			"Flow engine on port_id=%"PRIu16" is not configured.\n",
1794 			port_id);
1795 		rte_flow_error_set(error, EINVAL,
1796 				   RTE_FLOW_ERROR_TYPE_STATE,
1797 				   NULL, rte_strerror(EINVAL));
1798 		return NULL;
1799 	}
1800 	if (template_attr == NULL) {
1801 		RTE_FLOW_LOG(ERR,
1802 			     "Port %"PRIu16" template attr is NULL.\n",
1803 			     port_id);
1804 		rte_flow_error_set(error, EINVAL,
1805 				   RTE_FLOW_ERROR_TYPE_ATTR,
1806 				   NULL, rte_strerror(EINVAL));
1807 		return NULL;
1808 	}
1809 	if (actions == NULL) {
1810 		RTE_FLOW_LOG(ERR,
1811 			     "Port %"PRIu16" actions is NULL.\n",
1812 			     port_id);
1813 		rte_flow_error_set(error, EINVAL,
1814 				   RTE_FLOW_ERROR_TYPE_ATTR,
1815 				   NULL, rte_strerror(EINVAL));
1816 		return NULL;
1817 	}
1818 	if (masks == NULL) {
1819 		RTE_FLOW_LOG(ERR,
1820 			     "Port %"PRIu16" masks is NULL.\n",
1821 			     port_id);
1822 		rte_flow_error_set(error, EINVAL,
1823 				   RTE_FLOW_ERROR_TYPE_ATTR,
1824 				   NULL, rte_strerror(EINVAL));
1825 		return NULL;
1826 	}
1827 	if (likely(!!ops->actions_template_create)) {
1828 		template = ops->actions_template_create(dev, template_attr,
1829 							actions, masks, error);
1830 		if (template == NULL)
1831 			flow_err(port_id, -rte_errno, error);
1832 
1833 		rte_flow_trace_actions_template_create(port_id, template_attr, actions,
1834 						       masks, template);
1835 
1836 		return template;
1837 	}
1838 	rte_flow_error_set(error, ENOTSUP,
1839 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1840 			   NULL, rte_strerror(ENOTSUP));
1841 	return NULL;
1842 }
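
/*
 * Usage sketch for rte_flow_actions_template_create() (illustrative):
 * the masks array mirrors the actions array; a non-zero mask field
 * fixes the corresponding value at template creation time, while a
 * zero one defers it to each rte_flow_async_create() call.
 *
 *	struct rte_flow_actions_template_attr attr = { .ingress = 1 };
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	struct rte_flow_action_queue queue_mask = { .index = UINT16_MAX };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_action masks[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue_mask },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_actions_template *at =
 *		rte_flow_actions_template_create(0, &attr, actions, masks,
 *						 &error);
 */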
1843 
1844 int
1845 rte_flow_actions_template_destroy(uint16_t port_id,
1846 			struct rte_flow_actions_template *actions_template,
1847 			struct rte_flow_error *error)
1848 {
1849 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1850 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1851 	int ret;
1852 
1853 	if (unlikely(!ops))
1854 		return -rte_errno;
1855 	if (unlikely(actions_template == NULL))
1856 		return 0;
1857 	if (likely(!!ops->actions_template_destroy)) {
1858 		ret = flow_err(port_id,
1859 			       ops->actions_template_destroy(dev,
1860 							     actions_template,
1861 							     error),
1862 			       error);
1863 
1864 		rte_flow_trace_actions_template_destroy(port_id, actions_template,
1865 							ret);
1866 
1867 		return ret;
1868 	}
1869 	return rte_flow_error_set(error, ENOTSUP,
1870 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1871 				  NULL, rte_strerror(ENOTSUP));
1872 }
1873 
1874 struct rte_flow_template_table *
1875 rte_flow_template_table_create(uint16_t port_id,
1876 			const struct rte_flow_template_table_attr *table_attr,
1877 			struct rte_flow_pattern_template *pattern_templates[],
1878 			uint8_t nb_pattern_templates,
1879 			struct rte_flow_actions_template *actions_templates[],
1880 			uint8_t nb_actions_templates,
1881 			struct rte_flow_error *error)
1882 {
1883 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1884 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1885 	struct rte_flow_template_table *table;
1886 
1887 	if (unlikely(!ops))
1888 		return NULL;
1889 	if (dev->data->flow_configured == 0) {
1890 		RTE_FLOW_LOG(INFO,
1891 			"Flow engine on port_id=%"PRIu16" is not configured.\n",
1892 			port_id);
1893 		rte_flow_error_set(error, EINVAL,
1894 				   RTE_FLOW_ERROR_TYPE_STATE,
1895 				   NULL, rte_strerror(EINVAL));
1896 		return NULL;
1897 	}
1898 	if (table_attr == NULL) {
1899 		RTE_FLOW_LOG(ERR,
1900 			     "Port %"PRIu16" table attr is NULL.\n",
1901 			     port_id);
1902 		rte_flow_error_set(error, EINVAL,
1903 				   RTE_FLOW_ERROR_TYPE_ATTR,
1904 				   NULL, rte_strerror(EINVAL));
1905 		return NULL;
1906 	}
1907 	if (pattern_templates == NULL) {
1908 		RTE_FLOW_LOG(ERR,
1909 			     "Port %"PRIu16" pattern templates is NULL.\n",
1910 			     port_id);
1911 		rte_flow_error_set(error, EINVAL,
1912 				   RTE_FLOW_ERROR_TYPE_ATTR,
1913 				   NULL, rte_strerror(EINVAL));
1914 		return NULL;
1915 	}
1916 	if (actions_templates == NULL) {
1917 		RTE_FLOW_LOG(ERR,
1918 			     "Port %"PRIu16" actions templates is NULL.\n",
1919 			     port_id);
1920 		rte_flow_error_set(error, EINVAL,
1921 				   RTE_FLOW_ERROR_TYPE_ATTR,
1922 				   NULL, rte_strerror(EINVAL));
1923 		return NULL;
1924 	}
1925 	if (likely(!!ops->template_table_create)) {
1926 		table = ops->template_table_create(dev, table_attr,
1927 					pattern_templates, nb_pattern_templates,
1928 					actions_templates, nb_actions_templates,
1929 					error);
1930 		if (table == NULL)
1931 			flow_err(port_id, -rte_errno, error);
1932 
1933 		rte_flow_trace_template_table_create(port_id, table_attr,
1934 						     pattern_templates,
1935 						     nb_pattern_templates,
1936 						     actions_templates,
1937 						     nb_actions_templates, table);
1938 
1939 		return table;
1940 	}
1941 	rte_flow_error_set(error, ENOTSUP,
1942 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1943 			   NULL, rte_strerror(ENOTSUP));
1944 	return NULL;
1945 }
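
/*
 * Usage sketch for rte_flow_template_table_create() (illustrative;
 * "pt" and "at" are the templates from the sketches above and
 * nb_flows is an arbitrary capacity hint):
 *
 *	struct rte_flow_template_table_attr table_attr = {
 *		.flow_attr = { .ingress = 1 },
 *		.nb_flows = 1024,
 *	};
 *	struct rte_flow_template_table *table =
 *		rte_flow_template_table_create(0, &table_attr,
 *					       &pt, 1, &at, 1, &error);
 */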
1946 
1947 int
1948 rte_flow_template_table_destroy(uint16_t port_id,
1949 				struct rte_flow_template_table *template_table,
1950 				struct rte_flow_error *error)
1951 {
1952 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1953 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1954 	int ret;
1955 
1956 	if (unlikely(!ops))
1957 		return -rte_errno;
1958 	if (unlikely(template_table == NULL))
1959 		return 0;
1960 	if (likely(!!ops->template_table_destroy)) {
1961 		ret = flow_err(port_id,
1962 			       ops->template_table_destroy(dev,
1963 							   template_table,
1964 							   error),
1965 			       error);
1966 
1967 		rte_flow_trace_template_table_destroy(port_id, template_table,
1968 						      ret);
1969 
1970 		return ret;
1971 	}
1972 	return rte_flow_error_set(error, ENOTSUP,
1973 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1974 				  NULL, rte_strerror(ENOTSUP));
1975 }
1976 
1977 int
1978 rte_flow_group_set_miss_actions(uint16_t port_id,
1979 				uint32_t group_id,
1980 				const struct rte_flow_group_attr *attr,
1981 				const struct rte_flow_action actions[],
1982 				struct rte_flow_error *error)
1983 {
1984 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1985 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1986 
1987 	if (unlikely(!ops))
1988 		return -rte_errno;
1989 	if (likely(!!ops->group_set_miss_actions)) {
1990 		return flow_err(port_id,
1991 				ops->group_set_miss_actions(dev, group_id, attr, actions, error),
1992 				error);
1993 	}
1994 	return rte_flow_error_set(error, ENOTSUP,
1995 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1996 				  NULL, rte_strerror(ENOTSUP));
1997 }
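
/*
 * Usage sketch for rte_flow_group_set_miss_actions() (illustrative):
 * send packets that miss every rule in group 1 to group 2 via a JUMP
 * action; passing an END-only list is expected to restore the default
 * miss behavior.
 *
 *	struct rte_flow_group_attr group_attr = { .ingress = 1 };
 *	struct rte_flow_action_jump jump = { .group = 2 };
 *	struct rte_flow_action miss[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	int ret = rte_flow_group_set_miss_actions(0, 1, &group_attr, miss,
 *						  &error);
 */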
1998 
1999 struct rte_flow *
2000 rte_flow_async_create(uint16_t port_id,
2001 		      uint32_t queue_id,
2002 		      const struct rte_flow_op_attr *op_attr,
2003 		      struct rte_flow_template_table *template_table,
2004 		      const struct rte_flow_item pattern[],
2005 		      uint8_t pattern_template_index,
2006 		      const struct rte_flow_action actions[],
2007 		      uint8_t actions_template_index,
2008 		      void *user_data,
2009 		      struct rte_flow_error *error)
2010 {
2011 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2012 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2013 	struct rte_flow *flow;
2014 
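	/*
	 * Fast path: unlike the synchronous flow API, ops and device state
	 * are not validated here before calling into the PMD.
	 */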
2015 	flow = ops->async_create(dev, queue_id,
2016 				 op_attr, template_table,
2017 				 pattern, pattern_template_index,
2018 				 actions, actions_template_index,
2019 				 user_data, error);
2020 	if (flow == NULL)
2021 		flow_err(port_id, -rte_errno, error);
2022 
2023 	rte_flow_trace_async_create(port_id, queue_id, op_attr, template_table,
2024 				    pattern, pattern_template_index, actions,
2025 				    actions_template_index, user_data, flow);
2026 
2027 	return flow;
2028 }
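
/*
 * Usage sketch for rte_flow_async_create() (illustrative; "table" and
 * "actions" come from the sketches above, and index 0 picks the only
 * pattern/actions template in the table). With .postpone set, the PMD
 * may batch the operation until rte_flow_push():
 *
 *	struct rte_flow_op_attr op_attr = { .postpone = 1 };
 *	struct rte_flow_item_ipv4 ipv4_spec = {
 *		.hdr.dst_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 1)),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ipv4_spec },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow *flow =
 *		rte_flow_async_create(0, 0, &op_attr, table, pattern, 0,
 *				      actions, 0, NULL, &error);
 *	rte_flow_push(0, 0, &error);
 */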
2029 
2030 struct rte_flow *
2031 rte_flow_async_create_by_index(uint16_t port_id,
2032 			       uint32_t queue_id,
2033 			       const struct rte_flow_op_attr *op_attr,
2034 			       struct rte_flow_template_table *template_table,
2035 			       uint32_t rule_index,
2036 			       const struct rte_flow_action actions[],
2037 			       uint8_t actions_template_index,
2038 			       void *user_data,
2039 			       struct rte_flow_error *error)
2040 {
2041 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2042 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2043 	struct rte_flow *flow;
2044 
2045 	flow = ops->async_create_by_index(dev, queue_id,
2046 					  op_attr, template_table, rule_index,
2047 					  actions, actions_template_index,
2048 					  user_data, error);
2049 	if (flow == NULL)
2050 		flow_err(port_id, -rte_errno, error);
2051 	return flow;
2052 }
2053 
2054 int
2055 rte_flow_async_destroy(uint16_t port_id,
2056 		       uint32_t queue_id,
2057 		       const struct rte_flow_op_attr *op_attr,
2058 		       struct rte_flow *flow,
2059 		       void *user_data,
2060 		       struct rte_flow_error *error)
2061 {
2062 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2063 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2064 	int ret;
2065 
2066 	ret = flow_err(port_id,
2067 		       ops->async_destroy(dev, queue_id,
2068 					  op_attr, flow,
2069 					  user_data, error),
2070 		       error);
2071 
2072 	rte_flow_trace_async_destroy(port_id, queue_id, op_attr, flow,
2073 				     user_data, ret);
2074 
2075 	return ret;
2076 }
2077 
2078 int
2079 rte_flow_async_actions_update(uint16_t port_id,
2080 			      uint32_t queue_id,
2081 			      const struct rte_flow_op_attr *op_attr,
2082 			      struct rte_flow *flow,
2083 			      const struct rte_flow_action actions[],
2084 			      uint8_t actions_template_index,
2085 			      void *user_data,
2086 			      struct rte_flow_error *error)
2087 {
2088 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2089 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2090 	int ret;
2091 
2092 	ret = flow_err(port_id,
2093 		       ops->async_actions_update(dev, queue_id, op_attr,
2094 						 flow, actions,
2095 						 actions_template_index,
2096 						 user_data, error),
2097 		       error);
2098 
2099 	rte_flow_trace_async_actions_update(port_id, queue_id, op_attr, flow,
2100 					    actions, actions_template_index,
2101 					    user_data, ret);
2102 
2103 	return ret;
2104 }
2105 
2106 int
2107 rte_flow_push(uint16_t port_id,
2108 	      uint32_t queue_id,
2109 	      struct rte_flow_error *error)
2110 {
2111 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2112 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2113 	int ret;
2114 
2115 	ret = flow_err(port_id,
2116 		       ops->push(dev, queue_id, error),
2117 		       error);
2118 
2119 	rte_flow_trace_push(port_id, queue_id, ret);
2120 
2121 	return ret;
2122 }
2123 
2124 int
2125 rte_flow_pull(uint16_t port_id,
2126 	      uint32_t queue_id,
2127 	      struct rte_flow_op_result res[],
2128 	      uint16_t n_res,
2129 	      struct rte_flow_error *error)
2130 {
2131 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2132 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2133 	int ret;
2134 	int rc;
2135 
2136 	ret = ops->pull(dev, queue_id, res, n_res, error);
2137 	rc = ret ? ret : flow_err(port_id, ret, error);
2138 
2139 	rte_flow_trace_pull(port_id, queue_id, res, n_res, rc);
2140 
2141 	return rc;
2142 }
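
/*
 * Usage sketch for rte_flow_pull() (illustrative; handle_result() is a
 * stand-in for application logic): drain up to 64 completions from
 * queue 0; each result returns the user_data supplied at enqueue time.
 *
 *	struct rte_flow_op_result results[64];
 *	int i, n = rte_flow_pull(0, 0, results, 64, &error);
 *
 *	for (i = 0; i < n; i++)
 *		if (results[i].status != RTE_FLOW_OP_SUCCESS)
 *			handle_result(results[i].user_data);
 */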
2143 
2144 struct rte_flow_action_handle *
2145 rte_flow_async_action_handle_create(uint16_t port_id,
2146 		uint32_t queue_id,
2147 		const struct rte_flow_op_attr *op_attr,
2148 		const struct rte_flow_indir_action_conf *indir_action_conf,
2149 		const struct rte_flow_action *action,
2150 		void *user_data,
2151 		struct rte_flow_error *error)
2152 {
2153 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2154 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2155 	struct rte_flow_action_handle *handle;
2156 
2157 	handle = ops->async_action_handle_create(dev, queue_id, op_attr,
2158 					     indir_action_conf, action, user_data, error);
2159 	if (handle == NULL)
2160 		flow_err(port_id, -rte_errno, error);
2161 
2162 	rte_flow_trace_async_action_handle_create(port_id, queue_id, op_attr,
2163 						  indir_action_conf, action,
2164 						  user_data, handle);
2165 
2166 	return handle;
2167 }
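
/*
 * Usage sketch for rte_flow_async_action_handle_create() (illustrative;
 * "op_attr" is as in the async-create sketch): create a shared counter
 * on flow queue 0 that rules may then reference through an INDIRECT
 * action.
 *
 *	struct rte_flow_indir_action_conf conf = { .ingress = 1 };
 *	struct rte_flow_action count = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *	};
 *	struct rte_flow_action_handle *handle =
 *		rte_flow_async_action_handle_create(0, 0, &op_attr, &conf,
 *						    &count, NULL, &error);
 */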
2168 
2169 int
2170 rte_flow_async_action_handle_destroy(uint16_t port_id,
2171 		uint32_t queue_id,
2172 		const struct rte_flow_op_attr *op_attr,
2173 		struct rte_flow_action_handle *action_handle,
2174 		void *user_data,
2175 		struct rte_flow_error *error)
2176 {
2177 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2178 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2179 	int ret;
2180 
2181 	ret = ops->async_action_handle_destroy(dev, queue_id, op_attr,
2182 					   action_handle, user_data, error);
2183 	ret = flow_err(port_id, ret, error);
2184 
2185 	rte_flow_trace_async_action_handle_destroy(port_id, queue_id, op_attr,
2186 						   action_handle, user_data, ret);
2187 
2188 	return ret;
2189 }
2190 
2191 int
2192 rte_flow_async_action_handle_update(uint16_t port_id,
2193 		uint32_t queue_id,
2194 		const struct rte_flow_op_attr *op_attr,
2195 		struct rte_flow_action_handle *action_handle,
2196 		const void *update,
2197 		void *user_data,
2198 		struct rte_flow_error *error)
2199 {
2200 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2201 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2202 	int ret;
2203 
2204 	ret = ops->async_action_handle_update(dev, queue_id, op_attr,
2205 					  action_handle, update, user_data, error);
2206 	ret = flow_err(port_id, ret, error);
2207 
2208 	rte_flow_trace_async_action_handle_update(port_id, queue_id, op_attr,
2209 						  action_handle, update,
2210 						  user_data, ret);
2211 
2212 	return ret;
2213 }
2214 
2215 int
2216 rte_flow_async_action_handle_query(uint16_t port_id,
2217 		uint32_t queue_id,
2218 		const struct rte_flow_op_attr *op_attr,
2219 		const struct rte_flow_action_handle *action_handle,
2220 		void *data,
2221 		void *user_data,
2222 		struct rte_flow_error *error)
2223 {
2224 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2225 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2226 	int ret;
2227 
2228 	ret = ops->async_action_handle_query(dev, queue_id, op_attr,
2229 					  action_handle, data, user_data, error);
2230 	ret = flow_err(port_id, ret, error);
2231 
2232 	rte_flow_trace_async_action_handle_query(port_id, queue_id, op_attr,
2233 						 action_handle, data, user_data,
2234 						 ret);
2235 
2236 	return ret;
2237 }
2238 
2239 int
2240 rte_flow_action_handle_query_update(uint16_t port_id,
2241 				    struct rte_flow_action_handle *handle,
2242 				    const void *update, void *query,
2243 				    enum rte_flow_query_update_mode mode,
2244 				    struct rte_flow_error *error)
2245 {
2246 	int ret;
2247 	struct rte_eth_dev *dev;
2248 	const struct rte_flow_ops *ops;
2249 
2250 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2251 	if (!handle)
2252 		return -EINVAL;
2253 	if (!update && !query)
2254 		return -EINVAL;
2255 	dev = &rte_eth_devices[port_id];
2256 	ops = rte_flow_ops_get(port_id, error);
2257 	if (!ops || !ops->action_handle_query_update)
2258 		return -ENOTSUP;
2259 	ret = ops->action_handle_query_update(dev, handle, update,
2260 					      query, mode, error);
2261 	return flow_err(port_id, ret, error);
2262 }
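
/*
 * Usage sketch for rte_flow_action_handle_query_update() (illustrative;
 * "handle" is assumed to be an indirect COUNT action). Query-only use
 * is valid: the checks above reject only the case where both update
 * and query are NULL.
 *
 *	struct rte_flow_query_count counters = { 0 };
 *	int ret = rte_flow_action_handle_query_update(0, handle, NULL,
 *						      &counters,
 *						      RTE_FLOW_QU_QUERY_FIRST,
 *						      &error);
 */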
2263 
2264 int
2265 rte_flow_async_action_handle_query_update(uint16_t port_id, uint32_t queue_id,
2266 					  const struct rte_flow_op_attr *attr,
2267 					  struct rte_flow_action_handle *handle,
2268 					  const void *update, void *query,
2269 					  enum rte_flow_query_update_mode mode,
2270 					  void *user_data,
2271 					  struct rte_flow_error *error)
2272 {
2273 	int ret;
2274 	struct rte_eth_dev *dev;
2275 	const struct rte_flow_ops *ops;
2276 
2277 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2278 	if (!handle)
2279 		return -EINVAL;
2280 	if (!update && !query)
2281 		return -EINVAL;
2282 	dev = &rte_eth_devices[port_id];
2283 	ops = rte_flow_ops_get(port_id, error);
2284 	if (!ops || !ops->async_action_handle_query_update)
2285 		return -ENOTSUP;
2286 	ret = ops->async_action_handle_query_update(dev, queue_id, attr,
2287 						    handle, update,
2288 						    query, mode,
2289 						    user_data, error);
2290 	return flow_err(port_id, ret, error);
2291 }
2292 
2293 struct rte_flow_action_list_handle *
2294 rte_flow_action_list_handle_create(uint16_t port_id,
2295 				   const struct rte_flow_indir_action_conf *conf,
2297 				   const struct rte_flow_action *actions,
2298 				   struct rte_flow_error *error)
2299 {
2300 	int ret;
2301 	struct rte_eth_dev *dev;
2302 	const struct rte_flow_ops *ops;
2303 	struct rte_flow_action_list_handle *handle;
2304 
2305 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
2306 	ops = rte_flow_ops_get(port_id, error);
2307 	if (!ops || !ops->action_list_handle_create) {
2308 		rte_flow_error_set(error, ENOTSUP,
2309 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2310 				   "action_list handle not supported");
2311 		return NULL;
2312 	}
2313 	dev = &rte_eth_devices[port_id];
2314 	handle = ops->action_list_handle_create(dev, conf, actions, error);
2315 	ret = handle == NULL ? flow_err(port_id, -rte_errno, error) : 0;
2316 	rte_flow_trace_action_list_handle_create(port_id, conf, actions, ret);
2317 	return handle;
2318 }
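
/*
 * Usage sketch for rte_flow_action_list_handle_create() (illustrative;
 * "queue" is as in the actions-template sketch): bundle several actions
 * behind one shared handle, applied as a unit wherever referenced.
 *
 *	struct rte_flow_indir_action_conf conf = { .ingress = 1 };
 *	struct rte_flow_action list[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_action_list_handle *lh =
 *		rte_flow_action_list_handle_create(0, &conf, list, &error);
 */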
2319 
2320 int
2321 rte_flow_action_list_handle_destroy(uint16_t port_id,
2322 				    struct rte_flow_action_list_handle *handle,
2323 				    struct rte_flow_error *error)
2324 {
2325 	int ret;
2326 	struct rte_eth_dev *dev;
2327 	const struct rte_flow_ops *ops;
2328 
2329 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2330 	ops = rte_flow_ops_get(port_id, error);
2331 	if (!ops || !ops->action_list_handle_destroy)
2332 		return rte_flow_error_set(error, ENOTSUP,
2333 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2334 					  "action_list handle not supported");
2335 	dev = &rte_eth_devices[port_id];
2336 	ret = ops->action_list_handle_destroy(dev, handle, error);
2337 	ret = flow_err(port_id, ret, error);
2338 	rte_flow_trace_action_list_handle_destroy(port_id, handle, ret);
2339 	return ret;
2340 }
2341 
2342 struct rte_flow_action_list_handle *
2343 rte_flow_async_action_list_handle_create(uint16_t port_id, uint32_t queue_id,
2344 					 const struct rte_flow_op_attr *attr,
2345 					 const struct rte_flow_indir_action_conf *conf,
2346 					 const struct rte_flow_action *actions,
2347 					 void *user_data,
2348 					 struct rte_flow_error *error)
2349 {
2350 	int ret;
2351 	struct rte_eth_dev *dev;
2352 	const struct rte_flow_ops *ops;
2353 	struct rte_flow_action_list_handle *handle;
2354 
2355 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
2356 	ops = rte_flow_ops_get(port_id, error);
2357 	if (!ops || !ops->async_action_list_handle_create) {
2358 		rte_flow_error_set(error, ENOTSUP,
2359 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2360 				   "action_list handle not supported");
2361 		return NULL;
2362 	}
2363 	dev = &rte_eth_devices[port_id];
2364 	handle = ops->async_action_list_handle_create(dev, queue_id, attr, conf,
2365 						      actions, user_data,
2366 						      error);
2367 	ret = handle == NULL ? flow_err(port_id, -rte_errno, error) : 0;
2368 	rte_flow_trace_async_action_list_handle_create(port_id, queue_id, attr,
2369 						       conf, actions, user_data,
2370 						       ret);
2371 	return handle;
2372 }
2373 
2374 int
2375 rte_flow_async_action_list_handle_destroy(uint16_t port_id, uint32_t queue_id,
2376 				 const struct rte_flow_op_attr *op_attr,
2377 				 struct rte_flow_action_list_handle *handle,
2378 				 void *user_data, struct rte_flow_error *error)
2379 {
2380 	int ret;
2381 	struct rte_eth_dev *dev;
2382 	const struct rte_flow_ops *ops;
2383 
2384 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2385 	ops = rte_flow_ops_get(port_id, error);
2386 	if (!ops || !ops->async_action_list_handle_destroy)
2387 		return rte_flow_error_set(error, ENOTSUP,
2388 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2389 					  "async action_list handle not supported");
2390 	dev = &rte_eth_devices[port_id];
2391 	ret = ops->async_action_list_handle_destroy(dev, queue_id, op_attr,
2392 						    handle, user_data, error);
2393 	ret = flow_err(port_id, ret, error);
2394 	rte_flow_trace_async_action_list_handle_destroy(port_id, queue_id,
2395 							op_attr, handle,
2396 							user_data, ret);
2397 	return ret;
2398 }
2399 
2400 int
2401 rte_flow_action_list_handle_query_update(uint16_t port_id,
2402 			 const struct rte_flow_action_list_handle *handle,
2403 			 const void **update, void **query,
2404 			 enum rte_flow_query_update_mode mode,
2405 			 struct rte_flow_error *error)
2406 {
2407 	int ret;
2408 	struct rte_eth_dev *dev;
2409 	const struct rte_flow_ops *ops;
2410 
2411 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2412 	ops = rte_flow_ops_get(port_id, error);
2413 	if (!ops || !ops->action_list_handle_query_update)
2414 		return rte_flow_error_set(error, ENOTSUP,
2415 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2416 					  "action_list query_update not supported");
2417 	dev = &rte_eth_devices[port_id];
2418 	ret = ops->action_list_handle_query_update(dev, handle, update, query,
2419 						   mode, error);
2420 	ret = flow_err(port_id, ret, error);
2421 	rte_flow_trace_action_list_handle_query_update(port_id, handle, update,
2422 						       query, mode, ret);
2423 	return ret;
2424 }
2425 
2426 int
2427 rte_flow_async_action_list_handle_query_update(uint16_t port_id, uint32_t queue_id,
2428 			 const struct rte_flow_op_attr *attr,
2429 			 const struct rte_flow_action_list_handle *handle,
2430 			 const void **update, void **query,
2431 			 enum rte_flow_query_update_mode mode,
2432 			 void *user_data, struct rte_flow_error *error)
2433 {
2434 	int ret;
2435 	struct rte_eth_dev *dev;
2436 	const struct rte_flow_ops *ops;
2437 
2438 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2439 	ops = rte_flow_ops_get(port_id, error);
2440 	if (!ops || !ops->async_action_list_handle_query_update)
2441 		return rte_flow_error_set(error, ENOTSUP,
2442 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2443 					  "action_list async query_update not supported");
2444 	dev = &rte_eth_devices[port_id];
2445 	ret = ops->async_action_list_handle_query_update(dev, queue_id, attr,
2446 							 handle, update, query,
2447 							 mode, user_data,
2448 							 error);
2449 	ret = flow_err(port_id, ret, error);
2450 	rte_flow_trace_async_action_list_handle_query_update(port_id, queue_id,
2451 							     attr, handle,
2452 							     update, query,
2453 							     mode, user_data,
2454 							     ret);
2455 	return ret;
2456 }
2457