/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <pthread.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_branch_prediction.h>
#include <rte_string_fns.h>
#include <rte_mbuf_dyn.h>
#include "rte_ethdev.h"
#include "rte_flow_driver.h"
#include "rte_flow.h"

#include "ethdev_trace.h"

/* Mbuf dynamic field offset for metadata. */
int32_t rte_flow_dynf_metadata_offs = -1;

/* Mbuf dynamic flag mask for metadata. */
uint64_t rte_flow_dynf_metadata_mask;

/**
 * Flow elements description tables.
 */
struct rte_flow_desc_data {
	const char *name;
	size_t size;
	size_t (*desc_fn)(void *dst, const void *src);
};
/**
 * Copy flow element data into a destination buffer.
 *
 * @param buf
 *   Destination memory.
 * @param data
 *   Source memory.
 * @param size
 *   Requested copy size.
 * @param desc
 *   rte_flow_desc_item - for flow item conversion.
 *   rte_flow_desc_action - for flow action conversion.
 * @param type
 *   Offset into the desc param or negative value for private flow elements.
 *
 * @return
 *   Number of bytes needed to store the element regardless of @p size.
 */
static inline size_t
rte_flow_conv_copy(void *buf, const void *data, const size_t size,
		   const struct rte_flow_desc_data *desc, int type)
{
	/* Allow PMD private flow items: a negative type is not looked up. */
	bool rte_type = type >= 0;

	size_t sz = rte_type ? desc[type].size : sizeof(void *);
	if (buf == NULL || data == NULL)
		return 0;
	rte_memcpy(buf, data, (size > sz ? sz : size));
	if (rte_type && desc[type].desc_fn)
		sz += desc[type].desc_fn(size > 0 ? buf : NULL, data);
	return sz;
}

static size_t
rte_flow_item_flex_conv(void *buf, const void *data)
{
	struct rte_flow_item_flex *dst = buf;
	const struct rte_flow_item_flex *src = data;
	if (buf) {
		dst->pattern = rte_memcpy
			((void *)((uintptr_t)(dst + 1)), src->pattern,
			 src->length);
	}
	return src->length;
}

/** Generate flow_item[] entry. */
#define MK_FLOW_ITEM(t, s) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
		.desc_fn = NULL, \
	}

#define MK_FLOW_ITEM_FN(t, s, fn) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
		.desc_fn = fn, \
	}

/** Information about known flow pattern items. */
static const struct rte_flow_desc_data rte_flow_desc_item[] = {
	MK_FLOW_ITEM(END, 0),
	MK_FLOW_ITEM(VOID, 0),
	MK_FLOW_ITEM(INVERT, 0),
	MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
	MK_FLOW_ITEM(PORT_ID, sizeof(struct rte_flow_item_port_id)),
	MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)),
	MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
	MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
	MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
	MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
	MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
	MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
	MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
	MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
	MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
	MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
	MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
	MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
	MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
	MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)),
	MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(ESP, sizeof(struct rte_flow_item_esp)),
	MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
	MK_FLOW_ITEM(VXLAN_GPE, sizeof(struct rte_flow_item_vxlan_gpe)),
	MK_FLOW_ITEM(ARP_ETH_IPV4, sizeof(struct rte_flow_item_arp_eth_ipv4)),
	MK_FLOW_ITEM(IPV6_EXT, sizeof(struct rte_flow_item_ipv6_ext)),
	MK_FLOW_ITEM(IPV6_FRAG_EXT, sizeof(struct rte_flow_item_ipv6_frag_ext)),
	MK_FLOW_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
	MK_FLOW_ITEM(ICMP6_ECHO_REQUEST, sizeof(struct rte_flow_item_icmp6_echo)),
	MK_FLOW_ITEM(ICMP6_ECHO_REPLY, sizeof(struct rte_flow_item_icmp6_echo)),
	MK_FLOW_ITEM(ICMP6_ND_NS, sizeof(struct rte_flow_item_icmp6_nd_ns)),
	MK_FLOW_ITEM(ICMP6_ND_NA, sizeof(struct rte_flow_item_icmp6_nd_na)),
	MK_FLOW_ITEM(ICMP6_ND_OPT, sizeof(struct rte_flow_item_icmp6_nd_opt)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_SLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_TLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
	MK_FLOW_ITEM(MARK, sizeof(struct rte_flow_item_mark)),
	MK_FLOW_ITEM(META, sizeof(struct rte_flow_item_meta)),
	MK_FLOW_ITEM(TAG, sizeof(struct rte_flow_item_tag)),
	MK_FLOW_ITEM(GRE_KEY, sizeof(rte_be32_t)),
	MK_FLOW_ITEM(GRE_OPTION, sizeof(struct rte_flow_item_gre_opt)),
	MK_FLOW_ITEM(GTP_PSC, sizeof(struct rte_flow_item_gtp_psc)),
	MK_FLOW_ITEM(PPPOES, sizeof(struct rte_flow_item_pppoe)),
	MK_FLOW_ITEM(PPPOED, sizeof(struct rte_flow_item_pppoe)),
	MK_FLOW_ITEM(PPPOE_PROTO_ID,
			sizeof(struct rte_flow_item_pppoe_proto_id)),
	MK_FLOW_ITEM(NSH, sizeof(struct rte_flow_item_nsh)),
	MK_FLOW_ITEM(IGMP, sizeof(struct rte_flow_item_igmp)),
	MK_FLOW_ITEM(AH, sizeof(struct rte_flow_item_ah)),
	MK_FLOW_ITEM(HIGIG2, sizeof(struct rte_flow_item_higig2_hdr)),
	MK_FLOW_ITEM(L2TPV3OIP, sizeof(struct rte_flow_item_l2tpv3oip)),
	MK_FLOW_ITEM(PFCP, sizeof(struct rte_flow_item_pfcp)),
	MK_FLOW_ITEM(ECPRI, sizeof(struct rte_flow_item_ecpri)),
	MK_FLOW_ITEM(GENEVE_OPT, sizeof(struct rte_flow_item_geneve_opt)),
	MK_FLOW_ITEM(INTEGRITY, sizeof(struct rte_flow_item_integrity)),
	MK_FLOW_ITEM(CONNTRACK, sizeof(uint32_t)),
	MK_FLOW_ITEM(PORT_REPRESENTOR, sizeof(struct rte_flow_item_ethdev)),
	MK_FLOW_ITEM(REPRESENTED_PORT, sizeof(struct rte_flow_item_ethdev)),
	MK_FLOW_ITEM_FN(FLEX, sizeof(struct rte_flow_item_flex),
			rte_flow_item_flex_conv),
	MK_FLOW_ITEM(L2TPV2, sizeof(struct rte_flow_item_l2tpv2)),
	MK_FLOW_ITEM(PPP, sizeof(struct rte_flow_item_ppp)),
	MK_FLOW_ITEM(METER_COLOR, sizeof(struct rte_flow_item_meter_color)),
	MK_FLOW_ITEM(IPV6_ROUTING_EXT, sizeof(struct rte_flow_item_ipv6_routing_ext)),
	MK_FLOW_ITEM(QUOTA, sizeof(struct rte_flow_item_quota)),
	MK_FLOW_ITEM(AGGR_AFFINITY, sizeof(struct rte_flow_item_aggr_affinity)),
	MK_FLOW_ITEM(TX_QUEUE, sizeof(struct rte_flow_item_tx_queue)),
	MK_FLOW_ITEM(IB_BTH, sizeof(struct rte_flow_item_ib_bth)),
	MK_FLOW_ITEM(PTYPE, sizeof(struct rte_flow_item_ptype)),
};

/** Generate flow_action[] entry. */
#define MK_FLOW_ACTION(t, s) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
		.desc_fn = NULL, \
	}

#define MK_FLOW_ACTION_FN(t, fn) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = 0, \
		.desc_fn = fn, \
	}

/** Information about known flow actions. */
static const struct rte_flow_desc_data rte_flow_desc_action[] = {
	MK_FLOW_ACTION(END, 0),
	MK_FLOW_ACTION(VOID, 0),
	MK_FLOW_ACTION(PASSTHRU, 0),
	MK_FLOW_ACTION(JUMP, sizeof(struct rte_flow_action_jump)),
	MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
	MK_FLOW_ACTION(FLAG, 0),
	MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
	MK_FLOW_ACTION(DROP, 0),
	MK_FLOW_ACTION(COUNT, sizeof(struct rte_flow_action_count)),
	MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)),
	MK_FLOW_ACTION(PF, 0),
	MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
	MK_FLOW_ACTION(PORT_ID, sizeof(struct rte_flow_action_port_id)),
	MK_FLOW_ACTION(METER, sizeof(struct rte_flow_action_meter)),
	MK_FLOW_ACTION(SECURITY, sizeof(struct rte_flow_action_security)),
	MK_FLOW_ACTION(OF_DEC_NW_TTL, 0),
	MK_FLOW_ACTION(OF_POP_VLAN, 0),
	MK_FLOW_ACTION(OF_PUSH_VLAN,
		       sizeof(struct rte_flow_action_of_push_vlan)),
	MK_FLOW_ACTION(OF_SET_VLAN_VID,
		       sizeof(struct rte_flow_action_of_set_vlan_vid)),
	MK_FLOW_ACTION(OF_SET_VLAN_PCP,
		       sizeof(struct rte_flow_action_of_set_vlan_pcp)),
	MK_FLOW_ACTION(OF_POP_MPLS,
		       sizeof(struct rte_flow_action_of_pop_mpls)),
	MK_FLOW_ACTION(OF_PUSH_MPLS,
		       sizeof(struct rte_flow_action_of_push_mpls)),
	MK_FLOW_ACTION(VXLAN_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
	MK_FLOW_ACTION(VXLAN_DECAP, 0),
	MK_FLOW_ACTION(NVGRE_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
	MK_FLOW_ACTION(NVGRE_DECAP, 0),
	MK_FLOW_ACTION(RAW_ENCAP, sizeof(struct rte_flow_action_raw_encap)),
	MK_FLOW_ACTION(RAW_DECAP, sizeof(struct rte_flow_action_raw_decap)),
	MK_FLOW_ACTION(SET_IPV4_SRC,
		       sizeof(struct rte_flow_action_set_ipv4)),
	MK_FLOW_ACTION(SET_IPV4_DST,
		       sizeof(struct rte_flow_action_set_ipv4)),
	MK_FLOW_ACTION(SET_IPV6_SRC,
		       sizeof(struct rte_flow_action_set_ipv6)),
	MK_FLOW_ACTION(SET_IPV6_DST,
		       sizeof(struct rte_flow_action_set_ipv6)),
	MK_FLOW_ACTION(SET_TP_SRC,
		       sizeof(struct rte_flow_action_set_tp)),
	MK_FLOW_ACTION(SET_TP_DST,
		       sizeof(struct rte_flow_action_set_tp)),
	MK_FLOW_ACTION(MAC_SWAP, 0),
	MK_FLOW_ACTION(DEC_TTL, 0),
	MK_FLOW_ACTION(SET_TTL, sizeof(struct rte_flow_action_set_ttl)),
	MK_FLOW_ACTION(SET_MAC_SRC, sizeof(struct rte_flow_action_set_mac)),
	MK_FLOW_ACTION(SET_MAC_DST, sizeof(struct rte_flow_action_set_mac)),
	MK_FLOW_ACTION(INC_TCP_SEQ, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(DEC_TCP_SEQ, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(INC_TCP_ACK, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(DEC_TCP_ACK, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(SET_TAG, sizeof(struct rte_flow_action_set_tag)),
	MK_FLOW_ACTION(SET_META, sizeof(struct rte_flow_action_set_meta)),
	MK_FLOW_ACTION(SET_IPV4_DSCP, sizeof(struct rte_flow_action_set_dscp)),
	MK_FLOW_ACTION(SET_IPV6_DSCP, sizeof(struct rte_flow_action_set_dscp)),
	MK_FLOW_ACTION(AGE, sizeof(struct rte_flow_action_age)),
	MK_FLOW_ACTION(SAMPLE, sizeof(struct rte_flow_action_sample)),
	MK_FLOW_ACTION(MODIFY_FIELD,
		       sizeof(struct rte_flow_action_modify_field)),
	/*
	 * Indirect action represented as a handle of type
	 * (struct rte_flow_action_handle *) stored in the conf field (see
	 * struct rte_flow_action); no additional structure is needed to
	 * store the indirect action handle.
	 */
	MK_FLOW_ACTION(INDIRECT, 0),
	MK_FLOW_ACTION(CONNTRACK, sizeof(struct rte_flow_action_conntrack)),
	MK_FLOW_ACTION(PORT_REPRESENTOR, sizeof(struct rte_flow_action_ethdev)),
	MK_FLOW_ACTION(REPRESENTED_PORT, sizeof(struct rte_flow_action_ethdev)),
	MK_FLOW_ACTION(METER_MARK, sizeof(struct rte_flow_action_meter_mark)),
	MK_FLOW_ACTION(SEND_TO_KERNEL, 0),
	MK_FLOW_ACTION(QUOTA, sizeof(struct rte_flow_action_quota)),
	MK_FLOW_ACTION(IPV6_EXT_PUSH, sizeof(struct rte_flow_action_ipv6_ext_push)),
	MK_FLOW_ACTION(IPV6_EXT_REMOVE, sizeof(struct rte_flow_action_ipv6_ext_remove)),
	MK_FLOW_ACTION(INDIRECT_LIST,
		       sizeof(struct rte_flow_action_indirect_list)),
};

int
rte_flow_dynf_metadata_register(void)
{
	int offset;
	int flag;

	static const struct rte_mbuf_dynfield desc_offs = {
		.name = RTE_MBUF_DYNFIELD_METADATA_NAME,
		.size = sizeof(uint32_t),
		.align = __alignof__(uint32_t),
	};
	static const struct rte_mbuf_dynflag desc_flag = {
		.name = RTE_MBUF_DYNFLAG_METADATA_NAME,
	};

	offset = rte_mbuf_dynfield_register(&desc_offs);
	if (offset < 0)
		goto error;
	flag = rte_mbuf_dynflag_register(&desc_flag);
	if (flag < 0)
		goto error;
	rte_flow_dynf_metadata_offs = offset;
	rte_flow_dynf_metadata_mask = RTE_BIT64(flag);

	rte_flow_trace_dynf_metadata_register(offset, RTE_BIT64(flag));

	return 0;

error:
	rte_flow_dynf_metadata_offs = -1;
	rte_flow_dynf_metadata_mask = UINT64_C(0);
	return -rte_errno;
}
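
/*
 * Usage sketch (illustrative, not part of this file): an application
 * registers the field once at initialization, before creating any flow
 * rule that matches or sets metadata; "m" stands for a received mbuf:
 *
 *	if (rte_flow_dynf_metadata_register() < 0)
 *		rte_exit(EXIT_FAILURE, "no room for metadata dynfield\n");
 *	...
 *	if (m->ol_flags & rte_flow_dynf_metadata_mask)
 *		printf("metadata: %u\n", *RTE_FLOW_DYNF_METADATA(m));
 */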

static inline void
fts_enter(struct rte_eth_dev *dev)
{
	if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
		pthread_mutex_lock(&dev->data->flow_ops_mutex);
}

static inline void
fts_exit(struct rte_eth_dev *dev)
{
	if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
		pthread_mutex_unlock(&dev->data->flow_ops_mutex);
}

static int
flow_err(uint16_t port_id, int ret, struct rte_flow_error *error)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return rte_flow_error_set(error, EIO,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(EIO));
	return ret;
}

/* Get generic flow operations structure from a port. */
const struct rte_flow_ops *
rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops;
	int code;

	if (unlikely(!rte_eth_dev_is_valid_port(port_id)))
		code = ENODEV;
	else if (unlikely(dev->dev_ops->flow_ops_get == NULL))
		/* flow API not supported with this driver dev_ops */
		code = ENOSYS;
	else
		code = dev->dev_ops->flow_ops_get(dev, &ops);
	if (code == 0 && ops == NULL)
		/* flow API not supported with this device */
		code = ENOSYS;

	if (code != 0) {
		rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, rte_strerror(code));
		return NULL;
	}
	return ops;
}

/* Check whether a flow rule can be created on a given port. */
int
rte_flow_validate(uint16_t port_id,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;

	if (likely(!!attr) && attr->transfer &&
	    (attr->ingress || attr->egress)) {
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ATTR,
					  attr, "cannot use attr ingress/egress with attr transfer");
	}

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->validate)) {
		fts_enter(dev);
		ret = ops->validate(dev, attr, pattern, actions, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_validate(port_id, attr, pattern, actions, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Create a flow rule on a given port. */
struct rte_flow *
rte_flow_create(uint16_t port_id,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct rte_flow *flow;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return NULL;
	if (likely(!!ops->create)) {
		fts_enter(dev);
		flow = ops->create(dev, attr, pattern, actions, error);
		fts_exit(dev);
		if (flow == NULL)
			flow_err(port_id, -rte_errno, error);

		rte_flow_trace_create(port_id, attr, pattern, actions, flow);

		return flow;
	}
	rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(ENOSYS));
	return NULL;
}
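
/*
 * Usage sketch (illustrative, not part of this file): validating and then
 * creating a rule that drops all ingress TCP traffic; "port_id" is assumed
 * to be a configured port and error handling is trimmed:
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_TCP },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *flow = NULL;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 */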

/* Destroy a flow rule on a given port. */
int
rte_flow_destroy(uint16_t port_id,
		 struct rte_flow *flow,
		 struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->destroy)) {
		fts_enter(dev);
		ret = ops->destroy(dev, flow, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_destroy(port_id, flow, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

int
rte_flow_actions_update(uint16_t port_id,
			struct rte_flow *flow,
			const struct rte_flow_action actions[],
			struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->actions_update)) {
		fts_enter(dev);
		ret = ops->actions_update(dev, flow, actions, error);
		fts_exit(dev);

		rte_flow_trace_actions_update(port_id, flow, actions, ret);

		return flow_err(port_id, ret, error);
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Destroy all flow rules associated with a port. */
int
rte_flow_flush(uint16_t port_id,
	       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->flush)) {
		fts_enter(dev);
		ret = ops->flush(dev, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_flush(port_id, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Query an existing flow rule. */
int
rte_flow_query(uint16_t port_id,
	       struct rte_flow *flow,
	       const struct rte_flow_action *action,
	       void *data,
	       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (!ops)
		return -rte_errno;
	if (likely(!!ops->query)) {
		fts_enter(dev);
		ret = ops->query(dev, flow, action, data, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_query(port_id, flow, action, data, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}
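
/*
 * Usage sketch (illustrative, not part of this file): querying the COUNT
 * action of a rule that was created with one; "flow" and "port_id" are
 * assumed from the creation step and <inttypes.h> provides PRIu64:
 *
 *	struct rte_flow_action count = { .type = RTE_FLOW_ACTION_TYPE_COUNT };
 *	struct rte_flow_query_count stats = { .reset = 0 };
 *	struct rte_flow_error err;
 *
 *	if (rte_flow_query(port_id, flow, &count, &stats, &err) == 0 &&
 *	    stats.hits_set)
 *		printf("hits: %" PRIu64 "\n", stats.hits);
 */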

/* Restrict ingress traffic to the defined flow rules. */
int
rte_flow_isolate(uint16_t port_id,
		 int set,
		 struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (!ops)
		return -rte_errno;
	if (likely(!!ops->isolate)) {
		fts_enter(dev);
		ret = ops->isolate(dev, set, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_isolate(port_id, set, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Initialize flow error structure. */
int
rte_flow_error_set(struct rte_flow_error *error,
		   int code,
		   enum rte_flow_error_type type,
		   const void *cause,
		   const char *message)
{
	if (error) {
		*error = (struct rte_flow_error){
			.type = type,
			.cause = cause,
			.message = message,
		};
	}
	rte_errno = code;
	return -code;
}
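
/*
 * Callers typically combine the negative return value with the verbose
 * structure filled in above, e.g. (illustrative sketch):
 *
 *	struct rte_flow_error err = { 0 };
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) != 0)
 *		fprintf(stderr, "flow error type %d: %s\n",
 *			err.type, err.message ? err.message : "(none)");
 */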

/** Pattern item specification types. */
enum rte_flow_conv_item_spec_type {
	RTE_FLOW_CONV_ITEM_SPEC,
	RTE_FLOW_CONV_ITEM_LAST,
	RTE_FLOW_CONV_ITEM_MASK,
};

/**
 * Copy pattern item specification.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] item
 *   Pattern item to copy specification from.
 * @param type
 *   Specification selector for either @p spec, @p last or @p mask.
 *
 * @return
 *   Number of bytes needed to store pattern item specification regardless
 *   of @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_item_spec(void *buf, const size_t size,
			const struct rte_flow_item *item,
			enum rte_flow_conv_item_spec_type type)
{
	size_t off;
	const void *data =
		type == RTE_FLOW_CONV_ITEM_SPEC ? item->spec :
		type == RTE_FLOW_CONV_ITEM_LAST ? item->last :
		type == RTE_FLOW_CONV_ITEM_MASK ? item->mask :
		NULL;

	switch (item->type) {
		union {
			const struct rte_flow_item_raw *raw;
		} spec;
		union {
			const struct rte_flow_item_raw *raw;
		} last;
		union {
			const struct rte_flow_item_raw *raw;
		} mask;
		union {
			const struct rte_flow_item_raw *raw;
		} src;
		union {
			struct rte_flow_item_raw *raw;
		} dst;
		size_t tmp;

	case RTE_FLOW_ITEM_TYPE_RAW:
		spec.raw = item->spec;
		last.raw = item->last ? item->last : item->spec;
		mask.raw = item->mask ? item->mask : &rte_flow_item_raw_mask;
		src.raw = data;
		dst.raw = buf;
		rte_memcpy(dst.raw,
			   (&(struct rte_flow_item_raw){
				.relative = src.raw->relative,
				.search = src.raw->search,
				.reserved = src.raw->reserved,
				.offset = src.raw->offset,
				.limit = src.raw->limit,
				.length = src.raw->length,
			   }),
			   size > sizeof(*dst.raw) ? sizeof(*dst.raw) : size);
		off = sizeof(*dst.raw);
		if (type == RTE_FLOW_CONV_ITEM_SPEC ||
		    (type == RTE_FLOW_CONV_ITEM_MASK &&
		     ((spec.raw->length & mask.raw->length) >=
		      (last.raw->length & mask.raw->length))))
			tmp = spec.raw->length & mask.raw->length;
		else
			tmp = last.raw->length & mask.raw->length;
		if (tmp) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.raw->pattern));
			if (size >= off + tmp)
				dst.raw->pattern = rte_memcpy
					((void *)((uintptr_t)dst.raw + off),
					 src.raw->pattern, tmp);
			off += tmp;
		}
		break;
	default:
		off = rte_flow_conv_copy(buf, data, size,
					 rte_flow_desc_item, item->type);
		break;
	}
	return off;
}

/**
 * Copy action configuration.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] action
 *   Action to copy configuration from.
 *
 * @return
 *   Number of bytes needed to store action configuration regardless of
 *   @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_action_conf(void *buf, const size_t size,
			  const struct rte_flow_action *action)
{
	size_t off;

	switch (action->type) {
		union {
			const struct rte_flow_action_rss *rss;
			const struct rte_flow_action_vxlan_encap *vxlan_encap;
			const struct rte_flow_action_nvgre_encap *nvgre_encap;
		} src;
		union {
			struct rte_flow_action_rss *rss;
			struct rte_flow_action_vxlan_encap *vxlan_encap;
			struct rte_flow_action_nvgre_encap *nvgre_encap;
		} dst;
		size_t tmp;
		int ret;

	case RTE_FLOW_ACTION_TYPE_RSS:
		src.rss = action->conf;
		dst.rss = buf;
		rte_memcpy(dst.rss,
			   (&(struct rte_flow_action_rss){
				.func = src.rss->func,
				.level = src.rss->level,
				.types = src.rss->types,
				.key_len = src.rss->key_len,
				.queue_num = src.rss->queue_num,
			   }),
			   size > sizeof(*dst.rss) ? sizeof(*dst.rss) : size);
		off = sizeof(*dst.rss);
		if (src.rss->key_len && src.rss->key) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->key));
			tmp = sizeof(*src.rss->key) * src.rss->key_len;
			if (size >= off + tmp)
				dst.rss->key = rte_memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->key, tmp);
			off += tmp;
		}
		if (src.rss->queue_num) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->queue));
			tmp = sizeof(*src.rss->queue) * src.rss->queue_num;
			if (size >= off + tmp)
				dst.rss->queue = rte_memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->queue, tmp);
			off += tmp;
		}
		break;
	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
	case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
		src.vxlan_encap = action->conf;
		dst.vxlan_encap = buf;
		RTE_BUILD_BUG_ON(sizeof(*src.vxlan_encap) !=
				 sizeof(*src.nvgre_encap) ||
				 offsetof(struct rte_flow_action_vxlan_encap,
					  definition) !=
				 offsetof(struct rte_flow_action_nvgre_encap,
					  definition));
		off = sizeof(*dst.vxlan_encap);
		if (src.vxlan_encap->definition) {
			off = RTE_ALIGN_CEIL
				(off, sizeof(*dst.vxlan_encap->definition));
			ret = rte_flow_conv
				(RTE_FLOW_CONV_OP_PATTERN,
				 (void *)((uintptr_t)dst.vxlan_encap + off),
				 size > off ? size - off : 0,
				 src.vxlan_encap->definition, NULL);
			if (ret < 0)
				return 0;
			if (size >= off + ret)
				dst.vxlan_encap->definition =
					(void *)((uintptr_t)dst.vxlan_encap +
						 off);
			off += ret;
		}
		break;
	default:
		off = rte_flow_conv_copy(buf, action->conf, size,
					 rte_flow_desc_action, action->type);
		break;
	}
	return off;
}

/**
 * Copy a list of pattern items.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source pattern items.
 * @param num
 *   Maximum number of pattern items to process from @p src or 0 to process
 *   the entire list. In both cases, processing stops after
 *   RTE_FLOW_ITEM_TYPE_END is encountered.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store
 *   pattern items regardless of @p size on success (@p dst contents are
 *   truncated to @p size if not large enough), a negative errno value
 *   otherwise and rte_errno is set.
 */
static int
rte_flow_conv_pattern(struct rte_flow_item *dst,
		      const size_t size,
		      const struct rte_flow_item *src,
		      unsigned int num,
		      struct rte_flow_error *error)
{
	uintptr_t data = (uintptr_t)dst;
	size_t off;
	size_t ret;
	unsigned int i;

	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
		/* Allow PMD private flow items. */
		if (((int)src->type >= 0) &&
		    ((size_t)src->type >= RTE_DIM(rte_flow_desc_item) ||
		     !rte_flow_desc_item[src->type].name))
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, src,
				 "cannot convert unknown item type");
		if (size >= off + sizeof(*dst))
			*dst = (struct rte_flow_item){
				.type = src->type,
			};
		off += sizeof(*dst);
		if (!src->type)
			num = i + 1;
	}
	num = i;
	src -= num;
	dst -= num;
	do {
		if (src->spec) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_SPEC);
			if (size && size >= off + ret)
				dst->spec = (void *)(data + off);
			off += ret;
		}
		if (src->last) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_LAST);
			if (size && size >= off + ret)
				dst->last = (void *)(data + off);
			off += ret;
		}
		if (src->mask) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_MASK);
			if (size && size >= off + ret)
				dst->mask = (void *)(data + off);
			off += ret;
		}
		++src;
		++dst;
	} while (--num);
	return off;
}

/**
 * Copy a list of actions.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source actions.
 * @param num
 *   Maximum number of actions to process from @p src or 0 to process the
 *   entire list. In both cases, processing stops after
 *   RTE_FLOW_ACTION_TYPE_END is encountered.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store
 *   actions regardless of @p size on success (@p dst contents are truncated
 *   to @p size if not large enough), a negative errno value otherwise and
 *   rte_errno is set.
 */
static int
rte_flow_conv_actions(struct rte_flow_action *dst,
		      const size_t size,
		      const struct rte_flow_action *src,
		      unsigned int num,
		      struct rte_flow_error *error)
{
	uintptr_t data = (uintptr_t)dst;
	size_t off;
	size_t ret;
	unsigned int i;

	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
		/* Allow PMD private flow actions. */
		if (((int)src->type >= 0) &&
		    ((size_t)src->type >= RTE_DIM(rte_flow_desc_action) ||
		     !rte_flow_desc_action[src->type].name))
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
				 src, "cannot convert unknown action type");
		if (size >= off + sizeof(*dst))
			*dst = (struct rte_flow_action){
				.type = src->type,
			};
		off += sizeof(*dst);
		if (!src->type)
			num = i + 1;
	}
	num = i;
	src -= num;
	dst -= num;
	do {
		if (src->type == RTE_FLOW_ACTION_TYPE_INDIRECT) {
			/*
			 * Indirect action conf fills the indirect action
			 * handle. Copy the action handle directly instead
			 * of duplicating the pointer memory.
			 */
			if (size)
				dst->conf = src->conf;
		} else if (src->conf) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_action_conf
				((void *)(data + off),
				 size > off ? size - off : 0, src);
			if (size && size >= off + ret)
				dst->conf = (void *)(data + off);
			off += ret;
		}
		++src;
		++dst;
	} while (--num);
	return off;
}

/**
 * Copy flow rule components.
 *
 * This comprises the flow rule descriptor itself, attributes, pattern and
 * actions list. NULL components in @p src are skipped.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source flow rule descriptor.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store all
 *   components including the descriptor regardless of @p size on success
 *   (@p dst contents are truncated to @p size if not large enough), a
 *   negative errno value otherwise and rte_errno is set.
 */
static int
rte_flow_conv_rule(struct rte_flow_conv_rule *dst,
		   const size_t size,
		   const struct rte_flow_conv_rule *src,
		   struct rte_flow_error *error)
{
	size_t off;
	int ret;

	rte_memcpy(dst,
		   (&(struct rte_flow_conv_rule){
			.attr = NULL,
			.pattern = NULL,
			.actions = NULL,
		   }),
		   size > sizeof(*dst) ? sizeof(*dst) : size);
	off = sizeof(*dst);
	if (src->attr_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		if (size && size >= off + sizeof(*dst->attr))
			dst->attr = rte_memcpy
				((void *)((uintptr_t)dst + off),
				 src->attr_ro, sizeof(*dst->attr));
		off += sizeof(*dst->attr);
	}
	if (src->pattern_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		ret = rte_flow_conv_pattern((void *)((uintptr_t)dst + off),
					    size > off ? size - off : 0,
					    src->pattern_ro, 0, error);
		if (ret < 0)
			return ret;
		if (size && size >= off + (size_t)ret)
			dst->pattern = (void *)((uintptr_t)dst + off);
		off += ret;
	}
	if (src->actions_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		ret = rte_flow_conv_actions((void *)((uintptr_t)dst + off),
					    size > off ? size - off : 0,
					    src->actions_ro, 0, error);
		if (ret < 0)
			return ret;
		if (size >= off + (size_t)ret)
			dst->actions = (void *)((uintptr_t)dst + off);
		off += ret;
	}
	return off;
}

/**
 * Retrieve the name of a pattern item/action type.
 *
 * @param is_action
 *   Nonzero when @p src represents an action type instead of a pattern item
 *   type.
 * @param is_ptr
 *   Nonzero to write string address instead of contents into @p dst.
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Depending on @p is_action, source pattern item or action type cast as a
 *   pointer.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store the
 *   name or its address regardless of @p size on success (@p dst contents
 *   are truncated to @p size if not large enough), a negative errno value
 *   otherwise and rte_errno is set.
 */
static int
rte_flow_conv_name(int is_action,
		   int is_ptr,
		   char *dst,
		   const size_t size,
		   const void *src,
		   struct rte_flow_error *error)
{
	struct desc_info {
		const struct rte_flow_desc_data *data;
		size_t num;
	};
	static const struct desc_info info_rep[2] = {
		{ rte_flow_desc_item, RTE_DIM(rte_flow_desc_item), },
		{ rte_flow_desc_action, RTE_DIM(rte_flow_desc_action), },
	};
	const struct desc_info *const info = &info_rep[!!is_action];
	unsigned int type = (uintptr_t)src;

	if (type >= info->num)
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			 "unknown object type to retrieve the name of");
	if (!is_ptr)
		return strlcpy(dst, info->data[type].name, size);
	if (size >= sizeof(const char **))
		*((const char **)dst) = info->data[type].name;
	return sizeof(const char **);
}

/** Helper function to convert flow API objects. */
int
rte_flow_conv(enum rte_flow_conv_op op,
	      void *dst,
	      size_t size,
	      const void *src,
	      struct rte_flow_error *error)
{
	int ret;

	switch (op) {
		const struct rte_flow_attr *attr;

	case RTE_FLOW_CONV_OP_NONE:
		ret = 0;
		break;
	case RTE_FLOW_CONV_OP_ATTR:
		attr = src;
		if (size > sizeof(*attr))
			size = sizeof(*attr);
		rte_memcpy(dst, attr, size);
		ret = sizeof(*attr);
		break;
	case RTE_FLOW_CONV_OP_ITEM:
		ret = rte_flow_conv_pattern(dst, size, src, 1, error);
		break;
	case RTE_FLOW_CONV_OP_ACTION:
		ret = rte_flow_conv_actions(dst, size, src, 1, error);
		break;
	case RTE_FLOW_CONV_OP_PATTERN:
		ret = rte_flow_conv_pattern(dst, size, src, 0, error);
		break;
	case RTE_FLOW_CONV_OP_ACTIONS:
		ret = rte_flow_conv_actions(dst, size, src, 0, error);
		break;
	case RTE_FLOW_CONV_OP_RULE:
		ret = rte_flow_conv_rule(dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ITEM_NAME:
		ret = rte_flow_conv_name(0, 0, dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ACTION_NAME:
		ret = rte_flow_conv_name(1, 0, dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ITEM_NAME_PTR:
		ret = rte_flow_conv_name(0, 1, dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ACTION_NAME_PTR:
		ret = rte_flow_conv_name(1, 1, dst, size, src, error);
		break;
	default:
		ret = rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			 "unknown object conversion operation");
	}

	rte_flow_trace_conv(op, dst, size, src, ret);

	return ret;
}
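
/*
 * Usage sketch (illustrative, not part of this file): retrieving the name
 * of a pattern item type through the name-pointer operation, as testpmd
 * does:
 *
 *	const char *name;
 *
 *	if (rte_flow_conv(RTE_FLOW_CONV_OP_ITEM_NAME_PTR, &name, sizeof(name),
 *			  (void *)(uintptr_t)RTE_FLOW_ITEM_TYPE_VXLAN,
 *			  NULL) > 0)
 *		printf("item: %s\n", name);
 */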

/** Store a full rte_flow description. */
size_t
rte_flow_copy(struct rte_flow_desc *desc, size_t len,
	      const struct rte_flow_attr *attr,
	      const struct rte_flow_item *items,
	      const struct rte_flow_action *actions)
{
	/*
	 * Overlap struct rte_flow_conv_rule with struct rte_flow_desc in
	 * order to convert the former to the latter without wasting space.
	 */
	struct rte_flow_conv_rule *dst =
		len ?
		(void *)((uintptr_t)desc +
			 (offsetof(struct rte_flow_desc, actions) -
			  offsetof(struct rte_flow_conv_rule, actions))) :
		NULL;
	size_t dst_size =
		len > sizeof(*desc) - sizeof(*dst) ?
		len - (sizeof(*desc) - sizeof(*dst)) :
		0;
	struct rte_flow_conv_rule src = {
		.attr_ro = NULL,
		.pattern_ro = items,
		.actions_ro = actions,
	};
	int ret;

	RTE_BUILD_BUG_ON(sizeof(struct rte_flow_desc) <
			 sizeof(struct rte_flow_conv_rule));
	if (dst_size &&
	    (&dst->pattern != &desc->items ||
	     &dst->actions != &desc->actions ||
	     (uintptr_t)(dst + 1) != (uintptr_t)(desc + 1))) {
		rte_errno = EINVAL;
		return 0;
	}
	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, dst, dst_size, &src, NULL);
	if (ret < 0)
		return 0;
	ret += sizeof(*desc) - sizeof(*dst);
	rte_memcpy(desc,
		   (&(struct rte_flow_desc){
			.size = ret,
			.attr = *attr,
			.items = dst_size ? dst->pattern : NULL,
			.actions = dst_size ? dst->actions : NULL,
		   }),
		   len > sizeof(*desc) ? sizeof(*desc) : len);

	rte_flow_trace_copy(desc, len, attr, items, actions, ret);

	return ret;
}
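
/*
 * Usage sketch (illustrative, not part of this file): a first call with a
 * bare descriptor returns the total size needed, a second call into a
 * buffer of that size performs the full copy:
 *
 *	struct rte_flow_desc tmp, *desc;
 *	size_t len = rte_flow_copy(&tmp, sizeof(tmp), &attr, pattern, actions);
 *
 *	desc = malloc(len);
 *	if (desc != NULL)
 *		rte_flow_copy(desc, len, &attr, pattern, actions);
 */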

int
rte_flow_dev_dump(uint16_t port_id, struct rte_flow *flow,
			FILE *file, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->dev_dump)) {
		fts_enter(dev);
		ret = ops->dev_dump(dev, flow, file, error);
		fts_exit(dev);
		return flow_err(port_id, ret, error);
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

int
rte_flow_get_aged_flows(uint16_t port_id, void **contexts,
		    uint32_t nb_contexts, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->get_aged_flows)) {
		fts_enter(dev);
		ret = ops->get_aged_flows(dev, contexts, nb_contexts, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_get_aged_flows(port_id, contexts, nb_contexts, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}
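
/*
 * Usage sketch (illustrative, not part of this file): collecting contexts
 * of rules created with an AGE action once they have timed out; each
 * returned entry is the context pointer supplied in
 * struct rte_flow_action_age at rule creation time:
 *
 *	void *contexts[64];
 *	int n = rte_flow_get_aged_flows(port_id, contexts,
 *					RTE_DIM(contexts), &err);
 */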

int
rte_flow_get_q_aged_flows(uint16_t port_id, uint32_t queue_id, void **contexts,
			  uint32_t nb_contexts, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->get_q_aged_flows)) {
		fts_enter(dev);
		ret = ops->get_q_aged_flows(dev, queue_id, contexts,
					    nb_contexts, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_get_q_aged_flows(port_id, queue_id, contexts,
						nb_contexts, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

struct rte_flow_action_handle *
rte_flow_action_handle_create(uint16_t port_id,
			      const struct rte_flow_indir_action_conf *conf,
			      const struct rte_flow_action *action,
			      struct rte_flow_error *error)
{
	struct rte_flow_action_handle *handle;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return NULL;
	if (unlikely(!ops->action_handle_create)) {
		rte_flow_error_set(error, ENOSYS,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   rte_strerror(ENOSYS));
		return NULL;
	}
	handle = ops->action_handle_create(&rte_eth_devices[port_id],
					   conf, action, error);
	if (handle == NULL)
		flow_err(port_id, -rte_errno, error);

	rte_flow_trace_action_handle_create(port_id, conf, action, handle);

	return handle;
}

int
rte_flow_action_handle_destroy(uint16_t port_id,
			       struct rte_flow_action_handle *handle,
			       struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(!ops->action_handle_destroy))
		return rte_flow_error_set(error, ENOSYS,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOSYS));
	ret = ops->action_handle_destroy(&rte_eth_devices[port_id],
					 handle, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_action_handle_destroy(port_id, handle, ret);

	return ret;
}

int
rte_flow_action_handle_update(uint16_t port_id,
			      struct rte_flow_action_handle *handle,
			      const void *update,
			      struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(!ops->action_handle_update))
		return rte_flow_error_set(error, ENOSYS,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOSYS));
	ret = ops->action_handle_update(&rte_eth_devices[port_id], handle,
					update, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_action_handle_update(port_id, handle, update, ret);

	return ret;
}

int
rte_flow_action_handle_query(uint16_t port_id,
			     const struct rte_flow_action_handle *handle,
			     void *data,
			     struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(!ops->action_handle_query))
		return rte_flow_error_set(error, ENOSYS,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOSYS));
	ret = ops->action_handle_query(&rte_eth_devices[port_id], handle,
				       data, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_action_handle_query(port_id, handle, data, ret);

	return ret;
}

int
rte_flow_tunnel_decap_set(uint16_t port_id,
			  struct rte_flow_tunnel *tunnel,
			  struct rte_flow_action **actions,
			  uint32_t *num_of_actions,
			  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_decap_set)) {
		ret = flow_err(port_id,
			       ops->tunnel_decap_set(dev, tunnel, actions,
						     num_of_actions, error),
			       error);

		rte_flow_trace_tunnel_decap_set(port_id, tunnel, actions,
						num_of_actions, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_tunnel_match(uint16_t port_id,
		      struct rte_flow_tunnel *tunnel,
		      struct rte_flow_item **items,
		      uint32_t *num_of_items,
		      struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_match)) {
		ret = flow_err(port_id,
			       ops->tunnel_match(dev, tunnel, items,
						 num_of_items, error),
			       error);

		rte_flow_trace_tunnel_match(port_id, tunnel, items, num_of_items,
					    ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_get_restore_info(uint16_t port_id,
			  struct rte_mbuf *m,
			  struct rte_flow_restore_info *restore_info,
			  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->get_restore_info)) {
		ret = flow_err(port_id,
			       ops->get_restore_info(dev, m, restore_info,
						     error),
			       error);

		rte_flow_trace_get_restore_info(port_id, m, restore_info, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

static struct {
	const struct rte_mbuf_dynflag desc;
	uint64_t value;
} flow_restore_info_dynflag = {
	.desc = { .name = "RTE_MBUF_F_RX_RESTORE_INFO", },
};

uint64_t
rte_flow_restore_info_dynflag(void)
{
	return flow_restore_info_dynflag.value;
}

int
rte_flow_restore_info_dynflag_register(void)
{
	if (flow_restore_info_dynflag.value == 0) {
		int offset = rte_mbuf_dynflag_register(&flow_restore_info_dynflag.desc);

		if (offset < 0)
			return -1;
		flow_restore_info_dynflag.value = RTE_BIT64(offset);
	}

	return 0;
}
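
/*
 * Usage sketch (illustrative, not part of this file): once the flag is
 * registered, it indicates on Rx which packets may carry restore
 * information:
 *
 *	uint64_t restore_flag = rte_flow_restore_info_dynflag();
 *	struct rte_flow_restore_info info;
 *
 *	if ((m->ol_flags & restore_flag) != 0 &&
 *	    rte_flow_get_restore_info(port_id, m, &info, &err) == 0)
 *		handle_partially_offloaded(m, &info);
 */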

int
rte_flow_tunnel_action_decap_release(uint16_t port_id,
				     struct rte_flow_action *actions,
				     uint32_t num_of_actions,
				     struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_action_decap_release)) {
		ret = flow_err(port_id,
			       ops->tunnel_action_decap_release(dev, actions,
								num_of_actions,
								error),
			       error);

		rte_flow_trace_tunnel_action_decap_release(port_id, actions,
							   num_of_actions, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_tunnel_item_release(uint16_t port_id,
			     struct rte_flow_item *items,
			     uint32_t num_of_items,
			     struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_item_release)) {
		ret = flow_err(port_id,
			       ops->tunnel_item_release(dev, items,
							num_of_items, error),
			       error);

		rte_flow_trace_tunnel_item_release(port_id, items, num_of_items, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_pick_transfer_proxy(uint16_t port_id, uint16_t *proxy_port_id,
			     struct rte_flow_error *error)
{
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_eth_dev *dev;
	int ret;

	if (unlikely(ops == NULL))
		return -rte_errno;

	if (ops->pick_transfer_proxy == NULL) {
		*proxy_port_id = port_id;
		return 0;
	}

	dev = &rte_eth_devices[port_id];

	ret = flow_err(port_id,
		       ops->pick_transfer_proxy(dev, proxy_port_id, error),
		       error);

	rte_flow_trace_pick_transfer_proxy(port_id, proxy_port_id, ret);

	return ret;
}
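
/*
 * Usage sketch (illustrative, not part of this file): transfer rules must
 * be created on the proxy port rather than on the port itself:
 *
 *	struct rte_flow_attr attr = { .transfer = 1 };
 *	uint16_t proxy_id;
 *
 *	if (rte_flow_pick_transfer_proxy(port_id, &proxy_id, &err) == 0)
 *		flow = rte_flow_create(proxy_id, &attr, pattern, actions,
 *				       &err);
 */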

struct rte_flow_item_flex_handle *
rte_flow_flex_item_create(uint16_t port_id,
			  const struct rte_flow_item_flex_conf *conf,
			  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow_item_flex_handle *handle;

	if (unlikely(!ops))
		return NULL;
	if (unlikely(!ops->flex_item_create)) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, rte_strerror(ENOTSUP));
		return NULL;
	}
	handle = ops->flex_item_create(dev, conf, error);
	if (handle == NULL)
		flow_err(port_id, -rte_errno, error);

	rte_flow_trace_flex_item_create(port_id, conf, handle);

	return handle;
}

int
rte_flow_flex_item_release(uint16_t port_id,
			   const struct rte_flow_item_flex_handle *handle,
			   struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops || !ops->flex_item_release))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOTSUP));
	ret = ops->flex_item_release(dev, handle, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_flex_item_release(port_id, handle, ret);

	return ret;
}

int
rte_flow_info_get(uint16_t port_id,
		  struct rte_flow_port_info *port_info,
		  struct rte_flow_queue_info *queue_info,
		  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (dev->data->dev_configured == 0) {
		RTE_FLOW_LOG(INFO,
			"Device with port_id=%"PRIu16" is not configured.\n",
			port_id);
		return -EINVAL;
	}
	if (port_info == NULL) {
		RTE_FLOW_LOG(ERR, "Port %"PRIu16" info is NULL.\n", port_id);
		return -EINVAL;
	}
	if (likely(!!ops->info_get)) {
		ret = flow_err(port_id,
			       ops->info_get(dev, port_info, queue_info, error),
			       error);

		rte_flow_trace_info_get(port_id, port_info, queue_info, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_configure(uint16_t port_id,
		   const struct rte_flow_port_attr *port_attr,
		   uint16_t nb_queue,
		   const struct rte_flow_queue_attr *queue_attr[],
		   struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (dev->data->dev_configured == 0) {
		RTE_FLOW_LOG(INFO,
			"Device with port_id=%"PRIu16" is not configured.\n",
			port_id);
		return -EINVAL;
	}
	if (dev->data->dev_started != 0) {
		RTE_FLOW_LOG(INFO,
			"Device with port_id=%"PRIu16" already started.\n",
			port_id);
		return -EINVAL;
	}
	if (port_attr == NULL) {
		RTE_FLOW_LOG(ERR, "Port %"PRIu16" info is NULL.\n", port_id);
		return -EINVAL;
	}
	if (queue_attr == NULL) {
		RTE_FLOW_LOG(ERR, "Port %"PRIu16" queue info is NULL.\n", port_id);
		return -EINVAL;
	}
	if ((port_attr->flags & RTE_FLOW_PORT_FLAG_SHARE_INDIRECT) &&
	     !rte_eth_dev_is_valid_port(port_attr->host_port_id)) {
		return rte_flow_error_set(error, ENODEV,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENODEV));
	}
	if (likely(!!ops->configure)) {
		ret = ops->configure(dev, port_attr, nb_queue, queue_attr, error);
		if (ret == 0)
			dev->data->flow_configured = 1;
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_configure(port_id, port_attr, nb_queue, queue_attr, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}
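
/*
 * Usage sketch (illustrative, not part of this file): querying capabilities
 * and then sizing the pre-allocated flow engine after rte_eth_dev_configure()
 * but before rte_eth_dev_start(); attribute values are arbitrary examples:
 *
 *	struct rte_flow_port_info port_info;
 *	struct rte_flow_queue_info queue_info;
 *	struct rte_flow_port_attr port_attr = { .nb_counters = 1024 };
 *	struct rte_flow_queue_attr q_attr = { .size = 64 };
 *	const struct rte_flow_queue_attr *q_attrs[] = { &q_attr };
 *
 *	if (rte_flow_info_get(port_id, &port_info, &queue_info, &err) == 0)
 *		rte_flow_configure(port_id, &port_attr, 1, q_attrs, &err);
 */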
1692 
1693 struct rte_flow_pattern_template *
1694 rte_flow_pattern_template_create(uint16_t port_id,
1695 		const struct rte_flow_pattern_template_attr *template_attr,
1696 		const struct rte_flow_item pattern[],
1697 		struct rte_flow_error *error)
1698 {
1699 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1700 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1701 	struct rte_flow_pattern_template *template;
1702 
1703 	if (unlikely(!ops))
1704 		return NULL;
1705 	if (dev->data->flow_configured == 0) {
1706 		RTE_FLOW_LOG(INFO,
1707 			"Flow engine on port_id=%"PRIu16" is not configured.\n",
1708 			port_id);
1709 		rte_flow_error_set(error, EINVAL,
1710 				RTE_FLOW_ERROR_TYPE_STATE,
1711 				NULL, rte_strerror(EINVAL));
1712 		return NULL;
1713 	}
1714 	if (template_attr == NULL) {
1715 		RTE_FLOW_LOG(ERR,
1716 			     "Port %"PRIu16" template attr is NULL.\n",
1717 			     port_id);
1718 		rte_flow_error_set(error, EINVAL,
1719 				   RTE_FLOW_ERROR_TYPE_ATTR,
1720 				   NULL, rte_strerror(EINVAL));
1721 		return NULL;
1722 	}
1723 	if (pattern == NULL) {
1724 		RTE_FLOW_LOG(ERR,
1725 			     "Port %"PRIu16" pattern is NULL.\n",
1726 			     port_id);
1727 		rte_flow_error_set(error, EINVAL,
1728 				   RTE_FLOW_ERROR_TYPE_ATTR,
1729 				   NULL, rte_strerror(EINVAL));
1730 		return NULL;
1731 	}
1732 	if (likely(!!ops->pattern_template_create)) {
1733 		template = ops->pattern_template_create(dev, template_attr,
1734 							pattern, error);
1735 		if (template == NULL)
1736 			flow_err(port_id, -rte_errno, error);
1737 
1738 		rte_flow_trace_pattern_template_create(port_id, template_attr,
1739 						       pattern, template);
1740 
1741 		return template;
1742 	}
1743 	rte_flow_error_set(error, ENOTSUP,
1744 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1745 			   NULL, rte_strerror(ENOTSUP));
1746 	return NULL;
1747 }
1748 
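/*
 * Usage sketch: an ingress ETH/IPV4 match template; item specs and masks
 * are left empty for brevity and the attribute value is illustrative:
 *
 *	struct rte_flow_pattern_template_attr attr = { .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_pattern_template *pattern_tmpl =
 *		rte_flow_pattern_template_create(port_id, &attr, pattern,
 *						 &error);
 */
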
1749 int
1750 rte_flow_pattern_template_destroy(uint16_t port_id,
1751 		struct rte_flow_pattern_template *pattern_template,
1752 		struct rte_flow_error *error)
1753 {
1754 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1755 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1756 	int ret;
1757 
1758 	if (unlikely(!ops))
1759 		return -rte_errno;
1760 	if (unlikely(pattern_template == NULL))
1761 		return 0;
1762 	if (likely(!!ops->pattern_template_destroy)) {
1763 		ret = flow_err(port_id,
1764 			       ops->pattern_template_destroy(dev,
1765 							     pattern_template,
1766 							     error),
1767 			       error);
1768 
1769 		rte_flow_trace_pattern_template_destroy(port_id, pattern_template,
1770 							ret);
1771 
1772 		return ret;
1773 	}
1774 	return rte_flow_error_set(error, ENOTSUP,
1775 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1776 				  NULL, rte_strerror(ENOTSUP));
1777 }
1778 
1779 struct rte_flow_actions_template *
1780 rte_flow_actions_template_create(uint16_t port_id,
1781 			const struct rte_flow_actions_template_attr *template_attr,
1782 			const struct rte_flow_action actions[],
1783 			const struct rte_flow_action masks[],
1784 			struct rte_flow_error *error)
1785 {
1786 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1787 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1788 	struct rte_flow_actions_template *template;
1789 
1790 	if (unlikely(!ops))
1791 		return NULL;
1792 	if (dev->data->flow_configured == 0) {
1793 		RTE_FLOW_LOG(INFO,
1794 			"Flow engine on port_id=%"PRIu16" is not configured.\n",
1795 			port_id);
1796 		rte_flow_error_set(error, EINVAL,
1797 				   RTE_FLOW_ERROR_TYPE_STATE,
1798 				   NULL, rte_strerror(EINVAL));
1799 		return NULL;
1800 	}
1801 	if (template_attr == NULL) {
1802 		RTE_FLOW_LOG(ERR,
1803 			     "Port %"PRIu16" template attr is NULL.\n",
1804 			     port_id);
1805 		rte_flow_error_set(error, EINVAL,
1806 				   RTE_FLOW_ERROR_TYPE_ATTR,
1807 				   NULL, rte_strerror(EINVAL));
1808 		return NULL;
1809 	}
1810 	if (actions == NULL) {
1811 		RTE_FLOW_LOG(ERR,
1812 			     "Port %"PRIu16" actions is NULL.\n",
1813 			     port_id);
1814 		rte_flow_error_set(error, EINVAL,
1815 				   RTE_FLOW_ERROR_TYPE_ATTR,
1816 				   NULL, rte_strerror(EINVAL));
1817 		return NULL;
1818 	}
1819 	if (masks == NULL) {
1820 		RTE_FLOW_LOG(ERR,
1821 			     "Port %"PRIu16" masks is NULL.\n",
1822 			     port_id);
1823 		rte_flow_error_set(error, EINVAL,
1824 				   RTE_FLOW_ERROR_TYPE_ATTR,
1825 				   NULL, rte_strerror(EINVAL));
1826 		return NULL;
1827 	}
1828 	if (likely(!!ops->actions_template_create)) {
1829 		template = ops->actions_template_create(dev, template_attr,
1830 							actions, masks, error);
1831 		if (template == NULL)
1832 			flow_err(port_id, -rte_errno, error);
1833 
1834 		rte_flow_trace_actions_template_create(port_id, template_attr, actions,
1835 						       masks, template);
1836 
1837 		return template;
1838 	}
1839 	rte_flow_error_set(error, ENOTSUP,
1840 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1841 			   NULL, rte_strerror(ENOTSUP));
1842 	return NULL;
1843 }
1844 
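/*
 * Usage sketch: one fully-masked QUEUE action; the masks array mirrors
 * actions and marks which conf fields stay fixed across rules built from
 * this template (queue index 0 is a placeholder):
 *
 *	struct rte_flow_actions_template_attr attr = { .ingress = 1 };
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_action masks[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_actions_template *actions_tmpl =
 *		rte_flow_actions_template_create(port_id, &attr, actions,
 *						 masks, &error);
 */
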
1845 int
1846 rte_flow_actions_template_destroy(uint16_t port_id,
1847 			struct rte_flow_actions_template *actions_template,
1848 			struct rte_flow_error *error)
1849 {
1850 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1851 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1852 	int ret;
1853 
1854 	if (unlikely(!ops))
1855 		return -rte_errno;
1856 	if (unlikely(actions_template == NULL))
1857 		return 0;
1858 	if (likely(!!ops->actions_template_destroy)) {
1859 		ret = flow_err(port_id,
1860 			       ops->actions_template_destroy(dev,
1861 							     actions_template,
1862 							     error),
1863 			       error);
1864 
1865 		rte_flow_trace_actions_template_destroy(port_id, actions_template,
1866 							ret);
1867 
1868 		return ret;
1869 	}
1870 	return rte_flow_error_set(error, ENOTSUP,
1871 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1872 				  NULL, rte_strerror(ENOTSUP));
1873 }
1874 
1875 struct rte_flow_template_table *
1876 rte_flow_template_table_create(uint16_t port_id,
1877 			const struct rte_flow_template_table_attr *table_attr,
1878 			struct rte_flow_pattern_template *pattern_templates[],
1879 			uint8_t nb_pattern_templates,
1880 			struct rte_flow_actions_template *actions_templates[],
1881 			uint8_t nb_actions_templates,
1882 			struct rte_flow_error *error)
1883 {
1884 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1885 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1886 	struct rte_flow_template_table *table;
1887 
1888 	if (unlikely(!ops))
1889 		return NULL;
1890 	if (dev->data->flow_configured == 0) {
1891 		RTE_FLOW_LOG(INFO,
1892 			"Flow engine on port_id=%"PRIu16" is not configured.\n",
1893 			port_id);
1894 		rte_flow_error_set(error, EINVAL,
1895 				   RTE_FLOW_ERROR_TYPE_STATE,
1896 				   NULL, rte_strerror(EINVAL));
1897 		return NULL;
1898 	}
1899 	if (table_attr == NULL) {
1900 		RTE_FLOW_LOG(ERR,
1901 			     "Port %"PRIu16" table attr is NULL.\n",
1902 			     port_id);
1903 		rte_flow_error_set(error, EINVAL,
1904 				   RTE_FLOW_ERROR_TYPE_ATTR,
1905 				   NULL, rte_strerror(EINVAL));
1906 		return NULL;
1907 	}
1908 	if (pattern_templates == NULL) {
1909 		RTE_FLOW_LOG(ERR,
1910 			     "Port %"PRIu16" pattern templates is NULL.\n",
1911 			     port_id);
1912 		rte_flow_error_set(error, EINVAL,
1913 				   RTE_FLOW_ERROR_TYPE_ATTR,
1914 				   NULL, rte_strerror(EINVAL));
1915 		return NULL;
1916 	}
1917 	if (actions_templates == NULL) {
1918 		RTE_FLOW_LOG(ERR,
1919 			     "Port %"PRIu16" actions templates is NULL.\n",
1920 			     port_id);
1921 		rte_flow_error_set(error, EINVAL,
1922 				   RTE_FLOW_ERROR_TYPE_ATTR,
1923 				   NULL, rte_strerror(EINVAL));
1924 		return NULL;
1925 	}
1926 	if (likely(!!ops->template_table_create)) {
1927 		table = ops->template_table_create(dev, table_attr,
1928 					pattern_templates, nb_pattern_templates,
1929 					actions_templates, nb_actions_templates,
1930 					error);
1931 		if (table == NULL)
1932 			flow_err(port_id, -rte_errno, error);
1933 
1934 		rte_flow_trace_template_table_create(port_id, table_attr,
1935 						     pattern_templates,
1936 						     nb_pattern_templates,
1937 						     actions_templates,
1938 						     nb_actions_templates, table);
1939 
1940 		return table;
1941 	}
1942 	rte_flow_error_set(error, ENOTSUP,
1943 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1944 			   NULL, rte_strerror(ENOTSUP));
1945 	return NULL;
1946 }
1947 
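/*
 * Usage sketch, combining the templates created above into a table sized
 * for a hypothetical 1024 rules; group and priority are illustrative:
 *
 *	struct rte_flow_template_table_attr attr = {
 *		.flow_attr = { .group = 1, .ingress = 1 },
 *		.nb_flows = 1024,
 *	};
 *	struct rte_flow_pattern_template *pt[] = { pattern_tmpl };
 *	struct rte_flow_actions_template *at[] = { actions_tmpl };
 *	struct rte_flow_template_table *table =
 *		rte_flow_template_table_create(port_id, &attr, pt, 1, at, 1,
 *					       &error);
 */
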
1948 int
1949 rte_flow_template_table_destroy(uint16_t port_id,
1950 				struct rte_flow_template_table *template_table,
1951 				struct rte_flow_error *error)
1952 {
1953 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1954 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1955 	int ret;
1956 
1957 	if (unlikely(!ops))
1958 		return -rte_errno;
1959 	if (unlikely(template_table == NULL))
1960 		return 0;
1961 	if (likely(!!ops->template_table_destroy)) {
1962 		ret = flow_err(port_id,
1963 			       ops->template_table_destroy(dev,
1964 							   template_table,
1965 							   error),
1966 			       error);
1967 
1968 		rte_flow_trace_template_table_destroy(port_id, template_table,
1969 						      ret);
1970 
1971 		return ret;
1972 	}
1973 	return rte_flow_error_set(error, ENOTSUP,
1974 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1975 				  NULL, rte_strerror(ENOTSUP));
1976 }
1977 
1978 int
1979 rte_flow_group_set_miss_actions(uint16_t port_id,
1980 				uint32_t group_id,
1981 				const struct rte_flow_group_attr *attr,
1982 				const struct rte_flow_action actions[],
1983 				struct rte_flow_error *error)
1984 {
1985 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1986 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1987 
1988 	if (unlikely(!ops))
1989 		return -rte_errno;
1990 	if (likely(!!ops->group_set_miss_actions)) {
1991 		return flow_err(port_id,
1992 				ops->group_set_miss_actions(dev, group_id, attr, actions, error),
1993 				error);
1994 	}
1995 	return rte_flow_error_set(error, ENOTSUP,
1996 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1997 				  NULL, rte_strerror(ENOTSUP));
1998 }
1999 
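/*
 * Usage sketch: drop whatever group 1 does not match; the group id and
 * attribute are illustrative:
 *
 *	struct rte_flow_group_attr attr = { .ingress = 1 };
 *	struct rte_flow_action miss[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	rte_flow_group_set_miss_actions(port_id, 1, &attr, miss, &error);
 */
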
2000 struct rte_flow *
2001 rte_flow_async_create(uint16_t port_id,
2002 		      uint32_t queue_id,
2003 		      const struct rte_flow_op_attr *op_attr,
2004 		      struct rte_flow_template_table *template_table,
2005 		      const struct rte_flow_item pattern[],
2006 		      uint8_t pattern_template_index,
2007 		      const struct rte_flow_action actions[],
2008 		      uint8_t actions_template_index,
2009 		      void *user_data,
2010 		      struct rte_flow_error *error)
2011 {
2012 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2013 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2014 	struct rte_flow *flow;
2015 
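	/*
	 * Fast-path enqueue: unlike the synchronous API above, neither ops
	 * nor the configuration state is validated here; the caller is
	 * assumed to have completed rte_flow_configure() successfully.
	 */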
2016 	flow = ops->async_create(dev, queue_id,
2017 				 op_attr, template_table,
2018 				 pattern, pattern_template_index,
2019 				 actions, actions_template_index,
2020 				 user_data, error);
2021 	if (flow == NULL)
2022 		flow_err(port_id, -rte_errno, error);
2023 
2024 	rte_flow_trace_async_create(port_id, queue_id, op_attr, template_table,
2025 				    pattern, pattern_template_index, actions,
2026 				    actions_template_index, user_data, flow);
2027 
2028 	return flow;
2029 }
2030 
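/*
 * Usage sketch: enqueue a rule built from pattern/actions template 0 of
 * the table; cookie is an arbitrary application tag handed back later by
 * rte_flow_pull(). With .postpone set, nothing is submitted to hardware
 * until rte_flow_push():
 *
 *	struct rte_flow_op_attr op_attr = { .postpone = 1 };
 *	struct rte_flow *flow =
 *		rte_flow_async_create(port_id, queue_id, &op_attr, table,
 *				      pattern, 0, actions, 0, cookie, &error);
 */
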
2031 struct rte_flow *
2032 rte_flow_async_create_by_index(uint16_t port_id,
2033 			       uint32_t queue_id,
2034 			       const struct rte_flow_op_attr *op_attr,
2035 			       struct rte_flow_template_table *template_table,
2036 			       uint32_t rule_index,
2037 			       const struct rte_flow_action actions[],
2038 			       uint8_t actions_template_index,
2039 			       void *user_data,
2040 			       struct rte_flow_error *error)
2041 {
2042 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2043 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2044 	struct rte_flow *flow;
2045 
2046 	flow = ops->async_create_by_index(dev, queue_id,
2047 					  op_attr, template_table, rule_index,
2048 					  actions, actions_template_index,
2049 					  user_data, error);
2050 	if (flow == NULL)
2051 		flow_err(port_id, -rte_errno, error);
2052 	return flow;
2053 }
2054 
2055 int
2056 rte_flow_async_destroy(uint16_t port_id,
2057 		       uint32_t queue_id,
2058 		       const struct rte_flow_op_attr *op_attr,
2059 		       struct rte_flow *flow,
2060 		       void *user_data,
2061 		       struct rte_flow_error *error)
2062 {
2063 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2064 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2065 	int ret;
2066 
2067 	ret = flow_err(port_id,
2068 		       ops->async_destroy(dev, queue_id,
2069 					  op_attr, flow,
2070 					  user_data, error),
2071 		       error);
2072 
2073 	rte_flow_trace_async_destroy(port_id, queue_id, op_attr, flow,
2074 				     user_data, ret);
2075 
2076 	return ret;
2077 }
2078 
2079 int
2080 rte_flow_async_actions_update(uint16_t port_id,
2081 			      uint32_t queue_id,
2082 			      const struct rte_flow_op_attr *op_attr,
2083 			      struct rte_flow *flow,
2084 			      const struct rte_flow_action actions[],
2085 			      uint8_t actions_template_index,
2086 			      void *user_data,
2087 			      struct rte_flow_error *error)
2088 {
2089 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2090 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2091 	int ret;
2092 
2093 	ret = flow_err(port_id,
2094 		       ops->async_actions_update(dev, queue_id, op_attr,
2095 						 flow, actions,
2096 						 actions_template_index,
2097 						 user_data, error),
2098 		       error);
2099 
2100 	rte_flow_trace_async_actions_update(port_id, queue_id, op_attr, flow,
2101 					    actions, actions_template_index,
2102 					    user_data, ret);
2103 
2104 	return ret;
2105 }
2106 
2107 int
2108 rte_flow_push(uint16_t port_id,
2109 	      uint32_t queue_id,
2110 	      struct rte_flow_error *error)
2111 {
2112 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2113 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2114 	int ret;
2115 
2116 	ret = flow_err(port_id,
2117 		       ops->push(dev, queue_id, error),
2118 		       error);
2119 
2120 	rte_flow_trace_push(port_id, queue_id, ret);
2121 
2122 	return ret;
2123 }
2124 
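/*
 * Usage sketch: ring the doorbell once per queue after a batch of
 * postponed operations:
 *
 *	rte_flow_push(port_id, queue_id, &error);
 */
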
2125 int
2126 rte_flow_pull(uint16_t port_id,
2127 	      uint32_t queue_id,
2128 	      struct rte_flow_op_result res[],
2129 	      uint16_t n_res,
2130 	      struct rte_flow_error *error)
2131 {
2132 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2133 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2134 	int ret;
2135 	int rc;
2136 
2137 	ret = ops->pull(dev, queue_id, res, n_res, error);
2138 	rc = ret ? ret : flow_err(port_id, ret, error);
2139 
2140 	rte_flow_trace_pull(port_id, queue_id, res, n_res, rc);
2141 
2142 	return rc;
2143 }
2144 
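/*
 * Usage sketch: poll for completions; 16 is an arbitrary burst size and
 * user_data carries the cookie given at enqueue time:
 *
 *	struct rte_flow_op_result res[16];
 *	int n = rte_flow_pull(port_id, queue_id, res, 16, &error);
 *	for (int i = 0; i < n; i++)
 *		if (res[i].status != RTE_FLOW_OP_SUCCESS)
 *			report_failure(res[i].user_data); // hypothetical helper
 */
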
2145 struct rte_flow_action_handle *
2146 rte_flow_async_action_handle_create(uint16_t port_id,
2147 		uint32_t queue_id,
2148 		const struct rte_flow_op_attr *op_attr,
2149 		const struct rte_flow_indir_action_conf *indir_action_conf,
2150 		const struct rte_flow_action *action,
2151 		void *user_data,
2152 		struct rte_flow_error *error)
2153 {
2154 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2155 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2156 	struct rte_flow_action_handle *handle;
2157 
2158 	handle = ops->async_action_handle_create(dev, queue_id, op_attr,
2159 					     indir_action_conf, action, user_data, error);
2160 	if (handle == NULL)
2161 		flow_err(port_id, -rte_errno, error);
2162 
2163 	rte_flow_trace_async_action_handle_create(port_id, queue_id, op_attr,
2164 						  indir_action_conf, action,
2165 						  user_data, handle);
2166 
2167 	return handle;
2168 }
2169 
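/*
 * Usage sketch: enqueue creation of an indirect (shareable) counter; the
 * conf direction bit is illustrative:
 *
 *	struct rte_flow_indir_action_conf conf = { .ingress = 1 };
 *	struct rte_flow_action count = { .type = RTE_FLOW_ACTION_TYPE_COUNT };
 *	struct rte_flow_action_handle *handle =
 *		rte_flow_async_action_handle_create(port_id, queue_id,
 *						    &op_attr, &conf, &count,
 *						    NULL, &error);
 */
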
2170 int
2171 rte_flow_async_action_handle_destroy(uint16_t port_id,
2172 		uint32_t queue_id,
2173 		const struct rte_flow_op_attr *op_attr,
2174 		struct rte_flow_action_handle *action_handle,
2175 		void *user_data,
2176 		struct rte_flow_error *error)
2177 {
2178 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2179 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2180 	int ret;
2181 
2182 	ret = ops->async_action_handle_destroy(dev, queue_id, op_attr,
2183 					   action_handle, user_data, error);
2184 	ret = flow_err(port_id, ret, error);
2185 
2186 	rte_flow_trace_async_action_handle_destroy(port_id, queue_id, op_attr,
2187 						   action_handle, user_data, ret);
2188 
2189 	return ret;
2190 }
2191 
2192 int
2193 rte_flow_async_action_handle_update(uint16_t port_id,
2194 		uint32_t queue_id,
2195 		const struct rte_flow_op_attr *op_attr,
2196 		struct rte_flow_action_handle *action_handle,
2197 		const void *update,
2198 		void *user_data,
2199 		struct rte_flow_error *error)
2200 {
2201 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2202 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2203 	int ret;
2204 
2205 	ret = ops->async_action_handle_update(dev, queue_id, op_attr,
2206 					  action_handle, update, user_data, error);
2207 	ret = flow_err(port_id, ret, error);
2208 
2209 	rte_flow_trace_async_action_handle_update(port_id, queue_id, op_attr,
2210 						  action_handle, update,
2211 						  user_data, ret);
2212 
2213 	return ret;
2214 }
2215 
2216 int
2217 rte_flow_async_action_handle_query(uint16_t port_id,
2218 		uint32_t queue_id,
2219 		const struct rte_flow_op_attr *op_attr,
2220 		const struct rte_flow_action_handle *action_handle,
2221 		void *data,
2222 		void *user_data,
2223 		struct rte_flow_error *error)
2224 {
2225 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2226 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2227 	int ret;
2228 
2229 	ret = ops->async_action_handle_query(dev, queue_id, op_attr,
2230 					  action_handle, data, user_data, error);
2231 	ret = flow_err(port_id, ret, error);
2232 
2233 	rte_flow_trace_async_action_handle_query(port_id, queue_id, op_attr,
2234 						 action_handle, data, user_data,
2235 						 ret);
2236 
2237 	return ret;
2238 }
2239 
2240 int
2241 rte_flow_action_handle_query_update(uint16_t port_id,
2242 				    struct rte_flow_action_handle *handle,
2243 				    const void *update, void *query,
2244 				    enum rte_flow_query_update_mode mode,
2245 				    struct rte_flow_error *error)
2246 {
2247 	int ret;
2248 	struct rte_eth_dev *dev;
2249 	const struct rte_flow_ops *ops;
2250 
2251 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2252 	if (!handle)
2253 		return -EINVAL;
2254 	if (!update && !query)
2255 		return -EINVAL;
2256 	dev = &rte_eth_devices[port_id];
2257 	ops = rte_flow_ops_get(port_id, error);
2258 	if (!ops || !ops->action_handle_query_update)
2259 		return -ENOTSUP;
2260 	ret = ops->action_handle_query_update(dev, handle, update,
2261 					      query, mode, error);
2262 	return flow_err(port_id, ret, error);
2263 }
2264 
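/*
 * Usage sketch: atomically apply an update and read back the state of an
 * indirect action. update_conf and query_buf are hypothetical application
 * buffers whose layout is action-specific (e.g. struct
 * rte_flow_query_count for a COUNT handle); the mode picks which half
 * happens first:
 *
 *	rte_flow_action_handle_query_update(port_id, handle, update_conf,
 *					    query_buf, RTE_FLOW_QU_QUERY_FIRST,
 *					    &error);
 */
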
2265 int
2266 rte_flow_async_action_handle_query_update(uint16_t port_id, uint32_t queue_id,
2267 					  const struct rte_flow_op_attr *attr,
2268 					  struct rte_flow_action_handle *handle,
2269 					  const void *update, void *query,
2270 					  enum rte_flow_query_update_mode mode,
2271 					  void *user_data,
2272 					  struct rte_flow_error *error)
2273 {
2274 	int ret;
2275 	struct rte_eth_dev *dev;
2276 	const struct rte_flow_ops *ops;
2277 
2278 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2279 	if (!handle)
2280 		return -EINVAL;
2281 	if (!update && !query)
2282 		return -EINVAL;
2283 	dev = &rte_eth_devices[port_id];
2284 	ops = rte_flow_ops_get(port_id, error);
2285 	if (!ops || !ops->async_action_handle_query_update)
2286 		return -ENOTSUP;
2287 	ret = ops->async_action_handle_query_update(dev, queue_id, attr,
2288 						    handle, update,
2289 						    query, mode,
2290 						    user_data, error);
2291 	return flow_err(port_id, ret, error);
2292 }
2293 
2294 struct rte_flow_action_list_handle *
2295 rte_flow_action_list_handle_create(uint16_t port_id,
2296 				   const struct rte_flow_indir_action_conf
2297 				   *conf,
2298 				   const struct rte_flow_action *actions,
2299 				   struct rte_flow_error *error)
2300 {
2301 	int ret;
2302 	struct rte_eth_dev *dev;
2303 	const struct rte_flow_ops *ops;
2304 	struct rte_flow_action_list_handle *handle;
2305 
2306 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
2307 	ops = rte_flow_ops_get(port_id, error);
2308 	if (!ops || !ops->action_list_handle_create) {
2309 		rte_flow_error_set(error, ENOTSUP,
2310 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2311 				   "action_list handle not supported");
2312 		return NULL;
2313 	}
2314 	dev = &rte_eth_devices[port_id];
2315 	handle = ops->action_list_handle_create(dev, conf, actions, error);
2316 	ret = handle == NULL ? flow_err(port_id, -rte_errno, error) : 0;
2317 	rte_flow_trace_action_list_handle_create(port_id, conf, actions, ret);
2318 	return handle;
2319 }
2320 
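/*
 * Usage sketch: bundle a counter and a queue redirection behind a single
 * indirect list handle; the array is END-terminated like any action list:
 *
 *	struct rte_flow_action list[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_action_list_handle *handle =
 *		rte_flow_action_list_handle_create(port_id, &conf, list,
 *						   &error);
 */
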
2321 int
2322 rte_flow_action_list_handle_destroy(uint16_t port_id,
2323 				    struct rte_flow_action_list_handle *handle,
2324 				    struct rte_flow_error *error)
2325 {
2326 	int ret;
2327 	struct rte_eth_dev *dev;
2328 	const struct rte_flow_ops *ops;
2329 
2330 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2331 	ops = rte_flow_ops_get(port_id, error);
2332 	if (!ops || !ops->action_list_handle_destroy)
2333 		return rte_flow_error_set(error, ENOTSUP,
2334 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2335 					  "action_list handle not supported");
2336 	dev = &rte_eth_devices[port_id];
2337 	ret = ops->action_list_handle_destroy(dev, handle, error);
2338 	ret = flow_err(port_id, ret, error);
2339 	rte_flow_trace_action_list_handle_destroy(port_id, handle, ret);
2340 	return ret;
2341 }
2342 
2343 struct rte_flow_action_list_handle *
2344 rte_flow_async_action_list_handle_create(uint16_t port_id, uint32_t queue_id,
2345 					 const struct rte_flow_op_attr *attr,
2346 					 const struct rte_flow_indir_action_conf *conf,
2347 					 const struct rte_flow_action *actions,
2348 					 void *user_data,
2349 					 struct rte_flow_error *error)
2350 {
2351 	int ret;
2352 	struct rte_eth_dev *dev;
2353 	const struct rte_flow_ops *ops;
2354 	struct rte_flow_action_list_handle *handle;
2355 
2356 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
2357 	ops = rte_flow_ops_get(port_id, error);
2358 	if (!ops || !ops->async_action_list_handle_create) {
2359 		rte_flow_error_set(error, ENOTSUP,
2360 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2361 				   "action_list handle not supported");
2362 		return NULL;
2363 	}
2364 	dev = &rte_eth_devices[port_id];
2365 	handle = ops->async_action_list_handle_create(dev, queue_id, attr, conf,
2366 						      actions, user_data,
2367 						      error);
2368 	ret = handle == NULL ? flow_err(port_id, -rte_errno, error) : 0;
2369 	rte_flow_trace_async_action_list_handle_create(port_id, queue_id, attr,
2370 						       conf, actions, user_data,
2371 						       ret);
2372 	return handle;
2373 }
2374 
2375 int
2376 rte_flow_async_action_list_handle_destroy(uint16_t port_id, uint32_t queue_id,
2377 				 const struct rte_flow_op_attr *op_attr,
2378 				 struct rte_flow_action_list_handle *handle,
2379 				 void *user_data, struct rte_flow_error *error)
2380 {
2381 	int ret;
2382 	struct rte_eth_dev *dev;
2383 	const struct rte_flow_ops *ops;
2384 
2385 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2386 	ops = rte_flow_ops_get(port_id, error);
2387 	if (!ops || !ops->async_action_list_handle_destroy)
2388 		return rte_flow_error_set(error, ENOTSUP,
2389 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2390 					  "async action_list handle not supported");
2391 	dev = &rte_eth_devices[port_id];
2392 	ret = ops->async_action_list_handle_destroy(dev, queue_id, op_attr,
2393 						    handle, user_data, error);
2394 	ret = flow_err(port_id, ret, error);
2395 	rte_flow_trace_async_action_list_handle_destroy(port_id, queue_id,
2396 							op_attr, handle,
2397 							user_data, ret);
2398 	return ret;
2399 }
2400 
2401 int
2402 rte_flow_action_list_handle_query_update(uint16_t port_id,
2403 			 const struct rte_flow_action_list_handle *handle,
2404 			 const void **update, void **query,
2405 			 enum rte_flow_query_update_mode mode,
2406 			 struct rte_flow_error *error)
2407 {
2408 	int ret;
2409 	struct rte_eth_dev *dev;
2410 	const struct rte_flow_ops *ops;
2411 
2412 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2413 	ops = rte_flow_ops_get(port_id, error);
2414 	if (!ops || !ops->action_list_handle_query_update)
2415 		return rte_flow_error_set(error, ENOTSUP,
2416 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2417 					  "action_list query_update not supported");
2418 	dev = &rte_eth_devices[port_id];
2419 	ret = ops->action_list_handle_query_update(dev, handle, update, query,
2420 						   mode, error);
2421 	ret = flow_err(port_id, ret, error);
2422 	rte_flow_trace_action_list_handle_query_update(port_id, handle, update,
2423 						       query, mode, ret);
2424 	return ret;
2425 }
2426 
2427 int
2428 rte_flow_async_action_list_handle_query_update(uint16_t port_id, uint32_t queue_id,
2429 			 const struct rte_flow_op_attr *attr,
2430 			 const struct rte_flow_action_list_handle *handle,
2431 			 const void **update, void **query,
2432 			 enum rte_flow_query_update_mode mode,
2433 			 void *user_data, struct rte_flow_error *error)
2434 {
2435 	int ret;
2436 	struct rte_eth_dev *dev;
2437 	const struct rte_flow_ops *ops;
2438 
2439 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2440 	ops = rte_flow_ops_get(port_id, error);
2441 	if (!ops || !ops->async_action_list_handle_query_update)
2442 		return rte_flow_error_set(error, ENOTSUP,
2443 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2444 					  "action_list async query_update not supported");
2445 	dev = &rte_eth_devices[port_id];
2446 	ret = ops->async_action_list_handle_query_update(dev, queue_id, attr,
2447 							 handle, update, query,
2448 							 mode, user_data,
2449 							 error);
2450 	ret = flow_err(port_id, ret, error);
2451 	rte_flow_trace_async_action_list_handle_query_update(port_id, queue_id,
2452 							     attr, handle,
2453 							     update, query,
2454 							     mode, user_data,
2455 							     ret);
2456 	return ret;
2457 }
2458