xref: /dpdk/lib/ethdev/rte_flow.c (revision e30aa5255e3b7fa345055040f50f4f9e4ae26e96)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <errno.h>
#include <stddef.h>
#include <stdint.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_branch_prediction.h>
#include <rte_string_fns.h>
#include <rte_mbuf_dyn.h>
#include "rte_ethdev.h"
#include "rte_flow_driver.h"
#include "rte_flow.h"

#include "ethdev_trace.h"

/* Mbuf dynamic field name for metadata. */
int32_t rte_flow_dynf_metadata_offs = -1;

/* Mbuf dynamic field flag bit number for metadata. */
uint64_t rte_flow_dynf_metadata_mask;

/**
 * Flow elements description tables.
 */
struct rte_flow_desc_data {
	const char *name;
	size_t size;
	size_t (*desc_fn)(void *dst, const void *src);
};

/**
 * Copy flow element (item or action) configuration data.
 *
 * @param buf
 *   Destination memory.
 * @param data
 *   Source memory.
 * @param size
 *   Requested copy size.
 * @param desc
 *   rte_flow_desc_item - for flow item conversion.
 *   rte_flow_desc_action - for flow action conversion.
 * @param type
 *   Index into @p desc, or a negative value for PMD-private flow elements.
 *
 * @return
 *   Number of bytes needed to store the element regardless of @p size,
 *   or 0 if @p buf or @p data is NULL.
 */
static inline size_t
rte_flow_conv_copy(void *buf, const void *data, const size_t size,
		   const struct rte_flow_desc_data *desc, int type)
{
	/* Allow PMD-private flow elements (negative type values). */
	bool rte_type = type >= 0;

	size_t sz = rte_type ? desc[type].size : sizeof(void *);
	if (buf == NULL || data == NULL)
		return 0;
	rte_memcpy(buf, data, (size > sz ? sz : size));
	if (rte_type && desc[type].desc_fn)
		sz += desc[type].desc_fn(size > 0 ? buf : NULL, data);
	return sz;
}

static size_t
rte_flow_item_flex_conv(void *buf, const void *data)
{
	struct rte_flow_item_flex *dst = buf;
	const struct rte_flow_item_flex *src = data;
	if (buf) {
		dst->pattern = rte_memcpy
			((void *)((uintptr_t)(dst + 1)), src->pattern,
			 src->length);
	}
	return src->length;
}

/** Generate flow_item[] entry. */
#define MK_FLOW_ITEM(t, s) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
		.desc_fn = NULL, \
	}

#define MK_FLOW_ITEM_FN(t, s, fn) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
		.desc_fn = fn, \
	}

/** Information about known flow pattern items. */
static const struct rte_flow_desc_data rte_flow_desc_item[] = {
	MK_FLOW_ITEM(END, 0),
	MK_FLOW_ITEM(VOID, 0),
	MK_FLOW_ITEM(INVERT, 0),
	MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
	MK_FLOW_ITEM(PORT_ID, sizeof(struct rte_flow_item_port_id)),
	MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)),
	MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
	MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
	MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
	MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
	MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
	MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
	MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
	MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
	MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
	MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
	MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
	MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
	MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
	MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)),
	MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(ESP, sizeof(struct rte_flow_item_esp)),
	MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
	MK_FLOW_ITEM(VXLAN_GPE, sizeof(struct rte_flow_item_vxlan_gpe)),
	MK_FLOW_ITEM(ARP_ETH_IPV4, sizeof(struct rte_flow_item_arp_eth_ipv4)),
	MK_FLOW_ITEM(IPV6_EXT, sizeof(struct rte_flow_item_ipv6_ext)),
	MK_FLOW_ITEM(IPV6_FRAG_EXT, sizeof(struct rte_flow_item_ipv6_frag_ext)),
	MK_FLOW_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
	MK_FLOW_ITEM(ICMP6_ECHO_REQUEST, sizeof(struct rte_flow_item_icmp6_echo)),
	MK_FLOW_ITEM(ICMP6_ECHO_REPLY, sizeof(struct rte_flow_item_icmp6_echo)),
	MK_FLOW_ITEM(ICMP6_ND_NS, sizeof(struct rte_flow_item_icmp6_nd_ns)),
	MK_FLOW_ITEM(ICMP6_ND_NA, sizeof(struct rte_flow_item_icmp6_nd_na)),
	MK_FLOW_ITEM(ICMP6_ND_OPT, sizeof(struct rte_flow_item_icmp6_nd_opt)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_SLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_TLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
	MK_FLOW_ITEM(MARK, sizeof(struct rte_flow_item_mark)),
	MK_FLOW_ITEM(META, sizeof(struct rte_flow_item_meta)),
	MK_FLOW_ITEM(TAG, sizeof(struct rte_flow_item_tag)),
	MK_FLOW_ITEM(GRE_KEY, sizeof(rte_be32_t)),
	MK_FLOW_ITEM(GRE_OPTION, sizeof(struct rte_flow_item_gre_opt)),
	MK_FLOW_ITEM(GTP_PSC, sizeof(struct rte_flow_item_gtp_psc)),
	MK_FLOW_ITEM(PPPOES, sizeof(struct rte_flow_item_pppoe)),
	MK_FLOW_ITEM(PPPOED, sizeof(struct rte_flow_item_pppoe)),
	MK_FLOW_ITEM(PPPOE_PROTO_ID,
			sizeof(struct rte_flow_item_pppoe_proto_id)),
	MK_FLOW_ITEM(NSH, sizeof(struct rte_flow_item_nsh)),
	MK_FLOW_ITEM(IGMP, sizeof(struct rte_flow_item_igmp)),
	MK_FLOW_ITEM(AH, sizeof(struct rte_flow_item_ah)),
	MK_FLOW_ITEM(HIGIG2, sizeof(struct rte_flow_item_higig2_hdr)),
	MK_FLOW_ITEM(L2TPV3OIP, sizeof(struct rte_flow_item_l2tpv3oip)),
	MK_FLOW_ITEM(PFCP, sizeof(struct rte_flow_item_pfcp)),
	MK_FLOW_ITEM(ECPRI, sizeof(struct rte_flow_item_ecpri)),
	MK_FLOW_ITEM(GENEVE_OPT, sizeof(struct rte_flow_item_geneve_opt)),
	MK_FLOW_ITEM(INTEGRITY, sizeof(struct rte_flow_item_integrity)),
	MK_FLOW_ITEM(CONNTRACK, sizeof(uint32_t)),
	MK_FLOW_ITEM(PORT_REPRESENTOR, sizeof(struct rte_flow_item_ethdev)),
	MK_FLOW_ITEM(REPRESENTED_PORT, sizeof(struct rte_flow_item_ethdev)),
	MK_FLOW_ITEM_FN(FLEX, sizeof(struct rte_flow_item_flex),
			rte_flow_item_flex_conv),
	MK_FLOW_ITEM(L2TPV2, sizeof(struct rte_flow_item_l2tpv2)),
	MK_FLOW_ITEM(PPP, sizeof(struct rte_flow_item_ppp)),
	MK_FLOW_ITEM(METER_COLOR, sizeof(struct rte_flow_item_meter_color)),
	MK_FLOW_ITEM(IPV6_ROUTING_EXT, sizeof(struct rte_flow_item_ipv6_routing_ext)),
	MK_FLOW_ITEM(QUOTA, sizeof(struct rte_flow_item_quota)),
	MK_FLOW_ITEM(AGGR_AFFINITY, sizeof(struct rte_flow_item_aggr_affinity)),
	MK_FLOW_ITEM(TX_QUEUE, sizeof(struct rte_flow_item_tx_queue)),
	MK_FLOW_ITEM(IB_BTH, sizeof(struct rte_flow_item_ib_bth)),
};

/** Generate flow_action[] entry. */
#define MK_FLOW_ACTION(t, s) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
		.desc_fn = NULL, \
	}

#define MK_FLOW_ACTION_FN(t, fn) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = 0, \
		.desc_fn = fn, \
	}

/** Information about known flow actions. */
static const struct rte_flow_desc_data rte_flow_desc_action[] = {
	MK_FLOW_ACTION(END, 0),
	MK_FLOW_ACTION(VOID, 0),
	MK_FLOW_ACTION(PASSTHRU, 0),
	MK_FLOW_ACTION(JUMP, sizeof(struct rte_flow_action_jump)),
	MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
	MK_FLOW_ACTION(FLAG, 0),
	MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
	MK_FLOW_ACTION(DROP, 0),
	MK_FLOW_ACTION(COUNT, sizeof(struct rte_flow_action_count)),
	MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)),
	MK_FLOW_ACTION(PF, 0),
	MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
	MK_FLOW_ACTION(PORT_ID, sizeof(struct rte_flow_action_port_id)),
	MK_FLOW_ACTION(METER, sizeof(struct rte_flow_action_meter)),
	MK_FLOW_ACTION(SECURITY, sizeof(struct rte_flow_action_security)),
	MK_FLOW_ACTION(OF_DEC_NW_TTL, 0),
	MK_FLOW_ACTION(OF_POP_VLAN, 0),
	MK_FLOW_ACTION(OF_PUSH_VLAN,
		       sizeof(struct rte_flow_action_of_push_vlan)),
	MK_FLOW_ACTION(OF_SET_VLAN_VID,
		       sizeof(struct rte_flow_action_of_set_vlan_vid)),
	MK_FLOW_ACTION(OF_SET_VLAN_PCP,
		       sizeof(struct rte_flow_action_of_set_vlan_pcp)),
	MK_FLOW_ACTION(OF_POP_MPLS,
		       sizeof(struct rte_flow_action_of_pop_mpls)),
	MK_FLOW_ACTION(OF_PUSH_MPLS,
		       sizeof(struct rte_flow_action_of_push_mpls)),
	MK_FLOW_ACTION(VXLAN_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
	MK_FLOW_ACTION(VXLAN_DECAP, 0),
	MK_FLOW_ACTION(NVGRE_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
	MK_FLOW_ACTION(NVGRE_DECAP, 0),
	MK_FLOW_ACTION(RAW_ENCAP, sizeof(struct rte_flow_action_raw_encap)),
	MK_FLOW_ACTION(RAW_DECAP, sizeof(struct rte_flow_action_raw_decap)),
	MK_FLOW_ACTION(SET_IPV4_SRC,
		       sizeof(struct rte_flow_action_set_ipv4)),
	MK_FLOW_ACTION(SET_IPV4_DST,
		       sizeof(struct rte_flow_action_set_ipv4)),
	MK_FLOW_ACTION(SET_IPV6_SRC,
		       sizeof(struct rte_flow_action_set_ipv6)),
	MK_FLOW_ACTION(SET_IPV6_DST,
		       sizeof(struct rte_flow_action_set_ipv6)),
	MK_FLOW_ACTION(SET_TP_SRC,
		       sizeof(struct rte_flow_action_set_tp)),
	MK_FLOW_ACTION(SET_TP_DST,
		       sizeof(struct rte_flow_action_set_tp)),
	MK_FLOW_ACTION(MAC_SWAP, 0),
	MK_FLOW_ACTION(DEC_TTL, 0),
	MK_FLOW_ACTION(SET_TTL, sizeof(struct rte_flow_action_set_ttl)),
	MK_FLOW_ACTION(SET_MAC_SRC, sizeof(struct rte_flow_action_set_mac)),
	MK_FLOW_ACTION(SET_MAC_DST, sizeof(struct rte_flow_action_set_mac)),
	MK_FLOW_ACTION(INC_TCP_SEQ, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(DEC_TCP_SEQ, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(INC_TCP_ACK, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(DEC_TCP_ACK, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(SET_TAG, sizeof(struct rte_flow_action_set_tag)),
	MK_FLOW_ACTION(SET_META, sizeof(struct rte_flow_action_set_meta)),
	MK_FLOW_ACTION(SET_IPV4_DSCP, sizeof(struct rte_flow_action_set_dscp)),
	MK_FLOW_ACTION(SET_IPV6_DSCP, sizeof(struct rte_flow_action_set_dscp)),
	MK_FLOW_ACTION(AGE, sizeof(struct rte_flow_action_age)),
	MK_FLOW_ACTION(SAMPLE, sizeof(struct rte_flow_action_sample)),
	MK_FLOW_ACTION(MODIFY_FIELD,
		       sizeof(struct rte_flow_action_modify_field)),
	/**
	 * Indirect action represented as a handle of type
	 * (struct rte_flow_action_handle *) stored in the conf field (see
	 * struct rte_flow_action); no need for an additional structure to
	 * store the indirect action handle.
	 */
	MK_FLOW_ACTION(INDIRECT, 0),
	MK_FLOW_ACTION(CONNTRACK, sizeof(struct rte_flow_action_conntrack)),
	MK_FLOW_ACTION(PORT_REPRESENTOR, sizeof(struct rte_flow_action_ethdev)),
	MK_FLOW_ACTION(REPRESENTED_PORT, sizeof(struct rte_flow_action_ethdev)),
	MK_FLOW_ACTION(METER_MARK, sizeof(struct rte_flow_action_meter_mark)),
	MK_FLOW_ACTION(SEND_TO_KERNEL, 0),
	MK_FLOW_ACTION(QUOTA, sizeof(struct rte_flow_action_quota)),
	MK_FLOW_ACTION(INDIRECT_LIST,
		       sizeof(struct rte_flow_action_indirect_list)),
};

int
rte_flow_dynf_metadata_register(void)
{
	int offset;
	int flag;

	static const struct rte_mbuf_dynfield desc_offs = {
		.name = RTE_MBUF_DYNFIELD_METADATA_NAME,
		.size = sizeof(uint32_t),
		.align = __alignof__(uint32_t),
	};
	static const struct rte_mbuf_dynflag desc_flag = {
		.name = RTE_MBUF_DYNFLAG_METADATA_NAME,
	};

	offset = rte_mbuf_dynfield_register(&desc_offs);
	if (offset < 0)
		goto error;
	flag = rte_mbuf_dynflag_register(&desc_flag);
	if (flag < 0)
		goto error;
	rte_flow_dynf_metadata_offs = offset;
	rte_flow_dynf_metadata_mask = RTE_BIT64(flag);

	rte_flow_trace_dynf_metadata_register(offset, RTE_BIT64(flag));

	return 0;

error:
	rte_flow_dynf_metadata_offs = -1;
	rte_flow_dynf_metadata_mask = UINT64_C(0);
	return -rte_errno;
}

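/*
 * A minimal usage sketch for the metadata dynamic field (hypothetical
 * application code, not part of this file; "port_id" and "m" are
 * application-defined variables):
 *
 * @code
 *	if (rte_flow_dynf_metadata_register() < 0)
 *		rte_exit(EXIT_FAILURE, "cannot register metadata dynfield");
 *	// ... after receiving an mbuf "m" ...
 *	if (m->ol_flags & rte_flow_dynf_metadata_mask) {
 *		uint32_t meta = *RTE_FLOW_DYNF_METADATA(m);
 *		(void)meta; // value set by the PMD, e.g. via SET_META
 *	}
 * @endcode
 */
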
static inline void
fts_enter(struct rte_eth_dev *dev)
{
	if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
		pthread_mutex_lock(&dev->data->flow_ops_mutex);
}

static inline void
fts_exit(struct rte_eth_dev *dev)
{
	if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
		pthread_mutex_unlock(&dev->data->flow_ops_mutex);
}

static int
flow_err(uint16_t port_id, int ret, struct rte_flow_error *error)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return rte_flow_error_set(error, EIO,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(EIO));
	return ret;
}

/* Get generic flow operations structure from a port. */
const struct rte_flow_ops *
rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops;
	int code;

	if (unlikely(!rte_eth_dev_is_valid_port(port_id)))
		code = ENODEV;
	else if (unlikely(dev->dev_ops->flow_ops_get == NULL))
		/* flow API not supported with this driver dev_ops */
		code = ENOSYS;
	else
		code = dev->dev_ops->flow_ops_get(dev, &ops);
	if (code == 0 && ops == NULL)
		/* flow API not supported with this device */
		code = ENOSYS;

	if (code != 0) {
		rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, rte_strerror(code));
		return NULL;
	}
	return ops;
}

/* Check whether a flow rule can be created on a given port. */
int
rte_flow_validate(uint16_t port_id,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;

	if (likely(!!attr) && attr->transfer &&
	    (attr->ingress || attr->egress)) {
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ATTR,
					  attr, "cannot use attr ingress/egress with attr transfer");
	}

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->validate)) {
		fts_enter(dev);
		ret = ops->validate(dev, attr, pattern, actions, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_validate(port_id, attr, pattern, actions, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Create a flow rule on a given port. */
struct rte_flow *
rte_flow_create(uint16_t port_id,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct rte_flow *flow;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return NULL;
	if (likely(!!ops->create)) {
		fts_enter(dev);
		flow = ops->create(dev, attr, pattern, actions, error);
		fts_exit(dev);
		if (flow == NULL)
			flow_err(port_id, -rte_errno, error);

		rte_flow_trace_create(port_id, attr, pattern, actions, flow);

		return flow;
	}
	rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(ENOSYS));
	return NULL;
}

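/*
 * A minimal usage sketch (hypothetical application code, not part of this
 * file; "port_id" is application-defined): validate, then create, a rule
 * that drops all ingress traffic.
 *
 * @code
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *flow = NULL;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 * @endcode
 */
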
/* Destroy a flow rule on a given port. */
int
rte_flow_destroy(uint16_t port_id,
		 struct rte_flow *flow,
		 struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->destroy)) {
		fts_enter(dev);
		ret = ops->destroy(dev, flow, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_destroy(port_id, flow, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

int
rte_flow_actions_update(uint16_t port_id,
			struct rte_flow *flow,
			const struct rte_flow_action actions[],
			struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->actions_update)) {
		fts_enter(dev);
		ret = ops->actions_update(dev, flow, actions, error);
		fts_exit(dev);

		rte_flow_trace_actions_update(port_id, flow, actions, ret);

		return flow_err(port_id, ret, error);
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Destroy all flow rules associated with a port. */
int
rte_flow_flush(uint16_t port_id,
	       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->flush)) {
		fts_enter(dev);
		ret = ops->flush(dev, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_flush(port_id, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Query an existing flow rule. */
int
rte_flow_query(uint16_t port_id,
	       struct rte_flow *flow,
	       const struct rte_flow_action *action,
	       void *data,
	       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (!ops)
		return -rte_errno;
	if (likely(!!ops->query)) {
		fts_enter(dev);
		ret = ops->query(dev, flow, action, data, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_query(port_id, flow, action, data, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Restrict ingress traffic to the defined flow rules. */
int
rte_flow_isolate(uint16_t port_id,
		 int set,
		 struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (!ops)
		return -rte_errno;
	if (likely(!!ops->isolate)) {
		fts_enter(dev);
		ret = ops->isolate(dev, set, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_isolate(port_id, set, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Initialize flow error structure. */
int
rte_flow_error_set(struct rte_flow_error *error,
		   int code,
		   enum rte_flow_error_type type,
		   const void *cause,
		   const char *message)
{
	if (error) {
		*error = (struct rte_flow_error){
			.type = type,
			.cause = cause,
			.message = message,
		};
	}
	rte_errno = code;
	return -code;
}

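/*
 * A sketch of how a driver callback typically reports failure through
 * this helper (hypothetical PMD code; "action" stands for the offending
 * object):
 *
 * @code
 *	return rte_flow_error_set(error, ENOTSUP,
 *				  RTE_FLOW_ERROR_TYPE_ACTION, action,
 *				  "action not supported");
 * @endcode
 *
 * The call stores the details in *error (if non-NULL), sets rte_errno to
 * @p code and returns its negated value.
 */
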
/** Pattern item specification types. */
enum rte_flow_conv_item_spec_type {
	RTE_FLOW_CONV_ITEM_SPEC,
	RTE_FLOW_CONV_ITEM_LAST,
	RTE_FLOW_CONV_ITEM_MASK,
};

/**
 * Copy pattern item specification.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] item
 *   Pattern item to copy specification from.
 * @param type
 *   Specification selector for either @p spec, @p last or @p mask.
 *
 * @return
 *   Number of bytes needed to store pattern item specification regardless
 *   of @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_item_spec(void *buf, const size_t size,
			const struct rte_flow_item *item,
			enum rte_flow_conv_item_spec_type type)
{
	size_t off;
	const void *data =
		type == RTE_FLOW_CONV_ITEM_SPEC ? item->spec :
		type == RTE_FLOW_CONV_ITEM_LAST ? item->last :
		type == RTE_FLOW_CONV_ITEM_MASK ? item->mask :
		NULL;

	switch (item->type) {
		union {
			const struct rte_flow_item_raw *raw;
		} spec;
		union {
			const struct rte_flow_item_raw *raw;
		} last;
		union {
			const struct rte_flow_item_raw *raw;
		} mask;
		union {
			const struct rte_flow_item_raw *raw;
		} src;
		union {
			struct rte_flow_item_raw *raw;
		} dst;
		size_t tmp;

	case RTE_FLOW_ITEM_TYPE_RAW:
		spec.raw = item->spec;
		last.raw = item->last ? item->last : item->spec;
		mask.raw = item->mask ? item->mask : &rte_flow_item_raw_mask;
		src.raw = data;
		dst.raw = buf;
		rte_memcpy(dst.raw,
			   (&(struct rte_flow_item_raw){
				.relative = src.raw->relative,
				.search = src.raw->search,
				.reserved = src.raw->reserved,
				.offset = src.raw->offset,
				.limit = src.raw->limit,
				.length = src.raw->length,
			   }),
			   size > sizeof(*dst.raw) ? sizeof(*dst.raw) : size);
		off = sizeof(*dst.raw);
		if (type == RTE_FLOW_CONV_ITEM_SPEC ||
		    (type == RTE_FLOW_CONV_ITEM_MASK &&
		     ((spec.raw->length & mask.raw->length) >=
		      (last.raw->length & mask.raw->length))))
			tmp = spec.raw->length & mask.raw->length;
		else
			tmp = last.raw->length & mask.raw->length;
		if (tmp) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.raw->pattern));
			if (size >= off + tmp)
				dst.raw->pattern = rte_memcpy
					((void *)((uintptr_t)dst.raw + off),
					 src.raw->pattern, tmp);
			off += tmp;
		}
		break;
	default:
		off = rte_flow_conv_copy(buf, data, size,
					 rte_flow_desc_item, item->type);
		break;
	}
	return off;
}

/**
 * Copy action configuration.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] action
 *   Action to copy configuration from.
 *
 * @return
 *   Number of bytes needed to store action configuration regardless
 *   of @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_action_conf(void *buf, const size_t size,
			  const struct rte_flow_action *action)
{
	size_t off;

	switch (action->type) {
		union {
			const struct rte_flow_action_rss *rss;
			const struct rte_flow_action_vxlan_encap *vxlan_encap;
			const struct rte_flow_action_nvgre_encap *nvgre_encap;
		} src;
		union {
			struct rte_flow_action_rss *rss;
			struct rte_flow_action_vxlan_encap *vxlan_encap;
			struct rte_flow_action_nvgre_encap *nvgre_encap;
		} dst;
		size_t tmp;
		int ret;

	case RTE_FLOW_ACTION_TYPE_RSS:
		src.rss = action->conf;
		dst.rss = buf;
		rte_memcpy(dst.rss,
			   (&(struct rte_flow_action_rss){
				.func = src.rss->func,
				.level = src.rss->level,
				.types = src.rss->types,
				.key_len = src.rss->key_len,
				.queue_num = src.rss->queue_num,
			   }),
			   size > sizeof(*dst.rss) ? sizeof(*dst.rss) : size);
		off = sizeof(*dst.rss);
		if (src.rss->key_len && src.rss->key) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->key));
			tmp = sizeof(*src.rss->key) * src.rss->key_len;
			if (size >= off + tmp)
				dst.rss->key = rte_memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->key, tmp);
			off += tmp;
		}
		if (src.rss->queue_num) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->queue));
			tmp = sizeof(*src.rss->queue) * src.rss->queue_num;
			if (size >= off + tmp)
				dst.rss->queue = rte_memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->queue, tmp);
			off += tmp;
		}
		break;
	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
	case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
		src.vxlan_encap = action->conf;
		dst.vxlan_encap = buf;
		RTE_BUILD_BUG_ON(sizeof(*src.vxlan_encap) !=
				 sizeof(*src.nvgre_encap) ||
				 offsetof(struct rte_flow_action_vxlan_encap,
					  definition) !=
				 offsetof(struct rte_flow_action_nvgre_encap,
					  definition));
		off = sizeof(*dst.vxlan_encap);
		if (src.vxlan_encap->definition) {
			off = RTE_ALIGN_CEIL
				(off, sizeof(*dst.vxlan_encap->definition));
			ret = rte_flow_conv
				(RTE_FLOW_CONV_OP_PATTERN,
				 (void *)((uintptr_t)dst.vxlan_encap + off),
				 size > off ? size - off : 0,
				 src.vxlan_encap->definition, NULL);
			if (ret < 0)
				return 0;
			if (size >= off + ret)
				dst.vxlan_encap->definition =
					(void *)((uintptr_t)dst.vxlan_encap +
						 off);
			off += ret;
		}
		break;
	default:
		off = rte_flow_conv_copy(buf, action->conf, size,
					 rte_flow_desc_action, action->type);
		break;
	}
	return off;
}

/**
 * Copy a list of pattern items.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source pattern items.
 * @param num
 *   Maximum number of pattern items to process from @p src or 0 to process
 *   the entire list. In both cases, processing stops after
 *   RTE_FLOW_ITEM_TYPE_END is encountered.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store
 *   pattern items regardless of @p size on success (@p dst contents are
 *   truncated to @p size if not large enough), a negative errno value
 *   otherwise and rte_errno is set.
 */
static int
rte_flow_conv_pattern(struct rte_flow_item *dst,
		      const size_t size,
		      const struct rte_flow_item *src,
		      unsigned int num,
		      struct rte_flow_error *error)
{
	uintptr_t data = (uintptr_t)dst;
	size_t off;
	size_t ret;
	unsigned int i;

	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
		/* Allow PMD-private flow items (negative type values). */
		if (((int)src->type >= 0) &&
		    ((size_t)src->type >= RTE_DIM(rte_flow_desc_item) ||
		     !rte_flow_desc_item[src->type].name))
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, src,
				 "cannot convert unknown item type");
		if (size >= off + sizeof(*dst))
			*dst = (struct rte_flow_item){
				.type = src->type,
			};
		off += sizeof(*dst);
		if (!src->type)
			num = i + 1;
	}
	num = i;
	src -= num;
	dst -= num;
	do {
		if (src->spec) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_SPEC);
			if (size && size >= off + ret)
				dst->spec = (void *)(data + off);
			off += ret;
		}
		if (src->last) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_LAST);
			if (size && size >= off + ret)
				dst->last = (void *)(data + off);
			off += ret;
		}
		if (src->mask) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_MASK);
			if (size && size >= off + ret)
				dst->mask = (void *)(data + off);
			off += ret;
		}
		++src;
		++dst;
	} while (--num);
	return off;
}

/**
 * Copy a list of actions.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source actions.
 * @param num
 *   Maximum number of actions to process from @p src or 0 to process the
 *   entire list. In both cases, processing stops after
 *   RTE_FLOW_ACTION_TYPE_END is encountered.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store
 *   actions regardless of @p size on success (@p dst contents are truncated
 *   to @p size if not large enough), a negative errno value otherwise and
 *   rte_errno is set.
 */
static int
rte_flow_conv_actions(struct rte_flow_action *dst,
		      const size_t size,
		      const struct rte_flow_action *src,
		      unsigned int num,
		      struct rte_flow_error *error)
{
	uintptr_t data = (uintptr_t)dst;
	size_t off;
	size_t ret;
	unsigned int i;

	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
		/* Allow PMD-private flow actions (negative type values). */
		if (((int)src->type >= 0) &&
		    ((size_t)src->type >= RTE_DIM(rte_flow_desc_action) ||
		     !rte_flow_desc_action[src->type].name))
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
				 src, "cannot convert unknown action type");
		if (size >= off + sizeof(*dst))
			*dst = (struct rte_flow_action){
				.type = src->type,
			};
		off += sizeof(*dst);
		if (!src->type)
			num = i + 1;
	}
	num = i;
	src -= num;
	dst -= num;
	do {
		if (src->type == RTE_FLOW_ACTION_TYPE_INDIRECT) {
			/*
			 * An indirect action's conf is the indirect action
			 * handle itself. Copy the handle directly instead
			 * of duplicating the pointed-to memory.
			 */
			if (size)
				dst->conf = src->conf;
		} else if (src->conf) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_action_conf
				((void *)(data + off),
				 size > off ? size - off : 0, src);
			if (size && size >= off + ret)
				dst->conf = (void *)(data + off);
			off += ret;
		}
		++src;
		++dst;
	} while (--num);
	return off;
}

/**
 * Copy flow rule components.
 *
 * This comprises the flow rule descriptor itself, attributes, pattern and
 * actions list. NULL components in @p src are skipped.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source flow rule descriptor.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store all
 *   components including the descriptor regardless of @p size on success
 *   (@p dst contents are truncated to @p size if not large enough), a
 *   negative errno value otherwise and rte_errno is set.
 */
static int
rte_flow_conv_rule(struct rte_flow_conv_rule *dst,
		   const size_t size,
		   const struct rte_flow_conv_rule *src,
		   struct rte_flow_error *error)
{
	size_t off;
	int ret;

	rte_memcpy(dst,
		   (&(struct rte_flow_conv_rule){
			.attr = NULL,
			.pattern = NULL,
			.actions = NULL,
		   }),
		   size > sizeof(*dst) ? sizeof(*dst) : size);
	off = sizeof(*dst);
	if (src->attr_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		if (size && size >= off + sizeof(*dst->attr))
			dst->attr = rte_memcpy
				((void *)((uintptr_t)dst + off),
				 src->attr_ro, sizeof(*dst->attr));
		off += sizeof(*dst->attr);
	}
	if (src->pattern_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		ret = rte_flow_conv_pattern((void *)((uintptr_t)dst + off),
					    size > off ? size - off : 0,
					    src->pattern_ro, 0, error);
		if (ret < 0)
			return ret;
		if (size && size >= off + (size_t)ret)
			dst->pattern = (void *)((uintptr_t)dst + off);
		off += ret;
	}
	if (src->actions_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		ret = rte_flow_conv_actions((void *)((uintptr_t)dst + off),
					    size > off ? size - off : 0,
					    src->actions_ro, 0, error);
		if (ret < 0)
			return ret;
		if (size >= off + (size_t)ret)
			dst->actions = (void *)((uintptr_t)dst + off);
		off += ret;
	}
	return off;
}

/**
 * Retrieve the name of a pattern item/action type.
 *
 * @param is_action
 *   Nonzero when @p src represents an action type instead of a pattern item
 *   type.
 * @param is_ptr
 *   Nonzero to write string address instead of contents into @p dst.
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Depending on @p is_action, source pattern item or action type cast as a
 *   pointer.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store the
 *   name or its address regardless of @p size on success (@p dst contents
 *   are truncated to @p size if not large enough), a negative errno value
 *   otherwise and rte_errno is set.
 */
static int
rte_flow_conv_name(int is_action,
		   int is_ptr,
		   char *dst,
		   const size_t size,
		   const void *src,
		   struct rte_flow_error *error)
{
	struct desc_info {
		const struct rte_flow_desc_data *data;
		size_t num;
	};
	static const struct desc_info info_rep[2] = {
		{ rte_flow_desc_item, RTE_DIM(rte_flow_desc_item), },
		{ rte_flow_desc_action, RTE_DIM(rte_flow_desc_action), },
	};
	const struct desc_info *const info = &info_rep[!!is_action];
	unsigned int type = (uintptr_t)src;

	if (type >= info->num)
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			 "unknown object type to retrieve the name of");
	if (!is_ptr)
		return strlcpy(dst, info->data[type].name, size);
	if (size >= sizeof(const char **))
		*((const char **)dst) = info->data[type].name;
	return sizeof(const char **);
}

/** Helper function to convert flow API objects. */
int
rte_flow_conv(enum rte_flow_conv_op op,
	      void *dst,
	      size_t size,
	      const void *src,
	      struct rte_flow_error *error)
{
	int ret;

	switch (op) {
		const struct rte_flow_attr *attr;

	case RTE_FLOW_CONV_OP_NONE:
		ret = 0;
		break;
	case RTE_FLOW_CONV_OP_ATTR:
		attr = src;
		if (size > sizeof(*attr))
			size = sizeof(*attr);
		rte_memcpy(dst, attr, size);
		ret = sizeof(*attr);
		break;
	case RTE_FLOW_CONV_OP_ITEM:
		ret = rte_flow_conv_pattern(dst, size, src, 1, error);
		break;
	case RTE_FLOW_CONV_OP_ACTION:
		ret = rte_flow_conv_actions(dst, size, src, 1, error);
		break;
	case RTE_FLOW_CONV_OP_PATTERN:
		ret = rte_flow_conv_pattern(dst, size, src, 0, error);
		break;
	case RTE_FLOW_CONV_OP_ACTIONS:
		ret = rte_flow_conv_actions(dst, size, src, 0, error);
		break;
	case RTE_FLOW_CONV_OP_RULE:
		ret = rte_flow_conv_rule(dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ITEM_NAME:
		ret = rte_flow_conv_name(0, 0, dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ACTION_NAME:
		ret = rte_flow_conv_name(1, 0, dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ITEM_NAME_PTR:
		ret = rte_flow_conv_name(0, 1, dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ACTION_NAME_PTR:
		ret = rte_flow_conv_name(1, 1, dst, size, src, error);
		break;
	default:
		ret = rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			 "unknown object conversion operation");
	}

	rte_flow_trace_conv(op, dst, size, src, ret);

	return ret;
}

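/*
 * A two-pass usage sketch for rte_flow_conv() (hypothetical application
 * code; "rule" is a populated struct rte_flow_conv_rule): query the
 * required size first, then convert into a buffer of that size.
 *
 * @code
 *	int len = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, &err);
 *
 *	if (len > 0) {
 *		void *buf = malloc(len);
 *
 *		if (buf != NULL &&
 *		    rte_flow_conv(RTE_FLOW_CONV_OP_RULE, buf, len,
 *				  &rule, &err) == len)
 *			; // buf now holds a self-contained copy of the rule
 *	}
 * @endcode
 */
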
/** Store a full rte_flow description. */
size_t
rte_flow_copy(struct rte_flow_desc *desc, size_t len,
	      const struct rte_flow_attr *attr,
	      const struct rte_flow_item *items,
	      const struct rte_flow_action *actions)
{
	/*
	 * Overlap struct rte_flow_conv_rule with struct rte_flow_desc in
	 * order to convert the former to the latter without wasting space.
	 */
	struct rte_flow_conv_rule *dst =
		len ?
		(void *)((uintptr_t)desc +
			 (offsetof(struct rte_flow_desc, actions) -
			  offsetof(struct rte_flow_conv_rule, actions))) :
		NULL;
	size_t dst_size =
		len > sizeof(*desc) - sizeof(*dst) ?
		len - (sizeof(*desc) - sizeof(*dst)) :
		0;
	struct rte_flow_conv_rule src = {
		.attr_ro = NULL,
		.pattern_ro = items,
		.actions_ro = actions,
	};
	int ret;

	RTE_BUILD_BUG_ON(sizeof(struct rte_flow_desc) <
			 sizeof(struct rte_flow_conv_rule));
	if (dst_size &&
	    (&dst->pattern != &desc->items ||
	     &dst->actions != &desc->actions ||
	     (uintptr_t)(dst + 1) != (uintptr_t)(desc + 1))) {
		rte_errno = EINVAL;
		return 0;
	}
	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, dst, dst_size, &src, NULL);
	if (ret < 0)
		return 0;
	ret += sizeof(*desc) - sizeof(*dst);
	rte_memcpy(desc,
		   (&(struct rte_flow_desc){
			.size = ret,
			.attr = *attr,
			.items = dst_size ? dst->pattern : NULL,
			.actions = dst_size ? dst->actions : NULL,
		   }),
		   len > sizeof(*desc) ? sizeof(*desc) : len);

	rte_flow_trace_copy(desc, len, attr, items, actions, ret);

	return ret;
}

int
rte_flow_dev_dump(uint16_t port_id, struct rte_flow *flow,
			FILE *file, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->dev_dump)) {
		fts_enter(dev);
		ret = ops->dev_dump(dev, flow, file, error);
		fts_exit(dev);
		return flow_err(port_id, ret, error);
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

int
rte_flow_get_aged_flows(uint16_t port_id, void **contexts,
		    uint32_t nb_contexts, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->get_aged_flows)) {
		fts_enter(dev);
		ret = ops->get_aged_flows(dev, contexts, nb_contexts, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_get_aged_flows(port_id, contexts, nb_contexts, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

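/*
 * A polling sketch for aged-out flows (hypothetical application code):
 * the returned contexts are the values the application supplied in
 * struct rte_flow_action_age when the rules were created.
 *
 * @code
 *	void *contexts[32];
 *	int n = rte_flow_get_aged_flows(port_id, contexts,
 *					RTE_DIM(contexts), &err);
 *
 *	for (int i = 0; i < n; i++)
 *		handle_aged_flow(contexts[i]); // application-defined helper
 * @endcode
 */
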
int
rte_flow_get_q_aged_flows(uint16_t port_id, uint32_t queue_id, void **contexts,
			  uint32_t nb_contexts, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->get_q_aged_flows)) {
		fts_enter(dev);
		ret = ops->get_q_aged_flows(dev, queue_id, contexts,
					    nb_contexts, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_get_q_aged_flows(port_id, queue_id, contexts,
						nb_contexts, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

struct rte_flow_action_handle *
rte_flow_action_handle_create(uint16_t port_id,
			      const struct rte_flow_indir_action_conf *conf,
			      const struct rte_flow_action *action,
			      struct rte_flow_error *error)
{
	struct rte_flow_action_handle *handle;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return NULL;
	if (unlikely(!ops->action_handle_create)) {
		rte_flow_error_set(error, ENOSYS,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   rte_strerror(ENOSYS));
		return NULL;
	}
	handle = ops->action_handle_create(&rte_eth_devices[port_id],
					   conf, action, error);
	if (handle == NULL)
		flow_err(port_id, -rte_errno, error);

	rte_flow_trace_action_handle_create(port_id, conf, action, handle);

	return handle;
}

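/*
 * An indirect action usage sketch (hypothetical application code): create
 * a shared counter once, then reference it from rules through
 * RTE_FLOW_ACTION_TYPE_INDIRECT, whose conf is the handle itself (see the
 * note in rte_flow_desc_action[] above).
 *
 * @code
 *	struct rte_flow_indir_action_conf iconf = { .ingress = 1 };
 *	struct rte_flow_action count = { .type = RTE_FLOW_ACTION_TYPE_COUNT };
 *	struct rte_flow_action_handle *handle =
 *		rte_flow_action_handle_create(port_id, &iconf, &count, &err);
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_INDIRECT, .conf = handle },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 * @endcode
 */
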
int
rte_flow_action_handle_destroy(uint16_t port_id,
			       struct rte_flow_action_handle *handle,
			       struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(!ops->action_handle_destroy))
		return rte_flow_error_set(error, ENOSYS,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOSYS));
	ret = ops->action_handle_destroy(&rte_eth_devices[port_id],
					 handle, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_action_handle_destroy(port_id, handle, ret);

	return ret;
}

int
rte_flow_action_handle_update(uint16_t port_id,
			      struct rte_flow_action_handle *handle,
			      const void *update,
			      struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(!ops->action_handle_update))
		return rte_flow_error_set(error, ENOSYS,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOSYS));
	ret = ops->action_handle_update(&rte_eth_devices[port_id], handle,
					update, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_action_handle_update(port_id, handle, update, ret);

	return ret;
}

int
rte_flow_action_handle_query(uint16_t port_id,
			     const struct rte_flow_action_handle *handle,
			     void *data,
			     struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(!ops->action_handle_query))
		return rte_flow_error_set(error, ENOSYS,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOSYS));
	ret = ops->action_handle_query(&rte_eth_devices[port_id], handle,
				       data, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_action_handle_query(port_id, handle, data, ret);

	return ret;
}

int
rte_flow_tunnel_decap_set(uint16_t port_id,
			  struct rte_flow_tunnel *tunnel,
			  struct rte_flow_action **actions,
			  uint32_t *num_of_actions,
			  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_decap_set)) {
		ret = flow_err(port_id,
			       ops->tunnel_decap_set(dev, tunnel, actions,
						     num_of_actions, error),
			       error);

		rte_flow_trace_tunnel_decap_set(port_id, tunnel, actions,
						num_of_actions, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_tunnel_match(uint16_t port_id,
		      struct rte_flow_tunnel *tunnel,
		      struct rte_flow_item **items,
		      uint32_t *num_of_items,
		      struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_match)) {
		ret = flow_err(port_id,
			       ops->tunnel_match(dev, tunnel, items,
						 num_of_items, error),
			       error);

		rte_flow_trace_tunnel_match(port_id, tunnel, items, num_of_items,
					    ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_get_restore_info(uint16_t port_id,
			  struct rte_mbuf *m,
			  struct rte_flow_restore_info *restore_info,
			  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->get_restore_info)) {
		ret = flow_err(port_id,
			       ops->get_restore_info(dev, m, restore_info,
						     error),
			       error);

		rte_flow_trace_get_restore_info(port_id, m, restore_info, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_tunnel_action_decap_release(uint16_t port_id,
				     struct rte_flow_action *actions,
				     uint32_t num_of_actions,
				     struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_action_decap_release)) {
		ret = flow_err(port_id,
			       ops->tunnel_action_decap_release(dev, actions,
								num_of_actions,
								error),
			       error);

		rte_flow_trace_tunnel_action_decap_release(port_id, actions,
							   num_of_actions, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_tunnel_item_release(uint16_t port_id,
			     struct rte_flow_item *items,
			     uint32_t num_of_items,
			     struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_item_release)) {
		ret = flow_err(port_id,
			       ops->tunnel_item_release(dev, items,
							num_of_items, error),
			       error);

		rte_flow_trace_tunnel_item_release(port_id, items, num_of_items, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_pick_transfer_proxy(uint16_t port_id, uint16_t *proxy_port_id,
			     struct rte_flow_error *error)
{
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_eth_dev *dev;
	int ret;

	if (unlikely(ops == NULL))
		return -rte_errno;

	if (ops->pick_transfer_proxy == NULL) {
		*proxy_port_id = port_id;
		return 0;
	}

	dev = &rte_eth_devices[port_id];

	ret = flow_err(port_id,
		       ops->pick_transfer_proxy(dev, proxy_port_id, error),
		       error);

	rte_flow_trace_pick_transfer_proxy(port_id, proxy_port_id, ret);

	return ret;
}

struct rte_flow_item_flex_handle *
rte_flow_flex_item_create(uint16_t port_id,
			  const struct rte_flow_item_flex_conf *conf,
			  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow_item_flex_handle *handle;

	if (unlikely(!ops))
		return NULL;
	if (unlikely(!ops->flex_item_create)) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, rte_strerror(ENOTSUP));
		return NULL;
	}
	handle = ops->flex_item_create(dev, conf, error);
	if (handle == NULL)
		flow_err(port_id, -rte_errno, error);

	rte_flow_trace_flex_item_create(port_id, conf, handle);

	return handle;
}

int
rte_flow_flex_item_release(uint16_t port_id,
			   const struct rte_flow_item_flex_handle *handle,
			   struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops || !ops->flex_item_release))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOTSUP));
	ret = ops->flex_item_release(dev, handle, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_flex_item_release(port_id, handle, ret);

	return ret;
}

int
rte_flow_info_get(uint16_t port_id,
		  struct rte_flow_port_info *port_info,
		  struct rte_flow_queue_info *queue_info,
		  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (dev->data->dev_configured == 0) {
		RTE_FLOW_LOG(INFO,
			"Device with port_id=%"PRIu16" is not configured.\n",
			port_id);
		return -EINVAL;
	}
	if (port_info == NULL) {
		RTE_FLOW_LOG(ERR, "Port %"PRIu16" info is NULL.\n", port_id);
		return -EINVAL;
	}
	if (likely(!!ops->info_get)) {
		ret = flow_err(port_id,
			       ops->info_get(dev, port_info, queue_info, error),
			       error);

		rte_flow_trace_info_get(port_id, port_info, queue_info, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_configure(uint16_t port_id,
		   const struct rte_flow_port_attr *port_attr,
		   uint16_t nb_queue,
		   const struct rte_flow_queue_attr *queue_attr[],
		   struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (dev->data->dev_configured == 0) {
		RTE_FLOW_LOG(INFO,
			"Device with port_id=%"PRIu16" is not configured.\n",
			port_id);
		return -EINVAL;
	}
	if (dev->data->dev_started != 0) {
		RTE_FLOW_LOG(INFO,
			"Device with port_id=%"PRIu16" already started.\n",
			port_id);
		return -EINVAL;
	}
	if (port_attr == NULL) {
		RTE_FLOW_LOG(ERR, "Port %"PRIu16" info is NULL.\n", port_id);
		return -EINVAL;
	}
	if (queue_attr == NULL) {
		RTE_FLOW_LOG(ERR, "Port %"PRIu16" queue info is NULL.\n", port_id);
		return -EINVAL;
	}
	if ((port_attr->flags & RTE_FLOW_PORT_FLAG_SHARE_INDIRECT) &&
	     !rte_eth_dev_is_valid_port(port_attr->host_port_id)) {
		return rte_flow_error_set(error, ENODEV,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENODEV));
	}
	if (likely(!!ops->configure)) {
		ret = ops->configure(dev, port_attr, nb_queue, queue_attr, error);
		if (ret == 0)
			dev->data->flow_configured = 1;
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_configure(port_id, port_attr, nb_queue, queue_attr, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

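/*
 * A pre-start configuration sketch for the template-based flow API
 * (hypothetical application code; the sizes are arbitrary): as the checks
 * above enforce, this must run after rte_eth_dev_configure() and before
 * rte_eth_dev_start().
 *
 * @code
 *	const struct rte_flow_queue_attr qattr = { .size = 64 };
 *	const struct rte_flow_queue_attr *qattrs[] = { &qattr };
 *	struct rte_flow_port_attr pattr = { .nb_counters = 128 };
 *
 *	if (rte_flow_configure(port_id, &pattr, 1, qattrs, &err) != 0)
 *		rte_exit(EXIT_FAILURE, "flow engine configuration failed");
 * @endcode
 */
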
1662 struct rte_flow_pattern_template *
1663 rte_flow_pattern_template_create(uint16_t port_id,
1664 		const struct rte_flow_pattern_template_attr *template_attr,
1665 		const struct rte_flow_item pattern[],
1666 		struct rte_flow_error *error)
1667 {
1668 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1669 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1670 	struct rte_flow_pattern_template *template;
1671 
1672 	if (unlikely(!ops))
1673 		return NULL;
1674 	if (dev->data->flow_configured == 0) {
1675 		RTE_FLOW_LOG(INFO,
1676 			"Flow engine on port_id=%"PRIu16" is not configured.\n",
1677 			port_id);
1678 		rte_flow_error_set(error, EINVAL,
1679 				RTE_FLOW_ERROR_TYPE_STATE,
1680 				NULL, rte_strerror(EINVAL));
1681 		return NULL;
1682 	}
1683 	if (template_attr == NULL) {
1684 		RTE_FLOW_LOG(ERR,
1685 			     "Port %"PRIu16" template attr is NULL.\n",
1686 			     port_id);
1687 		rte_flow_error_set(error, EINVAL,
1688 				   RTE_FLOW_ERROR_TYPE_ATTR,
1689 				   NULL, rte_strerror(EINVAL));
1690 		return NULL;
1691 	}
1692 	if (pattern == NULL) {
1693 		RTE_FLOW_LOG(ERR,
1694 			     "Port %"PRIu16" pattern is NULL.\n",
1695 			     port_id);
1696 		rte_flow_error_set(error, EINVAL,
1697 				   RTE_FLOW_ERROR_TYPE_ATTR,
1698 				   NULL, rte_strerror(EINVAL));
1699 		return NULL;
1700 	}
1701 	if (likely(!!ops->pattern_template_create)) {
1702 		template = ops->pattern_template_create(dev, template_attr,
1703 							pattern, error);
1704 		if (template == NULL)
1705 			flow_err(port_id, -rte_errno, error);
1706 
1707 		rte_flow_trace_pattern_template_create(port_id, template_attr,
1708 						       pattern, template);
1709 
1710 		return template;
1711 	}
1712 	rte_flow_error_set(error, ENOTSUP,
1713 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1714 			   NULL, rte_strerror(ENOTSUP));
1715 	return NULL;
1716 }
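
/*
 * Usage sketch (illustrative; assumes the port_id and error variables
 * from the sketch above). In a pattern template the item masks declare
 * which fields later rules will match on; the IPv4 destination mask
 * below is a hypothetical choice:
 *
 *	static const struct rte_flow_item_ipv4 ipv4_mask = {
 *		.hdr.dst_addr = RTE_BE32(0xffffffff),
 *	};
 *	const struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .mask = &ipv4_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	const struct rte_flow_pattern_template_attr pt_attr = {
 *		.ingress = 1,
 *	};
 *	struct rte_flow_pattern_template *pt =
 *		rte_flow_pattern_template_create(port_id, &pt_attr, pattern,
 *						 &error);
 */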
1717 
1718 int
1719 rte_flow_pattern_template_destroy(uint16_t port_id,
1720 		struct rte_flow_pattern_template *pattern_template,
1721 		struct rte_flow_error *error)
1722 {
1723 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1724 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1725 	int ret;
1726 
1727 	if (unlikely(!ops))
1728 		return -rte_errno;
1729 	if (unlikely(pattern_template == NULL))
1730 		return 0;
1731 	if (likely(!!ops->pattern_template_destroy)) {
1732 		ret = flow_err(port_id,
1733 			       ops->pattern_template_destroy(dev,
1734 							     pattern_template,
1735 							     error),
1736 			       error);
1737 
1738 		rte_flow_trace_pattern_template_destroy(port_id, pattern_template,
1739 							ret);
1740 
1741 		return ret;
1742 	}
1743 	return rte_flow_error_set(error, ENOTSUP,
1744 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1745 				  NULL, rte_strerror(ENOTSUP));
1746 }
1747 
1748 struct rte_flow_actions_template *
1749 rte_flow_actions_template_create(uint16_t port_id,
1750 			const struct rte_flow_actions_template_attr *template_attr,
1751 			const struct rte_flow_action actions[],
1752 			const struct rte_flow_action masks[],
1753 			struct rte_flow_error *error)
1754 {
1755 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1756 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1757 	struct rte_flow_actions_template *template;
1758 
1759 	if (unlikely(!ops))
1760 		return NULL;
1761 	if (dev->data->flow_configured == 0) {
1762 		RTE_FLOW_LOG(INFO,
1763 			"Flow engine on port_id=%"PRIu16" is not configured.\n",
1764 			port_id);
1765 		rte_flow_error_set(error, EINVAL,
1766 				   RTE_FLOW_ERROR_TYPE_STATE,
1767 				   NULL, rte_strerror(EINVAL));
1768 		return NULL;
1769 	}
1770 	if (template_attr == NULL) {
1771 		RTE_FLOW_LOG(ERR,
1772 			     "Port %"PRIu16" template attr is NULL.\n",
1773 			     port_id);
1774 		rte_flow_error_set(error, EINVAL,
1775 				   RTE_FLOW_ERROR_TYPE_ATTR,
1776 				   NULL, rte_strerror(EINVAL));
1777 		return NULL;
1778 	}
1779 	if (actions == NULL) {
1780 		RTE_FLOW_LOG(ERR,
1781 			     "Port %"PRIu16" actions array is NULL.\n",
1782 			     port_id);
1783 		rte_flow_error_set(error, EINVAL,
1784 				   RTE_FLOW_ERROR_TYPE_ATTR,
1785 				   NULL, rte_strerror(EINVAL));
1786 		return NULL;
1787 	}
1788 	if (masks == NULL) {
1789 		RTE_FLOW_LOG(ERR,
1790 			     "Port %"PRIu16" masks array is NULL.\n",
1791 			     port_id);
1792 		rte_flow_error_set(error, EINVAL,
1793 				   RTE_FLOW_ERROR_TYPE_ATTR,
1794 				   NULL, rte_strerror(EINVAL));
1795 		return NULL;
1796 	}
1797 	if (likely(!!ops->actions_template_create)) {
1798 		template = ops->actions_template_create(dev, template_attr,
1799 							actions, masks, error);
1800 		if (template == NULL)
1801 			flow_err(port_id, -rte_errno, error);
1802 
1803 		rte_flow_trace_actions_template_create(port_id, template_attr, actions,
1804 						       masks, template);
1805 
1806 		return template;
1807 	}
1808 	rte_flow_error_set(error, ENOTSUP,
1809 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1810 			   NULL, rte_strerror(ENOTSUP));
1811 	return NULL;
1812 }
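
/*
 * Usage sketch (illustrative): "masks" marks which action fields are
 * fixed at template creation; a zeroed or NULL mask conf leaves the
 * field to be supplied per rule at enqueue time. Here the QUEUE index
 * stays variable:
 *
 *	const struct rte_flow_action tmpl_actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	const struct rte_flow_action tmpl_masks[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	const struct rte_flow_actions_template_attr at_attr = {
 *		.ingress = 1,
 *	};
 *	struct rte_flow_actions_template *at =
 *		rte_flow_actions_template_create(port_id, &at_attr,
 *						 tmpl_actions, tmpl_masks,
 *						 &error);
 */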
1813 
1814 int
1815 rte_flow_actions_template_destroy(uint16_t port_id,
1816 			struct rte_flow_actions_template *actions_template,
1817 			struct rte_flow_error *error)
1818 {
1819 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1820 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1821 	int ret;
1822 
1823 	if (unlikely(!ops))
1824 		return -rte_errno;
1825 	if (unlikely(actions_template == NULL))
1826 		return 0;
1827 	if (likely(!!ops->actions_template_destroy)) {
1828 		ret = flow_err(port_id,
1829 			       ops->actions_template_destroy(dev,
1830 							     actions_template,
1831 							     error),
1832 			       error);
1833 
1834 		rte_flow_trace_actions_template_destroy(port_id, actions_template,
1835 							ret);
1836 
1837 		return ret;
1838 	}
1839 	return rte_flow_error_set(error, ENOTSUP,
1840 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1841 				  NULL, rte_strerror(ENOTSUP));
1842 }
1843 
1844 struct rte_flow_template_table *
1845 rte_flow_template_table_create(uint16_t port_id,
1846 			const struct rte_flow_template_table_attr *table_attr,
1847 			struct rte_flow_pattern_template *pattern_templates[],
1848 			uint8_t nb_pattern_templates,
1849 			struct rte_flow_actions_template *actions_templates[],
1850 			uint8_t nb_actions_templates,
1851 			struct rte_flow_error *error)
1852 {
1853 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1854 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1855 	struct rte_flow_template_table *table;
1856 
1857 	if (unlikely(!ops))
1858 		return NULL;
1859 	if (dev->data->flow_configured == 0) {
1860 		RTE_FLOW_LOG(INFO,
1861 			"Flow engine on port_id=%"PRIu16" is not configured.\n",
1862 			port_id);
1863 		rte_flow_error_set(error, EINVAL,
1864 				   RTE_FLOW_ERROR_TYPE_STATE,
1865 				   NULL, rte_strerror(EINVAL));
1866 		return NULL;
1867 	}
1868 	if (table_attr == NULL) {
1869 		RTE_FLOW_LOG(ERR,
1870 			     "Port %"PRIu16" table attr is NULL.\n",
1871 			     port_id);
1872 		rte_flow_error_set(error, EINVAL,
1873 				   RTE_FLOW_ERROR_TYPE_ATTR,
1874 				   NULL, rte_strerror(EINVAL));
1875 		return NULL;
1876 	}
1877 	if (pattern_templates == NULL) {
1878 		RTE_FLOW_LOG(ERR,
1879 			     "Port %"PRIu16" pattern templates array is NULL.\n",
1880 			     port_id);
1881 		rte_flow_error_set(error, EINVAL,
1882 				   RTE_FLOW_ERROR_TYPE_ATTR,
1883 				   NULL, rte_strerror(EINVAL));
1884 		return NULL;
1885 	}
1886 	if (actions_templates == NULL) {
1887 		RTE_FLOW_LOG(ERR,
1888 			     "Port %"PRIu16" actions templates array is NULL.\n",
1889 			     port_id);
1890 		rte_flow_error_set(error, EINVAL,
1891 				   RTE_FLOW_ERROR_TYPE_ATTR,
1892 				   NULL, rte_strerror(EINVAL));
1893 		return NULL;
1894 	}
1895 	if (likely(!!ops->template_table_create)) {
1896 		table = ops->template_table_create(dev, table_attr,
1897 					pattern_templates, nb_pattern_templates,
1898 					actions_templates, nb_actions_templates,
1899 					error);
1900 		if (table == NULL)
1901 			flow_err(port_id, -rte_errno, error);
1902 
1903 		rte_flow_trace_template_table_create(port_id, table_attr,
1904 						     pattern_templates,
1905 						     nb_pattern_templates,
1906 						     actions_templates,
1907 						     nb_actions_templates, table);
1908 
1909 		return table;
1910 	}
1911 	rte_flow_error_set(error, ENOTSUP,
1912 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1913 			   NULL, rte_strerror(ENOTSUP));
1914 	return NULL;
1915 }
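
/*
 * Usage sketch (illustrative; reuses the pt/at handles from the
 * sketches above, and the 64k rule capacity is hypothetical):
 *
 *	const struct rte_flow_template_table_attr table_attr = {
 *		.flow_attr = { .ingress = 1 },
 *		.nb_flows = 1 << 16,
 *	};
 *	struct rte_flow_pattern_template *pt_list[] = { pt };
 *	struct rte_flow_actions_template *at_list[] = { at };
 *	struct rte_flow_template_table *table =
 *		rte_flow_template_table_create(port_id, &table_attr,
 *					       pt_list, 1, at_list, 1,
 *					       &error);
 */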
1916 
1917 int
1918 rte_flow_template_table_destroy(uint16_t port_id,
1919 				struct rte_flow_template_table *template_table,
1920 				struct rte_flow_error *error)
1921 {
1922 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1923 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1924 	int ret;
1925 
1926 	if (unlikely(!ops))
1927 		return -rte_errno;
1928 	if (unlikely(template_table == NULL))
1929 		return 0;
1930 	if (likely(!!ops->template_table_destroy)) {
1931 		ret = flow_err(port_id,
1932 			       ops->template_table_destroy(dev,
1933 							   template_table,
1934 							   error),
1935 			       error);
1936 
1937 		rte_flow_trace_template_table_destroy(port_id, template_table,
1938 						      ret);
1939 
1940 		return ret;
1941 	}
1942 	return rte_flow_error_set(error, ENOTSUP,
1943 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1944 				  NULL, rte_strerror(ENOTSUP));
1945 }
1946 
1947 struct rte_flow *
1948 rte_flow_async_create(uint16_t port_id,
1949 		      uint32_t queue_id,
1950 		      const struct rte_flow_op_attr *op_attr,
1951 		      struct rte_flow_template_table *template_table,
1952 		      const struct rte_flow_item pattern[],
1953 		      uint8_t pattern_template_index,
1954 		      const struct rte_flow_action actions[],
1955 		      uint8_t actions_template_index,
1956 		      void *user_data,
1957 		      struct rte_flow_error *error)
1958 {
1959 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1960 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1961 	struct rte_flow *flow;
1962 
1963 	flow = ops->async_create(dev, queue_id,
1964 				 op_attr, template_table,
1965 				 pattern, pattern_template_index,
1966 				 actions, actions_template_index,
1967 				 user_data, error);
1968 	if (flow == NULL)
1969 		flow_err(port_id, -rte_errno, error);
1970 
1971 	rte_flow_trace_async_create(port_id, queue_id, op_attr, template_table,
1972 				    pattern, pattern_template_index, actions,
1973 				    actions_template_index, user_data, flow);
1974 
1975 	return flow;
1976 }
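
/*
 * Note that, unlike the setup functions above, the async data-path
 * entry points use "ops" without a NULL check: they are only valid
 * once rte_flow_configure() has succeeded. Enqueue sketch
 * (illustrative; reuses the table from the sketch above, and the
 * address and queue index are hypothetical):
 *
 *	const struct rte_flow_item_ipv4 ipv4_spec = {
 *		.hdr.dst_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 1)),
 *	};
 *	const struct rte_flow_item rule_pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ipv4_spec },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	const struct rte_flow_action_queue queue = { .index = 3 };
 *	const struct rte_flow_action rule_actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	const struct rte_flow_op_attr op_attr = { .postpone = 1 };
 *	struct rte_flow *flow =
 *		rte_flow_async_create(port_id, 0, &op_attr, table,
 *				      rule_pattern, 0, rule_actions, 0,
 *				      NULL, &error);
 */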
1977 
1978 struct rte_flow *
1979 rte_flow_async_create_by_index(uint16_t port_id,
1980 			       uint32_t queue_id,
1981 			       const struct rte_flow_op_attr *op_attr,
1982 			       struct rte_flow_template_table *template_table,
1983 			       uint32_t rule_index,
1984 			       const struct rte_flow_action actions[],
1985 			       uint8_t actions_template_index,
1986 			       void *user_data,
1987 			       struct rte_flow_error *error)
1988 {
1989 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1990 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1991 	struct rte_flow *flow;
1992 
1993 	flow = ops->async_create_by_index(dev, queue_id,
1994 					  op_attr, template_table, rule_index,
1995 					  actions, actions_template_index,
1996 					  user_data, error);
1997 	if (flow == NULL)
1998 		flow_err(port_id, -rte_errno, error);
1999 	return flow;
2000 }
2001 
2002 int
2003 rte_flow_async_destroy(uint16_t port_id,
2004 		       uint32_t queue_id,
2005 		       const struct rte_flow_op_attr *op_attr,
2006 		       struct rte_flow *flow,
2007 		       void *user_data,
2008 		       struct rte_flow_error *error)
2009 {
2010 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2011 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2012 	int ret;
2013 
2014 	ret = flow_err(port_id,
2015 		       ops->async_destroy(dev, queue_id,
2016 					  op_attr, flow,
2017 					  user_data, error),
2018 		       error);
2019 
2020 	rte_flow_trace_async_destroy(port_id, queue_id, op_attr, flow,
2021 				     user_data, ret);
2022 
2023 	return ret;
2024 }
2025 
2026 int
2027 rte_flow_async_actions_update(uint16_t port_id,
2028 			      uint32_t queue_id,
2029 			      const struct rte_flow_op_attr *op_attr,
2030 			      struct rte_flow *flow,
2031 			      const struct rte_flow_action actions[],
2032 			      uint8_t actions_template_index,
2033 			      void *user_data,
2034 			      struct rte_flow_error *error)
2035 {
2036 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2037 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2038 	int ret;
2039 
2040 	ret = flow_err(port_id,
2041 		       ops->async_actions_update(dev, queue_id, op_attr,
2042 						 flow, actions,
2043 						 actions_template_index,
2044 						 user_data, error),
2045 		       error);
2046 
2047 	rte_flow_trace_async_actions_update(port_id, queue_id, op_attr, flow,
2048 					    actions, actions_template_index,
2049 					    user_data, ret);
2050 
2051 	return ret;
2052 }
2053 
2054 int
2055 rte_flow_push(uint16_t port_id,
2056 	      uint32_t queue_id,
2057 	      struct rte_flow_error *error)
2058 {
2059 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2060 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2061 	int ret;
2062 
2063 	ret = flow_err(port_id,
2064 		       ops->push(dev, queue_id, error),
2065 		       error);
2066 
2067 	rte_flow_trace_push(port_id, queue_id, ret);
2068 
2069 	return ret;
2070 }
2071 
2072 int
2073 rte_flow_pull(uint16_t port_id,
2074 	      uint32_t queue_id,
2075 	      struct rte_flow_op_result res[],
2076 	      uint16_t n_res,
2077 	      struct rte_flow_error *error)
2078 {
2079 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2080 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2081 	int ret;
2082 	int rc;
2083 
2084 	ret = ops->pull(dev, queue_id, res, n_res, error);
2085 	rc = ret ? ret : flow_err(port_id, ret, error);
2086 
2087 	rte_flow_trace_pull(port_id, queue_id, res, n_res, rc);
2088 
2089 	return rc;
2090 }
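
/*
 * Completion sketch (illustrative): operations enqueued with
 * .postpone = 1 are flushed with rte_flow_push(), and their results
 * drained with rte_flow_pull(), which returns the number of completed
 * operations or a negative errno. handle_failure() is a hypothetical
 * application callback:
 *
 *	struct rte_flow_op_result results[32];
 *	int n;
 *
 *	rte_flow_push(port_id, 0, &error);
 *	do {
 *		n = rte_flow_pull(port_id, 0, results, RTE_DIM(results),
 *				  &error);
 *		for (int i = 0; i < n; i++)
 *			if (results[i].status != RTE_FLOW_OP_SUCCESS)
 *				handle_failure(results[i].user_data);
 *	} while (n > 0);
 */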
2091 
2092 struct rte_flow_action_handle *
2093 rte_flow_async_action_handle_create(uint16_t port_id,
2094 		uint32_t queue_id,
2095 		const struct rte_flow_op_attr *op_attr,
2096 		const struct rte_flow_indir_action_conf *indir_action_conf,
2097 		const struct rte_flow_action *action,
2098 		void *user_data,
2099 		struct rte_flow_error *error)
2100 {
2101 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2102 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2103 	struct rte_flow_action_handle *handle;
2104 
2105 	handle = ops->async_action_handle_create(dev, queue_id, op_attr,
2106 					     indir_action_conf, action, user_data, error);
2107 	if (handle == NULL)
2108 		flow_err(port_id, -rte_errno, error);
2109 
2110 	rte_flow_trace_async_action_handle_create(port_id, queue_id, op_attr,
2111 						  indir_action_conf, action,
2112 						  user_data, handle);
2113 
2114 	return handle;
2115 }
2116 
2117 int
2118 rte_flow_async_action_handle_destroy(uint16_t port_id,
2119 		uint32_t queue_id,
2120 		const struct rte_flow_op_attr *op_attr,
2121 		struct rte_flow_action_handle *action_handle,
2122 		void *user_data,
2123 		struct rte_flow_error *error)
2124 {
2125 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2126 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2127 	int ret;
2128 
2129 	ret = ops->async_action_handle_destroy(dev, queue_id, op_attr,
2130 					   action_handle, user_data, error);
2131 	ret = flow_err(port_id, ret, error);
2132 
2133 	rte_flow_trace_async_action_handle_destroy(port_id, queue_id, op_attr,
2134 						   action_handle, user_data, ret);
2135 
2136 	return ret;
2137 }
2138 
2139 int
2140 rte_flow_async_action_handle_update(uint16_t port_id,
2141 		uint32_t queue_id,
2142 		const struct rte_flow_op_attr *op_attr,
2143 		struct rte_flow_action_handle *action_handle,
2144 		const void *update,
2145 		void *user_data,
2146 		struct rte_flow_error *error)
2147 {
2148 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2149 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2150 	int ret;
2151 
2152 	ret = ops->async_action_handle_update(dev, queue_id, op_attr,
2153 					  action_handle, update, user_data, error);
2154 	ret = flow_err(port_id, ret, error);
2155 
2156 	rte_flow_trace_async_action_handle_update(port_id, queue_id, op_attr,
2157 						  action_handle, update,
2158 						  user_data, ret);
2159 
2160 	return ret;
2161 }
2162 
2163 int
2164 rte_flow_async_action_handle_query(uint16_t port_id,
2165 		uint32_t queue_id,
2166 		const struct rte_flow_op_attr *op_attr,
2167 		const struct rte_flow_action_handle *action_handle,
2168 		void *data,
2169 		void *user_data,
2170 		struct rte_flow_error *error)
2171 {
2172 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2173 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2174 	int ret;
2175 
2176 	ret = ops->async_action_handle_query(dev, queue_id, op_attr,
2177 					  action_handle, data, user_data, error);
2178 	ret = flow_err(port_id, ret, error);
2179 
2180 	rte_flow_trace_async_action_handle_query(port_id, queue_id, op_attr,
2181 						 action_handle, data, user_data,
2182 						 ret);
2183 
2184 	return ret;
2185 }
2186 
2187 int
2188 rte_flow_action_handle_query_update(uint16_t port_id,
2189 				    struct rte_flow_action_handle *handle,
2190 				    const void *update, void *query,
2191 				    enum rte_flow_query_update_mode mode,
2192 				    struct rte_flow_error *error)
2193 {
2194 	int ret;
2195 	struct rte_eth_dev *dev;
2196 	const struct rte_flow_ops *ops;
2197 
2198 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2199 	if (!handle)
2200 		return -EINVAL;
2201 	if (!update && !query)
2202 		return -EINVAL;
2203 	dev = &rte_eth_devices[port_id];
2204 	ops = rte_flow_ops_get(port_id, error);
2205 	if (!ops || !ops->action_handle_query_update)
2206 		return -ENOTSUP;
2207 	ret = ops->action_handle_query_update(dev, handle, update,
2208 					      query, mode, error);
2209 	return flow_err(port_id, ret, error);
2210 }
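
/*
 * Usage sketch (illustrative): query and update an indirect action in
 * a single call; "mode" selects whether the PMD applies the query or
 * the update first. The update payload is action-specific, so the
 * counter reset below is only a plausible, hypothetical example:
 *
 *	struct rte_flow_query_count counters;
 *	const struct rte_flow_action_count reset = { 0 };
 *
 *	rte_flow_action_handle_query_update(port_id, handle, &reset,
 *					    &counters,
 *					    RTE_FLOW_QU_QUERY_FIRST,
 *					    &error);
 */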
2211 
2212 int
2213 rte_flow_async_action_handle_query_update(uint16_t port_id, uint32_t queue_id,
2214 					  const struct rte_flow_op_attr *attr,
2215 					  struct rte_flow_action_handle *handle,
2216 					  const void *update, void *query,
2217 					  enum rte_flow_query_update_mode mode,
2218 					  void *user_data,
2219 					  struct rte_flow_error *error)
2220 {
2221 	int ret;
2222 	struct rte_eth_dev *dev;
2223 	const struct rte_flow_ops *ops;
2224 
2225 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2226 	if (!handle)
2227 		return -EINVAL;
2228 	if (!update && !query)
2229 		return -EINVAL;
2230 	dev = &rte_eth_devices[port_id];
2231 	ops = rte_flow_ops_get(port_id, error);
2232 	if (!ops || !ops->async_action_handle_query_update)
2233 		return -ENOTSUP;
2234 	ret = ops->async_action_handle_query_update(dev, queue_id, attr,
2235 						    handle, update,
2236 						    query, mode,
2237 						    user_data, error);
2238 	return flow_err(port_id, ret, error);
2239 }
2240 
2241 struct rte_flow_action_list_handle *
2242 rte_flow_action_list_handle_create(uint16_t port_id,
2243 				   const
2244 				   struct rte_flow_indir_action_conf *conf,
2245 				   const struct rte_flow_action *actions,
2246 				   struct rte_flow_error *error)
2247 {
2248 	int ret;
2249 	struct rte_eth_dev *dev;
2250 	const struct rte_flow_ops *ops;
2251 	struct rte_flow_action_list_handle *handle;
2252 
2253 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
2254 	ops = rte_flow_ops_get(port_id, error);
2255 	if (!ops || !ops->action_list_handle_create) {
2256 		rte_flow_error_set(error, ENOTSUP,
2257 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2258 				   "action_list handle not supported");
2259 		return NULL;
2260 	}
2261 	dev = &rte_eth_devices[port_id];
2262 	handle = ops->action_list_handle_create(dev, conf, actions, error);
2263 	ret = flow_err(port_id, -rte_errno, error);
2264 	rte_flow_trace_action_list_handle_create(port_id, conf, actions, ret);
2265 	return handle;
2266 }
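
/*
 * Usage sketch (illustrative): an action list handle wraps an ordered
 * group of actions behind one indirect handle; the count + queue
 * combination below is a hypothetical example and PMD support varies:
 *
 *	const struct rte_flow_indir_action_conf conf = { .ingress = 1 };
 *	const struct rte_flow_action_queue queue = { .index = 0 };
 *	const struct rte_flow_action list[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_action_list_handle *h =
 *		rte_flow_action_list_handle_create(port_id, &conf, list,
 *						   &error);
 */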
2267 
2268 int
2269 rte_flow_action_list_handle_destroy(uint16_t port_id,
2270 				    struct rte_flow_action_list_handle *handle,
2271 				    struct rte_flow_error *error)
2272 {
2273 	int ret;
2274 	struct rte_eth_dev *dev;
2275 	const struct rte_flow_ops *ops;
2276 
2277 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2278 	ops = rte_flow_ops_get(port_id, error);
2279 	if (!ops || !ops->action_list_handle_destroy)
2280 		return rte_flow_error_set(error, ENOTSUP,
2281 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2282 					  "action_list handle not supported");
2283 	dev = &rte_eth_devices[port_id];
2284 	ret = ops->action_list_handle_destroy(dev, handle, error);
2285 	ret = flow_err(port_id, ret, error);
2286 	rte_flow_trace_action_list_handle_destroy(port_id, handle, ret);
2287 	return ret;
2288 }
2289 
2290 struct rte_flow_action_list_handle *
2291 rte_flow_async_action_list_handle_create(uint16_t port_id, uint32_t queue_id,
2292 					 const struct rte_flow_op_attr *attr,
2293 					 const struct rte_flow_indir_action_conf *conf,
2294 					 const struct rte_flow_action *actions,
2295 					 void *user_data,
2296 					 struct rte_flow_error *error)
2297 {
2298 	int ret;
2299 	struct rte_eth_dev *dev;
2300 	const struct rte_flow_ops *ops;
2301 	struct rte_flow_action_list_handle *handle;
2302 
2303 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
2304 	ops = rte_flow_ops_get(port_id, error);
2305 	if (!ops || !ops->async_action_list_handle_create) {
2306 		rte_flow_error_set(error, ENOTSUP,
2307 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2308 				   "action_list handle not supported");
2309 		return NULL;
2310 	}
2311 	dev = &rte_eth_devices[port_id];
2312 	handle = ops->async_action_list_handle_create(dev, queue_id, attr, conf,
2313 						      actions, user_data,
2314 						      error);
2315 	ret = flow_err(port_id, -rte_errno, error);
2316 	rte_flow_trace_async_action_list_handle_create(port_id, queue_id, attr,
2317 						       conf, actions, user_data,
2318 						       ret);
2319 	return handle;
2320 }
2321 
2322 int
2323 rte_flow_async_action_list_handle_destroy(uint16_t port_id, uint32_t queue_id,
2324 				 const struct rte_flow_op_attr *op_attr,
2325 				 struct rte_flow_action_list_handle *handle,
2326 				 void *user_data, struct rte_flow_error *error)
2327 {
2328 	int ret;
2329 	struct rte_eth_dev *dev;
2330 	const struct rte_flow_ops *ops;
2331 
2332 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2333 	ops = rte_flow_ops_get(port_id, error);
2334 	if (!ops || !ops->async_action_list_handle_destroy)
2335 		return rte_flow_error_set(error, ENOTSUP,
2336 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2337 					  "async action_list handle not supported");
2338 	dev = &rte_eth_devices[port_id];
2339 	ret = ops->async_action_list_handle_destroy(dev, queue_id, op_attr,
2340 						    handle, user_data, error);
2341 	ret = flow_err(port_id, ret, error);
2342 	rte_flow_trace_async_action_list_handle_destroy(port_id, queue_id,
2343 							op_attr, handle,
2344 							user_data, ret);
2345 	return ret;
2346 }
2347 
2348 int
2349 rte_flow_action_list_handle_query_update(uint16_t port_id,
2350 			 const struct rte_flow_action_list_handle *handle,
2351 			 const void **update, void **query,
2352 			 enum rte_flow_query_update_mode mode,
2353 			 struct rte_flow_error *error)
2354 {
2355 	int ret;
2356 	struct rte_eth_dev *dev;
2357 	const struct rte_flow_ops *ops;
2358 
2359 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2360 	ops = rte_flow_ops_get(port_id, error);
2361 	if (!ops || !ops->action_list_handle_query_update)
2362 		return rte_flow_error_set(error, ENOTSUP,
2363 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2364 					  "action_list query_update not supported");
2365 	dev = &rte_eth_devices[port_id];
2366 	ret = ops->action_list_handle_query_update(dev, handle, update, query,
2367 						   mode, error);
2368 	ret = flow_err(port_id, ret, error);
2369 	rte_flow_trace_action_list_handle_query_update(port_id, handle, update,
2370 						       query, mode, ret);
2371 	return ret;
2372 }
2373 
2374 int
2375 rte_flow_async_action_list_handle_query_update(uint16_t port_id, uint32_t queue_id,
2376 			 const struct rte_flow_op_attr *attr,
2377 			 const struct rte_flow_action_list_handle *handle,
2378 			 const void **update, void **query,
2379 			 enum rte_flow_query_update_mode mode,
2380 			 void *user_data, struct rte_flow_error *error)
2381 {
2382 	int ret;
2383 	struct rte_eth_dev *dev;
2384 	const struct rte_flow_ops *ops;
2385 
2386 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2387 	ops = rte_flow_ops_get(port_id, error);
2388 	if (!ops || !ops->async_action_list_handle_query_update)
2389 		return rte_flow_error_set(error, ENOTSUP,
2390 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2391 					  "action_list async query_update not supported");
2392 	dev = &rte_eth_devices[port_id];
2393 	ret = ops->async_action_list_handle_query_update(dev, queue_id, attr,
2394 							 handle, update, query,
2395 							 mode, user_data,
2396 							 error);
2397 	ret = flow_err(port_id, ret, error);
2398 	rte_flow_trace_async_action_list_handle_query_update(port_id, queue_id,
2399 							     attr, handle,
2400 							     update, query,
2401 							     mode, user_data,
2402 							     ret);
2403 	return ret;
2404 }
2405