/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <stdalign.h>
#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <pthread.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_branch_prediction.h>
#include <rte_string_fns.h>
#include <rte_mbuf_dyn.h>
#include "rte_flow_driver.h"
#include "rte_flow.h"

#include "ethdev_trace.h"

#define FLOW_LOG RTE_ETHDEV_LOG_LINE

/* Mbuf dynamic field offset for metadata. */
int32_t rte_flow_dynf_metadata_offs = -1;

/* Mbuf dynamic flag bit mask for metadata. */
uint64_t rte_flow_dynf_metadata_mask;

/**
 * Flow elements description tables.
 */
struct rte_flow_desc_data {
	const char *name;
	size_t size;
	size_t (*desc_fn)(void *dst, const void *src);
};

/**
 * Copy flow element description data.
 *
 * @param buf
 *   Destination memory.
 * @param data
 *   Source memory.
 * @param size
 *   Requested copy size.
 * @param desc
 *   rte_flow_desc_item for flow item conversion,
 *   rte_flow_desc_action for flow action conversion.
 * @param type
 *   Offset into the @p desc table, or a negative value for PMD private
 *   flow elements.
 *
 * @return
 *   Number of bytes needed to store the element regardless of @p size.
 */
static inline size_t
rte_flow_conv_copy(void *buf, const void *data, const size_t size,
		   const struct rte_flow_desc_data *desc, int type)
{
	/* A negative type denotes a PMD private flow element. */
	bool rte_type = type >= 0;

	size_t sz = rte_type ? desc[type].size : sizeof(void *);
	if (buf == NULL || data == NULL)
		return 0;
	rte_memcpy(buf, data, (size > sz ? sz : size));
	if (rte_type && desc[type].desc_fn)
		sz += desc[type].desc_fn(size > 0 ? buf : NULL, data);
	return sz;
}

static size_t
rte_flow_item_flex_conv(void *buf, const void *data)
{
	struct rte_flow_item_flex *dst = buf;
	const struct rte_flow_item_flex *src = data;
	if (buf) {
		dst->pattern = rte_memcpy
			((void *)((uintptr_t)(dst + 1)), src->pattern,
			 src->length);
	}
	return src->length;
}

/** Generate flow_item[] entry. */
#define MK_FLOW_ITEM(t, s) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
		.desc_fn = NULL, \
	}

#define MK_FLOW_ITEM_FN(t, s, fn) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
		.desc_fn = fn, \
	}

/** Information about known flow pattern items. */
static const struct rte_flow_desc_data rte_flow_desc_item[] = {
	MK_FLOW_ITEM(END, 0),
	MK_FLOW_ITEM(VOID, 0),
	MK_FLOW_ITEM(INVERT, 0),
	MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
	MK_FLOW_ITEM(PORT_ID, sizeof(struct rte_flow_item_port_id)),
	MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)),
	MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
	MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
	MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
	MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
	MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
	MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
	MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
	MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
	MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
	MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
	MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
	MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
	MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
	MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)),
	MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(ESP, sizeof(struct rte_flow_item_esp)),
	MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
	MK_FLOW_ITEM(VXLAN_GPE, sizeof(struct rte_flow_item_vxlan_gpe)),
	MK_FLOW_ITEM(ARP_ETH_IPV4, sizeof(struct rte_flow_item_arp_eth_ipv4)),
	MK_FLOW_ITEM(IPV6_EXT, sizeof(struct rte_flow_item_ipv6_ext)),
	MK_FLOW_ITEM(IPV6_FRAG_EXT, sizeof(struct rte_flow_item_ipv6_frag_ext)),
	MK_FLOW_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
	MK_FLOW_ITEM(ICMP6_ECHO_REQUEST, sizeof(struct rte_flow_item_icmp6_echo)),
	MK_FLOW_ITEM(ICMP6_ECHO_REPLY, sizeof(struct rte_flow_item_icmp6_echo)),
	MK_FLOW_ITEM(ICMP6_ND_NS, sizeof(struct rte_flow_item_icmp6_nd_ns)),
	MK_FLOW_ITEM(ICMP6_ND_NA, sizeof(struct rte_flow_item_icmp6_nd_na)),
	MK_FLOW_ITEM(ICMP6_ND_OPT, sizeof(struct rte_flow_item_icmp6_nd_opt)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_SLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_TLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
	MK_FLOW_ITEM(MARK, sizeof(struct rte_flow_item_mark)),
	MK_FLOW_ITEM(META, sizeof(struct rte_flow_item_meta)),
	MK_FLOW_ITEM(TAG, sizeof(struct rte_flow_item_tag)),
	MK_FLOW_ITEM(GRE_KEY, sizeof(rte_be32_t)),
	MK_FLOW_ITEM(GRE_OPTION, sizeof(struct rte_flow_item_gre_opt)),
	MK_FLOW_ITEM(GTP_PSC, sizeof(struct rte_flow_item_gtp_psc)),
	MK_FLOW_ITEM(PPPOES, sizeof(struct rte_flow_item_pppoe)),
	MK_FLOW_ITEM(PPPOED, sizeof(struct rte_flow_item_pppoe)),
	MK_FLOW_ITEM(PPPOE_PROTO_ID,
		     sizeof(struct rte_flow_item_pppoe_proto_id)),
	MK_FLOW_ITEM(NSH, sizeof(struct rte_flow_item_nsh)),
	MK_FLOW_ITEM(IGMP, sizeof(struct rte_flow_item_igmp)),
	MK_FLOW_ITEM(AH, sizeof(struct rte_flow_item_ah)),
	MK_FLOW_ITEM(HIGIG2, sizeof(struct rte_flow_item_higig2_hdr)),
	MK_FLOW_ITEM(L2TPV3OIP, sizeof(struct rte_flow_item_l2tpv3oip)),
	MK_FLOW_ITEM(PFCP, sizeof(struct rte_flow_item_pfcp)),
	MK_FLOW_ITEM(ECPRI, sizeof(struct rte_flow_item_ecpri)),
	MK_FLOW_ITEM(GENEVE_OPT, sizeof(struct rte_flow_item_geneve_opt)),
	MK_FLOW_ITEM(INTEGRITY, sizeof(struct rte_flow_item_integrity)),
	MK_FLOW_ITEM(CONNTRACK, sizeof(uint32_t)),
	MK_FLOW_ITEM(PORT_REPRESENTOR, sizeof(struct rte_flow_item_ethdev)),
	MK_FLOW_ITEM(REPRESENTED_PORT, sizeof(struct rte_flow_item_ethdev)),
	MK_FLOW_ITEM_FN(FLEX, sizeof(struct rte_flow_item_flex),
			rte_flow_item_flex_conv),
	MK_FLOW_ITEM(L2TPV2, sizeof(struct rte_flow_item_l2tpv2)),
	MK_FLOW_ITEM(PPP, sizeof(struct rte_flow_item_ppp)),
	MK_FLOW_ITEM(METER_COLOR, sizeof(struct rte_flow_item_meter_color)),
	MK_FLOW_ITEM(IPV6_ROUTING_EXT, sizeof(struct rte_flow_item_ipv6_routing_ext)),
	MK_FLOW_ITEM(QUOTA, sizeof(struct rte_flow_item_quota)),
	MK_FLOW_ITEM(AGGR_AFFINITY, sizeof(struct rte_flow_item_aggr_affinity)),
	MK_FLOW_ITEM(TX_QUEUE, sizeof(struct rte_flow_item_tx_queue)),
	MK_FLOW_ITEM(IB_BTH, sizeof(struct rte_flow_item_ib_bth)),
	MK_FLOW_ITEM(PTYPE, sizeof(struct rte_flow_item_ptype)),
};

/** Generate flow_action[] entry. */
#define MK_FLOW_ACTION(t, s) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
		.desc_fn = NULL, \
	}

#define MK_FLOW_ACTION_FN(t, fn) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = 0, \
		.desc_fn = fn, \
	}

/** Information about known flow actions. */
static const struct rte_flow_desc_data rte_flow_desc_action[] = {
	MK_FLOW_ACTION(END, 0),
	MK_FLOW_ACTION(VOID, 0),
	MK_FLOW_ACTION(PASSTHRU, 0),
	MK_FLOW_ACTION(JUMP, sizeof(struct rte_flow_action_jump)),
	MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
	MK_FLOW_ACTION(FLAG, 0),
	MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
	MK_FLOW_ACTION(DROP, 0),
	MK_FLOW_ACTION(COUNT, sizeof(struct rte_flow_action_count)),
	MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)),
	MK_FLOW_ACTION(PF, 0),
	MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
	MK_FLOW_ACTION(PORT_ID, sizeof(struct rte_flow_action_port_id)),
	MK_FLOW_ACTION(METER, sizeof(struct rte_flow_action_meter)),
	MK_FLOW_ACTION(SECURITY, sizeof(struct rte_flow_action_security)),
	MK_FLOW_ACTION(OF_DEC_NW_TTL, 0),
	MK_FLOW_ACTION(OF_POP_VLAN, 0),
	MK_FLOW_ACTION(OF_PUSH_VLAN,
		       sizeof(struct rte_flow_action_of_push_vlan)),
	MK_FLOW_ACTION(OF_SET_VLAN_VID,
		       sizeof(struct rte_flow_action_of_set_vlan_vid)),
	MK_FLOW_ACTION(OF_SET_VLAN_PCP,
		       sizeof(struct rte_flow_action_of_set_vlan_pcp)),
	MK_FLOW_ACTION(OF_POP_MPLS,
		       sizeof(struct rte_flow_action_of_pop_mpls)),
	MK_FLOW_ACTION(OF_PUSH_MPLS,
		       sizeof(struct rte_flow_action_of_push_mpls)),
	MK_FLOW_ACTION(VXLAN_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
	MK_FLOW_ACTION(VXLAN_DECAP, 0),
	MK_FLOW_ACTION(NVGRE_ENCAP, sizeof(struct rte_flow_action_nvgre_encap)),
	MK_FLOW_ACTION(NVGRE_DECAP, 0),
	MK_FLOW_ACTION(RAW_ENCAP, sizeof(struct rte_flow_action_raw_encap)),
	MK_FLOW_ACTION(RAW_DECAP, sizeof(struct rte_flow_action_raw_decap)),
	MK_FLOW_ACTION(SET_IPV4_SRC,
		       sizeof(struct rte_flow_action_set_ipv4)),
	MK_FLOW_ACTION(SET_IPV4_DST,
		       sizeof(struct rte_flow_action_set_ipv4)),
	MK_FLOW_ACTION(SET_IPV6_SRC,
		       sizeof(struct rte_flow_action_set_ipv6)),
	MK_FLOW_ACTION(SET_IPV6_DST,
		       sizeof(struct rte_flow_action_set_ipv6)),
	MK_FLOW_ACTION(SET_TP_SRC,
		       sizeof(struct rte_flow_action_set_tp)),
	MK_FLOW_ACTION(SET_TP_DST,
		       sizeof(struct rte_flow_action_set_tp)),
	MK_FLOW_ACTION(MAC_SWAP, 0),
	MK_FLOW_ACTION(DEC_TTL, 0),
	MK_FLOW_ACTION(SET_TTL, sizeof(struct rte_flow_action_set_ttl)),
	MK_FLOW_ACTION(SET_MAC_SRC, sizeof(struct rte_flow_action_set_mac)),
	MK_FLOW_ACTION(SET_MAC_DST, sizeof(struct rte_flow_action_set_mac)),
	MK_FLOW_ACTION(INC_TCP_SEQ, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(DEC_TCP_SEQ, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(INC_TCP_ACK, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(DEC_TCP_ACK, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(SET_TAG, sizeof(struct rte_flow_action_set_tag)),
	MK_FLOW_ACTION(SET_META, sizeof(struct rte_flow_action_set_meta)),
	MK_FLOW_ACTION(SET_IPV4_DSCP, sizeof(struct rte_flow_action_set_dscp)),
	MK_FLOW_ACTION(SET_IPV6_DSCP, sizeof(struct rte_flow_action_set_dscp)),
	MK_FLOW_ACTION(AGE, sizeof(struct rte_flow_action_age)),
	MK_FLOW_ACTION(SAMPLE, sizeof(struct rte_flow_action_sample)),
	MK_FLOW_ACTION(MODIFY_FIELD,
		       sizeof(struct rte_flow_action_modify_field)),
	/*
	 * An indirect action is represented as a handle of type
	 * (struct rte_flow_action_handle *) stored in the conf field (see
	 * struct rte_flow_action); no additional structure is needed to
	 * store the indirect action handle.
	 */
	MK_FLOW_ACTION(INDIRECT, 0),
	MK_FLOW_ACTION(CONNTRACK, sizeof(struct rte_flow_action_conntrack)),
	MK_FLOW_ACTION(PORT_REPRESENTOR, sizeof(struct rte_flow_action_ethdev)),
	MK_FLOW_ACTION(REPRESENTED_PORT, sizeof(struct rte_flow_action_ethdev)),
	MK_FLOW_ACTION(METER_MARK, sizeof(struct rte_flow_action_meter_mark)),
	MK_FLOW_ACTION(SEND_TO_KERNEL, 0),
	MK_FLOW_ACTION(QUOTA, sizeof(struct rte_flow_action_quota)),
	MK_FLOW_ACTION(IPV6_EXT_PUSH, sizeof(struct rte_flow_action_ipv6_ext_push)),
	MK_FLOW_ACTION(IPV6_EXT_REMOVE, sizeof(struct rte_flow_action_ipv6_ext_remove)),
	MK_FLOW_ACTION(INDIRECT_LIST,
		       sizeof(struct rte_flow_action_indirect_list)),
	MK_FLOW_ACTION(PROG,
		       sizeof(struct rte_flow_action_prog)),
};

int
rte_flow_dynf_metadata_register(void)
{
	int offset;
	int flag;

	static const struct rte_mbuf_dynfield desc_offs = {
		.name = RTE_MBUF_DYNFIELD_METADATA_NAME,
		.size = sizeof(uint32_t),
		.align = alignof(uint32_t),
	};
	static const struct rte_mbuf_dynflag desc_flag = {
		.name = RTE_MBUF_DYNFLAG_METADATA_NAME,
	};

	offset = rte_mbuf_dynfield_register(&desc_offs);
	if (offset < 0)
		goto error;
	flag = rte_mbuf_dynflag_register(&desc_flag);
	if (flag < 0)
		goto error;
	rte_flow_dynf_metadata_offs = offset;
	rte_flow_dynf_metadata_mask = RTE_BIT64(flag);

	rte_flow_trace_dynf_metadata_register(offset, RTE_BIT64(flag));

	return 0;

error:
	rte_flow_dynf_metadata_offs = -1;
	rte_flow_dynf_metadata_mask = UINT64_C(0);
	return -rte_errno;
}
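
/*
 * Illustrative sketch (not part of the upstream file, guarded by a
 * hypothetical RTE_FLOW_USAGE_EXAMPLES macro so it is never built): how an
 * application might register the metadata dynamic field once at startup
 * and then read per-packet metadata on the Rx path. The value is only
 * meaningful when the metadata dynflag is set on the mbuf.
 */
#ifdef RTE_FLOW_USAGE_EXAMPLES
static void
example_read_rx_metadata(struct rte_mbuf *m)
{
	uint32_t meta;

	/* One-time registration, typically done during initialization. */
	if (!rte_flow_dynf_metadata_avail() &&
	    rte_flow_dynf_metadata_register() < 0)
		return; /* dynamic field space exhausted */
	/* Valid only when the PMD flagged this mbuf as carrying metadata. */
	if (m->ol_flags & rte_flow_dynf_metadata_mask) {
		meta = *RTE_FLOW_DYNF_METADATA(m);
		(void)meta; /* consume the value */
	}
}
#endif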

static inline void
fts_enter(struct rte_eth_dev *dev)
{
	if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
		pthread_mutex_lock(&dev->data->flow_ops_mutex);
}

static inline void
fts_exit(struct rte_eth_dev *dev)
{
	if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
		pthread_mutex_unlock(&dev->data->flow_ops_mutex);
}

static int
flow_err(uint16_t port_id, int ret, struct rte_flow_error *error)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return rte_flow_error_set(error, EIO,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(EIO));
	return ret;
}

/* Get generic flow operations structure from a port. */
const struct rte_flow_ops *
rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops;
	int code;

	if (unlikely(!rte_eth_dev_is_valid_port(port_id)))
		code = ENODEV;
	else if (unlikely(dev->dev_ops->flow_ops_get == NULL))
		/* Flow API not supported with this driver dev_ops. */
		code = ENOSYS;
	else
		code = dev->dev_ops->flow_ops_get(dev, &ops);
	if (code == 0 && ops == NULL)
		/* Flow API not supported with this device. */
		code = ENOSYS;

	if (code != 0) {
		rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, rte_strerror(code));
		return NULL;
	}
	return ops;
}
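
/*
 * Illustrative sketch (not part of the upstream file, never built): the
 * driver-side counterpart of rte_flow_ops_get(). A PMD exposes its flow
 * ops table through the flow_ops_get dev_ops callback; the names below
 * are hypothetical and a real PMD would fill in its own callbacks.
 */
#ifdef RTE_FLOW_USAGE_EXAMPLES
static const struct rte_flow_ops example_pmd_flow_ops = {
	.validate = NULL, /* a real PMD supplies its implementations here */
	.create = NULL,
	.destroy = NULL,
};

static int
example_pmd_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
			 const struct rte_flow_ops **ops)
{
	*ops = &example_pmd_flow_ops;
	return 0;
}
#endif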
362 
363 /* Check whether a flow rule can be created on a given port. */
364 int
365 rte_flow_validate(uint16_t port_id,
366 		  const struct rte_flow_attr *attr,
367 		  const struct rte_flow_item pattern[],
368 		  const struct rte_flow_action actions[],
369 		  struct rte_flow_error *error)
370 {
371 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
372 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
373 	int ret;
374 
375 	if (likely(!!attr) && attr->transfer &&
376 	    (attr->ingress || attr->egress)) {
377 		return rte_flow_error_set(error, EINVAL,
378 					  RTE_FLOW_ERROR_TYPE_ATTR,
379 					  attr, "cannot use attr ingress/egress with attr transfer");
380 	}
381 
382 	if (unlikely(!ops))
383 		return -rte_errno;
384 	if (likely(!!ops->validate)) {
385 		fts_enter(dev);
386 		ret = ops->validate(dev, attr, pattern, actions, error);
387 		fts_exit(dev);
388 		ret = flow_err(port_id, ret, error);
389 
390 		rte_flow_trace_validate(port_id, attr, pattern, actions, ret);
391 
392 		return ret;
393 	}
394 	return rte_flow_error_set(error, ENOSYS,
395 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
396 				  NULL, rte_strerror(ENOSYS));
397 }

/* Create a flow rule on a given port. */
struct rte_flow *
rte_flow_create(uint16_t port_id,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct rte_flow *flow;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return NULL;
	if (likely(!!ops->create)) {
		fts_enter(dev);
		flow = ops->create(dev, attr, pattern, actions, error);
		fts_exit(dev);
		if (flow == NULL)
			flow_err(port_id, -rte_errno, error);

		rte_flow_trace_create(port_id, attr, pattern, actions, flow);

		return flow;
	}
	rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(ENOSYS));
	return NULL;
}

/* Destroy a flow rule on a given port. */
int
rte_flow_destroy(uint16_t port_id,
		 struct rte_flow *flow,
		 struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->destroy)) {
		fts_enter(dev);
		ret = ops->destroy(dev, flow, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_destroy(port_id, flow, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}
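
/*
 * Illustrative sketch (not part of the upstream file, never built): the
 * usual validate/create/destroy life cycle for a simple "drop all TCP"
 * rule. Assumes @p port_id is a started port whose PMD implements the
 * flow API; error handling is minimal on purpose.
 */
#ifdef RTE_FLOW_USAGE_EXAMPLES
static int
example_flow_lifecycle(uint16_t port_id)
{
	struct rte_flow_attr attr = { .ingress = 1 };
	struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_TCP },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error error;
	struct rte_flow *flow;

	/* Check that the rule can be offloaded before creating it. */
	if (rte_flow_validate(port_id, &attr, pattern, actions, &error) != 0)
		return -rte_errno;
	flow = rte_flow_create(port_id, &attr, pattern, actions, &error);
	if (flow == NULL)
		return -rte_errno;
	/* ... ingress TCP traffic is now dropped by the rule ... */
	return rte_flow_destroy(port_id, flow, &error);
}
#endif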

int
rte_flow_actions_update(uint16_t port_id,
			struct rte_flow *flow,
			const struct rte_flow_action actions[],
			struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->actions_update)) {
		fts_enter(dev);
		ret = ops->actions_update(dev, flow, actions, error);
		fts_exit(dev);

		rte_flow_trace_actions_update(port_id, flow, actions, ret);

		return flow_err(port_id, ret, error);
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Destroy all flow rules associated with a port. */
int
rte_flow_flush(uint16_t port_id,
	       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->flush)) {
		fts_enter(dev);
		ret = ops->flush(dev, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_flush(port_id, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Query an existing flow rule. */
int
rte_flow_query(uint16_t port_id,
	       struct rte_flow *flow,
	       const struct rte_flow_action *action,
	       void *data,
	       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (!ops)
		return -rte_errno;
	if (likely(!!ops->query)) {
		fts_enter(dev);
		ret = ops->query(dev, flow, action, data, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_query(port_id, flow, action, data, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}
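
/*
 * Illustrative sketch (not part of the upstream file, never built):
 * querying a COUNT action attached to an existing rule. The queried
 * action type must match one of the actions the rule was created with;
 * the PMD fills the rte_flow_query_count structure passed as @p data.
 */
#ifdef RTE_FLOW_USAGE_EXAMPLES
static int
example_query_count(uint16_t port_id, struct rte_flow *flow)
{
	const struct rte_flow_action count_action = {
		.type = RTE_FLOW_ACTION_TYPE_COUNT,
	};
	struct rte_flow_query_count counters = { .reset = 0 };
	struct rte_flow_error error;
	int ret;

	ret = rte_flow_query(port_id, flow, &count_action, &counters, &error);
	if (ret == 0 && counters.hits_set)
		printf("hits: %" PRIu64 "\n", counters.hits);
	return ret;
}
#endif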

/* Restrict ingress traffic to the defined flow rules. */
int
rte_flow_isolate(uint16_t port_id,
		 int set,
		 struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (!ops)
		return -rte_errno;
	if (likely(!!ops->isolate)) {
		fts_enter(dev);
		ret = ops->isolate(dev, set, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_isolate(port_id, set, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Initialize flow error structure. */
int
rte_flow_error_set(struct rte_flow_error *error,
		   int code,
		   enum rte_flow_error_type type,
		   const void *cause,
		   const char *message)
{
	if (error) {
		*error = (struct rte_flow_error){
			.type = type,
			.cause = cause,
			.message = message,
		};
	}
	rte_errno = code;
	return -code;
}
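
/*
 * Illustrative sketch (not part of the upstream file, never built): how a
 * caller is expected to consume struct rte_flow_error after a failed
 * call. Both cause and message may be NULL, in which case rte_errno is
 * the only reliable diagnostic.
 */
#ifdef RTE_FLOW_USAGE_EXAMPLES
static void
example_report_error(const struct rte_flow_error *error, int ret)
{
	if (ret == 0)
		return;
	printf("rte_flow failure %d (error type %d): %s\n",
	       ret, (int)error->type,
	       error->message ? error->message : rte_strerror(rte_errno));
}
#endif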

/** Pattern item specification types. */
enum rte_flow_conv_item_spec_type {
	RTE_FLOW_CONV_ITEM_SPEC,
	RTE_FLOW_CONV_ITEM_LAST,
	RTE_FLOW_CONV_ITEM_MASK,
};

/**
 * Copy pattern item specification.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] item
 *   Pattern item to copy specification from.
 * @param type
 *   Specification selector for either @p spec, @p last or @p mask.
 *
 * @return
 *   Number of bytes needed to store pattern item specification regardless
 *   of @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_item_spec(void *buf, const size_t size,
			const struct rte_flow_item *item,
			enum rte_flow_conv_item_spec_type type)
{
	size_t off;
	const void *data =
		type == RTE_FLOW_CONV_ITEM_SPEC ? item->spec :
		type == RTE_FLOW_CONV_ITEM_LAST ? item->last :
		type == RTE_FLOW_CONV_ITEM_MASK ? item->mask :
		NULL;

	switch (item->type) {
		union {
			const struct rte_flow_item_raw *raw;
		} spec;
		union {
			const struct rte_flow_item_raw *raw;
		} last;
		union {
			const struct rte_flow_item_raw *raw;
		} mask;
		union {
			const struct rte_flow_item_raw *raw;
		} src;
		union {
			struct rte_flow_item_raw *raw;
		} dst;
		size_t tmp;

	case RTE_FLOW_ITEM_TYPE_RAW:
		spec.raw = item->spec;
		last.raw = item->last ? item->last : item->spec;
		mask.raw = item->mask ? item->mask : &rte_flow_item_raw_mask;
		src.raw = data;
		dst.raw = buf;
		rte_memcpy(dst.raw,
			   (&(struct rte_flow_item_raw){
				.relative = src.raw->relative,
				.search = src.raw->search,
				.reserved = src.raw->reserved,
				.offset = src.raw->offset,
				.limit = src.raw->limit,
				.length = src.raw->length,
			   }),
			   size > sizeof(*dst.raw) ? sizeof(*dst.raw) : size);
		off = sizeof(*dst.raw);
		if (type == RTE_FLOW_CONV_ITEM_SPEC ||
		    (type == RTE_FLOW_CONV_ITEM_MASK &&
		     ((spec.raw->length & mask.raw->length) >=
		      (last.raw->length & mask.raw->length))))
			tmp = spec.raw->length & mask.raw->length;
		else
			tmp = last.raw->length & mask.raw->length;
		if (tmp) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.raw->pattern));
			if (size >= off + tmp)
				dst.raw->pattern = rte_memcpy
					((void *)((uintptr_t)dst.raw + off),
					 src.raw->pattern, tmp);
			off += tmp;
		}
		break;
	default:
		off = rte_flow_conv_copy(buf, data, size,
					 rte_flow_desc_item, item->type);
		break;
	}
	return off;
}

/**
 * Copy action configuration.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] action
 *   Action to copy configuration from.
 *
 * @return
 *   Number of bytes needed to store the action configuration regardless
 *   of @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_action_conf(void *buf, const size_t size,
			  const struct rte_flow_action *action)
{
	size_t off;

	switch (action->type) {
		union {
			const struct rte_flow_action_rss *rss;
			const struct rte_flow_action_vxlan_encap *vxlan_encap;
			const struct rte_flow_action_nvgre_encap *nvgre_encap;
		} src;
		union {
			struct rte_flow_action_rss *rss;
			struct rte_flow_action_vxlan_encap *vxlan_encap;
			struct rte_flow_action_nvgre_encap *nvgre_encap;
		} dst;
		size_t tmp;
		int ret;

	case RTE_FLOW_ACTION_TYPE_RSS:
		src.rss = action->conf;
		dst.rss = buf;
		rte_memcpy(dst.rss,
			   (&(struct rte_flow_action_rss){
				.func = src.rss->func,
				.level = src.rss->level,
				.types = src.rss->types,
				.key_len = src.rss->key_len,
				.queue_num = src.rss->queue_num,
			   }),
			   size > sizeof(*dst.rss) ? sizeof(*dst.rss) : size);
		off = sizeof(*dst.rss);
		if (src.rss->key_len && src.rss->key) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->key));
			tmp = sizeof(*src.rss->key) * src.rss->key_len;
			if (size >= (uint64_t)off + (uint64_t)tmp)
				dst.rss->key = rte_memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->key, tmp);
			off += tmp;
		}
		if (src.rss->queue_num) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->queue));
			tmp = sizeof(*src.rss->queue) * src.rss->queue_num;
			if (size >= (uint64_t)off + (uint64_t)tmp)
				dst.rss->queue = rte_memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->queue, tmp);
			off += tmp;
		}
		break;
	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
	case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
		src.vxlan_encap = action->conf;
		dst.vxlan_encap = buf;
		RTE_BUILD_BUG_ON(sizeof(*src.vxlan_encap) !=
				 sizeof(*src.nvgre_encap) ||
				 offsetof(struct rte_flow_action_vxlan_encap,
					  definition) !=
				 offsetof(struct rte_flow_action_nvgre_encap,
					  definition));
		off = sizeof(*dst.vxlan_encap);
		if (src.vxlan_encap->definition) {
			off = RTE_ALIGN_CEIL
				(off, sizeof(*dst.vxlan_encap->definition));
			ret = rte_flow_conv
				(RTE_FLOW_CONV_OP_PATTERN,
				 (void *)((uintptr_t)dst.vxlan_encap + off),
				 size > off ? size - off : 0,
				 src.vxlan_encap->definition, NULL);
			if (ret < 0)
				return 0;
			if (size >= off + ret)
				dst.vxlan_encap->definition =
					(void *)((uintptr_t)dst.vxlan_encap +
						 off);
			off += ret;
		}
		break;
	default:
		off = rte_flow_conv_copy(buf, action->conf, size,
					 rte_flow_desc_action, action->type);
		break;
	}
	return off;
}

/**
 * Copy a list of pattern items.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source pattern items.
 * @param num
 *   Maximum number of pattern items to process from @p src or 0 to process
 *   the entire list. In both cases, processing stops after
 *   RTE_FLOW_ITEM_TYPE_END is encountered.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store
 *   pattern items regardless of @p size on success (@p dst contents are
 *   truncated to @p size if not large enough), a negative errno value
 *   otherwise and rte_errno is set.
 */
static int
rte_flow_conv_pattern(struct rte_flow_item *dst,
		      const size_t size,
		      const struct rte_flow_item *src,
		      unsigned int num,
		      struct rte_flow_error *error)
{
	uintptr_t data = (uintptr_t)dst;
	size_t off;
	size_t ret;
	unsigned int i;

	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
		/* Allow PMD private flow items. */
		if (((int)src->type >= 0) &&
		    ((size_t)src->type >= RTE_DIM(rte_flow_desc_item) ||
		     !rte_flow_desc_item[src->type].name))
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, src,
				 "cannot convert unknown item type");
		if (size >= off + sizeof(*dst))
			*dst = (struct rte_flow_item){
				.type = src->type,
			};
		off += sizeof(*dst);
		if (!src->type)
			num = i + 1;
	}
	num = i;
	src -= num;
	dst -= num;
	do {
		if (src->spec) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_SPEC);
			if (size && size >= off + ret)
				dst->spec = (void *)(data + off);
			off += ret;
		}
		if (src->last) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_LAST);
			if (size && size >= off + ret)
				dst->last = (void *)(data + off);
			off += ret;
		}
		if (src->mask) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_MASK);
			if (size && size >= off + ret)
				dst->mask = (void *)(data + off);
			off += ret;
		}
		++src;
		++dst;
	} while (--num);
	return off;
}

/**
 * Copy a list of actions.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source actions.
 * @param num
 *   Maximum number of actions to process from @p src or 0 to process the
 *   entire list. In both cases, processing stops after
 *   RTE_FLOW_ACTION_TYPE_END is encountered.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store
 *   actions regardless of @p size on success (@p dst contents are truncated
 *   to @p size if not large enough), a negative errno value otherwise and
 *   rte_errno is set.
 */
static int
rte_flow_conv_actions(struct rte_flow_action *dst,
		      const size_t size,
		      const struct rte_flow_action *src,
		      unsigned int num,
		      struct rte_flow_error *error)
{
	uintptr_t data = (uintptr_t)dst;
	size_t off;
	size_t ret;
	unsigned int i;

	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
		/* Allow PMD private flow actions. */
		if (((int)src->type >= 0) &&
		    ((size_t)src->type >= RTE_DIM(rte_flow_desc_action) ||
		     !rte_flow_desc_action[src->type].name))
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
				 src, "cannot convert unknown action type");
		if (size >= off + sizeof(*dst))
			*dst = (struct rte_flow_action){
				.type = src->type,
			};
		off += sizeof(*dst);
		if (!src->type)
			num = i + 1;
	}
	num = i;
	src -= num;
	dst -= num;
	do {
		if (src->type == RTE_FLOW_ACTION_TYPE_INDIRECT) {
			/*
			 * An indirect action's conf holds the indirect
			 * action handle. Copy the handle directly instead
			 * of duplicating the pointed-to memory.
			 */
			if (size)
				dst->conf = src->conf;
		} else if (src->conf) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_action_conf
				((void *)(data + off),
				 size > off ? size - off : 0, src);
			if (size && size >= off + ret)
				dst->conf = (void *)(data + off);
			off += ret;
		}
		++src;
		++dst;
	} while (--num);
	return off;
}

/**
 * Copy flow rule components.
 *
 * This comprises the flow rule descriptor itself, attributes, pattern and
 * actions list. NULL components in @p src are skipped.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source flow rule descriptor.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store all
 *   components including the descriptor regardless of @p size on success
 *   (@p dst contents are truncated to @p size if not large enough), a
 *   negative errno value otherwise and rte_errno is set.
 */
static int
rte_flow_conv_rule(struct rte_flow_conv_rule *dst,
		   const size_t size,
		   const struct rte_flow_conv_rule *src,
		   struct rte_flow_error *error)
{
	size_t off;
	int ret;

	rte_memcpy(dst,
		   (&(struct rte_flow_conv_rule){
			.attr = NULL,
			.pattern = NULL,
			.actions = NULL,
		   }),
		   size > sizeof(*dst) ? sizeof(*dst) : size);
	off = sizeof(*dst);
	if (src->attr_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		if (size && size >= off + sizeof(*dst->attr))
			dst->attr = rte_memcpy
				((void *)((uintptr_t)dst + off),
				 src->attr_ro, sizeof(*dst->attr));
		off += sizeof(*dst->attr);
	}
	if (src->pattern_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		ret = rte_flow_conv_pattern((void *)((uintptr_t)dst + off),
					    size > off ? size - off : 0,
					    src->pattern_ro, 0, error);
		if (ret < 0)
			return ret;
		if (size && size >= off + (size_t)ret)
			dst->pattern = (void *)((uintptr_t)dst + off);
		off += ret;
	}
	if (src->actions_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		ret = rte_flow_conv_actions((void *)((uintptr_t)dst + off),
					    size > off ? size - off : 0,
					    src->actions_ro, 0, error);
		if (ret < 0)
			return ret;
		if (size >= off + (size_t)ret)
			dst->actions = (void *)((uintptr_t)dst + off);
		off += ret;
	}
	return off;
}

/**
 * Retrieve the name of a pattern item/action type.
 *
 * @param is_action
 *   Nonzero when @p src represents an action type instead of a pattern item
 *   type.
 * @param is_ptr
 *   Nonzero to write string address instead of contents into @p dst.
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Depending on @p is_action, source pattern item or action type cast as a
 *   pointer.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store the
 *   name or its address regardless of @p size on success (@p dst contents
 *   are truncated to @p size if not large enough), a negative errno value
 *   otherwise and rte_errno is set.
 */
static int
rte_flow_conv_name(int is_action,
		   int is_ptr,
		   char *dst,
		   const size_t size,
		   const void *src,
		   struct rte_flow_error *error)
{
	struct desc_info {
		const struct rte_flow_desc_data *data;
		size_t num;
	};
	static const struct desc_info info_rep[2] = {
		{ rte_flow_desc_item, RTE_DIM(rte_flow_desc_item), },
		{ rte_flow_desc_action, RTE_DIM(rte_flow_desc_action), },
	};
	const struct desc_info *const info = &info_rep[!!is_action];
	unsigned int type = (uintptr_t)src;

	if (type >= info->num)
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			 "unknown object type to retrieve the name of");
	if (!is_ptr)
		return strlcpy(dst, info->data[type].name, size);
	if (size >= sizeof(const char **))
		*((const char **)dst) = info->data[type].name;
	return sizeof(const char **);
}

/** Helper function to convert flow API objects. */
int
rte_flow_conv(enum rte_flow_conv_op op,
	      void *dst,
	      size_t size,
	      const void *src,
	      struct rte_flow_error *error)
{
	int ret;

	switch (op) {
		const struct rte_flow_attr *attr;

	case RTE_FLOW_CONV_OP_NONE:
		ret = 0;
		break;
	case RTE_FLOW_CONV_OP_ATTR:
		attr = src;
		if (size > sizeof(*attr))
			size = sizeof(*attr);
		rte_memcpy(dst, attr, size);
		ret = sizeof(*attr);
		break;
	case RTE_FLOW_CONV_OP_ITEM:
		ret = rte_flow_conv_pattern(dst, size, src, 1, error);
		break;
	case RTE_FLOW_CONV_OP_ACTION:
		ret = rte_flow_conv_actions(dst, size, src, 1, error);
		break;
	case RTE_FLOW_CONV_OP_PATTERN:
		ret = rte_flow_conv_pattern(dst, size, src, 0, error);
		break;
	case RTE_FLOW_CONV_OP_ACTIONS:
		ret = rte_flow_conv_actions(dst, size, src, 0, error);
		break;
	case RTE_FLOW_CONV_OP_RULE:
		ret = rte_flow_conv_rule(dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ITEM_NAME:
		ret = rte_flow_conv_name(0, 0, dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ACTION_NAME:
		ret = rte_flow_conv_name(1, 0, dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ITEM_NAME_PTR:
		ret = rte_flow_conv_name(0, 1, dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ACTION_NAME_PTR:
		ret = rte_flow_conv_name(1, 1, dst, size, src, error);
		break;
	default:
		ret = rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			 NULL, "unknown object conversion operation");
	}

	rte_flow_trace_conv(op, dst, size, src, ret);

	return ret;
}
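
/*
 * Illustrative sketch (not part of the upstream file, never built): the
 * intended two-pass use of rte_flow_conv(). A first call with a NULL,
 * zero-sized buffer returns the number of bytes required; the caller then
 * allocates that much and converts for real. Assumes <stdlib.h> for
 * malloc()/free().
 */
#ifdef RTE_FLOW_USAGE_EXAMPLES
#include <stdlib.h>

static struct rte_flow_action *
example_dup_actions(const struct rte_flow_action *actions,
		    struct rte_flow_error *error)
{
	struct rte_flow_action *dup;
	int ret;

	/* First pass: measure. */
	ret = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, NULL, 0, actions, error);
	if (ret < 0)
		return NULL;
	dup = malloc(ret);
	if (dup == NULL)
		return NULL;
	/* Second pass: deep-copy the list and its configurations. */
	if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, dup, ret, actions,
			  error) < 0) {
		free(dup);
		return NULL;
	}
	return dup;
}
#endif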

/** Store a full rte_flow description. */
size_t
rte_flow_copy(struct rte_flow_desc *desc, size_t len,
	      const struct rte_flow_attr *attr,
	      const struct rte_flow_item *items,
	      const struct rte_flow_action *actions)
{
	/*
	 * Overlap struct rte_flow_conv_rule with struct rte_flow_desc in
	 * order to convert the former to the latter without wasting space.
	 */
	struct rte_flow_conv_rule *dst =
		len ?
		(void *)((uintptr_t)desc +
			 (offsetof(struct rte_flow_desc, actions) -
			  offsetof(struct rte_flow_conv_rule, actions))) :
		NULL;
	size_t dst_size =
		len > sizeof(*desc) - sizeof(*dst) ?
		len - (sizeof(*desc) - sizeof(*dst)) :
		0;
	struct rte_flow_conv_rule src = {
		.attr_ro = NULL,
		.pattern_ro = items,
		.actions_ro = actions,
	};
	int ret;

	RTE_BUILD_BUG_ON(sizeof(struct rte_flow_desc) <
			 sizeof(struct rte_flow_conv_rule));
	if (dst_size &&
	    (&dst->pattern != &desc->items ||
	     &dst->actions != &desc->actions ||
	     (uintptr_t)(dst + 1) != (uintptr_t)(desc + 1))) {
		rte_errno = EINVAL;
		return 0;
	}
	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, dst, dst_size, &src, NULL);
	if (ret < 0)
		return 0;
	ret += sizeof(*desc) - sizeof(*dst);
	rte_memcpy(desc,
		   (&(struct rte_flow_desc){
			.size = ret,
			.attr = *attr,
			.items = dst_size ? dst->pattern : NULL,
			.actions = dst_size ? dst->actions : NULL,
		   }),
		   len > sizeof(*desc) ? sizeof(*desc) : len);

	rte_flow_trace_copy(desc, len, attr, items, actions, ret);

	return ret;
}

int
rte_flow_dev_dump(uint16_t port_id, struct rte_flow *flow,
		  FILE *file, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->dev_dump)) {
		fts_enter(dev);
		ret = ops->dev_dump(dev, flow, file, error);
		fts_exit(dev);
		return flow_err(port_id, ret, error);
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

int
rte_flow_get_aged_flows(uint16_t port_id, void **contexts,
			uint32_t nb_contexts, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->get_aged_flows)) {
		fts_enter(dev);
		ret = ops->get_aged_flows(dev, contexts, nb_contexts, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_get_aged_flows(port_id, contexts, nb_contexts, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_get_q_aged_flows(uint16_t port_id, uint32_t queue_id, void **contexts,
			  uint32_t nb_contexts, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->get_q_aged_flows)) {
		fts_enter(dev);
		ret = ops->get_q_aged_flows(dev, queue_id, contexts,
					    nb_contexts, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_get_q_aged_flows(port_id, queue_id, contexts,
						nb_contexts, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

struct rte_flow_action_handle *
rte_flow_action_handle_create(uint16_t port_id,
			      const struct rte_flow_indir_action_conf *conf,
			      const struct rte_flow_action *action,
			      struct rte_flow_error *error)
{
	struct rte_flow_action_handle *handle;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return NULL;
	if (unlikely(!ops->action_handle_create)) {
		rte_flow_error_set(error, ENOSYS,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   rte_strerror(ENOSYS));
		return NULL;
	}
	handle = ops->action_handle_create(&rte_eth_devices[port_id],
					   conf, action, error);
	if (handle == NULL)
		flow_err(port_id, -rte_errno, error);

	rte_flow_trace_action_handle_create(port_id, conf, action, handle);

	return handle;
}
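
/*
 * Illustrative sketch (not part of the upstream file, never built):
 * creating a shared (indirect) COUNT action and referencing it from a
 * flow rule. As noted in the action table above, the handle itself is
 * passed through the conf field of an INDIRECT action.
 */
#ifdef RTE_FLOW_USAGE_EXAMPLES
static struct rte_flow *
example_shared_count_rule(uint16_t port_id, struct rte_flow_error *error)
{
	static const struct rte_flow_indir_action_conf indir_conf = {
		.ingress = 1,
	};
	static const struct rte_flow_action_count count_conf = { .id = 0 };
	const struct rte_flow_action base = {
		.type = RTE_FLOW_ACTION_TYPE_COUNT,
		.conf = &count_conf,
	};
	static const struct rte_flow_action_queue queue_conf = { .index = 0 };
	const struct rte_flow_attr attr = { .ingress = 1 };
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_INDIRECT },
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue_conf },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_action_handle *handle;

	handle = rte_flow_action_handle_create(port_id, &indir_conf, &base,
					       error);
	if (handle == NULL)
		return NULL;
	actions[0].conf = handle; /* the handle goes straight into conf */
	return rte_flow_create(port_id, &attr, pattern, actions, error);
}
#endif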

int
rte_flow_action_handle_destroy(uint16_t port_id,
			       struct rte_flow_action_handle *handle,
			       struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(!ops->action_handle_destroy))
		return rte_flow_error_set(error, ENOSYS,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOSYS));
	ret = ops->action_handle_destroy(&rte_eth_devices[port_id],
					 handle, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_action_handle_destroy(port_id, handle, ret);

	return ret;
}

int
rte_flow_action_handle_update(uint16_t port_id,
			      struct rte_flow_action_handle *handle,
			      const void *update,
			      struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(!ops->action_handle_update))
		return rte_flow_error_set(error, ENOSYS,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOSYS));
	ret = ops->action_handle_update(&rte_eth_devices[port_id], handle,
					update, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_action_handle_update(port_id, handle, update, ret);

	return ret;
}

int
rte_flow_action_handle_query(uint16_t port_id,
			     const struct rte_flow_action_handle *handle,
			     void *data,
			     struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(!ops->action_handle_query))
		return rte_flow_error_set(error, ENOSYS,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOSYS));
	ret = ops->action_handle_query(&rte_eth_devices[port_id], handle,
				       data, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_action_handle_query(port_id, handle, data, ret);

	return ret;
}

int
rte_flow_tunnel_decap_set(uint16_t port_id,
			  struct rte_flow_tunnel *tunnel,
			  struct rte_flow_action **actions,
			  uint32_t *num_of_actions,
			  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_decap_set)) {
		ret = flow_err(port_id,
			       ops->tunnel_decap_set(dev, tunnel, actions,
						     num_of_actions, error),
			       error);

		rte_flow_trace_tunnel_decap_set(port_id, tunnel, actions,
						num_of_actions, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_tunnel_match(uint16_t port_id,
		      struct rte_flow_tunnel *tunnel,
		      struct rte_flow_item **items,
		      uint32_t *num_of_items,
		      struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_match)) {
		ret = flow_err(port_id,
			       ops->tunnel_match(dev, tunnel, items,
						 num_of_items, error),
			       error);

		rte_flow_trace_tunnel_match(port_id, tunnel, items, num_of_items,
					    ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_get_restore_info(uint16_t port_id,
			  struct rte_mbuf *m,
			  struct rte_flow_restore_info *restore_info,
			  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->get_restore_info)) {
		ret = flow_err(port_id,
			       ops->get_restore_info(dev, m, restore_info,
						     error),
			       error);

		rte_flow_trace_get_restore_info(port_id, m, restore_info, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

static struct {
	const struct rte_mbuf_dynflag desc;
	uint64_t value;
} flow_restore_info_dynflag = {
	.desc = { .name = "RTE_MBUF_F_RX_RESTORE_INFO", },
};

uint64_t
rte_flow_restore_info_dynflag(void)
{
	return flow_restore_info_dynflag.value;
}

int
rte_flow_restore_info_dynflag_register(void)
{
	if (flow_restore_info_dynflag.value == 0) {
		int offset = rte_mbuf_dynflag_register(&flow_restore_info_dynflag.desc);

		if (offset < 0)
			return -1;
		flow_restore_info_dynflag.value = RTE_BIT64(offset);
	}

	return 0;
}
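
/*
 * Illustrative sketch (not part of the upstream file, never built):
 * checking tunnel offload restore information on the Rx path. The dynflag
 * must have been registered beforehand for the test to be meaningful.
 */
#ifdef RTE_FLOW_USAGE_EXAMPLES
static void
example_check_restore_info(uint16_t port_id, struct rte_mbuf *m)
{
	struct rte_flow_restore_info info;
	struct rte_flow_error error;

	if ((m->ol_flags & rte_flow_restore_info_dynflag()) == 0)
		return; /* no restore info attached to this packet */
	if (rte_flow_get_restore_info(port_id, m, &info, &error) == 0 &&
	    (info.flags & RTE_FLOW_RESTORE_INFO_TUNNEL)) {
		/* info.tunnel describes the outer tunnel that was matched */
		(void)info.tunnel;
	}
}
#endif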

int
rte_flow_tunnel_action_decap_release(uint16_t port_id,
				     struct rte_flow_action *actions,
				     uint32_t num_of_actions,
				     struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_action_decap_release)) {
		ret = flow_err(port_id,
			       ops->tunnel_action_decap_release(dev, actions,
								num_of_actions,
								error),
			       error);

		rte_flow_trace_tunnel_action_decap_release(port_id, actions,
							   num_of_actions, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_tunnel_item_release(uint16_t port_id,
			     struct rte_flow_item *items,
			     uint32_t num_of_items,
			     struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_item_release)) {
		ret = flow_err(port_id,
			       ops->tunnel_item_release(dev, items,
							num_of_items, error),
			       error);

		rte_flow_trace_tunnel_item_release(port_id, items, num_of_items, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_pick_transfer_proxy(uint16_t port_id, uint16_t *proxy_port_id,
			     struct rte_flow_error *error)
{
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_eth_dev *dev;
	int ret;

	if (unlikely(ops == NULL))
		return -rte_errno;

	if (ops->pick_transfer_proxy == NULL) {
		*proxy_port_id = port_id;
		return 0;
	}

	dev = &rte_eth_devices[port_id];

	ret = flow_err(port_id,
		       ops->pick_transfer_proxy(dev, proxy_port_id, error),
		       error);

	rte_flow_trace_pick_transfer_proxy(port_id, proxy_port_id, ret);

	return ret;
}
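
/*
 * Illustrative sketch (not part of the upstream file, never built):
 * transfer rules must be created through the transfer proxy port, which
 * may differ from the port the traffic relates to.
 */
#ifdef RTE_FLOW_USAGE_EXAMPLES
static struct rte_flow *
example_create_transfer_rule(uint16_t port_id,
			     const struct rte_flow_item pattern[],
			     const struct rte_flow_action actions[],
			     struct rte_flow_error *error)
{
	const struct rte_flow_attr attr = { .transfer = 1 };
	uint16_t proxy_port_id;

	/* Falls back to port_id itself when the PMD has no proxy concept. */
	if (rte_flow_pick_transfer_proxy(port_id, &proxy_port_id, error) != 0)
		return NULL;
	return rte_flow_create(proxy_port_id, &attr, pattern, actions, error);
}
#endif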

struct rte_flow_item_flex_handle *
rte_flow_flex_item_create(uint16_t port_id,
			  const struct rte_flow_item_flex_conf *conf,
			  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow_item_flex_handle *handle;

	if (unlikely(!ops))
		return NULL;
	if (unlikely(!ops->flex_item_create)) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, rte_strerror(ENOTSUP));
		return NULL;
	}
	handle = ops->flex_item_create(dev, conf, error);
	if (handle == NULL)
		flow_err(port_id, -rte_errno, error);

	rte_flow_trace_flex_item_create(port_id, conf, handle);

	return handle;
}

int
rte_flow_flex_item_release(uint16_t port_id,
			   const struct rte_flow_item_flex_handle *handle,
			   struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops || !ops->flex_item_release))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOTSUP));
	ret = ops->flex_item_release(dev, handle, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_flex_item_release(port_id, handle, ret);

	return ret;
}

int
rte_flow_info_get(uint16_t port_id,
		  struct rte_flow_port_info *port_info,
		  struct rte_flow_queue_info *queue_info,
		  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (dev->data->dev_configured == 0) {
		FLOW_LOG(INFO,
			"Device with port_id=%"PRIu16" is not configured.",
			port_id);
		return -EINVAL;
	}
	if (port_info == NULL) {
		FLOW_LOG(ERR, "Port %"PRIu16" info is NULL.", port_id);
		return -EINVAL;
	}
	if (likely(!!ops->info_get)) {
		ret = flow_err(port_id,
			       ops->info_get(dev, port_info, queue_info, error),
			       error);

		rte_flow_trace_info_get(port_id, port_info, queue_info, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_configure(uint16_t port_id,
		   const struct rte_flow_port_attr *port_attr,
		   uint16_t nb_queue,
		   const struct rte_flow_queue_attr *queue_attr[],
		   struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (dev->data->dev_configured == 0) {
		FLOW_LOG(INFO,
			"Device with port_id=%"PRIu16" is not configured.",
			port_id);
		return -EINVAL;
	}
	if (dev->data->dev_started != 0) {
		FLOW_LOG(INFO,
			"Device with port_id=%"PRIu16" already started.",
			port_id);
		return -EINVAL;
	}
	if (port_attr == NULL) {
		FLOW_LOG(ERR, "Port %"PRIu16" port attributes is NULL.", port_id);
		return -EINVAL;
	}
	if (queue_attr == NULL) {
		FLOW_LOG(ERR, "Port %"PRIu16" queue info is NULL.", port_id);
		return -EINVAL;
	}
	if ((port_attr->flags & RTE_FLOW_PORT_FLAG_SHARE_INDIRECT) &&
	    !rte_eth_dev_is_valid_port(port_attr->host_port_id)) {
		return rte_flow_error_set(error, ENODEV,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENODEV));
	}
	if (likely(!!ops->configure)) {
		ret = ops->configure(dev, port_attr, nb_queue, queue_attr, error);
		if (ret == 0)
			dev->data->flow_configured = 1;
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_configure(port_id, port_attr, nb_queue, queue_attr, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}
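
/*
 * Illustrative sketch (not part of the upstream file, never built):
 * pre-configuring the flow engine for the asynchronous flow API. Per the
 * checks above, this must run after the port is configured but before it
 * is started. The resource budgets below are arbitrary example values.
 */
#ifdef RTE_FLOW_USAGE_EXAMPLES
static int
example_flow_configure(uint16_t port_id)
{
	struct rte_flow_port_info port_info;
	struct rte_flow_queue_info queue_info;
	const struct rte_flow_port_attr port_attr = {
		.nb_counters = 1024, /* hypothetical counter budget */
	};
	const struct rte_flow_queue_attr queue_attr = { .size = 64 };
	const struct rte_flow_queue_attr *attr_list[] = { &queue_attr };
	struct rte_flow_error error;

	/* Query the PMD limits first, then configure one flow queue. */
	if (rte_flow_info_get(port_id, &port_info, &queue_info, &error) != 0)
		return -rte_errno;
	return rte_flow_configure(port_id, &port_attr, 1, attr_list, &error);
}
#endif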
1696 
1697 struct rte_flow_pattern_template *
1698 rte_flow_pattern_template_create(uint16_t port_id,
1699 		const struct rte_flow_pattern_template_attr *template_attr,
1700 		const struct rte_flow_item pattern[],
1701 		struct rte_flow_error *error)
1702 {
1703 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1704 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1705 	struct rte_flow_pattern_template *template;
1706 
1707 	if (unlikely(!ops))
1708 		return NULL;
1709 	if (dev->data->flow_configured == 0) {
1710 		FLOW_LOG(INFO,
1711 			"Flow engine on port_id=%"PRIu16" is not configured.",
1712 			port_id);
1713 		rte_flow_error_set(error, EINVAL,
1714 				RTE_FLOW_ERROR_TYPE_STATE,
1715 				NULL, rte_strerror(EINVAL));
1716 		return NULL;
1717 	}
1718 	if (template_attr == NULL) {
1719 		FLOW_LOG(ERR,
1720 			     "Port %"PRIu16" template attr is NULL.",
1721 			     port_id);
1722 		rte_flow_error_set(error, EINVAL,
1723 				   RTE_FLOW_ERROR_TYPE_ATTR,
1724 				   NULL, rte_strerror(EINVAL));
1725 		return NULL;
1726 	}
1727 	if (pattern == NULL) {
1728 		FLOW_LOG(ERR,
1729 			     "Port %"PRIu16" pattern is NULL.",
1730 			     port_id);
1731 		rte_flow_error_set(error, EINVAL,
1732 				   RTE_FLOW_ERROR_TYPE_ATTR,
1733 				   NULL, rte_strerror(EINVAL));
1734 		return NULL;
1735 	}
1736 	if (likely(!!ops->pattern_template_create)) {
1737 		template = ops->pattern_template_create(dev, template_attr,
1738 							pattern, error);
1739 		if (template == NULL)
1740 			flow_err(port_id, -rte_errno, error);
1741 
1742 		rte_flow_trace_pattern_template_create(port_id, template_attr,
1743 						       pattern, template);
1744 
1745 		return template;
1746 	}
1747 	rte_flow_error_set(error, ENOTSUP,
1748 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1749 			   NULL, rte_strerror(ENOTSUP));
1750 	return NULL;
1751 }
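
/*
 * Editor's example (not part of the library): a hedged sketch of a
 * pattern template matching IPv4/TCP frames on ingress. Per-item masks
 * are omitted here for brevity; a real template would set them to
 * select which header fields rules of this shape can match on.
 */
static __rte_unused struct rte_flow_pattern_template *
example_pattern_template(uint16_t port_id, struct rte_flow_error *err)
{
	const struct rte_flow_pattern_template_attr attr = { .ingress = 1 };
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_TCP },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};

	return rte_flow_pattern_template_create(port_id, &attr, pattern, err);
}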
1752 
1753 int
1754 rte_flow_pattern_template_destroy(uint16_t port_id,
1755 		struct rte_flow_pattern_template *pattern_template,
1756 		struct rte_flow_error *error)
1757 {
1758 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1759 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1760 	int ret;
1761 
1762 	if (unlikely(!ops))
1763 		return -rte_errno;
1764 	if (unlikely(pattern_template == NULL))
1765 		return 0;
1766 	if (likely(!!ops->pattern_template_destroy)) {
1767 		ret = flow_err(port_id,
1768 			       ops->pattern_template_destroy(dev,
1769 							     pattern_template,
1770 							     error),
1771 			       error);
1772 
1773 		rte_flow_trace_pattern_template_destroy(port_id, pattern_template,
1774 							ret);
1775 
1776 		return ret;
1777 	}
1778 	return rte_flow_error_set(error, ENOTSUP,
1779 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1780 				  NULL, rte_strerror(ENOTSUP));
1781 }
1782 
1783 struct rte_flow_actions_template *
1784 rte_flow_actions_template_create(uint16_t port_id,
1785 			const struct rte_flow_actions_template_attr *template_attr,
1786 			const struct rte_flow_action actions[],
1787 			const struct rte_flow_action masks[],
1788 			struct rte_flow_error *error)
1789 {
1790 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1791 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1792 	struct rte_flow_actions_template *template;
1793 
1794 	if (unlikely(!ops))
1795 		return NULL;
1796 	if (dev->data->flow_configured == 0) {
1797 		FLOW_LOG(INFO,
1798 			"Flow engine on port_id=%"PRIu16" is not configured.",
1799 			port_id);
1800 		rte_flow_error_set(error, EINVAL,
1801 				   RTE_FLOW_ERROR_TYPE_STATE,
1802 				   NULL, rte_strerror(EINVAL));
1803 		return NULL;
1804 	}
1805 	if (template_attr == NULL) {
1806 		FLOW_LOG(ERR,
1807 			     "Port %"PRIu16" template attr is NULL.",
1808 			     port_id);
1809 		rte_flow_error_set(error, EINVAL,
1810 				   RTE_FLOW_ERROR_TYPE_ATTR,
1811 				   NULL, rte_strerror(EINVAL));
1812 		return NULL;
1813 	}
1814 	if (actions == NULL) {
1815 		FLOW_LOG(ERR,
1816 			     "Port %"PRIu16" actions is NULL.",
1817 			     port_id);
1818 		rte_flow_error_set(error, EINVAL,
1819 				   RTE_FLOW_ERROR_TYPE_ATTR,
1820 				   NULL, rte_strerror(EINVAL));
1821 		return NULL;
1822 	}
1823 	if (masks == NULL) {
1824 		FLOW_LOG(ERR,
1825 			     "Port %"PRIu16" masks is NULL.",
1826 			     port_id);
1827 		rte_flow_error_set(error, EINVAL,
1828 				   RTE_FLOW_ERROR_TYPE_ATTR,
1829 				   NULL, rte_strerror(EINVAL));
1830 		return NULL;
1831 	}
1832 	if (likely(!!ops->actions_template_create)) {
1833 		template = ops->actions_template_create(dev, template_attr,
1834 							actions, masks, error);
1835 		if (template == NULL)
1836 			flow_err(port_id, -rte_errno, error);
1837 
1838 		rte_flow_trace_actions_template_create(port_id, template_attr, actions,
1839 						       masks, template);
1840 
1841 		return template;
1842 	}
1843 	rte_flow_error_set(error, ENOTSUP,
1844 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1845 			   NULL, rte_strerror(ENOTSUP));
1846 	return NULL;
1847 }
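
/*
 * Editor's example (not part of the library): a sketch of an actions
 * template pairing each action with a mask entry. A zeroed mask conf
 * leaves the QUEUE index dynamic, to be filled in per rule at enqueue
 * time via rte_flow_async_create().
 */
static __rte_unused struct rte_flow_actions_template *
example_actions_template(uint16_t port_id, struct rte_flow_error *err)
{
	const struct rte_flow_actions_template_attr attr = { .ingress = 1 };
	const struct rte_flow_action_queue queue = { .index = 0 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	const struct rte_flow_action_queue queue_mask = { .index = 0 };
	const struct rte_flow_action masks[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue_mask },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_actions_template_create(port_id, &attr, actions,
						masks, err);
}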
1848 
1849 int
1850 rte_flow_actions_template_destroy(uint16_t port_id,
1851 			struct rte_flow_actions_template *actions_template,
1852 			struct rte_flow_error *error)
1853 {
1854 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1855 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1856 	int ret;
1857 
1858 	if (unlikely(!ops))
1859 		return -rte_errno;
1860 	if (unlikely(actions_template == NULL))
1861 		return 0;
1862 	if (likely(!!ops->actions_template_destroy)) {
1863 		ret = flow_err(port_id,
1864 			       ops->actions_template_destroy(dev,
1865 							     actions_template,
1866 							     error),
1867 			       error);
1868 
1869 		rte_flow_trace_actions_template_destroy(port_id, actions_template,
1870 							ret);
1871 
1872 		return ret;
1873 	}
1874 	return rte_flow_error_set(error, ENOTSUP,
1875 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1876 				  NULL, rte_strerror(ENOTSUP));
1877 }
1878 
1879 struct rte_flow_template_table *
1880 rte_flow_template_table_create(uint16_t port_id,
1881 			const struct rte_flow_template_table_attr *table_attr,
1882 			struct rte_flow_pattern_template *pattern_templates[],
1883 			uint8_t nb_pattern_templates,
1884 			struct rte_flow_actions_template *actions_templates[],
1885 			uint8_t nb_actions_templates,
1886 			struct rte_flow_error *error)
1887 {
1888 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1889 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1890 	struct rte_flow_template_table *table;
1891 
1892 	if (unlikely(!ops))
1893 		return NULL;
1894 	if (dev->data->flow_configured == 0) {
1895 		FLOW_LOG(INFO,
1896 			"Flow engine on port_id=%"PRIu16" is not configured.",
1897 			port_id);
1898 		rte_flow_error_set(error, EINVAL,
1899 				   RTE_FLOW_ERROR_TYPE_STATE,
1900 				   NULL, rte_strerror(EINVAL));
1901 		return NULL;
1902 	}
1903 	if (table_attr == NULL) {
1904 		FLOW_LOG(ERR,
1905 			     "Port %"PRIu16" table attr is NULL.",
1906 			     port_id);
1907 		rte_flow_error_set(error, EINVAL,
1908 				   RTE_FLOW_ERROR_TYPE_ATTR,
1909 				   NULL, rte_strerror(EINVAL));
1910 		return NULL;
1911 	}
1912 	if (pattern_templates == NULL) {
1913 		FLOW_LOG(ERR,
1914 			     "Port %"PRIu16" pattern templates is NULL.",
1915 			     port_id);
1916 		rte_flow_error_set(error, EINVAL,
1917 				   RTE_FLOW_ERROR_TYPE_ATTR,
1918 				   NULL, rte_strerror(EINVAL));
1919 		return NULL;
1920 	}
1921 	if (actions_templates == NULL) {
1922 		FLOW_LOG(ERR,
1923 			     "Port %"PRIu16" actions templates is NULL.",
1924 			     port_id);
1925 		rte_flow_error_set(error, EINVAL,
1926 				   RTE_FLOW_ERROR_TYPE_ATTR,
1927 				   NULL, rte_strerror(EINVAL));
1928 		return NULL;
1929 	}
1930 	if (likely(!!ops->template_table_create)) {
1931 		table = ops->template_table_create(dev, table_attr,
1932 					pattern_templates, nb_pattern_templates,
1933 					actions_templates, nb_actions_templates,
1934 					error);
1935 		if (table == NULL)
1936 			flow_err(port_id, -rte_errno, error);
1937 
1938 		rte_flow_trace_template_table_create(port_id, table_attr,
1939 						     pattern_templates,
1940 						     nb_pattern_templates,
1941 						     actions_templates,
1942 						     nb_actions_templates, table);
1943 
1944 		return table;
1945 	}
1946 	rte_flow_error_set(error, ENOTSUP,
1947 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1948 			   NULL, rte_strerror(ENOTSUP));
1949 	return NULL;
1950 }
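
/*
 * Editor's example (not part of the library): combining one pattern
 * template and one actions template into a table sized for 1024 rules;
 * the capacity and attributes are illustrative assumptions.
 */
static __rte_unused struct rte_flow_template_table *
example_template_table(uint16_t port_id,
		       struct rte_flow_pattern_template *pt,
		       struct rte_flow_actions_template *at,
		       struct rte_flow_error *err)
{
	const struct rte_flow_template_table_attr attr = {
		.flow_attr = { .ingress = 1 },
		.nb_flows = 1024,
	};

	return rte_flow_template_table_create(port_id, &attr, &pt, 1,
					      &at, 1, err);
}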
1951 
1952 int
1953 rte_flow_template_table_destroy(uint16_t port_id,
1954 				struct rte_flow_template_table *template_table,
1955 				struct rte_flow_error *error)
1956 {
1957 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1958 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1959 	int ret;
1960 
1961 	if (unlikely(!ops))
1962 		return -rte_errno;
1963 	if (unlikely(template_table == NULL))
1964 		return 0;
1965 	if (likely(!!ops->template_table_destroy)) {
1966 		ret = flow_err(port_id,
1967 			       ops->template_table_destroy(dev,
1968 							   template_table,
1969 							   error),
1970 			       error);
1971 
1972 		rte_flow_trace_template_table_destroy(port_id, template_table,
1973 						      ret);
1974 
1975 		return ret;
1976 	}
1977 	return rte_flow_error_set(error, ENOTSUP,
1978 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1979 				  NULL, rte_strerror(ENOTSUP));
1980 }
1981 
1982 int
1983 rte_flow_group_set_miss_actions(uint16_t port_id,
1984 				uint32_t group_id,
1985 				const struct rte_flow_group_attr *attr,
1986 				const struct rte_flow_action actions[],
1987 				struct rte_flow_error *error)
1988 {
1989 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1990 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1991 
1992 	if (unlikely(!ops))
1993 		return -rte_errno;
1994 	if (likely(!!ops->group_set_miss_actions)) {
1995 		return flow_err(port_id,
1996 				ops->group_set_miss_actions(dev, group_id, attr, actions, error),
1997 				error);
1998 	}
1999 	return rte_flow_error_set(error, ENOTSUP,
2000 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2001 				  NULL, rte_strerror(ENOTSUP));
2002 }
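
/*
 * Editor's example (not part of the library): sending packets that miss
 * every rule in group 1 back to group 0; the group numbering is an
 * illustrative assumption.
 */
static __rte_unused int
example_group_miss_actions(uint16_t port_id, struct rte_flow_error *err)
{
	const struct rte_flow_group_attr attr = { .ingress = 1 };
	const struct rte_flow_action_jump jump = { .group = 0 };
	const struct rte_flow_action miss_actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_group_set_miss_actions(port_id, 1, &attr,
					       miss_actions, err);
}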
2003 
2004 struct rte_flow *
2005 rte_flow_async_create(uint16_t port_id,
2006 		      uint32_t queue_id,
2007 		      const struct rte_flow_op_attr *op_attr,
2008 		      struct rte_flow_template_table *template_table,
2009 		      const struct rte_flow_item pattern[],
2010 		      uint8_t pattern_template_index,
2011 		      const struct rte_flow_action actions[],
2012 		      uint8_t actions_template_index,
2013 		      void *user_data,
2014 		      struct rte_flow_error *error)
2015 {
2016 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2017 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2018 	struct rte_flow *flow;
2019 
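	/*
	 * Data-path routine: unlike the control-path functions above,
	 * 'ops' is not checked here; its validity is the caller's
	 * responsibility, trading safety for per-operation speed.
	 */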
2020 	flow = ops->async_create(dev, queue_id,
2021 				 op_attr, template_table,
2022 				 pattern, pattern_template_index,
2023 				 actions, actions_template_index,
2024 				 user_data, error);
2025 	if (flow == NULL)
2026 		flow_err(port_id, -rte_errno, error);
2027 
2028 	rte_flow_trace_async_create(port_id, queue_id, op_attr, template_table,
2029 				    pattern, pattern_template_index, actions,
2030 				    actions_template_index, user_data, flow);
2031 
2032 	return flow;
2033 }
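
/*
 * Editor's example (not part of the library): enqueuing a rule on flow
 * queue 0 of a table built from single templates (indexes 0/0, as in
 * the sketches above); the destination address and Rx queue index are
 * illustrative values.
 */
static __rte_unused struct rte_flow *
example_async_rule(uint16_t port_id, struct rte_flow_template_table *table,
		   struct rte_flow_error *err)
{
	const struct rte_flow_op_attr op_attr = { .postpone = 0 };
	const struct rte_flow_item_ipv4 ip_spec = {
		.hdr.dst_addr = RTE_BE32(RTE_IPV4(192, 168, 0, 1)),
	};
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ip_spec },
		{ .type = RTE_FLOW_ITEM_TYPE_TCP },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action_queue queue = { .index = 4 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_async_create(port_id, 0, &op_attr, table,
				     pattern, 0, actions, 0, NULL, err);
}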
2034 
2035 struct rte_flow *
2036 rte_flow_async_create_by_index(uint16_t port_id,
2037 			       uint32_t queue_id,
2038 			       const struct rte_flow_op_attr *op_attr,
2039 			       struct rte_flow_template_table *template_table,
2040 			       uint32_t rule_index,
2041 			       const struct rte_flow_action actions[],
2042 			       uint8_t actions_template_index,
2043 			       void *user_data,
2044 			       struct rte_flow_error *error)
2045 {
2046 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2047 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2048 	struct rte_flow *flow;
2049 
2050 	flow = ops->async_create_by_index(dev, queue_id,
2051 					  op_attr, template_table, rule_index,
2052 					  actions, actions_template_index,
2053 					  user_data, error);
2054 	if (flow == NULL)
2055 		flow_err(port_id, -rte_errno, error);
2056 	return flow;
2057 }
2058 
2059 int
2060 rte_flow_async_destroy(uint16_t port_id,
2061 		       uint32_t queue_id,
2062 		       const struct rte_flow_op_attr *op_attr,
2063 		       struct rte_flow *flow,
2064 		       void *user_data,
2065 		       struct rte_flow_error *error)
2066 {
2067 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2068 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2069 	int ret;
2070 
2071 	ret = flow_err(port_id,
2072 		       ops->async_destroy(dev, queue_id,
2073 					  op_attr, flow,
2074 					  user_data, error),
2075 		       error);
2076 
2077 	rte_flow_trace_async_destroy(port_id, queue_id, op_attr, flow,
2078 				     user_data, ret);
2079 
2080 	return ret;
2081 }
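
/*
 * Editor's example (not part of the library): enqueuing a postponed
 * destruction; the operation stays buffered until rte_flow_push().
 */
static __rte_unused int
example_async_destroy(uint16_t port_id, uint32_t queue_id,
		      struct rte_flow *flow, struct rte_flow_error *err)
{
	const struct rte_flow_op_attr op_attr = { .postpone = 1 };

	return rte_flow_async_destroy(port_id, queue_id, &op_attr, flow,
				      NULL, err);
}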
2082 
2083 int
2084 rte_flow_async_actions_update(uint16_t port_id,
2085 			      uint32_t queue_id,
2086 			      const struct rte_flow_op_attr *op_attr,
2087 			      struct rte_flow *flow,
2088 			      const struct rte_flow_action actions[],
2089 			      uint8_t actions_template_index,
2090 			      void *user_data,
2091 			      struct rte_flow_error *error)
2092 {
2093 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2094 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2095 	int ret;
2096 
2097 	ret = flow_err(port_id,
2098 		       ops->async_actions_update(dev, queue_id, op_attr,
2099 						 flow, actions,
2100 						 actions_template_index,
2101 						 user_data, error),
2102 		       error);
2103 
2104 	rte_flow_trace_async_actions_update(port_id, queue_id, op_attr, flow,
2105 					    actions, actions_template_index,
2106 					    user_data, ret);
2107 
2108 	return ret;
2109 }
2110 
2111 int
2112 rte_flow_push(uint16_t port_id,
2113 	      uint32_t queue_id,
2114 	      struct rte_flow_error *error)
2115 {
2116 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2117 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2118 	int ret;
2119 
2120 	ret = flow_err(port_id,
2121 		       ops->push(dev, queue_id, error),
2122 		       error);
2123 
2124 	rte_flow_trace_push(port_id, queue_id, ret);
2125 
2126 	return ret;
2127 }
2128 
2129 int
2130 rte_flow_pull(uint16_t port_id,
2131 	      uint32_t queue_id,
2132 	      struct rte_flow_op_result res[],
2133 	      uint16_t n_res,
2134 	      struct rte_flow_error *error)
2135 {
2136 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2137 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2138 	int ret;
2139 	int rc;
2140 
2141 	ret = ops->pull(dev, queue_id, res, n_res, error);
2142 	rc = ret ? ret : flow_err(port_id, ret, error);
2143 
2144 	rte_flow_trace_pull(port_id, queue_id, res, n_res, rc);
2145 
2146 	return rc;
2147 }
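
/*
 * Editor's example (not part of the library): pushing any postponed
 * operations to hardware, then polling until at least one completion
 * is pulled; the burst size of 32 is an arbitrary choice.
 */
static __rte_unused int
example_drain_queue(uint16_t port_id, uint32_t queue_id,
		    struct rte_flow_error *err)
{
	struct rte_flow_op_result results[32];
	int n, done = 0;

	if (rte_flow_push(port_id, queue_id, err) < 0)
		return -1;
	do {
		n = rte_flow_pull(port_id, queue_id, results,
				  RTE_DIM(results), err);
		if (n < 0)
			return -1;
		done += n;
	} while (n == 0);
	/* Each results[i].status tells whether that operation succeeded. */
	return done;
}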
2148 
2149 struct rte_flow_action_handle *
2150 rte_flow_async_action_handle_create(uint16_t port_id,
2151 		uint32_t queue_id,
2152 		const struct rte_flow_op_attr *op_attr,
2153 		const struct rte_flow_indir_action_conf *indir_action_conf,
2154 		const struct rte_flow_action *action,
2155 		void *user_data,
2156 		struct rte_flow_error *error)
2157 {
2158 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2159 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2160 	struct rte_flow_action_handle *handle;
2161 
2162 	handle = ops->async_action_handle_create(dev, queue_id, op_attr,
2163 					     indir_action_conf, action, user_data, error);
2164 	if (handle == NULL)
2165 		flow_err(port_id, -rte_errno, error);
2166 
2167 	rte_flow_trace_async_action_handle_create(port_id, queue_id, op_attr,
2168 						  indir_action_conf, action,
2169 						  user_data, handle);
2170 
2171 	return handle;
2172 }
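
/*
 * Editor's example (not part of the library): creating a shared COUNT
 * action handle that many ingress rules can reference; counter support
 * and any sharing limits are PMD-specific.
 */
static __rte_unused struct rte_flow_action_handle *
example_shared_counter(uint16_t port_id, uint32_t queue_id,
		       struct rte_flow_error *err)
{
	const struct rte_flow_op_attr op_attr = { .postpone = 0 };
	const struct rte_flow_indir_action_conf conf = { .ingress = 1 };
	const struct rte_flow_action action = {
		.type = RTE_FLOW_ACTION_TYPE_COUNT,
	};

	return rte_flow_async_action_handle_create(port_id, queue_id, &op_attr,
						   &conf, &action, NULL, err);
}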
2173 
2174 int
2175 rte_flow_async_action_handle_destroy(uint16_t port_id,
2176 		uint32_t queue_id,
2177 		const struct rte_flow_op_attr *op_attr,
2178 		struct rte_flow_action_handle *action_handle,
2179 		void *user_data,
2180 		struct rte_flow_error *error)
2181 {
2182 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2183 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2184 	int ret;
2185 
2186 	ret = ops->async_action_handle_destroy(dev, queue_id, op_attr,
2187 					   action_handle, user_data, error);
2188 	ret = flow_err(port_id, ret, error);
2189 
2190 	rte_flow_trace_async_action_handle_destroy(port_id, queue_id, op_attr,
2191 						   action_handle, user_data, ret);
2192 
2193 	return ret;
2194 }
2195 
2196 int
2197 rte_flow_async_action_handle_update(uint16_t port_id,
2198 		uint32_t queue_id,
2199 		const struct rte_flow_op_attr *op_attr,
2200 		struct rte_flow_action_handle *action_handle,
2201 		const void *update,
2202 		void *user_data,
2203 		struct rte_flow_error *error)
2204 {
2205 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2206 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2207 	int ret;
2208 
2209 	ret = ops->async_action_handle_update(dev, queue_id, op_attr,
2210 					  action_handle, update, user_data, error);
2211 	ret = flow_err(port_id, ret, error);
2212 
2213 	rte_flow_trace_async_action_handle_update(port_id, queue_id, op_attr,
2214 						  action_handle, update,
2215 						  user_data, ret);
2216 
2217 	return ret;
2218 }
2219 
2220 int
2221 rte_flow_async_action_handle_query(uint16_t port_id,
2222 		uint32_t queue_id,
2223 		const struct rte_flow_op_attr *op_attr,
2224 		const struct rte_flow_action_handle *action_handle,
2225 		void *data,
2226 		void *user_data,
2227 		struct rte_flow_error *error)
2228 {
2229 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2230 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2231 	int ret;
2232 
2233 	if (unlikely(!ops))
2234 		return -rte_errno;
2235 	ret = ops->async_action_handle_query(dev, queue_id, op_attr,
2236 					  action_handle, data, user_data, error);
2237 	ret = flow_err(port_id, ret, error);
2238 
2239 	rte_flow_trace_async_action_handle_query(port_id, queue_id, op_attr,
2240 						 action_handle, data, user_data,
2241 						 ret);
2242 
2243 	return ret;
2244 }
2245 
2246 int
2247 rte_flow_action_handle_query_update(uint16_t port_id,
2248 				    struct rte_flow_action_handle *handle,
2249 				    const void *update, void *query,
2250 				    enum rte_flow_query_update_mode mode,
2251 				    struct rte_flow_error *error)
2252 {
2253 	int ret;
2254 	struct rte_eth_dev *dev;
2255 	const struct rte_flow_ops *ops;
2256 
2257 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2258 	if (!handle)
2259 		return -EINVAL;
2260 	if (!update && !query)
2261 		return -EINVAL;
2262 	dev = &rte_eth_devices[port_id];
2263 	ops = rte_flow_ops_get(port_id, error);
2264 	if (!ops || !ops->action_handle_query_update)
2265 		return -ENOTSUP;
2266 	ret = ops->action_handle_query_update(dev, handle, update,
2267 					      query, mode, error);
2268 	return flow_err(port_id, ret, error);
2269 }
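
/*
 * Editor's example (not part of the library): atomically querying then
 * updating an indirect action. The update/query layouts depend on the
 * action type behind the handle, so void pointers keep this sketch
 * type-agnostic; QUERY_FIRST returns the pre-update state.
 */
static __rte_unused int
example_query_then_update(uint16_t port_id,
			  struct rte_flow_action_handle *handle,
			  const void *update, void *query,
			  struct rte_flow_error *err)
{
	return rte_flow_action_handle_query_update(port_id, handle,
						   update, query,
						   RTE_FLOW_QU_QUERY_FIRST,
						   err);
}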
2270 
2271 int
2272 rte_flow_async_action_handle_query_update(uint16_t port_id, uint32_t queue_id,
2273 					  const struct rte_flow_op_attr *attr,
2274 					  struct rte_flow_action_handle *handle,
2275 					  const void *update, void *query,
2276 					  enum rte_flow_query_update_mode mode,
2277 					  void *user_data,
2278 					  struct rte_flow_error *error)
2279 {
2280 	int ret;
2281 	struct rte_eth_dev *dev;
2282 	const struct rte_flow_ops *ops;
2283 
2284 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2285 	if (!handle)
2286 		return -EINVAL;
2287 	if (!update && !query)
2288 		return -EINVAL;
2289 	dev = &rte_eth_devices[port_id];
2290 	ops = rte_flow_ops_get(port_id, error);
2291 	if (!ops || !ops->async_action_handle_query_update)
2292 		return -ENOTSUP;
2293 	ret = ops->async_action_handle_query_update(dev, queue_id, attr,
2294 						    handle, update,
2295 						    query, mode,
2296 						    user_data, error);
2297 	return flow_err(port_id, ret, error);
2298 }
2299 
2300 struct rte_flow_action_list_handle *
2301 rte_flow_action_list_handle_create(uint16_t port_id,
2302 				   const
2303 				   struct rte_flow_indir_action_conf *conf,
2304 				   const struct rte_flow_action *actions,
2305 				   struct rte_flow_error *error)
2306 {
2307 	int ret;
2308 	struct rte_eth_dev *dev;
2309 	const struct rte_flow_ops *ops;
2310 	struct rte_flow_action_list_handle *handle;
2311 
2312 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
2313 	ops = rte_flow_ops_get(port_id, error);
2314 	if (!ops || !ops->action_list_handle_create) {
2315 		rte_flow_error_set(error, ENOTSUP,
2316 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2317 				   "action_list handle not supported");
2318 		return NULL;
2319 	}
2320 	dev = &rte_eth_devices[port_id];
2321 	handle = ops->action_list_handle_create(dev, conf, actions, error);
2322 	ret = handle == NULL ? flow_err(port_id, -rte_errno, error) : 0;
2323 	rte_flow_trace_action_list_handle_create(port_id, conf, actions, ret);
2324 	return handle;
2325 }
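
/*
 * Editor's example (not part of the library): binding a fixed chain of
 * actions behind one indirect handle; whether a given chain (here
 * COUNT followed by DROP) is supported is PMD-specific.
 */
static __rte_unused struct rte_flow_action_list_handle *
example_action_list(uint16_t port_id, struct rte_flow_error *err)
{
	const struct rte_flow_indir_action_conf conf = { .ingress = 1 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	return rte_flow_action_list_handle_create(port_id, &conf, actions, err);
}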
2326 
2327 int
2328 rte_flow_action_list_handle_destroy(uint16_t port_id,
2329 				    struct rte_flow_action_list_handle *handle,
2330 				    struct rte_flow_error *error)
2331 {
2332 	int ret;
2333 	struct rte_eth_dev *dev;
2334 	const struct rte_flow_ops *ops;
2335 
2336 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2337 	ops = rte_flow_ops_get(port_id, error);
2338 	if (!ops || !ops->action_list_handle_destroy)
2339 		return rte_flow_error_set(error, ENOTSUP,
2340 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2341 					  "action_list handle not supported");
2342 	dev = &rte_eth_devices[port_id];
2343 	ret = ops->action_list_handle_destroy(dev, handle, error);
2344 	ret = flow_err(port_id, ret, error);
2345 	rte_flow_trace_action_list_handle_destroy(port_id, handle, ret);
2346 	return ret;
2347 }
2348 
2349 struct rte_flow_action_list_handle *
2350 rte_flow_async_action_list_handle_create(uint16_t port_id, uint32_t queue_id,
2351 					 const struct rte_flow_op_attr *attr,
2352 					 const struct rte_flow_indir_action_conf *conf,
2353 					 const struct rte_flow_action *actions,
2354 					 void *user_data,
2355 					 struct rte_flow_error *error)
2356 {
2357 	int ret;
2358 	struct rte_eth_dev *dev;
2359 	const struct rte_flow_ops *ops;
2360 	struct rte_flow_action_list_handle *handle;
2361 
2362 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
2363 	ops = rte_flow_ops_get(port_id, error);
2364 	if (!ops || !ops->async_action_list_handle_create) {
2365 		rte_flow_error_set(error, ENOTSUP,
2366 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2367 				   "action_list handle not supported");
2368 		return NULL;
2369 	}
2370 	dev = &rte_eth_devices[port_id];
2371 	handle = ops->async_action_list_handle_create(dev, queue_id, attr, conf,
2372 						      actions, user_data,
2373 						      error);
2374 	ret = handle == NULL ? flow_err(port_id, -rte_errno, error) : 0;
2375 	rte_flow_trace_async_action_list_handle_create(port_id, queue_id, attr,
2376 						       conf, actions, user_data,
2377 						       ret);
2378 	return handle;
2379 }
2380 
2381 int
2382 rte_flow_async_action_list_handle_destroy(uint16_t port_id, uint32_t queue_id,
2383 				 const struct rte_flow_op_attr *op_attr,
2384 				 struct rte_flow_action_list_handle *handle,
2385 				 void *user_data, struct rte_flow_error *error)
2386 {
2387 	int ret;
2388 	struct rte_eth_dev *dev;
2389 	const struct rte_flow_ops *ops;
2390 
2391 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2392 	ops = rte_flow_ops_get(port_id, error);
2393 	if (!ops || !ops->async_action_list_handle_destroy)
2394 		return rte_flow_error_set(error, ENOTSUP,
2395 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2396 					  "async action_list handle not supported");
2397 	dev = &rte_eth_devices[port_id];
2398 	ret = ops->async_action_list_handle_destroy(dev, queue_id, op_attr,
2399 						    handle, user_data, error);
2400 	ret = flow_err(port_id, ret, error);
2401 	rte_flow_trace_async_action_list_handle_destroy(port_id, queue_id,
2402 							op_attr, handle,
2403 							user_data, ret);
2404 	return ret;
2405 }
2406 
2407 int
2408 rte_flow_action_list_handle_query_update(uint16_t port_id,
2409 			 const struct rte_flow_action_list_handle *handle,
2410 			 const void **update, void **query,
2411 			 enum rte_flow_query_update_mode mode,
2412 			 struct rte_flow_error *error)
2413 {
2414 	int ret;
2415 	struct rte_eth_dev *dev;
2416 	const struct rte_flow_ops *ops;
2417 
2418 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2419 	ops = rte_flow_ops_get(port_id, error);
2420 	if (!ops || !ops->action_list_handle_query_update)
2421 		return rte_flow_error_set(error, ENOTSUP,
2422 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2423 					  "action_list query_update not supported");
2424 	dev = &rte_eth_devices[port_id];
2425 	ret = ops->action_list_handle_query_update(dev, handle, update, query,
2426 						   mode, error);
2427 	ret = flow_err(port_id, ret, error);
2428 	rte_flow_trace_action_list_handle_query_update(port_id, handle, update,
2429 						       query, mode, ret);
2430 	return ret;
2431 }
2432 
2433 int
2434 rte_flow_async_action_list_handle_query_update(uint16_t port_id, uint32_t queue_id,
2435 			 const struct rte_flow_op_attr *attr,
2436 			 const struct rte_flow_action_list_handle *handle,
2437 			 const void **update, void **query,
2438 			 enum rte_flow_query_update_mode mode,
2439 			 void *user_data, struct rte_flow_error *error)
2440 {
2441 	int ret;
2442 	struct rte_eth_dev *dev;
2443 	const struct rte_flow_ops *ops;
2444 
2445 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2446 	ops = rte_flow_ops_get(port_id, error);
2447 	if (!ops || !ops->async_action_list_handle_query_update)
2448 		return rte_flow_error_set(error, ENOTSUP,
2449 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2450 					  "action_list async query_update not supported");
2451 	dev = &rte_eth_devices[port_id];
2452 	ret = ops->async_action_list_handle_query_update(dev, queue_id, attr,
2453 							 handle, update, query,
2454 							 mode, user_data,
2455 							 error);
2456 	ret = flow_err(port_id, ret, error);
2457 	rte_flow_trace_async_action_list_handle_query_update(port_id, queue_id,
2458 							     attr, handle,
2459 							     update, query,
2460 							     mode, user_data,
2461 							     ret);
2462 	return ret;
2463 }
2464 
2465 int
2466 rte_flow_calc_table_hash(uint16_t port_id, const struct rte_flow_template_table *table,
2467 			 const struct rte_flow_item pattern[], uint8_t pattern_template_index,
2468 			 uint32_t *hash, struct rte_flow_error *error)
2469 {
2470 	int ret;
2471 	struct rte_eth_dev *dev;
2472 	const struct rte_flow_ops *ops;
2473 
2474 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2475 	ops = rte_flow_ops_get(port_id, error);
2476 	if (!ops || !ops->flow_calc_table_hash)
2477 		return rte_flow_error_set(error, ENOTSUP,
2478 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2479 					  "calc table hash not supported");
2480 	dev = &rte_eth_devices[port_id];
2481 	ret = ops->flow_calc_table_hash(dev, table, pattern, pattern_template_index,
2482 					hash, error);
2483 	return flow_err(port_id, ret, error);
2484 }
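
/*
 * Editor's example (not part of the library): computing the table hash
 * a rule with the given source address would receive, which can help
 * steer insertions; assumes the PMD implements flow_calc_table_hash and
 * that the table was built from a single pattern template (index 0).
 */
static __rte_unused int
example_rule_hash(uint16_t port_id,
		  const struct rte_flow_template_table *table,
		  uint32_t *hash, struct rte_flow_error *err)
{
	const struct rte_flow_item_ipv4 ip_spec = {
		.hdr.src_addr = RTE_BE32(RTE_IPV4(10, 0, 0, 1)),
	};
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .spec = &ip_spec },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};

	return rte_flow_calc_table_hash(port_id, table, pattern, 0,
					hash, err);
}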
2485