xref: /dpdk/lib/ethdev/rte_flow.c (revision 8f6c2a1209c31b401d0a8fc74e4b98b1f2d599dc)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2016 6WIND S.A.
3  * Copyright 2016 Mellanox Technologies, Ltd
4  */
5 
6 #include <errno.h>
7 #include <stddef.h>
8 #include <stdint.h>
9 #include <pthread.h>
10 
11 #include <rte_common.h>
12 #include <rte_errno.h>
13 #include <rte_branch_prediction.h>
14 #include <rte_string_fns.h>
15 #include <rte_mbuf_dyn.h>
16 #include "rte_ethdev.h"
17 #include "rte_flow_driver.h"
18 #include "rte_flow.h"
19 
20 #include "ethdev_trace.h"
21 
22 /* Mbuf dynamic field name for metadata. */
23 int32_t rte_flow_dynf_metadata_offs = -1;
24 
25 /* Mbuf dynamic field flag bit number for metadata. */
26 uint64_t rte_flow_dynf_metadata_mask;
27 
28 /**
29  * Flow elements description tables.
30  */
31 struct rte_flow_desc_data {
32 	const char *name;
33 	size_t size;
34 	size_t (*desc_fn)(void *dst, const void *src);
35 };
36 
37 /**
38  * Copy flow element description data.
39  * @param buf
40  * Destination memory.
41  * @param data
42  * Source memory.
43  * @param size
44  * Requested copy size.
45  * @param desc
46  * rte_flow_desc_item - for flow item conversion.
47  * rte_flow_desc_action - for flow action conversion.
48  * @param type
49  * Offset into the desc param or negative value for private flow elements.
50  */
51 static inline size_t
52 rte_flow_conv_copy(void *buf, const void *data, const size_t size,
53 		   const struct rte_flow_desc_data *desc, int type)
54 {
55 	/*
56 	 * Allow PMD-private flow elements identified by negative type values.
57 	 */
58 	bool rte_type = type >= 0;
59 
60 	size_t sz = rte_type ? desc[type].size : sizeof(void *);
61 	if (buf == NULL || data == NULL)
62 		return 0;
63 	rte_memcpy(buf, data, (size > sz ? sz : size));
64 	if (rte_type && desc[type].desc_fn)
65 		sz += desc[type].desc_fn(size > 0 ? buf : NULL, data);
66 	return sz;
67 }
68 
69 static size_t
70 rte_flow_item_flex_conv(void *buf, const void *data)
71 {
72 	struct rte_flow_item_flex *dst = buf;
73 	const struct rte_flow_item_flex *src = data;
74 	if (buf) {
75 		dst->pattern = rte_memcpy
76 			((void *)((uintptr_t)(dst + 1)), src->pattern,
77 			 src->length);
78 	}
79 	return src->length;
80 }
81 
82 /** Generate flow_item[] entry. */
83 #define MK_FLOW_ITEM(t, s) \
84 	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
85 		.name = # t, \
86 		.size = s,               \
87 		.desc_fn = NULL,\
88 	}
89 
90 #define MK_FLOW_ITEM_FN(t, s, fn) \
91 	[RTE_FLOW_ITEM_TYPE_ ## t] = {\
92 		.name = # t,                 \
93 		.size = s,                   \
94 		.desc_fn = fn,               \
95 	}
96 
97 /** Information about known flow pattern items. */
98 static const struct rte_flow_desc_data rte_flow_desc_item[] = {
99 	MK_FLOW_ITEM(END, 0),
100 	MK_FLOW_ITEM(VOID, 0),
101 	MK_FLOW_ITEM(INVERT, 0),
102 	MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
103 	MK_FLOW_ITEM(PORT_ID, sizeof(struct rte_flow_item_port_id)),
104 	MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)),
105 	MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
106 	MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
107 	MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
108 	MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
109 	MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
110 	MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
111 	MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
112 	MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
113 	MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
114 	MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
115 	MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
116 	MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
117 	MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
118 	MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)),
119 	MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
120 	MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
121 	MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
122 	MK_FLOW_ITEM(ESP, sizeof(struct rte_flow_item_esp)),
123 	MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
124 	MK_FLOW_ITEM(VXLAN_GPE, sizeof(struct rte_flow_item_vxlan_gpe)),
125 	MK_FLOW_ITEM(ARP_ETH_IPV4, sizeof(struct rte_flow_item_arp_eth_ipv4)),
126 	MK_FLOW_ITEM(IPV6_EXT, sizeof(struct rte_flow_item_ipv6_ext)),
127 	MK_FLOW_ITEM(IPV6_FRAG_EXT, sizeof(struct rte_flow_item_ipv6_frag_ext)),
128 	MK_FLOW_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
129 	MK_FLOW_ITEM(ICMP6_ECHO_REQUEST, sizeof(struct rte_flow_item_icmp6_echo)),
130 	MK_FLOW_ITEM(ICMP6_ECHO_REPLY, sizeof(struct rte_flow_item_icmp6_echo)),
131 	MK_FLOW_ITEM(ICMP6_ND_NS, sizeof(struct rte_flow_item_icmp6_nd_ns)),
132 	MK_FLOW_ITEM(ICMP6_ND_NA, sizeof(struct rte_flow_item_icmp6_nd_na)),
133 	MK_FLOW_ITEM(ICMP6_ND_OPT, sizeof(struct rte_flow_item_icmp6_nd_opt)),
134 	MK_FLOW_ITEM(ICMP6_ND_OPT_SLA_ETH,
135 		     sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
136 	MK_FLOW_ITEM(ICMP6_ND_OPT_TLA_ETH,
137 		     sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
138 	MK_FLOW_ITEM(MARK, sizeof(struct rte_flow_item_mark)),
139 	MK_FLOW_ITEM(META, sizeof(struct rte_flow_item_meta)),
140 	MK_FLOW_ITEM(TAG, sizeof(struct rte_flow_item_tag)),
141 	MK_FLOW_ITEM(GRE_KEY, sizeof(rte_be32_t)),
142 	MK_FLOW_ITEM(GRE_OPTION, sizeof(struct rte_flow_item_gre_opt)),
143 	MK_FLOW_ITEM(GTP_PSC, sizeof(struct rte_flow_item_gtp_psc)),
144 	MK_FLOW_ITEM(PPPOES, sizeof(struct rte_flow_item_pppoe)),
145 	MK_FLOW_ITEM(PPPOED, sizeof(struct rte_flow_item_pppoe)),
146 	MK_FLOW_ITEM(PPPOE_PROTO_ID,
147 			sizeof(struct rte_flow_item_pppoe_proto_id)),
148 	MK_FLOW_ITEM(NSH, sizeof(struct rte_flow_item_nsh)),
149 	MK_FLOW_ITEM(IGMP, sizeof(struct rte_flow_item_igmp)),
150 	MK_FLOW_ITEM(AH, sizeof(struct rte_flow_item_ah)),
151 	MK_FLOW_ITEM(HIGIG2, sizeof(struct rte_flow_item_higig2_hdr)),
152 	MK_FLOW_ITEM(L2TPV3OIP, sizeof(struct rte_flow_item_l2tpv3oip)),
153 	MK_FLOW_ITEM(PFCP, sizeof(struct rte_flow_item_pfcp)),
154 	MK_FLOW_ITEM(ECPRI, sizeof(struct rte_flow_item_ecpri)),
155 	MK_FLOW_ITEM(GENEVE_OPT, sizeof(struct rte_flow_item_geneve_opt)),
156 	MK_FLOW_ITEM(INTEGRITY, sizeof(struct rte_flow_item_integrity)),
157 	MK_FLOW_ITEM(CONNTRACK, sizeof(uint32_t)),
158 	MK_FLOW_ITEM(PORT_REPRESENTOR, sizeof(struct rte_flow_item_ethdev)),
159 	MK_FLOW_ITEM(REPRESENTED_PORT, sizeof(struct rte_flow_item_ethdev)),
160 	MK_FLOW_ITEM_FN(FLEX, sizeof(struct rte_flow_item_flex),
161 			rte_flow_item_flex_conv),
162 	MK_FLOW_ITEM(L2TPV2, sizeof(struct rte_flow_item_l2tpv2)),
163 	MK_FLOW_ITEM(PPP, sizeof(struct rte_flow_item_ppp)),
164 	MK_FLOW_ITEM(METER_COLOR, sizeof(struct rte_flow_item_meter_color)),
165 	MK_FLOW_ITEM(IPV6_ROUTING_EXT, sizeof(struct rte_flow_item_ipv6_routing_ext)),
166 	MK_FLOW_ITEM(QUOTA, sizeof(struct rte_flow_item_quota)),
167 	MK_FLOW_ITEM(AGGR_AFFINITY, sizeof(struct rte_flow_item_aggr_affinity)),
168 	MK_FLOW_ITEM(TX_QUEUE, sizeof(struct rte_flow_item_tx_queue)),
169 	MK_FLOW_ITEM(IB_BTH, sizeof(struct rte_flow_item_ib_bth)),
170 	MK_FLOW_ITEM(PTYPE, sizeof(struct rte_flow_item_ptype)),
171 };
172 
173 /** Generate flow_action[] entry. */
174 #define MK_FLOW_ACTION(t, s) \
175 	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
176 		.name = # t, \
177 		.size = s, \
178 		.desc_fn = NULL,\
179 	}
180 
181 #define MK_FLOW_ACTION_FN(t, fn) \
182 	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
183 		.name = # t, \
184 		.size = 0, \
185 		.desc_fn = fn,\
186 	}
187 
189 /** Information about known flow actions. */
190 static const struct rte_flow_desc_data rte_flow_desc_action[] = {
191 	MK_FLOW_ACTION(END, 0),
192 	MK_FLOW_ACTION(VOID, 0),
193 	MK_FLOW_ACTION(PASSTHRU, 0),
194 	MK_FLOW_ACTION(JUMP, sizeof(struct rte_flow_action_jump)),
195 	MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
196 	MK_FLOW_ACTION(FLAG, 0),
197 	MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
198 	MK_FLOW_ACTION(DROP, 0),
199 	MK_FLOW_ACTION(COUNT, sizeof(struct rte_flow_action_count)),
200 	MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)),
201 	MK_FLOW_ACTION(PF, 0),
202 	MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
203 	MK_FLOW_ACTION(PORT_ID, sizeof(struct rte_flow_action_port_id)),
204 	MK_FLOW_ACTION(METER, sizeof(struct rte_flow_action_meter)),
205 	MK_FLOW_ACTION(SECURITY, sizeof(struct rte_flow_action_security)),
206 	MK_FLOW_ACTION(OF_DEC_NW_TTL, 0),
207 	MK_FLOW_ACTION(OF_POP_VLAN, 0),
208 	MK_FLOW_ACTION(OF_PUSH_VLAN,
209 		       sizeof(struct rte_flow_action_of_push_vlan)),
210 	MK_FLOW_ACTION(OF_SET_VLAN_VID,
211 		       sizeof(struct rte_flow_action_of_set_vlan_vid)),
212 	MK_FLOW_ACTION(OF_SET_VLAN_PCP,
213 		       sizeof(struct rte_flow_action_of_set_vlan_pcp)),
214 	MK_FLOW_ACTION(OF_POP_MPLS,
215 		       sizeof(struct rte_flow_action_of_pop_mpls)),
216 	MK_FLOW_ACTION(OF_PUSH_MPLS,
217 		       sizeof(struct rte_flow_action_of_push_mpls)),
218 	MK_FLOW_ACTION(VXLAN_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
219 	MK_FLOW_ACTION(VXLAN_DECAP, 0),
220 	MK_FLOW_ACTION(NVGRE_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
221 	MK_FLOW_ACTION(NVGRE_DECAP, 0),
222 	MK_FLOW_ACTION(RAW_ENCAP, sizeof(struct rte_flow_action_raw_encap)),
223 	MK_FLOW_ACTION(RAW_DECAP, sizeof(struct rte_flow_action_raw_decap)),
224 	MK_FLOW_ACTION(SET_IPV4_SRC,
225 		       sizeof(struct rte_flow_action_set_ipv4)),
226 	MK_FLOW_ACTION(SET_IPV4_DST,
227 		       sizeof(struct rte_flow_action_set_ipv4)),
228 	MK_FLOW_ACTION(SET_IPV6_SRC,
229 		       sizeof(struct rte_flow_action_set_ipv6)),
230 	MK_FLOW_ACTION(SET_IPV6_DST,
231 		       sizeof(struct rte_flow_action_set_ipv6)),
232 	MK_FLOW_ACTION(SET_TP_SRC,
233 		       sizeof(struct rte_flow_action_set_tp)),
234 	MK_FLOW_ACTION(SET_TP_DST,
235 		       sizeof(struct rte_flow_action_set_tp)),
236 	MK_FLOW_ACTION(MAC_SWAP, 0),
237 	MK_FLOW_ACTION(DEC_TTL, 0),
238 	MK_FLOW_ACTION(SET_TTL, sizeof(struct rte_flow_action_set_ttl)),
239 	MK_FLOW_ACTION(SET_MAC_SRC, sizeof(struct rte_flow_action_set_mac)),
240 	MK_FLOW_ACTION(SET_MAC_DST, sizeof(struct rte_flow_action_set_mac)),
241 	MK_FLOW_ACTION(INC_TCP_SEQ, sizeof(rte_be32_t)),
242 	MK_FLOW_ACTION(DEC_TCP_SEQ, sizeof(rte_be32_t)),
243 	MK_FLOW_ACTION(INC_TCP_ACK, sizeof(rte_be32_t)),
244 	MK_FLOW_ACTION(DEC_TCP_ACK, sizeof(rte_be32_t)),
245 	MK_FLOW_ACTION(SET_TAG, sizeof(struct rte_flow_action_set_tag)),
246 	MK_FLOW_ACTION(SET_META, sizeof(struct rte_flow_action_set_meta)),
247 	MK_FLOW_ACTION(SET_IPV4_DSCP, sizeof(struct rte_flow_action_set_dscp)),
248 	MK_FLOW_ACTION(SET_IPV6_DSCP, sizeof(struct rte_flow_action_set_dscp)),
249 	MK_FLOW_ACTION(AGE, sizeof(struct rte_flow_action_age)),
250 	MK_FLOW_ACTION(SAMPLE, sizeof(struct rte_flow_action_sample)),
251 	MK_FLOW_ACTION(MODIFY_FIELD,
252 		       sizeof(struct rte_flow_action_modify_field)),
253 	/*
254 	 * An indirect action is represented as a handle of type
255 	 * (struct rte_flow_action_handle *) stored directly in the conf
256 	 * field (see struct rte_flow_action); no additional structure is
257 	 * needed to store the indirect action handle.
258 	 */
259 	MK_FLOW_ACTION(INDIRECT, 0),
260 	MK_FLOW_ACTION(CONNTRACK, sizeof(struct rte_flow_action_conntrack)),
261 	MK_FLOW_ACTION(PORT_REPRESENTOR, sizeof(struct rte_flow_action_ethdev)),
262 	MK_FLOW_ACTION(REPRESENTED_PORT, sizeof(struct rte_flow_action_ethdev)),
263 	MK_FLOW_ACTION(METER_MARK, sizeof(struct rte_flow_action_meter_mark)),
264 	MK_FLOW_ACTION(SEND_TO_KERNEL, 0),
265 	MK_FLOW_ACTION(QUOTA, sizeof(struct rte_flow_action_quota)),
266 	MK_FLOW_ACTION(IPV6_EXT_PUSH, sizeof(struct rte_flow_action_ipv6_ext_push)),
267 	MK_FLOW_ACTION(IPV6_EXT_REMOVE, sizeof(struct rte_flow_action_ipv6_ext_remove)),
268 	MK_FLOW_ACTION(INDIRECT_LIST,
269 		       sizeof(struct rte_flow_action_indirect_list)),
270 	MK_FLOW_ACTION(PROG,
271 		       sizeof(struct rte_flow_action_prog)),
272 };
273 
274 int
275 rte_flow_dynf_metadata_register(void)
276 {
277 	int offset;
278 	int flag;
279 
280 	static const struct rte_mbuf_dynfield desc_offs = {
281 		.name = RTE_MBUF_DYNFIELD_METADATA_NAME,
282 		.size = sizeof(uint32_t),
283 		.align = __alignof__(uint32_t),
284 	};
285 	static const struct rte_mbuf_dynflag desc_flag = {
286 		.name = RTE_MBUF_DYNFLAG_METADATA_NAME,
287 	};
288 
289 	offset = rte_mbuf_dynfield_register(&desc_offs);
290 	if (offset < 0)
291 		goto error;
292 	flag = rte_mbuf_dynflag_register(&desc_flag);
293 	if (flag < 0)
294 		goto error;
295 	rte_flow_dynf_metadata_offs = offset;
296 	rte_flow_dynf_metadata_mask = RTE_BIT64(flag);
297 
298 	rte_flow_trace_dynf_metadata_register(offset, RTE_BIT64(flag));
299 
300 	return 0;
301 
302 error:
303 	rte_flow_dynf_metadata_offs = -1;
304 	rte_flow_dynf_metadata_mask = UINT64_C(0);
305 	return -rte_errno;
306 }
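
/*
 * Usage sketch (illustrative, not compiled here): an application registers
 * the metadata dynamic field once, before creating flows with META/SET_META,
 * and later reads the value delivered with received packets. "m" is a
 * placeholder mbuf; RTE_FLOW_DYNF_METADATA() is the accessor from rte_flow.h.
 *
 *	if (rte_flow_dynf_metadata_register() < 0)
 *		return -rte_errno;
 *	...
 *	if (m->ol_flags & rte_flow_dynf_metadata_mask) {
 *		uint32_t meta = *RTE_FLOW_DYNF_METADATA(m);
 *		...
 *	}
 */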
307 
308 static inline void
309 fts_enter(struct rte_eth_dev *dev)
310 {
311 	if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
312 		pthread_mutex_lock(&dev->data->flow_ops_mutex);
313 }
314 
315 static inline void
316 fts_exit(struct rte_eth_dev *dev)
317 {
318 	if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
319 		pthread_mutex_unlock(&dev->data->flow_ops_mutex);
320 }
321 
322 static int
323 flow_err(uint16_t port_id, int ret, struct rte_flow_error *error)
324 {
325 	if (ret == 0)
326 		return 0;
327 	if (rte_eth_dev_is_removed(port_id))
328 		return rte_flow_error_set(error, EIO,
329 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
330 					  NULL, rte_strerror(EIO));
331 	return ret;
332 }
333 
334 /* Get generic flow operations structure from a port. */
335 const struct rte_flow_ops *
336 rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error)
337 {
338 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
339 	const struct rte_flow_ops *ops;
340 	int code;
341 
342 	if (unlikely(!rte_eth_dev_is_valid_port(port_id)))
343 		code = ENODEV;
344 	else if (unlikely(dev->dev_ops->flow_ops_get == NULL))
345 		/* flow API not supported with this driver dev_ops */
346 		code = ENOSYS;
347 	else
348 		code = dev->dev_ops->flow_ops_get(dev, &ops);
349 	if (code == 0 && ops == NULL)
350 		/* flow API not supported with this device */
351 		code = ENOSYS;
352 
353 	if (code != 0) {
354 		rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
355 				   NULL, rte_strerror(code));
356 		return NULL;
357 	}
358 	return ops;
359 }
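
/*
 * Driver-side sketch (hypothetical PMD code, not part of this file): the
 * flow_ops_get callback invoked above simply hands back a constant ops
 * table; "my_flow_ops" and the callbacks it references are placeholders.
 *
 *	static const struct rte_flow_ops my_flow_ops = {
 *		.validate = my_flow_validate,
 *		.create = my_flow_create,
 *		.destroy = my_flow_destroy,
 *	};
 *
 *	static int
 *	my_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
 *			const struct rte_flow_ops **ops)
 *	{
 *		*ops = &my_flow_ops;
 *		return 0;
 *	}
 */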
360 
361 /* Check whether a flow rule can be created on a given port. */
362 int
363 rte_flow_validate(uint16_t port_id,
364 		  const struct rte_flow_attr *attr,
365 		  const struct rte_flow_item pattern[],
366 		  const struct rte_flow_action actions[],
367 		  struct rte_flow_error *error)
368 {
369 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
370 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
371 	int ret;
372 
373 	if (likely(!!attr) && attr->transfer &&
374 	    (attr->ingress || attr->egress)) {
375 		return rte_flow_error_set(error, EINVAL,
376 					  RTE_FLOW_ERROR_TYPE_ATTR,
377 					  attr, "cannot use attr ingress/egress with attr transfer");
378 	}
379 
380 	if (unlikely(!ops))
381 		return -rte_errno;
382 	if (likely(!!ops->validate)) {
383 		fts_enter(dev);
384 		ret = ops->validate(dev, attr, pattern, actions, error);
385 		fts_exit(dev);
386 		ret = flow_err(port_id, ret, error);
387 
388 		rte_flow_trace_validate(port_id, attr, pattern, actions, ret);
389 
390 		return ret;
391 	}
392 	return rte_flow_error_set(error, ENOSYS,
393 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
394 				  NULL, rte_strerror(ENOSYS));
395 }
396 
397 /* Create a flow rule on a given port. */
398 struct rte_flow *
399 rte_flow_create(uint16_t port_id,
400 		const struct rte_flow_attr *attr,
401 		const struct rte_flow_item pattern[],
402 		const struct rte_flow_action actions[],
403 		struct rte_flow_error *error)
404 {
405 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
406 	struct rte_flow *flow;
407 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
408 
409 	if (unlikely(!ops))
410 		return NULL;
411 	if (likely(!!ops->create)) {
412 		fts_enter(dev);
413 		flow = ops->create(dev, attr, pattern, actions, error);
414 		fts_exit(dev);
415 		if (flow == NULL)
416 			flow_err(port_id, -rte_errno, error);
417 
418 		rte_flow_trace_create(port_id, attr, pattern, actions, flow);
419 
420 		return flow;
421 	}
422 	rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
423 			   NULL, rte_strerror(ENOSYS));
424 	return NULL;
425 }
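
/*
 * Usage sketch (illustrative only): validating and creating a minimal
 * ingress rule that drops all Ethernet traffic; "port_id" is assumed to
 * be a started, flow-capable port.
 *
 *	struct rte_flow_error err;
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow *flow = NULL;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *	if (flow == NULL)
 *		printf("flow error: %s\n", err.message ? err.message : "?");
 */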
426 
427 /* Destroy a flow rule on a given port. */
428 int
429 rte_flow_destroy(uint16_t port_id,
430 		 struct rte_flow *flow,
431 		 struct rte_flow_error *error)
432 {
433 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
434 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
435 	int ret;
436 
437 	if (unlikely(!ops))
438 		return -rte_errno;
439 	if (likely(!!ops->destroy)) {
440 		fts_enter(dev);
441 		ret = ops->destroy(dev, flow, error);
442 		fts_exit(dev);
443 		ret = flow_err(port_id, ret, error);
444 
445 		rte_flow_trace_destroy(port_id, flow, ret);
446 
447 		return ret;
448 	}
449 	return rte_flow_error_set(error, ENOSYS,
450 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
451 				  NULL, rte_strerror(ENOSYS));
452 }
453 
454 int
455 rte_flow_actions_update(uint16_t port_id,
456 			struct rte_flow *flow,
457 			const struct rte_flow_action actions[],
458 			struct rte_flow_error *error)
459 {
460 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
461 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
462 	int ret;
463 
464 	if (unlikely(!ops))
465 		return -rte_errno;
466 	if (likely(!!ops->actions_update)) {
467 		fts_enter(dev);
468 		ret = ops->actions_update(dev, flow, actions, error);
469 		fts_exit(dev);
470 
471 		rte_flow_trace_actions_update(port_id, flow, actions, ret);
472 
473 		return flow_err(port_id, ret, error);
474 	}
475 	return rte_flow_error_set(error, ENOSYS,
476 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
477 				  NULL, rte_strerror(ENOSYS));
478 }
479 
480 /* Destroy all flow rules associated with a port. */
481 int
482 rte_flow_flush(uint16_t port_id,
483 	       struct rte_flow_error *error)
484 {
485 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
486 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
487 	int ret;
488 
489 	if (unlikely(!ops))
490 		return -rte_errno;
491 	if (likely(!!ops->flush)) {
492 		fts_enter(dev);
493 		ret = ops->flush(dev, error);
494 		fts_exit(dev);
495 		ret = flow_err(port_id, ret, error);
496 
497 		rte_flow_trace_flush(port_id, ret);
498 
499 		return ret;
500 	}
501 	return rte_flow_error_set(error, ENOSYS,
502 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
503 				  NULL, rte_strerror(ENOSYS));
504 }
505 
506 /* Query an existing flow rule. */
507 int
508 rte_flow_query(uint16_t port_id,
509 	       struct rte_flow *flow,
510 	       const struct rte_flow_action *action,
511 	       void *data,
512 	       struct rte_flow_error *error)
513 {
514 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
515 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
516 	int ret;
517 
518 	if (!ops)
519 		return -rte_errno;
520 	if (likely(!!ops->query)) {
521 		fts_enter(dev);
522 		ret = ops->query(dev, flow, action, data, error);
523 		fts_exit(dev);
524 		ret = flow_err(port_id, ret, error);
525 
526 		rte_flow_trace_query(port_id, flow, action, data, ret);
527 
528 		return ret;
529 	}
530 	return rte_flow_error_set(error, ENOSYS,
531 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
532 				  NULL, rte_strerror(ENOSYS));
533 }
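
/*
 * Usage sketch (illustrative only): reading the counter of a rule that was
 * created with a COUNT action; "flow" is the handle returned by
 * rte_flow_create().
 *
 *	struct rte_flow_query_count count = { .reset = 0 };
 *	const struct rte_flow_action count_action = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *	};
 *
 *	if (rte_flow_query(port_id, flow, &count_action, &count, &err) == 0 &&
 *	    count.hits_set)
 *		printf("hits: %" PRIu64 "\n", count.hits);
 */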
534 
535 /* Restrict ingress traffic to the defined flow rules. */
536 int
537 rte_flow_isolate(uint16_t port_id,
538 		 int set,
539 		 struct rte_flow_error *error)
540 {
541 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
542 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
543 	int ret;
544 
545 	if (!ops)
546 		return -rte_errno;
547 	if (likely(!!ops->isolate)) {
548 		fts_enter(dev);
549 		ret = ops->isolate(dev, set, error);
550 		fts_exit(dev);
551 		ret = flow_err(port_id, ret, error);
552 
553 		rte_flow_trace_isolate(port_id, set, ret);
554 
555 		return ret;
556 	}
557 	return rte_flow_error_set(error, ENOSYS,
558 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
559 				  NULL, rte_strerror(ENOSYS));
560 }
561 
562 /* Initialize flow error structure. */
563 int
564 rte_flow_error_set(struct rte_flow_error *error,
565 		   int code,
566 		   enum rte_flow_error_type type,
567 		   const void *cause,
568 		   const char *message)
569 {
570 	if (error) {
571 		*error = (struct rte_flow_error){
572 			.type = type,
573 			.cause = cause,
574 			.message = message,
575 		};
576 	}
577 	rte_errno = code;
578 	return -code;
579 }
580 
581 /** Pattern item specification types. */
582 enum rte_flow_conv_item_spec_type {
583 	RTE_FLOW_CONV_ITEM_SPEC,
584 	RTE_FLOW_CONV_ITEM_LAST,
585 	RTE_FLOW_CONV_ITEM_MASK,
586 };
587 
588 /**
589  * Copy pattern item specification.
590  *
591  * @param[out] buf
592  *   Output buffer. Can be NULL if @p size is zero.
593  * @param size
594  *   Size of @p buf in bytes.
595  * @param[in] item
596  *   Pattern item to copy specification from.
597  * @param type
598  *   Specification selector for either @p spec, @p last or @p mask.
599  *
600  * @return
601  *   Number of bytes needed to store pattern item specification regardless
602  *   of @p size. @p buf contents are truncated to @p size if not large
603  *   enough.
604  */
605 static size_t
606 rte_flow_conv_item_spec(void *buf, const size_t size,
607 			const struct rte_flow_item *item,
608 			enum rte_flow_conv_item_spec_type type)
609 {
610 	size_t off;
611 	const void *data =
612 		type == RTE_FLOW_CONV_ITEM_SPEC ? item->spec :
613 		type == RTE_FLOW_CONV_ITEM_LAST ? item->last :
614 		type == RTE_FLOW_CONV_ITEM_MASK ? item->mask :
615 		NULL;
616 
617 	switch (item->type) {
618 		union {
619 			const struct rte_flow_item_raw *raw;
620 		} spec;
621 		union {
622 			const struct rte_flow_item_raw *raw;
623 		} last;
624 		union {
625 			const struct rte_flow_item_raw *raw;
626 		} mask;
627 		union {
628 			const struct rte_flow_item_raw *raw;
629 		} src;
630 		union {
631 			struct rte_flow_item_raw *raw;
632 		} dst;
633 		size_t tmp;
634 
635 	case RTE_FLOW_ITEM_TYPE_RAW:
636 		spec.raw = item->spec;
637 		last.raw = item->last ? item->last : item->spec;
638 		mask.raw = item->mask ? item->mask : &rte_flow_item_raw_mask;
639 		src.raw = data;
640 		dst.raw = buf;
641 		rte_memcpy(dst.raw,
642 			   (&(struct rte_flow_item_raw){
643 				.relative = src.raw->relative,
644 				.search = src.raw->search,
645 				.reserved = src.raw->reserved,
646 				.offset = src.raw->offset,
647 				.limit = src.raw->limit,
648 				.length = src.raw->length,
649 			   }),
650 			   size > sizeof(*dst.raw) ? sizeof(*dst.raw) : size);
651 		off = sizeof(*dst.raw);
652 		if (type == RTE_FLOW_CONV_ITEM_SPEC ||
653 		    (type == RTE_FLOW_CONV_ITEM_MASK &&
654 		     ((spec.raw->length & mask.raw->length) >=
655 		      (last.raw->length & mask.raw->length))))
656 			tmp = spec.raw->length & mask.raw->length;
657 		else
658 			tmp = last.raw->length & mask.raw->length;
659 		if (tmp) {
660 			off = RTE_ALIGN_CEIL(off, sizeof(*dst.raw->pattern));
661 			if (size >= off + tmp)
662 				dst.raw->pattern = rte_memcpy
663 					((void *)((uintptr_t)dst.raw + off),
664 					 src.raw->pattern, tmp);
665 			off += tmp;
666 		}
667 		break;
668 	default:
669 		off = rte_flow_conv_copy(buf, data, size,
670 					 rte_flow_desc_item, item->type);
671 		break;
672 	}
673 	return off;
674 }
675 
676 /**
677  * Copy action configuration.
678  *
679  * @param[out] buf
680  *   Output buffer. Can be NULL if @p size is zero.
681  * @param size
682  *   Size of @p buf in bytes.
683  * @param[in] action
684  *   Action to copy configuration from.
685  *
686  * @return
687  *   Number of bytes needed to store the action configuration regardless
688  *   of @p size. @p buf contents are truncated to @p size if not large
689  *   enough.
690  */
691 static size_t
692 rte_flow_conv_action_conf(void *buf, const size_t size,
693 			  const struct rte_flow_action *action)
694 {
695 	size_t off;
696 
697 	switch (action->type) {
698 		union {
699 			const struct rte_flow_action_rss *rss;
700 			const struct rte_flow_action_vxlan_encap *vxlan_encap;
701 			const struct rte_flow_action_nvgre_encap *nvgre_encap;
702 		} src;
703 		union {
704 			struct rte_flow_action_rss *rss;
705 			struct rte_flow_action_vxlan_encap *vxlan_encap;
706 			struct rte_flow_action_nvgre_encap *nvgre_encap;
707 		} dst;
708 		size_t tmp;
709 		int ret;
710 
711 	case RTE_FLOW_ACTION_TYPE_RSS:
712 		src.rss = action->conf;
713 		dst.rss = buf;
714 		rte_memcpy(dst.rss,
715 			   (&(struct rte_flow_action_rss){
716 				.func = src.rss->func,
717 				.level = src.rss->level,
718 				.types = src.rss->types,
719 				.key_len = src.rss->key_len,
720 				.queue_num = src.rss->queue_num,
721 			   }),
722 			   size > sizeof(*dst.rss) ? sizeof(*dst.rss) : size);
723 		off = sizeof(*dst.rss);
724 		if (src.rss->key_len && src.rss->key) {
725 			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->key));
726 			tmp = sizeof(*src.rss->key) * src.rss->key_len;
727 			if (size >= off + tmp)
728 				dst.rss->key = rte_memcpy
729 					((void *)((uintptr_t)dst.rss + off),
730 					 src.rss->key, tmp);
731 			off += tmp;
732 		}
733 		if (src.rss->queue_num) {
734 			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->queue));
735 			tmp = sizeof(*src.rss->queue) * src.rss->queue_num;
736 			if (size >= off + tmp)
737 				dst.rss->queue = rte_memcpy
738 					((void *)((uintptr_t)dst.rss + off),
739 					 src.rss->queue, tmp);
740 			off += tmp;
741 		}
742 		break;
743 	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
744 	case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
745 		src.vxlan_encap = action->conf;
746 		dst.vxlan_encap = buf;
747 		RTE_BUILD_BUG_ON(sizeof(*src.vxlan_encap) !=
748 				 sizeof(*src.nvgre_encap) ||
749 				 offsetof(struct rte_flow_action_vxlan_encap,
750 					  definition) !=
751 				 offsetof(struct rte_flow_action_nvgre_encap,
752 					  definition));
753 		off = sizeof(*dst.vxlan_encap);
754 		if (src.vxlan_encap->definition) {
755 			off = RTE_ALIGN_CEIL
756 				(off, sizeof(*dst.vxlan_encap->definition));
757 			ret = rte_flow_conv
758 				(RTE_FLOW_CONV_OP_PATTERN,
759 				 (void *)((uintptr_t)dst.vxlan_encap + off),
760 				 size > off ? size - off : 0,
761 				 src.vxlan_encap->definition, NULL);
762 			if (ret < 0)
763 				return 0;
764 			if (size >= off + ret)
765 				dst.vxlan_encap->definition =
766 					(void *)((uintptr_t)dst.vxlan_encap +
767 						 off);
768 			off += ret;
769 		}
770 		break;
771 	default:
772 		off = rte_flow_conv_copy(buf, action->conf, size,
773 					 rte_flow_desc_action, action->type);
774 		break;
775 	}
776 	return off;
777 }
778 
779 /**
780  * Copy a list of pattern items.
781  *
782  * @param[out] dst
783  *   Destination buffer. Can be NULL if @p size is zero.
784  * @param size
785  *   Size of @p dst in bytes.
786  * @param[in] src
787  *   Source pattern items.
788  * @param num
789  *   Maximum number of pattern items to process from @p src or 0 to process
790  *   the entire list. In both cases, processing stops after
791  *   RTE_FLOW_ITEM_TYPE_END is encountered.
792  * @param[out] error
793  *   Perform verbose error reporting if not NULL.
794  *
795  * @return
796  *   A positive value representing the number of bytes needed to store
797  *   pattern items regardless of @p size on success (@p dst contents are
798  *   truncated to @p size if not large enough), a negative errno value
799  *   otherwise and rte_errno is set.
800  */
801 static int
802 rte_flow_conv_pattern(struct rte_flow_item *dst,
803 		      const size_t size,
804 		      const struct rte_flow_item *src,
805 		      unsigned int num,
806 		      struct rte_flow_error *error)
807 {
808 	uintptr_t data = (uintptr_t)dst;
809 	size_t off;
810 	size_t ret;
811 	unsigned int i;
812 
813 	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
814 		/*
815 		 * Allow PMD-private flow items (negative type values).
816 		 */
817 		if (((int)src->type >= 0) &&
818 		    ((size_t)src->type >= RTE_DIM(rte_flow_desc_item) ||
819 		     !rte_flow_desc_item[src->type].name))
820 			return rte_flow_error_set
821 				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, src,
822 				 "cannot convert unknown item type");
823 		if (size >= off + sizeof(*dst))
824 			*dst = (struct rte_flow_item){
825 				.type = src->type,
826 			};
827 		off += sizeof(*dst);
828 		if (!src->type)
829 			num = i + 1;
830 	}
831 	num = i;
832 	src -= num;
833 	dst -= num;
834 	do {
835 		if (src->spec) {
836 			off = RTE_ALIGN_CEIL(off, sizeof(double));
837 			ret = rte_flow_conv_item_spec
838 				((void *)(data + off),
839 				 size > off ? size - off : 0, src,
840 				 RTE_FLOW_CONV_ITEM_SPEC);
841 			if (size && size >= off + ret)
842 				dst->spec = (void *)(data + off);
843 			off += ret;
845 		}
846 		if (src->last) {
847 			off = RTE_ALIGN_CEIL(off, sizeof(double));
848 			ret = rte_flow_conv_item_spec
849 				((void *)(data + off),
850 				 size > off ? size - off : 0, src,
851 				 RTE_FLOW_CONV_ITEM_LAST);
852 			if (size && size >= off + ret)
853 				dst->last = (void *)(data + off);
854 			off += ret;
855 		}
856 		if (src->mask) {
857 			off = RTE_ALIGN_CEIL(off, sizeof(double));
858 			ret = rte_flow_conv_item_spec
859 				((void *)(data + off),
860 				 size > off ? size - off : 0, src,
861 				 RTE_FLOW_CONV_ITEM_MASK);
862 			if (size && size >= off + ret)
863 				dst->mask = (void *)(data + off);
864 			off += ret;
865 		}
866 		++src;
867 		++dst;
868 	} while (--num);
869 	return off;
870 }
871 
872 /**
873  * Copy a list of actions.
874  *
875  * @param[out] dst
876  *   Destination buffer. Can be NULL if @p size is zero.
877  * @param size
878  *   Size of @p dst in bytes.
879  * @param[in] src
880  *   Source actions.
881  * @param num
882  *   Maximum number of actions to process from @p src or 0 to process the
883  *   entire list. In both cases, processing stops after
884  *   RTE_FLOW_ACTION_TYPE_END is encountered.
885  * @param[out] error
886  *   Perform verbose error reporting if not NULL.
887  *
888  * @return
889  *   A positive value representing the number of bytes needed to store
890  *   actions regardless of @p size on success (@p dst contents are truncated
891  *   to @p size if not large enough), a negative errno value otherwise and
892  *   rte_errno is set.
893  */
894 static int
895 rte_flow_conv_actions(struct rte_flow_action *dst,
896 		      const size_t size,
897 		      const struct rte_flow_action *src,
898 		      unsigned int num,
899 		      struct rte_flow_error *error)
900 {
901 	uintptr_t data = (uintptr_t)dst;
902 	size_t off;
903 	size_t ret;
904 	unsigned int i;
905 
906 	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
907 		/*
908 		 * Allow PMD-private flow actions (negative type values).
909 		 */
910 		if (((int)src->type >= 0) &&
911 		    ((size_t)src->type >= RTE_DIM(rte_flow_desc_action) ||
912 		     !rte_flow_desc_action[src->type].name))
913 			return rte_flow_error_set
914 				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
915 				 src, "cannot convert unknown action type");
916 		if (size >= off + sizeof(*dst))
917 			*dst = (struct rte_flow_action){
918 				.type = src->type,
919 			};
920 		off += sizeof(*dst);
921 		if (!src->type)
922 			num = i + 1;
923 	}
924 	num = i;
925 	src -= num;
926 	dst -= num;
927 	do {
928 		if (src->type == RTE_FLOW_ACTION_TYPE_INDIRECT) {
929 			/*
930 			 * Indirect action conf fills the indirect action
931 			 * handler. Copy the action handle directly instead
932 			 * of duplicating the pointer memory.
933 			 */
934 			if (size)
935 				dst->conf = src->conf;
936 		} else if (src->conf) {
937 			off = RTE_ALIGN_CEIL(off, sizeof(double));
938 			ret = rte_flow_conv_action_conf
939 				((void *)(data + off),
940 				 size > off ? size - off : 0, src);
941 			if (size && size >= off + ret)
942 				dst->conf = (void *)(data + off);
943 			off += ret;
944 		}
945 		++src;
946 		++dst;
947 	} while (--num);
948 	return off;
949 }
950 
951 /**
952  * Copy flow rule components.
953  *
954  * This comprises the flow rule descriptor itself, attributes, pattern and
955  * actions list. NULL components in @p src are skipped.
956  *
957  * @param[out] dst
958  *   Destination buffer. Can be NULL if @p size is zero.
959  * @param size
960  *   Size of @p dst in bytes.
961  * @param[in] src
962  *   Source flow rule descriptor.
963  * @param[out] error
964  *   Perform verbose error reporting if not NULL.
965  *
966  * @return
967  *   A positive value representing the number of bytes needed to store all
968  *   components including the descriptor regardless of @p size on success
969  *   (@p dst contents are truncated to @p size if not large enough), a
970  *   negative errno value otherwise and rte_errno is set.
971  */
972 static int
973 rte_flow_conv_rule(struct rte_flow_conv_rule *dst,
974 		   const size_t size,
975 		   const struct rte_flow_conv_rule *src,
976 		   struct rte_flow_error *error)
977 {
978 	size_t off;
979 	int ret;
980 
981 	rte_memcpy(dst,
982 		   (&(struct rte_flow_conv_rule){
983 			.attr = NULL,
984 			.pattern = NULL,
985 			.actions = NULL,
986 		   }),
987 		   size > sizeof(*dst) ? sizeof(*dst) : size);
988 	off = sizeof(*dst);
989 	if (src->attr_ro) {
990 		off = RTE_ALIGN_CEIL(off, sizeof(double));
991 		if (size && size >= off + sizeof(*dst->attr))
992 			dst->attr = rte_memcpy
993 				((void *)((uintptr_t)dst + off),
994 				 src->attr_ro, sizeof(*dst->attr));
995 		off += sizeof(*dst->attr);
996 	}
997 	if (src->pattern_ro) {
998 		off = RTE_ALIGN_CEIL(off, sizeof(double));
999 		ret = rte_flow_conv_pattern((void *)((uintptr_t)dst + off),
1000 					    size > off ? size - off : 0,
1001 					    src->pattern_ro, 0, error);
1002 		if (ret < 0)
1003 			return ret;
1004 		if (size && size >= off + (size_t)ret)
1005 			dst->pattern = (void *)((uintptr_t)dst + off);
1006 		off += ret;
1007 	}
1008 	if (src->actions_ro) {
1009 		off = RTE_ALIGN_CEIL(off, sizeof(double));
1010 		ret = rte_flow_conv_actions((void *)((uintptr_t)dst + off),
1011 					    size > off ? size - off : 0,
1012 					    src->actions_ro, 0, error);
1013 		if (ret < 0)
1014 			return ret;
1015 		if (size >= off + (size_t)ret)
1016 			dst->actions = (void *)((uintptr_t)dst + off);
1017 		off += ret;
1018 	}
1019 	return off;
1020 }
1021 
1022 /**
1023  * Retrieve the name of a pattern item/action type.
1024  *
1025  * @param is_action
1026  *   Nonzero when @p src represents an action type instead of a pattern item
1027  *   type.
1028  * @param is_ptr
1029  *   Nonzero to write string address instead of contents into @p dst.
1030  * @param[out] dst
1031  *   Destination buffer. Can be NULL if @p size is zero.
1032  * @param size
1033  *   Size of @p dst in bytes.
1034  * @param[in] src
1035  *   Depending on @p is_action, source pattern item or action type cast as a
1036  *   pointer.
1037  * @param[out] error
1038  *   Perform verbose error reporting if not NULL.
1039  *
1040  * @return
1041  *   A positive value representing the number of bytes needed to store the
1042  *   name or its address regardless of @p size on success (@p dst contents
1043  *   are truncated to @p size if not large enough), a negative errno value
1044  *   otherwise and rte_errno is set.
1045  */
1046 static int
1047 rte_flow_conv_name(int is_action,
1048 		   int is_ptr,
1049 		   char *dst,
1050 		   const size_t size,
1051 		   const void *src,
1052 		   struct rte_flow_error *error)
1053 {
1054 	struct desc_info {
1055 		const struct rte_flow_desc_data *data;
1056 		size_t num;
1057 	};
1058 	static const struct desc_info info_rep[2] = {
1059 		{ rte_flow_desc_item, RTE_DIM(rte_flow_desc_item), },
1060 		{ rte_flow_desc_action, RTE_DIM(rte_flow_desc_action), },
1061 	};
1062 	const struct desc_info *const info = &info_rep[!!is_action];
1063 	unsigned int type = (uintptr_t)src;
1064 
1065 	if (type >= info->num)
1066 		return rte_flow_error_set
1067 			(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1068 			 "unknown object type to retrieve the name of");
1069 	if (!is_ptr)
1070 		return strlcpy(dst, info->data[type].name, size);
1071 	if (size >= sizeof(const char **))
1072 		*((const char **)dst) = info->data[type].name;
1073 	return sizeof(const char **);
1074 }
1075 
1076 /** Helper function to convert flow API objects. */
1077 int
1078 rte_flow_conv(enum rte_flow_conv_op op,
1079 	      void *dst,
1080 	      size_t size,
1081 	      const void *src,
1082 	      struct rte_flow_error *error)
1083 {
1084 	int ret;
1085 
1086 	switch (op) {
1087 		const struct rte_flow_attr *attr;
1088 
1089 	case RTE_FLOW_CONV_OP_NONE:
1090 		ret = 0;
1091 		break;
1092 	case RTE_FLOW_CONV_OP_ATTR:
1093 		attr = src;
1094 		if (size > sizeof(*attr))
1095 			size = sizeof(*attr);
1096 		rte_memcpy(dst, attr, size);
1097 		ret = sizeof(*attr);
1098 		break;
1099 	case RTE_FLOW_CONV_OP_ITEM:
1100 		ret = rte_flow_conv_pattern(dst, size, src, 1, error);
1101 		break;
1102 	case RTE_FLOW_CONV_OP_ACTION:
1103 		ret = rte_flow_conv_actions(dst, size, src, 1, error);
1104 		break;
1105 	case RTE_FLOW_CONV_OP_PATTERN:
1106 		ret = rte_flow_conv_pattern(dst, size, src, 0, error);
1107 		break;
1108 	case RTE_FLOW_CONV_OP_ACTIONS:
1109 		ret = rte_flow_conv_actions(dst, size, src, 0, error);
1110 		break;
1111 	case RTE_FLOW_CONV_OP_RULE:
1112 		ret = rte_flow_conv_rule(dst, size, src, error);
1113 		break;
1114 	case RTE_FLOW_CONV_OP_ITEM_NAME:
1115 		ret = rte_flow_conv_name(0, 0, dst, size, src, error);
1116 		break;
1117 	case RTE_FLOW_CONV_OP_ACTION_NAME:
1118 		ret = rte_flow_conv_name(1, 0, dst, size, src, error);
1119 		break;
1120 	case RTE_FLOW_CONV_OP_ITEM_NAME_PTR:
1121 		ret = rte_flow_conv_name(0, 1, dst, size, src, error);
1122 		break;
1123 	case RTE_FLOW_CONV_OP_ACTION_NAME_PTR:
1124 		ret = rte_flow_conv_name(1, 1, dst, size, src, error);
1125 		break;
1126 	default:
1127 		ret = rte_flow_error_set
1128 			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1129 			 "unknown object conversion operation");
1130 	}
1131 
1132 	rte_flow_trace_conv(op, dst, size, src, ret);
1133 
1134 	return ret;
1135 }
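
/*
 * Usage sketch (illustrative only): the usual two-pass pattern for
 * RTE_FLOW_CONV_OP_RULE, first querying the required size with a zero-sized
 * buffer, then performing the actual conversion into allocated storage.
 *
 *	struct rte_flow_conv_rule rule = {
 *		.attr_ro = attr,
 *		.pattern_ro = pattern,
 *		.actions_ro = actions,
 *	};
 *	int len = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, &err);
 *
 *	if (len > 0) {
 *		struct rte_flow_conv_rule *dup = malloc(len);
 *
 *		if (dup != NULL &&
 *		    rte_flow_conv(RTE_FLOW_CONV_OP_RULE, dup, len, &rule,
 *				  &err) == len)
 *			... "dup" is now a self-contained copy ...
 *	}
 */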
1136 
1137 /** Store a full rte_flow description. */
1138 size_t
1139 rte_flow_copy(struct rte_flow_desc *desc, size_t len,
1140 	      const struct rte_flow_attr *attr,
1141 	      const struct rte_flow_item *items,
1142 	      const struct rte_flow_action *actions)
1143 {
1144 	/*
1145 	 * Overlap struct rte_flow_conv with struct rte_flow_desc in order
1146 	 * to convert the former to the latter without wasting space.
1147 	 */
1148 	struct rte_flow_conv_rule *dst =
1149 		len ?
1150 		(void *)((uintptr_t)desc +
1151 			 (offsetof(struct rte_flow_desc, actions) -
1152 			  offsetof(struct rte_flow_conv_rule, actions))) :
1153 		NULL;
1154 	size_t dst_size =
1155 		len > sizeof(*desc) - sizeof(*dst) ?
1156 		len - (sizeof(*desc) - sizeof(*dst)) :
1157 		0;
1158 	struct rte_flow_conv_rule src = {
1159 		.attr_ro = NULL,
1160 		.pattern_ro = items,
1161 		.actions_ro = actions,
1162 	};
1163 	int ret;
1164 
1165 	RTE_BUILD_BUG_ON(sizeof(struct rte_flow_desc) <
1166 			 sizeof(struct rte_flow_conv_rule));
1167 	if (dst_size &&
1168 	    (&dst->pattern != &desc->items ||
1169 	     &dst->actions != &desc->actions ||
1170 	     (uintptr_t)(dst + 1) != (uintptr_t)(desc + 1))) {
1171 		rte_errno = EINVAL;
1172 		return 0;
1173 	}
1174 	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, dst, dst_size, &src, NULL);
1175 	if (ret < 0)
1176 		return 0;
1177 	ret += sizeof(*desc) - sizeof(*dst);
1178 	rte_memcpy(desc,
1179 		   (&(struct rte_flow_desc){
1180 			.size = ret,
1181 			.attr = *attr,
1182 			.items = dst_size ? dst->pattern : NULL,
1183 			.actions = dst_size ? dst->actions : NULL,
1184 		   }),
1185 		   len > sizeof(*desc) ? sizeof(*desc) : len);
1186 
1187 	rte_flow_trace_copy(desc, len, attr, items, actions, ret);
1188 
1189 	return ret;
1190 }
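
/*
 * Usage sketch (illustrative only): like rte_flow_conv() above,
 * rte_flow_copy() may be called with a zero length first to learn the
 * required buffer size.
 *
 *	size_t len = rte_flow_copy(NULL, 0, &attr, items, actions);
 *	struct rte_flow_desc *desc = len ? malloc(len) : NULL;
 *
 *	if (desc != NULL && rte_flow_copy(desc, len, &attr, items, actions))
 *		... "desc" now holds a self-contained rule description ...
 */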
1191 
1192 int
1193 rte_flow_dev_dump(uint16_t port_id, struct rte_flow *flow,
1194 			FILE *file, struct rte_flow_error *error)
1195 {
1196 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1197 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1198 	int ret;
1199 
1200 	if (unlikely(!ops))
1201 		return -rte_errno;
1202 	if (likely(!!ops->dev_dump)) {
1203 		fts_enter(dev);
1204 		ret = ops->dev_dump(dev, flow, file, error);
1205 		fts_exit(dev);
1206 		return flow_err(port_id, ret, error);
1207 	}
1208 	return rte_flow_error_set(error, ENOSYS,
1209 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1210 				  NULL, rte_strerror(ENOSYS));
1211 }
1212 
1213 int
1214 rte_flow_get_aged_flows(uint16_t port_id, void **contexts,
1215 		    uint32_t nb_contexts, struct rte_flow_error *error)
1216 {
1217 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1218 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1219 	int ret;
1220 
1221 	if (unlikely(!ops))
1222 		return -rte_errno;
1223 	if (likely(!!ops->get_aged_flows)) {
1224 		fts_enter(dev);
1225 		ret = ops->get_aged_flows(dev, contexts, nb_contexts, error);
1226 		fts_exit(dev);
1227 		ret = flow_err(port_id, ret, error);
1228 
1229 		rte_flow_trace_get_aged_flows(port_id, contexts, nb_contexts, ret);
1230 
1231 		return ret;
1232 	}
1233 	return rte_flow_error_set(error, ENOTSUP,
1234 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1235 				  NULL, rte_strerror(ENOTSUP));
1236 }
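
/*
 * Usage sketch (illustrative only): draining aged-out flows, typically from
 * an RTE_ETH_EVENT_FLOW_AGED handler; each returned context is the value
 * supplied in rte_flow_action_age.context.
 *
 *	int n = rte_flow_get_aged_flows(port_id, NULL, 0, &err);
 *
 *	if (n > 0) {
 *		void **ctx = calloc(n, sizeof(*ctx));
 *
 *		if (ctx != NULL) {
 *			n = rte_flow_get_aged_flows(port_id, ctx, n, &err);
 *			for (int i = 0; i < n; i++)
 *				... destroy or recycle the flow behind ctx[i] ...
 *		}
 *	}
 */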
1237 
1238 int
1239 rte_flow_get_q_aged_flows(uint16_t port_id, uint32_t queue_id, void **contexts,
1240 			  uint32_t nb_contexts, struct rte_flow_error *error)
1241 {
1242 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1243 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1244 	int ret;
1245 
1246 	if (unlikely(!ops))
1247 		return -rte_errno;
1248 	if (likely(!!ops->get_q_aged_flows)) {
1249 		fts_enter(dev);
1250 		ret = ops->get_q_aged_flows(dev, queue_id, contexts,
1251 					    nb_contexts, error);
1252 		fts_exit(dev);
1253 		ret = flow_err(port_id, ret, error);
1254 
1255 		rte_flow_trace_get_q_aged_flows(port_id, queue_id, contexts,
1256 						nb_contexts, ret);
1257 
1258 		return ret;
1259 	}
1260 	return rte_flow_error_set(error, ENOTSUP,
1261 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1262 				  NULL, rte_strerror(ENOTSUP));
1263 }
1264 
1265 struct rte_flow_action_handle *
1266 rte_flow_action_handle_create(uint16_t port_id,
1267 			      const struct rte_flow_indir_action_conf *conf,
1268 			      const struct rte_flow_action *action,
1269 			      struct rte_flow_error *error)
1270 {
1271 	struct rte_flow_action_handle *handle;
1272 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1273 
1274 	if (unlikely(!ops))
1275 		return NULL;
1276 	if (unlikely(!ops->action_handle_create)) {
1277 		rte_flow_error_set(error, ENOSYS,
1278 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1279 				   rte_strerror(ENOSYS));
1280 		return NULL;
1281 	}
1282 	handle = ops->action_handle_create(&rte_eth_devices[port_id],
1283 					   conf, action, error);
1284 	if (handle == NULL)
1285 		flow_err(port_id, -rte_errno, error);
1286 
1287 	rte_flow_trace_action_handle_create(port_id, conf, action, handle);
1288 
1289 	return handle;
1290 }
1291 
1292 int
1293 rte_flow_action_handle_destroy(uint16_t port_id,
1294 			       struct rte_flow_action_handle *handle,
1295 			       struct rte_flow_error *error)
1296 {
1297 	int ret;
1298 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1299 
1300 	if (unlikely(!ops))
1301 		return -rte_errno;
1302 	if (unlikely(!ops->action_handle_destroy))
1303 		return rte_flow_error_set(error, ENOSYS,
1304 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1305 					  NULL, rte_strerror(ENOSYS));
1306 	ret = ops->action_handle_destroy(&rte_eth_devices[port_id],
1307 					 handle, error);
1308 	ret = flow_err(port_id, ret, error);
1309 
1310 	rte_flow_trace_action_handle_destroy(port_id, handle, ret);
1311 
1312 	return ret;
1313 }
1314 
1315 int
1316 rte_flow_action_handle_update(uint16_t port_id,
1317 			      struct rte_flow_action_handle *handle,
1318 			      const void *update,
1319 			      struct rte_flow_error *error)
1320 {
1321 	int ret;
1322 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1323 
1324 	if (unlikely(!ops))
1325 		return -rte_errno;
1326 	if (unlikely(!ops->action_handle_update))
1327 		return rte_flow_error_set(error, ENOSYS,
1328 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1329 					  NULL, rte_strerror(ENOSYS));
1330 	ret = ops->action_handle_update(&rte_eth_devices[port_id], handle,
1331 					update, error);
1332 	ret = flow_err(port_id, ret, error);
1333 
1334 	rte_flow_trace_action_handle_update(port_id, handle, update, ret);
1335 
1336 	return ret;
1337 }
1338 
1339 int
1340 rte_flow_action_handle_query(uint16_t port_id,
1341 			     const struct rte_flow_action_handle *handle,
1342 			     void *data,
1343 			     struct rte_flow_error *error)
1344 {
1345 	int ret;
1346 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1347 
1348 	if (unlikely(!ops))
1349 		return -rte_errno;
1350 	if (unlikely(!ops->action_handle_query))
1351 		return rte_flow_error_set(error, ENOSYS,
1352 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1353 					  NULL, rte_strerror(ENOSYS));
1354 	ret = ops->action_handle_query(&rte_eth_devices[port_id], handle,
1355 				       data, error);
1356 	ret = flow_err(port_id, ret, error);
1357 
1358 	rte_flow_trace_action_handle_query(port_id, handle, data, ret);
1359 
1360 	return ret;
1361 }
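
/*
 * Usage sketch (illustrative only): a shared counter created once and then
 * referenced from any number of rules through RTE_FLOW_ACTION_TYPE_INDIRECT;
 * as noted in rte_flow_desc_action[], the handle itself goes into conf.
 *
 *	const struct rte_flow_indir_action_conf ia_conf = { .ingress = 1 };
 *	const struct rte_flow_action count = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *	};
 *	struct rte_flow_action_handle *h =
 *		rte_flow_action_handle_create(port_id, &ia_conf, &count, &err);
 *	const struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_INDIRECT, .conf = h },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *
 *	... create rules with "actions", poll statistics with
 *	rte_flow_action_handle_query() and finally call
 *	rte_flow_action_handle_destroy(port_id, h, &err) ...
 */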
1362 
1363 int
1364 rte_flow_tunnel_decap_set(uint16_t port_id,
1365 			  struct rte_flow_tunnel *tunnel,
1366 			  struct rte_flow_action **actions,
1367 			  uint32_t *num_of_actions,
1368 			  struct rte_flow_error *error)
1369 {
1370 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1371 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1372 	int ret;
1373 
1374 	if (unlikely(!ops))
1375 		return -rte_errno;
1376 	if (likely(!!ops->tunnel_decap_set)) {
1377 		ret = flow_err(port_id,
1378 			       ops->tunnel_decap_set(dev, tunnel, actions,
1379 						     num_of_actions, error),
1380 			       error);
1381 
1382 		rte_flow_trace_tunnel_decap_set(port_id, tunnel, actions,
1383 						num_of_actions, ret);
1384 
1385 		return ret;
1386 	}
1387 	return rte_flow_error_set(error, ENOTSUP,
1388 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1389 				  NULL, rte_strerror(ENOTSUP));
1390 }
1391 
1392 int
1393 rte_flow_tunnel_match(uint16_t port_id,
1394 		      struct rte_flow_tunnel *tunnel,
1395 		      struct rte_flow_item **items,
1396 		      uint32_t *num_of_items,
1397 		      struct rte_flow_error *error)
1398 {
1399 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1400 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1401 	int ret;
1402 
1403 	if (unlikely(!ops))
1404 		return -rte_errno;
1405 	if (likely(!!ops->tunnel_match)) {
1406 		ret = flow_err(port_id,
1407 			       ops->tunnel_match(dev, tunnel, items,
1408 						 num_of_items, error),
1409 			       error);
1410 
1411 		rte_flow_trace_tunnel_match(port_id, tunnel, items, num_of_items,
1412 					    ret);
1413 
1414 		return ret;
1415 	}
1416 	return rte_flow_error_set(error, ENOTSUP,
1417 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1418 				  NULL, rte_strerror(ENOTSUP));
1419 }
1420 
1421 int
1422 rte_flow_get_restore_info(uint16_t port_id,
1423 			  struct rte_mbuf *m,
1424 			  struct rte_flow_restore_info *restore_info,
1425 			  struct rte_flow_error *error)
1426 {
1427 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1428 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1429 	int ret;
1430 
1431 	if (unlikely(!ops))
1432 		return -rte_errno;
1433 	if (likely(!!ops->get_restore_info)) {
1434 		ret = flow_err(port_id,
1435 			       ops->get_restore_info(dev, m, restore_info,
1436 						     error),
1437 			       error);
1438 
1439 		rte_flow_trace_get_restore_info(port_id, m, restore_info, ret);
1440 
1441 		return ret;
1442 	}
1443 	return rte_flow_error_set(error, ENOTSUP,
1444 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1445 				  NULL, rte_strerror(ENOTSUP));
1446 }
1447 
1448 static struct {
1449 	const struct rte_mbuf_dynflag desc;
1450 	uint64_t value;
1451 } flow_restore_info_dynflag = {
1452 	.desc = { .name = "RTE_MBUF_F_RX_RESTORE_INFO", },
1453 };
1454 
1455 uint64_t
1456 rte_flow_restore_info_dynflag(void)
1457 {
1458 	return flow_restore_info_dynflag.value;
1459 }
1460 
1461 int
1462 rte_flow_restore_info_dynflag_register(void)
1463 {
1464 	if (flow_restore_info_dynflag.value == 0) {
1465 		int offset = rte_mbuf_dynflag_register(&flow_restore_info_dynflag.desc);
1466 
1467 		if (offset < 0)
1468 			return -1;
1469 		flow_restore_info_dynflag.value = RTE_BIT64(offset);
1470 	}
1471 
1472 	return 0;
1473 }
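
/*
 * Usage sketch (illustrative only): once the flag has been registered with
 * rte_flow_restore_info_dynflag_register(), the datapath can use it to gate
 * the comparatively expensive rte_flow_get_restore_info() call.
 *
 *	uint64_t restore_mask = rte_flow_restore_info_dynflag();
 *
 *	if ((m->ol_flags & restore_mask) != 0) {
 *		struct rte_flow_restore_info info;
 *
 *		if (rte_flow_get_restore_info(port_id, m, &info, &err) == 0)
 *			... tunnel metadata is available in "info" ...
 *	}
 */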
1474 
1475 int
1476 rte_flow_tunnel_action_decap_release(uint16_t port_id,
1477 				     struct rte_flow_action *actions,
1478 				     uint32_t num_of_actions,
1479 				     struct rte_flow_error *error)
1480 {
1481 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1482 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1483 	int ret;
1484 
1485 	if (unlikely(!ops))
1486 		return -rte_errno;
1487 	if (likely(!!ops->tunnel_action_decap_release)) {
1488 		ret = flow_err(port_id,
1489 			       ops->tunnel_action_decap_release(dev, actions,
1490 								num_of_actions,
1491 								error),
1492 			       error);
1493 
1494 		rte_flow_trace_tunnel_action_decap_release(port_id, actions,
1495 							   num_of_actions, ret);
1496 
1497 		return ret;
1498 	}
1499 	return rte_flow_error_set(error, ENOTSUP,
1500 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1501 				  NULL, rte_strerror(ENOTSUP));
1502 }
1503 
1504 int
1505 rte_flow_tunnel_item_release(uint16_t port_id,
1506 			     struct rte_flow_item *items,
1507 			     uint32_t num_of_items,
1508 			     struct rte_flow_error *error)
1509 {
1510 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1511 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1512 	int ret;
1513 
1514 	if (unlikely(!ops))
1515 		return -rte_errno;
1516 	if (likely(!!ops->tunnel_item_release)) {
1517 		ret = flow_err(port_id,
1518 			       ops->tunnel_item_release(dev, items,
1519 							num_of_items, error),
1520 			       error);
1521 
1522 		rte_flow_trace_tunnel_item_release(port_id, items, num_of_items, ret);
1523 
1524 		return ret;
1525 	}
1526 	return rte_flow_error_set(error, ENOTSUP,
1527 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1528 				  NULL, rte_strerror(ENOTSUP));
1529 }
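
/*
 * Usage sketch (illustrative only): the typical tunnel-offload round trip
 * through the four helpers above; "tnl" is a populated struct
 * rte_flow_tunnel describing e.g. a VXLAN tunnel.
 *
 *	struct rte_flow_action *actions;
 *	struct rte_flow_item *items;
 *	uint32_t n_actions, n_items;
 *
 *	rte_flow_tunnel_decap_set(port_id, &tnl, &actions, &n_actions, &err);
 *	rte_flow_tunnel_match(port_id, &tnl, &items, &n_items, &err);
 *	... prepend the PMD-provided items/actions to application rules,
 *	create the rules, then release the PMD resources ...
 *	rte_flow_tunnel_action_decap_release(port_id, actions, n_actions, &err);
 *	rte_flow_tunnel_item_release(port_id, items, n_items, &err);
 */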
1530 
1531 int
1532 rte_flow_pick_transfer_proxy(uint16_t port_id, uint16_t *proxy_port_id,
1533 			     struct rte_flow_error *error)
1534 {
1535 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1536 	struct rte_eth_dev *dev;
1537 	int ret;
1538 
1539 	if (unlikely(ops == NULL))
1540 		return -rte_errno;
1541 
1542 	if (ops->pick_transfer_proxy == NULL) {
1543 		*proxy_port_id = port_id;
1544 		return 0;
1545 	}
1546 
1547 	dev = &rte_eth_devices[port_id];
1548 
1549 	ret = flow_err(port_id,
1550 		       ops->pick_transfer_proxy(dev, proxy_port_id, error),
1551 		       error);
1552 
1553 	rte_flow_trace_pick_transfer_proxy(port_id, proxy_port_id, ret);
1554 
1555 	return ret;
1556 }
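
/*
 * Usage sketch (illustrative only): transfer rules must be created through
 * the proxy port returned here rather than through "port_id" itself.
 *
 *	uint16_t proxy_id;
 *	struct rte_flow_attr attr = { .transfer = 1 };
 *
 *	if (rte_flow_pick_transfer_proxy(port_id, &proxy_id, &err) == 0)
 *		flow = rte_flow_create(proxy_id, &attr, pattern, actions,
 *				       &err);
 */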
1557 
1558 struct rte_flow_item_flex_handle *
1559 rte_flow_flex_item_create(uint16_t port_id,
1560 			  const struct rte_flow_item_flex_conf *conf,
1561 			  struct rte_flow_error *error)
1562 {
1563 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1564 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1565 	struct rte_flow_item_flex_handle *handle;
1566 
1567 	if (unlikely(!ops))
1568 		return NULL;
1569 	if (unlikely(!ops->flex_item_create)) {
1570 		rte_flow_error_set(error, ENOTSUP,
1571 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1572 				   NULL, rte_strerror(ENOTSUP));
1573 		return NULL;
1574 	}
1575 	handle = ops->flex_item_create(dev, conf, error);
1576 	if (handle == NULL)
1577 		flow_err(port_id, -rte_errno, error);
1578 
1579 	rte_flow_trace_flex_item_create(port_id, conf, handle);
1580 
1581 	return handle;
1582 }
1583 
1584 int
1585 rte_flow_flex_item_release(uint16_t port_id,
1586 			   const struct rte_flow_item_flex_handle *handle,
1587 			   struct rte_flow_error *error)
1588 {
1589 	int ret;
1590 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1591 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1592 
1593 	if (unlikely(!ops || !ops->flex_item_release))
1594 		return rte_flow_error_set(error, ENOTSUP,
1595 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1596 					  NULL, rte_strerror(ENOTSUP));
1597 	ret = ops->flex_item_release(dev, handle, error);
1598 	ret = flow_err(port_id, ret, error);
1599 
1600 	rte_flow_trace_flex_item_release(port_id, handle, ret);
1601 
1602 	return ret;
1603 }
1604 
1605 int
1606 rte_flow_info_get(uint16_t port_id,
1607 		  struct rte_flow_port_info *port_info,
1608 		  struct rte_flow_queue_info *queue_info,
1609 		  struct rte_flow_error *error)
1610 {
1611 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1612 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1613 	int ret;
1614 
1615 	if (unlikely(!ops))
1616 		return -rte_errno;
1617 	if (dev->data->dev_configured == 0) {
1618 		RTE_FLOW_LOG(INFO,
1619 			"Device with port_id=%"PRIu16" is not configured.\n",
1620 			port_id);
1621 		return -EINVAL;
1622 	}
1623 	if (port_info == NULL) {
1624 		RTE_FLOW_LOG(ERR, "Port %"PRIu16" info is NULL.\n", port_id);
1625 		return -EINVAL;
1626 	}
1627 	if (likely(!!ops->info_get)) {
1628 		ret = flow_err(port_id,
1629 			       ops->info_get(dev, port_info, queue_info, error),
1630 			       error);
1631 
1632 		rte_flow_trace_info_get(port_id, port_info, queue_info, ret);
1633 
1634 		return ret;
1635 	}
1636 	return rte_flow_error_set(error, ENOTSUP,
1637 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1638 				  NULL, rte_strerror(ENOTSUP));
1639 }
1640 
1641 int
1642 rte_flow_configure(uint16_t port_id,
1643 		   const struct rte_flow_port_attr *port_attr,
1644 		   uint16_t nb_queue,
1645 		   const struct rte_flow_queue_attr *queue_attr[],
1646 		   struct rte_flow_error *error)
1647 {
1648 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1649 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1650 	int ret;
1651 
1652 	if (unlikely(!ops))
1653 		return -rte_errno;
1654 	if (dev->data->dev_configured == 0) {
1655 		RTE_FLOW_LOG(INFO,
1656 			"Device with port_id=%"PRIu16" is not configured.\n",
1657 			port_id);
1658 		return -EINVAL;
1659 	}
1660 	if (dev->data->dev_started != 0) {
1661 		RTE_FLOW_LOG(INFO,
1662 			"Device with port_id=%"PRIu16" already started.\n",
1663 			port_id);
1664 		return -EINVAL;
1665 	}
1666 	if (port_attr == NULL) {
1667 		RTE_FLOW_LOG(ERR, "Port %"PRIu16" port attr is NULL.\n", port_id);
1668 		return -EINVAL;
1669 	}
1670 	if (queue_attr == NULL) {
1671 		RTE_FLOW_LOG(ERR, "Port %"PRIu16" queue info is NULL.\n", port_id);
1672 		return -EINVAL;
1673 	}
1674 	if ((port_attr->flags & RTE_FLOW_PORT_FLAG_SHARE_INDIRECT) &&
1675 	     !rte_eth_dev_is_valid_port(port_attr->host_port_id)) {
1676 		return rte_flow_error_set(error, ENODEV,
1677 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1678 					  NULL, rte_strerror(ENODEV));
1679 	}
1680 	if (likely(!!ops->configure)) {
1681 		ret = ops->configure(dev, port_attr, nb_queue, queue_attr, error);
1682 		if (ret == 0)
1683 			dev->data->flow_configured = 1;
1684 		ret = flow_err(port_id, ret, error);
1685 
1686 		rte_flow_trace_configure(port_id, port_attr, nb_queue, queue_attr, ret);
1687 
1688 		return ret;
1689 	}
1690 	return rte_flow_error_set(error, ENOTSUP,
1691 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1692 				  NULL, rte_strerror(ENOTSUP));
1693 }
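
/*
 * Usage sketch (illustrative only): sizing and configuring the flow engine
 * between rte_eth_dev_configure() and rte_eth_dev_start(); the attribute
 * values shown are arbitrary placeholders.
 *
 *	struct rte_flow_port_info port_info;
 *	struct rte_flow_queue_info queue_info;
 *	const struct rte_flow_port_attr port_attr = { .nb_counters = 1024 };
 *	const struct rte_flow_queue_attr qattr = { .size = 64 };
 *	const struct rte_flow_queue_attr *qattrs[] = { &qattr };
 *
 *	if (rte_flow_info_get(port_id, &port_info, &queue_info, &err) == 0)
 *		rte_flow_configure(port_id, &port_attr, 1, qattrs, &err);
 */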
1694 
struct rte_flow_pattern_template *
rte_flow_pattern_template_create(uint16_t port_id,
		const struct rte_flow_pattern_template_attr *template_attr,
		const struct rte_flow_item pattern[],
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow_pattern_template *template;

	if (unlikely(!ops))
		return NULL;
	if (dev->data->flow_configured == 0) {
		RTE_FLOW_LOG(INFO,
			"Flow engine on port_id=%"PRIu16" is not configured.\n",
			port_id);
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_STATE,
				NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (template_attr == NULL) {
		RTE_FLOW_LOG(ERR,
			     "Port %"PRIu16" template attr is NULL.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (pattern == NULL) {
		RTE_FLOW_LOG(ERR,
			     "Port %"PRIu16" pattern is NULL.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (likely(!!ops->pattern_template_create)) {
		template = ops->pattern_template_create(dev, template_attr,
							pattern, error);
		if (template == NULL)
			flow_err(port_id, -rte_errno, error);

		rte_flow_trace_pattern_template_create(port_id, template_attr,
						       pattern, template);

		return template;
	}
	rte_flow_error_set(error, ENOTSUP,
			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(ENOTSUP));
	return NULL;
}

int
rte_flow_pattern_template_destroy(uint16_t port_id,
		struct rte_flow_pattern_template *pattern_template,
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(pattern_template == NULL))
		return 0;
	if (likely(!!ops->pattern_template_destroy)) {
		ret = flow_err(port_id,
			       ops->pattern_template_destroy(dev,
							     pattern_template,
							     error),
			       error);

		rte_flow_trace_pattern_template_destroy(port_id, pattern_template,
							ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

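/*
 * Usage sketch (illustrative only): an actions template pairs an
 * actions array with a masks array of the same layout; a non-NULL conf
 * in the mask marks the corresponding field as fixed at template
 * creation time, otherwise it is supplied per rule.  Queue index 0 is
 * an assumption of the example.
 *
 *	struct rte_flow_actions_template_attr at_attr = { .ingress = 1 };
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_action masks[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_actions_template *at =
 *		rte_flow_actions_template_create(0, &at_attr, actions,
 *						 masks, &err);
 */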
struct rte_flow_actions_template *
rte_flow_actions_template_create(uint16_t port_id,
			const struct rte_flow_actions_template_attr *template_attr,
			const struct rte_flow_action actions[],
			const struct rte_flow_action masks[],
			struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow_actions_template *template;

	if (unlikely(!ops))
		return NULL;
	if (dev->data->flow_configured == 0) {
		RTE_FLOW_LOG(INFO,
			"Flow engine on port_id=%"PRIu16" is not configured.\n",
			port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_STATE,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (template_attr == NULL) {
		RTE_FLOW_LOG(ERR,
			     "Port %"PRIu16" template attr is NULL.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (actions == NULL) {
		RTE_FLOW_LOG(ERR,
			     "Port %"PRIu16" actions is NULL.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (masks == NULL) {
		RTE_FLOW_LOG(ERR,
			     "Port %"PRIu16" masks is NULL.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (likely(!!ops->actions_template_create)) {
		template = ops->actions_template_create(dev, template_attr,
							actions, masks, error);
		if (template == NULL)
			flow_err(port_id, -rte_errno, error);

		rte_flow_trace_actions_template_create(port_id, template_attr, actions,
						       masks, template);

		return template;
	}
	rte_flow_error_set(error, ENOTSUP,
			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(ENOTSUP));
	return NULL;
}

int
rte_flow_actions_template_destroy(uint16_t port_id,
			struct rte_flow_actions_template *actions_template,
			struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(actions_template == NULL))
		return 0;
	if (likely(!!ops->actions_template_destroy)) {
		ret = flow_err(port_id,
			       ops->actions_template_destroy(dev,
							     actions_template,
							     error),
			       error);

		rte_flow_trace_actions_template_destroy(port_id, actions_template,
							ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

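/*
 * Usage sketch (illustrative only): a template table binds pattern and
 * actions templates together and pre-allocates room for the rules.
 * "pt" and "at" refer to the sketches above; 1024 flows on ingress are
 * assumptions of the example.
 *
 *	struct rte_flow_template_table_attr tbl_attr = {
 *		.flow_attr = { .ingress = 1 },
 *		.nb_flows = 1024,
 *	};
 *	struct rte_flow_pattern_template *pts[] = { pt };
 *	struct rte_flow_actions_template *ats[] = { at };
 *	struct rte_flow_template_table *tbl =
 *		rte_flow_template_table_create(0, &tbl_attr, pts, 1,
 *					       ats, 1, &err);
 */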
struct rte_flow_template_table *
rte_flow_template_table_create(uint16_t port_id,
			const struct rte_flow_template_table_attr *table_attr,
			struct rte_flow_pattern_template *pattern_templates[],
			uint8_t nb_pattern_templates,
			struct rte_flow_actions_template *actions_templates[],
			uint8_t nb_actions_templates,
			struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow_template_table *table;

	if (unlikely(!ops))
		return NULL;
	if (dev->data->flow_configured == 0) {
		RTE_FLOW_LOG(INFO,
			"Flow engine on port_id=%"PRIu16" is not configured.\n",
			port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_STATE,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (table_attr == NULL) {
		RTE_FLOW_LOG(ERR,
			     "Port %"PRIu16" table attr is NULL.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (pattern_templates == NULL) {
		RTE_FLOW_LOG(ERR,
			     "Port %"PRIu16" pattern templates is NULL.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (actions_templates == NULL) {
		RTE_FLOW_LOG(ERR,
			     "Port %"PRIu16" actions templates is NULL.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (likely(!!ops->template_table_create)) {
		table = ops->template_table_create(dev, table_attr,
					pattern_templates, nb_pattern_templates,
					actions_templates, nb_actions_templates,
					error);
		if (table == NULL)
			flow_err(port_id, -rte_errno, error);

		rte_flow_trace_template_table_create(port_id, table_attr,
						     pattern_templates,
						     nb_pattern_templates,
						     actions_templates,
						     nb_actions_templates, table);

		return table;
	}
	rte_flow_error_set(error, ENOTSUP,
			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(ENOTSUP));
	return NULL;
}

int
rte_flow_template_table_destroy(uint16_t port_id,
				struct rte_flow_template_table *template_table,
				struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(template_table == NULL))
		return 0;
	if (likely(!!ops->template_table_destroy)) {
		ret = flow_err(port_id,
			       ops->template_table_destroy(dev,
							   template_table,
							   error),
			       error);

		rte_flow_trace_template_table_destroy(port_id, template_table,
						      ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

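/*
 * Usage sketch (illustrative only): divert packets that miss every rule
 * in group 1 to group 2.  The port and group numbers are assumptions.
 *
 *	struct rte_flow_group_attr grp_attr = { .ingress = 1 };
 *	struct rte_flow_action_jump jump = { .group = 2 };
 *	struct rte_flow_action miss_actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	rte_flow_group_set_miss_actions(0, 1, &grp_attr, miss_actions, &err);
 */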
int
rte_flow_group_set_miss_actions(uint16_t port_id,
				uint32_t group_id,
				const struct rte_flow_group_attr *attr,
				const struct rte_flow_action actions[],
				struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->group_set_miss_actions)) {
		return flow_err(port_id,
				ops->group_set_miss_actions(dev, group_id, attr, actions, error),
				error);
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

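/*
 * Usage sketch (illustrative only): enqueue a rule built from pattern
 * template 0 and actions template 0 of "tbl" (see the sketches above)
 * on flow queue 0.  With .postpone set, nothing reaches the hardware
 * until rte_flow_push(); completion is reported by rte_flow_pull().
 * Note that the async entry points below are data-path calls and, as
 * the code shows, perform no ops or argument validation.
 *
 *	struct rte_flow_op_attr op_attr = { .postpone = 1 };
 *	struct rte_flow *flow =
 *		rte_flow_async_create(0, 0, &op_attr, tbl, pattern, 0,
 *				      actions, 0, NULL, &err);
 */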
struct rte_flow *
rte_flow_async_create(uint16_t port_id,
		      uint32_t queue_id,
		      const struct rte_flow_op_attr *op_attr,
		      struct rte_flow_template_table *template_table,
		      const struct rte_flow_item pattern[],
		      uint8_t pattern_template_index,
		      const struct rte_flow_action actions[],
		      uint8_t actions_template_index,
		      void *user_data,
		      struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow *flow;

	flow = ops->async_create(dev, queue_id,
				 op_attr, template_table,
				 pattern, pattern_template_index,
				 actions, actions_template_index,
				 user_data, error);
	if (flow == NULL)
		flow_err(port_id, -rte_errno, error);

	rte_flow_trace_async_create(port_id, queue_id, op_attr, template_table,
				    pattern, pattern_template_index, actions,
				    actions_template_index, user_data, flow);

	return flow;
}

struct rte_flow *
rte_flow_async_create_by_index(uint16_t port_id,
			       uint32_t queue_id,
			       const struct rte_flow_op_attr *op_attr,
			       struct rte_flow_template_table *template_table,
			       uint32_t rule_index,
			       const struct rte_flow_action actions[],
			       uint8_t actions_template_index,
			       void *user_data,
			       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow *flow;

	flow = ops->async_create_by_index(dev, queue_id,
					  op_attr, template_table, rule_index,
					  actions, actions_template_index,
					  user_data, error);
	if (flow == NULL)
		flow_err(port_id, -rte_errno, error);
	return flow;
}

int
rte_flow_async_destroy(uint16_t port_id,
		       uint32_t queue_id,
		       const struct rte_flow_op_attr *op_attr,
		       struct rte_flow *flow,
		       void *user_data,
		       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	ret = flow_err(port_id,
		       ops->async_destroy(dev, queue_id,
					  op_attr, flow,
					  user_data, error),
		       error);

	rte_flow_trace_async_destroy(port_id, queue_id, op_attr, flow,
				     user_data, ret);

	return ret;
}

int
rte_flow_async_actions_update(uint16_t port_id,
			      uint32_t queue_id,
			      const struct rte_flow_op_attr *op_attr,
			      struct rte_flow *flow,
			      const struct rte_flow_action actions[],
			      uint8_t actions_template_index,
			      void *user_data,
			      struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	ret = flow_err(port_id,
		       ops->async_actions_update(dev, queue_id, op_attr,
						 flow, actions,
						 actions_template_index,
						 user_data, error),
		       error);

	rte_flow_trace_async_actions_update(port_id, queue_id, op_attr, flow,
					    actions, actions_template_index,
					    user_data, ret);

	return ret;
}

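/*
 * Usage sketch (illustrative only): push all postponed operations
 * queued on flow queue 0 of port 0 to the hardware.
 *
 *	rte_flow_push(0, 0, &err);
 */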
int
rte_flow_push(uint16_t port_id,
	      uint32_t queue_id,
	      struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	ret = flow_err(port_id,
		       ops->push(dev, queue_id, error),
		       error);

	rte_flow_trace_push(port_id, queue_id, ret);

	return ret;
}

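/*
 * Usage sketch (illustrative only): poll flow queue 0 for completions
 * of previously enqueued operations.  The batch size of 8 is an
 * assumption of the example.
 *
 *	struct rte_flow_op_result results[8];
 *	int done = rte_flow_pull(0, 0, results, RTE_DIM(results), &err);
 *
 *	for (int i = 0; i < done; i++)
 *		if (results[i].status != RTE_FLOW_OP_SUCCESS)
 *			RTE_FLOW_LOG(ERR, "async flow op failed\n");
 */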
int
rte_flow_pull(uint16_t port_id,
	      uint32_t queue_id,
	      struct rte_flow_op_result res[],
	      uint16_t n_res,
	      struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;
	int rc;

	ret = ops->pull(dev, queue_id, res, n_res, error);
	rc = ret ? ret : flow_err(port_id, ret, error);

	rte_flow_trace_pull(port_id, queue_id, res, n_res, rc);

	return rc;
}

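/*
 * Usage sketch (illustrative only): asynchronously create an indirect
 * (shared) counter action on flow queue 0; the resulting handle can be
 * referenced from many rules.
 *
 *	struct rte_flow_indir_action_conf indir_conf = { .ingress = 1 };
 *	struct rte_flow_action count = { .type = RTE_FLOW_ACTION_TYPE_COUNT };
 *	struct rte_flow_action_handle *handle =
 *		rte_flow_async_action_handle_create(0, 0, &op_attr,
 *						    &indir_conf, &count,
 *						    NULL, &err);
 */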
struct rte_flow_action_handle *
rte_flow_async_action_handle_create(uint16_t port_id,
		uint32_t queue_id,
		const struct rte_flow_op_attr *op_attr,
		const struct rte_flow_indir_action_conf *indir_action_conf,
		const struct rte_flow_action *action,
		void *user_data,
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow_action_handle *handle;

	handle = ops->async_action_handle_create(dev, queue_id, op_attr,
					     indir_action_conf, action, user_data, error);
	if (handle == NULL)
		flow_err(port_id, -rte_errno, error);

	rte_flow_trace_async_action_handle_create(port_id, queue_id, op_attr,
						  indir_action_conf, action,
						  user_data, handle);

	return handle;
}

int
rte_flow_async_action_handle_destroy(uint16_t port_id,
		uint32_t queue_id,
		const struct rte_flow_op_attr *op_attr,
		struct rte_flow_action_handle *action_handle,
		void *user_data,
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	ret = ops->async_action_handle_destroy(dev, queue_id, op_attr,
					   action_handle, user_data, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_async_action_handle_destroy(port_id, queue_id, op_attr,
						   action_handle, user_data, ret);

	return ret;
}

int
rte_flow_async_action_handle_update(uint16_t port_id,
		uint32_t queue_id,
		const struct rte_flow_op_attr *op_attr,
		struct rte_flow_action_handle *action_handle,
		const void *update,
		void *user_data,
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	ret = ops->async_action_handle_update(dev, queue_id, op_attr,
					  action_handle, update, user_data, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_async_action_handle_update(port_id, queue_id, op_attr,
						  action_handle, update,
						  user_data, ret);

	return ret;
}

int
rte_flow_async_action_handle_query(uint16_t port_id,
		uint32_t queue_id,
		const struct rte_flow_op_attr *op_attr,
		const struct rte_flow_action_handle *action_handle,
		void *data,
		void *user_data,
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	ret = ops->async_action_handle_query(dev, queue_id, op_attr,
					  action_handle, data, user_data, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_async_action_handle_query(port_id, queue_id, op_attr,
						 action_handle, data, user_data,
						 ret);

	return ret;
}

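/*
 * Usage sketch (illustrative only): query, and ask the PMD to reset,
 * the shared counter created above.  With update == NULL only the
 * query side runs; RTE_FLOW_QU_QUERY_FIRST requests that the query be
 * performed before any update.  Whether a given driver honors the
 * reset flag here is an assumption, not guaranteed by this wrapper.
 *
 *	struct rte_flow_query_count counters = { .reset = 1 };
 *	rte_flow_action_handle_query_update(0, handle, NULL, &counters,
 *					    RTE_FLOW_QU_QUERY_FIRST, &err);
 */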
int
rte_flow_action_handle_query_update(uint16_t port_id,
				    struct rte_flow_action_handle *handle,
				    const void *update, void *query,
				    enum rte_flow_query_update_mode mode,
				    struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_dev *dev;
	const struct rte_flow_ops *ops;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if (!handle)
		return -EINVAL;
	if (!update && !query)
		return -EINVAL;
	dev = &rte_eth_devices[port_id];
	ops = rte_flow_ops_get(port_id, error);
	if (!ops || !ops->action_handle_query_update)
		return -ENOTSUP;
	ret = ops->action_handle_query_update(dev, handle, update,
					      query, mode, error);
	return flow_err(port_id, ret, error);
}

int
rte_flow_async_action_handle_query_update(uint16_t port_id, uint32_t queue_id,
					  const struct rte_flow_op_attr *attr,
					  struct rte_flow_action_handle *handle,
					  const void *update, void *query,
					  enum rte_flow_query_update_mode mode,
					  void *user_data,
					  struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_dev *dev;
	const struct rte_flow_ops *ops;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if (!handle)
		return -EINVAL;
	if (!update && !query)
		return -EINVAL;
	dev = &rte_eth_devices[port_id];
	ops = rte_flow_ops_get(port_id, error);
	if (!ops || !ops->async_action_handle_query_update)
		return -ENOTSUP;
	ret = ops->async_action_handle_query_update(dev, queue_id, attr,
						    handle, update,
						    query, mode,
						    user_data, error);
	return flow_err(port_id, ret, error);
}

struct rte_flow_action_list_handle *
rte_flow_action_list_handle_create(uint16_t port_id,
				   const struct rte_flow_indir_action_conf *conf,
				   const struct rte_flow_action *actions,
				   struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_dev *dev;
	const struct rte_flow_ops *ops;
	struct rte_flow_action_list_handle *handle;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
	ops = rte_flow_ops_get(port_id, error);
	if (!ops || !ops->action_list_handle_create) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "action_list handle not supported");
		return NULL;
	}
	dev = &rte_eth_devices[port_id];
	handle = ops->action_list_handle_create(dev, conf, actions, error);
	ret = handle ? 0 : flow_err(port_id, -rte_errno, error);
	rte_flow_trace_action_list_handle_create(port_id, conf, actions, ret);
	return handle;
}

int
rte_flow_action_list_handle_destroy(uint16_t port_id,
				    struct rte_flow_action_list_handle *handle,
				    struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_dev *dev;
	const struct rte_flow_ops *ops;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	ops = rte_flow_ops_get(port_id, error);
	if (!ops || !ops->action_list_handle_destroy)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "action_list handle not supported");
	dev = &rte_eth_devices[port_id];
	ret = ops->action_list_handle_destroy(dev, handle, error);
	ret = flow_err(port_id, ret, error);
	rte_flow_trace_action_list_handle_destroy(port_id, handle, ret);
	return ret;
}

struct rte_flow_action_list_handle *
rte_flow_async_action_list_handle_create(uint16_t port_id, uint32_t queue_id,
					 const struct rte_flow_op_attr *attr,
					 const struct rte_flow_indir_action_conf *conf,
					 const struct rte_flow_action *actions,
					 void *user_data,
					 struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_dev *dev;
	const struct rte_flow_ops *ops;
	struct rte_flow_action_list_handle *handle;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
	ops = rte_flow_ops_get(port_id, error);
	if (!ops || !ops->async_action_list_handle_create) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   "action_list handle not supported");
		return NULL;
	}
	dev = &rte_eth_devices[port_id];
	handle = ops->async_action_list_handle_create(dev, queue_id, attr, conf,
						      actions, user_data,
						      error);
	ret = handle ? 0 : flow_err(port_id, -rte_errno, error);
	rte_flow_trace_async_action_list_handle_create(port_id, queue_id, attr,
						       conf, actions, user_data,
						       ret);
	return handle;
}

int
rte_flow_async_action_list_handle_destroy(uint16_t port_id, uint32_t queue_id,
				 const struct rte_flow_op_attr *op_attr,
				 struct rte_flow_action_list_handle *handle,
				 void *user_data, struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_dev *dev;
	const struct rte_flow_ops *ops;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	ops = rte_flow_ops_get(port_id, error);
	if (!ops || !ops->async_action_list_handle_destroy)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "async action_list handle not supported");
	dev = &rte_eth_devices[port_id];
	ret = ops->async_action_list_handle_destroy(dev, queue_id, op_attr,
						    handle, user_data, error);
	ret = flow_err(port_id, ret, error);
	rte_flow_trace_async_action_list_handle_destroy(port_id, queue_id,
							op_attr, handle,
							user_data, ret);
	return ret;
}

int
rte_flow_action_list_handle_query_update(uint16_t port_id,
			 const struct rte_flow_action_list_handle *handle,
			 const void **update, void **query,
			 enum rte_flow_query_update_mode mode,
			 struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_dev *dev;
	const struct rte_flow_ops *ops;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	ops = rte_flow_ops_get(port_id, error);
	if (!ops || !ops->action_list_handle_query_update)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "action_list query_update not supported");
	dev = &rte_eth_devices[port_id];
	ret = ops->action_list_handle_query_update(dev, handle, update, query,
						   mode, error);
	ret = flow_err(port_id, ret, error);
	rte_flow_trace_action_list_handle_query_update(port_id, handle, update,
						       query, mode, ret);
	return ret;
}

int
rte_flow_async_action_list_handle_query_update(uint16_t port_id, uint32_t queue_id,
			 const struct rte_flow_op_attr *attr,
			 const struct rte_flow_action_list_handle *handle,
			 const void **update, void **query,
			 enum rte_flow_query_update_mode mode,
			 void *user_data, struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_dev *dev;
	const struct rte_flow_ops *ops;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	ops = rte_flow_ops_get(port_id, error);
	if (!ops || !ops->async_action_list_handle_query_update)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "action_list async query_update not supported");
	dev = &rte_eth_devices[port_id];
	ret = ops->async_action_list_handle_query_update(dev, queue_id, attr,
							 handle, update, query,
							 mode, user_data,
							 error);
	ret = flow_err(port_id, ret, error);
	rte_flow_trace_async_action_list_handle_query_update(port_id, queue_id,
							     attr, handle,
							     update, query,
							     mode, user_data,
							     ret);
	return ret;
}

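/*
 * Usage sketch (illustrative only): compute the hash the matcher would
 * derive for a packet described by "pattern" against pattern template
 * 0 of "tbl" (both from the sketches above).
 *
 *	uint32_t hash;
 *	if (rte_flow_calc_table_hash(0, tbl, pattern, 0, &hash, &err) == 0)
 *		printf("rule hash: 0x%08x\n", hash);
 */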
int
rte_flow_calc_table_hash(uint16_t port_id, const struct rte_flow_template_table *table,
			 const struct rte_flow_item pattern[], uint8_t pattern_template_index,
			 uint32_t *hash, struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_dev *dev;
	const struct rte_flow_ops *ops;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	ops = rte_flow_ops_get(port_id, error);
	if (!ops || !ops->flow_calc_table_hash)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "flow table hash calculation not supported");
	dev = &rte_eth_devices[port_id];
	ret = ops->flow_calc_table_hash(dev, table, pattern, pattern_template_index,
					hash, error);
	return flow_err(port_id, ret, error);
}