xref: /dpdk/lib/ethdev/rte_flow.c (revision eb704df7e27df838ba7ec9bcd034bf0aaee405cd)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2016 6WIND S.A.
3  * Copyright 2016 Mellanox Technologies, Ltd
4  */
5 
6 #include <stdalign.h>
7 #include <errno.h>
8 #include <stddef.h>
9 #include <stdint.h>
10 #include <pthread.h>
11 
12 #include <rte_common.h>
13 #include <rte_errno.h>
14 #include <rte_branch_prediction.h>
15 #include <rte_string_fns.h>
16 #include <rte_mbuf_dyn.h>
17 #include "rte_flow_driver.h"
18 #include "rte_flow.h"
19 
20 #include "ethdev_trace.h"
21 
22 #define FLOW_LOG RTE_ETHDEV_LOG_LINE
23 
24 /* Mbuf dynamic field name for metadata. */
25 int32_t rte_flow_dynf_metadata_offs = -1;
26 
27 /* Mbuf dynamic field flag bit number for metadata. */
28 uint64_t rte_flow_dynf_metadata_mask;
29 
30 /**
31  * Entry of the flow element (item/action) description tables below.
32  */
33 struct rte_flow_desc_data {
34 	const char *name;
35 	size_t size;
36 	size_t (*desc_fn)(void *dst, const void *src);
37 };
38 
39 /**
40  * Copy the description of a flow element (pattern item or action).
41  * @param buf
42  * Destination memory.
43  * @param data
44  * Source memory.
45  * @param size
46  * Requested copy size.
47  * @param desc
48  * rte_flow_desc_item - for flow item conversion.
49  * rte_flow_desc_action - for flow action conversion.
50  * @param type
51  * Offset into the desc param or negative value for private flow elements.
 * @return
 * Number of bytes needed to store the full element description, or 0 when
 * @p buf or @p data is NULL.
52  */
53 static inline size_t
54 rte_flow_conv_copy(void *buf, const void *data, const size_t size,
55 		   const struct rte_flow_desc_data *desc, int type)
56 {
57 	/**
58 	 * Allow PMD private flow item
59 	 */
60 	bool rte_type = type >= 0;
61 
62 	size_t sz = rte_type ? desc[type].size : sizeof(void *);
63 	if (buf == NULL || data == NULL)
64 		return 0;
65 	rte_memcpy(buf, data, (size > sz ? sz : size));
66 	if (rte_type && desc[type].desc_fn)
67 		sz += desc[type].desc_fn(size > 0 ? buf : NULL, data);
68 	return sz;
69 }
70 
71 static size_t
72 rte_flow_item_flex_conv(void *buf, const void *data)
73 {
74 	struct rte_flow_item_flex *dst = buf;
75 	const struct rte_flow_item_flex *src = data;
76 	if (buf) {
77 		dst->pattern = rte_memcpy
78 			((void *)((uintptr_t)(dst + 1)), src->pattern,
79 			 src->length);
80 	}
81 	return src->length;
82 }
83 
84 /** Generate flow_item[] entry. */
85 #define MK_FLOW_ITEM(t, s) \
86 	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
87 		.name = # t, \
88 		.size = s,               \
89 		.desc_fn = NULL,\
90 	}
91 
92 #define MK_FLOW_ITEM_FN(t, s, fn) \
93 	[RTE_FLOW_ITEM_TYPE_ ## t] = {\
94 		.name = # t,                 \
95 		.size = s,                   \
96 		.desc_fn = fn,               \
97 	}
98 
99 /** Information about known flow pattern items. */
100 static const struct rte_flow_desc_data rte_flow_desc_item[] = {
101 	MK_FLOW_ITEM(END, 0),
102 	MK_FLOW_ITEM(VOID, 0),
103 	MK_FLOW_ITEM(INVERT, 0),
104 	MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
105 	MK_FLOW_ITEM(PORT_ID, sizeof(struct rte_flow_item_port_id)),
106 	MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)),
107 	MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
108 	MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
109 	MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
110 	MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
111 	MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
112 	MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
113 	MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
114 	MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
115 	MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
116 	MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
117 	MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
118 	MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
119 	MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
120 	MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)),
121 	MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
122 	MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
123 	MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
124 	MK_FLOW_ITEM(ESP, sizeof(struct rte_flow_item_esp)),
125 	MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
126 	MK_FLOW_ITEM(VXLAN_GPE, sizeof(struct rte_flow_item_vxlan_gpe)),
127 	MK_FLOW_ITEM(ARP_ETH_IPV4, sizeof(struct rte_flow_item_arp_eth_ipv4)),
128 	MK_FLOW_ITEM(IPV6_EXT, sizeof(struct rte_flow_item_ipv6_ext)),
129 	MK_FLOW_ITEM(IPV6_FRAG_EXT, sizeof(struct rte_flow_item_ipv6_frag_ext)),
130 	MK_FLOW_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
131 	MK_FLOW_ITEM(ICMP6_ECHO_REQUEST, sizeof(struct rte_flow_item_icmp6_echo)),
132 	MK_FLOW_ITEM(ICMP6_ECHO_REPLY, sizeof(struct rte_flow_item_icmp6_echo)),
133 	MK_FLOW_ITEM(ICMP6_ND_NS, sizeof(struct rte_flow_item_icmp6_nd_ns)),
134 	MK_FLOW_ITEM(ICMP6_ND_NA, sizeof(struct rte_flow_item_icmp6_nd_na)),
135 	MK_FLOW_ITEM(ICMP6_ND_OPT, sizeof(struct rte_flow_item_icmp6_nd_opt)),
136 	MK_FLOW_ITEM(ICMP6_ND_OPT_SLA_ETH,
137 		     sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
138 	MK_FLOW_ITEM(ICMP6_ND_OPT_TLA_ETH,
139 		     sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
140 	MK_FLOW_ITEM(MARK, sizeof(struct rte_flow_item_mark)),
141 	MK_FLOW_ITEM(META, sizeof(struct rte_flow_item_meta)),
142 	MK_FLOW_ITEM(RANDOM, sizeof(struct rte_flow_item_random)),
143 	MK_FLOW_ITEM(TAG, sizeof(struct rte_flow_item_tag)),
144 	MK_FLOW_ITEM(GRE_KEY, sizeof(rte_be32_t)),
145 	MK_FLOW_ITEM(GRE_OPTION, sizeof(struct rte_flow_item_gre_opt)),
146 	MK_FLOW_ITEM(GTP_PSC, sizeof(struct rte_flow_item_gtp_psc)),
147 	MK_FLOW_ITEM(PPPOES, sizeof(struct rte_flow_item_pppoe)),
148 	MK_FLOW_ITEM(PPPOED, sizeof(struct rte_flow_item_pppoe)),
149 	MK_FLOW_ITEM(PPPOE_PROTO_ID,
150 			sizeof(struct rte_flow_item_pppoe_proto_id)),
151 	MK_FLOW_ITEM(NSH, sizeof(struct rte_flow_item_nsh)),
152 	MK_FLOW_ITEM(IGMP, sizeof(struct rte_flow_item_igmp)),
153 	MK_FLOW_ITEM(AH, sizeof(struct rte_flow_item_ah)),
154 	MK_FLOW_ITEM(HIGIG2, sizeof(struct rte_flow_item_higig2_hdr)),
155 	MK_FLOW_ITEM(L2TPV3OIP, sizeof(struct rte_flow_item_l2tpv3oip)),
156 	MK_FLOW_ITEM(PFCP, sizeof(struct rte_flow_item_pfcp)),
157 	MK_FLOW_ITEM(ECPRI, sizeof(struct rte_flow_item_ecpri)),
158 	MK_FLOW_ITEM(GENEVE_OPT, sizeof(struct rte_flow_item_geneve_opt)),
159 	MK_FLOW_ITEM(INTEGRITY, sizeof(struct rte_flow_item_integrity)),
160 	MK_FLOW_ITEM(CONNTRACK, sizeof(uint32_t)),
161 	MK_FLOW_ITEM(PORT_REPRESENTOR, sizeof(struct rte_flow_item_ethdev)),
162 	MK_FLOW_ITEM(REPRESENTED_PORT, sizeof(struct rte_flow_item_ethdev)),
163 	MK_FLOW_ITEM_FN(FLEX, sizeof(struct rte_flow_item_flex),
164 			rte_flow_item_flex_conv),
165 	MK_FLOW_ITEM(L2TPV2, sizeof(struct rte_flow_item_l2tpv2)),
166 	MK_FLOW_ITEM(PPP, sizeof(struct rte_flow_item_ppp)),
167 	MK_FLOW_ITEM(METER_COLOR, sizeof(struct rte_flow_item_meter_color)),
168 	MK_FLOW_ITEM(IPV6_ROUTING_EXT, sizeof(struct rte_flow_item_ipv6_routing_ext)),
169 	MK_FLOW_ITEM(QUOTA, sizeof(struct rte_flow_item_quota)),
170 	MK_FLOW_ITEM(AGGR_AFFINITY, sizeof(struct rte_flow_item_aggr_affinity)),
171 	MK_FLOW_ITEM(TX_QUEUE, sizeof(struct rte_flow_item_tx_queue)),
172 	MK_FLOW_ITEM(IB_BTH, sizeof(struct rte_flow_item_ib_bth)),
173 	MK_FLOW_ITEM(PTYPE, sizeof(struct rte_flow_item_ptype)),
174 };
175 
176 /** Generate flow_action[] entry. */
177 #define MK_FLOW_ACTION(t, s) \
178 	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
179 		.name = # t, \
180 		.size = s, \
181 		.desc_fn = NULL,\
182 	}
183 
184 #define MK_FLOW_ACTION_FN(t, fn) \
185 	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
186 		.name = # t, \
187 		.size = 0, \
188 		.desc_fn = fn,\
189 	}
190 
191 
192 /** Information about known flow actions. */
193 static const struct rte_flow_desc_data rte_flow_desc_action[] = {
194 	MK_FLOW_ACTION(END, 0),
195 	MK_FLOW_ACTION(VOID, 0),
196 	MK_FLOW_ACTION(PASSTHRU, 0),
197 	MK_FLOW_ACTION(JUMP, sizeof(struct rte_flow_action_jump)),
198 	MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
199 	MK_FLOW_ACTION(FLAG, 0),
200 	MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
201 	MK_FLOW_ACTION(DROP, 0),
202 	MK_FLOW_ACTION(COUNT, sizeof(struct rte_flow_action_count)),
203 	MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)),
204 	MK_FLOW_ACTION(PF, 0),
205 	MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
206 	MK_FLOW_ACTION(PORT_ID, sizeof(struct rte_flow_action_port_id)),
207 	MK_FLOW_ACTION(METER, sizeof(struct rte_flow_action_meter)),
208 	MK_FLOW_ACTION(SECURITY, sizeof(struct rte_flow_action_security)),
209 	MK_FLOW_ACTION(OF_DEC_NW_TTL, 0),
210 	MK_FLOW_ACTION(OF_POP_VLAN, 0),
211 	MK_FLOW_ACTION(OF_PUSH_VLAN,
212 		       sizeof(struct rte_flow_action_of_push_vlan)),
213 	MK_FLOW_ACTION(OF_SET_VLAN_VID,
214 		       sizeof(struct rte_flow_action_of_set_vlan_vid)),
215 	MK_FLOW_ACTION(OF_SET_VLAN_PCP,
216 		       sizeof(struct rte_flow_action_of_set_vlan_pcp)),
217 	MK_FLOW_ACTION(OF_POP_MPLS,
218 		       sizeof(struct rte_flow_action_of_pop_mpls)),
219 	MK_FLOW_ACTION(OF_PUSH_MPLS,
220 		       sizeof(struct rte_flow_action_of_push_mpls)),
221 	MK_FLOW_ACTION(VXLAN_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
222 	MK_FLOW_ACTION(VXLAN_DECAP, 0),
223 	MK_FLOW_ACTION(NVGRE_ENCAP, sizeof(struct rte_flow_action_nvgre_encap)),
224 	MK_FLOW_ACTION(NVGRE_DECAP, 0),
225 	MK_FLOW_ACTION(RAW_ENCAP, sizeof(struct rte_flow_action_raw_encap)),
226 	MK_FLOW_ACTION(RAW_DECAP, sizeof(struct rte_flow_action_raw_decap)),
227 	MK_FLOW_ACTION(SET_IPV4_SRC,
228 		       sizeof(struct rte_flow_action_set_ipv4)),
229 	MK_FLOW_ACTION(SET_IPV4_DST,
230 		       sizeof(struct rte_flow_action_set_ipv4)),
231 	MK_FLOW_ACTION(SET_IPV6_SRC,
232 		       sizeof(struct rte_flow_action_set_ipv6)),
233 	MK_FLOW_ACTION(SET_IPV6_DST,
234 		       sizeof(struct rte_flow_action_set_ipv6)),
235 	MK_FLOW_ACTION(SET_TP_SRC,
236 		       sizeof(struct rte_flow_action_set_tp)),
237 	MK_FLOW_ACTION(SET_TP_DST,
238 		       sizeof(struct rte_flow_action_set_tp)),
239 	MK_FLOW_ACTION(MAC_SWAP, 0),
240 	MK_FLOW_ACTION(DEC_TTL, 0),
241 	MK_FLOW_ACTION(SET_TTL, sizeof(struct rte_flow_action_set_ttl)),
242 	MK_FLOW_ACTION(SET_MAC_SRC, sizeof(struct rte_flow_action_set_mac)),
243 	MK_FLOW_ACTION(SET_MAC_DST, sizeof(struct rte_flow_action_set_mac)),
244 	MK_FLOW_ACTION(INC_TCP_SEQ, sizeof(rte_be32_t)),
245 	MK_FLOW_ACTION(DEC_TCP_SEQ, sizeof(rte_be32_t)),
246 	MK_FLOW_ACTION(INC_TCP_ACK, sizeof(rte_be32_t)),
247 	MK_FLOW_ACTION(DEC_TCP_ACK, sizeof(rte_be32_t)),
248 	MK_FLOW_ACTION(SET_TAG, sizeof(struct rte_flow_action_set_tag)),
249 	MK_FLOW_ACTION(SET_META, sizeof(struct rte_flow_action_set_meta)),
250 	MK_FLOW_ACTION(SET_IPV4_DSCP, sizeof(struct rte_flow_action_set_dscp)),
251 	MK_FLOW_ACTION(SET_IPV6_DSCP, sizeof(struct rte_flow_action_set_dscp)),
252 	MK_FLOW_ACTION(AGE, sizeof(struct rte_flow_action_age)),
253 	MK_FLOW_ACTION(SAMPLE, sizeof(struct rte_flow_action_sample)),
254 	MK_FLOW_ACTION(MODIFY_FIELD,
255 		       sizeof(struct rte_flow_action_modify_field)),
256 	/**
257 	 * Indirect action represented as handle of type
258 	 * (struct rte_flow_action_handle *) stored in conf field (see
	 * struct rte_flow_action); no need for an additional structure to
	 * store the indirect action handle.
261 	 */
262 	MK_FLOW_ACTION(INDIRECT, 0),
263 	MK_FLOW_ACTION(CONNTRACK, sizeof(struct rte_flow_action_conntrack)),
264 	MK_FLOW_ACTION(PORT_REPRESENTOR, sizeof(struct rte_flow_action_ethdev)),
265 	MK_FLOW_ACTION(REPRESENTED_PORT, sizeof(struct rte_flow_action_ethdev)),
266 	MK_FLOW_ACTION(METER_MARK, sizeof(struct rte_flow_action_meter_mark)),
267 	MK_FLOW_ACTION(SEND_TO_KERNEL, 0),
268 	MK_FLOW_ACTION(QUOTA, sizeof(struct rte_flow_action_quota)),
269 	MK_FLOW_ACTION(IPV6_EXT_PUSH, sizeof(struct rte_flow_action_ipv6_ext_push)),
270 	MK_FLOW_ACTION(IPV6_EXT_REMOVE, sizeof(struct rte_flow_action_ipv6_ext_remove)),
271 	MK_FLOW_ACTION(INDIRECT_LIST,
272 		       sizeof(struct rte_flow_action_indirect_list)),
273 	MK_FLOW_ACTION(PROG,
274 		       sizeof(struct rte_flow_action_prog)),
275 	MK_FLOW_ACTION(NAT64, sizeof(struct rte_flow_action_nat64)),
276 };
277 
278 int
279 rte_flow_dynf_metadata_register(void)
280 {
281 	int offset;
282 	int flag;
283 
284 	static const struct rte_mbuf_dynfield desc_offs = {
285 		.name = RTE_MBUF_DYNFIELD_METADATA_NAME,
286 		.size = sizeof(uint32_t),
287 		.align = alignof(uint32_t),
288 	};
289 	static const struct rte_mbuf_dynflag desc_flag = {
290 		.name = RTE_MBUF_DYNFLAG_METADATA_NAME,
291 	};
292 
293 	offset = rte_mbuf_dynfield_register(&desc_offs);
294 	if (offset < 0)
295 		goto error;
296 	flag = rte_mbuf_dynflag_register(&desc_flag);
297 	if (flag < 0)
298 		goto error;
299 	rte_flow_dynf_metadata_offs = offset;
300 	rte_flow_dynf_metadata_mask = RTE_BIT64(flag);
301 
302 	rte_flow_trace_dynf_metadata_register(offset, RTE_BIT64(flag));
303 
304 	return 0;
305 
306 error:
307 	rte_flow_dynf_metadata_offs = -1;
308 	rte_flow_dynf_metadata_mask = UINT64_C(0);
309 	return -rte_errno;
310 }
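
/*
 * A minimal application-side sketch (illustrative; assumes port setup and
 * error handling elsewhere): the dynamic metadata field must be registered
 * before rules using META/SET_META are created, and received mbufs carry
 * metadata only when the dynamic flag is set.
 *
 *	if (rte_flow_dynf_metadata_register() < 0)
 *		rte_exit(EXIT_FAILURE, "metadata dynfield: %s",
 *			 rte_strerror(rte_errno));
 *
 * Then, on the datapath, for a received struct rte_mbuf *m:
 *
 *	uint32_t meta = 0;
 *
 *	if (m->ol_flags & rte_flow_dynf_metadata_mask)
 *		meta = *RTE_FLOW_DYNF_METADATA(m);
 */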
311 
312 static inline void
313 fts_enter(struct rte_eth_dev *dev)
314 {
315 	if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
316 		pthread_mutex_lock(&dev->data->flow_ops_mutex);
317 }
318 
319 static inline void
320 fts_exit(struct rte_eth_dev *dev)
321 {
322 	if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
323 		pthread_mutex_unlock(&dev->data->flow_ops_mutex);
324 }
325 
326 static int
327 flow_err(uint16_t port_id, int ret, struct rte_flow_error *error)
328 {
329 	if (ret == 0)
330 		return 0;
331 	if (rte_eth_dev_is_removed(port_id))
332 		return rte_flow_error_set(error, EIO,
333 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
334 					  NULL, rte_strerror(EIO));
335 	return ret;
336 }
337 
338 /* Get generic flow operations structure from a port. */
339 const struct rte_flow_ops *
340 rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error)
341 {
342 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
343 	const struct rte_flow_ops *ops;
344 	int code;
345 
346 	if (unlikely(!rte_eth_dev_is_valid_port(port_id)))
347 		code = ENODEV;
348 	else if (unlikely(dev->dev_ops->flow_ops_get == NULL))
349 		/* flow API not supported with this driver dev_ops */
350 		code = ENOSYS;
351 	else
352 		code = dev->dev_ops->flow_ops_get(dev, &ops);
353 	if (code == 0 && ops == NULL)
354 		/* flow API not supported with this device */
355 		code = ENOSYS;
356 
357 	if (code != 0) {
358 		rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
359 				   NULL, rte_strerror(code));
360 		return NULL;
361 	}
362 	return ops;
363 }
364 
365 /* Check whether a flow rule can be created on a given port. */
366 int
367 rte_flow_validate(uint16_t port_id,
368 		  const struct rte_flow_attr *attr,
369 		  const struct rte_flow_item pattern[],
370 		  const struct rte_flow_action actions[],
371 		  struct rte_flow_error *error)
372 {
373 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
374 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
375 	int ret;
376 
377 	if (likely(!!attr) && attr->transfer &&
378 	    (attr->ingress || attr->egress)) {
379 		return rte_flow_error_set(error, EINVAL,
380 					  RTE_FLOW_ERROR_TYPE_ATTR,
381 					  attr, "cannot use attr ingress/egress with attr transfer");
382 	}
383 
384 	if (unlikely(!ops))
385 		return -rte_errno;
386 	if (likely(!!ops->validate)) {
387 		fts_enter(dev);
388 		ret = ops->validate(dev, attr, pattern, actions, error);
389 		fts_exit(dev);
390 		ret = flow_err(port_id, ret, error);
391 
392 		rte_flow_trace_validate(port_id, attr, pattern, actions, ret);
393 
394 		return ret;
395 	}
396 	return rte_flow_error_set(error, ENOSYS,
397 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
398 				  NULL, rte_strerror(ENOSYS));
399 }
400 
401 /* Create a flow rule on a given port. */
402 struct rte_flow *
403 rte_flow_create(uint16_t port_id,
404 		const struct rte_flow_attr *attr,
405 		const struct rte_flow_item pattern[],
406 		const struct rte_flow_action actions[],
407 		struct rte_flow_error *error)
408 {
409 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
410 	struct rte_flow *flow;
411 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
412 
413 	if (unlikely(!ops))
414 		return NULL;
415 	if (likely(!!ops->create)) {
416 		fts_enter(dev);
417 		flow = ops->create(dev, attr, pattern, actions, error);
418 		fts_exit(dev);
419 		if (flow == NULL)
420 			flow_err(port_id, -rte_errno, error);
421 
422 		rte_flow_trace_create(port_id, attr, pattern, actions, flow);
423 
424 		return flow;
425 	}
426 	rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
427 			   NULL, rte_strerror(ENOSYS));
428 	return NULL;
429 }
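
/*
 * A minimal validate/create sketch (pattern, actions and the error
 * handling are illustrative): drop all ingress Ethernet traffic.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *flow = NULL;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *	if (flow == NULL)
 *		printf("rule rejected: %s\n", err.message != NULL ?
 *		       err.message : rte_strerror(rte_errno));
 */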
430 
431 /* Destroy a flow rule on a given port. */
432 int
433 rte_flow_destroy(uint16_t port_id,
434 		 struct rte_flow *flow,
435 		 struct rte_flow_error *error)
436 {
437 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
438 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
439 	int ret;
440 
441 	if (unlikely(!ops))
442 		return -rte_errno;
443 	if (likely(!!ops->destroy)) {
444 		fts_enter(dev);
445 		ret = ops->destroy(dev, flow, error);
446 		fts_exit(dev);
447 		ret = flow_err(port_id, ret, error);
448 
449 		rte_flow_trace_destroy(port_id, flow, ret);
450 
451 		return ret;
452 	}
453 	return rte_flow_error_set(error, ENOSYS,
454 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
455 				  NULL, rte_strerror(ENOSYS));
456 }
457 
458 int
459 rte_flow_actions_update(uint16_t port_id,
460 			struct rte_flow *flow,
461 			const struct rte_flow_action actions[],
462 			struct rte_flow_error *error)
463 {
464 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
465 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
466 	int ret;
467 
468 	if (unlikely(!ops))
469 		return -rte_errno;
470 	if (likely(!!ops->actions_update)) {
471 		fts_enter(dev);
472 		ret = ops->actions_update(dev, flow, actions, error);
473 		fts_exit(dev);
474 
475 		rte_flow_trace_actions_update(port_id, flow, actions, ret);
476 
477 		return flow_err(port_id, ret, error);
478 	}
479 	return rte_flow_error_set(error, ENOSYS,
480 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
481 				  NULL, rte_strerror(ENOSYS));
482 }
483 
484 /* Destroy all flow rules associated with a port. */
485 int
486 rte_flow_flush(uint16_t port_id,
487 	       struct rte_flow_error *error)
488 {
489 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
490 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
491 	int ret;
492 
493 	if (unlikely(!ops))
494 		return -rte_errno;
495 	if (likely(!!ops->flush)) {
496 		fts_enter(dev);
497 		ret = ops->flush(dev, error);
498 		fts_exit(dev);
499 		ret = flow_err(port_id, ret, error);
500 
501 		rte_flow_trace_flush(port_id, ret);
502 
503 		return ret;
504 	}
505 	return rte_flow_error_set(error, ENOSYS,
506 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
507 				  NULL, rte_strerror(ENOSYS));
508 }
509 
510 /* Query an existing flow rule. */
511 int
512 rte_flow_query(uint16_t port_id,
513 	       struct rte_flow *flow,
514 	       const struct rte_flow_action *action,
515 	       void *data,
516 	       struct rte_flow_error *error)
517 {
518 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
519 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
520 	int ret;
521 
522 	if (!ops)
523 		return -rte_errno;
524 	if (likely(!!ops->query)) {
525 		fts_enter(dev);
526 		ret = ops->query(dev, flow, action, data, error);
527 		fts_exit(dev);
528 		ret = flow_err(port_id, ret, error);
529 
530 		rte_flow_trace_query(port_id, flow, action, data, ret);
531 
532 		return ret;
533 	}
534 	return rte_flow_error_set(error, ENOSYS,
535 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
536 				  NULL, rte_strerror(ENOSYS));
537 }
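
/*
 * A query sketch for the COUNT action (assumes the rule was created with
 * RTE_FLOW_ACTION_TYPE_COUNT among its actions; .reset = 1 clears the
 * counter after reading):
 *
 *	struct rte_flow_action count = { .type = RTE_FLOW_ACTION_TYPE_COUNT };
 *	struct rte_flow_query_count stats = { .reset = 1 };
 *	struct rte_flow_error err;
 *
 *	if (rte_flow_query(port_id, flow, &count, &stats, &err) == 0)
 *		printf("hits=%" PRIu64 " bytes=%" PRIu64 "\n",
 *		       stats.hits, stats.bytes);
 */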
538 
539 /* Restrict ingress traffic to the defined flow rules. */
540 int
541 rte_flow_isolate(uint16_t port_id,
542 		 int set,
543 		 struct rte_flow_error *error)
544 {
545 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
546 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
547 	int ret;
548 
549 	if (!ops)
550 		return -rte_errno;
551 	if (likely(!!ops->isolate)) {
552 		fts_enter(dev);
553 		ret = ops->isolate(dev, set, error);
554 		fts_exit(dev);
555 		ret = flow_err(port_id, ret, error);
556 
557 		rte_flow_trace_isolate(port_id, set, ret);
558 
559 		return ret;
560 	}
561 	return rte_flow_error_set(error, ENOSYS,
562 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
563 				  NULL, rte_strerror(ENOSYS));
564 }
565 
566 /* Initialize flow error structure. */
567 int
568 rte_flow_error_set(struct rte_flow_error *error,
569 		   int code,
570 		   enum rte_flow_error_type type,
571 		   const void *cause,
572 		   const char *message)
573 {
574 	if (error) {
575 		*error = (struct rte_flow_error){
576 			.type = type,
577 			.cause = cause,
578 			.message = message,
579 		};
580 	}
581 	rte_errno = code;
582 	return -code;
583 }
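
/*
 * A sketch of the intended use from a driver callback (the function and
 * the priority check are hypothetical): on failure, fill the error details
 * and return the negated errno in one statement. The negative value and
 * rte_errno then propagate back to the application unchanged.
 *
 *	static int
 *	pmd_flow_validate(struct rte_eth_dev *dev,
 *			  const struct rte_flow_attr *attr,
 *			  const struct rte_flow_item pattern[],
 *			  const struct rte_flow_action actions[],
 *			  struct rte_flow_error *error)
 *	{
 *		if (attr->priority != 0)
 *			return rte_flow_error_set(error, ENOTSUP,
 *					RTE_FLOW_ERROR_TYPE_ATTR_PRIORITY,
 *					attr, "priorities are not supported");
 *		...
 *	}
 */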
584 
585 /** Pattern item specification types. */
586 enum rte_flow_conv_item_spec_type {
587 	RTE_FLOW_CONV_ITEM_SPEC,
588 	RTE_FLOW_CONV_ITEM_LAST,
589 	RTE_FLOW_CONV_ITEM_MASK,
590 };
591 
592 /**
593  * Copy pattern item specification.
594  *
595  * @param[out] buf
596  *   Output buffer. Can be NULL if @p size is zero.
597  * @param size
598  *   Size of @p buf in bytes.
599  * @param[in] item
600  *   Pattern item to copy specification from.
601  * @param type
602  *   Specification selector for either @p spec, @p last or @p mask.
603  *
604  * @return
605  *   Number of bytes needed to store pattern item specification regardless
606  *   of @p size. @p buf contents are truncated to @p size if not large
607  *   enough.
608  */
609 static size_t
610 rte_flow_conv_item_spec(void *buf, const size_t size,
611 			const struct rte_flow_item *item,
612 			enum rte_flow_conv_item_spec_type type)
613 {
614 	size_t off;
615 	const void *data =
616 		type == RTE_FLOW_CONV_ITEM_SPEC ? item->spec :
617 		type == RTE_FLOW_CONV_ITEM_LAST ? item->last :
618 		type == RTE_FLOW_CONV_ITEM_MASK ? item->mask :
619 		NULL;
620 
621 	switch (item->type) {
622 		union {
623 			const struct rte_flow_item_raw *raw;
624 		} spec;
625 		union {
626 			const struct rte_flow_item_raw *raw;
627 		} last;
628 		union {
629 			const struct rte_flow_item_raw *raw;
630 		} mask;
631 		union {
632 			const struct rte_flow_item_raw *raw;
633 		} src;
634 		union {
635 			struct rte_flow_item_raw *raw;
636 		} dst;
637 		size_t tmp;
638 
639 	case RTE_FLOW_ITEM_TYPE_RAW:
640 		spec.raw = item->spec;
641 		last.raw = item->last ? item->last : item->spec;
642 		mask.raw = item->mask ? item->mask : &rte_flow_item_raw_mask;
643 		src.raw = data;
644 		dst.raw = buf;
645 		rte_memcpy(dst.raw,
646 			   (&(struct rte_flow_item_raw){
647 				.relative = src.raw->relative,
648 				.search = src.raw->search,
649 				.reserved = src.raw->reserved,
650 				.offset = src.raw->offset,
651 				.limit = src.raw->limit,
652 				.length = src.raw->length,
653 			   }),
654 			   size > sizeof(*dst.raw) ? sizeof(*dst.raw) : size);
655 		off = sizeof(*dst.raw);
656 		if (type == RTE_FLOW_CONV_ITEM_SPEC ||
657 		    (type == RTE_FLOW_CONV_ITEM_MASK &&
658 		     ((spec.raw->length & mask.raw->length) >=
659 		      (last.raw->length & mask.raw->length))))
660 			tmp = spec.raw->length & mask.raw->length;
661 		else
662 			tmp = last.raw->length & mask.raw->length;
663 		if (tmp) {
664 			off = RTE_ALIGN_CEIL(off, sizeof(*dst.raw->pattern));
665 			if (size >= off + tmp)
666 				dst.raw->pattern = rte_memcpy
667 					((void *)((uintptr_t)dst.raw + off),
668 					 src.raw->pattern, tmp);
669 			off += tmp;
670 		}
671 		break;
672 	default:
673 		off = rte_flow_conv_copy(buf, data, size,
674 					 rte_flow_desc_item, item->type);
675 		break;
676 	}
677 	return off;
678 }
679 
680 /**
681  * Copy action configuration.
682  *
683  * @param[out] buf
684  *   Output buffer. Can be NULL if @p size is zero.
685  * @param size
686  *   Size of @p buf in bytes.
687  * @param[in] action
688  *   Action to copy configuration from.
689  *
690  * @return
691  *   Number of bytes needed to store the action configuration regardless
692  *   of @p size. @p buf contents are truncated to @p size if not large
693  *   enough.
694  */
695 static size_t
696 rte_flow_conv_action_conf(void *buf, const size_t size,
697 			  const struct rte_flow_action *action)
698 {
699 	size_t off;
700 
701 	switch (action->type) {
702 		union {
703 			const struct rte_flow_action_rss *rss;
704 			const struct rte_flow_action_vxlan_encap *vxlan_encap;
705 			const struct rte_flow_action_nvgre_encap *nvgre_encap;
706 		} src;
707 		union {
708 			struct rte_flow_action_rss *rss;
709 			struct rte_flow_action_vxlan_encap *vxlan_encap;
710 			struct rte_flow_action_nvgre_encap *nvgre_encap;
711 		} dst;
712 		size_t tmp;
713 		int ret;
714 
715 	case RTE_FLOW_ACTION_TYPE_RSS:
716 		src.rss = action->conf;
717 		dst.rss = buf;
718 		rte_memcpy(dst.rss,
719 			   (&(struct rte_flow_action_rss){
720 				.func = src.rss->func,
721 				.level = src.rss->level,
722 				.types = src.rss->types,
723 				.key_len = src.rss->key_len,
724 				.queue_num = src.rss->queue_num,
725 			   }),
726 			   size > sizeof(*dst.rss) ? sizeof(*dst.rss) : size);
727 		off = sizeof(*dst.rss);
728 		if (src.rss->key_len && src.rss->key) {
729 			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->key));
730 			tmp = sizeof(*src.rss->key) * src.rss->key_len;
731 			if (size >= (uint64_t)off + (uint64_t)tmp)
732 				dst.rss->key = rte_memcpy
733 					((void *)((uintptr_t)dst.rss + off),
734 					 src.rss->key, tmp);
735 			off += tmp;
736 		}
737 		if (src.rss->queue_num) {
738 			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->queue));
739 			tmp = sizeof(*src.rss->queue) * src.rss->queue_num;
740 			if (size >= (uint64_t)off + (uint64_t)tmp)
741 				dst.rss->queue = rte_memcpy
742 					((void *)((uintptr_t)dst.rss + off),
743 					 src.rss->queue, tmp);
744 			off += tmp;
745 		}
746 		break;
747 	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
748 	case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
749 		src.vxlan_encap = action->conf;
750 		dst.vxlan_encap = buf;
751 		RTE_BUILD_BUG_ON(sizeof(*src.vxlan_encap) !=
752 				 sizeof(*src.nvgre_encap) ||
753 				 offsetof(struct rte_flow_action_vxlan_encap,
754 					  definition) !=
755 				 offsetof(struct rte_flow_action_nvgre_encap,
756 					  definition));
757 		off = sizeof(*dst.vxlan_encap);
758 		if (src.vxlan_encap->definition) {
759 			off = RTE_ALIGN_CEIL
760 				(off, sizeof(*dst.vxlan_encap->definition));
761 			ret = rte_flow_conv
762 				(RTE_FLOW_CONV_OP_PATTERN,
763 				 (void *)((uintptr_t)dst.vxlan_encap + off),
764 				 size > off ? size - off : 0,
765 				 src.vxlan_encap->definition, NULL);
766 			if (ret < 0)
767 				return 0;
768 			if (size >= off + ret)
769 				dst.vxlan_encap->definition =
770 					(void *)((uintptr_t)dst.vxlan_encap +
771 						 off);
772 			off += ret;
773 		}
774 		break;
775 	default:
776 		off = rte_flow_conv_copy(buf, action->conf, size,
777 					 rte_flow_desc_action, action->type);
778 		break;
779 	}
780 	return off;
781 }
782 
783 /**
784  * Copy a list of pattern items.
785  *
786  * @param[out] dst
787  *   Destination buffer. Can be NULL if @p size is zero.
788  * @param size
789  *   Size of @p dst in bytes.
790  * @param[in] src
791  *   Source pattern items.
792  * @param num
793  *   Maximum number of pattern items to process from @p src or 0 to process
794  *   the entire list. In both cases, processing stops after
795  *   RTE_FLOW_ITEM_TYPE_END is encountered.
796  * @param[out] error
797  *   Perform verbose error reporting if not NULL.
798  *
799  * @return
800  *   A positive value representing the number of bytes needed to store
801  *   pattern items regardless of @p size on success (@p dst contents are
802  *   truncated to @p size if not large enough), a negative errno value
803  *   otherwise and rte_errno is set.
804  */
805 static int
806 rte_flow_conv_pattern(struct rte_flow_item *dst,
807 		      const size_t size,
808 		      const struct rte_flow_item *src,
809 		      unsigned int num,
810 		      struct rte_flow_error *error)
811 {
812 	uintptr_t data = (uintptr_t)dst;
813 	size_t off;
814 	size_t ret;
815 	unsigned int i;
816 
817 	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
818 		/**
819 		 * allow PMD private flow item
820 		 */
821 		if (((int)src->type >= 0) &&
822 			((size_t)src->type >= RTE_DIM(rte_flow_desc_item) ||
823 		    !rte_flow_desc_item[src->type].name))
824 			return rte_flow_error_set
825 				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, src,
826 				 "cannot convert unknown item type");
827 		if (size >= off + sizeof(*dst))
828 			*dst = (struct rte_flow_item){
829 				.type = src->type,
830 			};
831 		off += sizeof(*dst);
832 		if (!src->type)
833 			num = i + 1;
834 	}
835 	num = i;
836 	src -= num;
837 	dst -= num;
838 	do {
839 		if (src->spec) {
840 			off = RTE_ALIGN_CEIL(off, sizeof(double));
841 			ret = rte_flow_conv_item_spec
842 				((void *)(data + off),
843 				 size > off ? size - off : 0, src,
844 				 RTE_FLOW_CONV_ITEM_SPEC);
845 			if (size && size >= off + ret)
846 				dst->spec = (void *)(data + off);
847 			off += ret;
849 		}
850 		if (src->last) {
851 			off = RTE_ALIGN_CEIL(off, sizeof(double));
852 			ret = rte_flow_conv_item_spec
853 				((void *)(data + off),
854 				 size > off ? size - off : 0, src,
855 				 RTE_FLOW_CONV_ITEM_LAST);
856 			if (size && size >= off + ret)
857 				dst->last = (void *)(data + off);
858 			off += ret;
859 		}
860 		if (src->mask) {
861 			off = RTE_ALIGN_CEIL(off, sizeof(double));
862 			ret = rte_flow_conv_item_spec
863 				((void *)(data + off),
864 				 size > off ? size - off : 0, src,
865 				 RTE_FLOW_CONV_ITEM_MASK);
866 			if (size && size >= off + ret)
867 				dst->mask = (void *)(data + off);
868 			off += ret;
869 		}
870 		++src;
871 		++dst;
872 	} while (--num);
873 	return off;
874 }
875 
876 /**
877  * Copy a list of actions.
878  *
879  * @param[out] dst
880  *   Destination buffer. Can be NULL if @p size is zero.
881  * @param size
882  *   Size of @p dst in bytes.
883  * @param[in] src
884  *   Source actions.
885  * @param num
886  *   Maximum number of actions to process from @p src or 0 to process the
887  *   entire list. In both cases, processing stops after
888  *   RTE_FLOW_ACTION_TYPE_END is encountered.
889  * @param[out] error
890  *   Perform verbose error reporting if not NULL.
891  *
892  * @return
893  *   A positive value representing the number of bytes needed to store
894  *   actions regardless of @p size on success (@p dst contents are truncated
895  *   to @p size if not large enough), a negative errno value otherwise and
896  *   rte_errno is set.
897  */
898 static int
899 rte_flow_conv_actions(struct rte_flow_action *dst,
900 		      const size_t size,
901 		      const struct rte_flow_action *src,
902 		      unsigned int num,
903 		      struct rte_flow_error *error)
904 {
905 	uintptr_t data = (uintptr_t)dst;
906 	size_t off;
907 	size_t ret;
908 	unsigned int i;
909 
910 	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
911 		/**
912 		 * allow PMD private flow action
913 		 */
914 		if (((int)src->type >= 0) &&
915 		    ((size_t)src->type >= RTE_DIM(rte_flow_desc_action) ||
916 		    !rte_flow_desc_action[src->type].name))
917 			return rte_flow_error_set
918 				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
919 				 src, "cannot convert unknown action type");
920 		if (size >= off + sizeof(*dst))
921 			*dst = (struct rte_flow_action){
922 				.type = src->type,
923 			};
924 		off += sizeof(*dst);
925 		if (!src->type)
926 			num = i + 1;
927 	}
928 	num = i;
929 	src -= num;
930 	dst -= num;
931 	do {
932 		if (src->type == RTE_FLOW_ACTION_TYPE_INDIRECT) {
933 			/*
			 * An indirect action's conf holds the indirect action
			 * handle. Copy the handle itself instead of
			 * duplicating the memory it points to.
937 			 */
938 			if (size)
939 				dst->conf = src->conf;
940 		} else if (src->conf) {
941 			off = RTE_ALIGN_CEIL(off, sizeof(double));
942 			ret = rte_flow_conv_action_conf
943 				((void *)(data + off),
944 				 size > off ? size - off : 0, src);
945 			if (size && size >= off + ret)
946 				dst->conf = (void *)(data + off);
947 			off += ret;
948 		}
949 		++src;
950 		++dst;
951 	} while (--num);
952 	return off;
953 }
954 
955 /**
956  * Copy flow rule components.
957  *
958  * This comprises the flow rule descriptor itself, attributes, pattern and
959  * actions list. NULL components in @p src are skipped.
960  *
961  * @param[out] dst
962  *   Destination buffer. Can be NULL if @p size is zero.
963  * @param size
964  *   Size of @p dst in bytes.
965  * @param[in] src
966  *   Source flow rule descriptor.
967  * @param[out] error
968  *   Perform verbose error reporting if not NULL.
969  *
970  * @return
971  *   A positive value representing the number of bytes needed to store all
972  *   components including the descriptor regardless of @p size on success
973  *   (@p dst contents are truncated to @p size if not large enough), a
974  *   negative errno value otherwise and rte_errno is set.
975  */
976 static int
977 rte_flow_conv_rule(struct rte_flow_conv_rule *dst,
978 		   const size_t size,
979 		   const struct rte_flow_conv_rule *src,
980 		   struct rte_flow_error *error)
981 {
982 	size_t off;
983 	int ret;
984 
985 	rte_memcpy(dst,
986 		   (&(struct rte_flow_conv_rule){
987 			.attr = NULL,
988 			.pattern = NULL,
989 			.actions = NULL,
990 		   }),
991 		   size > sizeof(*dst) ? sizeof(*dst) : size);
992 	off = sizeof(*dst);
993 	if (src->attr_ro) {
994 		off = RTE_ALIGN_CEIL(off, sizeof(double));
995 		if (size && size >= off + sizeof(*dst->attr))
996 			dst->attr = rte_memcpy
997 				((void *)((uintptr_t)dst + off),
998 				 src->attr_ro, sizeof(*dst->attr));
999 		off += sizeof(*dst->attr);
1000 	}
1001 	if (src->pattern_ro) {
1002 		off = RTE_ALIGN_CEIL(off, sizeof(double));
1003 		ret = rte_flow_conv_pattern((void *)((uintptr_t)dst + off),
1004 					    size > off ? size - off : 0,
1005 					    src->pattern_ro, 0, error);
1006 		if (ret < 0)
1007 			return ret;
1008 		if (size && size >= off + (size_t)ret)
1009 			dst->pattern = (void *)((uintptr_t)dst + off);
1010 		off += ret;
1011 	}
1012 	if (src->actions_ro) {
1013 		off = RTE_ALIGN_CEIL(off, sizeof(double));
1014 		ret = rte_flow_conv_actions((void *)((uintptr_t)dst + off),
1015 					    size > off ? size - off : 0,
1016 					    src->actions_ro, 0, error);
1017 		if (ret < 0)
1018 			return ret;
1019 		if (size >= off + (size_t)ret)
1020 			dst->actions = (void *)((uintptr_t)dst + off);
1021 		off += ret;
1022 	}
1023 	return off;
1024 }
1025 
1026 /**
1027  * Retrieve the name of a pattern item/action type.
1028  *
1029  * @param is_action
1030  *   Nonzero when @p src represents an action type instead of a pattern item
1031  *   type.
1032  * @param is_ptr
1033  *   Nonzero to write string address instead of contents into @p dst.
1034  * @param[out] dst
1035  *   Destination buffer. Can be NULL if @p size is zero.
1036  * @param size
1037  *   Size of @p dst in bytes.
1038  * @param[in] src
1039  *   Depending on @p is_action, source pattern item or action type cast as a
1040  *   pointer.
1041  * @param[out] error
1042  *   Perform verbose error reporting if not NULL.
1043  *
1044  * @return
1045  *   A positive value representing the number of bytes needed to store the
1046  *   name or its address regardless of @p size on success (@p dst contents
1047  *   are truncated to @p size if not large enough), a negative errno value
1048  *   otherwise and rte_errno is set.
1049  */
1050 static int
1051 rte_flow_conv_name(int is_action,
1052 		   int is_ptr,
1053 		   char *dst,
1054 		   const size_t size,
1055 		   const void *src,
1056 		   struct rte_flow_error *error)
1057 {
1058 	struct desc_info {
1059 		const struct rte_flow_desc_data *data;
1060 		size_t num;
1061 	};
1062 	static const struct desc_info info_rep[2] = {
1063 		{ rte_flow_desc_item, RTE_DIM(rte_flow_desc_item), },
1064 		{ rte_flow_desc_action, RTE_DIM(rte_flow_desc_action), },
1065 	};
1066 	const struct desc_info *const info = &info_rep[!!is_action];
1067 	unsigned int type = (uintptr_t)src;
1068 
1069 	if (type >= info->num)
1070 		return rte_flow_error_set
1071 			(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1072 			 "unknown object type to retrieve the name of");
1073 	if (!is_ptr)
1074 		return strlcpy(dst, info->data[type].name, size);
1075 	if (size >= sizeof(const char **))
1076 		*((const char **)dst) = info->data[type].name;
1077 	return sizeof(const char **);
1078 }
1079 
1080 /** Helper function to convert flow API objects. */
1081 int
1082 rte_flow_conv(enum rte_flow_conv_op op,
1083 	      void *dst,
1084 	      size_t size,
1085 	      const void *src,
1086 	      struct rte_flow_error *error)
1087 {
1088 	int ret;
1089 
1090 	switch (op) {
1091 		const struct rte_flow_attr *attr;
1092 
1093 	case RTE_FLOW_CONV_OP_NONE:
1094 		ret = 0;
1095 		break;
1096 	case RTE_FLOW_CONV_OP_ATTR:
1097 		attr = src;
1098 		if (size > sizeof(*attr))
1099 			size = sizeof(*attr);
1100 		rte_memcpy(dst, attr, size);
1101 		ret = sizeof(*attr);
1102 		break;
1103 	case RTE_FLOW_CONV_OP_ITEM:
1104 		ret = rte_flow_conv_pattern(dst, size, src, 1, error);
1105 		break;
1106 	case RTE_FLOW_CONV_OP_ACTION:
1107 		ret = rte_flow_conv_actions(dst, size, src, 1, error);
1108 		break;
1109 	case RTE_FLOW_CONV_OP_PATTERN:
1110 		ret = rte_flow_conv_pattern(dst, size, src, 0, error);
1111 		break;
1112 	case RTE_FLOW_CONV_OP_ACTIONS:
1113 		ret = rte_flow_conv_actions(dst, size, src, 0, error);
1114 		break;
1115 	case RTE_FLOW_CONV_OP_RULE:
1116 		ret = rte_flow_conv_rule(dst, size, src, error);
1117 		break;
1118 	case RTE_FLOW_CONV_OP_ITEM_NAME:
1119 		ret = rte_flow_conv_name(0, 0, dst, size, src, error);
1120 		break;
1121 	case RTE_FLOW_CONV_OP_ACTION_NAME:
1122 		ret = rte_flow_conv_name(1, 0, dst, size, src, error);
1123 		break;
1124 	case RTE_FLOW_CONV_OP_ITEM_NAME_PTR:
1125 		ret = rte_flow_conv_name(0, 1, dst, size, src, error);
1126 		break;
1127 	case RTE_FLOW_CONV_OP_ACTION_NAME_PTR:
1128 		ret = rte_flow_conv_name(1, 1, dst, size, src, error);
1129 		break;
1130 	default:
1131 		ret = rte_flow_error_set
1132 			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1133 			 NULL, "unknown object conversion operation");
1134 	}
1135 
1136 	rte_flow_trace_conv(op, dst, size, src, ret);
1137 
1138 	return ret;
1139 }
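
/*
 * The usual calling convention is two passes (the op and objects here are
 * illustrative): query the required size with a zero-sized buffer, then
 * convert into suitably sized storage.
 *
 *	struct rte_flow_error err;
 *	int len;
 *
 *	len = rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, NULL, 0, actions, &err);
 *	if (len < 0)
 *		return len;
 *	copy = malloc(len);
 *	if (copy == NULL)
 *		return -ENOMEM;
 *	if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTIONS, copy, len,
 *			  actions, &err) < 0) {
 *		free(copy);
 *		return -rte_errno;
 *	}
 */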
1140 
1141 /** Store a full rte_flow description. */
1142 size_t
1143 rte_flow_copy(struct rte_flow_desc *desc, size_t len,
1144 	      const struct rte_flow_attr *attr,
1145 	      const struct rte_flow_item *items,
1146 	      const struct rte_flow_action *actions)
1147 {
1148 	/*
	 * Overlap struct rte_flow_conv_rule with struct rte_flow_desc in order
1150 	 * to convert the former to the latter without wasting space.
1151 	 */
1152 	struct rte_flow_conv_rule *dst =
1153 		len ?
1154 		(void *)((uintptr_t)desc +
1155 			 (offsetof(struct rte_flow_desc, actions) -
1156 			  offsetof(struct rte_flow_conv_rule, actions))) :
1157 		NULL;
1158 	size_t dst_size =
1159 		len > sizeof(*desc) - sizeof(*dst) ?
1160 		len - (sizeof(*desc) - sizeof(*dst)) :
1161 		0;
1162 	struct rte_flow_conv_rule src = {
1163 		.attr_ro = NULL,
1164 		.pattern_ro = items,
1165 		.actions_ro = actions,
1166 	};
1167 	int ret;
1168 
1169 	RTE_BUILD_BUG_ON(sizeof(struct rte_flow_desc) <
1170 			 sizeof(struct rte_flow_conv_rule));
1171 	if (dst_size &&
1172 	    (&dst->pattern != &desc->items ||
1173 	     &dst->actions != &desc->actions ||
1174 	     (uintptr_t)(dst + 1) != (uintptr_t)(desc + 1))) {
1175 		rte_errno = EINVAL;
1176 		return 0;
1177 	}
1178 	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, dst, dst_size, &src, NULL);
1179 	if (ret < 0)
1180 		return 0;
1181 	ret += sizeof(*desc) - sizeof(*dst);
1182 	rte_memcpy(desc,
1183 		   (&(struct rte_flow_desc){
1184 			.size = ret,
1185 			.attr = *attr,
1186 			.items = dst_size ? dst->pattern : NULL,
1187 			.actions = dst_size ? dst->actions : NULL,
1188 		   }),
1189 		   len > sizeof(*desc) ? sizeof(*desc) : len);
1190 
1191 	rte_flow_trace_copy(desc, len, attr, items, actions, ret);
1192 
1193 	return ret;
1194 }
1195 
1196 int
1197 rte_flow_dev_dump(uint16_t port_id, struct rte_flow *flow,
1198 			FILE *file, struct rte_flow_error *error)
1199 {
1200 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1201 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1202 	int ret;
1203 
1204 	if (unlikely(!ops))
1205 		return -rte_errno;
1206 	if (likely(!!ops->dev_dump)) {
1207 		fts_enter(dev);
1208 		ret = ops->dev_dump(dev, flow, file, error);
1209 		fts_exit(dev);
1210 		return flow_err(port_id, ret, error);
1211 	}
1212 	return rte_flow_error_set(error, ENOSYS,
1213 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1214 				  NULL, rte_strerror(ENOSYS));
1215 }
1216 
1217 int
1218 rte_flow_get_aged_flows(uint16_t port_id, void **contexts,
1219 		    uint32_t nb_contexts, struct rte_flow_error *error)
1220 {
1221 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1222 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1223 	int ret;
1224 
1225 	if (unlikely(!ops))
1226 		return -rte_errno;
1227 	if (likely(!!ops->get_aged_flows)) {
1228 		fts_enter(dev);
1229 		ret = ops->get_aged_flows(dev, contexts, nb_contexts, error);
1230 		fts_exit(dev);
1231 		ret = flow_err(port_id, ret, error);
1232 
1233 		rte_flow_trace_get_aged_flows(port_id, contexts, nb_contexts, ret);
1234 
1235 		return ret;
1236 	}
1237 	return rte_flow_error_set(error, ENOTSUP,
1238 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1239 				  NULL, rte_strerror(ENOTSUP));
1240 }
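
/*
 * A flow-aging sketch (the timeout, app_ctx and handle_aged() are
 * illustrative): each rule carries an AGE action holding an application
 * context, and aged-out contexts are drained after the
 * RTE_ETH_EVENT_FLOW_AGED event fires. The timeout unit is seconds.
 *
 *	struct rte_flow_action_age age = {
 *		.timeout = 10,
 *		.context = app_ctx,
 *	};
 *
 * Later, from the event handler:
 *
 *	void *contexts[64];
 *	struct rte_flow_error err;
 *	int n, i;
 *
 *	n = rte_flow_get_aged_flows(port_id, contexts, RTE_DIM(contexts), &err);
 *	for (i = 0; i < n; i++)
 *		handle_aged(contexts[i]);
 */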
1241 
1242 int
1243 rte_flow_get_q_aged_flows(uint16_t port_id, uint32_t queue_id, void **contexts,
1244 			  uint32_t nb_contexts, struct rte_flow_error *error)
1245 {
1246 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1247 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1248 	int ret;
1249 
1250 	if (unlikely(!ops))
1251 		return -rte_errno;
1252 	if (likely(!!ops->get_q_aged_flows)) {
1253 		fts_enter(dev);
1254 		ret = ops->get_q_aged_flows(dev, queue_id, contexts,
1255 					    nb_contexts, error);
1256 		fts_exit(dev);
1257 		ret = flow_err(port_id, ret, error);
1258 
1259 		rte_flow_trace_get_q_aged_flows(port_id, queue_id, contexts,
1260 						nb_contexts, ret);
1261 
1262 		return ret;
1263 	}
1264 	return rte_flow_error_set(error, ENOTSUP,
1265 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1266 				  NULL, rte_strerror(ENOTSUP));
1267 }
1268 
1269 struct rte_flow_action_handle *
1270 rte_flow_action_handle_create(uint16_t port_id,
1271 			      const struct rte_flow_indir_action_conf *conf,
1272 			      const struct rte_flow_action *action,
1273 			      struct rte_flow_error *error)
1274 {
1275 	struct rte_flow_action_handle *handle;
1276 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1277 
1278 	if (unlikely(!ops))
1279 		return NULL;
1280 	if (unlikely(!ops->action_handle_create)) {
1281 		rte_flow_error_set(error, ENOSYS,
1282 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1283 				   rte_strerror(ENOSYS));
1284 		return NULL;
1285 	}
1286 	handle = ops->action_handle_create(&rte_eth_devices[port_id],
1287 					   conf, action, error);
1288 	if (handle == NULL)
1289 		flow_err(port_id, -rte_errno, error);
1290 
1291 	rte_flow_trace_action_handle_create(port_id, conf, action, handle);
1292 
1293 	return handle;
1294 }
1295 
1296 int
1297 rte_flow_action_handle_destroy(uint16_t port_id,
1298 			       struct rte_flow_action_handle *handle,
1299 			       struct rte_flow_error *error)
1300 {
1301 	int ret;
1302 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1303 
1304 	if (unlikely(!ops))
1305 		return -rte_errno;
1306 	if (unlikely(!ops->action_handle_destroy))
1307 		return rte_flow_error_set(error, ENOSYS,
1308 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1309 					  NULL, rte_strerror(ENOSYS));
1310 	ret = ops->action_handle_destroy(&rte_eth_devices[port_id],
1311 					 handle, error);
1312 	ret = flow_err(port_id, ret, error);
1313 
1314 	rte_flow_trace_action_handle_destroy(port_id, handle, ret);
1315 
1316 	return ret;
1317 }
1318 
1319 int
1320 rte_flow_action_handle_update(uint16_t port_id,
1321 			      struct rte_flow_action_handle *handle,
1322 			      const void *update,
1323 			      struct rte_flow_error *error)
1324 {
1325 	int ret;
1326 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1327 
1328 	if (unlikely(!ops))
1329 		return -rte_errno;
1330 	if (unlikely(!ops->action_handle_update))
1331 		return rte_flow_error_set(error, ENOSYS,
1332 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1333 					  NULL, rte_strerror(ENOSYS));
1334 	ret = ops->action_handle_update(&rte_eth_devices[port_id], handle,
1335 					update, error);
1336 	ret = flow_err(port_id, ret, error);
1337 
1338 	rte_flow_trace_action_handle_update(port_id, handle, update, ret);
1339 
1340 	return ret;
1341 }
1342 
1343 int
1344 rte_flow_action_handle_query(uint16_t port_id,
1345 			     const struct rte_flow_action_handle *handle,
1346 			     void *data,
1347 			     struct rte_flow_error *error)
1348 {
1349 	int ret;
1350 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1351 
1352 	if (unlikely(!ops))
1353 		return -rte_errno;
1354 	if (unlikely(!ops->action_handle_query))
1355 		return rte_flow_error_set(error, ENOSYS,
1356 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1357 					  NULL, rte_strerror(ENOSYS));
1358 	ret = ops->action_handle_query(&rte_eth_devices[port_id], handle,
1359 				       data, error);
1360 	ret = flow_err(port_id, ret, error);
1361 
1362 	rte_flow_trace_action_handle_query(port_id, handle, data, ret);
1363 
1364 	return ret;
1365 }
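
/*
 * An indirect action life-cycle sketch (a shared counter; names are
 * illustrative): create once, reference from any number of rules through
 * RTE_FLOW_ACTION_TYPE_INDIRECT, query and destroy through the handle.
 *
 *	struct rte_flow_indir_action_conf iconf = { .ingress = 1 };
 *	struct rte_flow_action count = { .type = RTE_FLOW_ACTION_TYPE_COUNT };
 *	struct rte_flow_error err;
 *	struct rte_flow_action_handle *h =
 *		rte_flow_action_handle_create(port_id, &iconf, &count, &err);
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_INDIRECT, .conf = h },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *
 * After creating rules with these actions:
 *
 *	struct rte_flow_query_count stats = { .reset = 0 };
 *
 *	rte_flow_action_handle_query(port_id, h, &stats, &err);
 *
 * The handle must outlive every rule referencing it; destroy the rules
 * first, then the handle:
 *
 *	rte_flow_action_handle_destroy(port_id, h, &err);
 */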
1366 
1367 int
1368 rte_flow_tunnel_decap_set(uint16_t port_id,
1369 			  struct rte_flow_tunnel *tunnel,
1370 			  struct rte_flow_action **actions,
1371 			  uint32_t *num_of_actions,
1372 			  struct rte_flow_error *error)
1373 {
1374 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1375 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1376 	int ret;
1377 
1378 	if (unlikely(!ops))
1379 		return -rte_errno;
1380 	if (likely(!!ops->tunnel_decap_set)) {
1381 		ret = flow_err(port_id,
1382 			       ops->tunnel_decap_set(dev, tunnel, actions,
1383 						     num_of_actions, error),
1384 			       error);
1385 
1386 		rte_flow_trace_tunnel_decap_set(port_id, tunnel, actions,
1387 						num_of_actions, ret);
1388 
1389 		return ret;
1390 	}
1391 	return rte_flow_error_set(error, ENOTSUP,
1392 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1393 				  NULL, rte_strerror(ENOTSUP));
1394 }
1395 
1396 int
1397 rte_flow_tunnel_match(uint16_t port_id,
1398 		      struct rte_flow_tunnel *tunnel,
1399 		      struct rte_flow_item **items,
1400 		      uint32_t *num_of_items,
1401 		      struct rte_flow_error *error)
1402 {
1403 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1404 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1405 	int ret;
1406 
1407 	if (unlikely(!ops))
1408 		return -rte_errno;
1409 	if (likely(!!ops->tunnel_match)) {
1410 		ret = flow_err(port_id,
1411 			       ops->tunnel_match(dev, tunnel, items,
1412 						 num_of_items, error),
1413 			       error);
1414 
1415 		rte_flow_trace_tunnel_match(port_id, tunnel, items, num_of_items,
1416 					    ret);
1417 
1418 		return ret;
1419 	}
1420 	return rte_flow_error_set(error, ENOTSUP,
1421 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1422 				  NULL, rte_strerror(ENOTSUP));
1423 }
1424 
1425 int
1426 rte_flow_get_restore_info(uint16_t port_id,
1427 			  struct rte_mbuf *m,
1428 			  struct rte_flow_restore_info *restore_info,
1429 			  struct rte_flow_error *error)
1430 {
1431 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1432 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1433 	int ret;
1434 
1435 	if (unlikely(!ops))
1436 		return -rte_errno;
1437 	if (likely(!!ops->get_restore_info)) {
1438 		ret = flow_err(port_id,
1439 			       ops->get_restore_info(dev, m, restore_info,
1440 						     error),
1441 			       error);
1442 
1443 		rte_flow_trace_get_restore_info(port_id, m, restore_info, ret);
1444 
1445 		return ret;
1446 	}
1447 	return rte_flow_error_set(error, ENOTSUP,
1448 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1449 				  NULL, rte_strerror(ENOTSUP));
1450 }
1451 
1452 static struct {
1453 	const struct rte_mbuf_dynflag desc;
1454 	uint64_t value;
1455 } flow_restore_info_dynflag = {
1456 	.desc = { .name = "RTE_MBUF_F_RX_RESTORE_INFO", },
1457 };
1458 
1459 uint64_t
1460 rte_flow_restore_info_dynflag(void)
1461 {
1462 	return flow_restore_info_dynflag.value;
1463 }
1464 
1465 int
1466 rte_flow_restore_info_dynflag_register(void)
1467 {
1468 	if (flow_restore_info_dynflag.value == 0) {
1469 		int offset = rte_mbuf_dynflag_register(&flow_restore_info_dynflag.desc);
1470 
1471 		if (offset < 0)
1472 			return -1;
1473 		flow_restore_info_dynflag.value = RTE_BIT64(offset);
1474 	}
1475 
1476 	return 0;
1477 }
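
/*
 * A datapath sketch (illustrative): register the flag once at setup, then
 * consult it per mbuf so the restore-info lookup is only paid for packets
 * that actually carry it.
 *
 *	uint64_t restore_flag = 0;
 *
 *	if (rte_flow_restore_info_dynflag_register() == 0)
 *		restore_flag = rte_flow_restore_info_dynflag();
 *
 * Per received struct rte_mbuf *m:
 *
 *	struct rte_flow_restore_info info;
 *	struct rte_flow_error err;
 *
 *	if ((m->ol_flags & restore_flag) != 0 &&
 *	    rte_flow_get_restore_info(port_id, m, &info, &err) == 0 &&
 *	    (info.flags & RTE_FLOW_RESTORE_INFO_TUNNEL) != 0) {
 *		... info.tunnel describes the outer headers of a partially
 *		    offloaded packet ...
 *	}
 */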
1478 
1479 int
1480 rte_flow_tunnel_action_decap_release(uint16_t port_id,
1481 				     struct rte_flow_action *actions,
1482 				     uint32_t num_of_actions,
1483 				     struct rte_flow_error *error)
1484 {
1485 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1486 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1487 	int ret;
1488 
1489 	if (unlikely(!ops))
1490 		return -rte_errno;
1491 	if (likely(!!ops->tunnel_action_decap_release)) {
1492 		ret = flow_err(port_id,
1493 			       ops->tunnel_action_decap_release(dev, actions,
1494 								num_of_actions,
1495 								error),
1496 			       error);
1497 
1498 		rte_flow_trace_tunnel_action_decap_release(port_id, actions,
1499 							   num_of_actions, ret);
1500 
1501 		return ret;
1502 	}
1503 	return rte_flow_error_set(error, ENOTSUP,
1504 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1505 				  NULL, rte_strerror(ENOTSUP));
1506 }
1507 
1508 int
1509 rte_flow_tunnel_item_release(uint16_t port_id,
1510 			     struct rte_flow_item *items,
1511 			     uint32_t num_of_items,
1512 			     struct rte_flow_error *error)
1513 {
1514 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1515 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1516 	int ret;
1517 
1518 	if (unlikely(!ops))
1519 		return -rte_errno;
1520 	if (likely(!!ops->tunnel_item_release)) {
1521 		ret = flow_err(port_id,
1522 			       ops->tunnel_item_release(dev, items,
1523 							num_of_items, error),
1524 			       error);
1525 
1526 		rte_flow_trace_tunnel_item_release(port_id, items, num_of_items, ret);
1527 
1528 		return ret;
1529 	}
1530 	return rte_flow_error_set(error, ENOTSUP,
1531 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1532 				  NULL, rte_strerror(ENOTSUP));
1533 }
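
/*
 * A condensed tunnel-offload sketch (VXLAN and the tunnel id are
 * illustrative): the PMD hands back actions that the application embeds
 * in its tunnel-set rule, then releases once the rule exists.
 *
 *	struct rte_flow_tunnel tnl = {
 *		.type = RTE_FLOW_ITEM_TYPE_VXLAN,
 *		.tun_id = 42,
 *	};
 *	struct rte_flow_action *pmd_actions;
 *	uint32_t n_pmd_actions;
 *	struct rte_flow_error err;
 *
 *	if (rte_flow_tunnel_decap_set(port_id, &tnl, &pmd_actions,
 *				      &n_pmd_actions, &err) == 0) {
 *		... prepend pmd_actions to the rule's own action list and
 *		    create the rule ...
 *		rte_flow_tunnel_action_decap_release(port_id, pmd_actions,
 *						     n_pmd_actions, &err);
 *	}
 */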
1534 
1535 int
1536 rte_flow_pick_transfer_proxy(uint16_t port_id, uint16_t *proxy_port_id,
1537 			     struct rte_flow_error *error)
1538 {
1539 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1540 	struct rte_eth_dev *dev;
1541 	int ret;
1542 
1543 	if (unlikely(ops == NULL))
1544 		return -rte_errno;
1545 
1546 	if (ops->pick_transfer_proxy == NULL) {
1547 		*proxy_port_id = port_id;
1548 		return 0;
1549 	}
1550 
1551 	dev = &rte_eth_devices[port_id];
1552 
1553 	ret = flow_err(port_id,
1554 		       ops->pick_transfer_proxy(dev, proxy_port_id, error),
1555 		       error);
1556 
1557 	rte_flow_trace_pick_transfer_proxy(port_id, proxy_port_id, ret);
1558 
1559 	return ret;
1560 }
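
/*
 * A transfer-rule sketch (pattern and actions assumed defined elsewhere):
 * transfer rules must be managed through the proxy port, which may differ
 * from the port whose traffic they match.
 *
 *	uint16_t proxy_id;
 *	struct rte_flow_attr attr = { .transfer = 1 };
 *	struct rte_flow_error err;
 *
 *	if (rte_flow_pick_transfer_proxy(port_id, &proxy_id, &err) == 0)
 *		flow = rte_flow_create(proxy_id, &attr, pattern, actions,
 *				       &err);
 */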
1561 
1562 struct rte_flow_item_flex_handle *
1563 rte_flow_flex_item_create(uint16_t port_id,
1564 			  const struct rte_flow_item_flex_conf *conf,
1565 			  struct rte_flow_error *error)
1566 {
1567 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1568 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1569 	struct rte_flow_item_flex_handle *handle;
1570 
1571 	if (unlikely(!ops))
1572 		return NULL;
1573 	if (unlikely(!ops->flex_item_create)) {
1574 		rte_flow_error_set(error, ENOTSUP,
1575 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1576 				   NULL, rte_strerror(ENOTSUP));
1577 		return NULL;
1578 	}
1579 	handle = ops->flex_item_create(dev, conf, error);
1580 	if (handle == NULL)
1581 		flow_err(port_id, -rte_errno, error);
1582 
1583 	rte_flow_trace_flex_item_create(port_id, conf, handle);
1584 
1585 	return handle;
1586 }
1587 
1588 int
1589 rte_flow_flex_item_release(uint16_t port_id,
1590 			   const struct rte_flow_item_flex_handle *handle,
1591 			   struct rte_flow_error *error)
1592 {
1593 	int ret;
1594 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1595 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1596 
1597 	if (unlikely(!ops || !ops->flex_item_release))
1598 		return rte_flow_error_set(error, ENOTSUP,
1599 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1600 					  NULL, rte_strerror(ENOTSUP));
1601 	ret = ops->flex_item_release(dev, handle, error);
1602 	ret = flow_err(port_id, ret, error);
1603 
1604 	rte_flow_trace_flex_item_release(port_id, handle, ret);
1605 
1606 	return ret;
1607 }
1608 
1609 int
1610 rte_flow_info_get(uint16_t port_id,
1611 		  struct rte_flow_port_info *port_info,
1612 		  struct rte_flow_queue_info *queue_info,
1613 		  struct rte_flow_error *error)
1614 {
1615 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1616 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1617 	int ret;
1618 
1619 	if (unlikely(!ops))
1620 		return -rte_errno;
1621 	if (dev->data->dev_configured == 0) {
1622 		FLOW_LOG(INFO,
1623 			"Device with port_id=%"PRIu16" is not configured.",
1624 			port_id);
1625 		return -EINVAL;
1626 	}
1627 	if (port_info == NULL) {
1628 		FLOW_LOG(ERR, "Port %"PRIu16" info is NULL.", port_id);
1629 		return -EINVAL;
1630 	}
1631 	if (likely(!!ops->info_get)) {
1632 		ret = flow_err(port_id,
1633 			       ops->info_get(dev, port_info, queue_info, error),
1634 			       error);
1635 
1636 		rte_flow_trace_info_get(port_id, port_info, queue_info, ret);
1637 
1638 		return ret;
1639 	}
1640 	return rte_flow_error_set(error, ENOTSUP,
1641 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1642 				  NULL, rte_strerror(ENOTSUP));
1643 }
1644 
1645 int
1646 rte_flow_configure(uint16_t port_id,
1647 		   const struct rte_flow_port_attr *port_attr,
1648 		   uint16_t nb_queue,
1649 		   const struct rte_flow_queue_attr *queue_attr[],
1650 		   struct rte_flow_error *error)
1651 {
1652 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1653 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1654 	int ret;
1655 
1656 	if (unlikely(!ops))
1657 		return -rte_errno;
1658 	if (dev->data->dev_configured == 0) {
1659 		FLOW_LOG(INFO,
1660 			"Device with port_id=%"PRIu16" is not configured.",
1661 			port_id);
1662 		return -EINVAL;
1663 	}
1664 	if (dev->data->dev_started != 0) {
1665 		FLOW_LOG(INFO,
1666 			"Device with port_id=%"PRIu16" already started.",
1667 			port_id);
1668 		return -EINVAL;
1669 	}
1670 	if (port_attr == NULL) {
1671 		FLOW_LOG(ERR, "Port %"PRIu16" attributes are NULL.", port_id);
1672 		return -EINVAL;
1673 	}
1674 	if (queue_attr == NULL) {
1675 		FLOW_LOG(ERR, "Port %"PRIu16" queue attributes are NULL.", port_id);
1676 		return -EINVAL;
1677 	}
1678 	if ((port_attr->flags & RTE_FLOW_PORT_FLAG_SHARE_INDIRECT) &&
1679 	     !rte_eth_dev_is_valid_port(port_attr->host_port_id)) {
1680 		return rte_flow_error_set(error, ENODEV,
1681 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1682 					  NULL, rte_strerror(ENODEV));
1683 	}
1684 	if (likely(!!ops->configure)) {
1685 		ret = ops->configure(dev, port_attr, nb_queue, queue_attr, error);
1686 		if (ret == 0)
1687 			dev->data->flow_configured = 1;
1688 		ret = flow_err(port_id, ret, error);
1689 
1690 		rte_flow_trace_configure(port_id, port_attr, nb_queue, queue_attr, ret);
1691 
1692 		return ret;
1693 	}
1694 	return rte_flow_error_set(error, ENOTSUP,
1695 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1696 				  NULL, rte_strerror(ENOTSUP));
1697 }
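
/*
 * A pre-start configuration sketch (sizes are illustrative): query the
 * device limits first, then size the flow engine between
 * rte_eth_dev_configure() and rte_eth_dev_start().
 *
 *	struct rte_flow_port_info pinfo;
 *	struct rte_flow_queue_info qinfo;
 *	const struct rte_flow_port_attr pattr = { .nb_counters = 1024 };
 *	const struct rte_flow_queue_attr qattr = { .size = 64 };
 *	const struct rte_flow_queue_attr *qattrs[] = { &qattr };
 *	struct rte_flow_error err;
 *	int ret;
 *
 *	if (rte_flow_info_get(port_id, &pinfo, &qinfo, &err) == 0)
 *		ret = rte_flow_configure(port_id, &pattr, 1, qattrs, &err);
 */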
1698 
1699 struct rte_flow_pattern_template *
1700 rte_flow_pattern_template_create(uint16_t port_id,
1701 		const struct rte_flow_pattern_template_attr *template_attr,
1702 		const struct rte_flow_item pattern[],
1703 		struct rte_flow_error *error)
1704 {
1705 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1706 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1707 	struct rte_flow_pattern_template *template;
1708 
1709 	if (unlikely(!ops))
1710 		return NULL;
1711 	if (dev->data->flow_configured == 0) {
1712 		FLOW_LOG(INFO,
1713 			"Flow engine on port_id=%"PRIu16" is not configured.",
1714 			port_id);
1715 		rte_flow_error_set(error, EINVAL,
1716 				RTE_FLOW_ERROR_TYPE_STATE,
1717 				NULL, rte_strerror(EINVAL));
1718 		return NULL;
1719 	}
1720 	if (template_attr == NULL) {
1721 		FLOW_LOG(ERR,
1722 			     "Port %"PRIu16" template attr is NULL.",
1723 			     port_id);
1724 		rte_flow_error_set(error, EINVAL,
1725 				   RTE_FLOW_ERROR_TYPE_ATTR,
1726 				   NULL, rte_strerror(EINVAL));
1727 		return NULL;
1728 	}
1729 	if (pattern == NULL) {
1730 		FLOW_LOG(ERR,
1731 			     "Port %"PRIu16" pattern is NULL.",
1732 			     port_id);
1733 		rte_flow_error_set(error, EINVAL,
1734 				   RTE_FLOW_ERROR_TYPE_ATTR,
1735 				   NULL, rte_strerror(EINVAL));
1736 		return NULL;
1737 	}
1738 	if (likely(!!ops->pattern_template_create)) {
1739 		template = ops->pattern_template_create(dev, template_attr,
1740 							pattern, error);
1741 		if (template == NULL)
1742 			flow_err(port_id, -rte_errno, error);
1743 
1744 		rte_flow_trace_pattern_template_create(port_id, template_attr,
1745 						       pattern, template);
1746 
1747 		return template;
1748 	}
1749 	rte_flow_error_set(error, ENOTSUP,
1750 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1751 			   NULL, rte_strerror(ENOTSUP));
1752 	return NULL;
1753 }
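
/*
 * Usage sketch (illustrative only): an ingress pattern template matching
 * any Ethernet/IPv4 packet on the full destination address. The item
 * masks in a template select which fields later rules will provide.
 *
 *	const struct rte_flow_pattern_template_attr pt_attr = { .ingress = 1 };
 *	const struct rte_flow_item_ipv4 ipv4_mask = {
 *		.hdr.dst_addr = RTE_BE32(UINT32_MAX),
 *	};
 *	const struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .mask = &ipv4_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_pattern_template *pt =
 *		rte_flow_pattern_template_create(0, &pt_attr, pattern, &err);
 */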
1754 
1755 int
1756 rte_flow_pattern_template_destroy(uint16_t port_id,
1757 		struct rte_flow_pattern_template *pattern_template,
1758 		struct rte_flow_error *error)
1759 {
1760 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1761 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1762 	int ret;
1763 
1764 	if (unlikely(!ops))
1765 		return -rte_errno;
1766 	if (unlikely(pattern_template == NULL))
1767 		return 0;
1768 	if (likely(!!ops->pattern_template_destroy)) {
1769 		ret = flow_err(port_id,
1770 			       ops->pattern_template_destroy(dev,
1771 							     pattern_template,
1772 							     error),
1773 			       error);
1774 
1775 		rte_flow_trace_pattern_template_destroy(port_id, pattern_template,
1776 							ret);
1777 
1778 		return ret;
1779 	}
1780 	return rte_flow_error_set(error, ENOTSUP,
1781 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1782 				  NULL, rte_strerror(ENOTSUP));
1783 }
1784 
1785 struct rte_flow_actions_template *
1786 rte_flow_actions_template_create(uint16_t port_id,
1787 			const struct rte_flow_actions_template_attr *template_attr,
1788 			const struct rte_flow_action actions[],
1789 			const struct rte_flow_action masks[],
1790 			struct rte_flow_error *error)
1791 {
1792 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1793 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1794 	struct rte_flow_actions_template *template;
1795 
1796 	if (unlikely(!ops))
1797 		return NULL;
1798 	if (dev->data->flow_configured == 0) {
1799 		FLOW_LOG(INFO,
1800 			"Flow engine on port_id=%"PRIu16" is not configured.",
1801 			port_id);
1802 		rte_flow_error_set(error, EINVAL,
1803 				   RTE_FLOW_ERROR_TYPE_STATE,
1804 				   NULL, rte_strerror(EINVAL));
1805 		return NULL;
1806 	}
1807 	if (template_attr == NULL) {
1808 		FLOW_LOG(ERR,
1809 			     "Port %"PRIu16" template attr is NULL.",
1810 			     port_id);
1811 		rte_flow_error_set(error, EINVAL,
1812 				   RTE_FLOW_ERROR_TYPE_ATTR,
1813 				   NULL, rte_strerror(EINVAL));
1814 		return NULL;
1815 	}
1816 	if (actions == NULL) {
1817 		FLOW_LOG(ERR,
1818 			     "Port %"PRIu16" actions is NULL.",
1819 			     port_id);
1820 		rte_flow_error_set(error, EINVAL,
1821 				   RTE_FLOW_ERROR_TYPE_ATTR,
1822 				   NULL, rte_strerror(EINVAL));
1823 		return NULL;
1824 	}
	if (masks == NULL) {
		FLOW_LOG(ERR,
			     "Port %"PRIu16" masks is NULL.",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
1834 	if (likely(!!ops->actions_template_create)) {
1835 		template = ops->actions_template_create(dev, template_attr,
1836 							actions, masks, error);
1837 		if (template == NULL)
1838 			flow_err(port_id, -rte_errno, error);
1839 
1840 		rte_flow_trace_actions_template_create(port_id, template_attr, actions,
1841 						       masks, template);
1842 
1843 		return template;
1844 	}
1845 	rte_flow_error_set(error, ENOTSUP,
1846 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1847 			   NULL, rte_strerror(ENOTSUP));
1848 	return NULL;
1849 }
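
/*
 * Usage sketch (illustrative only): an ingress actions template steering
 * to a queue. Leaving the QUEUE conf NULL in the masks array marks the
 * queue index as a per-rule parameter supplied at rule creation time.
 *
 *	const struct rte_flow_actions_template_attr at_attr = { .ingress = 1 };
 *	const struct rte_flow_action_queue queue_conf = { .index = 0 };
 *	const struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue_conf },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	const struct rte_flow_action masks[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_actions_template *at =
 *		rte_flow_actions_template_create(0, &at_attr, actions, masks,
 *						 &err);
 */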
1850 
1851 int
1852 rte_flow_actions_template_destroy(uint16_t port_id,
1853 			struct rte_flow_actions_template *actions_template,
1854 			struct rte_flow_error *error)
1855 {
1856 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1857 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1858 	int ret;
1859 
1860 	if (unlikely(!ops))
1861 		return -rte_errno;
1862 	if (unlikely(actions_template == NULL))
1863 		return 0;
1864 	if (likely(!!ops->actions_template_destroy)) {
1865 		ret = flow_err(port_id,
1866 			       ops->actions_template_destroy(dev,
1867 							     actions_template,
1868 							     error),
1869 			       error);
1870 
1871 		rte_flow_trace_actions_template_destroy(port_id, actions_template,
1872 							ret);
1873 
1874 		return ret;
1875 	}
1876 	return rte_flow_error_set(error, ENOTSUP,
1877 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1878 				  NULL, rte_strerror(ENOTSUP));
1879 }
1880 
1881 struct rte_flow_template_table *
1882 rte_flow_template_table_create(uint16_t port_id,
1883 			const struct rte_flow_template_table_attr *table_attr,
1884 			struct rte_flow_pattern_template *pattern_templates[],
1885 			uint8_t nb_pattern_templates,
1886 			struct rte_flow_actions_template *actions_templates[],
1887 			uint8_t nb_actions_templates,
1888 			struct rte_flow_error *error)
1889 {
1890 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1891 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1892 	struct rte_flow_template_table *table;
1893 
1894 	if (unlikely(!ops))
1895 		return NULL;
1896 	if (dev->data->flow_configured == 0) {
1897 		FLOW_LOG(INFO,
1898 			"Flow engine on port_id=%"PRIu16" is not configured.",
1899 			port_id);
1900 		rte_flow_error_set(error, EINVAL,
1901 				   RTE_FLOW_ERROR_TYPE_STATE,
1902 				   NULL, rte_strerror(EINVAL));
1903 		return NULL;
1904 	}
1905 	if (table_attr == NULL) {
1906 		FLOW_LOG(ERR,
1907 			     "Port %"PRIu16" table attr is NULL.",
1908 			     port_id);
1909 		rte_flow_error_set(error, EINVAL,
1910 				   RTE_FLOW_ERROR_TYPE_ATTR,
1911 				   NULL, rte_strerror(EINVAL));
1912 		return NULL;
1913 	}
1914 	if (pattern_templates == NULL) {
1915 		FLOW_LOG(ERR,
1916 			     "Port %"PRIu16" pattern templates is NULL.",
1917 			     port_id);
1918 		rte_flow_error_set(error, EINVAL,
1919 				   RTE_FLOW_ERROR_TYPE_ATTR,
1920 				   NULL, rte_strerror(EINVAL));
1921 		return NULL;
1922 	}
1923 	if (actions_templates == NULL) {
1924 		FLOW_LOG(ERR,
1925 			     "Port %"PRIu16" actions templates is NULL.",
1926 			     port_id);
1927 		rte_flow_error_set(error, EINVAL,
1928 				   RTE_FLOW_ERROR_TYPE_ATTR,
1929 				   NULL, rte_strerror(EINVAL));
1930 		return NULL;
1931 	}
1932 	if (likely(!!ops->template_table_create)) {
1933 		table = ops->template_table_create(dev, table_attr,
1934 					pattern_templates, nb_pattern_templates,
1935 					actions_templates, nb_actions_templates,
1936 					error);
1937 		if (table == NULL)
1938 			flow_err(port_id, -rte_errno, error);
1939 
1940 		rte_flow_trace_template_table_create(port_id, table_attr,
1941 						     pattern_templates,
1942 						     nb_pattern_templates,
1943 						     actions_templates,
1944 						     nb_actions_templates, table);
1945 
1946 		return table;
1947 	}
1948 	rte_flow_error_set(error, ENOTSUP,
1949 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1950 			   NULL, rte_strerror(ENOTSUP));
1951 	return NULL;
1952 }
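
/*
 * Usage sketch (illustrative only): binding one pattern template and one
 * actions template (pt/at from the sketches above) into a table sized
 * for 1024 rules in group 1.
 *
 *	const struct rte_flow_template_table_attr tbl_attr = {
 *		.flow_attr = { .group = 1, .ingress = 1 },
 *		.nb_flows = 1024,
 *	};
 *	struct rte_flow_template_table *tbl =
 *		rte_flow_template_table_create(0, &tbl_attr, &pt, 1,
 *					       &at, 1, &err);
 */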
1953 
1954 int
1955 rte_flow_template_table_destroy(uint16_t port_id,
1956 				struct rte_flow_template_table *template_table,
1957 				struct rte_flow_error *error)
1958 {
1959 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1960 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1961 	int ret;
1962 
1963 	if (unlikely(!ops))
1964 		return -rte_errno;
1965 	if (unlikely(template_table == NULL))
1966 		return 0;
1967 	if (likely(!!ops->template_table_destroy)) {
1968 		ret = flow_err(port_id,
1969 			       ops->template_table_destroy(dev,
1970 							   template_table,
1971 							   error),
1972 			       error);
1973 
1974 		rte_flow_trace_template_table_destroy(port_id, template_table,
1975 						      ret);
1976 
1977 		return ret;
1978 	}
1979 	return rte_flow_error_set(error, ENOTSUP,
1980 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1981 				  NULL, rte_strerror(ENOTSUP));
1982 }
1983 
1984 int
1985 rte_flow_group_set_miss_actions(uint16_t port_id,
1986 				uint32_t group_id,
1987 				const struct rte_flow_group_attr *attr,
1988 				const struct rte_flow_action actions[],
1989 				struct rte_flow_error *error)
1990 {
1991 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1992 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1993 
1994 	if (unlikely(!ops))
1995 		return -rte_errno;
1996 	if (likely(!!ops->group_set_miss_actions)) {
1997 		return flow_err(port_id,
1998 				ops->group_set_miss_actions(dev, group_id, attr, actions, error),
1999 				error);
2000 	}
2001 	return rte_flow_error_set(error, ENOTSUP,
2002 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2003 				  NULL, rte_strerror(ENOTSUP));
2004 }
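
/*
 * Usage sketch (illustrative only): diverting packets that miss every
 * rule in group 1 to group 2 instead of the default miss behavior.
 *
 *	const struct rte_flow_group_attr grp_attr = { .ingress = 1 };
 *	const struct rte_flow_action_jump jump = { .group = 2 };
 *	const struct rte_flow_action miss_actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	rte_flow_group_set_miss_actions(0, 1, &grp_attr, miss_actions, &err);
 */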
2005 
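/*
 * Note: most of the enqueue-oriented functions below (async rule
 * create/destroy/update and friends) dereference ops without the NULL
 * and configuration checks performed by the slow-path entry points
 * above, presumably to keep the flow insertion data path lean; callers
 * are expected to have validated the port via rte_flow_configure()
 * beforehand.
 */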
2006 struct rte_flow *
2007 rte_flow_async_create(uint16_t port_id,
2008 		      uint32_t queue_id,
2009 		      const struct rte_flow_op_attr *op_attr,
2010 		      struct rte_flow_template_table *template_table,
2011 		      const struct rte_flow_item pattern[],
2012 		      uint8_t pattern_template_index,
2013 		      const struct rte_flow_action actions[],
2014 		      uint8_t actions_template_index,
2015 		      void *user_data,
2016 		      struct rte_flow_error *error)
2017 {
2018 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2019 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2020 	struct rte_flow *flow;
2021 
2022 	flow = ops->async_create(dev, queue_id,
2023 				 op_attr, template_table,
2024 				 pattern, pattern_template_index,
2025 				 actions, actions_template_index,
2026 				 user_data, error);
2027 	if (flow == NULL)
2028 		flow_err(port_id, -rte_errno, error);
2029 
2030 	rte_flow_trace_async_create(port_id, queue_id, op_attr, template_table,
2031 				    pattern, pattern_template_index, actions,
2032 				    actions_template_index, user_data, flow);
2033 
2034 	return flow;
2035 }
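
/*
 * Usage sketch (illustrative only): enqueueing a rule on flow queue 0
 * with the doorbell postponed until rte_flow_push(). Here tbl, pattern
 * and actions come from the sketches above; they carry only the spec
 * values the template masks left open.
 *
 *	const struct rte_flow_op_attr op_attr = { .postpone = 1 };
 *	struct rte_flow *flow =
 *		rte_flow_async_create(0, 0, &op_attr, tbl, pattern, 0,
 *				      actions, 0, NULL, &err);
 */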
2036 
2037 struct rte_flow *
2038 rte_flow_async_create_by_index(uint16_t port_id,
2039 			       uint32_t queue_id,
2040 			       const struct rte_flow_op_attr *op_attr,
2041 			       struct rte_flow_template_table *template_table,
2042 			       uint32_t rule_index,
2043 			       const struct rte_flow_action actions[],
2044 			       uint8_t actions_template_index,
2045 			       void *user_data,
2046 			       struct rte_flow_error *error)
2047 {
2048 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2049 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2050 	struct rte_flow *flow;
2051 
2052 	flow = ops->async_create_by_index(dev, queue_id,
2053 					  op_attr, template_table, rule_index,
2054 					  actions, actions_template_index,
2055 					  user_data, error);
2056 	if (flow == NULL)
2057 		flow_err(port_id, -rte_errno, error);
2058 	return flow;
2059 }
2060 
2061 int
2062 rte_flow_async_destroy(uint16_t port_id,
2063 		       uint32_t queue_id,
2064 		       const struct rte_flow_op_attr *op_attr,
2065 		       struct rte_flow *flow,
2066 		       void *user_data,
2067 		       struct rte_flow_error *error)
2068 {
2069 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2070 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2071 	int ret;
2072 
2073 	ret = flow_err(port_id,
2074 		       ops->async_destroy(dev, queue_id,
2075 					  op_attr, flow,
2076 					  user_data, error),
2077 		       error);
2078 
2079 	rte_flow_trace_async_destroy(port_id, queue_id, op_attr, flow,
2080 				     user_data, ret);
2081 
2082 	return ret;
2083 }
2084 
2085 int
2086 rte_flow_async_actions_update(uint16_t port_id,
2087 			      uint32_t queue_id,
2088 			      const struct rte_flow_op_attr *op_attr,
2089 			      struct rte_flow *flow,
2090 			      const struct rte_flow_action actions[],
2091 			      uint8_t actions_template_index,
2092 			      void *user_data,
2093 			      struct rte_flow_error *error)
2094 {
2095 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2096 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2097 	int ret;
2098 
2099 	ret = flow_err(port_id,
2100 		       ops->async_actions_update(dev, queue_id, op_attr,
2101 						 flow, actions,
2102 						 actions_template_index,
2103 						 user_data, error),
2104 		       error);
2105 
2106 	rte_flow_trace_async_actions_update(port_id, queue_id, op_attr, flow,
2107 					    actions, actions_template_index,
2108 					    user_data, ret);
2109 
2110 	return ret;
2111 }
2112 
2113 int
2114 rte_flow_push(uint16_t port_id,
2115 	      uint32_t queue_id,
2116 	      struct rte_flow_error *error)
2117 {
2118 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2119 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2120 	int ret;
2121 
2122 	ret = flow_err(port_id,
2123 		       ops->push(dev, queue_id, error),
2124 		       error);
2125 
2126 	rte_flow_trace_push(port_id, queue_id, ret);
2127 
2128 	return ret;
2129 }
2130 
2131 int
2132 rte_flow_pull(uint16_t port_id,
2133 	      uint32_t queue_id,
2134 	      struct rte_flow_op_result res[],
2135 	      uint16_t n_res,
2136 	      struct rte_flow_error *error)
2137 {
2138 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2139 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2140 	int ret;
2141 	int rc;
2142 
2143 	ret = ops->pull(dev, queue_id, res, n_res, error);
2144 	rc = ret ? ret : flow_err(port_id, ret, error);
2145 
2146 	rte_flow_trace_pull(port_id, queue_id, res, n_res, rc);
2147 
2148 	return rc;
2149 }
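
/*
 * Usage sketch (illustrative only): ringing the doorbell for postponed
 * operations on queue 0 and draining their completions. handle_failure()
 * is a hypothetical application callback; the 32-entry burst is an
 * arbitrary choice.
 *
 *	struct rte_flow_op_result results[32];
 *	int i, n;
 *
 *	rte_flow_push(0, 0, &err);
 *	do {
 *		n = rte_flow_pull(0, 0, results, RTE_DIM(results), &err);
 *		for (i = 0; i < n; i++)
 *			if (results[i].status != RTE_FLOW_OP_SUCCESS)
 *				handle_failure(results[i].user_data);
 *	} while (n > 0);
 */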
2150 
2151 struct rte_flow_action_handle *
2152 rte_flow_async_action_handle_create(uint16_t port_id,
2153 		uint32_t queue_id,
2154 		const struct rte_flow_op_attr *op_attr,
2155 		const struct rte_flow_indir_action_conf *indir_action_conf,
2156 		const struct rte_flow_action *action,
2157 		void *user_data,
2158 		struct rte_flow_error *error)
2159 {
2160 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2161 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2162 	struct rte_flow_action_handle *handle;
2163 
2164 	handle = ops->async_action_handle_create(dev, queue_id, op_attr,
2165 					     indir_action_conf, action, user_data, error);
2166 	if (handle == NULL)
2167 		flow_err(port_id, -rte_errno, error);
2168 
2169 	rte_flow_trace_async_action_handle_create(port_id, queue_id, op_attr,
2170 						  indir_action_conf, action,
2171 						  user_data, handle);
2172 
2173 	return handle;
2174 }
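
/*
 * Usage sketch (illustrative only): asynchronously creating a shared
 * counter handle on queue 0 so several rules can reference a single
 * counter (op_attr as in the rule creation sketch above).
 *
 *	const struct rte_flow_indir_action_conf indir_conf = { .ingress = 1 };
 *	const struct rte_flow_action count = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *	};
 *	struct rte_flow_action_handle *h =
 *		rte_flow_async_action_handle_create(0, 0, &op_attr,
 *						    &indir_conf, &count,
 *						    NULL, &err);
 */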
2175 
2176 int
2177 rte_flow_async_action_handle_destroy(uint16_t port_id,
2178 		uint32_t queue_id,
2179 		const struct rte_flow_op_attr *op_attr,
2180 		struct rte_flow_action_handle *action_handle,
2181 		void *user_data,
2182 		struct rte_flow_error *error)
2183 {
2184 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2185 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2186 	int ret;
2187 
2188 	ret = ops->async_action_handle_destroy(dev, queue_id, op_attr,
2189 					   action_handle, user_data, error);
2190 	ret = flow_err(port_id, ret, error);
2191 
2192 	rte_flow_trace_async_action_handle_destroy(port_id, queue_id, op_attr,
2193 						   action_handle, user_data, ret);
2194 
2195 	return ret;
2196 }
2197 
2198 int
2199 rte_flow_async_action_handle_update(uint16_t port_id,
2200 		uint32_t queue_id,
2201 		const struct rte_flow_op_attr *op_attr,
2202 		struct rte_flow_action_handle *action_handle,
2203 		const void *update,
2204 		void *user_data,
2205 		struct rte_flow_error *error)
2206 {
2207 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2208 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2209 	int ret;
2210 
2211 	ret = ops->async_action_handle_update(dev, queue_id, op_attr,
2212 					  action_handle, update, user_data, error);
2213 	ret = flow_err(port_id, ret, error);
2214 
2215 	rte_flow_trace_async_action_handle_update(port_id, queue_id, op_attr,
2216 						  action_handle, update,
2217 						  user_data, ret);
2218 
2219 	return ret;
2220 }
2221 
2222 int
2223 rte_flow_async_action_handle_query(uint16_t port_id,
2224 		uint32_t queue_id,
2225 		const struct rte_flow_op_attr *op_attr,
2226 		const struct rte_flow_action_handle *action_handle,
2227 		void *data,
2228 		void *user_data,
2229 		struct rte_flow_error *error)
2230 {
2231 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2232 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2233 	int ret;
2234 
2235 	if (unlikely(!ops))
2236 		return -rte_errno;
2237 	ret = ops->async_action_handle_query(dev, queue_id, op_attr,
2238 					  action_handle, data, user_data, error);
2239 	ret = flow_err(port_id, ret, error);
2240 
2241 	rte_flow_trace_async_action_handle_query(port_id, queue_id, op_attr,
2242 						 action_handle, data, user_data,
2243 						 ret);
2244 
2245 	return ret;
2246 }
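
/*
 * Usage sketch (illustrative only): asynchronously reading the shared
 * counter created above; counters is filled once the operation completes
 * and its result has been pulled from the queue.
 *
 *	struct rte_flow_query_count counters = { 0 };
 *
 *	rte_flow_async_action_handle_query(0, 0, &op_attr, h, &counters,
 *					   NULL, &err);
 *	rte_flow_push(0, 0, &err);
 */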
2247 
2248 int
2249 rte_flow_action_handle_query_update(uint16_t port_id,
2250 				    struct rte_flow_action_handle *handle,
2251 				    const void *update, void *query,
2252 				    enum rte_flow_query_update_mode mode,
2253 				    struct rte_flow_error *error)
2254 {
2255 	int ret;
2256 	struct rte_eth_dev *dev;
2257 	const struct rte_flow_ops *ops;
2258 
2259 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2260 	if (!handle)
2261 		return -EINVAL;
2262 	if (!update && !query)
2263 		return -EINVAL;
2264 	dev = &rte_eth_devices[port_id];
2265 	ops = rte_flow_ops_get(port_id, error);
2266 	if (!ops || !ops->action_handle_query_update)
2267 		return -ENOTSUP;
2268 	ret = ops->action_handle_query_update(dev, handle, update,
2269 					      query, mode, error);
2270 	return flow_err(port_id, ret, error);
2271 }
2272 
2273 int
2274 rte_flow_async_action_handle_query_update(uint16_t port_id, uint32_t queue_id,
2275 					  const struct rte_flow_op_attr *attr,
2276 					  struct rte_flow_action_handle *handle,
2277 					  const void *update, void *query,
2278 					  enum rte_flow_query_update_mode mode,
2279 					  void *user_data,
2280 					  struct rte_flow_error *error)
2281 {
2282 	int ret;
2283 	struct rte_eth_dev *dev;
2284 	const struct rte_flow_ops *ops;
2285 
2286 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2287 	if (!handle)
2288 		return -EINVAL;
2289 	if (!update && !query)
2290 		return -EINVAL;
2291 	dev = &rte_eth_devices[port_id];
2292 	ops = rte_flow_ops_get(port_id, error);
2293 	if (!ops || !ops->async_action_handle_query_update)
2294 		return -ENOTSUP;
2295 	ret = ops->async_action_handle_query_update(dev, queue_id, attr,
2296 						    handle, update,
2297 						    query, mode,
2298 						    user_data, error);
2299 	return flow_err(port_id, ret, error);
2300 }
2301 
2302 struct rte_flow_action_list_handle *
2303 rte_flow_action_list_handle_create(uint16_t port_id,
				   const struct rte_flow_indir_action_conf *conf,
2306 				   const struct rte_flow_action *actions,
2307 				   struct rte_flow_error *error)
2308 {
2309 	int ret;
2310 	struct rte_eth_dev *dev;
2311 	const struct rte_flow_ops *ops;
2312 	struct rte_flow_action_list_handle *handle;
2313 
2314 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
2315 	ops = rte_flow_ops_get(port_id, error);
2316 	if (!ops || !ops->action_list_handle_create) {
2317 		rte_flow_error_set(error, ENOTSUP,
2318 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2319 				   "action_list handle not supported");
2320 		return NULL;
2321 	}
2322 	dev = &rte_eth_devices[port_id];
2323 	handle = ops->action_list_handle_create(dev, conf, actions, error);
	/* Convert errno only on failure; rte_errno may be stale on success. */
	ret = handle == NULL ? flow_err(port_id, -rte_errno, error) : 0;
2325 	rte_flow_trace_action_list_handle_create(port_id, conf, actions, ret);
2326 	return handle;
2327 }
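
/*
 * Usage sketch (illustrative only): one indirect handle standing for a
 * whole action chain (count, then drop) that multiple rules can share.
 *
 *	const struct rte_flow_indir_action_conf list_conf = { .ingress = 1 };
 *	const struct rte_flow_action chain[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_action_list_handle *lh =
 *		rte_flow_action_list_handle_create(0, &list_conf, chain, &err);
 */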
2328 
2329 int
2330 rte_flow_action_list_handle_destroy(uint16_t port_id,
2331 				    struct rte_flow_action_list_handle *handle,
2332 				    struct rte_flow_error *error)
2333 {
2334 	int ret;
2335 	struct rte_eth_dev *dev;
2336 	const struct rte_flow_ops *ops;
2337 
2338 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2339 	ops = rte_flow_ops_get(port_id, error);
2340 	if (!ops || !ops->action_list_handle_destroy)
2341 		return rte_flow_error_set(error, ENOTSUP,
2342 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2343 					  "action_list handle not supported");
2344 	dev = &rte_eth_devices[port_id];
2345 	ret = ops->action_list_handle_destroy(dev, handle, error);
2346 	ret = flow_err(port_id, ret, error);
2347 	rte_flow_trace_action_list_handle_destroy(port_id, handle, ret);
2348 	return ret;
2349 }
2350 
2351 struct rte_flow_action_list_handle *
2352 rte_flow_async_action_list_handle_create(uint16_t port_id, uint32_t queue_id,
2353 					 const struct rte_flow_op_attr *attr,
2354 					 const struct rte_flow_indir_action_conf *conf,
2355 					 const struct rte_flow_action *actions,
2356 					 void *user_data,
2357 					 struct rte_flow_error *error)
2358 {
2359 	int ret;
2360 	struct rte_eth_dev *dev;
2361 	const struct rte_flow_ops *ops;
2362 	struct rte_flow_action_list_handle *handle;
2363 
2364 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
2365 	ops = rte_flow_ops_get(port_id, error);
2366 	if (!ops || !ops->async_action_list_handle_create) {
2367 		rte_flow_error_set(error, ENOTSUP,
2368 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2369 				   "action_list handle not supported");
2370 		return NULL;
2371 	}
2372 	dev = &rte_eth_devices[port_id];
2373 	handle = ops->async_action_list_handle_create(dev, queue_id, attr, conf,
2374 						      actions, user_data,
2375 						      error);
	/* Convert errno only on failure; rte_errno may be stale on success. */
	ret = handle == NULL ? flow_err(port_id, -rte_errno, error) : 0;
2377 	rte_flow_trace_async_action_list_handle_create(port_id, queue_id, attr,
2378 						       conf, actions, user_data,
2379 						       ret);
2380 	return handle;
2381 }
2382 
2383 int
2384 rte_flow_async_action_list_handle_destroy(uint16_t port_id, uint32_t queue_id,
2385 				 const struct rte_flow_op_attr *op_attr,
2386 				 struct rte_flow_action_list_handle *handle,
2387 				 void *user_data, struct rte_flow_error *error)
2388 {
2389 	int ret;
2390 	struct rte_eth_dev *dev;
2391 	const struct rte_flow_ops *ops;
2392 
2393 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2394 	ops = rte_flow_ops_get(port_id, error);
2395 	if (!ops || !ops->async_action_list_handle_destroy)
2396 		return rte_flow_error_set(error, ENOTSUP,
2397 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2398 					  "async action_list handle not supported");
2399 	dev = &rte_eth_devices[port_id];
2400 	ret = ops->async_action_list_handle_destroy(dev, queue_id, op_attr,
2401 						    handle, user_data, error);
2402 	ret = flow_err(port_id, ret, error);
2403 	rte_flow_trace_async_action_list_handle_destroy(port_id, queue_id,
2404 							op_attr, handle,
2405 							user_data, ret);
2406 	return ret;
2407 }
2408 
2409 int
2410 rte_flow_action_list_handle_query_update(uint16_t port_id,
2411 			 const struct rte_flow_action_list_handle *handle,
2412 			 const void **update, void **query,
2413 			 enum rte_flow_query_update_mode mode,
2414 			 struct rte_flow_error *error)
2415 {
2416 	int ret;
2417 	struct rte_eth_dev *dev;
2418 	const struct rte_flow_ops *ops;
2419 
2420 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2421 	ops = rte_flow_ops_get(port_id, error);
2422 	if (!ops || !ops->action_list_handle_query_update)
2423 		return rte_flow_error_set(error, ENOTSUP,
2424 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2425 					  "action_list query_update not supported");
2426 	dev = &rte_eth_devices[port_id];
2427 	ret = ops->action_list_handle_query_update(dev, handle, update, query,
2428 						   mode, error);
2429 	ret = flow_err(port_id, ret, error);
2430 	rte_flow_trace_action_list_handle_query_update(port_id, handle, update,
2431 						       query, mode, ret);
2432 	return ret;
2433 }
2434 
2435 int
2436 rte_flow_async_action_list_handle_query_update(uint16_t port_id, uint32_t queue_id,
2437 			 const struct rte_flow_op_attr *attr,
2438 			 const struct rte_flow_action_list_handle *handle,
2439 			 const void **update, void **query,
2440 			 enum rte_flow_query_update_mode mode,
2441 			 void *user_data, struct rte_flow_error *error)
2442 {
2443 	int ret;
2444 	struct rte_eth_dev *dev;
2445 	const struct rte_flow_ops *ops;
2446 
2447 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2448 	ops = rte_flow_ops_get(port_id, error);
2449 	if (!ops || !ops->async_action_list_handle_query_update)
2450 		return rte_flow_error_set(error, ENOTSUP,
2451 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2452 					  "action_list async query_update not supported");
2453 	dev = &rte_eth_devices[port_id];
2454 	ret = ops->async_action_list_handle_query_update(dev, queue_id, attr,
2455 							 handle, update, query,
2456 							 mode, user_data,
2457 							 error);
2458 	ret = flow_err(port_id, ret, error);
2459 	rte_flow_trace_async_action_list_handle_query_update(port_id, queue_id,
2460 							     attr, handle,
2461 							     update, query,
2462 							     mode, user_data,
2463 							     ret);
2464 	return ret;
2465 }
2466 
2467 int
2468 rte_flow_calc_table_hash(uint16_t port_id, const struct rte_flow_template_table *table,
2469 			 const struct rte_flow_item pattern[], uint8_t pattern_template_index,
2470 			 uint32_t *hash, struct rte_flow_error *error)
2471 {
2472 	int ret;
2473 	struct rte_eth_dev *dev;
2474 	const struct rte_flow_ops *ops;
2475 
2476 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2477 	ops = rte_flow_ops_get(port_id, error);
2478 	if (!ops || !ops->flow_calc_table_hash)
2479 		return rte_flow_error_set(error, ENOTSUP,
2480 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  "flow table hash calculation not supported");
2482 	dev = &rte_eth_devices[port_id];
2483 	ret = ops->flow_calc_table_hash(dev, table, pattern, pattern_template_index,
2484 					hash, error);
2485 	return flow_err(port_id, ret, error);
2486 }
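
/*
 * Usage sketch (illustrative only): asking the PMD which hash it would
 * compute for pattern against pattern template 0 of tbl, e.g. to predict
 * rule placement before an insertion by index.
 *
 *	uint32_t hash = 0;
 *
 *	if (rte_flow_calc_table_hash(0, tbl, pattern, 0, &hash, &err) == 0)
 *		printf("table hash: 0x%08x\n", hash);
 */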
2487