xref: /dpdk/lib/ethdev/rte_flow.c (revision 60531a2c53f4d2b4b96ebb10ca813f62d0a5508d)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <pthread.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_branch_prediction.h>
#include <rte_string_fns.h>
#include <rte_mbuf_dyn.h>
#include "rte_flow_driver.h"
#include "rte_flow.h"

#include "ethdev_trace.h"

/* Mbuf dynamic field name for metadata. */
int32_t rte_flow_dynf_metadata_offs = -1;

/* Mbuf dynamic field flag bit number for metadata. */
uint64_t rte_flow_dynf_metadata_mask;

/**
 * Flow elements description tables.
 */
struct rte_flow_desc_data {
	const char *name;
	size_t size;
	size_t (*desc_fn)(void *dst, const void *src);
};

/**
 * Copy flow element specification into a buffer.
 *
 * @param buf
 *   Destination memory.
 * @param data
 *   Source memory.
 * @param size
 *   Requested copy size.
 * @param desc
 *   rte_flow_desc_item - for flow item conversion.
 *   rte_flow_desc_action - for flow action conversion.
 * @param type
 *   Offset into the desc param or negative value for private flow elements.
 *
 * @return
 *   Number of bytes needed to store the flow element regardless of @p size.
 */
static inline size_t
rte_flow_conv_copy(void *buf, const void *data, const size_t size,
		   const struct rte_flow_desc_data *desc, int type)
{
	/* Allow PMD private flow items. */
	bool rte_type = type >= 0;

	size_t sz = rte_type ? desc[type].size : sizeof(void *);
	if (buf == NULL || data == NULL)
		return 0;
	rte_memcpy(buf, data, (size > sz ? sz : size));
	if (rte_type && desc[type].desc_fn)
		sz += desc[type].desc_fn(size > 0 ? buf : NULL, data);
	return sz;
}

static size_t
rte_flow_item_flex_conv(void *buf, const void *data)
{
	struct rte_flow_item_flex *dst = buf;
	const struct rte_flow_item_flex *src = data;
	if (buf) {
		dst->pattern = rte_memcpy
			((void *)((uintptr_t)(dst + 1)), src->pattern,
			 src->length);
	}
	return src->length;
}

/** Generate flow_item[] entry. */
#define MK_FLOW_ITEM(t, s) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
		.desc_fn = NULL, \
	}

#define MK_FLOW_ITEM_FN(t, s, fn) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
		.desc_fn = fn, \
	}

/** Information about known flow pattern items. */
static const struct rte_flow_desc_data rte_flow_desc_item[] = {
	MK_FLOW_ITEM(END, 0),
	MK_FLOW_ITEM(VOID, 0),
	MK_FLOW_ITEM(INVERT, 0),
	MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
	MK_FLOW_ITEM(PORT_ID, sizeof(struct rte_flow_item_port_id)),
	MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)),
	MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
	MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
	MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
	MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
	MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
	MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
	MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
	MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
	MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
	MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
	MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
	MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
	MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
	MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)),
	MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(ESP, sizeof(struct rte_flow_item_esp)),
	MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
	MK_FLOW_ITEM(VXLAN_GPE, sizeof(struct rte_flow_item_vxlan_gpe)),
	MK_FLOW_ITEM(ARP_ETH_IPV4, sizeof(struct rte_flow_item_arp_eth_ipv4)),
	MK_FLOW_ITEM(IPV6_EXT, sizeof(struct rte_flow_item_ipv6_ext)),
	MK_FLOW_ITEM(IPV6_FRAG_EXT, sizeof(struct rte_flow_item_ipv6_frag_ext)),
	MK_FLOW_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
	MK_FLOW_ITEM(ICMP6_ECHO_REQUEST, sizeof(struct rte_flow_item_icmp6_echo)),
	MK_FLOW_ITEM(ICMP6_ECHO_REPLY, sizeof(struct rte_flow_item_icmp6_echo)),
	MK_FLOW_ITEM(ICMP6_ND_NS, sizeof(struct rte_flow_item_icmp6_nd_ns)),
	MK_FLOW_ITEM(ICMP6_ND_NA, sizeof(struct rte_flow_item_icmp6_nd_na)),
	MK_FLOW_ITEM(ICMP6_ND_OPT, sizeof(struct rte_flow_item_icmp6_nd_opt)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_SLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_TLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
	MK_FLOW_ITEM(MARK, sizeof(struct rte_flow_item_mark)),
	MK_FLOW_ITEM(META, sizeof(struct rte_flow_item_meta)),
	MK_FLOW_ITEM(TAG, sizeof(struct rte_flow_item_tag)),
	MK_FLOW_ITEM(GRE_KEY, sizeof(rte_be32_t)),
	MK_FLOW_ITEM(GRE_OPTION, sizeof(struct rte_flow_item_gre_opt)),
	MK_FLOW_ITEM(GTP_PSC, sizeof(struct rte_flow_item_gtp_psc)),
	MK_FLOW_ITEM(PPPOES, sizeof(struct rte_flow_item_pppoe)),
	MK_FLOW_ITEM(PPPOED, sizeof(struct rte_flow_item_pppoe)),
	MK_FLOW_ITEM(PPPOE_PROTO_ID,
			sizeof(struct rte_flow_item_pppoe_proto_id)),
	MK_FLOW_ITEM(NSH, sizeof(struct rte_flow_item_nsh)),
	MK_FLOW_ITEM(IGMP, sizeof(struct rte_flow_item_igmp)),
	MK_FLOW_ITEM(AH, sizeof(struct rte_flow_item_ah)),
	MK_FLOW_ITEM(HIGIG2, sizeof(struct rte_flow_item_higig2_hdr)),
	MK_FLOW_ITEM(L2TPV3OIP, sizeof(struct rte_flow_item_l2tpv3oip)),
	MK_FLOW_ITEM(PFCP, sizeof(struct rte_flow_item_pfcp)),
	MK_FLOW_ITEM(ECPRI, sizeof(struct rte_flow_item_ecpri)),
	MK_FLOW_ITEM(GENEVE_OPT, sizeof(struct rte_flow_item_geneve_opt)),
	MK_FLOW_ITEM(INTEGRITY, sizeof(struct rte_flow_item_integrity)),
	MK_FLOW_ITEM(CONNTRACK, sizeof(uint32_t)),
	MK_FLOW_ITEM(PORT_REPRESENTOR, sizeof(struct rte_flow_item_ethdev)),
	MK_FLOW_ITEM(REPRESENTED_PORT, sizeof(struct rte_flow_item_ethdev)),
	MK_FLOW_ITEM_FN(FLEX, sizeof(struct rte_flow_item_flex),
			rte_flow_item_flex_conv),
	MK_FLOW_ITEM(L2TPV2, sizeof(struct rte_flow_item_l2tpv2)),
	MK_FLOW_ITEM(PPP, sizeof(struct rte_flow_item_ppp)),
	MK_FLOW_ITEM(METER_COLOR, sizeof(struct rte_flow_item_meter_color)),
	MK_FLOW_ITEM(IPV6_ROUTING_EXT, sizeof(struct rte_flow_item_ipv6_routing_ext)),
	MK_FLOW_ITEM(QUOTA, sizeof(struct rte_flow_item_quota)),
	MK_FLOW_ITEM(AGGR_AFFINITY, sizeof(struct rte_flow_item_aggr_affinity)),
	MK_FLOW_ITEM(TX_QUEUE, sizeof(struct rte_flow_item_tx_queue)),
	MK_FLOW_ITEM(IB_BTH, sizeof(struct rte_flow_item_ib_bth)),
	MK_FLOW_ITEM(PTYPE, sizeof(struct rte_flow_item_ptype)),
};

/** Generate flow_action[] entry. */
#define MK_FLOW_ACTION(t, s) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
		.desc_fn = NULL, \
	}

#define MK_FLOW_ACTION_FN(t, fn) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = 0, \
		.desc_fn = fn, \
	}

/** Information about known flow actions. */
static const struct rte_flow_desc_data rte_flow_desc_action[] = {
	MK_FLOW_ACTION(END, 0),
	MK_FLOW_ACTION(VOID, 0),
	MK_FLOW_ACTION(PASSTHRU, 0),
	MK_FLOW_ACTION(JUMP, sizeof(struct rte_flow_action_jump)),
	MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
	MK_FLOW_ACTION(FLAG, 0),
	MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
	MK_FLOW_ACTION(DROP, 0),
	MK_FLOW_ACTION(COUNT, sizeof(struct rte_flow_action_count)),
	MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)),
	MK_FLOW_ACTION(PF, 0),
	MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
	MK_FLOW_ACTION(PORT_ID, sizeof(struct rte_flow_action_port_id)),
	MK_FLOW_ACTION(METER, sizeof(struct rte_flow_action_meter)),
	MK_FLOW_ACTION(SECURITY, sizeof(struct rte_flow_action_security)),
	MK_FLOW_ACTION(OF_DEC_NW_TTL, 0),
	MK_FLOW_ACTION(OF_POP_VLAN, 0),
	MK_FLOW_ACTION(OF_PUSH_VLAN,
		       sizeof(struct rte_flow_action_of_push_vlan)),
	MK_FLOW_ACTION(OF_SET_VLAN_VID,
		       sizeof(struct rte_flow_action_of_set_vlan_vid)),
	MK_FLOW_ACTION(OF_SET_VLAN_PCP,
		       sizeof(struct rte_flow_action_of_set_vlan_pcp)),
	MK_FLOW_ACTION(OF_POP_MPLS,
		       sizeof(struct rte_flow_action_of_pop_mpls)),
	MK_FLOW_ACTION(OF_PUSH_MPLS,
		       sizeof(struct rte_flow_action_of_push_mpls)),
	MK_FLOW_ACTION(VXLAN_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
	MK_FLOW_ACTION(VXLAN_DECAP, 0),
	MK_FLOW_ACTION(NVGRE_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
	MK_FLOW_ACTION(NVGRE_DECAP, 0),
	MK_FLOW_ACTION(RAW_ENCAP, sizeof(struct rte_flow_action_raw_encap)),
	MK_FLOW_ACTION(RAW_DECAP, sizeof(struct rte_flow_action_raw_decap)),
	MK_FLOW_ACTION(SET_IPV4_SRC,
		       sizeof(struct rte_flow_action_set_ipv4)),
	MK_FLOW_ACTION(SET_IPV4_DST,
		       sizeof(struct rte_flow_action_set_ipv4)),
	MK_FLOW_ACTION(SET_IPV6_SRC,
		       sizeof(struct rte_flow_action_set_ipv6)),
	MK_FLOW_ACTION(SET_IPV6_DST,
		       sizeof(struct rte_flow_action_set_ipv6)),
	MK_FLOW_ACTION(SET_TP_SRC,
		       sizeof(struct rte_flow_action_set_tp)),
	MK_FLOW_ACTION(SET_TP_DST,
		       sizeof(struct rte_flow_action_set_tp)),
	MK_FLOW_ACTION(MAC_SWAP, 0),
	MK_FLOW_ACTION(DEC_TTL, 0),
	MK_FLOW_ACTION(SET_TTL, sizeof(struct rte_flow_action_set_ttl)),
	MK_FLOW_ACTION(SET_MAC_SRC, sizeof(struct rte_flow_action_set_mac)),
	MK_FLOW_ACTION(SET_MAC_DST, sizeof(struct rte_flow_action_set_mac)),
	MK_FLOW_ACTION(INC_TCP_SEQ, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(DEC_TCP_SEQ, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(INC_TCP_ACK, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(DEC_TCP_ACK, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(SET_TAG, sizeof(struct rte_flow_action_set_tag)),
	MK_FLOW_ACTION(SET_META, sizeof(struct rte_flow_action_set_meta)),
	MK_FLOW_ACTION(SET_IPV4_DSCP, sizeof(struct rte_flow_action_set_dscp)),
	MK_FLOW_ACTION(SET_IPV6_DSCP, sizeof(struct rte_flow_action_set_dscp)),
	MK_FLOW_ACTION(AGE, sizeof(struct rte_flow_action_age)),
	MK_FLOW_ACTION(SAMPLE, sizeof(struct rte_flow_action_sample)),
	MK_FLOW_ACTION(MODIFY_FIELD,
		       sizeof(struct rte_flow_action_modify_field)),
	/**
	 * Indirect action represented as handle of type
	 * (struct rte_flow_action_handle *) stored in conf field (see
	 * struct rte_flow_action); no need for additional structure to
	 * store indirect action handle.
	 */
	MK_FLOW_ACTION(INDIRECT, 0),
	MK_FLOW_ACTION(CONNTRACK, sizeof(struct rte_flow_action_conntrack)),
	MK_FLOW_ACTION(PORT_REPRESENTOR, sizeof(struct rte_flow_action_ethdev)),
	MK_FLOW_ACTION(REPRESENTED_PORT, sizeof(struct rte_flow_action_ethdev)),
	MK_FLOW_ACTION(METER_MARK, sizeof(struct rte_flow_action_meter_mark)),
	MK_FLOW_ACTION(SEND_TO_KERNEL, 0),
	MK_FLOW_ACTION(QUOTA, sizeof(struct rte_flow_action_quota)),
	MK_FLOW_ACTION(IPV6_EXT_PUSH, sizeof(struct rte_flow_action_ipv6_ext_push)),
	MK_FLOW_ACTION(IPV6_EXT_REMOVE, sizeof(struct rte_flow_action_ipv6_ext_remove)),
	MK_FLOW_ACTION(INDIRECT_LIST,
		       sizeof(struct rte_flow_action_indirect_list)),
	MK_FLOW_ACTION(PROG,
		       sizeof(struct rte_flow_action_prog)),
};

int
rte_flow_dynf_metadata_register(void)
{
	int offset;
	int flag;

	static const struct rte_mbuf_dynfield desc_offs = {
		.name = RTE_MBUF_DYNFIELD_METADATA_NAME,
		.size = sizeof(uint32_t),
		.align = __alignof__(uint32_t),
	};
	static const struct rte_mbuf_dynflag desc_flag = {
		.name = RTE_MBUF_DYNFLAG_METADATA_NAME,
	};

	offset = rte_mbuf_dynfield_register(&desc_offs);
	if (offset < 0)
		goto error;
	flag = rte_mbuf_dynflag_register(&desc_flag);
	if (flag < 0)
		goto error;
	rte_flow_dynf_metadata_offs = offset;
	rte_flow_dynf_metadata_mask = RTE_BIT64(flag);

	rte_flow_trace_dynf_metadata_register(offset, RTE_BIT64(flag));

	return 0;

error:
	rte_flow_dynf_metadata_offs = -1;
	rte_flow_dynf_metadata_mask = UINT64_C(0);
	return -rte_errno;
}

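/*
 * Usage sketch (editorial illustration, not part of the original file):
 * an application registers the metadata dynamic field once, before
 * creating flow rules that match or set metadata, and then reads it on
 * received mbufs through the RTE_FLOW_DYNF_METADATA() accessor from
 * rte_flow.h.
 *
 *	if (rte_flow_dynf_metadata_register() < 0)
 *		rte_exit(EXIT_FAILURE, "cannot register metadata field");
 *	...
 *	if (mbuf->ol_flags & rte_flow_dynf_metadata_mask)
 *		metadata = *RTE_FLOW_DYNF_METADATA(mbuf);
 */
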
static inline void
fts_enter(struct rte_eth_dev *dev)
{
	if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
		pthread_mutex_lock(&dev->data->flow_ops_mutex);
}

static inline void
fts_exit(struct rte_eth_dev *dev)
{
	if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
		pthread_mutex_unlock(&dev->data->flow_ops_mutex);
}

static int
flow_err(uint16_t port_id, int ret, struct rte_flow_error *error)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return rte_flow_error_set(error, EIO,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(EIO));
	return ret;
}

/* Get generic flow operations structure from a port. */
const struct rte_flow_ops *
rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops;
	int code;

	if (unlikely(!rte_eth_dev_is_valid_port(port_id)))
		code = ENODEV;
	else if (unlikely(dev->dev_ops->flow_ops_get == NULL))
		/* flow API not supported with this driver dev_ops */
		code = ENOSYS;
	else
		code = dev->dev_ops->flow_ops_get(dev, &ops);
	if (code == 0 && ops == NULL)
		/* flow API not supported with this device */
		code = ENOSYS;

	if (code != 0) {
		rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, rte_strerror(code));
		return NULL;
	}
	return ops;
}

/* Check whether a flow rule can be created on a given port. */
int
rte_flow_validate(uint16_t port_id,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;

	if (likely(!!attr) && attr->transfer &&
	    (attr->ingress || attr->egress)) {
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ATTR,
					  attr, "cannot use attr ingress/egress with attr transfer");
	}

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->validate)) {
		fts_enter(dev);
		ret = ops->validate(dev, attr, pattern, actions, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_validate(port_id, attr, pattern, actions, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Create a flow rule on a given port. */
struct rte_flow *
rte_flow_create(uint16_t port_id,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct rte_flow *flow;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return NULL;
	if (likely(!!ops->create)) {
		fts_enter(dev);
		flow = ops->create(dev, attr, pattern, actions, error);
		fts_exit(dev);
		if (flow == NULL)
			flow_err(port_id, -rte_errno, error);

		rte_flow_trace_create(port_id, attr, pattern, actions, flow);

		return flow;
	}
	rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(ENOSYS));
	return NULL;
}

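/*
 * Usage sketch (editorial illustration, not part of the original file):
 * a typical caller validates a rule before creating it. The port number,
 * queue index and the minimal ETH pattern are assumptions made for the
 * example.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *flow = NULL;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 */
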
/* Destroy a flow rule on a given port. */
int
rte_flow_destroy(uint16_t port_id,
		 struct rte_flow *flow,
		 struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->destroy)) {
		fts_enter(dev);
		ret = ops->destroy(dev, flow, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_destroy(port_id, flow, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

int
rte_flow_actions_update(uint16_t port_id,
			struct rte_flow *flow,
			const struct rte_flow_action actions[],
			struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->actions_update)) {
		fts_enter(dev);
		ret = ops->actions_update(dev, flow, actions, error);
		fts_exit(dev);

		rte_flow_trace_actions_update(port_id, flow, actions, ret);

		return flow_err(port_id, ret, error);
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Destroy all flow rules associated with a port. */
int
rte_flow_flush(uint16_t port_id,
	       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->flush)) {
		fts_enter(dev);
		ret = ops->flush(dev, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_flush(port_id, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Query an existing flow rule. */
int
rte_flow_query(uint16_t port_id,
	       struct rte_flow *flow,
	       const struct rte_flow_action *action,
	       void *data,
	       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (!ops)
		return -rte_errno;
	if (likely(!!ops->query)) {
		fts_enter(dev);
		ret = ops->query(dev, flow, action, data, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_query(port_id, flow, action, data, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Restrict ingress traffic to the defined flow rules. */
int
rte_flow_isolate(uint16_t port_id,
		 int set,
		 struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (!ops)
		return -rte_errno;
	if (likely(!!ops->isolate)) {
		fts_enter(dev);
		ret = ops->isolate(dev, set, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_isolate(port_id, set, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Initialize flow error structure. */
int
rte_flow_error_set(struct rte_flow_error *error,
		   int code,
		   enum rte_flow_error_type type,
		   const void *cause,
		   const char *message)
{
	if (error) {
		*error = (struct rte_flow_error){
			.type = type,
			.cause = cause,
			.message = message,
		};
	}
	rte_errno = code;
	return -code;
}

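/*
 * Usage sketch (editorial illustration, not part of the original file):
 * drivers typically report failures through rte_flow_error_set(), which
 * fills the caller-provided error structure, sets rte_errno and returns
 * the negated error code in one statement.
 *
 *	if (attr->egress)
 *		return rte_flow_error_set(error, ENOTSUP,
 *					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
 *					  NULL, "egress is not supported");
 */
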
/** Pattern item specification types. */
enum rte_flow_conv_item_spec_type {
	RTE_FLOW_CONV_ITEM_SPEC,
	RTE_FLOW_CONV_ITEM_LAST,
	RTE_FLOW_CONV_ITEM_MASK,
};

/**
 * Copy pattern item specification.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] item
 *   Pattern item to copy specification from.
 * @param type
 *   Specification selector for either @p spec, @p last or @p mask.
 *
 * @return
 *   Number of bytes needed to store pattern item specification regardless
 *   of @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_item_spec(void *buf, const size_t size,
			const struct rte_flow_item *item,
			enum rte_flow_conv_item_spec_type type)
{
	size_t off;
	const void *data =
		type == RTE_FLOW_CONV_ITEM_SPEC ? item->spec :
		type == RTE_FLOW_CONV_ITEM_LAST ? item->last :
		type == RTE_FLOW_CONV_ITEM_MASK ? item->mask :
		NULL;

	switch (item->type) {
		union {
			const struct rte_flow_item_raw *raw;
		} spec;
		union {
			const struct rte_flow_item_raw *raw;
		} last;
		union {
			const struct rte_flow_item_raw *raw;
		} mask;
		union {
			const struct rte_flow_item_raw *raw;
		} src;
		union {
			struct rte_flow_item_raw *raw;
		} dst;
		size_t tmp;

	case RTE_FLOW_ITEM_TYPE_RAW:
		spec.raw = item->spec;
		last.raw = item->last ? item->last : item->spec;
		mask.raw = item->mask ? item->mask : &rte_flow_item_raw_mask;
		src.raw = data;
		dst.raw = buf;
		rte_memcpy(dst.raw,
			   (&(struct rte_flow_item_raw){
				.relative = src.raw->relative,
				.search = src.raw->search,
				.reserved = src.raw->reserved,
				.offset = src.raw->offset,
				.limit = src.raw->limit,
				.length = src.raw->length,
			   }),
			   size > sizeof(*dst.raw) ? sizeof(*dst.raw) : size);
		off = sizeof(*dst.raw);
		if (type == RTE_FLOW_CONV_ITEM_SPEC ||
		    (type == RTE_FLOW_CONV_ITEM_MASK &&
		     ((spec.raw->length & mask.raw->length) >=
		      (last.raw->length & mask.raw->length))))
			tmp = spec.raw->length & mask.raw->length;
		else
			tmp = last.raw->length & mask.raw->length;
		if (tmp) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.raw->pattern));
			if (size >= off + tmp)
				dst.raw->pattern = rte_memcpy
					((void *)((uintptr_t)dst.raw + off),
					 src.raw->pattern, tmp);
			off += tmp;
		}
		break;
	default:
		off = rte_flow_conv_copy(buf, data, size,
					 rte_flow_desc_item, item->type);
		break;
	}
	return off;
}

/**
 * Copy action configuration.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] action
 *   Action to copy configuration from.
 *
 * @return
 *   Number of bytes needed to store the action configuration regardless
 *   of @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_action_conf(void *buf, const size_t size,
			  const struct rte_flow_action *action)
{
	size_t off;

	switch (action->type) {
		union {
			const struct rte_flow_action_rss *rss;
			const struct rte_flow_action_vxlan_encap *vxlan_encap;
			const struct rte_flow_action_nvgre_encap *nvgre_encap;
		} src;
		union {
			struct rte_flow_action_rss *rss;
			struct rte_flow_action_vxlan_encap *vxlan_encap;
			struct rte_flow_action_nvgre_encap *nvgre_encap;
		} dst;
		size_t tmp;
		int ret;

	case RTE_FLOW_ACTION_TYPE_RSS:
		src.rss = action->conf;
		dst.rss = buf;
		rte_memcpy(dst.rss,
			   (&(struct rte_flow_action_rss){
				.func = src.rss->func,
				.level = src.rss->level,
				.types = src.rss->types,
				.key_len = src.rss->key_len,
				.queue_num = src.rss->queue_num,
			   }),
			   size > sizeof(*dst.rss) ? sizeof(*dst.rss) : size);
		off = sizeof(*dst.rss);
		if (src.rss->key_len && src.rss->key) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->key));
			tmp = sizeof(*src.rss->key) * src.rss->key_len;
			if (size >= (uint64_t)off + (uint64_t)tmp)
				dst.rss->key = rte_memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->key, tmp);
			off += tmp;
		}
		if (src.rss->queue_num) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->queue));
			tmp = sizeof(*src.rss->queue) * src.rss->queue_num;
			if (size >= (uint64_t)off + (uint64_t)tmp)
				dst.rss->queue = rte_memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->queue, tmp);
			off += tmp;
		}
		break;
	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
	case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
		src.vxlan_encap = action->conf;
		dst.vxlan_encap = buf;
		RTE_BUILD_BUG_ON(sizeof(*src.vxlan_encap) !=
				 sizeof(*src.nvgre_encap) ||
				 offsetof(struct rte_flow_action_vxlan_encap,
					  definition) !=
				 offsetof(struct rte_flow_action_nvgre_encap,
					  definition));
		off = sizeof(*dst.vxlan_encap);
		if (src.vxlan_encap->definition) {
			off = RTE_ALIGN_CEIL
				(off, sizeof(*dst.vxlan_encap->definition));
			ret = rte_flow_conv
				(RTE_FLOW_CONV_OP_PATTERN,
				 (void *)((uintptr_t)dst.vxlan_encap + off),
				 size > off ? size - off : 0,
				 src.vxlan_encap->definition, NULL);
			if (ret < 0)
				return 0;
			if (size >= off + ret)
				dst.vxlan_encap->definition =
					(void *)((uintptr_t)dst.vxlan_encap +
						 off);
			off += ret;
		}
		break;
	default:
		off = rte_flow_conv_copy(buf, action->conf, size,
					 rte_flow_desc_action, action->type);
		break;
	}
	return off;
}

/**
 * Copy a list of pattern items.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source pattern items.
 * @param num
 *   Maximum number of pattern items to process from @p src or 0 to process
 *   the entire list. In both cases, processing stops after
 *   RTE_FLOW_ITEM_TYPE_END is encountered.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store
 *   pattern items regardless of @p size on success (@p dst contents are
 *   truncated to @p size if not large enough), a negative errno value
 *   otherwise and rte_errno is set.
 */
static int
rte_flow_conv_pattern(struct rte_flow_item *dst,
		      const size_t size,
		      const struct rte_flow_item *src,
		      unsigned int num,
		      struct rte_flow_error *error)
{
	uintptr_t data = (uintptr_t)dst;
	size_t off;
	size_t ret;
	unsigned int i;

	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
		/* Allow PMD private flow items. */
		if (((int)src->type >= 0) &&
		    ((size_t)src->type >= RTE_DIM(rte_flow_desc_item) ||
		     !rte_flow_desc_item[src->type].name))
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, src,
				 "cannot convert unknown item type");
		if (size >= off + sizeof(*dst))
			*dst = (struct rte_flow_item){
				.type = src->type,
			};
		off += sizeof(*dst);
		if (!src->type)
			num = i + 1;
	}
	num = i;
	src -= num;
	dst -= num;
	do {
		if (src->spec) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_SPEC);
			if (size && size >= off + ret)
				dst->spec = (void *)(data + off);
			off += ret;
		}
		if (src->last) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_LAST);
			if (size && size >= off + ret)
				dst->last = (void *)(data + off);
			off += ret;
		}
		if (src->mask) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_MASK);
			if (size && size >= off + ret)
				dst->mask = (void *)(data + off);
			off += ret;
		}
		++src;
		++dst;
	} while (--num);
	return off;
}

/**
 * Copy a list of actions.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source actions.
 * @param num
 *   Maximum number of actions to process from @p src or 0 to process the
 *   entire list. In both cases, processing stops after
 *   RTE_FLOW_ACTION_TYPE_END is encountered.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store
 *   actions regardless of @p size on success (@p dst contents are truncated
 *   to @p size if not large enough), a negative errno value otherwise and
 *   rte_errno is set.
 */
static int
rte_flow_conv_actions(struct rte_flow_action *dst,
		      const size_t size,
		      const struct rte_flow_action *src,
		      unsigned int num,
		      struct rte_flow_error *error)
{
	uintptr_t data = (uintptr_t)dst;
	size_t off;
	size_t ret;
	unsigned int i;

	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
		/* Allow PMD private flow actions. */
		if (((int)src->type >= 0) &&
		    ((size_t)src->type >= RTE_DIM(rte_flow_desc_action) ||
		     !rte_flow_desc_action[src->type].name))
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
				 src, "cannot convert unknown action type");
		if (size >= off + sizeof(*dst))
			*dst = (struct rte_flow_action){
				.type = src->type,
			};
		off += sizeof(*dst);
		if (!src->type)
			num = i + 1;
	}
	num = i;
	src -= num;
	dst -= num;
	do {
		if (src->type == RTE_FLOW_ACTION_TYPE_INDIRECT) {
			/*
			 * Indirect action conf fills the indirect action
			 * handle. Copy the action handle directly instead
			 * of duplicating the pointer memory.
			 */
			if (size)
				dst->conf = src->conf;
		} else if (src->conf) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_action_conf
				((void *)(data + off),
				 size > off ? size - off : 0, src);
			if (size && size >= off + ret)
				dst->conf = (void *)(data + off);
			off += ret;
		}
		++src;
		++dst;
	} while (--num);
	return off;
}

/**
 * Copy flow rule components.
 *
 * This comprises the flow rule descriptor itself, attributes, pattern and
 * actions list. NULL components in @p src are skipped.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source flow rule descriptor.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store all
 *   components including the descriptor regardless of @p size on success
 *   (@p dst contents are truncated to @p size if not large enough), a
 *   negative errno value otherwise and rte_errno is set.
 */
static int
rte_flow_conv_rule(struct rte_flow_conv_rule *dst,
		   const size_t size,
		   const struct rte_flow_conv_rule *src,
		   struct rte_flow_error *error)
{
	size_t off;
	int ret;

	rte_memcpy(dst,
		   (&(struct rte_flow_conv_rule){
			.attr = NULL,
			.pattern = NULL,
			.actions = NULL,
		   }),
		   size > sizeof(*dst) ? sizeof(*dst) : size);
	off = sizeof(*dst);
	if (src->attr_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		if (size && size >= off + sizeof(*dst->attr))
			dst->attr = rte_memcpy
				((void *)((uintptr_t)dst + off),
				 src->attr_ro, sizeof(*dst->attr));
		off += sizeof(*dst->attr);
	}
	if (src->pattern_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		ret = rte_flow_conv_pattern((void *)((uintptr_t)dst + off),
					    size > off ? size - off : 0,
					    src->pattern_ro, 0, error);
		if (ret < 0)
			return ret;
		if (size && size >= off + (size_t)ret)
			dst->pattern = (void *)((uintptr_t)dst + off);
		off += ret;
	}
	if (src->actions_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		ret = rte_flow_conv_actions((void *)((uintptr_t)dst + off),
					    size > off ? size - off : 0,
					    src->actions_ro, 0, error);
		if (ret < 0)
			return ret;
		if (size >= off + (size_t)ret)
			dst->actions = (void *)((uintptr_t)dst + off);
		off += ret;
	}
	return off;
}

/**
 * Retrieve the name of a pattern item/action type.
 *
 * @param is_action
 *   Nonzero when @p src represents an action type instead of a pattern item
 *   type.
 * @param is_ptr
 *   Nonzero to write string address instead of contents into @p dst.
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Depending on @p is_action, source pattern item or action type cast as a
 *   pointer.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store the
 *   name or its address regardless of @p size on success (@p dst contents
 *   are truncated to @p size if not large enough), a negative errno value
 *   otherwise and rte_errno is set.
 */
static int
rte_flow_conv_name(int is_action,
		   int is_ptr,
		   char *dst,
		   const size_t size,
		   const void *src,
		   struct rte_flow_error *error)
{
	struct desc_info {
		const struct rte_flow_desc_data *data;
		size_t num;
	};
	static const struct desc_info info_rep[2] = {
		{ rte_flow_desc_item, RTE_DIM(rte_flow_desc_item), },
		{ rte_flow_desc_action, RTE_DIM(rte_flow_desc_action), },
	};
	const struct desc_info *const info = &info_rep[!!is_action];
	unsigned int type = (uintptr_t)src;

	if (type >= info->num)
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			 "unknown object type to retrieve the name of");
	if (!is_ptr)
		return strlcpy(dst, info->data[type].name, size);
	if (size >= sizeof(const char **))
		*((const char **)dst) = info->data[type].name;
	return sizeof(const char **);
}

/** Helper function to convert flow API objects. */
int
rte_flow_conv(enum rte_flow_conv_op op,
	      void *dst,
	      size_t size,
	      const void *src,
	      struct rte_flow_error *error)
{
	int ret;

	switch (op) {
		const struct rte_flow_attr *attr;

	case RTE_FLOW_CONV_OP_NONE:
		ret = 0;
		break;
	case RTE_FLOW_CONV_OP_ATTR:
		attr = src;
		if (size > sizeof(*attr))
			size = sizeof(*attr);
		rte_memcpy(dst, attr, size);
		ret = sizeof(*attr);
		break;
	case RTE_FLOW_CONV_OP_ITEM:
		ret = rte_flow_conv_pattern(dst, size, src, 1, error);
		break;
	case RTE_FLOW_CONV_OP_ACTION:
		ret = rte_flow_conv_actions(dst, size, src, 1, error);
		break;
	case RTE_FLOW_CONV_OP_PATTERN:
		ret = rte_flow_conv_pattern(dst, size, src, 0, error);
		break;
	case RTE_FLOW_CONV_OP_ACTIONS:
		ret = rte_flow_conv_actions(dst, size, src, 0, error);
		break;
	case RTE_FLOW_CONV_OP_RULE:
		ret = rte_flow_conv_rule(dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ITEM_NAME:
		ret = rte_flow_conv_name(0, 0, dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ACTION_NAME:
		ret = rte_flow_conv_name(1, 0, dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ITEM_NAME_PTR:
		ret = rte_flow_conv_name(0, 1, dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ACTION_NAME_PTR:
		ret = rte_flow_conv_name(1, 1, dst, size, src, error);
		break;
	default:
		ret = rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			 "unknown object conversion operation");
	}

	rte_flow_trace_conv(op, dst, size, src, ret);

	return ret;
}

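/*
 * Usage sketch (editorial illustration, not part of the original file):
 * since rte_flow_conv() always returns the number of bytes needed, a
 * caller can size the destination with a first zero-sized call and copy
 * with a second one. Error handling is omitted for brevity.
 *
 *	struct rte_flow_conv_rule rule = {
 *		.attr_ro = attr,
 *		.pattern_ro = pattern,
 *		.actions_ro = actions,
 *	};
 *	int len = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, &err);
 *
 *	if (len > 0) {
 *		struct rte_flow_conv_rule *copy = malloc(len);
 *
 *		if (copy != NULL)
 *			rte_flow_conv(RTE_FLOW_CONV_OP_RULE, copy, len,
 *				      &rule, &err);
 *	}
 */
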
/** Store a full rte_flow description. */
size_t
rte_flow_copy(struct rte_flow_desc *desc, size_t len,
	      const struct rte_flow_attr *attr,
	      const struct rte_flow_item *items,
	      const struct rte_flow_action *actions)
{
	/*
	 * Overlap struct rte_flow_conv_rule with struct rte_flow_desc in
	 * order to convert the former to the latter without wasting space.
	 */
	struct rte_flow_conv_rule *dst =
		len ?
		(void *)((uintptr_t)desc +
			 (offsetof(struct rte_flow_desc, actions) -
			  offsetof(struct rte_flow_conv_rule, actions))) :
		NULL;
	size_t dst_size =
		len > sizeof(*desc) - sizeof(*dst) ?
		len - (sizeof(*desc) - sizeof(*dst)) :
		0;
	struct rte_flow_conv_rule src = {
		.attr_ro = NULL,
		.pattern_ro = items,
		.actions_ro = actions,
	};
	int ret;

	RTE_BUILD_BUG_ON(sizeof(struct rte_flow_desc) <
			 sizeof(struct rte_flow_conv_rule));
	if (dst_size &&
	    (&dst->pattern != &desc->items ||
	     &dst->actions != &desc->actions ||
	     (uintptr_t)(dst + 1) != (uintptr_t)(desc + 1))) {
		rte_errno = EINVAL;
		return 0;
	}
	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, dst, dst_size, &src, NULL);
	if (ret < 0)
		return 0;
	ret += sizeof(*desc) - sizeof(*dst);
	rte_memcpy(desc,
		   (&(struct rte_flow_desc){
			.size = ret,
			.attr = *attr,
			.items = dst_size ? dst->pattern : NULL,
			.actions = dst_size ? dst->actions : NULL,
		   }),
		   len > sizeof(*desc) ? sizeof(*desc) : len);

	rte_flow_trace_copy(desc, len, attr, items, actions, ret);

	return ret;
}

int
rte_flow_dev_dump(uint16_t port_id, struct rte_flow *flow,
			FILE *file, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->dev_dump)) {
		fts_enter(dev);
		ret = ops->dev_dump(dev, flow, file, error);
		fts_exit(dev);
		return flow_err(port_id, ret, error);
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

int
rte_flow_get_aged_flows(uint16_t port_id, void **contexts,
		    uint32_t nb_contexts, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->get_aged_flows)) {
		fts_enter(dev);
		ret = ops->get_aged_flows(dev, contexts, nb_contexts, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_get_aged_flows(port_id, contexts, nb_contexts, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_get_q_aged_flows(uint16_t port_id, uint32_t queue_id, void **contexts,
			  uint32_t nb_contexts, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->get_q_aged_flows)) {
		fts_enter(dev);
		ret = ops->get_q_aged_flows(dev, queue_id, contexts,
					    nb_contexts, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_get_q_aged_flows(port_id, queue_id, contexts,
						nb_contexts, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

struct rte_flow_action_handle *
rte_flow_action_handle_create(uint16_t port_id,
			      const struct rte_flow_indir_action_conf *conf,
			      const struct rte_flow_action *action,
			      struct rte_flow_error *error)
{
	struct rte_flow_action_handle *handle;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return NULL;
	if (unlikely(!ops->action_handle_create)) {
		rte_flow_error_set(error, ENOSYS,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   rte_strerror(ENOSYS));
		return NULL;
	}
	handle = ops->action_handle_create(&rte_eth_devices[port_id],
					   conf, action, error);
	if (handle == NULL)
		flow_err(port_id, -rte_errno, error);

	rte_flow_trace_action_handle_create(port_id, conf, action, handle);

	return handle;
}

int
rte_flow_action_handle_destroy(uint16_t port_id,
			       struct rte_flow_action_handle *handle,
			       struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(!ops->action_handle_destroy))
		return rte_flow_error_set(error, ENOSYS,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOSYS));
	ret = ops->action_handle_destroy(&rte_eth_devices[port_id],
					 handle, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_action_handle_destroy(port_id, handle, ret);

	return ret;
}

int
rte_flow_action_handle_update(uint16_t port_id,
			      struct rte_flow_action_handle *handle,
			      const void *update,
			      struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(!ops->action_handle_update))
		return rte_flow_error_set(error, ENOSYS,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOSYS));
	ret = ops->action_handle_update(&rte_eth_devices[port_id], handle,
					update, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_action_handle_update(port_id, handle, update, ret);

	return ret;
}

int
rte_flow_action_handle_query(uint16_t port_id,
			     const struct rte_flow_action_handle *handle,
			     void *data,
			     struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(!ops->action_handle_query))
		return rte_flow_error_set(error, ENOSYS,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOSYS));
	ret = ops->action_handle_query(&rte_eth_devices[port_id], handle,
				       data, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_action_handle_query(port_id, handle, data, ret);

	return ret;
}

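/*
 * Usage sketch (editorial illustration, not part of the original file):
 * lifecycle of an indirect action. The handle is attached to flow rules
 * through an RTE_FLOW_ACTION_TYPE_INDIRECT action whose conf is the
 * handle itself (see the comment in rte_flow_desc_action above), then
 * queried and finally destroyed once no rule references it.
 *
 *	struct rte_flow_indir_action_conf iconf = { .ingress = 1 };
 *	struct rte_flow_action count = { .type = RTE_FLOW_ACTION_TYPE_COUNT };
 *	struct rte_flow_query_count stats;
 *	struct rte_flow_action_handle *h;
 *
 *	h = rte_flow_action_handle_create(port_id, &iconf, &count, &err);
 *	if (h != NULL) {
 *		rte_flow_action_handle_query(port_id, h, &stats, &err);
 *		rte_flow_action_handle_destroy(port_id, h, &err);
 *	}
 */
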
int
rte_flow_tunnel_decap_set(uint16_t port_id,
			  struct rte_flow_tunnel *tunnel,
			  struct rte_flow_action **actions,
			  uint32_t *num_of_actions,
			  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_decap_set)) {
		ret = flow_err(port_id,
			       ops->tunnel_decap_set(dev, tunnel, actions,
						     num_of_actions, error),
			       error);

		rte_flow_trace_tunnel_decap_set(port_id, tunnel, actions,
						num_of_actions, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_tunnel_match(uint16_t port_id,
		      struct rte_flow_tunnel *tunnel,
		      struct rte_flow_item **items,
		      uint32_t *num_of_items,
		      struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_match)) {
		ret = flow_err(port_id,
			       ops->tunnel_match(dev, tunnel, items,
						 num_of_items, error),
			       error);

		rte_flow_trace_tunnel_match(port_id, tunnel, items, num_of_items,
					    ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_get_restore_info(uint16_t port_id,
			  struct rte_mbuf *m,
			  struct rte_flow_restore_info *restore_info,
			  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->get_restore_info)) {
		ret = flow_err(port_id,
			       ops->get_restore_info(dev, m, restore_info,
						     error),
			       error);

		rte_flow_trace_get_restore_info(port_id, m, restore_info, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

static struct {
	const struct rte_mbuf_dynflag desc;
	uint64_t value;
} flow_restore_info_dynflag = {
	.desc = { .name = "RTE_MBUF_F_RX_RESTORE_INFO", },
};

uint64_t
rte_flow_restore_info_dynflag(void)
{
	return flow_restore_info_dynflag.value;
}

int
rte_flow_restore_info_dynflag_register(void)
{
	if (flow_restore_info_dynflag.value == 0) {
		int offset = rte_mbuf_dynflag_register(&flow_restore_info_dynflag.desc);

		if (offset < 0)
			return -1;
		flow_restore_info_dynflag.value = RTE_BIT64(offset);
	}

	return 0;
}

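/*
 * Usage sketch (editorial illustration, not part of the original file):
 * the restore info dynflag is registered once at startup; on the datapath
 * rte_flow_get_restore_info() only needs to be called for mbufs carrying
 * the flag. The helper handle_tunnel() is hypothetical.
 *
 *	uint64_t restore_flag;
 *
 *	if (rte_flow_restore_info_dynflag_register() == 0)
 *		restore_flag = rte_flow_restore_info_dynflag();
 *	...
 *	if (mbuf->ol_flags & restore_flag) {
 *		struct rte_flow_restore_info info;
 *
 *		if (rte_flow_get_restore_info(port_id, mbuf, &info, &err) == 0 &&
 *		    (info.flags & RTE_FLOW_RESTORE_INFO_TUNNEL))
 *			handle_tunnel(&info.tunnel);
 *	}
 */
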
int
rte_flow_tunnel_action_decap_release(uint16_t port_id,
				     struct rte_flow_action *actions,
				     uint32_t num_of_actions,
				     struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_action_decap_release)) {
		ret = flow_err(port_id,
			       ops->tunnel_action_decap_release(dev, actions,
								num_of_actions,
								error),
			       error);

		rte_flow_trace_tunnel_action_decap_release(port_id, actions,
							   num_of_actions, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_tunnel_item_release(uint16_t port_id,
			     struct rte_flow_item *items,
			     uint32_t num_of_items,
			     struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_item_release)) {
		ret = flow_err(port_id,
			       ops->tunnel_item_release(dev, items,
							num_of_items, error),
			       error);

		rte_flow_trace_tunnel_item_release(port_id, items, num_of_items, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_pick_transfer_proxy(uint16_t port_id, uint16_t *proxy_port_id,
			     struct rte_flow_error *error)
{
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_eth_dev *dev;
	int ret;

	if (unlikely(ops == NULL))
		return -rte_errno;

	if (ops->pick_transfer_proxy == NULL) {
		*proxy_port_id = port_id;
		return 0;
	}

	dev = &rte_eth_devices[port_id];

	ret = flow_err(port_id,
		       ops->pick_transfer_proxy(dev, proxy_port_id, error),
		       error);

	rte_flow_trace_pick_transfer_proxy(port_id, proxy_port_id, ret);

	return ret;
}

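/*
 * Usage sketch (editorial illustration, not part of the original file):
 * transfer rules are created through the proxy port. When the driver does
 * not implement the callback, the function above falls back to the port
 * itself, so the sequence below works either way.
 *
 *	uint16_t proxy_id;
 *	struct rte_flow_attr attr = { .transfer = 1 };
 *
 *	if (rte_flow_pick_transfer_proxy(port_id, &proxy_id, &err) == 0)
 *		flow = rte_flow_create(proxy_id, &attr, pattern, actions, &err);
 */
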
struct rte_flow_item_flex_handle *
rte_flow_flex_item_create(uint16_t port_id,
			  const struct rte_flow_item_flex_conf *conf,
			  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow_item_flex_handle *handle;

	if (unlikely(!ops))
		return NULL;
	if (unlikely(!ops->flex_item_create)) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, rte_strerror(ENOTSUP));
		return NULL;
	}
	handle = ops->flex_item_create(dev, conf, error);
	if (handle == NULL)
		flow_err(port_id, -rte_errno, error);

	rte_flow_trace_flex_item_create(port_id, conf, handle);

	return handle;
}

int
rte_flow_flex_item_release(uint16_t port_id,
			   const struct rte_flow_item_flex_handle *handle,
			   struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops || !ops->flex_item_release))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOTSUP));
	ret = ops->flex_item_release(dev, handle, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_flex_item_release(port_id, handle, ret);

	return ret;
}

int
rte_flow_info_get(uint16_t port_id,
		  struct rte_flow_port_info *port_info,
		  struct rte_flow_queue_info *queue_info,
		  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (dev->data->dev_configured == 0) {
		RTE_FLOW_LOG(INFO,
			"Device with port_id=%"PRIu16" is not configured.\n",
			port_id);
		return -EINVAL;
	}
	if (port_info == NULL) {
		RTE_FLOW_LOG(ERR, "Port %"PRIu16" info is NULL.\n", port_id);
		return -EINVAL;
	}
	if (likely(!!ops->info_get)) {
		ret = flow_err(port_id,
			       ops->info_get(dev, port_info, queue_info, error),
			       error);

		rte_flow_trace_info_get(port_id, port_info, queue_info, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_configure(uint16_t port_id,
		   const struct rte_flow_port_attr *port_attr,
		   uint16_t nb_queue,
		   const struct rte_flow_queue_attr *queue_attr[],
		   struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (dev->data->dev_configured == 0) {
		RTE_FLOW_LOG(INFO,
			"Device with port_id=%"PRIu16" is not configured.\n",
			port_id);
		return -EINVAL;
	}
	if (dev->data->dev_started != 0) {
		RTE_FLOW_LOG(INFO,
			"Device with port_id=%"PRIu16" already started.\n",
			port_id);
		return -EINVAL;
	}
	if (port_attr == NULL) {
		RTE_FLOW_LOG(ERR, "Port %"PRIu16" info is NULL.\n", port_id);
		return -EINVAL;
	}
	if (queue_attr == NULL) {
		RTE_FLOW_LOG(ERR, "Port %"PRIu16" queue info is NULL.\n", port_id);
		return -EINVAL;
	}
	if ((port_attr->flags & RTE_FLOW_PORT_FLAG_SHARE_INDIRECT) &&
	     !rte_eth_dev_is_valid_port(port_attr->host_port_id)) {
		return rte_flow_error_set(error, ENODEV,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENODEV));
	}
	if (likely(!!ops->configure)) {
		ret = ops->configure(dev, port_attr, nb_queue, queue_attr, error);
		if (ret == 0)
			dev->data->flow_configured = 1;
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_configure(port_id, port_attr, nb_queue, queue_attr, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

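/*
 * Usage sketch (editorial illustration, not part of the original file):
 * the pre-configuration sequence checked above: the device is configured
 * but not yet started when rte_flow_configure() runs. The resource count
 * and queue size below are arbitrary assumptions.
 *
 *	struct rte_flow_port_info port_info;
 *	struct rte_flow_queue_info queue_info;
 *	struct rte_flow_port_attr port_attr = { .nb_counters = 1024 };
 *	struct rte_flow_queue_attr qattr = { .size = 64 };
 *	const struct rte_flow_queue_attr *qattrs[] = { &qattr };
 *
 *	if (rte_flow_info_get(port_id, &port_info, &queue_info, &err) == 0)
 *		rte_flow_configure(port_id, &port_attr, 1, qattrs, &err);
 */
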
1694 struct rte_flow_pattern_template *
1695 rte_flow_pattern_template_create(uint16_t port_id,
1696 		const struct rte_flow_pattern_template_attr *template_attr,
1697 		const struct rte_flow_item pattern[],
1698 		struct rte_flow_error *error)
1699 {
1700 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1701 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1702 	struct rte_flow_pattern_template *template;
1703 
1704 	if (unlikely(!ops))
1705 		return NULL;
1706 	if (dev->data->flow_configured == 0) {
1707 		RTE_FLOW_LOG(INFO,
1708 			"Flow engine on port_id=%"PRIu16" is not configured.\n",
1709 			port_id);
1710 		rte_flow_error_set(error, EINVAL,
1711 				RTE_FLOW_ERROR_TYPE_STATE,
1712 				NULL, rte_strerror(EINVAL));
1713 		return NULL;
1714 	}
1715 	if (template_attr == NULL) {
1716 		RTE_FLOW_LOG(ERR,
1717 			     "Port %"PRIu16" template attr is NULL.\n",
1718 			     port_id);
1719 		rte_flow_error_set(error, EINVAL,
1720 				   RTE_FLOW_ERROR_TYPE_ATTR,
1721 				   NULL, rte_strerror(EINVAL));
1722 		return NULL;
1723 	}
1724 	if (pattern == NULL) {
1725 		RTE_FLOW_LOG(ERR,
1726 			     "Port %"PRIu16" pattern is NULL.\n",
1727 			     port_id);
1728 		rte_flow_error_set(error, EINVAL,
1729 				   RTE_FLOW_ERROR_TYPE_ATTR,
1730 				   NULL, rte_strerror(EINVAL));
1731 		return NULL;
1732 	}
1733 	if (likely(!!ops->pattern_template_create)) {
1734 		template = ops->pattern_template_create(dev, template_attr,
1735 							pattern, error);
1736 		if (template == NULL)
1737 			flow_err(port_id, -rte_errno, error);
1738 
1739 		rte_flow_trace_pattern_template_create(port_id, template_attr,
1740 						       pattern, template);
1741 
1742 		return template;
1743 	}
1744 	rte_flow_error_set(error, ENOTSUP,
1745 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1746 			   NULL, rte_strerror(ENOTSUP));
1747 	return NULL;
1748 }
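
/*
 * Usage sketch (illustrative only): a pattern template declares the match
 * shape once so the PMD can pre-provision matcher resources; the actual
 * spec values arrive per rule at enqueue time. NULL item masks are
 * assumed here to fall back to the items' default masks; attribute
 * choices are likewise assumptions.
 */
static __rte_unused struct rte_flow_pattern_template *
example_pattern_template(uint16_t port_id)
{
	const struct rte_flow_pattern_template_attr attr = {
		.relaxed_matching = 1,
		.ingress = 1,
	};
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_error error;

	return rte_flow_pattern_template_create(port_id, &attr, pattern,
						&error);
}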
1749 
1750 int
1751 rte_flow_pattern_template_destroy(uint16_t port_id,
1752 		struct rte_flow_pattern_template *pattern_template,
1753 		struct rte_flow_error *error)
1754 {
1755 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1756 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1757 	int ret;
1758 
1759 	if (unlikely(!ops))
1760 		return -rte_errno;
1761 	if (unlikely(pattern_template == NULL))
1762 		return 0;
1763 	if (likely(!!ops->pattern_template_destroy)) {
1764 		ret = flow_err(port_id,
1765 			       ops->pattern_template_destroy(dev,
1766 							     pattern_template,
1767 							     error),
1768 			       error);
1769 
1770 		rte_flow_trace_pattern_template_destroy(port_id, pattern_template,
1771 							ret);
1772 
1773 		return ret;
1774 	}
1775 	return rte_flow_error_set(error, ENOTSUP,
1776 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1777 				  NULL, rte_strerror(ENOTSUP));
1778 }
1779 
1780 struct rte_flow_actions_template *
1781 rte_flow_actions_template_create(uint16_t port_id,
1782 			const struct rte_flow_actions_template_attr *template_attr,
1783 			const struct rte_flow_action actions[],
1784 			const struct rte_flow_action masks[],
1785 			struct rte_flow_error *error)
1786 {
1787 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1788 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1789 	struct rte_flow_actions_template *template;
1790 
1791 	if (unlikely(!ops))
1792 		return NULL;
1793 	if (dev->data->flow_configured == 0) {
1794 		RTE_FLOW_LOG(INFO,
1795 			"Flow engine on port_id=%"PRIu16" is not configured.\n",
1796 			port_id);
1797 		rte_flow_error_set(error, EINVAL,
1798 				   RTE_FLOW_ERROR_TYPE_STATE,
1799 				   NULL, rte_strerror(EINVAL));
1800 		return NULL;
1801 	}
1802 	if (template_attr == NULL) {
1803 		RTE_FLOW_LOG(ERR,
1804 			     "Port %"PRIu16" template attr is NULL.\n",
1805 			     port_id);
1806 		rte_flow_error_set(error, EINVAL,
1807 				   RTE_FLOW_ERROR_TYPE_ATTR,
1808 				   NULL, rte_strerror(EINVAL));
1809 		return NULL;
1810 	}
1811 	if (actions == NULL) {
1812 		RTE_FLOW_LOG(ERR,
1813 			     "Port %"PRIu16" actions is NULL.\n",
1814 			     port_id);
1815 		rte_flow_error_set(error, EINVAL,
1816 				   RTE_FLOW_ERROR_TYPE_ATTR,
1817 				   NULL, rte_strerror(EINVAL));
1818 		return NULL;
1819 	}
1820 	if (masks == NULL) {
1821 		RTE_FLOW_LOG(ERR,
1822 			     "Port %"PRIu16" masks is NULL.\n",
1823 			     port_id);
1824 		rte_flow_error_set(error, EINVAL,
1825 				   RTE_FLOW_ERROR_TYPE_ATTR,
1826 				   NULL, rte_strerror(EINVAL));
1827 		return NULL;
1828 	}
1829 	if (likely(!!ops->actions_template_create)) {
1830 		template = ops->actions_template_create(dev, template_attr,
1831 							actions, masks, error);
1832 		if (template == NULL)
1833 			flow_err(port_id, -rte_errno, error);
1834 
1835 		rte_flow_trace_actions_template_create(port_id, template_attr, actions,
1836 						       masks, template);
1837 
1838 		return template;
1839 	}
1840 	rte_flow_error_set(error, ENOTSUP,
1841 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1842 			   NULL, rte_strerror(ENOTSUP));
1843 	return NULL;
1844 }
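
/*
 * Usage sketch (illustrative only): an actions template whose QUEUE
 * action is fully masked, i.e. the non-zero mask member marks the queue
 * index as a constant shared by every rule created from this template.
 * Values are hypothetical.
 */
static __rte_unused struct rte_flow_actions_template *
example_actions_template(uint16_t port_id)
{
	const struct rte_flow_actions_template_attr attr = { .ingress = 1 };
	const struct rte_flow_action_queue queue = { .index = 0 };
	const struct rte_flow_action_queue queue_mask = { .index = UINT16_MAX };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	const struct rte_flow_action masks[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue_mask },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error error;

	return rte_flow_actions_template_create(port_id, &attr, actions,
						masks, &error);
}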
1845 
1846 int
1847 rte_flow_actions_template_destroy(uint16_t port_id,
1848 			struct rte_flow_actions_template *actions_template,
1849 			struct rte_flow_error *error)
1850 {
1851 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1852 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1853 	int ret;
1854 
1855 	if (unlikely(!ops))
1856 		return -rte_errno;
1857 	if (unlikely(actions_template == NULL))
1858 		return 0;
1859 	if (likely(!!ops->actions_template_destroy)) {
1860 		ret = flow_err(port_id,
1861 			       ops->actions_template_destroy(dev,
1862 							     actions_template,
1863 							     error),
1864 			       error);
1865 
1866 		rte_flow_trace_actions_template_destroy(port_id, actions_template,
1867 							ret);
1868 
1869 		return ret;
1870 	}
1871 	return rte_flow_error_set(error, ENOTSUP,
1872 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1873 				  NULL, rte_strerror(ENOTSUP));
1874 }
1875 
1876 struct rte_flow_template_table *
1877 rte_flow_template_table_create(uint16_t port_id,
1878 			const struct rte_flow_template_table_attr *table_attr,
1879 			struct rte_flow_pattern_template *pattern_templates[],
1880 			uint8_t nb_pattern_templates,
1881 			struct rte_flow_actions_template *actions_templates[],
1882 			uint8_t nb_actions_templates,
1883 			struct rte_flow_error *error)
1884 {
1885 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1886 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1887 	struct rte_flow_template_table *table;
1888 
1889 	if (unlikely(!ops))
1890 		return NULL;
1891 	if (dev->data->flow_configured == 0) {
1892 		RTE_FLOW_LOG(INFO,
1893 			"Flow engine on port_id=%"PRIu16" is not configured.\n",
1894 			port_id);
1895 		rte_flow_error_set(error, EINVAL,
1896 				   RTE_FLOW_ERROR_TYPE_STATE,
1897 				   NULL, rte_strerror(EINVAL));
1898 		return NULL;
1899 	}
1900 	if (table_attr == NULL) {
1901 		RTE_FLOW_LOG(ERR,
1902 			     "Port %"PRIu16" table attr is NULL.\n",
1903 			     port_id);
1904 		rte_flow_error_set(error, EINVAL,
1905 				   RTE_FLOW_ERROR_TYPE_ATTR,
1906 				   NULL, rte_strerror(EINVAL));
1907 		return NULL;
1908 	}
1909 	if (pattern_templates == NULL) {
1910 		RTE_FLOW_LOG(ERR,
1911 			     "Port %"PRIu16" pattern templates is NULL.\n",
1912 			     port_id);
1913 		rte_flow_error_set(error, EINVAL,
1914 				   RTE_FLOW_ERROR_TYPE_ATTR,
1915 				   NULL, rte_strerror(EINVAL));
1916 		return NULL;
1917 	}
1918 	if (actions_templates == NULL) {
1919 		RTE_FLOW_LOG(ERR,
1920 			     "Port %"PRIu16" actions templates is NULL.\n",
1921 			     port_id);
1922 		rte_flow_error_set(error, EINVAL,
1923 				   RTE_FLOW_ERROR_TYPE_ATTR,
1924 				   NULL, rte_strerror(EINVAL));
1925 		return NULL;
1926 	}
1927 	if (likely(!!ops->template_table_create)) {
1928 		table = ops->template_table_create(dev, table_attr,
1929 					pattern_templates, nb_pattern_templates,
1930 					actions_templates, nb_actions_templates,
1931 					error);
1932 		if (table == NULL)
1933 			flow_err(port_id, -rte_errno, error);
1934 
1935 		rte_flow_trace_template_table_create(port_id, table_attr,
1936 						     pattern_templates,
1937 						     nb_pattern_templates,
1938 						     actions_templates,
1939 						     nb_actions_templates, table);
1940 
1941 		return table;
1942 	}
1943 	rte_flow_error_set(error, ENOTSUP,
1944 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1945 			   NULL, rte_strerror(ENOTSUP));
1946 	return NULL;
1947 }
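
/*
 * Usage sketch (illustrative only): combine one pattern template and one
 * actions template into a table sized for a hypothetical 1024 rules in
 * ingress group 1.
 */
static __rte_unused struct rte_flow_template_table *
example_table_create(uint16_t port_id,
		     struct rte_flow_pattern_template *pt,
		     struct rte_flow_actions_template *at)
{
	const struct rte_flow_template_table_attr attr = {
		.flow_attr = { .group = 1, .ingress = 1 },
		.nb_flows = 1024,
	};
	struct rte_flow_error error;

	return rte_flow_template_table_create(port_id, &attr, &pt, 1,
					      &at, 1, &error);
}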
1948 
1949 int
1950 rte_flow_template_table_destroy(uint16_t port_id,
1951 				struct rte_flow_template_table *template_table,
1952 				struct rte_flow_error *error)
1953 {
1954 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1955 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1956 	int ret;
1957 
1958 	if (unlikely(!ops))
1959 		return -rte_errno;
1960 	if (unlikely(template_table == NULL))
1961 		return 0;
1962 	if (likely(!!ops->template_table_destroy)) {
1963 		ret = flow_err(port_id,
1964 			       ops->template_table_destroy(dev,
1965 							   template_table,
1966 							   error),
1967 			       error);
1968 
1969 		rte_flow_trace_template_table_destroy(port_id, template_table,
1970 						      ret);
1971 
1972 		return ret;
1973 	}
1974 	return rte_flow_error_set(error, ENOTSUP,
1975 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1976 				  NULL, rte_strerror(ENOTSUP));
1977 }
1978 
1979 int
1980 rte_flow_group_set_miss_actions(uint16_t port_id,
1981 				uint32_t group_id,
1982 				const struct rte_flow_group_attr *attr,
1983 				const struct rte_flow_action actions[],
1984 				struct rte_flow_error *error)
1985 {
1986 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1987 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1988 
1989 	if (unlikely(!ops))
1990 		return -rte_errno;
1991 	if (likely(!!ops->group_set_miss_actions)) {
1992 		return flow_err(port_id,
1993 				ops->group_set_miss_actions(dev, group_id, attr, actions, error),
1994 				error);
1995 	}
1996 	return rte_flow_error_set(error, ENOTSUP,
1997 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1998 				  NULL, rte_strerror(ENOTSUP));
1999 }
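
/*
 * Usage sketch (illustrative only): make packets that match nothing in
 * group 1 jump to group 2 instead of taking the default miss behavior.
 * Group numbers are placeholders.
 */
static __rte_unused int
example_group_miss_actions(uint16_t port_id)
{
	const struct rte_flow_group_attr attr = { .ingress = 1 };
	const struct rte_flow_action_jump jump = { .group = 2 };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error error;

	return rte_flow_group_set_miss_actions(port_id, 1, &attr, actions,
					       &error);
}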
2000 
2001 struct rte_flow *
2002 rte_flow_async_create(uint16_t port_id,
2003 		      uint32_t queue_id,
2004 		      const struct rte_flow_op_attr *op_attr,
2005 		      struct rte_flow_template_table *template_table,
2006 		      const struct rte_flow_item pattern[],
2007 		      uint8_t pattern_template_index,
2008 		      const struct rte_flow_action actions[],
2009 		      uint8_t actions_template_index,
2010 		      void *user_data,
2011 		      struct rte_flow_error *error)
2012 {
2013 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2014 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2015 	struct rte_flow *flow;
2016 
2017 	flow = ops->async_create(dev, queue_id,
2018 				 op_attr, template_table,
2019 				 pattern, pattern_template_index,
2020 				 actions, actions_template_index,
2021 				 user_data, error);
2022 	if (flow == NULL)
2023 		flow_err(port_id, -rte_errno, error);
2024 
2025 	rte_flow_trace_async_create(port_id, queue_id, op_attr, template_table,
2026 				    pattern, pattern_template_index, actions,
2027 				    actions_template_index, user_data, flow);
2028 
2029 	return flow;
2030 }
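
/*
 * Usage sketch (illustrative only): enqueue one rule on flow queue 0
 * using index 0 of the table's pattern and actions templates. Note that
 * rte_flow_async_create() and the other enqueue-time calls below do no
 * ops/NULL validation; they are datapath functions and call straight
 * into the PMD. Item and action contents must conform to the table's
 * templates; the arrays here are placeholders (the QUEUE index was fixed
 * by the fully-masked template in the sketch above).
 */
static __rte_unused struct rte_flow *
example_async_create(uint16_t port_id,
		     struct rte_flow_template_table *table)
{
	const struct rte_flow_op_attr op_attr = { .postpone = 0 };
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error error;

	return rte_flow_async_create(port_id, 0, &op_attr, table,
				     pattern, 0, actions, 0, NULL, &error);
}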
2031 
2032 struct rte_flow *
2033 rte_flow_async_create_by_index(uint16_t port_id,
2034 			       uint32_t queue_id,
2035 			       const struct rte_flow_op_attr *op_attr,
2036 			       struct rte_flow_template_table *template_table,
2037 			       uint32_t rule_index,
2038 			       const struct rte_flow_action actions[],
2039 			       uint8_t actions_template_index,
2040 			       void *user_data,
2041 			       struct rte_flow_error *error)
2042 {
2043 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2044 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2045 	struct rte_flow *flow;
2046 
2047 	flow = ops->async_create_by_index(dev, queue_id,
2048 					  op_attr, template_table, rule_index,
2049 					  actions, actions_template_index,
2050 					  user_data, error);
2051 	if (flow == NULL)
2052 		flow_err(port_id, -rte_errno, error);
2053 	return flow;
2054 }
2055 
2056 int
2057 rte_flow_async_destroy(uint16_t port_id,
2058 		       uint32_t queue_id,
2059 		       const struct rte_flow_op_attr *op_attr,
2060 		       struct rte_flow *flow,
2061 		       void *user_data,
2062 		       struct rte_flow_error *error)
2063 {
2064 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2065 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2066 	int ret;
2067 
2068 	ret = flow_err(port_id,
2069 		       ops->async_destroy(dev, queue_id,
2070 					  op_attr, flow,
2071 					  user_data, error),
2072 		       error);
2073 
2074 	rte_flow_trace_async_destroy(port_id, queue_id, op_attr, flow,
2075 				     user_data, ret);
2076 
2077 	return ret;
2078 }
2079 
2080 int
2081 rte_flow_async_actions_update(uint16_t port_id,
2082 			      uint32_t queue_id,
2083 			      const struct rte_flow_op_attr *op_attr,
2084 			      struct rte_flow *flow,
2085 			      const struct rte_flow_action actions[],
2086 			      uint8_t actions_template_index,
2087 			      void *user_data,
2088 			      struct rte_flow_error *error)
2089 {
2090 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2091 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2092 	int ret;
2093 
2094 	ret = flow_err(port_id,
2095 		       ops->async_actions_update(dev, queue_id, op_attr,
2096 						 flow, actions,
2097 						 actions_template_index,
2098 						 user_data, error),
2099 		       error);
2100 
2101 	rte_flow_trace_async_actions_update(port_id, queue_id, op_attr, flow,
2102 					    actions, actions_template_index,
2103 					    user_data, ret);
2104 
2105 	return ret;
2106 }
2107 
2108 int
2109 rte_flow_push(uint16_t port_id,
2110 	      uint32_t queue_id,
2111 	      struct rte_flow_error *error)
2112 {
2113 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2114 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2115 	int ret;
2116 
2117 	ret = flow_err(port_id,
2118 		       ops->push(dev, queue_id, error),
2119 		       error);
2120 
2121 	rte_flow_trace_push(port_id, queue_id, ret);
2122 
2123 	return ret;
2124 }
2125 
2126 int
2127 rte_flow_pull(uint16_t port_id,
2128 	      uint32_t queue_id,
2129 	      struct rte_flow_op_result res[],
2130 	      uint16_t n_res,
2131 	      struct rte_flow_error *error)
2132 {
2133 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2134 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2135 	int ret;
2136 	int rc;
2137 
2138 	ret = ops->pull(dev, queue_id, res, n_res, error);
2139 	rc = ret ? ret : flow_err(port_id, ret, error);
2140 
2141 	rte_flow_trace_pull(port_id, queue_id, res, n_res, rc);
2142 
2143 	return rc;
2144 }
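
/*
 * Usage sketch (illustrative only): push any postponed operations on a
 * queue, then poll until at least one completion arrives. A real
 * application would bound the loop and inspect each result's status and
 * user_data.
 */
static __rte_unused int
example_flush_queue(uint16_t port_id, uint32_t queue_id)
{
	struct rte_flow_op_result results[8];
	struct rte_flow_error error;
	int n;

	n = rte_flow_push(port_id, queue_id, &error);
	if (n < 0)
		return n;
	do {
		n = rte_flow_pull(port_id, queue_id, results,
				  RTE_DIM(results), &error);
	} while (n == 0);
	return n < 0 ? n : 0;
}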
2145 
2146 struct rte_flow_action_handle *
2147 rte_flow_async_action_handle_create(uint16_t port_id,
2148 		uint32_t queue_id,
2149 		const struct rte_flow_op_attr *op_attr,
2150 		const struct rte_flow_indir_action_conf *indir_action_conf,
2151 		const struct rte_flow_action *action,
2152 		void *user_data,
2153 		struct rte_flow_error *error)
2154 {
2155 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2156 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2157 	struct rte_flow_action_handle *handle;
2158 
2159 	handle = ops->async_action_handle_create(dev, queue_id, op_attr,
2160 					     indir_action_conf, action, user_data, error);
2161 	if (handle == NULL)
2162 		flow_err(port_id, -rte_errno, error);
2163 
2164 	rte_flow_trace_async_action_handle_create(port_id, queue_id, op_attr,
2165 						  indir_action_conf, action,
2166 						  user_data, handle);
2167 
2168 	return handle;
2169 }
2170 
2171 int
2172 rte_flow_async_action_handle_destroy(uint16_t port_id,
2173 		uint32_t queue_id,
2174 		const struct rte_flow_op_attr *op_attr,
2175 		struct rte_flow_action_handle *action_handle,
2176 		void *user_data,
2177 		struct rte_flow_error *error)
2178 {
2179 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2180 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2181 	int ret;
2182 
2183 	ret = ops->async_action_handle_destroy(dev, queue_id, op_attr,
2184 					   action_handle, user_data, error);
2185 	ret = flow_err(port_id, ret, error);
2186 
2187 	rte_flow_trace_async_action_handle_destroy(port_id, queue_id, op_attr,
2188 						   action_handle, user_data, ret);
2189 
2190 	return ret;
2191 }
2192 
2193 int
2194 rte_flow_async_action_handle_update(uint16_t port_id,
2195 		uint32_t queue_id,
2196 		const struct rte_flow_op_attr *op_attr,
2197 		struct rte_flow_action_handle *action_handle,
2198 		const void *update,
2199 		void *user_data,
2200 		struct rte_flow_error *error)
2201 {
2202 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2203 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2204 	int ret;
2205 
2206 	ret = ops->async_action_handle_update(dev, queue_id, op_attr,
2207 					  action_handle, update, user_data, error);
2208 	ret = flow_err(port_id, ret, error);
2209 
2210 	rte_flow_trace_async_action_handle_update(port_id, queue_id, op_attr,
2211 						  action_handle, update,
2212 						  user_data, ret);
2213 
2214 	return ret;
2215 }
2216 
2217 int
2218 rte_flow_async_action_handle_query(uint16_t port_id,
2219 		uint32_t queue_id,
2220 		const struct rte_flow_op_attr *op_attr,
2221 		const struct rte_flow_action_handle *action_handle,
2222 		void *data,
2223 		void *user_data,
2224 		struct rte_flow_error *error)
2225 {
2226 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2227 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2228 	int ret;
2229 
2230 	if (unlikely(!ops))
2231 		return -rte_errno;
2232 	ret = ops->async_action_handle_query(dev, queue_id, op_attr,
2233 					  action_handle, data, user_data, error);
2234 	ret = flow_err(port_id, ret, error);
2235 
2236 	rte_flow_trace_async_action_handle_query(port_id, queue_id, op_attr,
2237 						 action_handle, data, user_data,
2238 						 ret);
2239 
2240 	return ret;
2241 }
2242 
2243 int
2244 rte_flow_action_handle_query_update(uint16_t port_id,
2245 				    struct rte_flow_action_handle *handle,
2246 				    const void *update, void *query,
2247 				    enum rte_flow_query_update_mode mode,
2248 				    struct rte_flow_error *error)
2249 {
2250 	int ret;
2251 	struct rte_eth_dev *dev;
2252 	const struct rte_flow_ops *ops;
2253 
2254 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2255 	if (!handle)
2256 		return -EINVAL;
2257 	if (!update && !query)
2258 		return -EINVAL;
2259 	dev = &rte_eth_devices[port_id];
2260 	ops = rte_flow_ops_get(port_id, error);
2261 	if (!ops || !ops->action_handle_query_update)
2262 		return -ENOTSUP;
2263 	ret = ops->action_handle_query_update(dev, handle, update,
2264 					      query, mode, error);
2265 	return flow_err(port_id, ret, error);
2266 }
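
/*
 * Usage sketch (illustrative only): read an indirect COUNT action via the
 * combined query/update entry point. With update left NULL this is a
 * plain query (the mode is then moot); which update/query combinations a
 * PMD accepts is device-specific.
 */
static __rte_unused int
example_handle_query(uint16_t port_id, struct rte_flow_action_handle *handle)
{
	struct rte_flow_query_count counters = { .reset = 1 };
	struct rte_flow_error error;

	return rte_flow_action_handle_query_update(port_id, handle, NULL,
						   &counters,
						   RTE_FLOW_QU_QUERY_FIRST,
						   &error);
}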
2267 
2268 int
2269 rte_flow_async_action_handle_query_update(uint16_t port_id, uint32_t queue_id,
2270 					  const struct rte_flow_op_attr *attr,
2271 					  struct rte_flow_action_handle *handle,
2272 					  const void *update, void *query,
2273 					  enum rte_flow_query_update_mode mode,
2274 					  void *user_data,
2275 					  struct rte_flow_error *error)
2276 {
2277 	int ret;
2278 	struct rte_eth_dev *dev;
2279 	const struct rte_flow_ops *ops;
2280 
2281 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2282 	if (!handle)
2283 		return -EINVAL;
2284 	if (!update && !query)
2285 		return -EINVAL;
2286 	dev = &rte_eth_devices[port_id];
2287 	ops = rte_flow_ops_get(port_id, error);
2288 	if (!ops || !ops->async_action_handle_query_update)
2289 		return -ENOTSUP;
2290 	ret = ops->async_action_handle_query_update(dev, queue_id, attr,
2291 						    handle, update,
2292 						    query, mode,
2293 						    user_data, error);
2294 	return flow_err(port_id, ret, error);
2295 }
2296 
2297 struct rte_flow_action_list_handle *
2298 rte_flow_action_list_handle_create(uint16_t port_id,
2299 				   const
2300 				   struct rte_flow_indir_action_conf *conf,
2301 				   const struct rte_flow_action *actions,
2302 				   struct rte_flow_error *error)
2303 {
2304 	int ret;
2305 	struct rte_eth_dev *dev;
2306 	const struct rte_flow_ops *ops;
2307 	struct rte_flow_action_list_handle *handle;
2308 
2309 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
2310 	ops = rte_flow_ops_get(port_id, error);
2311 	if (!ops || !ops->action_list_handle_create) {
2312 		rte_flow_error_set(error, ENOTSUP,
2313 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2314 				   "action_list handle not supported");
2315 		return NULL;
2316 	}
2317 	dev = &rte_eth_devices[port_id];
2318 	handle = ops->action_list_handle_create(dev, conf, actions, error);
2319 	ret = handle == NULL ? flow_err(port_id, -rte_errno, error) : 0;
2320 	rte_flow_trace_action_list_handle_create(port_id, conf, actions, ret);
2321 	return handle;
2322 }
2323 
2324 int
2325 rte_flow_action_list_handle_destroy(uint16_t port_id,
2326 				    struct rte_flow_action_list_handle *handle,
2327 				    struct rte_flow_error *error)
2328 {
2329 	int ret;
2330 	struct rte_eth_dev *dev;
2331 	const struct rte_flow_ops *ops;
2332 
2333 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2334 	ops = rte_flow_ops_get(port_id, error);
2335 	if (!ops || !ops->action_list_handle_destroy)
2336 		return rte_flow_error_set(error, ENOTSUP,
2337 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2338 					  "action_list handle not supported");
2339 	dev = &rte_eth_devices[port_id];
2340 	ret = ops->action_list_handle_destroy(dev, handle, error);
2341 	ret = flow_err(port_id, ret, error);
2342 	rte_flow_trace_action_list_handle_destroy(port_id, handle, ret);
2343 	return ret;
2344 }
2345 
2346 struct rte_flow_action_list_handle *
2347 rte_flow_async_action_list_handle_create(uint16_t port_id, uint32_t queue_id,
2348 					 const struct rte_flow_op_attr *attr,
2349 					 const struct rte_flow_indir_action_conf *conf,
2350 					 const struct rte_flow_action *actions,
2351 					 void *user_data,
2352 					 struct rte_flow_error *error)
2353 {
2354 	int ret;
2355 	struct rte_eth_dev *dev;
2356 	const struct rte_flow_ops *ops;
2357 	struct rte_flow_action_list_handle *handle;
2358 
2359 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
2360 	ops = rte_flow_ops_get(port_id, error);
2361 	if (!ops || !ops->async_action_list_handle_create) {
2362 		rte_flow_error_set(error, ENOTSUP,
2363 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2364 				   "action_list handle not supported");
2365 		return NULL;
2366 	}
2367 	dev = &rte_eth_devices[port_id];
2368 	handle = ops->async_action_list_handle_create(dev, queue_id, attr, conf,
2369 						      actions, user_data,
2370 						      error);
2371 	ret = handle == NULL ? flow_err(port_id, -rte_errno, error) : 0;
2372 	rte_flow_trace_async_action_list_handle_create(port_id, queue_id, attr,
2373 						       conf, actions, user_data,
2374 						       ret);
2375 	return handle;
2376 }
2377 
2378 int
2379 rte_flow_async_action_list_handle_destroy(uint16_t port_id, uint32_t queue_id,
2380 				 const struct rte_flow_op_attr *op_attr,
2381 				 struct rte_flow_action_list_handle *handle,
2382 				 void *user_data, struct rte_flow_error *error)
2383 {
2384 	int ret;
2385 	struct rte_eth_dev *dev;
2386 	const struct rte_flow_ops *ops;
2387 
2388 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2389 	ops = rte_flow_ops_get(port_id, error);
2390 	if (!ops || !ops->async_action_list_handle_destroy)
2391 		return rte_flow_error_set(error, ENOTSUP,
2392 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2393 					  "async action_list handle not supported");
2394 	dev = &rte_eth_devices[port_id];
2395 	ret = ops->async_action_list_handle_destroy(dev, queue_id, op_attr,
2396 						    handle, user_data, error);
2397 	ret = flow_err(port_id, ret, error);
2398 	rte_flow_trace_async_action_list_handle_destroy(port_id, queue_id,
2399 							op_attr, handle,
2400 							user_data, ret);
2401 	return ret;
2402 }
2403 
2404 int
2405 rte_flow_action_list_handle_query_update(uint16_t port_id,
2406 			 const struct rte_flow_action_list_handle *handle,
2407 			 const void **update, void **query,
2408 			 enum rte_flow_query_update_mode mode,
2409 			 struct rte_flow_error *error)
2410 {
2411 	int ret;
2412 	struct rte_eth_dev *dev;
2413 	const struct rte_flow_ops *ops;
2414 
2415 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2416 	ops = rte_flow_ops_get(port_id, error);
2417 	if (!ops || !ops->action_list_handle_query_update)
2418 		return rte_flow_error_set(error, ENOTSUP,
2419 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2420 					  "action_list query_update not supported");
2421 	dev = &rte_eth_devices[port_id];
2422 	ret = ops->action_list_handle_query_update(dev, handle, update, query,
2423 						   mode, error);
2424 	ret = flow_err(port_id, ret, error);
2425 	rte_flow_trace_action_list_handle_query_update(port_id, handle, update,
2426 						       query, mode, ret);
2427 	return ret;
2428 }
2429 
2430 int
2431 rte_flow_async_action_list_handle_query_update(uint16_t port_id, uint32_t queue_id,
2432 			 const struct rte_flow_op_attr *attr,
2433 			 const struct rte_flow_action_list_handle *handle,
2434 			 const void **update, void **query,
2435 			 enum rte_flow_query_update_mode mode,
2436 			 void *user_data, struct rte_flow_error *error)
2437 {
2438 	int ret;
2439 	struct rte_eth_dev *dev;
2440 	const struct rte_flow_ops *ops;
2441 
2442 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2443 	ops = rte_flow_ops_get(port_id, error);
2444 	if (!ops || !ops->async_action_list_handle_query_update)
2445 		return rte_flow_error_set(error, ENOTSUP,
2446 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2447 					  "action_list async query_update not supported");
2448 	dev = &rte_eth_devices[port_id];
2449 	ret = ops->async_action_list_handle_query_update(dev, queue_id, attr,
2450 							 handle, update, query,
2451 							 mode, user_data,
2452 							 error);
2453 	ret = flow_err(port_id, ret, error);
2454 	rte_flow_trace_async_action_list_handle_query_update(port_id, queue_id,
2455 							     attr, handle,
2456 							     update, query,
2457 							     mode, user_data,
2458 							     ret);
2459 	return ret;
2460 }
2461 
2462 int
2463 rte_flow_calc_table_hash(uint16_t port_id, const struct rte_flow_template_table *table,
2464 			 const struct rte_flow_item pattern[], uint8_t pattern_template_index,
2465 			 uint32_t *hash, struct rte_flow_error *error)
2466 {
2467 	int ret;
2468 	struct rte_eth_dev *dev;
2469 	const struct rte_flow_ops *ops;
2470 
2471 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2472 	ops = rte_flow_ops_get(port_id, error);
2473 	if (!ops || !ops->flow_calc_table_hash)
2474 		return rte_flow_error_set(error, ENOTSUP,
2475 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2476 					  "calc_table_hash not supported");
2477 	dev = &rte_eth_devices[port_id];
2478 	ret = ops->flow_calc_table_hash(dev, table, pattern, pattern_template_index,
2479 					hash, error);
2480 	return flow_err(port_id, ret, error);
2481 }
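
/*
 * Usage sketch (illustrative only): ask the PMD which hash it would
 * compute for a packet matching this pattern in pattern template 0 of
 * the table, e.g. to shard rule insertions across flow queues.
 */
static __rte_unused int
example_calc_hash(uint16_t port_id,
		  const struct rte_flow_template_table *table,
		  uint32_t *hash)
{
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	struct rte_flow_error error;

	return rte_flow_calc_table_hash(port_id, table, pattern, 0, hash,
					&error);
}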
2482