xref: /dpdk/lib/ethdev/rte_flow.c (revision 738ef8f7c706c124715699f76b6de1168a64534a)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <stdalign.h>
#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <pthread.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_branch_prediction.h>
#include <rte_string_fns.h>
#include <rte_mbuf_dyn.h>
#include "rte_flow_driver.h"
#include "rte_flow.h"

#include "ethdev_trace.h"

#define FLOW_LOG RTE_ETHDEV_LOG_LINE

/* Mbuf dynamic field offset for metadata. */
int32_t rte_flow_dynf_metadata_offs = -1;

/* Mbuf dynamic flag mask for metadata. */
uint64_t rte_flow_dynf_metadata_mask;

/**
 * Flow elements description tables.
 */
struct rte_flow_desc_data {
	const char *name;
	size_t size;
	size_t (*desc_fn)(void *dst, const void *src);
};

/**
 * Copy flow element data (pattern item spec or action configuration).
 *
 * @param buf
 * Destination memory.
 * @param data
 * Source memory.
 * @param size
 * Requested copy size.
 * @param desc
 * rte_flow_desc_item - for flow item conversion.
 * rte_flow_desc_action - for flow action conversion.
 * @param type
 * Offset into the desc param or negative value for private flow elements.
 *
 * @return
 * Number of bytes needed to store the element regardless of @p size.
 */
static inline size_t
rte_flow_conv_copy(void *buf, const void *data, const size_t size,
		   const struct rte_flow_desc_data *desc, int type)
{
	/* A negative type denotes a PMD private flow element, which is
	 * stored as a plain pointer.
	 */
	bool rte_type = type >= 0;

	size_t sz = rte_type ? desc[type].size : sizeof(void *);
	if (buf == NULL || data == NULL)
		return 0;
	rte_memcpy(buf, data, (size > sz ? sz : size));
	if (rte_type && desc[type].desc_fn)
		sz += desc[type].desc_fn(size > 0 ? buf : NULL, data);
	return sz;
}

/** Copy the FLEX item pattern right after the structure; return its length. */
static size_t
rte_flow_item_flex_conv(void *buf, const void *data)
{
	struct rte_flow_item_flex *dst = buf;
	const struct rte_flow_item_flex *src = data;
	if (buf) {
		dst->pattern = rte_memcpy
			((void *)((uintptr_t)(dst + 1)), src->pattern,
			 src->length);
	}
	return src->length;
}

/** Generate flow_item[] entry. */
#define MK_FLOW_ITEM(t, s) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
		.desc_fn = NULL, \
	}

/** Generate flow_item[] entry with a custom conversion function. */
#define MK_FLOW_ITEM_FN(t, s, fn) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
		.desc_fn = fn, \
	}

/** Information about known flow pattern items. */
static const struct rte_flow_desc_data rte_flow_desc_item[] = {
	MK_FLOW_ITEM(END, 0),
	MK_FLOW_ITEM(VOID, 0),
	MK_FLOW_ITEM(INVERT, 0),
	MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
	MK_FLOW_ITEM(PORT_ID, sizeof(struct rte_flow_item_port_id)),
	MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)),
	MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
	MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
	MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
	MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
	MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
	MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
	MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
	MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
	MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
	MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
	MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
	MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
	MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
	MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)),
	MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(ESP, sizeof(struct rte_flow_item_esp)),
	MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
	MK_FLOW_ITEM(VXLAN_GPE, sizeof(struct rte_flow_item_vxlan_gpe)),
	MK_FLOW_ITEM(ARP_ETH_IPV4, sizeof(struct rte_flow_item_arp_eth_ipv4)),
	MK_FLOW_ITEM(IPV6_EXT, sizeof(struct rte_flow_item_ipv6_ext)),
	MK_FLOW_ITEM(IPV6_FRAG_EXT, sizeof(struct rte_flow_item_ipv6_frag_ext)),
	MK_FLOW_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
	MK_FLOW_ITEM(ICMP6_ECHO_REQUEST, sizeof(struct rte_flow_item_icmp6_echo)),
	MK_FLOW_ITEM(ICMP6_ECHO_REPLY, sizeof(struct rte_flow_item_icmp6_echo)),
	MK_FLOW_ITEM(ICMP6_ND_NS, sizeof(struct rte_flow_item_icmp6_nd_ns)),
	MK_FLOW_ITEM(ICMP6_ND_NA, sizeof(struct rte_flow_item_icmp6_nd_na)),
	MK_FLOW_ITEM(ICMP6_ND_OPT, sizeof(struct rte_flow_item_icmp6_nd_opt)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_SLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_TLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
	MK_FLOW_ITEM(MARK, sizeof(struct rte_flow_item_mark)),
	MK_FLOW_ITEM(META, sizeof(struct rte_flow_item_meta)),
	MK_FLOW_ITEM(RANDOM, sizeof(struct rte_flow_item_random)),
	MK_FLOW_ITEM(TAG, sizeof(struct rte_flow_item_tag)),
	MK_FLOW_ITEM(GRE_KEY, sizeof(rte_be32_t)),
	MK_FLOW_ITEM(GRE_OPTION, sizeof(struct rte_flow_item_gre_opt)),
	MK_FLOW_ITEM(GTP_PSC, sizeof(struct rte_flow_item_gtp_psc)),
	MK_FLOW_ITEM(PPPOES, sizeof(struct rte_flow_item_pppoe)),
	MK_FLOW_ITEM(PPPOED, sizeof(struct rte_flow_item_pppoe)),
	MK_FLOW_ITEM(PPPOE_PROTO_ID,
			sizeof(struct rte_flow_item_pppoe_proto_id)),
	MK_FLOW_ITEM(NSH, sizeof(struct rte_flow_item_nsh)),
	MK_FLOW_ITEM(IGMP, sizeof(struct rte_flow_item_igmp)),
	MK_FLOW_ITEM(AH, sizeof(struct rte_flow_item_ah)),
	MK_FLOW_ITEM(HIGIG2, sizeof(struct rte_flow_item_higig2_hdr)),
	MK_FLOW_ITEM(L2TPV3OIP, sizeof(struct rte_flow_item_l2tpv3oip)),
	MK_FLOW_ITEM(PFCP, sizeof(struct rte_flow_item_pfcp)),
	MK_FLOW_ITEM(ECPRI, sizeof(struct rte_flow_item_ecpri)),
	MK_FLOW_ITEM(GENEVE_OPT, sizeof(struct rte_flow_item_geneve_opt)),
	MK_FLOW_ITEM(INTEGRITY, sizeof(struct rte_flow_item_integrity)),
	MK_FLOW_ITEM(CONNTRACK, sizeof(uint32_t)),
	MK_FLOW_ITEM(PORT_REPRESENTOR, sizeof(struct rte_flow_item_ethdev)),
	MK_FLOW_ITEM(REPRESENTED_PORT, sizeof(struct rte_flow_item_ethdev)),
	MK_FLOW_ITEM_FN(FLEX, sizeof(struct rte_flow_item_flex),
			rte_flow_item_flex_conv),
	MK_FLOW_ITEM(L2TPV2, sizeof(struct rte_flow_item_l2tpv2)),
	MK_FLOW_ITEM(PPP, sizeof(struct rte_flow_item_ppp)),
	MK_FLOW_ITEM(METER_COLOR, sizeof(struct rte_flow_item_meter_color)),
	MK_FLOW_ITEM(IPV6_ROUTING_EXT, sizeof(struct rte_flow_item_ipv6_routing_ext)),
	MK_FLOW_ITEM(QUOTA, sizeof(struct rte_flow_item_quota)),
	MK_FLOW_ITEM(AGGR_AFFINITY, sizeof(struct rte_flow_item_aggr_affinity)),
	MK_FLOW_ITEM(TX_QUEUE, sizeof(struct rte_flow_item_tx_queue)),
	MK_FLOW_ITEM(IB_BTH, sizeof(struct rte_flow_item_ib_bth)),
	MK_FLOW_ITEM(PTYPE, sizeof(struct rte_flow_item_ptype)),
};

/** Generate flow_action[] entry. */
#define MK_FLOW_ACTION(t, s) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
		.desc_fn = NULL, \
	}

/** Generate flow_action[] entry with a custom conversion function. */
#define MK_FLOW_ACTION_FN(t, fn) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = 0, \
		.desc_fn = fn, \
	}

/** Information about known flow actions. */
static const struct rte_flow_desc_data rte_flow_desc_action[] = {
	MK_FLOW_ACTION(END, 0),
	MK_FLOW_ACTION(VOID, 0),
	MK_FLOW_ACTION(PASSTHRU, 0),
	MK_FLOW_ACTION(JUMP, sizeof(struct rte_flow_action_jump)),
	MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
	MK_FLOW_ACTION(FLAG, 0),
	MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
	MK_FLOW_ACTION(DROP, 0),
	MK_FLOW_ACTION(COUNT, sizeof(struct rte_flow_action_count)),
	MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)),
	MK_FLOW_ACTION(PF, 0),
	MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
	MK_FLOW_ACTION(PORT_ID, sizeof(struct rte_flow_action_port_id)),
	MK_FLOW_ACTION(METER, sizeof(struct rte_flow_action_meter)),
	MK_FLOW_ACTION(SECURITY, sizeof(struct rte_flow_action_security)),
	MK_FLOW_ACTION(OF_DEC_NW_TTL, 0),
	MK_FLOW_ACTION(OF_POP_VLAN, 0),
	MK_FLOW_ACTION(OF_PUSH_VLAN,
		       sizeof(struct rte_flow_action_of_push_vlan)),
	MK_FLOW_ACTION(OF_SET_VLAN_VID,
		       sizeof(struct rte_flow_action_of_set_vlan_vid)),
	MK_FLOW_ACTION(OF_SET_VLAN_PCP,
		       sizeof(struct rte_flow_action_of_set_vlan_pcp)),
	MK_FLOW_ACTION(OF_POP_MPLS,
		       sizeof(struct rte_flow_action_of_pop_mpls)),
	MK_FLOW_ACTION(OF_PUSH_MPLS,
		       sizeof(struct rte_flow_action_of_push_mpls)),
	MK_FLOW_ACTION(VXLAN_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
	MK_FLOW_ACTION(VXLAN_DECAP, 0),
	MK_FLOW_ACTION(NVGRE_ENCAP, sizeof(struct rte_flow_action_nvgre_encap)),
	MK_FLOW_ACTION(NVGRE_DECAP, 0),
	MK_FLOW_ACTION(RAW_ENCAP, sizeof(struct rte_flow_action_raw_encap)),
	MK_FLOW_ACTION(RAW_DECAP, sizeof(struct rte_flow_action_raw_decap)),
	MK_FLOW_ACTION(SET_IPV4_SRC,
		       sizeof(struct rte_flow_action_set_ipv4)),
	MK_FLOW_ACTION(SET_IPV4_DST,
		       sizeof(struct rte_flow_action_set_ipv4)),
	MK_FLOW_ACTION(SET_IPV6_SRC,
		       sizeof(struct rte_flow_action_set_ipv6)),
	MK_FLOW_ACTION(SET_IPV6_DST,
		       sizeof(struct rte_flow_action_set_ipv6)),
	MK_FLOW_ACTION(SET_TP_SRC,
		       sizeof(struct rte_flow_action_set_tp)),
	MK_FLOW_ACTION(SET_TP_DST,
		       sizeof(struct rte_flow_action_set_tp)),
	MK_FLOW_ACTION(MAC_SWAP, 0),
	MK_FLOW_ACTION(DEC_TTL, 0),
	MK_FLOW_ACTION(SET_TTL, sizeof(struct rte_flow_action_set_ttl)),
	MK_FLOW_ACTION(SET_MAC_SRC, sizeof(struct rte_flow_action_set_mac)),
	MK_FLOW_ACTION(SET_MAC_DST, sizeof(struct rte_flow_action_set_mac)),
	MK_FLOW_ACTION(INC_TCP_SEQ, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(DEC_TCP_SEQ, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(INC_TCP_ACK, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(DEC_TCP_ACK, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(SET_TAG, sizeof(struct rte_flow_action_set_tag)),
	MK_FLOW_ACTION(SET_META, sizeof(struct rte_flow_action_set_meta)),
	MK_FLOW_ACTION(SET_IPV4_DSCP, sizeof(struct rte_flow_action_set_dscp)),
	MK_FLOW_ACTION(SET_IPV6_DSCP, sizeof(struct rte_flow_action_set_dscp)),
	MK_FLOW_ACTION(AGE, sizeof(struct rte_flow_action_age)),
	MK_FLOW_ACTION(SAMPLE, sizeof(struct rte_flow_action_sample)),
	MK_FLOW_ACTION(MODIFY_FIELD,
		       sizeof(struct rte_flow_action_modify_field)),
	/**
	 * Indirect action represented as handle of type
	 * (struct rte_flow_action_handle *) stored in conf field (see
	 * struct rte_flow_action); no need for additional structure to
	 * store indirect action handle.
	 */
	MK_FLOW_ACTION(INDIRECT, 0),
	MK_FLOW_ACTION(CONNTRACK, sizeof(struct rte_flow_action_conntrack)),
	MK_FLOW_ACTION(PORT_REPRESENTOR, sizeof(struct rte_flow_action_ethdev)),
	MK_FLOW_ACTION(REPRESENTED_PORT, sizeof(struct rte_flow_action_ethdev)),
	MK_FLOW_ACTION(METER_MARK, sizeof(struct rte_flow_action_meter_mark)),
	MK_FLOW_ACTION(SEND_TO_KERNEL, 0),
	MK_FLOW_ACTION(QUOTA, sizeof(struct rte_flow_action_quota)),
	MK_FLOW_ACTION(IPV6_EXT_PUSH, sizeof(struct rte_flow_action_ipv6_ext_push)),
	MK_FLOW_ACTION(IPV6_EXT_REMOVE, sizeof(struct rte_flow_action_ipv6_ext_remove)),
	MK_FLOW_ACTION(INDIRECT_LIST,
		       sizeof(struct rte_flow_action_indirect_list)),
	MK_FLOW_ACTION(PROG,
		       sizeof(struct rte_flow_action_prog)),
};

int
rte_flow_dynf_metadata_register(void)
{
	int offset;
	int flag;

	static const struct rte_mbuf_dynfield desc_offs = {
		.name = RTE_MBUF_DYNFIELD_METADATA_NAME,
		.size = sizeof(uint32_t),
		.align = alignof(uint32_t),
	};
	static const struct rte_mbuf_dynflag desc_flag = {
		.name = RTE_MBUF_DYNFLAG_METADATA_NAME,
	};

	offset = rte_mbuf_dynfield_register(&desc_offs);
	if (offset < 0)
		goto error;
	flag = rte_mbuf_dynflag_register(&desc_flag);
	if (flag < 0)
		goto error;
	rte_flow_dynf_metadata_offs = offset;
	rte_flow_dynf_metadata_mask = RTE_BIT64(flag);

	rte_flow_trace_dynf_metadata_register(offset, RTE_BIT64(flag));

	return 0;

error:
	rte_flow_dynf_metadata_offs = -1;
	rte_flow_dynf_metadata_mask = UINT64_C(0);
	return -rte_errno;
}
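
/*
 * Illustrative usage sketch (application side), kept as a comment on
 * purpose: register the metadata dynamic field once at startup, then read
 * it from received mbufs via the RTE_FLOW_DYNF_METADATA() helper declared
 * in rte_flow.h. Variable names here are hypothetical.
 *
 *	if (rte_flow_dynf_metadata_register() < 0)
 *		rte_exit(EXIT_FAILURE, "cannot register metadata dynfield\n");
 *	...
 *	if (mbuf->ol_flags & rte_flow_dynf_metadata_mask)
 *		printf("metadata: %u\n", *RTE_FLOW_DYNF_METADATA(mbuf));
 */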

/*
 * Serialize flow operations on a port for drivers that do not advertise
 * RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE; thread-safe drivers skip the mutex.
 */
static inline void
fts_enter(struct rte_eth_dev *dev)
{
	if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
		pthread_mutex_lock(&dev->data->flow_ops_mutex);
}

static inline void
fts_exit(struct rte_eth_dev *dev)
{
	if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
		pthread_mutex_unlock(&dev->data->flow_ops_mutex);
}

/*
 * Normalize a driver return value: if the device has been physically
 * removed, report EIO instead of the driver's error code.
 */
static int
flow_err(uint16_t port_id, int ret, struct rte_flow_error *error)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return rte_flow_error_set(error, EIO,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(EIO));
	return ret;
}

/* Get generic flow operations structure from a port. */
const struct rte_flow_ops *
rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops;
	int code;

	if (unlikely(!rte_eth_dev_is_valid_port(port_id)))
		code = ENODEV;
	else if (unlikely(dev->dev_ops->flow_ops_get == NULL))
		/* flow API not supported with this driver dev_ops */
		code = ENOSYS;
	else
		code = dev->dev_ops->flow_ops_get(dev, &ops);
	if (code == 0 && ops == NULL)
		/* flow API not supported with this device */
		code = ENOSYS;

	if (code != 0) {
		rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, rte_strerror(code));
		return NULL;
	}
	return ops;
}

/* Check whether a flow rule can be created on a given port. */
int
rte_flow_validate(uint16_t port_id,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;

	if (likely(!!attr) && attr->transfer &&
	    (attr->ingress || attr->egress)) {
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ATTR,
					  attr, "cannot use attr ingress/egress with attr transfer");
	}

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->validate)) {
		fts_enter(dev);
		ret = ops->validate(dev, attr, pattern, actions, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_validate(port_id, attr, pattern, actions, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}
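
/*
 * Illustrative usage sketch (application side), assuming a configured and
 * started port; attr, pattern and actions are hypothetical, pre-built
 * rule components:
 *
 *	struct rte_flow_error err;
 *	struct rte_flow *flow = NULL;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *	if (flow == NULL)
 *		printf("flow error: %s\n", err.message ? err.message : "n/a");
 */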

/* Create a flow rule on a given port. */
struct rte_flow *
rte_flow_create(uint16_t port_id,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct rte_flow *flow;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return NULL;
	if (likely(!!ops->create)) {
		fts_enter(dev);
		flow = ops->create(dev, attr, pattern, actions, error);
		fts_exit(dev);
		if (flow == NULL)
			flow_err(port_id, -rte_errno, error);

		rte_flow_trace_create(port_id, attr, pattern, actions, flow);

		return flow;
	}
	rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(ENOSYS));
	return NULL;
}

/* Destroy a flow rule on a given port. */
int
rte_flow_destroy(uint16_t port_id,
		 struct rte_flow *flow,
		 struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->destroy)) {
		fts_enter(dev);
		ret = ops->destroy(dev, flow, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_destroy(port_id, flow, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Update the actions of an existing flow rule on a given port. */
int
rte_flow_actions_update(uint16_t port_id,
			struct rte_flow *flow,
			const struct rte_flow_action actions[],
			struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->actions_update)) {
		fts_enter(dev);
		ret = ops->actions_update(dev, flow, actions, error);
		fts_exit(dev);

		rte_flow_trace_actions_update(port_id, flow, actions, ret);

		return flow_err(port_id, ret, error);
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Destroy all flow rules associated with a port. */
int
rte_flow_flush(uint16_t port_id,
	       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->flush)) {
		fts_enter(dev);
		ret = ops->flush(dev, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_flush(port_id, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Query an existing flow rule. */
int
rte_flow_query(uint16_t port_id,
	       struct rte_flow *flow,
	       const struct rte_flow_action *action,
	       void *data,
	       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (!ops)
		return -rte_errno;
	if (likely(!!ops->query)) {
		fts_enter(dev);
		ret = ops->query(dev, flow, action, data, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_query(port_id, flow, action, data, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Restrict ingress traffic to the defined flow rules. */
int
rte_flow_isolate(uint16_t port_id,
		 int set,
		 struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (!ops)
		return -rte_errno;
	if (likely(!!ops->isolate)) {
		fts_enter(dev);
		ret = ops->isolate(dev, set, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_isolate(port_id, set, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Initialize flow error structure. */
int
rte_flow_error_set(struct rte_flow_error *error,
		   int code,
		   enum rte_flow_error_type type,
		   const void *cause,
		   const char *message)
{
	if (error) {
		*error = (struct rte_flow_error){
			.type = type,
			.cause = cause,
			.message = message,
		};
	}
	rte_errno = code;
	return -code;
}
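
/*
 * Illustrative error-reporting sketch: both this library and drivers fill
 * struct rte_flow_error through rte_flow_error_set(), so callers can print
 * the failure cause after any flow call. A minimal pattern:
 *
 *	struct rte_flow_error err = { 0 };
 *
 *	if (rte_flow_flush(port_id, &err) != 0)
 *		printf("flush failed (%d): %s\n", rte_errno,
 *		       err.message ? err.message : rte_strerror(rte_errno));
 */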

/** Pattern item specification types. */
enum rte_flow_conv_item_spec_type {
	RTE_FLOW_CONV_ITEM_SPEC,
	RTE_FLOW_CONV_ITEM_LAST,
	RTE_FLOW_CONV_ITEM_MASK,
};

/**
 * Copy pattern item specification.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] item
 *   Pattern item to copy specification from.
 * @param type
 *   Specification selector for either @p spec, @p last or @p mask.
 *
 * @return
 *   Number of bytes needed to store pattern item specification regardless
 *   of @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_item_spec(void *buf, const size_t size,
			const struct rte_flow_item *item,
			enum rte_flow_conv_item_spec_type type)
{
	size_t off;
	const void *data =
		type == RTE_FLOW_CONV_ITEM_SPEC ? item->spec :
		type == RTE_FLOW_CONV_ITEM_LAST ? item->last :
		type == RTE_FLOW_CONV_ITEM_MASK ? item->mask :
		NULL;

	switch (item->type) {
		union {
			const struct rte_flow_item_raw *raw;
		} spec;
		union {
			const struct rte_flow_item_raw *raw;
		} last;
		union {
			const struct rte_flow_item_raw *raw;
		} mask;
		union {
			const struct rte_flow_item_raw *raw;
		} src;
		union {
			struct rte_flow_item_raw *raw;
		} dst;
		size_t tmp;

	case RTE_FLOW_ITEM_TYPE_RAW:
		spec.raw = item->spec;
		last.raw = item->last ? item->last : item->spec;
		mask.raw = item->mask ? item->mask : &rte_flow_item_raw_mask;
		src.raw = data;
		dst.raw = buf;
		rte_memcpy(dst.raw,
			   (&(struct rte_flow_item_raw){
				.relative = src.raw->relative,
				.search = src.raw->search,
				.reserved = src.raw->reserved,
				.offset = src.raw->offset,
				.limit = src.raw->limit,
				.length = src.raw->length,
			   }),
			   size > sizeof(*dst.raw) ? sizeof(*dst.raw) : size);
		off = sizeof(*dst.raw);
		if (type == RTE_FLOW_CONV_ITEM_SPEC ||
		    (type == RTE_FLOW_CONV_ITEM_MASK &&
		     ((spec.raw->length & mask.raw->length) >=
		      (last.raw->length & mask.raw->length))))
			tmp = spec.raw->length & mask.raw->length;
		else
			tmp = last.raw->length & mask.raw->length;
		if (tmp) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.raw->pattern));
			if (size >= off + tmp)
				dst.raw->pattern = rte_memcpy
					((void *)((uintptr_t)dst.raw + off),
					 src.raw->pattern, tmp);
			off += tmp;
		}
		break;
	default:
		off = rte_flow_conv_copy(buf, data, size,
					 rte_flow_desc_item, item->type);
		break;
	}
	return off;
}

/**
 * Copy action configuration.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] action
 *   Action to copy configuration from.
 *
 * @return
 *   Number of bytes needed to store the action configuration regardless
 *   of @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_action_conf(void *buf, const size_t size,
			  const struct rte_flow_action *action)
{
	size_t off;

	switch (action->type) {
		union {
			const struct rte_flow_action_rss *rss;
			const struct rte_flow_action_vxlan_encap *vxlan_encap;
			const struct rte_flow_action_nvgre_encap *nvgre_encap;
		} src;
		union {
			struct rte_flow_action_rss *rss;
			struct rte_flow_action_vxlan_encap *vxlan_encap;
			struct rte_flow_action_nvgre_encap *nvgre_encap;
		} dst;
		size_t tmp;
		int ret;

	case RTE_FLOW_ACTION_TYPE_RSS:
		src.rss = action->conf;
		dst.rss = buf;
		rte_memcpy(dst.rss,
			   (&(struct rte_flow_action_rss){
				.func = src.rss->func,
				.level = src.rss->level,
				.types = src.rss->types,
				.key_len = src.rss->key_len,
				.queue_num = src.rss->queue_num,
			   }),
			   size > sizeof(*dst.rss) ? sizeof(*dst.rss) : size);
		off = sizeof(*dst.rss);
		if (src.rss->key_len && src.rss->key) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->key));
			tmp = sizeof(*src.rss->key) * src.rss->key_len;
			if (size >= (uint64_t)off + (uint64_t)tmp)
				dst.rss->key = rte_memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->key, tmp);
			off += tmp;
		}
		if (src.rss->queue_num) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->queue));
			tmp = sizeof(*src.rss->queue) * src.rss->queue_num;
			if (size >= (uint64_t)off + (uint64_t)tmp)
				dst.rss->queue = rte_memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->queue, tmp);
			off += tmp;
		}
		break;
	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
	case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
		src.vxlan_encap = action->conf;
		dst.vxlan_encap = buf;
		RTE_BUILD_BUG_ON(sizeof(*src.vxlan_encap) !=
				 sizeof(*src.nvgre_encap) ||
				 offsetof(struct rte_flow_action_vxlan_encap,
					  definition) !=
				 offsetof(struct rte_flow_action_nvgre_encap,
					  definition));
		off = sizeof(*dst.vxlan_encap);
		if (src.vxlan_encap->definition) {
			off = RTE_ALIGN_CEIL
				(off, sizeof(*dst.vxlan_encap->definition));
			ret = rte_flow_conv
				(RTE_FLOW_CONV_OP_PATTERN,
				 (void *)((uintptr_t)dst.vxlan_encap + off),
				 size > off ? size - off : 0,
				 src.vxlan_encap->definition, NULL);
			if (ret < 0)
				return 0;
			if (size >= off + ret)
				dst.vxlan_encap->definition =
					(void *)((uintptr_t)dst.vxlan_encap +
						 off);
			off += ret;
		}
		break;
	default:
		off = rte_flow_conv_copy(buf, action->conf, size,
					 rte_flow_desc_action, action->type);
		break;
	}
	return off;
}

/**
 * Copy a list of pattern items.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source pattern items.
 * @param num
 *   Maximum number of pattern items to process from @p src or 0 to process
 *   the entire list. In both cases, processing stops after
 *   RTE_FLOW_ITEM_TYPE_END is encountered.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store
 *   pattern items regardless of @p size on success (@p dst contents are
 *   truncated to @p size if not large enough), a negative errno value
 *   otherwise and rte_errno is set.
 */
static int
rte_flow_conv_pattern(struct rte_flow_item *dst,
		      const size_t size,
		      const struct rte_flow_item *src,
		      unsigned int num,
		      struct rte_flow_error *error)
{
	uintptr_t data = (uintptr_t)dst;
	size_t off;
	size_t ret;
	unsigned int i;

	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
		/* Allow PMD private flow item. */
		if (((int)src->type >= 0) &&
		    ((size_t)src->type >= RTE_DIM(rte_flow_desc_item) ||
		     !rte_flow_desc_item[src->type].name))
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, src,
				 "cannot convert unknown item type");
		if (size >= off + sizeof(*dst))
			*dst = (struct rte_flow_item){
				.type = src->type,
			};
		off += sizeof(*dst);
		if (!src->type)
			num = i + 1;
	}
	num = i;
	src -= num;
	dst -= num;
	do {
		if (src->spec) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_SPEC);
			if (size && size >= off + ret)
				dst->spec = (void *)(data + off);
			off += ret;
		}
		if (src->last) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_LAST);
			if (size && size >= off + ret)
				dst->last = (void *)(data + off);
			off += ret;
		}
		if (src->mask) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_MASK);
			if (size && size >= off + ret)
				dst->mask = (void *)(data + off);
			off += ret;
		}
		++src;
		++dst;
	} while (--num);
	return off;
}

/**
 * Copy a list of actions.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source actions.
 * @param num
 *   Maximum number of actions to process from @p src or 0 to process the
 *   entire list. In both cases, processing stops after
 *   RTE_FLOW_ACTION_TYPE_END is encountered.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store
 *   actions regardless of @p size on success (@p dst contents are truncated
 *   to @p size if not large enough), a negative errno value otherwise and
 *   rte_errno is set.
 */
static int
rte_flow_conv_actions(struct rte_flow_action *dst,
		      const size_t size,
		      const struct rte_flow_action *src,
		      unsigned int num,
		      struct rte_flow_error *error)
{
	uintptr_t data = (uintptr_t)dst;
	size_t off;
	size_t ret;
	unsigned int i;

	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
		/* Allow PMD private flow action. */
		if (((int)src->type >= 0) &&
		    ((size_t)src->type >= RTE_DIM(rte_flow_desc_action) ||
		     !rte_flow_desc_action[src->type].name))
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
				 src, "cannot convert unknown action type");
		if (size >= off + sizeof(*dst))
			*dst = (struct rte_flow_action){
				.type = src->type,
			};
		off += sizeof(*dst);
		if (!src->type)
			num = i + 1;
	}
	num = i;
	src -= num;
	dst -= num;
	do {
		if (src->type == RTE_FLOW_ACTION_TYPE_INDIRECT) {
			/*
			 * Indirect action conf holds the indirect action
			 * handle. Copy the action handle directly instead
			 * of duplicating the pointed-to memory.
			 */
			if (size)
				dst->conf = src->conf;
		} else if (src->conf) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_action_conf
				((void *)(data + off),
				 size > off ? size - off : 0, src);
			if (size && size >= off + ret)
				dst->conf = (void *)(data + off);
			off += ret;
		}
		++src;
		++dst;
	} while (--num);
	return off;
}

/**
 * Copy flow rule components.
 *
 * This comprises the flow rule descriptor itself, attributes, pattern and
 * actions list. NULL components in @p src are skipped.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source flow rule descriptor.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store all
 *   components including the descriptor regardless of @p size on success
 *   (@p dst contents are truncated to @p size if not large enough), a
 *   negative errno value otherwise and rte_errno is set.
 */
static int
rte_flow_conv_rule(struct rte_flow_conv_rule *dst,
		   const size_t size,
		   const struct rte_flow_conv_rule *src,
		   struct rte_flow_error *error)
{
	size_t off;
	int ret;

	rte_memcpy(dst,
		   (&(struct rte_flow_conv_rule){
			.attr = NULL,
			.pattern = NULL,
			.actions = NULL,
		   }),
		   size > sizeof(*dst) ? sizeof(*dst) : size);
	off = sizeof(*dst);
	if (src->attr_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		if (size && size >= off + sizeof(*dst->attr))
			dst->attr = rte_memcpy
				((void *)((uintptr_t)dst + off),
				 src->attr_ro, sizeof(*dst->attr));
		off += sizeof(*dst->attr);
	}
	if (src->pattern_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		ret = rte_flow_conv_pattern((void *)((uintptr_t)dst + off),
					    size > off ? size - off : 0,
					    src->pattern_ro, 0, error);
		if (ret < 0)
			return ret;
		if (size && size >= off + (size_t)ret)
			dst->pattern = (void *)((uintptr_t)dst + off);
		off += ret;
	}
	if (src->actions_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		ret = rte_flow_conv_actions((void *)((uintptr_t)dst + off),
					    size > off ? size - off : 0,
					    src->actions_ro, 0, error);
		if (ret < 0)
			return ret;
		if (size >= off + (size_t)ret)
			dst->actions = (void *)((uintptr_t)dst + off);
		off += ret;
	}
	return off;
}

/**
 * Retrieve the name of a pattern item/action type.
 *
 * @param is_action
 *   Nonzero when @p src represents an action type instead of a pattern item
 *   type.
 * @param is_ptr
 *   Nonzero to write string address instead of contents into @p dst.
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Depending on @p is_action, source pattern item or action type cast as a
 *   pointer.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store the
 *   name or its address regardless of @p size on success (@p dst contents
 *   are truncated to @p size if not large enough), a negative errno value
 *   otherwise and rte_errno is set.
 */
static int
rte_flow_conv_name(int is_action,
		   int is_ptr,
		   char *dst,
		   const size_t size,
		   const void *src,
		   struct rte_flow_error *error)
{
	struct desc_info {
		const struct rte_flow_desc_data *data;
		size_t num;
	};
	static const struct desc_info info_rep[2] = {
		{ rte_flow_desc_item, RTE_DIM(rte_flow_desc_item), },
		{ rte_flow_desc_action, RTE_DIM(rte_flow_desc_action), },
	};
	const struct desc_info *const info = &info_rep[!!is_action];
	unsigned int type = (uintptr_t)src;

	if (type >= info->num)
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			 "unknown object type to retrieve the name of");
	if (!is_ptr)
		return strlcpy(dst, info->data[type].name, size);
	if (size >= sizeof(const char **))
		*((const char **)dst) = info->data[type].name;
	return sizeof(const char **);
}

/** Helper function to convert flow API objects. */
int
rte_flow_conv(enum rte_flow_conv_op op,
	      void *dst,
	      size_t size,
	      const void *src,
	      struct rte_flow_error *error)
{
	int ret;

	switch (op) {
		const struct rte_flow_attr *attr;

	case RTE_FLOW_CONV_OP_NONE:
		ret = 0;
		break;
	case RTE_FLOW_CONV_OP_ATTR:
		attr = src;
		if (size > sizeof(*attr))
			size = sizeof(*attr);
		rte_memcpy(dst, attr, size);
		ret = sizeof(*attr);
		break;
	case RTE_FLOW_CONV_OP_ITEM:
		ret = rte_flow_conv_pattern(dst, size, src, 1, error);
		break;
	case RTE_FLOW_CONV_OP_ACTION:
		ret = rte_flow_conv_actions(dst, size, src, 1, error);
		break;
	case RTE_FLOW_CONV_OP_PATTERN:
		ret = rte_flow_conv_pattern(dst, size, src, 0, error);
		break;
	case RTE_FLOW_CONV_OP_ACTIONS:
		ret = rte_flow_conv_actions(dst, size, src, 0, error);
		break;
	case RTE_FLOW_CONV_OP_RULE:
		ret = rte_flow_conv_rule(dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ITEM_NAME:
		ret = rte_flow_conv_name(0, 0, dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ACTION_NAME:
		ret = rte_flow_conv_name(1, 0, dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ITEM_NAME_PTR:
		ret = rte_flow_conv_name(0, 1, dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ACTION_NAME_PTR:
		ret = rte_flow_conv_name(1, 1, dst, size, src, error);
		break;
	default:
		ret = rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			 "unknown object conversion operation");
	}

	rte_flow_trace_conv(op, dst, size, src, ret);

	return ret;
}
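
/*
 * Illustrative two-pass usage sketch for rte_flow_conv(): query the
 * required size with a zero-sized destination, then convert for real.
 * Identifiers other than the rte_flow API are hypothetical.
 *
 *	struct rte_flow_error err;
 *	struct rte_flow_conv_rule rule = {
 *		.attr_ro = attr,
 *		.pattern_ro = pattern,
 *		.actions_ro = actions,
 *	};
 *	int len = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, &err);
 *
 *	if (len > 0) {
 *		struct rte_flow_conv_rule *copy = malloc(len);
 *
 *		if (copy != NULL)
 *			rte_flow_conv(RTE_FLOW_CONV_OP_RULE, copy, len,
 *				      &rule, &err);
 *	}
 */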

/** Store a full rte_flow description. */
size_t
rte_flow_copy(struct rte_flow_desc *desc, size_t len,
	      const struct rte_flow_attr *attr,
	      const struct rte_flow_item *items,
	      const struct rte_flow_action *actions)
{
	/*
	 * Overlap struct rte_flow_conv_rule with struct rte_flow_desc in
	 * order to convert the former to the latter without wasting space.
	 */
	struct rte_flow_conv_rule *dst =
		len ?
		(void *)((uintptr_t)desc +
			 (offsetof(struct rte_flow_desc, actions) -
			  offsetof(struct rte_flow_conv_rule, actions))) :
		NULL;
	size_t dst_size =
		len > sizeof(*desc) - sizeof(*dst) ?
		len - (sizeof(*desc) - sizeof(*dst)) :
		0;
	struct rte_flow_conv_rule src = {
		.attr_ro = NULL,
		.pattern_ro = items,
		.actions_ro = actions,
	};
	int ret;

	RTE_BUILD_BUG_ON(sizeof(struct rte_flow_desc) <
			 sizeof(struct rte_flow_conv_rule));
	if (dst_size &&
	    (&dst->pattern != &desc->items ||
	     &dst->actions != &desc->actions ||
	     (uintptr_t)(dst + 1) != (uintptr_t)(desc + 1))) {
		rte_errno = EINVAL;
		return 0;
	}
	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, dst, dst_size, &src, NULL);
	if (ret < 0)
		return 0;
	ret += sizeof(*desc) - sizeof(*dst);
	rte_memcpy(desc,
		   (&(struct rte_flow_desc){
			.size = ret,
			.attr = *attr,
			.items = dst_size ? dst->pattern : NULL,
			.actions = dst_size ? dst->actions : NULL,
		   }),
		   len > sizeof(*desc) ? sizeof(*desc) : len);

	rte_flow_trace_copy(desc, len, attr, items, actions, ret);

	return ret;
}

int
rte_flow_dev_dump(uint16_t port_id, struct rte_flow *flow,
		  FILE *file, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->dev_dump)) {
		fts_enter(dev);
		ret = ops->dev_dump(dev, flow, file, error);
		fts_exit(dev);
		return flow_err(port_id, ret, error);
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

int
rte_flow_get_aged_flows(uint16_t port_id, void **contexts,
			uint32_t nb_contexts, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->get_aged_flows)) {
		fts_enter(dev);
		ret = ops->get_aged_flows(dev, contexts, nb_contexts, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_get_aged_flows(port_id, contexts, nb_contexts, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_get_q_aged_flows(uint16_t port_id, uint32_t queue_id, void **contexts,
			  uint32_t nb_contexts, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->get_q_aged_flows)) {
		fts_enter(dev);
		ret = ops->get_q_aged_flows(dev, queue_id, contexts,
					    nb_contexts, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_get_q_aged_flows(port_id, queue_id, contexts,
						nb_contexts, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

struct rte_flow_action_handle *
rte_flow_action_handle_create(uint16_t port_id,
			      const struct rte_flow_indir_action_conf *conf,
			      const struct rte_flow_action *action,
			      struct rte_flow_error *error)
{
	struct rte_flow_action_handle *handle;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return NULL;
	if (unlikely(!ops->action_handle_create)) {
		rte_flow_error_set(error, ENOSYS,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   rte_strerror(ENOSYS));
		return NULL;
	}
	handle = ops->action_handle_create(&rte_eth_devices[port_id],
					   conf, action, error);
	if (handle == NULL)
		flow_err(port_id, -rte_errno, error);

	rte_flow_trace_action_handle_create(port_id, conf, action, handle);

	return handle;
}
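
/*
 * Illustrative usage sketch for indirect actions: create a shared counter
 * once, reference it from flow rules through RTE_FLOW_ACTION_TYPE_INDIRECT,
 * and destroy it when no rule uses it anymore. Variable names here are
 * hypothetical.
 *
 *	const struct rte_flow_indir_action_conf conf = {
 *		.ingress = 1,
 *	};
 *	const struct rte_flow_action count = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *	};
 *	struct rte_flow_action_handle *h =
 *		rte_flow_action_handle_create(port_id, &conf, &count, &err);
 *	const struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_INDIRECT, .conf = h },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */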

int
rte_flow_action_handle_destroy(uint16_t port_id,
			       struct rte_flow_action_handle *handle,
			       struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(!ops->action_handle_destroy))
		return rte_flow_error_set(error, ENOSYS,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOSYS));
	ret = ops->action_handle_destroy(&rte_eth_devices[port_id],
					 handle, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_action_handle_destroy(port_id, handle, ret);

	return ret;
}

int
rte_flow_action_handle_update(uint16_t port_id,
			      struct rte_flow_action_handle *handle,
			      const void *update,
			      struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(!ops->action_handle_update))
		return rte_flow_error_set(error, ENOSYS,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOSYS));
	ret = ops->action_handle_update(&rte_eth_devices[port_id], handle,
					update, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_action_handle_update(port_id, handle, update, ret);

	return ret;
}

int
rte_flow_action_handle_query(uint16_t port_id,
			     const struct rte_flow_action_handle *handle,
			     void *data,
			     struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(!ops->action_handle_query))
		return rte_flow_error_set(error, ENOSYS,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOSYS));
	ret = ops->action_handle_query(&rte_eth_devices[port_id], handle,
				       data, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_action_handle_query(port_id, handle, data, ret);

	return ret;
}

int
rte_flow_tunnel_decap_set(uint16_t port_id,
			  struct rte_flow_tunnel *tunnel,
			  struct rte_flow_action **actions,
			  uint32_t *num_of_actions,
			  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_decap_set)) {
		ret = flow_err(port_id,
			       ops->tunnel_decap_set(dev, tunnel, actions,
						     num_of_actions, error),
			       error);

		rte_flow_trace_tunnel_decap_set(port_id, tunnel, actions,
						num_of_actions, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_tunnel_match(uint16_t port_id,
		      struct rte_flow_tunnel *tunnel,
		      struct rte_flow_item **items,
		      uint32_t *num_of_items,
		      struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_match)) {
		ret = flow_err(port_id,
			       ops->tunnel_match(dev, tunnel, items,
						 num_of_items, error),
			       error);

		rte_flow_trace_tunnel_match(port_id, tunnel, items, num_of_items,
					    ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_get_restore_info(uint16_t port_id,
			  struct rte_mbuf *m,
			  struct rte_flow_restore_info *restore_info,
			  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->get_restore_info)) {
		ret = flow_err(port_id,
			       ops->get_restore_info(dev, m, restore_info,
						     error),
			       error);

		rte_flow_trace_get_restore_info(port_id, m, restore_info, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

static struct {
	const struct rte_mbuf_dynflag desc;
	uint64_t value;
} flow_restore_info_dynflag = {
	.desc = { .name = "RTE_MBUF_F_RX_RESTORE_INFO", },
};

uint64_t
rte_flow_restore_info_dynflag(void)
{
	return flow_restore_info_dynflag.value;
}

int
rte_flow_restore_info_dynflag_register(void)
{
	if (flow_restore_info_dynflag.value == 0) {
		int offset = rte_mbuf_dynflag_register(&flow_restore_info_dynflag.desc);

		if (offset < 0)
			return -1;
		flow_restore_info_dynflag.value = RTE_BIT64(offset);
	}

	return 0;
}
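
/*
 * Illustrative usage sketch: once the restore-info dynflag is registered
 * and tunnel offload is active, packets carrying restore information have
 * this flag set in ol_flags. Hypothetical receive-path snippet, where
 * handle_tunnel_packet() is an application-defined function:
 *
 *	uint64_t flag = rte_flow_restore_info_dynflag();
 *	struct rte_flow_restore_info info;
 *
 *	if ((mbuf->ol_flags & flag) != 0 &&
 *	    rte_flow_get_restore_info(port_id, mbuf, &info, &err) == 0 &&
 *	    (info.flags & RTE_FLOW_RESTORE_INFO_TUNNEL) != 0)
 *		handle_tunnel_packet(mbuf, &info.tunnel);
 */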

int
rte_flow_tunnel_action_decap_release(uint16_t port_id,
				     struct rte_flow_action *actions,
				     uint32_t num_of_actions,
				     struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_action_decap_release)) {
		ret = flow_err(port_id,
			       ops->tunnel_action_decap_release(dev, actions,
								num_of_actions,
								error),
			       error);

		rte_flow_trace_tunnel_action_decap_release(port_id, actions,
							   num_of_actions, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_tunnel_item_release(uint16_t port_id,
			     struct rte_flow_item *items,
			     uint32_t num_of_items,
			     struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_item_release)) {
		ret = flow_err(port_id,
			       ops->tunnel_item_release(dev, items,
							num_of_items, error),
			       error);

		rte_flow_trace_tunnel_item_release(port_id, items, num_of_items, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_pick_transfer_proxy(uint16_t port_id, uint16_t *proxy_port_id,
			     struct rte_flow_error *error)
{
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_eth_dev *dev;
	int ret;

	if (unlikely(ops == NULL))
		return -rte_errno;

	if (ops->pick_transfer_proxy == NULL) {
		*proxy_port_id = port_id;
		return 0;
	}

	dev = &rte_eth_devices[port_id];

	ret = flow_err(port_id,
		       ops->pick_transfer_proxy(dev, proxy_port_id, error),
		       error);

	rte_flow_trace_pick_transfer_proxy(port_id, proxy_port_id, ret);

	return ret;
}

struct rte_flow_item_flex_handle *
rte_flow_flex_item_create(uint16_t port_id,
			  const struct rte_flow_item_flex_conf *conf,
			  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow_item_flex_handle *handle;

	if (unlikely(!ops))
		return NULL;
	if (unlikely(!ops->flex_item_create)) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, rte_strerror(ENOTSUP));
		return NULL;
	}
	handle = ops->flex_item_create(dev, conf, error);
	if (handle == NULL)
		flow_err(port_id, -rte_errno, error);

	rte_flow_trace_flex_item_create(port_id, conf, handle);

	return handle;
}

int
rte_flow_flex_item_release(uint16_t port_id,
			   const struct rte_flow_item_flex_handle *handle,
			   struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops || !ops->flex_item_release))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOTSUP));
	ret = ops->flex_item_release(dev, handle, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_flex_item_release(port_id, handle, ret);

	return ret;
}

int
rte_flow_info_get(uint16_t port_id,
		  struct rte_flow_port_info *port_info,
		  struct rte_flow_queue_info *queue_info,
		  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (dev->data->dev_configured == 0) {
		FLOW_LOG(INFO,
			"Device with port_id=%"PRIu16" is not configured.",
			port_id);
		return -EINVAL;
	}
	if (port_info == NULL) {
		FLOW_LOG(ERR, "Port %"PRIu16" info is NULL.", port_id);
		return -EINVAL;
	}
	if (likely(!!ops->info_get)) {
		ret = flow_err(port_id,
			       ops->info_get(dev, port_info, queue_info, error),
			       error);

		rte_flow_trace_info_get(port_id, port_info, queue_info, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_configure(uint16_t port_id,
		   const struct rte_flow_port_attr *port_attr,
		   uint16_t nb_queue,
		   const struct rte_flow_queue_attr *queue_attr[],
		   struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (dev->data->dev_configured == 0) {
		FLOW_LOG(INFO,
			"Device with port_id=%"PRIu16" is not configured.",
			port_id);
		return -EINVAL;
	}
	if (dev->data->dev_started != 0) {
		FLOW_LOG(INFO,
			"Device with port_id=%"PRIu16" already started.",
			port_id);
		return -EINVAL;
	}
	if (port_attr == NULL) {
		FLOW_LOG(ERR, "Port %"PRIu16" port attr is NULL.", port_id);
		return -EINVAL;
	}
	if (queue_attr == NULL) {
		FLOW_LOG(ERR, "Port %"PRIu16" queue attr is NULL.", port_id);
		return -EINVAL;
	}
	if ((port_attr->flags & RTE_FLOW_PORT_FLAG_SHARE_INDIRECT) &&
	    !rte_eth_dev_is_valid_port(port_attr->host_port_id)) {
		return rte_flow_error_set(error, ENODEV,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENODEV));
	}
	if (likely(!!ops->configure)) {
		ret = ops->configure(dev, port_attr, nb_queue, queue_attr, error);
		if (ret == 0)
			dev->data->flow_configured = 1;
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_configure(port_id, port_attr, nb_queue, queue_attr, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}
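
/*
 * Illustrative setup sketch for the asynchronous (template) flow API:
 * query capabilities, then configure flow queues before the port starts.
 * Sizes and variable names here are hypothetical.
 *
 *	struct rte_flow_port_info pinfo;
 *	struct rte_flow_queue_info qinfo;
 *	const struct rte_flow_port_attr pattr = { .nb_counters = 1024 };
 *	const struct rte_flow_queue_attr qattr = { .size = 64 };
 *	const struct rte_flow_queue_attr *qattrs[] = { &qattr };
 *
 *	if (rte_flow_info_get(port_id, &pinfo, &qinfo, &err) == 0)
 *		rte_flow_configure(port_id, &pattr, 1, qattrs, &err);
 */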
1697 
1698 struct rte_flow_pattern_template *
1699 rte_flow_pattern_template_create(uint16_t port_id,
1700 		const struct rte_flow_pattern_template_attr *template_attr,
1701 		const struct rte_flow_item pattern[],
1702 		struct rte_flow_error *error)
1703 {
1704 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1705 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1706 	struct rte_flow_pattern_template *template;
1707 
1708 	if (unlikely(!ops))
1709 		return NULL;
1710 	if (dev->data->flow_configured == 0) {
1711 		FLOW_LOG(INFO,
1712 			"Flow engine on port_id=%"PRIu16" is not configured.",
1713 			port_id);
1714 		rte_flow_error_set(error, EINVAL,
1715 				RTE_FLOW_ERROR_TYPE_STATE,
1716 				NULL, rte_strerror(EINVAL));
1717 		return NULL;
1718 	}
1719 	if (template_attr == NULL) {
1720 		FLOW_LOG(ERR,
1721 			     "Port %"PRIu16" template attr is NULL.",
1722 			     port_id);
1723 		rte_flow_error_set(error, EINVAL,
1724 				   RTE_FLOW_ERROR_TYPE_ATTR,
1725 				   NULL, rte_strerror(EINVAL));
1726 		return NULL;
1727 	}
1728 	if (pattern == NULL) {
1729 		FLOW_LOG(ERR,
1730 			     "Port %"PRIu16" pattern is NULL.",
1731 			     port_id);
1732 		rte_flow_error_set(error, EINVAL,
1733 				   RTE_FLOW_ERROR_TYPE_ATTR,
1734 				   NULL, rte_strerror(EINVAL));
1735 		return NULL;
1736 	}
1737 	if (likely(!!ops->pattern_template_create)) {
1738 		template = ops->pattern_template_create(dev, template_attr,
1739 							pattern, error);
1740 		if (template == NULL)
1741 			flow_err(port_id, -rte_errno, error);
1742 
1743 		rte_flow_trace_pattern_template_create(port_id, template_attr,
1744 						       pattern, template);
1745 
1746 		return template;
1747 	}
1748 	rte_flow_error_set(error, ENOTSUP,
1749 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1750 			   NULL, rte_strerror(ENOTSUP));
1751 	return NULL;
1752 }
1753 
1754 int
1755 rte_flow_pattern_template_destroy(uint16_t port_id,
1756 		struct rte_flow_pattern_template *pattern_template,
1757 		struct rte_flow_error *error)
1758 {
1759 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1760 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1761 	int ret;
1762 
1763 	if (unlikely(!ops))
1764 		return -rte_errno;
1765 	if (unlikely(pattern_template == NULL))
1766 		return 0;
1767 	if (likely(!!ops->pattern_template_destroy)) {
1768 		ret = flow_err(port_id,
1769 			       ops->pattern_template_destroy(dev,
1770 							     pattern_template,
1771 							     error),
1772 			       error);
1773 
1774 		rte_flow_trace_pattern_template_destroy(port_id, pattern_template,
1775 							ret);
1776 
1777 		return ret;
1778 	}
1779 	return rte_flow_error_set(error, ENOTSUP,
1780 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1781 				  NULL, rte_strerror(ENOTSUP));
1782 }
1783 
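/*
 * An actions template pairs an action list with a mask list: fully
 * masked action fields are frozen when the template is created, while
 * unmasked fields are supplied per rule at enqueue time. Sketch of an
 * unconditional DROP, where the actions array doubles as its own mask
 * because DROP carries no configuration (names continue the sketches
 * above):
 *
 *	const struct rte_flow_actions_template_attr at_attr = {
 *		.ingress = 1,
 *	};
 *	const struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_actions_template *at =
 *		rte_flow_actions_template_create(port_id, &at_attr,
 *						 actions, actions,
 *						 &flow_error);
 */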
1784 struct rte_flow_actions_template *
1785 rte_flow_actions_template_create(uint16_t port_id,
1786 			const struct rte_flow_actions_template_attr *template_attr,
1787 			const struct rte_flow_action actions[],
1788 			const struct rte_flow_action masks[],
1789 			struct rte_flow_error *error)
1790 {
1791 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1792 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1793 	struct rte_flow_actions_template *template;
1794 
1795 	if (unlikely(!ops))
1796 		return NULL;
1797 	if (dev->data->flow_configured == 0) {
1798 		FLOW_LOG(INFO,
1799 			"Flow engine on port_id=%"PRIu16" is not configured.",
1800 			port_id);
1801 		rte_flow_error_set(error, EINVAL,
1802 				   RTE_FLOW_ERROR_TYPE_STATE,
1803 				   NULL, rte_strerror(EINVAL));
1804 		return NULL;
1805 	}
1806 	if (template_attr == NULL) {
1807 		FLOW_LOG(ERR,
1808 			     "Port %"PRIu16" template attr is NULL.",
1809 			     port_id);
1810 		rte_flow_error_set(error, EINVAL,
1811 				   RTE_FLOW_ERROR_TYPE_ATTR,
1812 				   NULL, rte_strerror(EINVAL));
1813 		return NULL;
1814 	}
1815 	if (actions == NULL) {
1816 		FLOW_LOG(ERR,
1817 			     "Port %"PRIu16" actions is NULL.",
1818 			     port_id);
1819 		rte_flow_error_set(error, EINVAL,
1820 				   RTE_FLOW_ERROR_TYPE_ATTR,
1821 				   NULL, rte_strerror(EINVAL));
1822 		return NULL;
1823 	}
1824 	if (masks == NULL) {
1825 		FLOW_LOG(ERR,
1826 			     "Port %"PRIu16" masks is NULL.",
1827 			     port_id);
1828 		rte_flow_error_set(error, EINVAL,
1829 				   RTE_FLOW_ERROR_TYPE_ATTR,
1830 				   NULL, rte_strerror(EINVAL));
1831 		return NULL;
1832 	}
1833 	if (likely(!!ops->actions_template_create)) {
1834 		template = ops->actions_template_create(dev, template_attr,
1835 							actions, masks, error);
1836 		if (template == NULL)
1837 			flow_err(port_id, -rte_errno, error);
1838 
1839 		rte_flow_trace_actions_template_create(port_id, template_attr, actions,
1840 						       masks, template);
1841 
1842 		return template;
1843 	}
1844 	rte_flow_error_set(error, ENOTSUP,
1845 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1846 			   NULL, rte_strerror(ENOTSUP));
1847 	return NULL;
1848 }
1849 
1850 int
1851 rte_flow_actions_template_destroy(uint16_t port_id,
1852 			struct rte_flow_actions_template *actions_template,
1853 			struct rte_flow_error *error)
1854 {
1855 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1856 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1857 	int ret;
1858 
1859 	if (unlikely(!ops))
1860 		return -rte_errno;
1861 	if (unlikely(actions_template == NULL))
1862 		return 0;
1863 	if (likely(!!ops->actions_template_destroy)) {
1864 		ret = flow_err(port_id,
1865 			       ops->actions_template_destroy(dev,
1866 							     actions_template,
1867 							     error),
1868 			       error);
1869 
1870 		rte_flow_trace_actions_template_destroy(port_id, actions_template,
1871 							ret);
1872 
1873 		return ret;
1874 	}
1875 	return rte_flow_error_set(error, ENOTSUP,
1876 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1877 				  NULL, rte_strerror(ENOTSUP));
1878 }
1879 
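/*
 * A template table binds pattern and actions templates together and
 * pre-allocates room for nb_flows rules, so rule insertion on the
 * async path needs no further allocation. Sketch continuing the ones
 * above (nb_flows is illustrative):
 *
 *	const struct rte_flow_template_table_attr table_attr = {
 *		.flow_attr = { .ingress = 1 },
 *		.nb_flows = 1024,
 *	};
 *	struct rte_flow_template_table *table =
 *		rte_flow_template_table_create(port_id, &table_attr,
 *					       &pt, 1, &at, 1,
 *					       &flow_error);
 */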
1880 struct rte_flow_template_table *
1881 rte_flow_template_table_create(uint16_t port_id,
1882 			const struct rte_flow_template_table_attr *table_attr,
1883 			struct rte_flow_pattern_template *pattern_templates[],
1884 			uint8_t nb_pattern_templates,
1885 			struct rte_flow_actions_template *actions_templates[],
1886 			uint8_t nb_actions_templates,
1887 			struct rte_flow_error *error)
1888 {
1889 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1890 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1891 	struct rte_flow_template_table *table;
1892 
1893 	if (unlikely(!ops))
1894 		return NULL;
1895 	if (dev->data->flow_configured == 0) {
1896 		FLOW_LOG(INFO,
1897 			"Flow engine on port_id=%"PRIu16" is not configured.",
1898 			port_id);
1899 		rte_flow_error_set(error, EINVAL,
1900 				   RTE_FLOW_ERROR_TYPE_STATE,
1901 				   NULL, rte_strerror(EINVAL));
1902 		return NULL;
1903 	}
1904 	if (table_attr == NULL) {
1905 		FLOW_LOG(ERR,
1906 			     "Port %"PRIu16" table attr is NULL.",
1907 			     port_id);
1908 		rte_flow_error_set(error, EINVAL,
1909 				   RTE_FLOW_ERROR_TYPE_ATTR,
1910 				   NULL, rte_strerror(EINVAL));
1911 		return NULL;
1912 	}
1913 	if (pattern_templates == NULL) {
1914 		FLOW_LOG(ERR,
1915 			     "Port %"PRIu16" pattern templates is NULL.",
1916 			     port_id);
1917 		rte_flow_error_set(error, EINVAL,
1918 				   RTE_FLOW_ERROR_TYPE_ATTR,
1919 				   NULL, rte_strerror(EINVAL));
1920 		return NULL;
1921 	}
1922 	if (actions_templates == NULL) {
1923 		FLOW_LOG(ERR,
1924 			     "Port %"PRIu16" actions templates is NULL.",
1925 			     port_id);
1926 		rte_flow_error_set(error, EINVAL,
1927 				   RTE_FLOW_ERROR_TYPE_ATTR,
1928 				   NULL, rte_strerror(EINVAL));
1929 		return NULL;
1930 	}
1931 	if (likely(!!ops->template_table_create)) {
1932 		table = ops->template_table_create(dev, table_attr,
1933 					pattern_templates, nb_pattern_templates,
1934 					actions_templates, nb_actions_templates,
1935 					error);
1936 		if (table == NULL)
1937 			flow_err(port_id, -rte_errno, error);
1938 
1939 		rte_flow_trace_template_table_create(port_id, table_attr,
1940 						     pattern_templates,
1941 						     nb_pattern_templates,
1942 						     actions_templates,
1943 						     nb_actions_templates, table);
1944 
1945 		return table;
1946 	}
1947 	rte_flow_error_set(error, ENOTSUP,
1948 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1949 			   NULL, rte_strerror(ENOTSUP));
1950 	return NULL;
1951 }
1952 
1953 int
1954 rte_flow_template_table_destroy(uint16_t port_id,
1955 				struct rte_flow_template_table *template_table,
1956 				struct rte_flow_error *error)
1957 {
1958 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1959 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1960 	int ret;
1961 
1962 	if (unlikely(!ops))
1963 		return -rte_errno;
1964 	if (unlikely(template_table == NULL))
1965 		return 0;
1966 	if (likely(!!ops->template_table_destroy)) {
1967 		ret = flow_err(port_id,
1968 			       ops->template_table_destroy(dev,
1969 							   template_table,
1970 							   error),
1971 			       error);
1972 
1973 		rte_flow_trace_template_table_destroy(port_id, template_table,
1974 						      ret);
1975 
1976 		return ret;
1977 	}
1978 	return rte_flow_error_set(error, ENOTSUP,
1979 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1980 				  NULL, rte_strerror(ENOTSUP));
1981 }
1982 
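/*
 * Group miss actions are applied to packets that reach the end of the
 * given group without matching any rule in it.
 */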
1983 int
1984 rte_flow_group_set_miss_actions(uint16_t port_id,
1985 				uint32_t group_id,
1986 				const struct rte_flow_group_attr *attr,
1987 				const struct rte_flow_action actions[],
1988 				struct rte_flow_error *error)
1989 {
1990 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1991 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1992 
1993 	if (unlikely(!ops))
1994 		return -rte_errno;
1995 	if (likely(!!ops->group_set_miss_actions)) {
1996 		return flow_err(port_id,
1997 				ops->group_set_miss_actions(dev, group_id, attr, actions, error),
1998 				error);
1999 	}
2000 	return rte_flow_error_set(error, ENOTSUP,
2001 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2002 				  NULL, rte_strerror(ENOTSUP));
2003 }
2004 
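/*
 * Most of the rte_flow_async_*() enqueue helpers below follow the
 * fast-path contract: unlike the setup calls above they skip port and
 * capability re-validation, so they must only be called on ports where
 * rte_flow_configure() succeeded. Enqueue sketch reusing the names
 * from the sketches above; the template indices refer to the order in
 * which templates were passed at table creation:
 *
 *	const struct rte_flow_op_attr op_attr = { .postpone = 0 };
 *	struct rte_flow *flow =
 *		rte_flow_async_create(port_id, 0, &op_attr, table,
 *				      pattern, 0, actions, 0,
 *				      NULL, &flow_error);
 */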
2005 struct rte_flow *
2006 rte_flow_async_create(uint16_t port_id,
2007 		      uint32_t queue_id,
2008 		      const struct rte_flow_op_attr *op_attr,
2009 		      struct rte_flow_template_table *template_table,
2010 		      const struct rte_flow_item pattern[],
2011 		      uint8_t pattern_template_index,
2012 		      const struct rte_flow_action actions[],
2013 		      uint8_t actions_template_index,
2014 		      void *user_data,
2015 		      struct rte_flow_error *error)
2016 {
2017 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2018 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2019 	struct rte_flow *flow;
2020 
2021 	flow = ops->async_create(dev, queue_id,
2022 				 op_attr, template_table,
2023 				 pattern, pattern_template_index,
2024 				 actions, actions_template_index,
2025 				 user_data, error);
2026 	if (flow == NULL)
2027 		flow_err(port_id, -rte_errno, error);
2028 
2029 	rte_flow_trace_async_create(port_id, queue_id, op_attr, template_table,
2030 				    pattern, pattern_template_index, actions,
2031 				    actions_template_index, user_data, flow);
2032 
2033 	return flow;
2034 }
2035 
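/*
 * Insertion by index targets tables created with index-based insertion:
 * the slot is addressed directly by rule_index and no pattern is
 * matched, which is why this variant takes no pattern template index.
 */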
2036 struct rte_flow *
2037 rte_flow_async_create_by_index(uint16_t port_id,
2038 			       uint32_t queue_id,
2039 			       const struct rte_flow_op_attr *op_attr,
2040 			       struct rte_flow_template_table *template_table,
2041 			       uint32_t rule_index,
2042 			       const struct rte_flow_action actions[],
2043 			       uint8_t actions_template_index,
2044 			       void *user_data,
2045 			       struct rte_flow_error *error)
2046 {
2047 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2048 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2049 	struct rte_flow *flow;
2050 
2051 	flow = ops->async_create_by_index(dev, queue_id,
2052 					  op_attr, template_table, rule_index,
2053 					  actions, actions_template_index,
2054 					  user_data, error);
2055 	if (flow == NULL)
2056 		flow_err(port_id, -rte_errno, error);
2057 	return flow;
2058 }
2059 
2060 int
2061 rte_flow_async_destroy(uint16_t port_id,
2062 		       uint32_t queue_id,
2063 		       const struct rte_flow_op_attr *op_attr,
2064 		       struct rte_flow *flow,
2065 		       void *user_data,
2066 		       struct rte_flow_error *error)
2067 {
2068 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2069 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2070 	int ret;
2071 
2072 	ret = flow_err(port_id,
2073 		       ops->async_destroy(dev, queue_id,
2074 					  op_attr, flow,
2075 					  user_data, error),
2076 		       error);
2077 
2078 	rte_flow_trace_async_destroy(port_id, queue_id, op_attr, flow,
2079 				     user_data, ret);
2080 
2081 	return ret;
2082 }
2083 
2084 int
2085 rte_flow_async_actions_update(uint16_t port_id,
2086 			      uint32_t queue_id,
2087 			      const struct rte_flow_op_attr *op_attr,
2088 			      struct rte_flow *flow,
2089 			      const struct rte_flow_action actions[],
2090 			      uint8_t actions_template_index,
2091 			      void *user_data,
2092 			      struct rte_flow_error *error)
2093 {
2094 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2095 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2096 	int ret;
2097 
2098 	ret = flow_err(port_id,
2099 		       ops->async_actions_update(dev, queue_id, op_attr,
2100 						 flow, actions,
2101 						 actions_template_index,
2102 						 user_data, error),
2103 		       error);
2104 
2105 	rte_flow_trace_async_actions_update(port_id, queue_id, op_attr, flow,
2106 					    actions, actions_template_index,
2107 					    user_data, ret);
2108 
2109 	return ret;
2110 }
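/*
 * Operations enqueued with op_attr.postpone set stay buffered until
 * pushed; completions are drained with rte_flow_pull(). A typical
 * drain loop (array size illustrative; a negative return is an error,
 * otherwise each results[i].status must still be checked):
 *
 *	struct rte_flow_op_result results[64];
 *	int n;
 *
 *	rte_flow_push(port_id, 0, &flow_error);
 *	do {
 *		n = rte_flow_pull(port_id, 0, results,
 *				  RTE_DIM(results), &flow_error);
 *	} while (n == 0);
 */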
2111 
2112 int
2113 rte_flow_push(uint16_t port_id,
2114 	      uint32_t queue_id,
2115 	      struct rte_flow_error *error)
2116 {
2117 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2118 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2119 	int ret;
2120 
2121 	ret = flow_err(port_id,
2122 		       ops->push(dev, queue_id, error),
2123 		       error);
2124 
2125 	rte_flow_trace_push(port_id, queue_id, ret);
2126 
2127 	return ret;
2128 }
2129 
2130 int
2131 rte_flow_pull(uint16_t port_id,
2132 	      uint32_t queue_id,
2133 	      struct rte_flow_op_result res[],
2134 	      uint16_t n_res,
2135 	      struct rte_flow_error *error)
2136 {
2137 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2138 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2139 	int ret;
2140 	int rc;
2141 
2142 	ret = ops->pull(dev, queue_id, res, n_res, error);
2143 	rc = ret ? ret : flow_err(port_id, ret, error);
2144 
2145 	rte_flow_trace_pull(port_id, queue_id, res, n_res, rc);
2146 
2147 	return rc;
2148 }
2149 
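/*
 * The async indirect-action calls mirror their synchronous
 * rte_flow_action_handle_*() counterparts, but are enqueued on a flow
 * queue and complete through rte_flow_pull() like rule operations.
 */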
2150 struct rte_flow_action_handle *
2151 rte_flow_async_action_handle_create(uint16_t port_id,
2152 		uint32_t queue_id,
2153 		const struct rte_flow_op_attr *op_attr,
2154 		const struct rte_flow_indir_action_conf *indir_action_conf,
2155 		const struct rte_flow_action *action,
2156 		void *user_data,
2157 		struct rte_flow_error *error)
2158 {
2159 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2160 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2161 	struct rte_flow_action_handle *handle;
2162 
2163 	handle = ops->async_action_handle_create(dev, queue_id, op_attr,
2164 					     indir_action_conf, action, user_data, error);
2165 	if (handle == NULL)
2166 		flow_err(port_id, -rte_errno, error);
2167 
2168 	rte_flow_trace_async_action_handle_create(port_id, queue_id, op_attr,
2169 						  indir_action_conf, action,
2170 						  user_data, handle);
2171 
2172 	return handle;
2173 }
2174 
2175 int
2176 rte_flow_async_action_handle_destroy(uint16_t port_id,
2177 		uint32_t queue_id,
2178 		const struct rte_flow_op_attr *op_attr,
2179 		struct rte_flow_action_handle *action_handle,
2180 		void *user_data,
2181 		struct rte_flow_error *error)
2182 {
2183 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2184 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2185 	int ret;
2186 
2187 	ret = ops->async_action_handle_destroy(dev, queue_id, op_attr,
2188 					   action_handle, user_data, error);
2189 	ret = flow_err(port_id, ret, error);
2190 
2191 	rte_flow_trace_async_action_handle_destroy(port_id, queue_id, op_attr,
2192 						   action_handle, user_data, ret);
2193 
2194 	return ret;
2195 }
2196 
2197 int
2198 rte_flow_async_action_handle_update(uint16_t port_id,
2199 		uint32_t queue_id,
2200 		const struct rte_flow_op_attr *op_attr,
2201 		struct rte_flow_action_handle *action_handle,
2202 		const void *update,
2203 		void *user_data,
2204 		struct rte_flow_error *error)
2205 {
2206 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2207 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2208 	int ret;
2209 
2210 	ret = ops->async_action_handle_update(dev, queue_id, op_attr,
2211 					  action_handle, update, user_data, error);
2212 	ret = flow_err(port_id, ret, error);
2213 
2214 	rte_flow_trace_async_action_handle_update(port_id, queue_id, op_attr,
2215 						  action_handle, update,
2216 						  user_data, ret);
2217 
2218 	return ret;
2219 }
2220 
2221 int
2222 rte_flow_async_action_handle_query(uint16_t port_id,
2223 		uint32_t queue_id,
2224 		const struct rte_flow_op_attr *op_attr,
2225 		const struct rte_flow_action_handle *action_handle,
2226 		void *data,
2227 		void *user_data,
2228 		struct rte_flow_error *error)
2229 {
2230 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2231 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2232 	int ret;
2233 
2234 	if (unlikely(!ops))
2235 		return -rte_errno;
2236 	ret = ops->async_action_handle_query(dev, queue_id, op_attr,
2237 					  action_handle, data, user_data, error);
2238 	ret = flow_err(port_id, ret, error);
2239 
2240 	rte_flow_trace_async_action_handle_query(port_id, queue_id, op_attr,
2241 						 action_handle, data, user_data,
2242 						 ret);
2243 
2244 	return ret;
2245 }
2246 
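/*
 * query_update folds an action query and an action update into a
 * single call; mode selects whether the query observes the action
 * state before or after the update is applied.
 */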
2247 int
2248 rte_flow_action_handle_query_update(uint16_t port_id,
2249 				    struct rte_flow_action_handle *handle,
2250 				    const void *update, void *query,
2251 				    enum rte_flow_query_update_mode mode,
2252 				    struct rte_flow_error *error)
2253 {
2254 	int ret;
2255 	struct rte_eth_dev *dev;
2256 	const struct rte_flow_ops *ops;
2257 
2258 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2259 	if (!handle)
2260 		return -EINVAL;
2261 	if (!update && !query)
2262 		return -EINVAL;
2263 	dev = &rte_eth_devices[port_id];
2264 	ops = rte_flow_ops_get(port_id, error);
2265 	if (!ops || !ops->action_handle_query_update)
2266 		return -ENOTSUP;
2267 	ret = ops->action_handle_query_update(dev, handle, update,
2268 					      query, mode, error);
2269 	return flow_err(port_id, ret, error);
2270 }
2271 
2272 int
2273 rte_flow_async_action_handle_query_update(uint16_t port_id, uint32_t queue_id,
2274 					  const struct rte_flow_op_attr *attr,
2275 					  struct rte_flow_action_handle *handle,
2276 					  const void *update, void *query,
2277 					  enum rte_flow_query_update_mode mode,
2278 					  void *user_data,
2279 					  struct rte_flow_error *error)
2280 {
2281 	int ret;
2282 	struct rte_eth_dev *dev;
2283 	const struct rte_flow_ops *ops;
2284 
2285 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2286 	if (!handle)
2287 		return -EINVAL;
2288 	if (!update && !query)
2289 		return -EINVAL;
2290 	dev = &rte_eth_devices[port_id];
2291 	ops = rte_flow_ops_get(port_id, error);
2292 	if (!ops || !ops->async_action_handle_query_update)
2293 		return -ENOTSUP;
2294 	ret = ops->async_action_handle_query_update(dev, queue_id, attr,
2295 						    handle, update,
2296 						    query, mode,
2297 						    user_data, error);
2298 	return flow_err(port_id, ret, error);
2299 }
2300 
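/*
 * An indirect action list handle references an ordered chain of
 * actions, letting one handle be shared by many rules the same way a
 * single indirect action handle can.
 */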
2301 struct rte_flow_action_list_handle *
2302 rte_flow_action_list_handle_create(uint16_t port_id,
2303 				   const
2304 				   struct rte_flow_indir_action_conf *conf,
2305 				   const struct rte_flow_action *actions,
2306 				   struct rte_flow_error *error)
2307 {
2308 	int ret;
2309 	struct rte_eth_dev *dev;
2310 	const struct rte_flow_ops *ops;
2311 	struct rte_flow_action_list_handle *handle;
2312 
2313 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
2314 	ops = rte_flow_ops_get(port_id, error);
2315 	if (!ops || !ops->action_list_handle_create) {
2316 		rte_flow_error_set(error, ENOTSUP,
2317 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2318 				   "action_list handle not supported");
2319 		return NULL;
2320 	}
2321 	dev = &rte_eth_devices[port_id];
2322 	handle = ops->action_list_handle_create(dev, conf, actions, error);
2323 	ret = handle ? 0 : flow_err(port_id, -rte_errno, error);
2324 	rte_flow_trace_action_list_handle_create(port_id, conf, actions, ret);
2325 	return handle;
2326 }
2327 
2328 int
2329 rte_flow_action_list_handle_destroy(uint16_t port_id,
2330 				    struct rte_flow_action_list_handle *handle,
2331 				    struct rte_flow_error *error)
2332 {
2333 	int ret;
2334 	struct rte_eth_dev *dev;
2335 	const struct rte_flow_ops *ops;
2336 
2337 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2338 	ops = rte_flow_ops_get(port_id, error);
2339 	if (!ops || !ops->action_list_handle_destroy)
2340 		return rte_flow_error_set(error, ENOTSUP,
2341 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2342 					  "action_list handle not supported");
2343 	dev = &rte_eth_devices[port_id];
2344 	ret = ops->action_list_handle_destroy(dev, handle, error);
2345 	ret = flow_err(port_id, ret, error);
2346 	rte_flow_trace_action_list_handle_destroy(port_id, handle, ret);
2347 	return ret;
2348 }
2349 
2350 struct rte_flow_action_list_handle *
2351 rte_flow_async_action_list_handle_create(uint16_t port_id, uint32_t queue_id,
2352 					 const struct rte_flow_op_attr *attr,
2353 					 const struct rte_flow_indir_action_conf *conf,
2354 					 const struct rte_flow_action *actions,
2355 					 void *user_data,
2356 					 struct rte_flow_error *error)
2357 {
2358 	int ret;
2359 	struct rte_eth_dev *dev;
2360 	const struct rte_flow_ops *ops;
2361 	struct rte_flow_action_list_handle *handle;
2362 
2363 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
2364 	ops = rte_flow_ops_get(port_id, error);
2365 	if (!ops || !ops->async_action_list_handle_create) {
2366 		rte_flow_error_set(error, ENOTSUP,
2367 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2368 				   "async action_list handle not supported");
2369 		return NULL;
2370 	}
2371 	dev = &rte_eth_devices[port_id];
2372 	handle = ops->async_action_list_handle_create(dev, queue_id, attr, conf,
2373 						      actions, user_data,
2374 						      error);
2375 	ret = handle ? 0 : flow_err(port_id, -rte_errno, error);
2376 	rte_flow_trace_async_action_list_handle_create(port_id, queue_id, attr,
2377 						       conf, actions, user_data,
2378 						       ret);
2379 	return handle;
2380 }
2381 
2382 int
2383 rte_flow_async_action_list_handle_destroy(uint16_t port_id, uint32_t queue_id,
2384 				 const struct rte_flow_op_attr *op_attr,
2385 				 struct rte_flow_action_list_handle *handle,
2386 				 void *user_data, struct rte_flow_error *error)
2387 {
2388 	int ret;
2389 	struct rte_eth_dev *dev;
2390 	const struct rte_flow_ops *ops;
2391 
2392 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2393 	ops = rte_flow_ops_get(port_id, error);
2394 	if (!ops || !ops->async_action_list_handle_destroy)
2395 		return rte_flow_error_set(error, ENOTSUP,
2396 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2397 					  "async action_list handle not supported");
2398 	dev = &rte_eth_devices[port_id];
2399 	ret = ops->async_action_list_handle_destroy(dev, queue_id, op_attr,
2400 						    handle, user_data, error);
2401 	ret = flow_err(port_id, ret, error);
2402 	rte_flow_trace_async_action_list_handle_destroy(port_id, queue_id,
2403 							op_attr, handle,
2404 							user_data, ret);
2405 	return ret;
2406 }
2407 
2408 int
2409 rte_flow_action_list_handle_query_update(uint16_t port_id,
2410 			 const struct rte_flow_action_list_handle *handle,
2411 			 const void **update, void **query,
2412 			 enum rte_flow_query_update_mode mode,
2413 			 struct rte_flow_error *error)
2414 {
2415 	int ret;
2416 	struct rte_eth_dev *dev;
2417 	const struct rte_flow_ops *ops;
2418 
2419 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2420 	ops = rte_flow_ops_get(port_id, error);
2421 	if (!ops || !ops->action_list_handle_query_update)
2422 		return rte_flow_error_set(error, ENOTSUP,
2423 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2424 					  "action_list query_update not supported");
2425 	dev = &rte_eth_devices[port_id];
2426 	ret = ops->action_list_handle_query_update(dev, handle, update, query,
2427 						   mode, error);
2428 	ret = flow_err(port_id, ret, error);
2429 	rte_flow_trace_action_list_handle_query_update(port_id, handle, update,
2430 						       query, mode, ret);
2431 	return ret;
2432 }
2433 
2434 int
2435 rte_flow_async_action_list_handle_query_update(uint16_t port_id, uint32_t queue_id,
2436 			 const struct rte_flow_op_attr *attr,
2437 			 const struct rte_flow_action_list_handle *handle,
2438 			 const void **update, void **query,
2439 			 enum rte_flow_query_update_mode mode,
2440 			 void *user_data, struct rte_flow_error *error)
2441 {
2442 	int ret;
2443 	struct rte_eth_dev *dev;
2444 	const struct rte_flow_ops *ops;
2445 
2446 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2447 	ops = rte_flow_ops_get(port_id, error);
2448 	if (!ops || !ops->async_action_list_handle_query_update)
2449 		return rte_flow_error_set(error, ENOTSUP,
2450 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2451 					  "action_list async query_update not supported");
2452 	dev = &rte_eth_devices[port_id];
2453 	ret = ops->async_action_list_handle_query_update(dev, queue_id, attr,
2454 							 handle, update, query,
2455 							 mode, user_data,
2456 							 error);
2457 	ret = flow_err(port_id, ret, error);
2458 	rte_flow_trace_async_action_list_handle_query_update(port_id, queue_id,
2459 							     attr, handle,
2460 							     update, query,
2461 							     mode, user_data,
2462 							     ret);
2463 	return ret;
2464 }
2465 
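/*
 * Computes the hash a pattern would yield for the given template
 * table, letting an application predict where a rule would land on
 * drivers that distribute rules by hash.
 */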
2466 int
2467 rte_flow_calc_table_hash(uint16_t port_id, const struct rte_flow_template_table *table,
2468 			 const struct rte_flow_item pattern[], uint8_t pattern_template_index,
2469 			 uint32_t *hash, struct rte_flow_error *error)
2470 {
2471 	int ret;
2472 	struct rte_eth_dev *dev;
2473 	const struct rte_flow_ops *ops;
2474 
2475 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2476 	ops = rte_flow_ops_get(port_id, error);
2477 	if (!ops || !ops->flow_calc_table_hash)
2478 		return rte_flow_error_set(error, ENOTSUP,
2479 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2480 					  "table hash calculation not supported");
2481 	dev = &rte_eth_devices[port_id];
2482 	ret = ops->flow_calc_table_hash(dev, table, pattern, pattern_template_index,
2483 					hash, error);
2484 	return flow_err(port_id, ret, error);
2485 }
2486