/* xref: /dpdk/lib/ethdev/rte_flow.c (revision 58143b7b386dd15c7e5db5cf5b280bcd19f6241b) */
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <stdalign.h>
#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <pthread.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_branch_prediction.h>
#include <rte_string_fns.h>
#include <rte_mbuf_dyn.h>
#include "rte_flow_driver.h"
#include "rte_flow.h"

#include "ethdev_trace.h"

#define FLOW_LOG RTE_ETHDEV_LOG_LINE

/* Mbuf dynamic field name for metadata. */
int32_t rte_flow_dynf_metadata_offs = -1;

/* Mbuf dynamic field flag bit number for metadata. */
uint64_t rte_flow_dynf_metadata_mask;

/** Flow element description tables. */
struct rte_flow_desc_data {
	const char *name;
	size_t size;
	size_t (*desc_fn)(void *dst, const void *src);
};

/**
 * Copy flow element data into a destination buffer.
 *
 * @param buf
 *   Destination memory.
 * @param data
 *   Source memory.
 * @param size
 *   Requested copy size.
 * @param desc
 *   rte_flow_desc_item - for flow item conversion.
 *   rte_flow_desc_action - for flow action conversion.
 * @param type
 *   Offset into the desc param or negative value for private flow elements.
 */
static inline size_t
rte_flow_conv_copy(void *buf, const void *data, const size_t size,
		   const struct rte_flow_desc_data *desc, int type)
{
	/* Allow PMD private flow items. */
	bool rte_type = type >= 0;

	size_t sz = rte_type ? desc[type].size : sizeof(void *);
	if (buf == NULL || data == NULL)
		return 0;
	rte_memcpy(buf, data, (size > sz ? sz : size));
	if (rte_type && desc[type].desc_fn)
		sz += desc[type].desc_fn(size > 0 ? buf : NULL, data);
	return sz;
}

static size_t
rte_flow_item_flex_conv(void *buf, const void *data)
{
	struct rte_flow_item_flex *dst = buf;
	const struct rte_flow_item_flex *src = data;
	if (buf) {
		dst->pattern = rte_memcpy
			((void *)((uintptr_t)(dst + 1)), src->pattern,
			 src->length);
	}
	return src->length;
}

/** Generate flow_item[] entry. */
#define MK_FLOW_ITEM(t, s) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
		.desc_fn = NULL, \
	}

#define MK_FLOW_ITEM_FN(t, s, fn) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
		.desc_fn = fn, \
	}
/** Information about known flow pattern items. */
static const struct rte_flow_desc_data rte_flow_desc_item[] = {
	MK_FLOW_ITEM(END, 0),
	MK_FLOW_ITEM(VOID, 0),
	MK_FLOW_ITEM(INVERT, 0),
	MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
	MK_FLOW_ITEM(PORT_ID, sizeof(struct rte_flow_item_port_id)),
	MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)),
	MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
	MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
	MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
	MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
	MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
	MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
	MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
	MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
	MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
	MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
	MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
	MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
	MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
	MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)),
	MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(ESP, sizeof(struct rte_flow_item_esp)),
	MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
	MK_FLOW_ITEM(VXLAN_GPE, sizeof(struct rte_flow_item_vxlan_gpe)),
	MK_FLOW_ITEM(ARP_ETH_IPV4, sizeof(struct rte_flow_item_arp_eth_ipv4)),
	MK_FLOW_ITEM(IPV6_EXT, sizeof(struct rte_flow_item_ipv6_ext)),
	MK_FLOW_ITEM(IPV6_FRAG_EXT, sizeof(struct rte_flow_item_ipv6_frag_ext)),
	MK_FLOW_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
	MK_FLOW_ITEM(ICMP6_ECHO_REQUEST, sizeof(struct rte_flow_item_icmp6_echo)),
	MK_FLOW_ITEM(ICMP6_ECHO_REPLY, sizeof(struct rte_flow_item_icmp6_echo)),
	MK_FLOW_ITEM(ICMP6_ND_NS, sizeof(struct rte_flow_item_icmp6_nd_ns)),
	MK_FLOW_ITEM(ICMP6_ND_NA, sizeof(struct rte_flow_item_icmp6_nd_na)),
	MK_FLOW_ITEM(ICMP6_ND_OPT, sizeof(struct rte_flow_item_icmp6_nd_opt)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_SLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_TLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
	MK_FLOW_ITEM(MARK, sizeof(struct rte_flow_item_mark)),
	MK_FLOW_ITEM(META, sizeof(struct rte_flow_item_meta)),
	MK_FLOW_ITEM(RANDOM, sizeof(struct rte_flow_item_random)),
	MK_FLOW_ITEM(TAG, sizeof(struct rte_flow_item_tag)),
	MK_FLOW_ITEM(GRE_KEY, sizeof(rte_be32_t)),
	MK_FLOW_ITEM(GRE_OPTION, sizeof(struct rte_flow_item_gre_opt)),
	MK_FLOW_ITEM(GTP_PSC, sizeof(struct rte_flow_item_gtp_psc)),
	MK_FLOW_ITEM(PPPOES, sizeof(struct rte_flow_item_pppoe)),
	MK_FLOW_ITEM(PPPOED, sizeof(struct rte_flow_item_pppoe)),
	MK_FLOW_ITEM(PPPOE_PROTO_ID,
			sizeof(struct rte_flow_item_pppoe_proto_id)),
	MK_FLOW_ITEM(NSH, sizeof(struct rte_flow_item_nsh)),
	MK_FLOW_ITEM(IGMP, sizeof(struct rte_flow_item_igmp)),
	MK_FLOW_ITEM(AH, sizeof(struct rte_flow_item_ah)),
	MK_FLOW_ITEM(HIGIG2, sizeof(struct rte_flow_item_higig2_hdr)),
	MK_FLOW_ITEM(L2TPV3OIP, sizeof(struct rte_flow_item_l2tpv3oip)),
	MK_FLOW_ITEM(PFCP, sizeof(struct rte_flow_item_pfcp)),
	MK_FLOW_ITEM(ECPRI, sizeof(struct rte_flow_item_ecpri)),
	MK_FLOW_ITEM(GENEVE_OPT, sizeof(struct rte_flow_item_geneve_opt)),
	MK_FLOW_ITEM(INTEGRITY, sizeof(struct rte_flow_item_integrity)),
	MK_FLOW_ITEM(CONNTRACK, sizeof(uint32_t)),
	MK_FLOW_ITEM(PORT_REPRESENTOR, sizeof(struct rte_flow_item_ethdev)),
	MK_FLOW_ITEM(REPRESENTED_PORT, sizeof(struct rte_flow_item_ethdev)),
	MK_FLOW_ITEM_FN(FLEX, sizeof(struct rte_flow_item_flex),
			rte_flow_item_flex_conv),
	MK_FLOW_ITEM(L2TPV2, sizeof(struct rte_flow_item_l2tpv2)),
	MK_FLOW_ITEM(PPP, sizeof(struct rte_flow_item_ppp)),
	MK_FLOW_ITEM(METER_COLOR, sizeof(struct rte_flow_item_meter_color)),
	MK_FLOW_ITEM(IPV6_ROUTING_EXT, sizeof(struct rte_flow_item_ipv6_routing_ext)),
	MK_FLOW_ITEM(QUOTA, sizeof(struct rte_flow_item_quota)),
	MK_FLOW_ITEM(AGGR_AFFINITY, sizeof(struct rte_flow_item_aggr_affinity)),
	MK_FLOW_ITEM(TX_QUEUE, sizeof(struct rte_flow_item_tx_queue)),
	MK_FLOW_ITEM(IB_BTH, sizeof(struct rte_flow_item_ib_bth)),
	MK_FLOW_ITEM(PTYPE, sizeof(struct rte_flow_item_ptype)),
	MK_FLOW_ITEM(COMPARE, sizeof(struct rte_flow_item_compare)),
};

/** Generate flow_action[] entry. */
#define MK_FLOW_ACTION(t, s) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
		.desc_fn = NULL, \
	}

#define MK_FLOW_ACTION_FN(t, fn) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = 0, \
		.desc_fn = fn, \
	}

/** Information about known flow actions. */
static const struct rte_flow_desc_data rte_flow_desc_action[] = {
	MK_FLOW_ACTION(END, 0),
	MK_FLOW_ACTION(VOID, 0),
	MK_FLOW_ACTION(PASSTHRU, 0),
	MK_FLOW_ACTION(JUMP, sizeof(struct rte_flow_action_jump)),
	MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
	MK_FLOW_ACTION(FLAG, 0),
	MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
	MK_FLOW_ACTION(DROP, 0),
	MK_FLOW_ACTION(COUNT, sizeof(struct rte_flow_action_count)),
	MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)),
	MK_FLOW_ACTION(PF, 0),
	MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
	MK_FLOW_ACTION(PORT_ID, sizeof(struct rte_flow_action_port_id)),
	MK_FLOW_ACTION(METER, sizeof(struct rte_flow_action_meter)),
	MK_FLOW_ACTION(SECURITY, sizeof(struct rte_flow_action_security)),
	MK_FLOW_ACTION(OF_DEC_NW_TTL, 0),
	MK_FLOW_ACTION(OF_POP_VLAN, 0),
	MK_FLOW_ACTION(OF_PUSH_VLAN,
		       sizeof(struct rte_flow_action_of_push_vlan)),
	MK_FLOW_ACTION(OF_SET_VLAN_VID,
		       sizeof(struct rte_flow_action_of_set_vlan_vid)),
	MK_FLOW_ACTION(OF_SET_VLAN_PCP,
		       sizeof(struct rte_flow_action_of_set_vlan_pcp)),
	MK_FLOW_ACTION(OF_POP_MPLS,
		       sizeof(struct rte_flow_action_of_pop_mpls)),
	MK_FLOW_ACTION(OF_PUSH_MPLS,
		       sizeof(struct rte_flow_action_of_push_mpls)),
	MK_FLOW_ACTION(VXLAN_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
	MK_FLOW_ACTION(VXLAN_DECAP, 0),
	MK_FLOW_ACTION(NVGRE_ENCAP, sizeof(struct rte_flow_action_nvgre_encap)),
	MK_FLOW_ACTION(NVGRE_DECAP, 0),
	MK_FLOW_ACTION(RAW_ENCAP, sizeof(struct rte_flow_action_raw_encap)),
	MK_FLOW_ACTION(RAW_DECAP, sizeof(struct rte_flow_action_raw_decap)),
	MK_FLOW_ACTION(SET_IPV4_SRC,
		       sizeof(struct rte_flow_action_set_ipv4)),
	MK_FLOW_ACTION(SET_IPV4_DST,
		       sizeof(struct rte_flow_action_set_ipv4)),
	MK_FLOW_ACTION(SET_IPV6_SRC,
		       sizeof(struct rte_flow_action_set_ipv6)),
	MK_FLOW_ACTION(SET_IPV6_DST,
		       sizeof(struct rte_flow_action_set_ipv6)),
	MK_FLOW_ACTION(SET_TP_SRC,
		       sizeof(struct rte_flow_action_set_tp)),
	MK_FLOW_ACTION(SET_TP_DST,
		       sizeof(struct rte_flow_action_set_tp)),
	MK_FLOW_ACTION(MAC_SWAP, 0),
	MK_FLOW_ACTION(DEC_TTL, 0),
	MK_FLOW_ACTION(SET_TTL, sizeof(struct rte_flow_action_set_ttl)),
	MK_FLOW_ACTION(SET_MAC_SRC, sizeof(struct rte_flow_action_set_mac)),
	MK_FLOW_ACTION(SET_MAC_DST, sizeof(struct rte_flow_action_set_mac)),
	MK_FLOW_ACTION(INC_TCP_SEQ, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(DEC_TCP_SEQ, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(INC_TCP_ACK, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(DEC_TCP_ACK, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(SET_TAG, sizeof(struct rte_flow_action_set_tag)),
	MK_FLOW_ACTION(SET_META, sizeof(struct rte_flow_action_set_meta)),
	MK_FLOW_ACTION(SET_IPV4_DSCP, sizeof(struct rte_flow_action_set_dscp)),
	MK_FLOW_ACTION(SET_IPV6_DSCP, sizeof(struct rte_flow_action_set_dscp)),
	MK_FLOW_ACTION(AGE, sizeof(struct rte_flow_action_age)),
	MK_FLOW_ACTION(SAMPLE, sizeof(struct rte_flow_action_sample)),
	MK_FLOW_ACTION(MODIFY_FIELD,
		       sizeof(struct rte_flow_action_modify_field)),
	/**
	 * Indirect action represented as handle of type
	 * (struct rte_flow_action_handle *) stored in conf field (see
	 * struct rte_flow_action); no need for additional structure to
	 * store indirect action handle.
	 */
	MK_FLOW_ACTION(INDIRECT, 0),
	MK_FLOW_ACTION(CONNTRACK, sizeof(struct rte_flow_action_conntrack)),
	MK_FLOW_ACTION(PORT_REPRESENTOR, sizeof(struct rte_flow_action_ethdev)),
	MK_FLOW_ACTION(REPRESENTED_PORT, sizeof(struct rte_flow_action_ethdev)),
	MK_FLOW_ACTION(METER_MARK, sizeof(struct rte_flow_action_meter_mark)),
	MK_FLOW_ACTION(SEND_TO_KERNEL, 0),
	MK_FLOW_ACTION(QUOTA, sizeof(struct rte_flow_action_quota)),
	MK_FLOW_ACTION(IPV6_EXT_PUSH, sizeof(struct rte_flow_action_ipv6_ext_push)),
	MK_FLOW_ACTION(IPV6_EXT_REMOVE, sizeof(struct rte_flow_action_ipv6_ext_remove)),
	MK_FLOW_ACTION(INDIRECT_LIST,
		       sizeof(struct rte_flow_action_indirect_list)),
	MK_FLOW_ACTION(PROG,
		       sizeof(struct rte_flow_action_prog)),
	MK_FLOW_ACTION(NAT64, sizeof(struct rte_flow_action_nat64)),
};

int
rte_flow_dynf_metadata_register(void)
{
	int offset;
	int flag;

	static const struct rte_mbuf_dynfield desc_offs = {
		.name = RTE_MBUF_DYNFIELD_METADATA_NAME,
		.size = sizeof(uint32_t),
		.align = alignof(uint32_t),
	};
	static const struct rte_mbuf_dynflag desc_flag = {
		.name = RTE_MBUF_DYNFLAG_METADATA_NAME,
	};

	offset = rte_mbuf_dynfield_register(&desc_offs);
	if (offset < 0)
		goto error;
	flag = rte_mbuf_dynflag_register(&desc_flag);
	if (flag < 0)
		goto error;
	rte_flow_dynf_metadata_offs = offset;
	rte_flow_dynf_metadata_mask = RTE_BIT64(flag);

	rte_flow_trace_dynf_metadata_register(offset, RTE_BIT64(flag));

	return 0;

error:
	rte_flow_dynf_metadata_offs = -1;
	rte_flow_dynf_metadata_mask = UINT64_C(0);
	return -rte_errno;
}
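
/*
 * Usage sketch (illustrative, not part of the original file): register the
 * metadata dynamic field once, then tag an mbuf. The value 0xcafe and the
 * function name are hypothetical examples, not a documented API pattern.
 */
static __rte_unused void
flow_metadata_usage_sketch(struct rte_mbuf *m)
{
	/* Register lazily; on failure rte_errno is set by the callee. */
	if (rte_flow_dynf_metadata_offs < 0 &&
	    rte_flow_dynf_metadata_register() < 0)
		return;
	/* Store a 32-bit metadata value in the mbuf dynamic field. */
	rte_flow_dynf_metadata_set(m, 0xcafe);
	/*
	 * The Tx path must additionally set rte_flow_dynf_metadata_mask in
	 * m->ol_flags so the PMD considers the metadata field valid.
	 */
}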

static inline void
fts_enter(struct rte_eth_dev *dev)
{
	if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
		pthread_mutex_lock(&dev->data->flow_ops_mutex);
}

static inline void
fts_exit(struct rte_eth_dev *dev)
{
	if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
		pthread_mutex_unlock(&dev->data->flow_ops_mutex);
}

static int
flow_err(uint16_t port_id, int ret, struct rte_flow_error *error)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return rte_flow_error_set(error, EIO,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(EIO));
	return ret;
}

/* Get generic flow operations structure from a port. */
const struct rte_flow_ops *
rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops;
	int code;

	if (unlikely(!rte_eth_dev_is_valid_port(port_id)))
		code = ENODEV;
	else if (unlikely(dev->dev_ops->flow_ops_get == NULL))
		/* flow API not supported with this driver dev_ops */
		code = ENOSYS;
	else
		code = dev->dev_ops->flow_ops_get(dev, &ops);
	if (code == 0 && ops == NULL)
		/* flow API not supported with this device */
		code = ENOSYS;

	if (code != 0) {
		rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, rte_strerror(code));
		return NULL;
	}
	return ops;
}

/* Check whether a flow rule can be created on a given port. */
int
rte_flow_validate(uint16_t port_id,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;

	if (likely(!!attr) && attr->transfer &&
	    (attr->ingress || attr->egress)) {
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ATTR,
					  attr, "cannot use attr ingress/egress with attr transfer");
	}

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->validate)) {
		fts_enter(dev);
		ret = ops->validate(dev, attr, pattern, actions, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_validate(port_id, attr, pattern, actions, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Create a flow rule on a given port. */
struct rte_flow *
rte_flow_create(uint16_t port_id,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct rte_flow *flow;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return NULL;
	if (likely(!!ops->create)) {
		fts_enter(dev);
		flow = ops->create(dev, attr, pattern, actions, error);
		fts_exit(dev);
		if (flow == NULL)
			flow_err(port_id, -rte_errno, error);

		rte_flow_trace_create(port_id, attr, pattern, actions, flow);

		return flow;
	}
	rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(ENOSYS));
	return NULL;
}

/* Destroy a flow rule on a given port. */
int
rte_flow_destroy(uint16_t port_id,
		 struct rte_flow *flow,
		 struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->destroy)) {
		fts_enter(dev);
		ret = ops->destroy(dev, flow, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_destroy(port_id, flow, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}
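
/*
 * Lifecycle sketch (illustrative, not part of the original file): validate a
 * minimal rule, create it, then destroy it. The ingress ETH -> DROP rule and
 * the function name are hypothetical placeholders.
 */
static __rte_unused void
flow_rule_lifecycle_sketch(uint16_t port_id)
{
	const struct rte_flow_attr attr = { .ingress = 1 };
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow_error error;
	struct rte_flow *flow;

	/* Validation is optional but catches unsupported rules early. */
	if (rte_flow_validate(port_id, &attr, pattern, actions, &error) != 0)
		return;
	flow = rte_flow_create(port_id, &attr, pattern, actions, &error);
	if (flow != NULL)
		rte_flow_destroy(port_id, flow, &error);
}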

int
rte_flow_actions_update(uint16_t port_id,
			struct rte_flow *flow,
			const struct rte_flow_action actions[],
			struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->actions_update)) {
		fts_enter(dev);
		ret = ops->actions_update(dev, flow, actions, error);
		fts_exit(dev);

		rte_flow_trace_actions_update(port_id, flow, actions, ret);

		return flow_err(port_id, ret, error);
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Destroy all flow rules associated with a port. */
int
rte_flow_flush(uint16_t port_id,
	       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->flush)) {
		fts_enter(dev);
		ret = ops->flush(dev, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_flush(port_id, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Query an existing flow rule. */
int
rte_flow_query(uint16_t port_id,
	       struct rte_flow *flow,
	       const struct rte_flow_action *action,
	       void *data,
	       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (!ops)
		return -rte_errno;
	if (likely(!!ops->query)) {
		fts_enter(dev);
		ret = ops->query(dev, flow, action, data, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_query(port_id, flow, action, data, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}
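
/*
 * Query sketch (illustrative, not part of the original file): read back the
 * counters of a rule created with a COUNT action. The flow handle is assumed
 * to come from a prior rte_flow_create() call; the function name is made up.
 */
static __rte_unused void
flow_count_query_sketch(uint16_t port_id, struct rte_flow *flow)
{
	struct rte_flow_query_count counters = { .reset = 0 };
	const struct rte_flow_action count_action = {
		.type = RTE_FLOW_ACTION_TYPE_COUNT,
	};
	struct rte_flow_error error;

	/* The action identifies which per-rule object to query. */
	if (rte_flow_query(port_id, flow, &count_action,
			   &counters, &error) == 0 && counters.hits_set)
		FLOW_LOG(INFO, "flow hits: %" PRIu64, counters.hits);
}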

/* Restrict ingress traffic to the defined flow rules. */
int
rte_flow_isolate(uint16_t port_id,
		 int set,
		 struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (!ops)
		return -rte_errno;
	if (likely(!!ops->isolate)) {
		fts_enter(dev);
		ret = ops->isolate(dev, set, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_isolate(port_id, set, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Initialize flow error structure. */
int
rte_flow_error_set(struct rte_flow_error *error,
		   int code,
		   enum rte_flow_error_type type,
		   const void *cause,
		   const char *message)
{
	if (error) {
		*error = (struct rte_flow_error){
			.type = type,
			.cause = cause,
			.message = message,
		};
	}
	rte_errno = code;
	return -code;
}
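
/*
 * Error-handling sketch (illustrative, not part of the original file): a
 * caller can rely on the rte_flow_error_set() contract above, i.e. a
 * negative errno return with rte_errno set and, when a non-NULL error
 * structure was passed, verbose details filled in.
 */
static __rte_unused void
flow_error_report_sketch(uint16_t port_id)
{
	struct rte_flow_error error = { 0 };

	if (rte_flow_flush(port_id, &error) != 0)
		FLOW_LOG(ERR, "flow flush failed (%s): %s",
			 rte_strerror(rte_errno),
			 error.message != NULL ? error.message : "no details");
}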

/** Pattern item specification types. */
enum rte_flow_conv_item_spec_type {
	RTE_FLOW_CONV_ITEM_SPEC,
	RTE_FLOW_CONV_ITEM_LAST,
	RTE_FLOW_CONV_ITEM_MASK,
};

/**
 * Copy pattern item specification.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] item
 *   Pattern item to copy specification from.
 * @param type
 *   Specification selector for either @p spec, @p last or @p mask.
 *
 * @return
 *   Number of bytes needed to store pattern item specification regardless
 *   of @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_item_spec(void *buf, const size_t size,
			const struct rte_flow_item *item,
			enum rte_flow_conv_item_spec_type type)
{
	size_t off;
	const void *data =
		type == RTE_FLOW_CONV_ITEM_SPEC ? item->spec :
		type == RTE_FLOW_CONV_ITEM_LAST ? item->last :
		type == RTE_FLOW_CONV_ITEM_MASK ? item->mask :
		NULL;

	switch (item->type) {
		union {
			const struct rte_flow_item_raw *raw;
		} spec;
		union {
			const struct rte_flow_item_raw *raw;
		} last;
		union {
			const struct rte_flow_item_raw *raw;
		} mask;
		union {
			const struct rte_flow_item_raw *raw;
		} src;
		union {
			struct rte_flow_item_raw *raw;
		} dst;
		size_t tmp;

	case RTE_FLOW_ITEM_TYPE_RAW:
		spec.raw = item->spec;
		last.raw = item->last ? item->last : item->spec;
		mask.raw = item->mask ? item->mask : &rte_flow_item_raw_mask;
		src.raw = data;
		dst.raw = buf;
		rte_memcpy(dst.raw,
			   (&(struct rte_flow_item_raw){
				.relative = src.raw->relative,
				.search = src.raw->search,
				.reserved = src.raw->reserved,
				.offset = src.raw->offset,
				.limit = src.raw->limit,
				.length = src.raw->length,
			   }),
			   size > sizeof(*dst.raw) ? sizeof(*dst.raw) : size);
		off = sizeof(*dst.raw);
		if (type == RTE_FLOW_CONV_ITEM_SPEC ||
		    (type == RTE_FLOW_CONV_ITEM_MASK &&
		     ((spec.raw->length & mask.raw->length) >=
		      (last.raw->length & mask.raw->length))))
			tmp = spec.raw->length & mask.raw->length;
		else
			tmp = last.raw->length & mask.raw->length;
		if (tmp) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.raw->pattern));
			if (size >= off + tmp)
				dst.raw->pattern = rte_memcpy
					((void *)((uintptr_t)dst.raw + off),
					 src.raw->pattern, tmp);
			off += tmp;
		}
		break;
	default:
		off = rte_flow_conv_copy(buf, data, size,
					 rte_flow_desc_item, item->type);
		break;
	}
	return off;
}

/**
 * Copy action configuration.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] action
 *   Action to copy configuration from.
 *
 * @return
 *   Number of bytes needed to store action configuration regardless of
 *   @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_action_conf(void *buf, const size_t size,
			  const struct rte_flow_action *action)
{
	size_t off;

	switch (action->type) {
		union {
			const struct rte_flow_action_rss *rss;
			const struct rte_flow_action_vxlan_encap *vxlan_encap;
			const struct rte_flow_action_nvgre_encap *nvgre_encap;
		} src;
		union {
			struct rte_flow_action_rss *rss;
			struct rte_flow_action_vxlan_encap *vxlan_encap;
			struct rte_flow_action_nvgre_encap *nvgre_encap;
		} dst;
		size_t tmp;
		int ret;

	case RTE_FLOW_ACTION_TYPE_RSS:
		src.rss = action->conf;
		dst.rss = buf;
		rte_memcpy(dst.rss,
			   (&(struct rte_flow_action_rss){
				.func = src.rss->func,
				.level = src.rss->level,
				.types = src.rss->types,
				.key_len = src.rss->key_len,
				.queue_num = src.rss->queue_num,
			   }),
			   size > sizeof(*dst.rss) ? sizeof(*dst.rss) : size);
		off = sizeof(*dst.rss);
		if (src.rss->key_len && src.rss->key) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->key));
			tmp = sizeof(*src.rss->key) * src.rss->key_len;
			if (size >= (uint64_t)off + (uint64_t)tmp)
				dst.rss->key = rte_memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->key, tmp);
			off += tmp;
		}
		if (src.rss->queue_num) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->queue));
			tmp = sizeof(*src.rss->queue) * src.rss->queue_num;
			if (size >= (uint64_t)off + (uint64_t)tmp)
				dst.rss->queue = rte_memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->queue, tmp);
			off += tmp;
		}
		break;
	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
	case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
		src.vxlan_encap = action->conf;
		dst.vxlan_encap = buf;
		RTE_BUILD_BUG_ON(sizeof(*src.vxlan_encap) !=
				 sizeof(*src.nvgre_encap) ||
				 offsetof(struct rte_flow_action_vxlan_encap,
					  definition) !=
				 offsetof(struct rte_flow_action_nvgre_encap,
					  definition));
		off = sizeof(*dst.vxlan_encap);
		if (src.vxlan_encap->definition) {
			off = RTE_ALIGN_CEIL
				(off, sizeof(*dst.vxlan_encap->definition));
			ret = rte_flow_conv
				(RTE_FLOW_CONV_OP_PATTERN,
				 (void *)((uintptr_t)dst.vxlan_encap + off),
				 size > off ? size - off : 0,
				 src.vxlan_encap->definition, NULL);
			if (ret < 0)
				return 0;
			if (size >= off + ret)
				dst.vxlan_encap->definition =
					(void *)((uintptr_t)dst.vxlan_encap +
						 off);
			off += ret;
		}
		break;
	default:
		off = rte_flow_conv_copy(buf, action->conf, size,
					 rte_flow_desc_action, action->type);
		break;
	}
	return off;
}

/**
 * Copy a list of pattern items.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source pattern items.
 * @param num
 *   Maximum number of pattern items to process from @p src or 0 to process
 *   the entire list. In both cases, processing stops after
 *   RTE_FLOW_ITEM_TYPE_END is encountered.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store
 *   pattern items regardless of @p size on success (@p dst contents are
 *   truncated to @p size if not large enough), a negative errno value
 *   otherwise and rte_errno is set.
 */
static int
rte_flow_conv_pattern(struct rte_flow_item *dst,
		      const size_t size,
		      const struct rte_flow_item *src,
		      unsigned int num,
		      struct rte_flow_error *error)
{
	uintptr_t data = (uintptr_t)dst;
	size_t off;
	size_t ret;
	unsigned int i;

	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
		/* Allow PMD private flow items. */
		if (((int)src->type >= 0) &&
		    ((size_t)src->type >= RTE_DIM(rte_flow_desc_item) ||
		     !rte_flow_desc_item[src->type].name))
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, src,
				 "cannot convert unknown item type");
		if (size >= off + sizeof(*dst))
			*dst = (struct rte_flow_item){
				.type = src->type,
			};
		off += sizeof(*dst);
		if (!src->type)
			num = i + 1;
	}
	num = i;
	src -= num;
	dst -= num;
	do {
		if (src->spec) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_SPEC);
			if (size && size >= off + ret)
				dst->spec = (void *)(data + off);
			off += ret;
		}
		if (src->last) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_LAST);
			if (size && size >= off + ret)
				dst->last = (void *)(data + off);
			off += ret;
		}
		if (src->mask) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_MASK);
			if (size && size >= off + ret)
				dst->mask = (void *)(data + off);
			off += ret;
		}
		++src;
		++dst;
	} while (--num);
	return off;
}

/**
 * Copy a list of actions.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source actions.
 * @param num
 *   Maximum number of actions to process from @p src or 0 to process the
 *   entire list. In both cases, processing stops after
 *   RTE_FLOW_ACTION_TYPE_END is encountered.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store
 *   actions regardless of @p size on success (@p dst contents are truncated
 *   to @p size if not large enough), a negative errno value otherwise and
 *   rte_errno is set.
 */
static int
rte_flow_conv_actions(struct rte_flow_action *dst,
		      const size_t size,
		      const struct rte_flow_action *src,
		      unsigned int num,
		      struct rte_flow_error *error)
{
	uintptr_t data = (uintptr_t)dst;
	size_t off;
	size_t ret;
	unsigned int i;

	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
		/* Allow PMD private flow actions. */
		if (((int)src->type >= 0) &&
		    ((size_t)src->type >= RTE_DIM(rte_flow_desc_action) ||
		     !rte_flow_desc_action[src->type].name))
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
				 src, "cannot convert unknown action type");
		if (size >= off + sizeof(*dst))
			*dst = (struct rte_flow_action){
				.type = src->type,
			};
		off += sizeof(*dst);
		if (!src->type)
			num = i + 1;
	}
	num = i;
	src -= num;
	dst -= num;
	do {
		if (src->type == RTE_FLOW_ACTION_TYPE_INDIRECT) {
			/*
			 * Indirect action conf fills the indirect action
			 * handler. Copy the action handle directly instead
			 * of duplicating the pointer memory.
			 */
			if (size)
				dst->conf = src->conf;
		} else if (src->conf) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_action_conf
				((void *)(data + off),
				 size > off ? size - off : 0, src);
			if (size && size >= off + ret)
				dst->conf = (void *)(data + off);
			off += ret;
		}
		++src;
		++dst;
	} while (--num);
	return off;
}

/**
 * Copy flow rule components.
 *
 * This comprises the flow rule descriptor itself, attributes, pattern and
 * actions list. NULL components in @p src are skipped.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source flow rule descriptor.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store all
 *   components including the descriptor regardless of @p size on success
 *   (@p dst contents are truncated to @p size if not large enough), a
 *   negative errno value otherwise and rte_errno is set.
 */
static int
rte_flow_conv_rule(struct rte_flow_conv_rule *dst,
		   const size_t size,
		   const struct rte_flow_conv_rule *src,
		   struct rte_flow_error *error)
{
	size_t off;
	int ret;

	rte_memcpy(dst,
		   (&(struct rte_flow_conv_rule){
			.attr = NULL,
			.pattern = NULL,
			.actions = NULL,
		   }),
		   size > sizeof(*dst) ? sizeof(*dst) : size);
	off = sizeof(*dst);
	if (src->attr_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		if (size && size >= off + sizeof(*dst->attr))
			dst->attr = rte_memcpy
				((void *)((uintptr_t)dst + off),
				 src->attr_ro, sizeof(*dst->attr));
		off += sizeof(*dst->attr);
	}
	if (src->pattern_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		ret = rte_flow_conv_pattern((void *)((uintptr_t)dst + off),
					    size > off ? size - off : 0,
					    src->pattern_ro, 0, error);
		if (ret < 0)
			return ret;
		if (size && size >= off + (size_t)ret)
			dst->pattern = (void *)((uintptr_t)dst + off);
		off += ret;
	}
	if (src->actions_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		ret = rte_flow_conv_actions((void *)((uintptr_t)dst + off),
					    size > off ? size - off : 0,
					    src->actions_ro, 0, error);
		if (ret < 0)
			return ret;
		if (size >= off + (size_t)ret)
			dst->actions = (void *)((uintptr_t)dst + off);
		off += ret;
	}
	return off;
}

/**
 * Retrieve the name of a pattern item/action type.
 *
 * @param is_action
 *   Nonzero when @p src represents an action type instead of a pattern item
 *   type.
 * @param is_ptr
 *   Nonzero to write string address instead of contents into @p dst.
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Depending on @p is_action, source pattern item or action type cast as a
 *   pointer.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store the
 *   name or its address regardless of @p size on success (@p dst contents
 *   are truncated to @p size if not large enough), a negative errno value
 *   otherwise and rte_errno is set.
 */
static int
rte_flow_conv_name(int is_action,
		   int is_ptr,
		   char *dst,
		   const size_t size,
		   const void *src,
		   struct rte_flow_error *error)
{
	struct desc_info {
		const struct rte_flow_desc_data *data;
		size_t num;
	};
	static const struct desc_info info_rep[2] = {
		{ rte_flow_desc_item, RTE_DIM(rte_flow_desc_item), },
		{ rte_flow_desc_action, RTE_DIM(rte_flow_desc_action), },
	};
	const struct desc_info *const info = &info_rep[!!is_action];
	unsigned int type = (uintptr_t)src;

	if (type >= info->num)
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			 "unknown object type to retrieve the name of");
	if (!is_ptr)
		return strlcpy(dst, info->data[type].name, size);
	if (size >= sizeof(const char **))
		*((const char **)dst) = info->data[type].name;
	return sizeof(const char **);
}

/** Helper function to convert flow API objects. */
int
rte_flow_conv(enum rte_flow_conv_op op,
	      void *dst,
	      size_t size,
	      const void *src,
	      struct rte_flow_error *error)
{
	int ret;

	switch (op) {
		const struct rte_flow_attr *attr;

	case RTE_FLOW_CONV_OP_NONE:
		ret = 0;
		break;
	case RTE_FLOW_CONV_OP_ATTR:
		attr = src;
		if (size > sizeof(*attr))
			size = sizeof(*attr);
		rte_memcpy(dst, attr, size);
		ret = sizeof(*attr);
		break;
	case RTE_FLOW_CONV_OP_ITEM:
		ret = rte_flow_conv_pattern(dst, size, src, 1, error);
		break;
	case RTE_FLOW_CONV_OP_ACTION:
		ret = rte_flow_conv_actions(dst, size, src, 1, error);
		break;
	case RTE_FLOW_CONV_OP_PATTERN:
		ret = rte_flow_conv_pattern(dst, size, src, 0, error);
		break;
	case RTE_FLOW_CONV_OP_ACTIONS:
		ret = rte_flow_conv_actions(dst, size, src, 0, error);
		break;
	case RTE_FLOW_CONV_OP_RULE:
		ret = rte_flow_conv_rule(dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ITEM_NAME:
		ret = rte_flow_conv_name(0, 0, dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ACTION_NAME:
		ret = rte_flow_conv_name(1, 0, dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ITEM_NAME_PTR:
		ret = rte_flow_conv_name(0, 1, dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ACTION_NAME_PTR:
		ret = rte_flow_conv_name(1, 1, dst, size, src, error);
		break;
	default:
		ret = rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			 "unknown object conversion operation");
	}

	rte_flow_trace_conv(op, dst, size, src, ret);

	return ret;
}
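
/*
 * Conversion sketch (illustrative, not part of the original file): the usual
 * two-pass pattern for rte_flow_conv(), first probing the required size with
 * a zero-sized destination, then filling a caller-provided buffer. The
 * function name and parameters are hypothetical.
 */
static __rte_unused int
flow_pattern_dup_sketch(struct rte_flow_item *dst, size_t dst_size,
			const struct rte_flow_item *pattern)
{
	struct rte_flow_error error;
	int len;

	/* First pass: a NULL/0 destination only computes the space needed. */
	len = rte_flow_conv(RTE_FLOW_CONV_OP_PATTERN, NULL, 0, pattern, &error);
	if (len < 0 || (size_t)len > dst_size)
		return -1;
	/* Second pass: perform the deep copy into the caller's buffer. */
	return rte_flow_conv(RTE_FLOW_CONV_OP_PATTERN, dst, len,
			     pattern, &error);
}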

/** Store a full rte_flow description. */
size_t
rte_flow_copy(struct rte_flow_desc *desc, size_t len,
	      const struct rte_flow_attr *attr,
	      const struct rte_flow_item *items,
	      const struct rte_flow_action *actions)
{
	/*
	 * Overlap struct rte_flow_conv_rule with struct rte_flow_desc in
	 * order to convert the former to the latter without wasting space.
	 */
	struct rte_flow_conv_rule *dst =
		len ?
		(void *)((uintptr_t)desc +
			 (offsetof(struct rte_flow_desc, actions) -
			  offsetof(struct rte_flow_conv_rule, actions))) :
		NULL;
	size_t dst_size =
		len > sizeof(*desc) - sizeof(*dst) ?
		len - (sizeof(*desc) - sizeof(*dst)) :
		0;
	struct rte_flow_conv_rule src = {
		.attr_ro = NULL,
		.pattern_ro = items,
		.actions_ro = actions,
	};
	int ret;

	RTE_BUILD_BUG_ON(sizeof(struct rte_flow_desc) <
			 sizeof(struct rte_flow_conv_rule));
	if (dst_size &&
	    (&dst->pattern != &desc->items ||
	     &dst->actions != &desc->actions ||
	     (uintptr_t)(dst + 1) != (uintptr_t)(desc + 1))) {
		rte_errno = EINVAL;
		return 0;
	}
	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, dst, dst_size, &src, NULL);
	if (ret < 0)
		return 0;
	ret += sizeof(*desc) - sizeof(*dst);
	rte_memcpy(desc,
		   (&(struct rte_flow_desc){
			.size = ret,
			.attr = *attr,
			.items = dst_size ? dst->pattern : NULL,
			.actions = dst_size ? dst->actions : NULL,
		   }),
		   len > sizeof(*desc) ? sizeof(*desc) : len);

	rte_flow_trace_copy(desc, len, attr, items, actions, ret);

	return ret;
}

int
rte_flow_dev_dump(uint16_t port_id, struct rte_flow *flow,
			FILE *file, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->dev_dump)) {
		fts_enter(dev);
		ret = ops->dev_dump(dev, flow, file, error);
		fts_exit(dev);
		return flow_err(port_id, ret, error);
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

int
rte_flow_get_aged_flows(uint16_t port_id, void **contexts,
		    uint32_t nb_contexts, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->get_aged_flows)) {
		fts_enter(dev);
		ret = ops->get_aged_flows(dev, contexts, nb_contexts, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_get_aged_flows(port_id, contexts, nb_contexts, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}
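
/*
 * Aging sketch (illustrative, not part of the original file): after an
 * RTE_ETH_EVENT_FLOW_AGED event, drain the aged-out contexts that were
 * attached through the AGE action. The batch size of 32 is arbitrary.
 */
static __rte_unused void
flow_aged_drain_sketch(uint16_t port_id)
{
	void *contexts[32];
	struct rte_flow_error error;
	int n, i;

	do {
		n = rte_flow_get_aged_flows(port_id, contexts,
					    RTE_DIM(contexts), &error);
		for (i = 0; i < n; i++) {
			/* Each context is the AGE action's user pointer. */
			FLOW_LOG(INFO, "aged flow context %p", contexts[i]);
		}
		/* A full batch suggests more aged flows may be pending. */
	} while (n == (int)RTE_DIM(contexts));
}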

int
rte_flow_get_q_aged_flows(uint16_t port_id, uint32_t queue_id, void **contexts,
			  uint32_t nb_contexts, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->get_q_aged_flows)) {
		fts_enter(dev);
		ret = ops->get_q_aged_flows(dev, queue_id, contexts,
					    nb_contexts, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_get_q_aged_flows(port_id, queue_id, contexts,
						nb_contexts, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

struct rte_flow_action_handle *
rte_flow_action_handle_create(uint16_t port_id,
			      const struct rte_flow_indir_action_conf *conf,
			      const struct rte_flow_action *action,
			      struct rte_flow_error *error)
{
	struct rte_flow_action_handle *handle;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return NULL;
	if (unlikely(!ops->action_handle_create)) {
		rte_flow_error_set(error, ENOSYS,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   rte_strerror(ENOSYS));
		return NULL;
	}
	handle = ops->action_handle_create(&rte_eth_devices[port_id],
					   conf, action, error);
	if (handle == NULL)
		flow_err(port_id, -rte_errno, error);

	rte_flow_trace_action_handle_create(port_id, conf, action, handle);

	return handle;
}
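
/*
 * Indirect action sketch (illustrative, not part of the original file):
 * create a shared COUNT action once and reference it from rules through
 * RTE_FLOW_ACTION_TYPE_INDIRECT, so several rules share one counter. The
 * function name is hypothetical.
 */
static __rte_unused struct rte_flow_action_handle *
flow_shared_count_sketch(uint16_t port_id)
{
	const struct rte_flow_indir_action_conf conf = { .ingress = 1 };
	const struct rte_flow_action count = {
		.type = RTE_FLOW_ACTION_TYPE_COUNT,
	};
	struct rte_flow_error error;

	/*
	 * In a rule, the returned handle goes directly into the conf field:
	 * { .type = RTE_FLOW_ACTION_TYPE_INDIRECT, .conf = handle }.
	 */
	return rte_flow_action_handle_create(port_id, &conf, &count, &error);
}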

int
rte_flow_action_handle_destroy(uint16_t port_id,
			       struct rte_flow_action_handle *handle,
			       struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(!ops->action_handle_destroy))
		return rte_flow_error_set(error, ENOSYS,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOSYS));
	ret = ops->action_handle_destroy(&rte_eth_devices[port_id],
					 handle, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_action_handle_destroy(port_id, handle, ret);

	return ret;
}

int
rte_flow_action_handle_update(uint16_t port_id,
			      struct rte_flow_action_handle *handle,
			      const void *update,
			      struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(!ops->action_handle_update))
		return rte_flow_error_set(error, ENOSYS,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOSYS));
	ret = ops->action_handle_update(&rte_eth_devices[port_id], handle,
					update, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_action_handle_update(port_id, handle, update, ret);

	return ret;
}

int
rte_flow_action_handle_query(uint16_t port_id,
			     const struct rte_flow_action_handle *handle,
			     void *data,
			     struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(!ops->action_handle_query))
		return rte_flow_error_set(error, ENOSYS,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOSYS));
	ret = ops->action_handle_query(&rte_eth_devices[port_id], handle,
				       data, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_action_handle_query(port_id, handle, data, ret);

	return ret;
}

int
rte_flow_tunnel_decap_set(uint16_t port_id,
			  struct rte_flow_tunnel *tunnel,
			  struct rte_flow_action **actions,
			  uint32_t *num_of_actions,
			  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_decap_set)) {
		ret = flow_err(port_id,
			       ops->tunnel_decap_set(dev, tunnel, actions,
						     num_of_actions, error),
			       error);

		rte_flow_trace_tunnel_decap_set(port_id, tunnel, actions,
						num_of_actions, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_tunnel_match(uint16_t port_id,
		      struct rte_flow_tunnel *tunnel,
		      struct rte_flow_item **items,
		      uint32_t *num_of_items,
		      struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_match)) {
		ret = flow_err(port_id,
			       ops->tunnel_match(dev, tunnel, items,
						 num_of_items, error),
			       error);

		rte_flow_trace_tunnel_match(port_id, tunnel, items, num_of_items,
					    ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_get_restore_info(uint16_t port_id,
			  struct rte_mbuf *m,
			  struct rte_flow_restore_info *restore_info,
			  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->get_restore_info)) {
		ret = flow_err(port_id,
			       ops->get_restore_info(dev, m, restore_info,
						     error),
			       error);

		rte_flow_trace_get_restore_info(port_id, m, restore_info, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

static struct {
	const struct rte_mbuf_dynflag desc;
	uint64_t value;
} flow_restore_info_dynflag = {
	.desc = { .name = "RTE_MBUF_F_RX_RESTORE_INFO", },
};

uint64_t
rte_flow_restore_info_dynflag(void)
{
	return flow_restore_info_dynflag.value;
}

int
rte_flow_restore_info_dynflag_register(void)
{
	if (flow_restore_info_dynflag.value == 0) {
		int offset = rte_mbuf_dynflag_register(&flow_restore_info_dynflag.desc);

		if (offset < 0)
			return -1;
		flow_restore_info_dynflag.value = RTE_BIT64(offset);
	}

	return 0;
}
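
/*
 * Restore-info sketch (illustrative, not part of the original file): recover
 * tunnel offload state for a received packet. The function name is made up.
 */
static __rte_unused void
flow_restore_info_sketch(uint16_t port_id, struct rte_mbuf *m)
{
	struct rte_flow_restore_info info;
	struct rte_flow_error error;

	/*
	 * Datapath code would first test the dynflag returned by
	 * rte_flow_restore_info_dynflag() against m->ol_flags to skip
	 * packets without restore data; that check is elided here.
	 */
	if (rte_flow_get_restore_info(port_id, m, &info, &error) == 0 &&
	    (info.flags & RTE_FLOW_RESTORE_INFO_TUNNEL) != 0)
		FLOW_LOG(INFO, "packet matched tunnel offload rule");
}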

int
rte_flow_tunnel_action_decap_release(uint16_t port_id,
				     struct rte_flow_action *actions,
				     uint32_t num_of_actions,
				     struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_action_decap_release)) {
		ret = flow_err(port_id,
			       ops->tunnel_action_decap_release(dev, actions,
								num_of_actions,
								error),
			       error);

		rte_flow_trace_tunnel_action_decap_release(port_id, actions,
							   num_of_actions, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_tunnel_item_release(uint16_t port_id,
			     struct rte_flow_item *items,
			     uint32_t num_of_items,
			     struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_item_release)) {
		ret = flow_err(port_id,
			       ops->tunnel_item_release(dev, items,
							num_of_items, error),
			       error);

		rte_flow_trace_tunnel_item_release(port_id, items, num_of_items, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_pick_transfer_proxy(uint16_t port_id, uint16_t *proxy_port_id,
			     struct rte_flow_error *error)
{
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_eth_dev *dev;
	int ret;

	if (unlikely(ops == NULL))
		return -rte_errno;

	if (ops->pick_transfer_proxy == NULL) {
		*proxy_port_id = port_id;
		return 0;
	}

	dev = &rte_eth_devices[port_id];

	ret = flow_err(port_id,
		       ops->pick_transfer_proxy(dev, proxy_port_id, error),
		       error);

	rte_flow_trace_pick_transfer_proxy(port_id, proxy_port_id, ret);

	return ret;
}
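
/*
 * Transfer-proxy sketch (illustrative, not part of the original file):
 * transfer rules must be created through the proxy port, so resolve it
 * first and target all subsequent transfer-rule calls at that port. As the
 * code above shows, the call degrades to the port itself when the PMD has
 * no proxy concept. The function name is hypothetical.
 */
static __rte_unused int
flow_transfer_proxy_sketch(uint16_t port_id, uint16_t *flow_port)
{
	struct rte_flow_error error;

	return rte_flow_pick_transfer_proxy(port_id, flow_port, &error);
}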

struct rte_flow_item_flex_handle *
rte_flow_flex_item_create(uint16_t port_id,
			  const struct rte_flow_item_flex_conf *conf,
			  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow_item_flex_handle *handle;

	if (unlikely(!ops))
		return NULL;
	if (unlikely(!ops->flex_item_create)) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, rte_strerror(ENOTSUP));
		return NULL;
	}
	handle = ops->flex_item_create(dev, conf, error);
	if (handle == NULL)
		flow_err(port_id, -rte_errno, error);

	rte_flow_trace_flex_item_create(port_id, conf, handle);

	return handle;
}

int
rte_flow_flex_item_release(uint16_t port_id,
			   const struct rte_flow_item_flex_handle *handle,
			   struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops || !ops->flex_item_release))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOTSUP));
	ret = ops->flex_item_release(dev, handle, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_flex_item_release(port_id, handle, ret);

	return ret;
}

int
rte_flow_info_get(uint16_t port_id,
		  struct rte_flow_port_info *port_info,
		  struct rte_flow_queue_info *queue_info,
		  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (dev->data->dev_configured == 0) {
		FLOW_LOG(INFO,
			"Device with port_id=%"PRIu16" is not configured.",
			port_id);
		return -EINVAL;
	}
	if (port_info == NULL) {
		FLOW_LOG(ERR, "Port %"PRIu16" info is NULL.", port_id);
		return -EINVAL;
	}
	if (likely(!!ops->info_get)) {
		ret = flow_err(port_id,
			       ops->info_get(dev, port_info, queue_info, error),
			       error);

		rte_flow_trace_info_get(port_id, port_info, queue_info, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_configure(uint16_t port_id,
		   const struct rte_flow_port_attr *port_attr,
		   uint16_t nb_queue,
		   const struct rte_flow_queue_attr *queue_attr[],
		   struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (dev->data->dev_configured == 0) {
		FLOW_LOG(INFO,
			"Device with port_id=%"PRIu16" is not configured.",
			port_id);
		return -EINVAL;
	}
	if (dev->data->dev_started != 0) {
		FLOW_LOG(INFO,
			"Device with port_id=%"PRIu16" already started.",
			port_id);
		return -EINVAL;
	}
	if (port_attr == NULL) {
		FLOW_LOG(ERR, "Port %"PRIu16" info is NULL.", port_id);
		return -EINVAL;
	}
	if (queue_attr == NULL) {
		FLOW_LOG(ERR, "Port %"PRIu16" queue info is NULL.", port_id);
		return -EINVAL;
	}
	if ((port_attr->flags & RTE_FLOW_PORT_FLAG_SHARE_INDIRECT) &&
	     !rte_eth_dev_is_valid_port(port_attr->host_port_id)) {
		return rte_flow_error_set(error, ENODEV,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENODEV));
	}
	if (likely(!!ops->configure)) {
		ret = ops->configure(dev, port_attr, nb_queue, queue_attr, error);
		if (ret == 0)
			dev->data->flow_configured = 1;
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_configure(port_id, port_attr, nb_queue, queue_attr, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}
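
/*
 * Pre-start configuration sketch (illustrative, not part of the original
 * file): size the flow engine after rte_eth_dev_configure() but before
 * rte_eth_dev_start(), as the checks above require. The resource counts and
 * queue depth below are hypothetical values.
 */
static __rte_unused int
flow_engine_configure_sketch(uint16_t port_id)
{
	const struct rte_flow_port_attr port_attr = {
		.nb_counters = 1024,
		.nb_aging_objects = 128,
	};
	const struct rte_flow_queue_attr queue_attr = { .size = 64 };
	const struct rte_flow_queue_attr *attr_list[] = { &queue_attr };
	struct rte_flow_error error;

	return rte_flow_configure(port_id, &port_attr,
				  RTE_DIM(attr_list), attr_list, &error);
}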
1699 
1700 struct rte_flow_pattern_template *
1701 rte_flow_pattern_template_create(uint16_t port_id,
1702 		const struct rte_flow_pattern_template_attr *template_attr,
1703 		const struct rte_flow_item pattern[],
1704 		struct rte_flow_error *error)
1705 {
1706 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1707 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1708 	struct rte_flow_pattern_template *template;
1709 
1710 	if (unlikely(!ops))
1711 		return NULL;
1712 	if (dev->data->flow_configured == 0) {
1713 		FLOW_LOG(INFO,
1714 			"Flow engine on port_id=%"PRIu16" is not configured.",
1715 			port_id);
1716 		rte_flow_error_set(error, EINVAL,
1717 				RTE_FLOW_ERROR_TYPE_STATE,
1718 				NULL, rte_strerror(EINVAL));
1719 		return NULL;
1720 	}
1721 	if (template_attr == NULL) {
1722 		FLOW_LOG(ERR,
1723 			     "Port %"PRIu16" template attr is NULL.",
1724 			     port_id);
1725 		rte_flow_error_set(error, EINVAL,
1726 				   RTE_FLOW_ERROR_TYPE_ATTR,
1727 				   NULL, rte_strerror(EINVAL));
1728 		return NULL;
1729 	}
1730 	if (pattern == NULL) {
1731 		FLOW_LOG(ERR,
1732 			     "Port %"PRIu16" pattern is NULL.",
1733 			     port_id);
1734 		rte_flow_error_set(error, EINVAL,
1735 				   RTE_FLOW_ERROR_TYPE_ATTR,
1736 				   NULL, rte_strerror(EINVAL));
1737 		return NULL;
1738 	}
1739 	if (likely(!!ops->pattern_template_create)) {
1740 		template = ops->pattern_template_create(dev, template_attr,
1741 							pattern, error);
1742 		if (template == NULL)
1743 			flow_err(port_id, -rte_errno, error);
1744 
1745 		rte_flow_trace_pattern_template_create(port_id, template_attr,
1746 						       pattern, template);
1747 
1748 		return template;
1749 	}
1750 	rte_flow_error_set(error, ENOTSUP,
1751 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1752 			   NULL, rte_strerror(ENOTSUP));
1753 	return NULL;
1754 }
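
/*
 * Usage sketch (illustrative): an ingress pattern template matching
 * ETH/IPV4; item masks, when given, select the fields a rule may match on.
 *
 *	static const struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	const struct rte_flow_pattern_template_attr pt_attr = { .ingress = 1 };
 *	struct rte_flow_pattern_template *pt =
 *		rte_flow_pattern_template_create(port_id, &pt_attr, pattern, &err);
 */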
1755 
1756 int
1757 rte_flow_pattern_template_destroy(uint16_t port_id,
1758 		struct rte_flow_pattern_template *pattern_template,
1759 		struct rte_flow_error *error)
1760 {
1761 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1762 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1763 	int ret;
1764 
1765 	if (unlikely(!ops))
1766 		return -rte_errno;
1767 	if (unlikely(pattern_template == NULL))
1768 		return 0;
1769 	if (likely(!!ops->pattern_template_destroy)) {
1770 		ret = flow_err(port_id,
1771 			       ops->pattern_template_destroy(dev,
1772 							     pattern_template,
1773 							     error),
1774 			       error);
1775 
1776 		rte_flow_trace_pattern_template_destroy(port_id, pattern_template,
1777 							ret);
1778 
1779 		return ret;
1780 	}
1781 	return rte_flow_error_set(error, ENOTSUP,
1782 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1783 				  NULL, rte_strerror(ENOTSUP));
1784 }
1785 
1786 struct rte_flow_actions_template *
1787 rte_flow_actions_template_create(uint16_t port_id,
1788 			const struct rte_flow_actions_template_attr *template_attr,
1789 			const struct rte_flow_action actions[],
1790 			const struct rte_flow_action masks[],
1791 			struct rte_flow_error *error)
1792 {
1793 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1794 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1795 	struct rte_flow_actions_template *template;
1796 
1797 	if (unlikely(!ops))
1798 		return NULL;
1799 	if (dev->data->flow_configured == 0) {
1800 		FLOW_LOG(INFO,
1801 			"Flow engine on port_id=%"PRIu16" is not configured.",
1802 			port_id);
1803 		rte_flow_error_set(error, EINVAL,
1804 				   RTE_FLOW_ERROR_TYPE_STATE,
1805 				   NULL, rte_strerror(EINVAL));
1806 		return NULL;
1807 	}
1808 	if (template_attr == NULL) {
1809 		FLOW_LOG(ERR,
1810 			     "Port %"PRIu16" template attr is NULL.",
1811 			     port_id);
1812 		rte_flow_error_set(error, EINVAL,
1813 				   RTE_FLOW_ERROR_TYPE_ATTR,
1814 				   NULL, rte_strerror(EINVAL));
1815 		return NULL;
1816 	}
1817 	if (actions == NULL) {
1818 		FLOW_LOG(ERR,
1819 			     "Port %"PRIu16" actions is NULL.",
1820 			     port_id);
1821 		rte_flow_error_set(error, EINVAL,
1822 				   RTE_FLOW_ERROR_TYPE_ATTR,
1823 				   NULL, rte_strerror(EINVAL));
1824 		return NULL;
1825 	}
1826 	if (masks == NULL) {
1827 		FLOW_LOG(ERR,
1828 			     "Port %"PRIu16" masks is NULL.",
1829 			     port_id);
1830 		rte_flow_error_set(error, EINVAL,
1831 				   RTE_FLOW_ERROR_TYPE_ATTR,
1832 				   NULL, rte_strerror(EINVAL));
1833 		return NULL;
1834 	}
1835 	if (likely(!!ops->actions_template_create)) {
1836 		template = ops->actions_template_create(dev, template_attr,
1837 							actions, masks, error);
1838 		if (template == NULL)
1839 			flow_err(port_id, -rte_errno, error);
1840 
1841 		rte_flow_trace_actions_template_create(port_id, template_attr, actions,
1842 						       masks, template);
1843 
1844 		return template;
1845 	}
1846 	rte_flow_error_set(error, ENOTSUP,
1847 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1848 			   NULL, rte_strerror(ENOTSUP));
1849 	return NULL;
1850 }
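
/*
 * Usage sketch (illustrative): masks[] mirrors actions[]; a masked
 * configuration field is fixed at template creation time, an unmasked one
 * is supplied per rule later.
 *
 *	static const struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	static const struct rte_flow_action masks[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	const struct rte_flow_actions_template_attr at_attr = { .ingress = 1 };
 *	struct rte_flow_actions_template *at =
 *		rte_flow_actions_template_create(port_id, &at_attr,
 *						 actions, masks, &err);
 */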
1851 
1852 int
1853 rte_flow_actions_template_destroy(uint16_t port_id,
1854 			struct rte_flow_actions_template *actions_template,
1855 			struct rte_flow_error *error)
1856 {
1857 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1858 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1859 	int ret;
1860 
1861 	if (unlikely(!ops))
1862 		return -rte_errno;
1863 	if (unlikely(actions_template == NULL))
1864 		return 0;
1865 	if (likely(!!ops->actions_template_destroy)) {
1866 		ret = flow_err(port_id,
1867 			       ops->actions_template_destroy(dev,
1868 							     actions_template,
1869 							     error),
1870 			       error);
1871 
1872 		rte_flow_trace_actions_template_destroy(port_id, actions_template,
1873 							ret);
1874 
1875 		return ret;
1876 	}
1877 	return rte_flow_error_set(error, ENOTSUP,
1878 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1879 				  NULL, rte_strerror(ENOTSUP));
1880 }
1881 
1882 struct rte_flow_template_table *
1883 rte_flow_template_table_create(uint16_t port_id,
1884 			const struct rte_flow_template_table_attr *table_attr,
1885 			struct rte_flow_pattern_template *pattern_templates[],
1886 			uint8_t nb_pattern_templates,
1887 			struct rte_flow_actions_template *actions_templates[],
1888 			uint8_t nb_actions_templates,
1889 			struct rte_flow_error *error)
1890 {
1891 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1892 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1893 	struct rte_flow_template_table *table;
1894 
1895 	if (unlikely(!ops))
1896 		return NULL;
1897 	if (dev->data->flow_configured == 0) {
1898 		FLOW_LOG(INFO,
1899 			"Flow engine on port_id=%"PRIu16" is not configured.",
1900 			port_id);
1901 		rte_flow_error_set(error, EINVAL,
1902 				   RTE_FLOW_ERROR_TYPE_STATE,
1903 				   NULL, rte_strerror(EINVAL));
1904 		return NULL;
1905 	}
1906 	if (table_attr == NULL) {
1907 		FLOW_LOG(ERR,
1908 			     "Port %"PRIu16" table attr is NULL.",
1909 			     port_id);
1910 		rte_flow_error_set(error, EINVAL,
1911 				   RTE_FLOW_ERROR_TYPE_ATTR,
1912 				   NULL, rte_strerror(EINVAL));
1913 		return NULL;
1914 	}
1915 	if (pattern_templates == NULL) {
1916 		FLOW_LOG(ERR,
1917 			     "Port %"PRIu16" pattern templates is NULL.",
1918 			     port_id);
1919 		rte_flow_error_set(error, EINVAL,
1920 				   RTE_FLOW_ERROR_TYPE_ATTR,
1921 				   NULL, rte_strerror(EINVAL));
1922 		return NULL;
1923 	}
1924 	if (actions_templates == NULL) {
1925 		FLOW_LOG(ERR,
1926 			     "Port %"PRIu16" actions templates is NULL.",
1927 			     port_id);
1928 		rte_flow_error_set(error, EINVAL,
1929 				   RTE_FLOW_ERROR_TYPE_ATTR,
1930 				   NULL, rte_strerror(EINVAL));
1931 		return NULL;
1932 	}
1933 	if (likely(!!ops->template_table_create)) {
1934 		table = ops->template_table_create(dev, table_attr,
1935 					pattern_templates, nb_pattern_templates,
1936 					actions_templates, nb_actions_templates,
1937 					error);
1938 		if (table == NULL)
1939 			flow_err(port_id, -rte_errno, error);
1940 
1941 		rte_flow_trace_template_table_create(port_id, table_attr,
1942 						     pattern_templates,
1943 						     nb_pattern_templates,
1944 						     actions_templates,
1945 						     nb_actions_templates, table);
1946 
1947 		return table;
1948 	}
1949 	rte_flow_error_set(error, ENOTSUP,
1950 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1951 			   NULL, rte_strerror(ENOTSUP));
1952 	return NULL;
1953 }
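
/*
 * Usage sketch (illustrative; 'pt' and 'at' come from the sketches above
 * and the rule capacity is arbitrary): combine one pattern template and
 * one actions template into a pre-sized table.
 *
 *	const struct rte_flow_template_table_attr tbl_attr = {
 *		.flow_attr = { .ingress = 1 },
 *		.nb_flows = 1 << 16,
 *	};
 *	struct rte_flow_template_table *tbl =
 *		rte_flow_template_table_create(port_id, &tbl_attr,
 *					       &pt, 1, &at, 1, &err);
 */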
1954 
1955 int
1956 rte_flow_template_table_destroy(uint16_t port_id,
1957 				struct rte_flow_template_table *template_table,
1958 				struct rte_flow_error *error)
1959 {
1960 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1961 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1962 	int ret;
1963 
1964 	if (unlikely(!ops))
1965 		return -rte_errno;
1966 	if (unlikely(template_table == NULL))
1967 		return 0;
1968 	if (likely(!!ops->template_table_destroy)) {
1969 		ret = flow_err(port_id,
1970 			       ops->template_table_destroy(dev,
1971 							   template_table,
1972 							   error),
1973 			       error);
1974 
1975 		rte_flow_trace_template_table_destroy(port_id, template_table,
1976 						      ret);
1977 
1978 		return ret;
1979 	}
1980 	return rte_flow_error_set(error, ENOTSUP,
1981 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1982 				  NULL, rte_strerror(ENOTSUP));
1983 }
1984 
1985 int
1986 rte_flow_group_set_miss_actions(uint16_t port_id,
1987 				uint32_t group_id,
1988 				const struct rte_flow_group_attr *attr,
1989 				const struct rte_flow_action actions[],
1990 				struct rte_flow_error *error)
1991 {
1992 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1993 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1994 
1995 	if (unlikely(!ops))
1996 		return -rte_errno;
1997 	if (likely(!!ops->group_set_miss_actions)) {
1998 		return flow_err(port_id,
1999 				ops->group_set_miss_actions(dev, group_id, attr, actions, error),
2000 				error);
2001 	}
2002 	return rte_flow_error_set(error, ENOTSUP,
2003 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2004 				  NULL, rte_strerror(ENOTSUP));
2005 }
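
/*
 * Usage sketch (illustrative): send packets that miss every rule in group 1
 * to group 2 instead of taking the default miss behavior.
 *
 *	const struct rte_flow_group_attr grp_attr = { .ingress = 1 };
 *	const struct rte_flow_action_jump jump = { .group = 2 };
 *	const struct rte_flow_action miss_actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_JUMP, .conf = &jump },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	ret = rte_flow_group_set_miss_actions(port_id, 1, &grp_attr,
 *					      miss_actions, &err);
 */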
2006 
2007 struct rte_flow *
2008 rte_flow_async_create(uint16_t port_id,
2009 		      uint32_t queue_id,
2010 		      const struct rte_flow_op_attr *op_attr,
2011 		      struct rte_flow_template_table *template_table,
2012 		      const struct rte_flow_item pattern[],
2013 		      uint8_t pattern_template_index,
2014 		      const struct rte_flow_action actions[],
2015 		      uint8_t actions_template_index,
2016 		      void *user_data,
2017 		      struct rte_flow_error *error)
2018 {
2019 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2020 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2021 	struct rte_flow *flow;
2022 
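	/*
	 * Fast-path call: unlike the slow-path APIs above, the async rule
	 * operations do not re-validate 'ops' or their arguments here; the
	 * caller is expected to have configured the flow engine first.
	 */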
2023 	flow = ops->async_create(dev, queue_id,
2024 				 op_attr, template_table,
2025 				 pattern, pattern_template_index,
2026 				 actions, actions_template_index,
2027 				 user_data, error);
2028 	if (flow == NULL)
2029 		flow_err(port_id, -rte_errno, error);
2030 
2031 	rte_flow_trace_async_create(port_id, queue_id, op_attr, template_table,
2032 				    pattern, pattern_template_index, actions,
2033 				    actions_template_index, user_data, flow);
2034 
2035 	return flow;
2036 }
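
/*
 * Usage sketch (illustrative; 'tbl', 'pattern' and 'actions' come from the
 * sketches above): enqueue a rule on queue 0 using entry 0 of each template
 * array; only fields left unmasked at template time need values here.
 *
 *	const struct rte_flow_op_attr op_attr = { .postpone = 1 };
 *	struct rte_flow *flow =
 *		rte_flow_async_create(port_id, 0, &op_attr, tbl,
 *				      pattern, 0, actions, 0, NULL, &err);
 *
 * The operation completes asynchronously; see rte_flow_push() and
 * rte_flow_pull() below.
 */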
2037 
2038 struct rte_flow *
2039 rte_flow_async_create_by_index(uint16_t port_id,
2040 			       uint32_t queue_id,
2041 			       const struct rte_flow_op_attr *op_attr,
2042 			       struct rte_flow_template_table *template_table,
2043 			       uint32_t rule_index,
2044 			       const struct rte_flow_action actions[],
2045 			       uint8_t actions_template_index,
2046 			       void *user_data,
2047 			       struct rte_flow_error *error)
2048 {
2049 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2050 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2051 	struct rte_flow *flow;
2052 
2053 	flow = ops->async_create_by_index(dev, queue_id,
2054 					  op_attr, template_table, rule_index,
2055 					  actions, actions_template_index,
2056 					  user_data, error);
2057 	if (flow == NULL)
2058 		flow_err(port_id, -rte_errno, error);
2059 	return flow;
2060 }
2061 
2062 int
2063 rte_flow_async_destroy(uint16_t port_id,
2064 		       uint32_t queue_id,
2065 		       const struct rte_flow_op_attr *op_attr,
2066 		       struct rte_flow *flow,
2067 		       void *user_data,
2068 		       struct rte_flow_error *error)
2069 {
2070 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2071 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2072 	int ret;
2073 
2074 	ret = flow_err(port_id,
2075 		       ops->async_destroy(dev, queue_id,
2076 					  op_attr, flow,
2077 					  user_data, error),
2078 		       error);
2079 
2080 	rte_flow_trace_async_destroy(port_id, queue_id, op_attr, flow,
2081 				     user_data, ret);
2082 
2083 	return ret;
2084 }
2085 
2086 int
2087 rte_flow_async_actions_update(uint16_t port_id,
2088 			      uint32_t queue_id,
2089 			      const struct rte_flow_op_attr *op_attr,
2090 			      struct rte_flow *flow,
2091 			      const struct rte_flow_action actions[],
2092 			      uint8_t actions_template_index,
2093 			      void *user_data,
2094 			      struct rte_flow_error *error)
2095 {
2096 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2097 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2098 	int ret;
2099 
2100 	ret = flow_err(port_id,
2101 		       ops->async_actions_update(dev, queue_id, op_attr,
2102 						 flow, actions,
2103 						 actions_template_index,
2104 						 user_data, error),
2105 		       error);
2106 
2107 	rte_flow_trace_async_actions_update(port_id, queue_id, op_attr, flow,
2108 					    actions, actions_template_index,
2109 					    user_data, ret);
2110 
2111 	return ret;
2112 }
2113 
2114 int
2115 rte_flow_push(uint16_t port_id,
2116 	      uint32_t queue_id,
2117 	      struct rte_flow_error *error)
2118 {
2119 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2120 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2121 	int ret;
2122 
2123 	ret = flow_err(port_id,
2124 		       ops->push(dev, queue_id, error),
2125 		       error);
2126 
2127 	rte_flow_trace_push(port_id, queue_id, ret);
2128 
2129 	return ret;
2130 }
2131 
2132 int
2133 rte_flow_pull(uint16_t port_id,
2134 	      uint32_t queue_id,
2135 	      struct rte_flow_op_result res[],
2136 	      uint16_t n_res,
2137 	      struct rte_flow_error *error)
2138 {
2139 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2140 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2141 	int ret;
2142 	int rc;
2143 
2144 	ret = ops->pull(dev, queue_id, res, n_res, error);
2145 	rc = ret ? ret : flow_err(port_id, ret, error);
2146 
2147 	rte_flow_trace_pull(port_id, queue_id, res, n_res, rc);
2148 
2149 	return rc;
2150 }
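
/*
 * Usage sketch (illustrative): flush postponed operations on queue 0, then
 * poll for completions; rte_flow_pull() returns how many results were read.
 *
 *	struct rte_flow_op_result res[32];
 *	int i, n, n_err = 0;
 *
 *	rte_flow_push(port_id, 0, &err);
 *	do {
 *		n = rte_flow_pull(port_id, 0, res, RTE_DIM(res), &err);
 *	} while (n == 0);
 *	for (i = 0; i < n; i++)
 *		n_err += (res[i].status != RTE_FLOW_OP_SUCCESS);
 */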
2151 
2152 struct rte_flow_action_handle *
2153 rte_flow_async_action_handle_create(uint16_t port_id,
2154 		uint32_t queue_id,
2155 		const struct rte_flow_op_attr *op_attr,
2156 		const struct rte_flow_indir_action_conf *indir_action_conf,
2157 		const struct rte_flow_action *action,
2158 		void *user_data,
2159 		struct rte_flow_error *error)
2160 {
2161 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2162 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2163 	struct rte_flow_action_handle *handle;
2164 
2165 	handle = ops->async_action_handle_create(dev, queue_id, op_attr,
2166 					     indir_action_conf, action, user_data, error);
2167 	if (handle == NULL)
2168 		flow_err(port_id, -rte_errno, error);
2169 
2170 	rte_flow_trace_async_action_handle_create(port_id, queue_id, op_attr,
2171 						  indir_action_conf, action,
2172 						  user_data, handle);
2173 
2174 	return handle;
2175 }
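
/*
 * Usage sketch (illustrative; 'op_attr' and 'err' as in the earlier
 * sketches): create a shared counter on queue 0; the returned handle is
 * then referenced from rules through an action of type
 * RTE_FLOW_ACTION_TYPE_INDIRECT.
 *
 *	const struct rte_flow_indir_action_conf ind_conf = { .ingress = 1 };
 *	const struct rte_flow_action count_action = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *	};
 *	struct rte_flow_action_handle *handle =
 *		rte_flow_async_action_handle_create(port_id, 0, &op_attr,
 *						    &ind_conf, &count_action,
 *						    NULL, &err);
 */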
2176 
2177 int
2178 rte_flow_async_action_handle_destroy(uint16_t port_id,
2179 		uint32_t queue_id,
2180 		const struct rte_flow_op_attr *op_attr,
2181 		struct rte_flow_action_handle *action_handle,
2182 		void *user_data,
2183 		struct rte_flow_error *error)
2184 {
2185 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2186 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2187 	int ret;
2188 
2189 	ret = ops->async_action_handle_destroy(dev, queue_id, op_attr,
2190 					   action_handle, user_data, error);
2191 	ret = flow_err(port_id, ret, error);
2192 
2193 	rte_flow_trace_async_action_handle_destroy(port_id, queue_id, op_attr,
2194 						   action_handle, user_data, ret);
2195 
2196 	return ret;
2197 }
2198 
2199 int
2200 rte_flow_async_action_handle_update(uint16_t port_id,
2201 		uint32_t queue_id,
2202 		const struct rte_flow_op_attr *op_attr,
2203 		struct rte_flow_action_handle *action_handle,
2204 		const void *update,
2205 		void *user_data,
2206 		struct rte_flow_error *error)
2207 {
2208 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2209 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2210 	int ret;
2211 
2212 	ret = ops->async_action_handle_update(dev, queue_id, op_attr,
2213 					  action_handle, update, user_data, error);
2214 	ret = flow_err(port_id, ret, error);
2215 
2216 	rte_flow_trace_async_action_handle_update(port_id, queue_id, op_attr,
2217 						  action_handle, update,
2218 						  user_data, ret);
2219 
2220 	return ret;
2221 }
2222 
2223 int
2224 rte_flow_async_action_handle_query(uint16_t port_id,
2225 		uint32_t queue_id,
2226 		const struct rte_flow_op_attr *op_attr,
2227 		const struct rte_flow_action_handle *action_handle,
2228 		void *data,
2229 		void *user_data,
2230 		struct rte_flow_error *error)
2231 {
2232 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2233 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2234 	int ret;
2235 
2236 	if (unlikely(!ops))
2237 		return -rte_errno;
2238 	ret = ops->async_action_handle_query(dev, queue_id, op_attr,
2239 					  action_handle, data, user_data, error);
2240 	ret = flow_err(port_id, ret, error);
2241 
2242 	rte_flow_trace_async_action_handle_query(port_id, queue_id, op_attr,
2243 						 action_handle, data, user_data,
2244 						 ret);
2245 
2246 	return ret;
2247 }
2248 
2249 int
2250 rte_flow_action_handle_query_update(uint16_t port_id,
2251 				    struct rte_flow_action_handle *handle,
2252 				    const void *update, void *query,
2253 				    enum rte_flow_query_update_mode mode,
2254 				    struct rte_flow_error *error)
2255 {
2256 	int ret;
2257 	struct rte_eth_dev *dev;
2258 	const struct rte_flow_ops *ops;
2259 
2260 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2261 	if (!handle)
2262 		return -EINVAL;
2263 	if (!update && !query)
2264 		return -EINVAL;
2265 	dev = &rte_eth_devices[port_id];
2266 	ops = rte_flow_ops_get(port_id, error);
2267 	if (!ops || !ops->action_handle_query_update)
2268 		return -ENOTSUP;
2269 	ret = ops->action_handle_query_update(dev, handle, update,
2270 					      query, mode, error);
2271 	return flow_err(port_id, ret, error);
2272 }
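
/*
 * Usage sketch (illustrative): query-only use on an indirect counter, with
 * 'update' left NULL as permitted by the checks above; the update/query
 * layouts are specific to the action type behind the handle.
 *
 *	struct rte_flow_query_count cnt = { .reset = 1 };
 *	ret = rte_flow_action_handle_query_update(port_id, handle, NULL, &cnt,
 *						  RTE_FLOW_QU_QUERY_FIRST,
 *						  &err);
 */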
2273 
2274 int
2275 rte_flow_async_action_handle_query_update(uint16_t port_id, uint32_t queue_id,
2276 					  const struct rte_flow_op_attr *attr,
2277 					  struct rte_flow_action_handle *handle,
2278 					  const void *update, void *query,
2279 					  enum rte_flow_query_update_mode mode,
2280 					  void *user_data,
2281 					  struct rte_flow_error *error)
2282 {
2283 	int ret;
2284 	struct rte_eth_dev *dev;
2285 	const struct rte_flow_ops *ops;
2286 
2287 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2288 	if (!handle)
2289 		return -EINVAL;
2290 	if (!update && !query)
2291 		return -EINVAL;
2292 	dev = &rte_eth_devices[port_id];
2293 	ops = rte_flow_ops_get(port_id, error);
2294 	if (!ops || !ops->async_action_handle_query_update)
2295 		return -ENOTSUP;
2296 	ret = ops->async_action_handle_query_update(dev, queue_id, attr,
2297 						    handle, update,
2298 						    query, mode,
2299 						    user_data, error);
2300 	return flow_err(port_id, ret, error);
2301 }
2302 
2303 struct rte_flow_action_list_handle *
2304 rte_flow_action_list_handle_create(uint16_t port_id,
2305 				   const
2306 				   struct rte_flow_indir_action_conf *conf,
2307 				   const struct rte_flow_action *actions,
2308 				   struct rte_flow_error *error)
2309 {
2310 	int ret;
2311 	struct rte_eth_dev *dev;
2312 	const struct rte_flow_ops *ops;
2313 	struct rte_flow_action_list_handle *handle;
2314 
2315 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
2316 	ops = rte_flow_ops_get(port_id, error);
2317 	if (!ops || !ops->action_list_handle_create) {
2318 		rte_flow_error_set(error, ENOTSUP,
2319 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2320 				   "action_list handle not supported");
2321 		return NULL;
2322 	}
2323 	dev = &rte_eth_devices[port_id];
2324 	handle = ops->action_list_handle_create(dev, conf, actions, error);
2325 	ret = handle == NULL ? flow_err(port_id, -rte_errno, error) : 0;
2326 	rte_flow_trace_action_list_handle_create(port_id, conf, actions, ret);
2327 	return handle;
2328 }
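
/*
 * Usage sketch (illustrative, reusing 'ind_conf' from the sketch above): an
 * indirect action list puts a whole END-terminated action sequence behind
 * a single shared handle.
 *
 *	const struct rte_flow_action list_actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_COUNT },
 *		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_action_list_handle *list_handle =
 *		rte_flow_action_list_handle_create(port_id, &ind_conf,
 *						   list_actions, &err);
 */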
2329 
2330 int
2331 rte_flow_action_list_handle_destroy(uint16_t port_id,
2332 				    struct rte_flow_action_list_handle *handle,
2333 				    struct rte_flow_error *error)
2334 {
2335 	int ret;
2336 	struct rte_eth_dev *dev;
2337 	const struct rte_flow_ops *ops;
2338 
2339 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2340 	ops = rte_flow_ops_get(port_id, error);
2341 	if (!ops || !ops->action_list_handle_destroy)
2342 		return rte_flow_error_set(error, ENOTSUP,
2343 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2344 					  "action_list handle not supported");
2345 	dev = &rte_eth_devices[port_id];
2346 	ret = ops->action_list_handle_destroy(dev, handle, error);
2347 	ret = flow_err(port_id, ret, error);
2348 	rte_flow_trace_action_list_handle_destroy(port_id, handle, ret);
2349 	return ret;
2350 }
2351 
2352 struct rte_flow_action_list_handle *
2353 rte_flow_async_action_list_handle_create(uint16_t port_id, uint32_t queue_id,
2354 					 const struct rte_flow_op_attr *attr,
2355 					 const struct rte_flow_indir_action_conf *conf,
2356 					 const struct rte_flow_action *actions,
2357 					 void *user_data,
2358 					 struct rte_flow_error *error)
2359 {
2360 	int ret;
2361 	struct rte_eth_dev *dev;
2362 	const struct rte_flow_ops *ops;
2363 	struct rte_flow_action_list_handle *handle;
2364 
2365 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
2366 	ops = rte_flow_ops_get(port_id, error);
2367 	if (!ops || !ops->async_action_list_handle_create) {
2368 		rte_flow_error_set(error, ENOTSUP,
2369 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2370 				   "action_list handle not supported");
2371 		return NULL;
2372 	}
2373 	dev = &rte_eth_devices[port_id];
2374 	handle = ops->async_action_list_handle_create(dev, queue_id, attr, conf,
2375 						      actions, user_data,
2376 						      error);
2377 	ret = handle == NULL ? flow_err(port_id, -rte_errno, error) : 0;
2378 	rte_flow_trace_async_action_list_handle_create(port_id, queue_id, attr,
2379 						       conf, actions, user_data,
2380 						       ret);
2381 	return handle;
2382 }
2383 
2384 int
2385 rte_flow_async_action_list_handle_destroy(uint16_t port_id, uint32_t queue_id,
2386 				 const struct rte_flow_op_attr *op_attr,
2387 				 struct rte_flow_action_list_handle *handle,
2388 				 void *user_data, struct rte_flow_error *error)
2389 {
2390 	int ret;
2391 	struct rte_eth_dev *dev;
2392 	const struct rte_flow_ops *ops;
2393 
2394 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2395 	ops = rte_flow_ops_get(port_id, error);
2396 	if (!ops || !ops->async_action_list_handle_destroy)
2397 		return rte_flow_error_set(error, ENOTSUP,
2398 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2399 					  "async action_list handle not supported");
2400 	dev = &rte_eth_devices[port_id];
2401 	ret = ops->async_action_list_handle_destroy(dev, queue_id, op_attr,
2402 						    handle, user_data, error);
2403 	ret = flow_err(port_id, ret, error);
2404 	rte_flow_trace_async_action_list_handle_destroy(port_id, queue_id,
2405 							op_attr, handle,
2406 							user_data, ret);
2407 	return ret;
2408 }
2409 
2410 int
2411 rte_flow_action_list_handle_query_update(uint16_t port_id,
2412 			 const struct rte_flow_action_list_handle *handle,
2413 			 const void **update, void **query,
2414 			 enum rte_flow_query_update_mode mode,
2415 			 struct rte_flow_error *error)
2416 {
2417 	int ret;
2418 	struct rte_eth_dev *dev;
2419 	const struct rte_flow_ops *ops;
2420 
2421 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2422 	ops = rte_flow_ops_get(port_id, error);
2423 	if (!ops || !ops->action_list_handle_query_update)
2424 		return rte_flow_error_set(error, ENOTSUP,
2425 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2426 					  "action_list query_update not supported");
2427 	dev = &rte_eth_devices[port_id];
2428 	ret = ops->action_list_handle_query_update(dev, handle, update, query,
2429 						   mode, error);
2430 	ret = flow_err(port_id, ret, error);
2431 	rte_flow_trace_action_list_handle_query_update(port_id, handle, update,
2432 						       query, mode, ret);
2433 	return ret;
2434 }
2435 
2436 int
2437 rte_flow_async_action_list_handle_query_update(uint16_t port_id, uint32_t queue_id,
2438 			 const struct rte_flow_op_attr *attr,
2439 			 const struct rte_flow_action_list_handle *handle,
2440 			 const void **update, void **query,
2441 			 enum rte_flow_query_update_mode mode,
2442 			 void *user_data, struct rte_flow_error *error)
2443 {
2444 	int ret;
2445 	struct rte_eth_dev *dev;
2446 	const struct rte_flow_ops *ops;
2447 
2448 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2449 	ops = rte_flow_ops_get(port_id, error);
2450 	if (!ops || !ops->async_action_list_handle_query_update)
2451 		return rte_flow_error_set(error, ENOTSUP,
2452 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2453 					  "action_list async query_update not supported");
2454 	dev = &rte_eth_devices[port_id];
2455 	ret = ops->async_action_list_handle_query_update(dev, queue_id, attr,
2456 							 handle, update, query,
2457 							 mode, user_data,
2458 							 error);
2459 	ret = flow_err(port_id, ret, error);
2460 	rte_flow_trace_async_action_list_handle_query_update(port_id, queue_id,
2461 							     attr, handle,
2462 							     update, query,
2463 							     mode, user_data,
2464 							     ret);
2465 	return ret;
2466 }
2467 
2468 int
2469 rte_flow_calc_table_hash(uint16_t port_id, const struct rte_flow_template_table *table,
2470 			 const struct rte_flow_item pattern[], uint8_t pattern_template_index,
2471 			 uint32_t *hash, struct rte_flow_error *error)
2472 {
2473 	int ret;
2474 	struct rte_eth_dev *dev;
2475 	const struct rte_flow_ops *ops;
2476 
2477 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2478 	ops = rte_flow_ops_get(port_id, error);
2479 	if (!ops || !ops->flow_calc_table_hash)
2480 		return rte_flow_error_set(error, ENOTSUP,
2481 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2482 					  "calc table hash not supported");
2483 	dev = &rte_eth_devices[port_id];
2484 	ret = ops->flow_calc_table_hash(dev, table, pattern, pattern_template_index,
2485 					hash, error);
2486 	return flow_err(port_id, ret, error);
2487 }
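
/*
 * Usage sketch (illustrative; 'tbl' and 'pattern' as in the earlier
 * sketches): ask the driver which hash it would compute for 'pattern'
 * against pattern template 0 of the table, e.g. to predict where a rule
 * or packet would land.
 *
 *	uint32_t hash;
 *	ret = rte_flow_calc_table_hash(port_id, tbl, pattern, 0, &hash, &err);
 */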
2488