xref: /dpdk/lib/ethdev/rte_flow.c (revision c6552d9a8deffa448de2d5e2e726f50508c1efd2)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <stdalign.h>
#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <pthread.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_branch_prediction.h>
#include <rte_string_fns.h>
#include <rte_mbuf_dyn.h>
#include "rte_flow_driver.h"
#include "rte_flow.h"

#include "ethdev_trace.h"

#define FLOW_LOG RTE_ETHDEV_LOG_LINE

/* Mbuf dynamic field name for metadata. */
int32_t rte_flow_dynf_metadata_offs = -1;

/* Mbuf dynamic field flag bit number for metadata. */
uint64_t rte_flow_dynf_metadata_mask;

/**
 * Flow elements description tables.
 */
struct rte_flow_desc_data {
	const char *name;
	size_t size;
	size_t (*desc_fn)(void *dst, const void *src);
};

/**
 * Copy flow element data into a destination buffer.
 *
 * @param buf
 * Destination memory.
 * @param data
 * Source memory.
 * @param size
 * Requested copy size.
 * @param desc
 * rte_flow_desc_item - for flow item conversion.
 * rte_flow_desc_action - for flow action conversion.
 * @param type
 * Offset into the desc param or negative value for private flow elements.
 *
 * @return
 * Number of bytes needed to store the element regardless of @p size.
 */
static inline size_t
rte_flow_conv_copy(void *buf, const void *data, const size_t size,
		   const struct rte_flow_desc_data *desc, int type)
{
	/* Allow PMD private flow item. */
	bool rte_type = type >= 0;

	size_t sz = rte_type ? desc[type].size : sizeof(void *);
	if (buf == NULL || data == NULL)
		return 0;
	rte_memcpy(buf, data, (size > sz ? sz : size));
	if (rte_type && desc[type].desc_fn)
		sz += desc[type].desc_fn(size > 0 ? buf : NULL, data);
	return sz;
}

static size_t
rte_flow_item_flex_conv(void *buf, const void *data)
{
	struct rte_flow_item_flex *dst = buf;
	const struct rte_flow_item_flex *src = data;
	if (buf) {
		dst->pattern = rte_memcpy
			((void *)((uintptr_t)(dst + 1)), src->pattern,
			 src->length);
	}
	return src->length;
}

/** Generate flow_item[] entry. */
#define MK_FLOW_ITEM(t, s) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
		.desc_fn = NULL, \
	}

#define MK_FLOW_ITEM_FN(t, s, fn) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
		.desc_fn = fn, \
	}

/** Information about known flow pattern items. */
static const struct rte_flow_desc_data rte_flow_desc_item[] = {
	MK_FLOW_ITEM(END, 0),
	MK_FLOW_ITEM(VOID, 0),
	MK_FLOW_ITEM(INVERT, 0),
	MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
	MK_FLOW_ITEM(PORT_ID, sizeof(struct rte_flow_item_port_id)),
	MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)),
	MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
	MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
	MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
	MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
	MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
	MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
	MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
	MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
	MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
	MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
	MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
	MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
	MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
	MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)),
	MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(ESP, sizeof(struct rte_flow_item_esp)),
	MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
	MK_FLOW_ITEM(VXLAN_GPE, sizeof(struct rte_flow_item_vxlan_gpe)),
	MK_FLOW_ITEM(ARP_ETH_IPV4, sizeof(struct rte_flow_item_arp_eth_ipv4)),
	MK_FLOW_ITEM(IPV6_EXT, sizeof(struct rte_flow_item_ipv6_ext)),
	MK_FLOW_ITEM(IPV6_FRAG_EXT, sizeof(struct rte_flow_item_ipv6_frag_ext)),
	MK_FLOW_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
	MK_FLOW_ITEM(ICMP6_ECHO_REQUEST, sizeof(struct rte_flow_item_icmp6_echo)),
	MK_FLOW_ITEM(ICMP6_ECHO_REPLY, sizeof(struct rte_flow_item_icmp6_echo)),
	MK_FLOW_ITEM(ICMP6_ND_NS, sizeof(struct rte_flow_item_icmp6_nd_ns)),
	MK_FLOW_ITEM(ICMP6_ND_NA, sizeof(struct rte_flow_item_icmp6_nd_na)),
	MK_FLOW_ITEM(ICMP6_ND_OPT, sizeof(struct rte_flow_item_icmp6_nd_opt)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_SLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_TLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
	MK_FLOW_ITEM(MARK, sizeof(struct rte_flow_item_mark)),
	MK_FLOW_ITEM(META, sizeof(struct rte_flow_item_meta)),
	MK_FLOW_ITEM(RANDOM, sizeof(struct rte_flow_item_random)),
	MK_FLOW_ITEM(TAG, sizeof(struct rte_flow_item_tag)),
	MK_FLOW_ITEM(GRE_KEY, sizeof(rte_be32_t)),
	MK_FLOW_ITEM(GRE_OPTION, sizeof(struct rte_flow_item_gre_opt)),
	MK_FLOW_ITEM(GTP_PSC, sizeof(struct rte_flow_item_gtp_psc)),
	MK_FLOW_ITEM(PPPOES, sizeof(struct rte_flow_item_pppoe)),
	MK_FLOW_ITEM(PPPOED, sizeof(struct rte_flow_item_pppoe)),
	MK_FLOW_ITEM(PPPOE_PROTO_ID,
			sizeof(struct rte_flow_item_pppoe_proto_id)),
	MK_FLOW_ITEM(NSH, sizeof(struct rte_flow_item_nsh)),
	MK_FLOW_ITEM(IGMP, sizeof(struct rte_flow_item_igmp)),
	MK_FLOW_ITEM(AH, sizeof(struct rte_flow_item_ah)),
	MK_FLOW_ITEM(HIGIG2, sizeof(struct rte_flow_item_higig2_hdr)),
	MK_FLOW_ITEM(L2TPV3OIP, sizeof(struct rte_flow_item_l2tpv3oip)),
	MK_FLOW_ITEM(PFCP, sizeof(struct rte_flow_item_pfcp)),
	MK_FLOW_ITEM(ECPRI, sizeof(struct rte_flow_item_ecpri)),
	MK_FLOW_ITEM(GENEVE_OPT, sizeof(struct rte_flow_item_geneve_opt)),
	MK_FLOW_ITEM(INTEGRITY, sizeof(struct rte_flow_item_integrity)),
	MK_FLOW_ITEM(CONNTRACK, sizeof(uint32_t)),
	MK_FLOW_ITEM(PORT_REPRESENTOR, sizeof(struct rte_flow_item_ethdev)),
	MK_FLOW_ITEM(REPRESENTED_PORT, sizeof(struct rte_flow_item_ethdev)),
	MK_FLOW_ITEM_FN(FLEX, sizeof(struct rte_flow_item_flex),
			rte_flow_item_flex_conv),
	MK_FLOW_ITEM(L2TPV2, sizeof(struct rte_flow_item_l2tpv2)),
	MK_FLOW_ITEM(PPP, sizeof(struct rte_flow_item_ppp)),
	MK_FLOW_ITEM(METER_COLOR, sizeof(struct rte_flow_item_meter_color)),
	MK_FLOW_ITEM(IPV6_ROUTING_EXT, sizeof(struct rte_flow_item_ipv6_routing_ext)),
	MK_FLOW_ITEM(QUOTA, sizeof(struct rte_flow_item_quota)),
	MK_FLOW_ITEM(AGGR_AFFINITY, sizeof(struct rte_flow_item_aggr_affinity)),
	MK_FLOW_ITEM(TX_QUEUE, sizeof(struct rte_flow_item_tx_queue)),
	MK_FLOW_ITEM(IB_BTH, sizeof(struct rte_flow_item_ib_bth)),
	MK_FLOW_ITEM(PTYPE, sizeof(struct rte_flow_item_ptype)),
	MK_FLOW_ITEM(COMPARE, sizeof(struct rte_flow_item_compare)),
};

/** Generate flow_action[] entry. */
#define MK_FLOW_ACTION(t, s) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
		.desc_fn = NULL, \
	}

#define MK_FLOW_ACTION_FN(t, fn) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = 0, \
		.desc_fn = fn, \
	}


/** Information about known flow actions. */
static const struct rte_flow_desc_data rte_flow_desc_action[] = {
	MK_FLOW_ACTION(END, 0),
	MK_FLOW_ACTION(VOID, 0),
	MK_FLOW_ACTION(PASSTHRU, 0),
	MK_FLOW_ACTION(JUMP, sizeof(struct rte_flow_action_jump)),
	MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
	MK_FLOW_ACTION(FLAG, 0),
	MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
	MK_FLOW_ACTION(DROP, 0),
	MK_FLOW_ACTION(COUNT, sizeof(struct rte_flow_action_count)),
	MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)),
	MK_FLOW_ACTION(PF, 0),
	MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
	MK_FLOW_ACTION(PORT_ID, sizeof(struct rte_flow_action_port_id)),
	MK_FLOW_ACTION(METER, sizeof(struct rte_flow_action_meter)),
	MK_FLOW_ACTION(SECURITY, sizeof(struct rte_flow_action_security)),
	MK_FLOW_ACTION(OF_DEC_NW_TTL, 0),
	MK_FLOW_ACTION(OF_POP_VLAN, 0),
	MK_FLOW_ACTION(OF_PUSH_VLAN,
		       sizeof(struct rte_flow_action_of_push_vlan)),
	MK_FLOW_ACTION(OF_SET_VLAN_VID,
		       sizeof(struct rte_flow_action_of_set_vlan_vid)),
	MK_FLOW_ACTION(OF_SET_VLAN_PCP,
		       sizeof(struct rte_flow_action_of_set_vlan_pcp)),
	MK_FLOW_ACTION(OF_POP_MPLS,
		       sizeof(struct rte_flow_action_of_pop_mpls)),
	MK_FLOW_ACTION(OF_PUSH_MPLS,
		       sizeof(struct rte_flow_action_of_push_mpls)),
	MK_FLOW_ACTION(VXLAN_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
	MK_FLOW_ACTION(VXLAN_DECAP, 0),
	MK_FLOW_ACTION(NVGRE_ENCAP, sizeof(struct rte_flow_action_nvgre_encap)),
	MK_FLOW_ACTION(NVGRE_DECAP, 0),
	MK_FLOW_ACTION(RAW_ENCAP, sizeof(struct rte_flow_action_raw_encap)),
	MK_FLOW_ACTION(RAW_DECAP, sizeof(struct rte_flow_action_raw_decap)),
	MK_FLOW_ACTION(SET_IPV4_SRC,
		       sizeof(struct rte_flow_action_set_ipv4)),
	MK_FLOW_ACTION(SET_IPV4_DST,
		       sizeof(struct rte_flow_action_set_ipv4)),
	MK_FLOW_ACTION(SET_IPV6_SRC,
		       sizeof(struct rte_flow_action_set_ipv6)),
	MK_FLOW_ACTION(SET_IPV6_DST,
		       sizeof(struct rte_flow_action_set_ipv6)),
	MK_FLOW_ACTION(SET_TP_SRC,
		       sizeof(struct rte_flow_action_set_tp)),
	MK_FLOW_ACTION(SET_TP_DST,
		       sizeof(struct rte_flow_action_set_tp)),
	MK_FLOW_ACTION(MAC_SWAP, 0),
	MK_FLOW_ACTION(DEC_TTL, 0),
	MK_FLOW_ACTION(SET_TTL, sizeof(struct rte_flow_action_set_ttl)),
	MK_FLOW_ACTION(SET_MAC_SRC, sizeof(struct rte_flow_action_set_mac)),
	MK_FLOW_ACTION(SET_MAC_DST, sizeof(struct rte_flow_action_set_mac)),
	MK_FLOW_ACTION(INC_TCP_SEQ, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(DEC_TCP_SEQ, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(INC_TCP_ACK, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(DEC_TCP_ACK, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(SET_TAG, sizeof(struct rte_flow_action_set_tag)),
	MK_FLOW_ACTION(SET_META, sizeof(struct rte_flow_action_set_meta)),
	MK_FLOW_ACTION(SET_IPV4_DSCP, sizeof(struct rte_flow_action_set_dscp)),
	MK_FLOW_ACTION(SET_IPV6_DSCP, sizeof(struct rte_flow_action_set_dscp)),
	MK_FLOW_ACTION(AGE, sizeof(struct rte_flow_action_age)),
	MK_FLOW_ACTION(SAMPLE, sizeof(struct rte_flow_action_sample)),
	MK_FLOW_ACTION(MODIFY_FIELD,
		       sizeof(struct rte_flow_action_modify_field)),
	/**
	 * Indirect action represented as handle of type
	 * (struct rte_flow_action_handle *) stored in conf field (see
	 * struct rte_flow_action); no need for additional structure to
	 * store indirect action handle.
	 */
	MK_FLOW_ACTION(INDIRECT, 0),
	MK_FLOW_ACTION(CONNTRACK, sizeof(struct rte_flow_action_conntrack)),
	MK_FLOW_ACTION(PORT_REPRESENTOR, sizeof(struct rte_flow_action_ethdev)),
	MK_FLOW_ACTION(REPRESENTED_PORT, sizeof(struct rte_flow_action_ethdev)),
	MK_FLOW_ACTION(METER_MARK, sizeof(struct rte_flow_action_meter_mark)),
	MK_FLOW_ACTION(SEND_TO_KERNEL, 0),
	MK_FLOW_ACTION(QUOTA, sizeof(struct rte_flow_action_quota)),
	MK_FLOW_ACTION(IPV6_EXT_PUSH, sizeof(struct rte_flow_action_ipv6_ext_push)),
	MK_FLOW_ACTION(IPV6_EXT_REMOVE, sizeof(struct rte_flow_action_ipv6_ext_remove)),
	MK_FLOW_ACTION(INDIRECT_LIST,
		       sizeof(struct rte_flow_action_indirect_list)),
	MK_FLOW_ACTION(PROG,
		       sizeof(struct rte_flow_action_prog)),
	MK_FLOW_ACTION(NAT64, sizeof(struct rte_flow_action_nat64)),
};

int
rte_flow_dynf_metadata_register(void)
{
	int offset;
	int flag;

	static const struct rte_mbuf_dynfield desc_offs = {
		.name = RTE_MBUF_DYNFIELD_METADATA_NAME,
		.size = sizeof(uint32_t),
		.align = alignof(uint32_t),
	};
	static const struct rte_mbuf_dynflag desc_flag = {
		.name = RTE_MBUF_DYNFLAG_METADATA_NAME,
	};

	offset = rte_mbuf_dynfield_register(&desc_offs);
	if (offset < 0)
		goto error;
	flag = rte_mbuf_dynflag_register(&desc_flag);
	if (flag < 0)
		goto error;
	rte_flow_dynf_metadata_offs = offset;
	rte_flow_dynf_metadata_mask = RTE_BIT64(flag);

	rte_flow_trace_dynf_metadata_register(offset, RTE_BIT64(flag));

	return 0;

error:
	rte_flow_dynf_metadata_offs = -1;
	rte_flow_dynf_metadata_mask = UINT64_C(0);
	return -rte_errno;
}
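
/*
 * Usage sketch (illustrative, not part of this library): an application
 * registers the metadata dynamic field once before starting the port, then
 * reads per-packet metadata on Rx. rte_flow_dynf_metadata_avail() and
 * RTE_FLOW_DYNF_METADATA() are the helpers declared in rte_flow.h; the
 * function name below is hypothetical and the snippet is kept under
 * "#if 0" as it is not compiled with this file.
 */
#if 0
static void
example_read_rx_metadata(struct rte_mbuf *pkt)
{
	if (!rte_flow_dynf_metadata_avail())
		return; /* rte_flow_dynf_metadata_register() was not called */
	if (pkt->ol_flags & rte_flow_dynf_metadata_mask) {
		/* Value set by e.g. RTE_FLOW_ACTION_TYPE_SET_META. */
		uint32_t meta = *RTE_FLOW_DYNF_METADATA(pkt);
		(void)meta;
	}
}
#endif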

static inline void
fts_enter(struct rte_eth_dev *dev)
{
	if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
		pthread_mutex_lock(&dev->data->flow_ops_mutex);
}

static inline void
fts_exit(struct rte_eth_dev *dev)
{
	if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
		pthread_mutex_unlock(&dev->data->flow_ops_mutex);
}

static int
flow_err(uint16_t port_id, int ret, struct rte_flow_error *error)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return rte_flow_error_set(error, EIO,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(EIO));
	return ret;
}

/* Get generic flow operations structure from a port. */
const struct rte_flow_ops *
rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops;
	int code;

	if (unlikely(!rte_eth_dev_is_valid_port(port_id)))
		code = ENODEV;
	else if (unlikely(dev->dev_ops->flow_ops_get == NULL))
		/* flow API not supported with this driver dev_ops */
		code = ENOSYS;
	else
		code = dev->dev_ops->flow_ops_get(dev, &ops);
	if (code == 0 && ops == NULL)
		/* flow API not supported with this device */
		code = ENOSYS;

	if (code != 0) {
		rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, rte_strerror(code));
		return NULL;
	}
	return ops;
}

/* Check whether a flow rule can be created on a given port. */
int
rte_flow_validate(uint16_t port_id,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;

	if (likely(!!attr) && attr->transfer &&
	    (attr->ingress || attr->egress)) {
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ATTR,
					  attr, "cannot use attr ingress/egress with attr transfer");
	}

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->validate)) {
		fts_enter(dev);
		ret = ops->validate(dev, attr, pattern, actions, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_validate(port_id, attr, pattern, actions, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Create a flow rule on a given port. */
struct rte_flow *
rte_flow_create(uint16_t port_id,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct rte_flow *flow;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return NULL;
	if (likely(!!ops->create)) {
		fts_enter(dev);
		flow = ops->create(dev, attr, pattern, actions, error);
		fts_exit(dev);
		if (flow == NULL)
			flow_err(port_id, -rte_errno, error);

		rte_flow_trace_create(port_id, attr, pattern, actions, flow);

		return flow;
	}
	rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(ENOSYS));
	return NULL;
}

/* Destroy a flow rule on a given port. */
int
rte_flow_destroy(uint16_t port_id,
		 struct rte_flow *flow,
		 struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->destroy)) {
		fts_enter(dev);
		ret = ops->destroy(dev, flow, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_destroy(port_id, flow, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}
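
/*
 * Usage sketch (illustrative, not part of this library): the typical
 * validate/create/destroy life cycle, here installing a rule that drops
 * all ingress traffic. The function name is hypothetical and error
 * handling is minimal; kept under "#if 0" as it is not compiled here.
 */
#if 0
static int
example_drop_all(uint16_t port_id)
{
	struct rte_flow_error err;
	const struct rte_flow_attr attr = { .ingress = 1 };
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_DROP },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};
	struct rte_flow *flow;

	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) != 0)
		return -rte_errno; /* err.message explains the rejection */
	flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
	if (flow == NULL)
		return -rte_errno;
	return rte_flow_destroy(port_id, flow, &err);
}
#endif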

int
rte_flow_actions_update(uint16_t port_id,
			struct rte_flow *flow,
			const struct rte_flow_action actions[],
			struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->actions_update)) {
		fts_enter(dev);
		ret = ops->actions_update(dev, flow, actions, error);
		fts_exit(dev);

		rte_flow_trace_actions_update(port_id, flow, actions, ret);

		return flow_err(port_id, ret, error);
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Destroy all flow rules associated with a port. */
int
rte_flow_flush(uint16_t port_id,
	       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->flush)) {
		fts_enter(dev);
		ret = ops->flush(dev, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_flush(port_id, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Query an existing flow rule. */
int
rte_flow_query(uint16_t port_id,
	       struct rte_flow *flow,
	       const struct rte_flow_action *action,
	       void *data,
	       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (!ops)
		return -rte_errno;
	if (likely(!!ops->query)) {
		fts_enter(dev);
		ret = ops->query(dev, flow, action, data, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_query(port_id, flow, action, data, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Restrict ingress traffic to the defined flow rules. */
int
rte_flow_isolate(uint16_t port_id,
		 int set,
		 struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (!ops)
		return -rte_errno;
	if (likely(!!ops->isolate)) {
		fts_enter(dev);
		ret = ops->isolate(dev, set, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_isolate(port_id, set, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Initialize flow error structure. */
int
rte_flow_error_set(struct rte_flow_error *error,
		   int code,
		   enum rte_flow_error_type type,
		   const void *cause,
		   const char *message)
{
	if (error) {
		*error = (struct rte_flow_error){
			.type = type,
			.cause = cause,
			.message = message,
		};
	}
	rte_errno = code;
	return -code;
}
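
/*
 * Driver-side sketch (illustrative, not part of this library): a PMD
 * callback reports a failure through rte_flow_error_set(), which fills
 * the user-provided structure, sets rte_errno and returns the negated
 * error code in a single statement. The callback name is hypothetical;
 * kept under "#if 0" as it is not compiled here.
 */
#if 0
static int
example_pmd_validate(struct rte_eth_dev *dev,
		     const struct rte_flow_attr *attr,
		     const struct rte_flow_item pattern[],
		     const struct rte_flow_action actions[],
		     struct rte_flow_error *error)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(pattern);
	RTE_SET_USED(actions);
	if (attr->group != 0)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_GROUP,
					  attr, "only group 0 is supported");
	return 0;
}
#endif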

/** Pattern item specification types. */
enum rte_flow_conv_item_spec_type {
	RTE_FLOW_CONV_ITEM_SPEC,
	RTE_FLOW_CONV_ITEM_LAST,
	RTE_FLOW_CONV_ITEM_MASK,
};

/**
 * Copy pattern item specification.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] item
 *   Pattern item to copy specification from.
 * @param type
 *   Specification selector for either @p spec, @p last or @p mask.
 *
 * @return
 *   Number of bytes needed to store pattern item specification regardless
 *   of @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_item_spec(void *buf, const size_t size,
			const struct rte_flow_item *item,
			enum rte_flow_conv_item_spec_type type)
{
	size_t off;
	const void *data =
		type == RTE_FLOW_CONV_ITEM_SPEC ? item->spec :
		type == RTE_FLOW_CONV_ITEM_LAST ? item->last :
		type == RTE_FLOW_CONV_ITEM_MASK ? item->mask :
		NULL;

	switch (item->type) {
		union {
			const struct rte_flow_item_raw *raw;
		} spec;
		union {
			const struct rte_flow_item_raw *raw;
		} last;
		union {
			const struct rte_flow_item_raw *raw;
		} mask;
		union {
			const struct rte_flow_item_raw *raw;
		} src;
		union {
			struct rte_flow_item_raw *raw;
		} dst;
		size_t tmp;

	case RTE_FLOW_ITEM_TYPE_RAW:
		spec.raw = item->spec;
		last.raw = item->last ? item->last : item->spec;
		mask.raw = item->mask ? item->mask : &rte_flow_item_raw_mask;
		src.raw = data;
		dst.raw = buf;
		rte_memcpy(dst.raw,
			   (&(struct rte_flow_item_raw){
				.relative = src.raw->relative,
				.search = src.raw->search,
				.reserved = src.raw->reserved,
				.offset = src.raw->offset,
				.limit = src.raw->limit,
				.length = src.raw->length,
			   }),
			   size > sizeof(*dst.raw) ? sizeof(*dst.raw) : size);
		off = sizeof(*dst.raw);
		if (type == RTE_FLOW_CONV_ITEM_SPEC ||
		    (type == RTE_FLOW_CONV_ITEM_MASK &&
		     ((spec.raw->length & mask.raw->length) >=
		      (last.raw->length & mask.raw->length))))
			tmp = spec.raw->length & mask.raw->length;
		else
			tmp = last.raw->length & mask.raw->length;
		if (tmp) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.raw->pattern));
			if (size >= off + tmp)
				dst.raw->pattern = rte_memcpy
					((void *)((uintptr_t)dst.raw + off),
					 src.raw->pattern, tmp);
			off += tmp;
		}
		break;
	default:
		off = rte_flow_conv_copy(buf, data, size,
					 rte_flow_desc_item, item->type);
		break;
	}
	return off;
}

/**
 * Copy action configuration.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] action
 *   Action to copy configuration from.
 *
 * @return
 *   Number of bytes needed to store the action configuration regardless
 *   of @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_action_conf(void *buf, const size_t size,
			  const struct rte_flow_action *action)
{
	size_t off;

	switch (action->type) {
		union {
			const struct rte_flow_action_rss *rss;
			const struct rte_flow_action_vxlan_encap *vxlan_encap;
			const struct rte_flow_action_nvgre_encap *nvgre_encap;
		} src;
		union {
			struct rte_flow_action_rss *rss;
			struct rte_flow_action_vxlan_encap *vxlan_encap;
			struct rte_flow_action_nvgre_encap *nvgre_encap;
		} dst;
		size_t tmp;
		int ret;

	case RTE_FLOW_ACTION_TYPE_RSS:
		src.rss = action->conf;
		dst.rss = buf;
		rte_memcpy(dst.rss,
			   (&(struct rte_flow_action_rss){
				.func = src.rss->func,
				.level = src.rss->level,
				.types = src.rss->types,
				.key_len = src.rss->key_len,
				.queue_num = src.rss->queue_num,
			   }),
			   size > sizeof(*dst.rss) ? sizeof(*dst.rss) : size);
		off = sizeof(*dst.rss);
		if (src.rss->key_len && src.rss->key) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->key));
			tmp = sizeof(*src.rss->key) * src.rss->key_len;
			if (size >= (uint64_t)off + (uint64_t)tmp)
				dst.rss->key = rte_memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->key, tmp);
			off += tmp;
		}
		if (src.rss->queue_num) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->queue));
			tmp = sizeof(*src.rss->queue) * src.rss->queue_num;
			if (size >= (uint64_t)off + (uint64_t)tmp)
				dst.rss->queue = rte_memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->queue, tmp);
			off += tmp;
		}
		break;
	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
	case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
		src.vxlan_encap = action->conf;
		dst.vxlan_encap = buf;
		RTE_BUILD_BUG_ON(sizeof(*src.vxlan_encap) !=
				 sizeof(*src.nvgre_encap) ||
				 offsetof(struct rte_flow_action_vxlan_encap,
					  definition) !=
				 offsetof(struct rte_flow_action_nvgre_encap,
					  definition));
		off = sizeof(*dst.vxlan_encap);
		if (src.vxlan_encap->definition) {
			off = RTE_ALIGN_CEIL
				(off, sizeof(*dst.vxlan_encap->definition));
			ret = rte_flow_conv
				(RTE_FLOW_CONV_OP_PATTERN,
				 (void *)((uintptr_t)dst.vxlan_encap + off),
				 size > off ? size - off : 0,
				 src.vxlan_encap->definition, NULL);
			if (ret < 0)
				return 0;
			if (size >= off + ret)
				dst.vxlan_encap->definition =
					(void *)((uintptr_t)dst.vxlan_encap +
						 off);
			off += ret;
		}
		break;
	default:
		off = rte_flow_conv_copy(buf, action->conf, size,
					 rte_flow_desc_action, action->type);
		break;
	}
	return off;
}

/**
 * Copy a list of pattern items.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source pattern items.
 * @param num
 *   Maximum number of pattern items to process from @p src or 0 to process
 *   the entire list. In both cases, processing stops after
 *   RTE_FLOW_ITEM_TYPE_END is encountered.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store
 *   pattern items regardless of @p size on success (@p dst contents are
 *   truncated to @p size if not large enough), a negative errno value
 *   otherwise and rte_errno is set.
 */
static int
rte_flow_conv_pattern(struct rte_flow_item *dst,
		      const size_t size,
		      const struct rte_flow_item *src,
		      unsigned int num,
		      struct rte_flow_error *error)
{
	uintptr_t data = (uintptr_t)dst;
	size_t off;
	size_t ret;
	unsigned int i;

	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
		/* Allow PMD private flow item. */
		if (((int)src->type >= 0) &&
		    ((size_t)src->type >= RTE_DIM(rte_flow_desc_item) ||
		    !rte_flow_desc_item[src->type].name))
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, src,
				 "cannot convert unknown item type");
		if (size >= off + sizeof(*dst))
			*dst = (struct rte_flow_item){
				.type = src->type,
			};
		off += sizeof(*dst);
		if (!src->type)
			num = i + 1;
	}
	num = i;
	src -= num;
	dst -= num;
	do {
		if (src->spec) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_SPEC);
			if (size && size >= off + ret)
				dst->spec = (void *)(data + off);
			off += ret;
		}
		if (src->last) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_LAST);
			if (size && size >= off + ret)
				dst->last = (void *)(data + off);
			off += ret;
		}
		if (src->mask) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_MASK);
			if (size && size >= off + ret)
				dst->mask = (void *)(data + off);
			off += ret;
		}
		++src;
		++dst;
	} while (--num);
	return off;
}

/**
 * Copy a list of actions.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source actions.
 * @param num
 *   Maximum number of actions to process from @p src or 0 to process the
 *   entire list. In both cases, processing stops after
 *   RTE_FLOW_ACTION_TYPE_END is encountered.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store
 *   actions regardless of @p size on success (@p dst contents are truncated
 *   to @p size if not large enough), a negative errno value otherwise and
 *   rte_errno is set.
 */
static int
rte_flow_conv_actions(struct rte_flow_action *dst,
		      const size_t size,
		      const struct rte_flow_action *src,
		      unsigned int num,
		      struct rte_flow_error *error)
{
	uintptr_t data = (uintptr_t)dst;
	size_t off;
	size_t ret;
	unsigned int i;

	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
		/* Allow PMD private flow action. */
		if (((int)src->type >= 0) &&
		    ((size_t)src->type >= RTE_DIM(rte_flow_desc_action) ||
		    !rte_flow_desc_action[src->type].name))
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
				 src, "cannot convert unknown action type");
		if (size >= off + sizeof(*dst))
			*dst = (struct rte_flow_action){
				.type = src->type,
			};
		off += sizeof(*dst);
		if (!src->type)
			num = i + 1;
	}
	num = i;
	src -= num;
	dst -= num;
	do {
		if (src->type == RTE_FLOW_ACTION_TYPE_INDIRECT) {
			/*
			 * For indirect actions, conf holds the action
			 * handle itself. Copy the handle directly instead
			 * of duplicating the memory it points to.
			 */
			if (size)
				dst->conf = src->conf;
		} else if (src->conf) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_action_conf
				((void *)(data + off),
				 size > off ? size - off : 0, src);
			if (size && size >= off + ret)
				dst->conf = (void *)(data + off);
			off += ret;
		}
		++src;
		++dst;
	} while (--num);
	return off;
}

/**
 * Copy flow rule components.
 *
 * This comprises the flow rule descriptor itself, attributes, pattern and
 * actions list. NULL components in @p src are skipped.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source flow rule descriptor.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store all
 *   components including the descriptor regardless of @p size on success
 *   (@p dst contents are truncated to @p size if not large enough), a
 *   negative errno value otherwise and rte_errno is set.
 */
static int
rte_flow_conv_rule(struct rte_flow_conv_rule *dst,
		   const size_t size,
		   const struct rte_flow_conv_rule *src,
		   struct rte_flow_error *error)
{
	size_t off;
	int ret;

	rte_memcpy(dst,
		   (&(struct rte_flow_conv_rule){
			.attr = NULL,
			.pattern = NULL,
			.actions = NULL,
		   }),
		   size > sizeof(*dst) ? sizeof(*dst) : size);
	off = sizeof(*dst);
	if (src->attr_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		if (size && size >= off + sizeof(*dst->attr))
			dst->attr = rte_memcpy
				((void *)((uintptr_t)dst + off),
				 src->attr_ro, sizeof(*dst->attr));
		off += sizeof(*dst->attr);
	}
	if (src->pattern_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		ret = rte_flow_conv_pattern((void *)((uintptr_t)dst + off),
					    size > off ? size - off : 0,
					    src->pattern_ro, 0, error);
		if (ret < 0)
			return ret;
		if (size && size >= off + (size_t)ret)
			dst->pattern = (void *)((uintptr_t)dst + off);
		off += ret;
	}
	if (src->actions_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		ret = rte_flow_conv_actions((void *)((uintptr_t)dst + off),
					    size > off ? size - off : 0,
					    src->actions_ro, 0, error);
		if (ret < 0)
			return ret;
		if (size >= off + (size_t)ret)
			dst->actions = (void *)((uintptr_t)dst + off);
		off += ret;
	}
	return off;
}

/**
 * Retrieve the name of a pattern item/action type.
 *
 * @param is_action
 *   Nonzero when @p src represents an action type instead of a pattern item
 *   type.
 * @param is_ptr
 *   Nonzero to write string address instead of contents into @p dst.
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Depending on @p is_action, source pattern item or action type cast as a
 *   pointer.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store the
 *   name or its address regardless of @p size on success (@p dst contents
 *   are truncated to @p size if not large enough), a negative errno value
 *   otherwise and rte_errno is set.
 */
static int
rte_flow_conv_name(int is_action,
		   int is_ptr,
		   char *dst,
		   const size_t size,
		   const void *src,
		   struct rte_flow_error *error)
{
	struct desc_info {
		const struct rte_flow_desc_data *data;
		size_t num;
	};
	static const struct desc_info info_rep[2] = {
		{ rte_flow_desc_item, RTE_DIM(rte_flow_desc_item), },
		{ rte_flow_desc_action, RTE_DIM(rte_flow_desc_action), },
	};
	const struct desc_info *const info = &info_rep[!!is_action];
	unsigned int type = (uintptr_t)src;

	if (type >= info->num)
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			 "unknown object type to retrieve the name of");
	if (!is_ptr)
		return strlcpy(dst, info->data[type].name, size);
	if (size >= sizeof(const char **))
		*((const char **)dst) = info->data[type].name;
	return sizeof(const char **);
}

/** Helper function to convert flow API objects. */
int
rte_flow_conv(enum rte_flow_conv_op op,
	      void *dst,
	      size_t size,
	      const void *src,
	      struct rte_flow_error *error)
{
	int ret;

	switch (op) {
		const struct rte_flow_attr *attr;

	case RTE_FLOW_CONV_OP_NONE:
		ret = 0;
		break;
	case RTE_FLOW_CONV_OP_ATTR:
		attr = src;
		if (size > sizeof(*attr))
			size = sizeof(*attr);
		rte_memcpy(dst, attr, size);
		ret = sizeof(*attr);
		break;
	case RTE_FLOW_CONV_OP_ITEM:
		ret = rte_flow_conv_pattern(dst, size, src, 1, error);
		break;
	case RTE_FLOW_CONV_OP_ACTION:
		ret = rte_flow_conv_actions(dst, size, src, 1, error);
		break;
	case RTE_FLOW_CONV_OP_PATTERN:
		ret = rte_flow_conv_pattern(dst, size, src, 0, error);
		break;
	case RTE_FLOW_CONV_OP_ACTIONS:
		ret = rte_flow_conv_actions(dst, size, src, 0, error);
		break;
	case RTE_FLOW_CONV_OP_RULE:
		ret = rte_flow_conv_rule(dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ITEM_NAME:
		ret = rte_flow_conv_name(0, 0, dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ACTION_NAME:
		ret = rte_flow_conv_name(1, 0, dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ITEM_NAME_PTR:
		ret = rte_flow_conv_name(0, 1, dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ACTION_NAME_PTR:
		ret = rte_flow_conv_name(1, 1, dst, size, src, error);
		break;
	default:
		ret = rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			 "unknown object conversion operation");
	}

	rte_flow_trace_conv(op, dst, size, src, ret);

	return ret;
}
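
/*
 * Usage sketch (illustrative, not part of this library): the usual
 * two-pass pattern with RTE_FLOW_CONV_OP_RULE, first querying the
 * required size with a zero-sized buffer, then performing the copy.
 * Assumes <stdlib.h> for malloc()/free(); the function name is
 * hypothetical and the snippet is kept under "#if 0".
 */
#if 0
static struct rte_flow_conv_rule *
example_dup_rule(const struct rte_flow_attr *attr,
		 const struct rte_flow_item *pattern,
		 const struct rte_flow_action *actions,
		 struct rte_flow_error *error)
{
	const struct rte_flow_conv_rule src = {
		.attr_ro = attr,
		.pattern_ro = pattern,
		.actions_ro = actions,
	};
	struct rte_flow_conv_rule *dst;
	int size;

	/* First pass: no destination, just compute the required size. */
	size = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &src, error);
	if (size < 0)
		return NULL;
	dst = malloc(size);
	if (dst == NULL)
		return NULL;
	/* Second pass: actual conversion into the sized buffer. */
	if (rte_flow_conv(RTE_FLOW_CONV_OP_RULE, dst, size, &src, error) < 0) {
		free(dst);
		return NULL;
	}
	return dst;
}
#endif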

/** Store a full rte_flow description. */
size_t
rte_flow_copy(struct rte_flow_desc *desc, size_t len,
	      const struct rte_flow_attr *attr,
	      const struct rte_flow_item *items,
	      const struct rte_flow_action *actions)
{
	/*
	 * Overlap struct rte_flow_conv_rule with struct rte_flow_desc in
	 * order to convert the former into the latter without wasting space.
	 */
	struct rte_flow_conv_rule *dst =
		len ?
		(void *)((uintptr_t)desc +
			 (offsetof(struct rte_flow_desc, actions) -
			  offsetof(struct rte_flow_conv_rule, actions))) :
		NULL;
	size_t dst_size =
		len > sizeof(*desc) - sizeof(*dst) ?
		len - (sizeof(*desc) - sizeof(*dst)) :
		0;
	struct rte_flow_conv_rule src = {
		.attr_ro = NULL,
		.pattern_ro = items,
		.actions_ro = actions,
	};
	int ret;

	RTE_BUILD_BUG_ON(sizeof(struct rte_flow_desc) <
			 sizeof(struct rte_flow_conv_rule));
	if (dst_size &&
	    (&dst->pattern != &desc->items ||
	     &dst->actions != &desc->actions ||
	     (uintptr_t)(dst + 1) != (uintptr_t)(desc + 1))) {
		rte_errno = EINVAL;
		return 0;
	}
	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, dst, dst_size, &src, NULL);
	if (ret < 0)
		return 0;
	ret += sizeof(*desc) - sizeof(*dst);
	rte_memcpy(desc,
		   (&(struct rte_flow_desc){
			.size = ret,
			.attr = *attr,
			.items = dst_size ? dst->pattern : NULL,
			.actions = dst_size ? dst->actions : NULL,
		   }),
		   len > sizeof(*desc) ? sizeof(*desc) : len);

	rte_flow_trace_copy(desc, len, attr, items, actions, ret);

	return ret;
}

int
rte_flow_dev_dump(uint16_t port_id, struct rte_flow *flow,
			FILE *file, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->dev_dump)) {
		fts_enter(dev);
		ret = ops->dev_dump(dev, flow, file, error);
		fts_exit(dev);
		return flow_err(port_id, ret, error);
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

int
rte_flow_get_aged_flows(uint16_t port_id, void **contexts,
		    uint32_t nb_contexts, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->get_aged_flows)) {
		fts_enter(dev);
		ret = ops->get_aged_flows(dev, contexts, nb_contexts, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_get_aged_flows(port_id, contexts, nb_contexts, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}
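
/*
 * Usage sketch (illustrative, not part of this library): poll flows aged
 * out by RTE_FLOW_ACTION_TYPE_AGE. A first call with nb_contexts == 0
 * reports how many contexts are pending; a second call fetches them.
 * Assumes <stdlib.h> for calloc()/free(); hypothetical function name,
 * kept under "#if 0".
 */
#if 0
static void
example_poll_aged(uint16_t port_id)
{
	struct rte_flow_error err;
	void **ctx;
	int i, n;

	n = rte_flow_get_aged_flows(port_id, NULL, 0, &err);
	if (n <= 0)
		return;
	ctx = calloc(n, sizeof(*ctx));
	if (ctx == NULL)
		return;
	n = rte_flow_get_aged_flows(port_id, ctx, n, &err);
	for (i = 0; i < n; i++) {
		/* Each entry is the "context" from rte_flow_action_age. */
	}
	free(ctx);
}
#endif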

int
rte_flow_get_q_aged_flows(uint16_t port_id, uint32_t queue_id, void **contexts,
			  uint32_t nb_contexts, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->get_q_aged_flows)) {
		fts_enter(dev);
		ret = ops->get_q_aged_flows(dev, queue_id, contexts,
					    nb_contexts, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_get_q_aged_flows(port_id, queue_id, contexts,
						nb_contexts, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

struct rte_flow_action_handle *
rte_flow_action_handle_create(uint16_t port_id,
			      const struct rte_flow_indir_action_conf *conf,
			      const struct rte_flow_action *action,
			      struct rte_flow_error *error)
{
	struct rte_flow_action_handle *handle;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return NULL;
	if (unlikely(!ops->action_handle_create)) {
		rte_flow_error_set(error, ENOSYS,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   rte_strerror(ENOSYS));
		return NULL;
	}
	handle = ops->action_handle_create(&rte_eth_devices[port_id],
					   conf, action, error);
	if (handle == NULL)
		flow_err(port_id, -rte_errno, error);

	rte_flow_trace_action_handle_create(port_id, conf, action, handle);

	return handle;
}

int
rte_flow_action_handle_destroy(uint16_t port_id,
			       struct rte_flow_action_handle *handle,
			       struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(!ops->action_handle_destroy))
		return rte_flow_error_set(error, ENOSYS,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOSYS));
	ret = ops->action_handle_destroy(&rte_eth_devices[port_id],
					 handle, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_action_handle_destroy(port_id, handle, ret);

	return ret;
}

int
rte_flow_action_handle_update(uint16_t port_id,
			      struct rte_flow_action_handle *handle,
			      const void *update,
			      struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(!ops->action_handle_update))
		return rte_flow_error_set(error, ENOSYS,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOSYS));
	ret = ops->action_handle_update(&rte_eth_devices[port_id], handle,
					update, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_action_handle_update(port_id, handle, update, ret);

	return ret;
}

int
rte_flow_action_handle_query(uint16_t port_id,
			     const struct rte_flow_action_handle *handle,
			     void *data,
			     struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(!ops->action_handle_query))
		return rte_flow_error_set(error, ENOSYS,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOSYS));
	ret = ops->action_handle_query(&rte_eth_devices[port_id], handle,
				       data, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_action_handle_query(port_id, handle, data, ret);

	return ret;
}
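
/*
 * Usage sketch (illustrative, not part of this library): life cycle of an
 * indirect COUNT action. The handle returned by creation is referenced
 * from rules through RTE_FLOW_ACTION_TYPE_INDIRECT, whose conf is the
 * handle itself (see rte_flow_conv_actions() above). Assumes <string.h>
 * for memset(); hypothetical function name, kept under "#if 0".
 */
#if 0
static int
example_indirect_count(uint16_t port_id)
{
	struct rte_flow_error err;
	const struct rte_flow_indir_action_conf conf = { .ingress = 1 };
	const struct rte_flow_action action = {
		.type = RTE_FLOW_ACTION_TYPE_COUNT,
	};
	struct rte_flow_action_handle *handle;
	struct rte_flow_query_count counters;

	handle = rte_flow_action_handle_create(port_id, &conf, &action, &err);
	if (handle == NULL)
		return -rte_errno;
	/*
	 * Rules created afterwards may use
	 * { .type = RTE_FLOW_ACTION_TYPE_INDIRECT, .conf = handle }
	 * in their action list; all of them share this counter.
	 */
	memset(&counters, 0, sizeof(counters));
	if (rte_flow_action_handle_query(port_id, handle, &counters, &err) == 0)
		FLOW_LOG(INFO, "hits=%"PRIu64, counters.hits);
	return rte_flow_action_handle_destroy(port_id, handle, &err);
}
#endif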

int
rte_flow_tunnel_decap_set(uint16_t port_id,
			  struct rte_flow_tunnel *tunnel,
			  struct rte_flow_action **actions,
			  uint32_t *num_of_actions,
			  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_decap_set)) {
		ret = flow_err(port_id,
			       ops->tunnel_decap_set(dev, tunnel, actions,
						     num_of_actions, error),
			       error);

		rte_flow_trace_tunnel_decap_set(port_id, tunnel, actions,
						num_of_actions, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_tunnel_match(uint16_t port_id,
		      struct rte_flow_tunnel *tunnel,
		      struct rte_flow_item **items,
		      uint32_t *num_of_items,
		      struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_match)) {
		ret = flow_err(port_id,
			       ops->tunnel_match(dev, tunnel, items,
						 num_of_items, error),
			       error);

		rte_flow_trace_tunnel_match(port_id, tunnel, items, num_of_items,
					    ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_get_restore_info(uint16_t port_id,
			  struct rte_mbuf *m,
			  struct rte_flow_restore_info *restore_info,
			  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->get_restore_info)) {
		ret = flow_err(port_id,
			       ops->get_restore_info(dev, m, restore_info,
						     error),
			       error);

		rte_flow_trace_get_restore_info(port_id, m, restore_info, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

static struct {
	const struct rte_mbuf_dynflag desc;
	uint64_t value;
} flow_restore_info_dynflag = {
	.desc = { .name = "RTE_MBUF_F_RX_RESTORE_INFO", },
};

uint64_t
rte_flow_restore_info_dynflag(void)
{
	return flow_restore_info_dynflag.value;
}

int
rte_flow_restore_info_dynflag_register(void)
{
	if (flow_restore_info_dynflag.value == 0) {
		int offset = rte_mbuf_dynflag_register(&flow_restore_info_dynflag.desc);

		if (offset < 0)
			return -1;
		flow_restore_info_dynflag.value = RTE_BIT64(offset);
	}

	return 0;
}
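
/*
 * Usage sketch (illustrative, not part of this library): register the
 * restore-info dynflag once, then query tunnel restore information only
 * for packets carrying that flag. Hypothetical function name, kept under
 * "#if 0".
 */
#if 0
static void
example_restore(uint16_t port_id, struct rte_mbuf *pkt)
{
	static uint64_t restore_flag; /* cached after registration */
	struct rte_flow_restore_info info;
	struct rte_flow_error err;

	if (restore_flag == 0) {
		if (rte_flow_restore_info_dynflag_register() < 0)
			return;
		restore_flag = rte_flow_restore_info_dynflag();
	}
	if ((pkt->ol_flags & restore_flag) != 0 &&
	    rte_flow_get_restore_info(port_id, pkt, &info, &err) == 0) {
		/* info.tunnel describes the outer headers of the packet. */
	}
}
#endif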

int
rte_flow_tunnel_action_decap_release(uint16_t port_id,
				     struct rte_flow_action *actions,
				     uint32_t num_of_actions,
				     struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_action_decap_release)) {
		ret = flow_err(port_id,
			       ops->tunnel_action_decap_release(dev, actions,
								num_of_actions,
								error),
			       error);

		rte_flow_trace_tunnel_action_decap_release(port_id, actions,
							   num_of_actions, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_tunnel_item_release(uint16_t port_id,
			     struct rte_flow_item *items,
			     uint32_t num_of_items,
			     struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_item_release)) {
		ret = flow_err(port_id,
			       ops->tunnel_item_release(dev, items,
							num_of_items, error),
			       error);

		rte_flow_trace_tunnel_item_release(port_id, items, num_of_items, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_pick_transfer_proxy(uint16_t port_id, uint16_t *proxy_port_id,
			     struct rte_flow_error *error)
{
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_eth_dev *dev;
	int ret;

	if (unlikely(ops == NULL))
		return -rte_errno;

	if (ops->pick_transfer_proxy == NULL) {
		*proxy_port_id = port_id;
		return 0;
	}

	dev = &rte_eth_devices[port_id];

	ret = flow_err(port_id,
		       ops->pick_transfer_proxy(dev, proxy_port_id, error),
		       error);

	rte_flow_trace_pick_transfer_proxy(port_id, proxy_port_id, ret);

	return ret;
}
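
/*
 * Usage sketch (illustrative, not part of this library): rules with
 * attr.transfer = 1 must be created on the proxy port, which may differ
 * from the port the traffic belongs to. Hypothetical function name,
 * kept under "#if 0".
 */
#if 0
static int
example_transfer_target(uint16_t port_id, uint16_t *proxy_port_id)
{
	struct rte_flow_error err;

	/* Pass *proxy_port_id, not port_id, to rte_flow_create(). */
	return rte_flow_pick_transfer_proxy(port_id, proxy_port_id, &err);
}
#endif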

struct rte_flow_item_flex_handle *
rte_flow_flex_item_create(uint16_t port_id,
			  const struct rte_flow_item_flex_conf *conf,
			  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow_item_flex_handle *handle;

	if (unlikely(!ops))
		return NULL;
	if (unlikely(!ops->flex_item_create)) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, rte_strerror(ENOTSUP));
		return NULL;
	}
	handle = ops->flex_item_create(dev, conf, error);
	if (handle == NULL)
		flow_err(port_id, -rte_errno, error);

	rte_flow_trace_flex_item_create(port_id, conf, handle);

	return handle;
}

int
rte_flow_flex_item_release(uint16_t port_id,
			   const struct rte_flow_item_flex_handle *handle,
			   struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops || !ops->flex_item_release))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOTSUP));
	ret = ops->flex_item_release(dev, handle, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_flex_item_release(port_id, handle, ret);

	return ret;
}

int
rte_flow_info_get(uint16_t port_id,
		  struct rte_flow_port_info *port_info,
		  struct rte_flow_queue_info *queue_info,
		  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (dev->data->dev_configured == 0) {
		FLOW_LOG(INFO,
			"Device with port_id=%"PRIu16" is not configured.",
			port_id);
		return -EINVAL;
	}
	if (port_info == NULL) {
		FLOW_LOG(ERR, "Port %"PRIu16" info is NULL.", port_id);
		return -EINVAL;
	}
	if (likely(!!ops->info_get)) {
		ret = flow_err(port_id,
			       ops->info_get(dev, port_info, queue_info, error),
			       error);

		rte_flow_trace_info_get(port_id, port_info, queue_info, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_configure(uint16_t port_id,
		   const struct rte_flow_port_attr *port_attr,
		   uint16_t nb_queue,
		   const struct rte_flow_queue_attr *queue_attr[],
		   struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (dev->data->dev_configured == 0) {
		FLOW_LOG(INFO,
			"Device with port_id=%"PRIu16" is not configured.",
			port_id);
		return -EINVAL;
	}
	if (dev->data->dev_started != 0) {
		FLOW_LOG(INFO,
			"Device with port_id=%"PRIu16" already started.",
			port_id);
		return -EINVAL;
	}
	if (port_attr == NULL) {
		FLOW_LOG(ERR, "Port %"PRIu16" port attr is NULL.", port_id);
		return -EINVAL;
	}
	if (queue_attr == NULL) {
		FLOW_LOG(ERR, "Port %"PRIu16" queue info is NULL.", port_id);
		return -EINVAL;
	}
	if ((port_attr->flags & RTE_FLOW_PORT_FLAG_SHARE_INDIRECT) &&
	     !rte_eth_dev_is_valid_port(port_attr->host_port_id)) {
		return rte_flow_error_set(error, ENODEV,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENODEV));
	}
	if (likely(!!ops->configure)) {
		ret = ops->configure(dev, port_attr, nb_queue, queue_attr, error);
		if (ret == 0)
			dev->data->flow_configured = 1;
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_configure(port_id, port_attr, nb_queue, queue_attr, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}
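
/*
 * Usage sketch (illustrative, not part of this library): size the
 * asynchronous flow engine after rte_eth_dev_configure() and before
 * rte_eth_dev_start(). The resource counts below are arbitrary
 * application choices, not values mandated by the API; hypothetical
 * function name, kept under "#if 0".
 */
#if 0
static int
example_configure_flow_engine(uint16_t port_id)
{
	struct rte_flow_port_info info;
	struct rte_flow_queue_info queue_info;
	struct rte_flow_error err;
	const struct rte_flow_port_attr port_attr = {
		.nb_counters = 1024, /* arbitrary */
	};
	const struct rte_flow_queue_attr queue_attr = { .size = 64 };
	const struct rte_flow_queue_attr *queue_attrs[] = { &queue_attr };

	/* Report engine capabilities (upper bounds for the attributes). */
	if (rte_flow_info_get(port_id, &info, &queue_info, &err) != 0)
		return -rte_errno;
	return rte_flow_configure(port_id, &port_attr, 1, queue_attrs, &err);
}
#endif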
1699 
1700 struct rte_flow_pattern_template *
1701 rte_flow_pattern_template_create(uint16_t port_id,
1702 		const struct rte_flow_pattern_template_attr *template_attr,
1703 		const struct rte_flow_item pattern[],
1704 		struct rte_flow_error *error)
1705 {
1706 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1707 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1708 	struct rte_flow_pattern_template *template;
1709 
1710 	if (unlikely(!ops))
1711 		return NULL;
1712 	if (dev->data->flow_configured == 0) {
1713 		FLOW_LOG(INFO,
1714 			"Flow engine on port_id=%"PRIu16" is not configured.",
1715 			port_id);
1716 		rte_flow_error_set(error, EINVAL,
1717 				RTE_FLOW_ERROR_TYPE_STATE,
1718 				NULL, rte_strerror(EINVAL));
1719 		return NULL;
1720 	}
1721 	if (template_attr == NULL) {
1722 		FLOW_LOG(ERR,
1723 			     "Port %"PRIu16" template attr is NULL.",
1724 			     port_id);
1725 		rte_flow_error_set(error, EINVAL,
1726 				   RTE_FLOW_ERROR_TYPE_ATTR,
1727 				   NULL, rte_strerror(EINVAL));
1728 		return NULL;
1729 	}
1730 	if (pattern == NULL) {
1731 		FLOW_LOG(ERR,
1732 			     "Port %"PRIu16" pattern is NULL.",
1733 			     port_id);
1734 		rte_flow_error_set(error, EINVAL,
1735 				   RTE_FLOW_ERROR_TYPE_ATTR,
1736 				   NULL, rte_strerror(EINVAL));
1737 		return NULL;
1738 	}
1739 	if (likely(!!ops->pattern_template_create)) {
1740 		template = ops->pattern_template_create(dev, template_attr,
1741 							pattern, error);
1742 		if (template == NULL)
1743 			flow_err(port_id, -rte_errno, error);
1744 
1745 		rte_flow_trace_pattern_template_create(port_id, template_attr,
1746 						       pattern, template);
1747 
1748 		return template;
1749 	}
1750 	rte_flow_error_set(error, ENOTSUP,
1751 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1752 			   NULL, rte_strerror(ENOTSUP));
1753 	return NULL;
1754 }
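
/*
 * A usage sketch (illustrative only; assumes the flow engine was already
 * set up with rte_flow_configure()). The mask attached to each item
 * selects the header fields the template will match on:
 *
 *	struct rte_flow_pattern_template_attr pt_attr = { .ingress = 1 };
 *	struct rte_flow_item_ipv4 ipv4_mask = {
 *		.hdr.dst_addr = RTE_BE32(UINT32_MAX),
 *	};
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4, .mask = &ipv4_mask },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_pattern_template *pt =
 *		rte_flow_pattern_template_create(port_id, &pt_attr,
 *						 pattern, &error);
 */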
1755 
1756 int
1757 rte_flow_pattern_template_destroy(uint16_t port_id,
1758 		struct rte_flow_pattern_template *pattern_template,
1759 		struct rte_flow_error *error)
1760 {
1761 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1762 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1763 	int ret;
1764 
1765 	if (unlikely(!ops))
1766 		return -rte_errno;
1767 	if (unlikely(pattern_template == NULL))
1768 		return 0;
1769 	if (likely(!!ops->pattern_template_destroy)) {
1770 		ret = flow_err(port_id,
1771 			       ops->pattern_template_destroy(dev,
1772 							     pattern_template,
1773 							     error),
1774 			       error);
1775 
1776 		rte_flow_trace_pattern_template_destroy(port_id, pattern_template,
1777 							ret);
1778 
1779 		return ret;
1780 	}
1781 	return rte_flow_error_set(error, ENOTSUP,
1782 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1783 				  NULL, rte_strerror(ENOTSUP));
1784 }
1785 
1786 struct rte_flow_actions_template *
1787 rte_flow_actions_template_create(uint16_t port_id,
1788 			const struct rte_flow_actions_template_attr *template_attr,
1789 			const struct rte_flow_action actions[],
1790 			const struct rte_flow_action masks[],
1791 			struct rte_flow_error *error)
1792 {
1793 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1794 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1795 	struct rte_flow_actions_template *template;
1796 
1797 	if (unlikely(!ops))
1798 		return NULL;
1799 	if (dev->data->flow_configured == 0) {
1800 		FLOW_LOG(INFO,
1801 			"Flow engine on port_id=%"PRIu16" is not configured.",
1802 			port_id);
1803 		rte_flow_error_set(error, EINVAL,
1804 				   RTE_FLOW_ERROR_TYPE_STATE,
1805 				   NULL, rte_strerror(EINVAL));
1806 		return NULL;
1807 	}
1808 	if (template_attr == NULL) {
1809 		FLOW_LOG(ERR,
1810 			     "Port %"PRIu16" template attr is NULL.",
1811 			     port_id);
1812 		rte_flow_error_set(error, EINVAL,
1813 				   RTE_FLOW_ERROR_TYPE_ATTR,
1814 				   NULL, rte_strerror(EINVAL));
1815 		return NULL;
1816 	}
1817 	if (actions == NULL) {
1818 		FLOW_LOG(ERR,
1819 			     "Port %"PRIu16" actions is NULL.",
1820 			     port_id);
1821 		rte_flow_error_set(error, EINVAL,
1822 				   RTE_FLOW_ERROR_TYPE_ATTR,
1823 				   NULL, rte_strerror(EINVAL));
1824 		return NULL;
1825 	}
1826 	if (masks == NULL) {
1827 		FLOW_LOG(ERR,
1828 			     "Port %"PRIu16" masks is NULL.",
1829 			     port_id);
1830 		rte_flow_error_set(error, EINVAL,
1831 				   RTE_FLOW_ERROR_TYPE_ATTR,
1832 				   NULL, rte_strerror(EINVAL));
1833 		return NULL;
1834 	}
1835 	if (likely(!!ops->actions_template_create)) {
1836 		template = ops->actions_template_create(dev, template_attr,
1837 							actions, masks, error);
1838 		if (template == NULL)
1839 			flow_err(port_id, -rte_errno, error);
1840 
1841 		rte_flow_trace_actions_template_create(port_id, template_attr, actions,
1842 						       masks, template);
1843 
1844 		return template;
1845 	}
1846 	rte_flow_error_set(error, ENOTSUP,
1847 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1848 			   NULL, rte_strerror(ENOTSUP));
1849 	return NULL;
1850 }
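
/*
 * A usage sketch (illustrative only). "masks" mirrors "actions": a
 * non-zero field in a mask marks the corresponding action argument as a
 * template-time constant, while zeroed fields are supplied per rule at
 * enqueue time:
 *
 *	struct rte_flow_actions_template_attr at_attr = { .ingress = 1 };
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_action masks[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_actions_template *at =
 *		rte_flow_actions_template_create(port_id, &at_attr, actions,
 *						 masks, &error);
 */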
1851 
1852 int
1853 rte_flow_actions_template_destroy(uint16_t port_id,
1854 			struct rte_flow_actions_template *actions_template,
1855 			struct rte_flow_error *error)
1856 {
1857 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1858 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1859 	int ret;
1860 
1861 	if (unlikely(!ops))
1862 		return -rte_errno;
1863 	if (unlikely(actions_template == NULL))
1864 		return 0;
1865 	if (likely(!!ops->actions_template_destroy)) {
1866 		ret = flow_err(port_id,
1867 			       ops->actions_template_destroy(dev,
1868 							     actions_template,
1869 							     error),
1870 			       error);
1871 
1872 		rte_flow_trace_actions_template_destroy(port_id, actions_template,
1873 							ret);
1874 
1875 		return ret;
1876 	}
1877 	return rte_flow_error_set(error, ENOTSUP,
1878 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1879 				  NULL, rte_strerror(ENOTSUP));
1880 }
1881 
1882 struct rte_flow_template_table *
1883 rte_flow_template_table_create(uint16_t port_id,
1884 			const struct rte_flow_template_table_attr *table_attr,
1885 			struct rte_flow_pattern_template *pattern_templates[],
1886 			uint8_t nb_pattern_templates,
1887 			struct rte_flow_actions_template *actions_templates[],
1888 			uint8_t nb_actions_templates,
1889 			struct rte_flow_error *error)
1890 {
1891 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1892 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1893 	struct rte_flow_template_table *table;
1894 
1895 	if (unlikely(!ops))
1896 		return NULL;
1897 	if (dev->data->flow_configured == 0) {
1898 		FLOW_LOG(INFO,
1899 			"Flow engine on port_id=%"PRIu16" is not configured.",
1900 			port_id);
1901 		rte_flow_error_set(error, EINVAL,
1902 				   RTE_FLOW_ERROR_TYPE_STATE,
1903 				   NULL, rte_strerror(EINVAL));
1904 		return NULL;
1905 	}
1906 	if (table_attr == NULL) {
1907 		FLOW_LOG(ERR,
1908 			     "Port %"PRIu16" table attr is NULL.",
1909 			     port_id);
1910 		rte_flow_error_set(error, EINVAL,
1911 				   RTE_FLOW_ERROR_TYPE_ATTR,
1912 				   NULL, rte_strerror(EINVAL));
1913 		return NULL;
1914 	}
1915 	if (pattern_templates == NULL) {
1916 		FLOW_LOG(ERR,
1917 			     "Port %"PRIu16" pattern templates is NULL.",
1918 			     port_id);
1919 		rte_flow_error_set(error, EINVAL,
1920 				   RTE_FLOW_ERROR_TYPE_ATTR,
1921 				   NULL, rte_strerror(EINVAL));
1922 		return NULL;
1923 	}
1924 	if (actions_templates == NULL) {
1925 		FLOW_LOG(ERR,
1926 			     "Port %"PRIu16" actions templates is NULL.",
1927 			     port_id);
1928 		rte_flow_error_set(error, EINVAL,
1929 				   RTE_FLOW_ERROR_TYPE_ATTR,
1930 				   NULL, rte_strerror(EINVAL));
1931 		return NULL;
1932 	}
1933 	if (likely(!!ops->template_table_create)) {
1934 		table = ops->template_table_create(dev, table_attr,
1935 					pattern_templates, nb_pattern_templates,
1936 					actions_templates, nb_actions_templates,
1937 					error);
1938 		if (table == NULL)
1939 			flow_err(port_id, -rte_errno, error);
1940 
1941 		rte_flow_trace_template_table_create(port_id, table_attr,
1942 						     pattern_templates,
1943 						     nb_pattern_templates,
1944 						     actions_templates,
1945 						     nb_actions_templates, table);
1946 
1947 		return table;
1948 	}
1949 	rte_flow_error_set(error, ENOTSUP,
1950 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1951 			   NULL, rte_strerror(ENOTSUP));
1952 	return NULL;
1953 }
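
/*
 * A usage sketch (illustrative only) combining the "pt" and "at"
 * templates from the sketches above into a table sized for the expected
 * rule count:
 *
 *	struct rte_flow_template_table_attr table_attr = {
 *		.flow_attr = { .ingress = 1 },
 *		.nb_flows = 1024,
 *	};
 *	struct rte_flow_pattern_template *pts[] = { pt };
 *	struct rte_flow_actions_template *ats[] = { at };
 *	struct rte_flow_template_table *table =
 *		rte_flow_template_table_create(port_id, &table_attr,
 *					       pts, 1, ats, 1, &error);
 */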
1954 
1955 int
1956 rte_flow_template_table_destroy(uint16_t port_id,
1957 				struct rte_flow_template_table *template_table,
1958 				struct rte_flow_error *error)
1959 {
1960 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1961 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1962 	int ret;
1963 
1964 	if (unlikely(!ops))
1965 		return -rte_errno;
1966 	if (unlikely(template_table == NULL))
1967 		return 0;
1968 	if (likely(!!ops->template_table_destroy)) {
1969 		ret = flow_err(port_id,
1970 			       ops->template_table_destroy(dev,
1971 							   template_table,
1972 							   error),
1973 			       error);
1974 
1975 		rte_flow_trace_template_table_destroy(port_id, template_table,
1976 						      ret);
1977 
1978 		return ret;
1979 	}
1980 	return rte_flow_error_set(error, ENOTSUP,
1981 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1982 				  NULL, rte_strerror(ENOTSUP));
1983 }
1984 
1985 int
1986 rte_flow_group_set_miss_actions(uint16_t port_id,
1987 				uint32_t group_id,
1988 				const struct rte_flow_group_attr *attr,
1989 				const struct rte_flow_action actions[],
1990 				struct rte_flow_error *error)
1991 {
1992 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1993 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1994 
1995 	if (unlikely(!ops))
1996 		return -rte_errno;
1997 	if (likely(!!ops->group_set_miss_actions)) {
1998 		return flow_err(port_id,
1999 				ops->group_set_miss_actions(dev, group_id, attr, actions, error),
2000 				error);
2001 	}
2002 	return rte_flow_error_set(error, ENOTSUP,
2003 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2004 				  NULL, rte_strerror(ENOTSUP));
2005 }
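
/*
 * A usage sketch (illustrative only): steer packets that miss every rule
 * in group 1 to a fixed queue instead of the default miss behavior:
 *
 *	struct rte_flow_group_attr group_attr = { .ingress = 1 };
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	struct rte_flow_action miss_actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	rte_flow_group_set_miss_actions(port_id, 1, &group_attr,
 *					miss_actions, &error);
 */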
2006 
2007 struct rte_flow *
2008 rte_flow_async_create(uint16_t port_id,
2009 		      uint32_t queue_id,
2010 		      const struct rte_flow_op_attr *op_attr,
2011 		      struct rte_flow_template_table *template_table,
2012 		      const struct rte_flow_item pattern[],
2013 		      uint8_t pattern_template_index,
2014 		      const struct rte_flow_action actions[],
2015 		      uint8_t actions_template_index,
2016 		      void *user_data,
2017 		      struct rte_flow_error *error)
2018 {
2019 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2020 	struct rte_flow *flow;
2021 
2022 #ifdef RTE_FLOW_DEBUG
2023 	if (!rte_eth_dev_is_valid_port(port_id)) {
2024 		rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2025 				   rte_strerror(ENODEV));
2026 		return NULL;
2027 	}
2028 	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->async_create == NULL) {
2029 		rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2030 				   rte_strerror(ENOSYS));
2031 		return NULL;
2032 	}
2033 #endif
2034 
2035 	flow = dev->flow_fp_ops->async_create(dev, queue_id,
2036 					      op_attr, template_table,
2037 					      pattern, pattern_template_index,
2038 					      actions, actions_template_index,
2039 					      user_data, error);
2040 
2041 	rte_flow_trace_async_create(port_id, queue_id, op_attr, template_table,
2042 				    pattern, pattern_template_index, actions,
2043 				    actions_template_index, user_data, flow);
2044 
2045 	return flow;
2046 }
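
/*
 * A usage sketch (illustrative only): enqueue one rule on queue 0, using
 * entry 0 of both template arrays of "table". The pattern and actions
 * only need to carry the fields left unmasked in the templates:
 *
 *	struct rte_flow_op_attr op_attr = { .postpone = 0 };
 *	struct rte_flow *flow =
 *		rte_flow_async_create(port_id, 0, &op_attr, table,
 *				      pattern, 0, actions, 0, NULL, &error);
 *
 * The call only enqueues the operation; its result must be collected
 * with rte_flow_pull(), see the completion loop sketched below.
 */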
2047 
2048 struct rte_flow *
2049 rte_flow_async_create_by_index(uint16_t port_id,
2050 			       uint32_t queue_id,
2051 			       const struct rte_flow_op_attr *op_attr,
2052 			       struct rte_flow_template_table *template_table,
2053 			       uint32_t rule_index,
2054 			       const struct rte_flow_action actions[],
2055 			       uint8_t actions_template_index,
2056 			       void *user_data,
2057 			       struct rte_flow_error *error)
2058 {
2059 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2060 
2061 #ifdef RTE_FLOW_DEBUG
2062 	if (!rte_eth_dev_is_valid_port(port_id)) {
2063 		rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2064 				   rte_strerror(ENODEV));
2065 		return NULL;
2066 	}
2067 	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->async_create_by_index == NULL) {
2068 		rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2069 				   rte_strerror(ENOSYS));
2070 		return NULL;
2071 	}
2072 #endif
2073 
2074 	return dev->flow_fp_ops->async_create_by_index(dev, queue_id,
2075 						       op_attr, template_table, rule_index,
2076 						       actions, actions_template_index,
2077 						       user_data, error);
2078 }
2079 
2080 int
2081 rte_flow_async_destroy(uint16_t port_id,
2082 		       uint32_t queue_id,
2083 		       const struct rte_flow_op_attr *op_attr,
2084 		       struct rte_flow *flow,
2085 		       void *user_data,
2086 		       struct rte_flow_error *error)
2087 {
2088 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2089 	int ret;
2090 
2091 #ifdef RTE_FLOW_DEBUG
2092 	if (!rte_eth_dev_is_valid_port(port_id))
2093 		return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2094 					  rte_strerror(ENODEV));
2095 	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->async_destroy == NULL)
2096 		return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2097 					  rte_strerror(ENOSYS));
2098 #endif
2099 
2100 	ret = dev->flow_fp_ops->async_destroy(dev, queue_id,
2101 					      op_attr, flow,
2102 					      user_data, error);
2103 
2104 	rte_flow_trace_async_destroy(port_id, queue_id, op_attr, flow,
2105 				     user_data, ret);
2106 
2107 	return ret;
2108 }
2109 
2110 int
2111 rte_flow_async_actions_update(uint16_t port_id,
2112 			      uint32_t queue_id,
2113 			      const struct rte_flow_op_attr *op_attr,
2114 			      struct rte_flow *flow,
2115 			      const struct rte_flow_action actions[],
2116 			      uint8_t actions_template_index,
2117 			      void *user_data,
2118 			      struct rte_flow_error *error)
2119 {
2120 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2121 	int ret;
2122 
2123 #ifdef RTE_FLOW_DEBUG
2124 	if (!rte_eth_dev_is_valid_port(port_id))
2125 		return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2126 					  rte_strerror(ENODEV));
2127 	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->async_actions_update == NULL)
2128 		return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2129 					  rte_strerror(ENOSYS));
2130 #endif
2131 
2132 	ret = dev->flow_fp_ops->async_actions_update(dev, queue_id, op_attr,
2133 						     flow, actions,
2134 						     actions_template_index,
2135 						     user_data, error);
2136 
2137 	rte_flow_trace_async_actions_update(port_id, queue_id, op_attr, flow,
2138 					    actions, actions_template_index,
2139 					    user_data, ret);
2140 
2141 	return ret;
2142 }
2143 
2144 int
2145 rte_flow_push(uint16_t port_id,
2146 	      uint32_t queue_id,
2147 	      struct rte_flow_error *error)
2148 {
2149 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2150 	int ret;
2151 
2152 #ifdef RTE_FLOW_DEBUG
2153 	if (!rte_eth_dev_is_valid_port(port_id))
2154 		return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2155 					  rte_strerror(ENODEV));
2156 	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->push == NULL)
2157 		return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2158 					  rte_strerror(ENOSYS));
2159 #endif
2160 
2161 	ret = dev->flow_fp_ops->push(dev, queue_id, error);
2162 
2163 	rte_flow_trace_push(port_id, queue_id, ret);
2164 
2165 	return ret;
2166 }
2167 
2168 int
2169 rte_flow_pull(uint16_t port_id,
2170 	      uint32_t queue_id,
2171 	      struct rte_flow_op_result res[],
2172 	      uint16_t n_res,
2173 	      struct rte_flow_error *error)
2174 {
2175 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2176 	int ret;
2177 
2178 #ifdef RTE_FLOW_DEBUG
2179 	if (!rte_eth_dev_is_valid_port(port_id))
2180 		return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2181 					  rte_strerror(ENODEV));
2182 	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->pull == NULL)
2183 		return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2184 					  rte_strerror(ENOSYS));
2185 #endif
2186 
2187 	ret = dev->flow_fp_ops->pull(dev, queue_id, res, n_res, error);
2188 
2189 	rte_flow_trace_pull(port_id, queue_id, res, n_res, ret);
2190 
2191 	return ret;
2192 }
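
/*
 * A minimal completion loop for the async operations above (illustrative
 * only; "handle_failure" stands for a hypothetical application callback):
 *
 *	struct rte_flow_op_result results[8];
 *	int i, n;
 *
 *	rte_flow_push(port_id, 0, &error);
 *	do {
 *		n = rte_flow_pull(port_id, 0, results,
 *				  RTE_DIM(results), &error);
 *	} while (n == 0);
 *	for (i = 0; i < n; i++)
 *		if (results[i].status != RTE_FLOW_OP_SUCCESS)
 *			handle_failure(results[i].user_data);
 */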
2193 
2194 struct rte_flow_action_handle *
2195 rte_flow_async_action_handle_create(uint16_t port_id,
2196 		uint32_t queue_id,
2197 		const struct rte_flow_op_attr *op_attr,
2198 		const struct rte_flow_indir_action_conf *indir_action_conf,
2199 		const struct rte_flow_action *action,
2200 		void *user_data,
2201 		struct rte_flow_error *error)
2202 {
2203 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2204 	struct rte_flow_action_handle *handle;
2205 
2206 #ifdef RTE_FLOW_DEBUG
2207 	if (!rte_eth_dev_is_valid_port(port_id)) {
2208 		rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2209 				   rte_strerror(ENODEV));
2210 		return NULL;
2211 	}
2212 	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->async_action_handle_create == NULL) {
2213 		rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2214 				   rte_strerror(ENOSYS));
2215 		return NULL;
2216 	}
2217 #endif
2218 
2219 	handle = dev->flow_fp_ops->async_action_handle_create(dev, queue_id, op_attr,
2220 							      indir_action_conf, action,
2221 							      user_data, error);
2222 
2223 	rte_flow_trace_async_action_handle_create(port_id, queue_id, op_attr,
2224 						  indir_action_conf, action,
2225 						  user_data, handle);
2226 
2227 	return handle;
2228 }
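
/*
 * A usage sketch (illustrative only): asynchronously create a shared
 * COUNT action; the returned handle can then be referenced from any
 * number of rules through RTE_FLOW_ACTION_TYPE_INDIRECT:
 *
 *	struct rte_flow_indir_action_conf conf = { .ingress = 1 };
 *	struct rte_flow_action count = { .type = RTE_FLOW_ACTION_TYPE_COUNT };
 *	struct rte_flow_action_handle *handle =
 *		rte_flow_async_action_handle_create(port_id, 0, &op_attr,
 *						    &conf, &count,
 *						    NULL, &error);
 *
 * As with rule insertion, completion is reported via rte_flow_pull().
 */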
2229 
2230 int
2231 rte_flow_async_action_handle_destroy(uint16_t port_id,
2232 		uint32_t queue_id,
2233 		const struct rte_flow_op_attr *op_attr,
2234 		struct rte_flow_action_handle *action_handle,
2235 		void *user_data,
2236 		struct rte_flow_error *error)
2237 {
2238 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2239 	int ret;
2240 
2241 #ifdef RTE_FLOW_DEBUG
2242 	if (!rte_eth_dev_is_valid_port(port_id))
2243 		return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2244 					  rte_strerror(ENODEV));
2245 	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->async_action_handle_destroy == NULL)
2246 		return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2247 					  rte_strerror(ENOSYS));
2248 #endif
2249 
2250 	ret = dev->flow_fp_ops->async_action_handle_destroy(dev, queue_id, op_attr,
2251 							    action_handle, user_data, error);
2252 
2253 	rte_flow_trace_async_action_handle_destroy(port_id, queue_id, op_attr,
2254 						   action_handle, user_data, ret);
2255 
2256 	return ret;
2257 }
2258 
2259 int
2260 rte_flow_async_action_handle_update(uint16_t port_id,
2261 		uint32_t queue_id,
2262 		const struct rte_flow_op_attr *op_attr,
2263 		struct rte_flow_action_handle *action_handle,
2264 		const void *update,
2265 		void *user_data,
2266 		struct rte_flow_error *error)
2267 {
2268 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2269 	int ret;
2270 
2271 #ifdef RTE_FLOW_DEBUG
2272 	if (!rte_eth_dev_is_valid_port(port_id))
2273 		return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2274 					  rte_strerror(ENODEV));
2275 	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->async_action_handle_update == NULL)
2276 		return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2277 					  rte_strerror(ENOSYS));
2278 #endif
2279 
2280 	ret = dev->flow_fp_ops->async_action_handle_update(dev, queue_id, op_attr,
2281 							   action_handle, update, user_data, error);
2282 
2283 	rte_flow_trace_async_action_handle_update(port_id, queue_id, op_attr,
2284 						  action_handle, update,
2285 						  user_data, ret);
2286 
2287 	return ret;
2288 }
2289 
2290 int
2291 rte_flow_async_action_handle_query(uint16_t port_id,
2292 		uint32_t queue_id,
2293 		const struct rte_flow_op_attr *op_attr,
2294 		const struct rte_flow_action_handle *action_handle,
2295 		void *data,
2296 		void *user_data,
2297 		struct rte_flow_error *error)
2298 {
2299 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2300 	int ret;
2301 
2302 #ifdef RTE_FLOW_DEBUG
2303 	if (!rte_eth_dev_is_valid_port(port_id))
2304 		return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2305 					  rte_strerror(ENODEV));
2306 	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->async_action_handle_query == NULL)
2307 		return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2308 					  rte_strerror(ENOSYS));
2309 #endif
2310 
2311 	ret = dev->flow_fp_ops->async_action_handle_query(dev, queue_id, op_attr,
2312 							  action_handle, data, user_data, error);
2313 
2314 	rte_flow_trace_async_action_handle_query(port_id, queue_id, op_attr,
2315 						 action_handle, data, user_data,
2316 						 ret);
2317 
2318 	return ret;
2319 }
2320 
2321 int
2322 rte_flow_action_handle_query_update(uint16_t port_id,
2323 				    struct rte_flow_action_handle *handle,
2324 				    const void *update, void *query,
2325 				    enum rte_flow_query_update_mode mode,
2326 				    struct rte_flow_error *error)
2327 {
2328 	int ret;
2329 	struct rte_eth_dev *dev;
2330 	const struct rte_flow_ops *ops;
2331 
2332 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2333 	if (!handle)
2334 		return -EINVAL;
2335 	if (!update && !query)
2336 		return -EINVAL;
2337 	dev = &rte_eth_devices[port_id];
2338 	ops = rte_flow_ops_get(port_id, error);
2339 	if (!ops || !ops->action_handle_query_update)
2340 		return -ENOTSUP;
2341 	ret = ops->action_handle_query_update(dev, handle, update,
2342 					      query, mode, error);
2343 	return flow_err(port_id, ret, error);
2344 }
2345 
2346 int
2347 rte_flow_async_action_handle_query_update(uint16_t port_id, uint32_t queue_id,
2348 					  const struct rte_flow_op_attr *attr,
2349 					  struct rte_flow_action_handle *handle,
2350 					  const void *update, void *query,
2351 					  enum rte_flow_query_update_mode mode,
2352 					  void *user_data,
2353 					  struct rte_flow_error *error)
2354 {
2355 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2356 
2357 #ifdef RTE_FLOW_DEBUG
2358 	if (!rte_eth_dev_is_valid_port(port_id))
2359 		return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2360 					  rte_strerror(ENODEV));
2361 	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->async_action_handle_query_update == NULL)
2362 		return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2363 					  rte_strerror(ENOSYS));
2364 #endif
2365 
2366 	return dev->flow_fp_ops->async_action_handle_query_update(dev, queue_id, attr,
2367 								  handle, update,
2368 								  query, mode,
2369 								  user_data, error);
2370 }
2371 
2372 struct rte_flow_action_list_handle *
2373 rte_flow_action_list_handle_create(uint16_t port_id,
2374 				   const struct rte_flow_indir_action_conf *conf,
2376 				   const struct rte_flow_action *actions,
2377 				   struct rte_flow_error *error)
2378 {
2379 	int ret;
2380 	struct rte_eth_dev *dev;
2381 	const struct rte_flow_ops *ops;
2382 	struct rte_flow_action_list_handle *handle;
2383 
2384 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
2385 	ops = rte_flow_ops_get(port_id, error);
2386 	if (!ops || !ops->action_list_handle_create) {
2387 		rte_flow_error_set(error, ENOTSUP,
2388 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2389 				   "action_list handle not supported");
2390 		return NULL;
2391 	}
2392 	dev = &rte_eth_devices[port_id];
2393 	handle = ops->action_list_handle_create(dev, conf, actions, error);
2394 	ret = flow_err(port_id, -rte_errno, error);
2395 	rte_flow_trace_action_list_handle_create(port_id, conf, actions, ret);
2396 	return handle;
2397 }
2398 
2399 int
2400 rte_flow_action_list_handle_destroy(uint16_t port_id,
2401 				    struct rte_flow_action_list_handle *handle,
2402 				    struct rte_flow_error *error)
2403 {
2404 	int ret;
2405 	struct rte_eth_dev *dev;
2406 	const struct rte_flow_ops *ops;
2407 
2408 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2409 	ops = rte_flow_ops_get(port_id, error);
2410 	if (!ops || !ops->action_list_handle_destroy)
2411 		return rte_flow_error_set(error, ENOTSUP,
2412 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2413 					  "action_list handle not supported");
2414 	dev = &rte_eth_devices[port_id];
2415 	ret = ops->action_list_handle_destroy(dev, handle, error);
2416 	ret = flow_err(port_id, ret, error);
2417 	rte_flow_trace_action_list_handle_destroy(port_id, handle, ret);
2418 	return ret;
2419 }
2420 
2421 struct rte_flow_action_list_handle *
2422 rte_flow_async_action_list_handle_create(uint16_t port_id, uint32_t queue_id,
2423 					 const struct rte_flow_op_attr *attr,
2424 					 const struct rte_flow_indir_action_conf *conf,
2425 					 const struct rte_flow_action *actions,
2426 					 void *user_data,
2427 					 struct rte_flow_error *error)
2428 {
2429 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2430 	struct rte_flow_action_list_handle *handle;
2431 	int ret;
2432 
2433 #ifdef RTE_FLOW_DEBUG
2434 	if (!rte_eth_dev_is_valid_port(port_id)) {
2435 		rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2436 				   rte_strerror(ENODEV));
2437 		return NULL;
2438 	}
2439 	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->async_action_list_handle_create == NULL) {
2440 		rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2441 				   rte_strerror(ENOSYS));
2442 		return NULL;
2443 	}
2444 #endif
2445 
2446 	handle = dev->flow_fp_ops->async_action_list_handle_create(dev, queue_id, attr, conf,
2447 								   actions, user_data,
2448 								   error);
2449 	ret = flow_err(port_id, -rte_errno, error);
2450 
2451 	rte_flow_trace_async_action_list_handle_create(port_id, queue_id, attr,
2452 						       conf, actions, user_data,
2453 						       ret);
2454 	return handle;
2455 }
2456 
2457 int
2458 rte_flow_async_action_list_handle_destroy(uint16_t port_id, uint32_t queue_id,
2459 				 const struct rte_flow_op_attr *op_attr,
2460 				 struct rte_flow_action_list_handle *handle,
2461 				 void *user_data, struct rte_flow_error *error)
2462 {
2463 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2464 	int ret;
2465 
2466 #ifdef RTE_FLOW_DEBUG
2467 	if (!rte_eth_dev_is_valid_port(port_id))
2468 		return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2469 					  rte_strerror(ENODEV));
2470 	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->async_action_list_handle_destroy == NULL)
2471 		return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2472 					  rte_strerror(ENOSYS));
2473 #endif
2474 
2475 	ret = dev->flow_fp_ops->async_action_list_handle_destroy(dev, queue_id, op_attr,
2476 								 handle, user_data, error);
2477 
2478 	rte_flow_trace_async_action_list_handle_destroy(port_id, queue_id,
2479 							op_attr, handle,
2480 							user_data, ret);
2481 	return ret;
2482 }
2483 
2484 int
2485 rte_flow_action_list_handle_query_update(uint16_t port_id,
2486 			 const struct rte_flow_action_list_handle *handle,
2487 			 const void **update, void **query,
2488 			 enum rte_flow_query_update_mode mode,
2489 			 struct rte_flow_error *error)
2490 {
2491 	int ret;
2492 	struct rte_eth_dev *dev;
2493 	const struct rte_flow_ops *ops;
2494 
2495 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2496 	ops = rte_flow_ops_get(port_id, error);
2497 	if (!ops || !ops->action_list_handle_query_update)
2498 		return rte_flow_error_set(error, ENOTSUP,
2499 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2500 					  "action_list query_update not supported");
2501 	dev = &rte_eth_devices[port_id];
2502 	ret = ops->action_list_handle_query_update(dev, handle, update, query,
2503 						   mode, error);
2504 	ret = flow_err(port_id, ret, error);
2505 	rte_flow_trace_action_list_handle_query_update(port_id, handle, update,
2506 						       query, mode, ret);
2507 	return ret;
2508 }
2509 
2510 int
2511 rte_flow_async_action_list_handle_query_update(uint16_t port_id, uint32_t queue_id,
2512 			 const struct rte_flow_op_attr *attr,
2513 			 const struct rte_flow_action_list_handle *handle,
2514 			 const void **update, void **query,
2515 			 enum rte_flow_query_update_mode mode,
2516 			 void *user_data, struct rte_flow_error *error)
2517 {
2518 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2519 	int ret;
2520 
2521 #ifdef RTE_FLOW_DEBUG
2522 	if (!rte_eth_dev_is_valid_port(port_id))
2523 		return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2524 					  rte_strerror(ENODEV));
2525 	if (dev->flow_fp_ops == NULL ||
2526 	    dev->flow_fp_ops->async_action_list_handle_query_update == NULL)
2527 		return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2528 					  rte_strerror(ENOSYS));
2529 #endif
2530 
2531 	ret = dev->flow_fp_ops->async_action_list_handle_query_update(dev, queue_id, attr,
2532 								      handle, update, query,
2533 								      mode, user_data,
2534 								      error);
2535 
2536 	rte_flow_trace_async_action_list_handle_query_update(port_id, queue_id,
2537 							     attr, handle,
2538 							     update, query,
2539 							     mode, user_data,
2540 							     ret);
2541 	return ret;
2542 }
2543 
2544 int
2545 rte_flow_calc_table_hash(uint16_t port_id, const struct rte_flow_template_table *table,
2546 			 const struct rte_flow_item pattern[], uint8_t pattern_template_index,
2547 			 uint32_t *hash, struct rte_flow_error *error)
2548 {
2549 	int ret;
2550 	struct rte_eth_dev *dev;
2551 	const struct rte_flow_ops *ops;
2552 
2553 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2554 	ops = rte_flow_ops_get(port_id, error);
2555 	if (!ops || !ops->flow_calc_table_hash)
2556 		return rte_flow_error_set(error, ENOTSUP,
2557 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2558 					  "calc table hash is not supported");
2559 	dev = &rte_eth_devices[port_id];
2560 	ret = ops->flow_calc_table_hash(dev, table, pattern, pattern_template_index,
2561 					hash, error);
2562 	return flow_err(port_id, ret, error);
2563 }
2564 
2565 int
2566 rte_flow_calc_encap_hash(uint16_t port_id, const struct rte_flow_item pattern[],
2567 			 enum rte_flow_encap_hash_field dest_field, uint8_t hash_len,
2568 			 uint8_t *hash, struct rte_flow_error *error)
2569 {
2570 	int ret;
2571 	struct rte_eth_dev *dev;
2572 	const struct rte_flow_ops *ops;
2573 
2574 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2575 	ops = rte_flow_ops_get(port_id, error);
2576 	if (!ops || !ops->flow_calc_encap_hash)
2577 		return rte_flow_error_set(error, ENOTSUP,
2578 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2579 					  "calc encap hash is not supported");
2580 	if (dest_field > RTE_FLOW_ENCAP_HASH_FIELD_NVGRE_FLOW_ID)
2581 		return rte_flow_error_set(error, EINVAL,
2582 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2583 					  "hash dest field is not defined");
2584 	if ((dest_field == RTE_FLOW_ENCAP_HASH_FIELD_SRC_PORT && hash_len != 2) ||
2585 	    (dest_field == RTE_FLOW_ENCAP_HASH_FIELD_NVGRE_FLOW_ID && hash_len != 1))
2586 		return rte_flow_error_set(error, EINVAL,
2587 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2588 					  "hash len doesn't match the requested field len");
2589 	dev = &rte_eth_devices[port_id];
2590 	ret = ops->flow_calc_encap_hash(dev, pattern, dest_field, hash, error);
2591 	return flow_err(port_id, ret, error);
2592 }
2593 
2594 bool
2595 rte_flow_template_table_resizable(__rte_unused uint16_t port_id,
2596 				  const struct rte_flow_template_table_attr *tbl_attr)
2597 {
2598 	return (tbl_attr->specialize &
2599 		RTE_FLOW_TABLE_SPECIALIZE_RESIZABLE) != 0;
2600 }
2601 
2602 int
2603 rte_flow_template_table_resize(uint16_t port_id,
2604 			       struct rte_flow_template_table *table,
2605 			       uint32_t nb_rules,
2606 			       struct rte_flow_error *error)
2607 {
2608 	int ret;
2609 	struct rte_eth_dev *dev;
2610 	const struct rte_flow_ops *ops;
2611 
2612 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2613 	ops = rte_flow_ops_get(port_id, error);
2614 	if (!ops || !ops->flow_template_table_resize)
2615 		return rte_flow_error_set(error, ENOTSUP,
2616 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2617 					  "flow_template_table_resize not supported");
2618 	dev = &rte_eth_devices[port_id];
2619 	ret = ops->flow_template_table_resize(dev, table, nb_rules, error);
2620 	ret = flow_err(port_id, ret, error);
2621 	rte_flow_trace_template_table_resize(port_id, table, nb_rules, ret);
2622 	return ret;
2623 }
2624 
2625 int
2626 rte_flow_async_update_resized(uint16_t port_id, uint32_t queue,
2627 			      const struct rte_flow_op_attr *attr,
2628 			      struct rte_flow *rule, void *user_data,
2629 			      struct rte_flow_error *error)
2630 {
2631 	int ret;
2632 	struct rte_eth_dev *dev;
2633 	const struct rte_flow_ops *ops;
2634 
2635 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2636 	ops = rte_flow_ops_get(port_id, error);
2637 	if (!ops || !ops->flow_update_resized)
2638 		return rte_flow_error_set(error, ENOTSUP,
2639 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2640 					  "flow_update_resized not supported");
2641 	dev = &rte_eth_devices[port_id];
2642 	ret = ops->flow_update_resized(dev, queue, attr, rule, user_data, error);
2643 	ret = flow_err(port_id, ret, error);
2644 	rte_flow_trace_async_update_resized(port_id, queue, attr,
2645 					    rule, user_data, ret);
2646 	return ret;
2647 }
2648 
2649 int
2650 rte_flow_template_table_resize_complete(uint16_t port_id,
2651 					struct rte_flow_template_table *table,
2652 					struct rte_flow_error *error)
2653 {
2654 	int ret;
2655 	struct rte_eth_dev *dev;
2656 	const struct rte_flow_ops *ops;
2657 
2658 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2659 	ops = rte_flow_ops_get(port_id, error);
2660 	if (!ops || !ops->flow_template_table_resize_complete)
2661 		return rte_flow_error_set(error, ENOTSUP,
2662 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2663 					  "flow_template_table_resize_complete not supported");
2664 	dev = &rte_eth_devices[port_id];
2665 	ret = ops->flow_template_table_resize_complete(dev, table, error);
2666 	ret = flow_err(port_id, ret, error);
2667 	rte_flow_trace_table_resize_complete(port_id, table, ret);
2668 	return ret;
2669 }
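
/*
 * The three calls above form one resize sequence (illustrative only;
 * assumes "table" was created with RTE_FLOW_TABLE_SPECIALIZE_RESIZABLE
 * set in its attributes):
 *
 *	rte_flow_template_table_resize(port_id, table, new_nb_rules, &error);
 *
 * then, for each rule created before the resize:
 *
 *	rte_flow_async_update_resized(port_id, queue_id, &op_attr,
 *				      rule, NULL, &error);
 *
 * and finally, once all updates have completed:
 *
 *	rte_flow_template_table_resize_complete(port_id, table, &error);
 */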
2670 
2671 static struct rte_flow *
2672 rte_flow_dummy_async_create(struct rte_eth_dev *dev __rte_unused,
2673 			    uint32_t queue __rte_unused,
2674 			    const struct rte_flow_op_attr *attr __rte_unused,
2675 			    struct rte_flow_template_table *table __rte_unused,
2676 			    const struct rte_flow_item items[] __rte_unused,
2677 			    uint8_t pattern_template_index __rte_unused,
2678 			    const struct rte_flow_action actions[] __rte_unused,
2679 			    uint8_t action_template_index __rte_unused,
2680 			    void *user_data __rte_unused,
2681 			    struct rte_flow_error *error)
2682 {
2683 	rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2684 			   rte_strerror(ENOSYS));
2685 	return NULL;
2686 }
2687 
2688 static struct rte_flow *
2689 rte_flow_dummy_async_create_by_index(struct rte_eth_dev *dev __rte_unused,
2690 				     uint32_t queue __rte_unused,
2691 				     const struct rte_flow_op_attr *attr __rte_unused,
2692 				     struct rte_flow_template_table *table __rte_unused,
2693 				     uint32_t rule_index __rte_unused,
2694 				     const struct rte_flow_action actions[] __rte_unused,
2695 				     uint8_t action_template_index __rte_unused,
2696 				     void *user_data __rte_unused,
2697 				     struct rte_flow_error *error)
2698 {
2699 	rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2700 			   rte_strerror(ENOSYS));
2701 	return NULL;
2702 }
2703 
2704 static int
2705 rte_flow_dummy_async_actions_update(struct rte_eth_dev *dev __rte_unused,
2706 				    uint32_t queue_id __rte_unused,
2707 				    const struct rte_flow_op_attr *op_attr __rte_unused,
2708 				    struct rte_flow *flow __rte_unused,
2709 				    const struct rte_flow_action actions[] __rte_unused,
2710 				    uint8_t actions_template_index __rte_unused,
2711 				    void *user_data __rte_unused,
2712 				    struct rte_flow_error *error)
2713 {
2714 	return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2715 				  rte_strerror(ENOSYS));
2716 }
2717 
2718 static int
2719 rte_flow_dummy_async_destroy(struct rte_eth_dev *dev __rte_unused,
2720 			     uint32_t queue_id __rte_unused,
2721 			     const struct rte_flow_op_attr *op_attr __rte_unused,
2722 			     struct rte_flow *flow __rte_unused,
2723 			     void *user_data __rte_unused,
2724 			     struct rte_flow_error *error)
2725 {
2726 	return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2727 				  rte_strerror(ENOSYS));
2728 }
2729 
2730 static int
2731 rte_flow_dummy_push(struct rte_eth_dev *dev __rte_unused,
2732 		    uint32_t queue_id __rte_unused,
2733 		    struct rte_flow_error *error)
2734 {
2735 	return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2736 				  rte_strerror(ENOSYS));
2737 }
2738 
2739 static int
2740 rte_flow_dummy_pull(struct rte_eth_dev *dev __rte_unused,
2741 		    uint32_t queue_id __rte_unused,
2742 		    struct rte_flow_op_result res[] __rte_unused,
2743 		    uint16_t n_res __rte_unused,
2744 		    struct rte_flow_error *error)
2745 {
2746 	return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2747 				  rte_strerror(ENOSYS));
2748 }
2749 
2750 static struct rte_flow_action_handle *
2751 rte_flow_dummy_async_action_handle_create(
2752 	struct rte_eth_dev *dev __rte_unused,
2753 	uint32_t queue_id __rte_unused,
2754 	const struct rte_flow_op_attr *op_attr __rte_unused,
2755 	const struct rte_flow_indir_action_conf *indir_action_conf __rte_unused,
2756 	const struct rte_flow_action *action __rte_unused,
2757 	void *user_data __rte_unused,
2758 	struct rte_flow_error *error)
2759 {
2760 	rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2761 			   rte_strerror(ENOSYS));
2762 	return NULL;
2763 }
2764 
2765 static int
2766 rte_flow_dummy_async_action_handle_destroy(
2767 	struct rte_eth_dev *dev __rte_unused,
2768 	uint32_t queue_id __rte_unused,
2769 	const struct rte_flow_op_attr *op_attr __rte_unused,
2770 	struct rte_flow_action_handle *action_handle __rte_unused,
2771 	void *user_data __rte_unused,
2772 	struct rte_flow_error *error)
2773 {
2774 	return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2775 				  rte_strerror(ENOSYS));
2776 }
2777 
2778 static int
2779 rte_flow_dummy_async_action_handle_update(
2780 	struct rte_eth_dev *dev __rte_unused,
2781 	uint32_t queue_id __rte_unused,
2782 	const struct rte_flow_op_attr *op_attr __rte_unused,
2783 	struct rte_flow_action_handle *action_handle __rte_unused,
2784 	const void *update __rte_unused,
2785 	void *user_data __rte_unused,
2786 	struct rte_flow_error *error)
2787 {
2788 	return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2789 				  rte_strerror(ENOSYS));
2790 }
2791 
2792 static int
2793 rte_flow_dummy_async_action_handle_query(
2794 	struct rte_eth_dev *dev __rte_unused,
2795 	uint32_t queue_id __rte_unused,
2796 	const struct rte_flow_op_attr *op_attr __rte_unused,
2797 	const struct rte_flow_action_handle *action_handle __rte_unused,
2798 	void *data __rte_unused,
2799 	void *user_data __rte_unused,
2800 	struct rte_flow_error *error)
2801 {
2802 	return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2803 				  rte_strerror(ENOSYS));
2804 }
2805 
2806 static int
2807 rte_flow_dummy_async_action_handle_query_update(
2808 	struct rte_eth_dev *dev __rte_unused,
2809 	uint32_t queue_id __rte_unused,
2810 	const struct rte_flow_op_attr *attr __rte_unused,
2811 	struct rte_flow_action_handle *handle __rte_unused,
2812 	const void *update __rte_unused,
2813 	void *query __rte_unused,
2814 	enum rte_flow_query_update_mode mode __rte_unused,
2815 	void *user_data __rte_unused,
2816 	struct rte_flow_error *error)
2817 {
2818 	return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2819 				  rte_strerror(ENOSYS));
2820 }
2821 
2822 static struct rte_flow_action_list_handle *
2823 rte_flow_dummy_async_action_list_handle_create(
2824 	struct rte_eth_dev *dev __rte_unused,
2825 	uint32_t queue_id __rte_unused,
2826 	const struct rte_flow_op_attr *attr __rte_unused,
2827 	const struct rte_flow_indir_action_conf *conf __rte_unused,
2828 	const struct rte_flow_action *actions __rte_unused,
2829 	void *user_data __rte_unused,
2830 	struct rte_flow_error *error)
2831 {
2832 	rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2833 			   rte_strerror(ENOSYS));
2834 	return NULL;
2835 }
2836 
2837 static int
2838 rte_flow_dummy_async_action_list_handle_destroy(
2839 	struct rte_eth_dev *dev __rte_unused,
2840 	uint32_t queue_id __rte_unused,
2841 	const struct rte_flow_op_attr *op_attr __rte_unused,
2842 	struct rte_flow_action_list_handle *handle __rte_unused,
2843 	void *user_data __rte_unused,
2844 	struct rte_flow_error *error)
2845 {
2846 	return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2847 				  rte_strerror(ENOSYS));
2848 }
2849 
2850 static int
2851 rte_flow_dummy_async_action_list_handle_query_update(
2852 	struct rte_eth_dev *dev __rte_unused,
2853 	uint32_t queue_id __rte_unused,
2854 	const struct rte_flow_op_attr *attr __rte_unused,
2855 	const struct rte_flow_action_list_handle *handle __rte_unused,
2856 	const void **update __rte_unused,
2857 	void **query __rte_unused,
2858 	enum rte_flow_query_update_mode mode __rte_unused,
2859 	void *user_data __rte_unused,
2860 	struct rte_flow_error *error)
2861 {
2862 	return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2863 				  rte_strerror(ENOSYS));
2864 }
2865 
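/*
 * Default fast-path ops: each callback is a stub reporting ENOSYS, so
 * that async flow API calls on a port whose driver did not install its
 * own fast-path ops fail gracefully instead of dereferencing NULL.
 */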
2866 struct rte_flow_fp_ops rte_flow_fp_default_ops = {
2867 	.async_create = rte_flow_dummy_async_create,
2868 	.async_create_by_index = rte_flow_dummy_async_create_by_index,
2869 	.async_actions_update = rte_flow_dummy_async_actions_update,
2870 	.async_destroy = rte_flow_dummy_async_destroy,
2871 	.push = rte_flow_dummy_push,
2872 	.pull = rte_flow_dummy_pull,
2873 	.async_action_handle_create = rte_flow_dummy_async_action_handle_create,
2874 	.async_action_handle_destroy = rte_flow_dummy_async_action_handle_destroy,
2875 	.async_action_handle_update = rte_flow_dummy_async_action_handle_update,
2876 	.async_action_handle_query = rte_flow_dummy_async_action_handle_query,
2877 	.async_action_handle_query_update = rte_flow_dummy_async_action_handle_query_update,
2878 	.async_action_list_handle_create = rte_flow_dummy_async_action_list_handle_create,
2879 	.async_action_list_handle_destroy = rte_flow_dummy_async_action_list_handle_destroy,
2880 	.async_action_list_handle_query_update =
2881 		rte_flow_dummy_async_action_list_handle_query_update,
2882 };
2883