xref: /dpdk/lib/ethdev/rte_flow.c (revision be5ded2f96072e887d5155516f8bbe69d1fb07ad)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright 2016 6WIND S.A.
3  * Copyright 2016 Mellanox Technologies, Ltd
4  */
5 
6 #include <stdalign.h>
7 #include <errno.h>
8 #include <stddef.h>
9 #include <stdint.h>
10 #include <pthread.h>
11 
12 #include <rte_common.h>
13 #include <rte_errno.h>
14 #include <rte_branch_prediction.h>
15 #include <rte_string_fns.h>
16 #include <rte_mbuf_dyn.h>
17 #include "rte_flow_driver.h"
18 #include "rte_flow.h"
19 
20 #include "ethdev_trace.h"
21 
22 #define FLOW_LOG RTE_ETHDEV_LOG_LINE
23 
/* Mbuf dynamic field offset for metadata; -1 until successfully registered
 * by rte_flow_dynf_metadata_register().
 */
int32_t rte_flow_dynf_metadata_offs = -1;

/* Mbuf dynamic flag bit mask for metadata; zero until registered. */
uint64_t rte_flow_dynf_metadata_mask;
29 
/**
 * Flow elements description tables.
 */
struct rte_flow_desc_data {
	const char *name; /**< Element name; NULL marks an unknown type. */
	size_t size; /**< Fixed size of the element configuration. */
	/** Optional deep-copy helper returning extra trailing bytes needed. */
	size_t (*desc_fn)(void *dst, const void *src);
};
38 
39 /**
40  *
41  * @param buf
42  * Destination memory.
43  * @param data
44  * Source memory
45  * @param size
46  * Requested copy size
47  * @param desc
48  * rte_flow_desc_item - for flow item conversion.
49  * rte_flow_desc_action - for flow action conversion.
50  * @param type
51  * Offset into the desc param or negative value for private flow elements.
52  */
53 static inline size_t
54 rte_flow_conv_copy(void *buf, const void *data, const size_t size,
55 		   const struct rte_flow_desc_data *desc, int type)
56 {
57 	/**
58 	 * Allow PMD private flow item
59 	 */
60 	bool rte_type = type >= 0;
61 
62 	size_t sz = rte_type ? desc[type].size : sizeof(void *);
63 	if (data == NULL)
64 		return 0;
65 	if (buf != NULL)
66 		rte_memcpy(buf, data, (size > sz ? sz : size));
67 	if (rte_type && desc[type].desc_fn)
68 		sz += desc[type].desc_fn(size > 0 ? buf : NULL, data);
69 	return sz;
70 }
71 
72 static size_t
73 rte_flow_item_flex_conv(void *buf, const void *data)
74 {
75 	struct rte_flow_item_flex *dst = buf;
76 	const struct rte_flow_item_flex *src = data;
77 	if (buf) {
78 		dst->pattern = rte_memcpy
79 			((void *)((uintptr_t)(dst + 1)), src->pattern,
80 			 src->length);
81 	}
82 	return src->length;
83 }
84 
/** Generate flow_item[] entry. */
#define MK_FLOW_ITEM(t, s) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s,               \
		.desc_fn = NULL,\
	}

/** Generate flow_item[] entry with a deep-copy helper function. */
#define MK_FLOW_ITEM_FN(t, s, fn) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = {\
		.name = # t,                 \
		.size = s,                   \
		.desc_fn = fn,               \
	}
99 
/** Information about known flow pattern items, indexed by item type. */
static const struct rte_flow_desc_data rte_flow_desc_item[] = {
	MK_FLOW_ITEM(END, 0),
	MK_FLOW_ITEM(VOID, 0),
	MK_FLOW_ITEM(INVERT, 0),
	MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
	MK_FLOW_ITEM(PORT_ID, sizeof(struct rte_flow_item_port_id)),
	MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)),
	MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
	MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
	MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
	MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
	MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
	MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
	MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
	MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
	MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
	MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
	MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
	MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
	MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
	MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)),
	MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(ESP, sizeof(struct rte_flow_item_esp)),
	MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
	MK_FLOW_ITEM(VXLAN_GPE, sizeof(struct rte_flow_item_vxlan_gpe)),
	MK_FLOW_ITEM(ARP_ETH_IPV4, sizeof(struct rte_flow_item_arp_eth_ipv4)),
	MK_FLOW_ITEM(IPV6_EXT, sizeof(struct rte_flow_item_ipv6_ext)),
	MK_FLOW_ITEM(IPV6_FRAG_EXT, sizeof(struct rte_flow_item_ipv6_frag_ext)),
	MK_FLOW_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
	MK_FLOW_ITEM(ICMP6_ECHO_REQUEST, sizeof(struct rte_flow_item_icmp6_echo)),
	MK_FLOW_ITEM(ICMP6_ECHO_REPLY, sizeof(struct rte_flow_item_icmp6_echo)),
	MK_FLOW_ITEM(ICMP6_ND_NS, sizeof(struct rte_flow_item_icmp6_nd_ns)),
	MK_FLOW_ITEM(ICMP6_ND_NA, sizeof(struct rte_flow_item_icmp6_nd_na)),
	MK_FLOW_ITEM(ICMP6_ND_OPT, sizeof(struct rte_flow_item_icmp6_nd_opt)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_SLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_TLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
	MK_FLOW_ITEM(MARK, sizeof(struct rte_flow_item_mark)),
	MK_FLOW_ITEM(META, sizeof(struct rte_flow_item_meta)),
	MK_FLOW_ITEM(RANDOM, sizeof(struct rte_flow_item_random)),
	MK_FLOW_ITEM(TAG, sizeof(struct rte_flow_item_tag)),
	MK_FLOW_ITEM(GRE_KEY, sizeof(rte_be32_t)),
	MK_FLOW_ITEM(GRE_OPTION, sizeof(struct rte_flow_item_gre_opt)),
	MK_FLOW_ITEM(GTP_PSC, sizeof(struct rte_flow_item_gtp_psc)),
	MK_FLOW_ITEM(PPPOES, sizeof(struct rte_flow_item_pppoe)),
	MK_FLOW_ITEM(PPPOED, sizeof(struct rte_flow_item_pppoe)),
	MK_FLOW_ITEM(PPPOE_PROTO_ID,
			sizeof(struct rte_flow_item_pppoe_proto_id)),
	MK_FLOW_ITEM(NSH, sizeof(struct rte_flow_item_nsh)),
	MK_FLOW_ITEM(IGMP, sizeof(struct rte_flow_item_igmp)),
	MK_FLOW_ITEM(AH, sizeof(struct rte_flow_item_ah)),
	MK_FLOW_ITEM(HIGIG2, sizeof(struct rte_flow_item_higig2_hdr)),
	MK_FLOW_ITEM(L2TPV3OIP, sizeof(struct rte_flow_item_l2tpv3oip)),
	MK_FLOW_ITEM(PFCP, sizeof(struct rte_flow_item_pfcp)),
	MK_FLOW_ITEM(ECPRI, sizeof(struct rte_flow_item_ecpri)),
	MK_FLOW_ITEM(GENEVE_OPT, sizeof(struct rte_flow_item_geneve_opt)),
	MK_FLOW_ITEM(INTEGRITY, sizeof(struct rte_flow_item_integrity)),
	MK_FLOW_ITEM(CONNTRACK, sizeof(uint32_t)),
	MK_FLOW_ITEM(PORT_REPRESENTOR, sizeof(struct rte_flow_item_ethdev)),
	MK_FLOW_ITEM(REPRESENTED_PORT, sizeof(struct rte_flow_item_ethdev)),
	/* FLEX carries a variable-length pattern requiring a deep copy. */
	MK_FLOW_ITEM_FN(FLEX, sizeof(struct rte_flow_item_flex),
			rte_flow_item_flex_conv),
	MK_FLOW_ITEM(L2TPV2, sizeof(struct rte_flow_item_l2tpv2)),
	MK_FLOW_ITEM(PPP, sizeof(struct rte_flow_item_ppp)),
	MK_FLOW_ITEM(METER_COLOR, sizeof(struct rte_flow_item_meter_color)),
	MK_FLOW_ITEM(IPV6_ROUTING_EXT, sizeof(struct rte_flow_item_ipv6_routing_ext)),
	MK_FLOW_ITEM(QUOTA, sizeof(struct rte_flow_item_quota)),
	MK_FLOW_ITEM(AGGR_AFFINITY, sizeof(struct rte_flow_item_aggr_affinity)),
	MK_FLOW_ITEM(TX_QUEUE, sizeof(struct rte_flow_item_tx_queue)),
	MK_FLOW_ITEM(IB_BTH, sizeof(struct rte_flow_item_ib_bth)),
	MK_FLOW_ITEM(PTYPE, sizeof(struct rte_flow_item_ptype)),
	MK_FLOW_ITEM(COMPARE, sizeof(struct rte_flow_item_compare)),
};
177 
/** Generate flow_action[] entry. */
#define MK_FLOW_ACTION(t, s) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
		.desc_fn = NULL,\
	}

/** Generate flow_action[] entry with a deep-copy helper function. */
#define MK_FLOW_ACTION_FN(t, fn) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = 0, \
		.desc_fn = fn,\
	}
192 
193 
/** Information about known flow actions, indexed by action type. */
static const struct rte_flow_desc_data rte_flow_desc_action[] = {
	MK_FLOW_ACTION(END, 0),
	MK_FLOW_ACTION(VOID, 0),
	MK_FLOW_ACTION(PASSTHRU, 0),
	MK_FLOW_ACTION(JUMP, sizeof(struct rte_flow_action_jump)),
	MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
	MK_FLOW_ACTION(FLAG, 0),
	MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
	MK_FLOW_ACTION(DROP, 0),
	MK_FLOW_ACTION(COUNT, sizeof(struct rte_flow_action_count)),
	MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)),
	MK_FLOW_ACTION(PF, 0),
	MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
	MK_FLOW_ACTION(PORT_ID, sizeof(struct rte_flow_action_port_id)),
	MK_FLOW_ACTION(METER, sizeof(struct rte_flow_action_meter)),
	MK_FLOW_ACTION(SECURITY, sizeof(struct rte_flow_action_security)),
	MK_FLOW_ACTION(OF_DEC_NW_TTL, 0),
	MK_FLOW_ACTION(OF_POP_VLAN, 0),
	MK_FLOW_ACTION(OF_PUSH_VLAN,
		       sizeof(struct rte_flow_action_of_push_vlan)),
	MK_FLOW_ACTION(OF_SET_VLAN_VID,
		       sizeof(struct rte_flow_action_of_set_vlan_vid)),
	MK_FLOW_ACTION(OF_SET_VLAN_PCP,
		       sizeof(struct rte_flow_action_of_set_vlan_pcp)),
	MK_FLOW_ACTION(OF_POP_MPLS,
		       sizeof(struct rte_flow_action_of_pop_mpls)),
	MK_FLOW_ACTION(OF_PUSH_MPLS,
		       sizeof(struct rte_flow_action_of_push_mpls)),
	MK_FLOW_ACTION(VXLAN_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
	MK_FLOW_ACTION(VXLAN_DECAP, 0),
	MK_FLOW_ACTION(NVGRE_ENCAP, sizeof(struct rte_flow_action_nvgre_encap)),
	MK_FLOW_ACTION(NVGRE_DECAP, 0),
	MK_FLOW_ACTION(RAW_ENCAP, sizeof(struct rte_flow_action_raw_encap)),
	MK_FLOW_ACTION(RAW_DECAP, sizeof(struct rte_flow_action_raw_decap)),
	MK_FLOW_ACTION(SET_IPV4_SRC,
		       sizeof(struct rte_flow_action_set_ipv4)),
	MK_FLOW_ACTION(SET_IPV4_DST,
		       sizeof(struct rte_flow_action_set_ipv4)),
	MK_FLOW_ACTION(SET_IPV6_SRC,
		       sizeof(struct rte_flow_action_set_ipv6)),
	MK_FLOW_ACTION(SET_IPV6_DST,
		       sizeof(struct rte_flow_action_set_ipv6)),
	MK_FLOW_ACTION(SET_TP_SRC,
		       sizeof(struct rte_flow_action_set_tp)),
	MK_FLOW_ACTION(SET_TP_DST,
		       sizeof(struct rte_flow_action_set_tp)),
	MK_FLOW_ACTION(MAC_SWAP, 0),
	MK_FLOW_ACTION(DEC_TTL, 0),
	MK_FLOW_ACTION(SET_TTL, sizeof(struct rte_flow_action_set_ttl)),
	MK_FLOW_ACTION(SET_MAC_SRC, sizeof(struct rte_flow_action_set_mac)),
	MK_FLOW_ACTION(SET_MAC_DST, sizeof(struct rte_flow_action_set_mac)),
	MK_FLOW_ACTION(INC_TCP_SEQ, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(DEC_TCP_SEQ, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(INC_TCP_ACK, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(DEC_TCP_ACK, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(SET_TAG, sizeof(struct rte_flow_action_set_tag)),
	MK_FLOW_ACTION(SET_META, sizeof(struct rte_flow_action_set_meta)),
	MK_FLOW_ACTION(SET_IPV4_DSCP, sizeof(struct rte_flow_action_set_dscp)),
	MK_FLOW_ACTION(SET_IPV6_DSCP, sizeof(struct rte_flow_action_set_dscp)),
	MK_FLOW_ACTION(AGE, sizeof(struct rte_flow_action_age)),
	MK_FLOW_ACTION(SAMPLE, sizeof(struct rte_flow_action_sample)),
	MK_FLOW_ACTION(MODIFY_FIELD,
		       sizeof(struct rte_flow_action_modify_field)),
	/**
	 * Indirect action represented as handle of type
	 * (struct rte_flow_action_handle *) stored in conf field (see
	 * struct rte_flow_action); no need for additional structure to
	 * store indirect action handle.
	 */
	MK_FLOW_ACTION(INDIRECT, 0),
	MK_FLOW_ACTION(CONNTRACK, sizeof(struct rte_flow_action_conntrack)),
	MK_FLOW_ACTION(PORT_REPRESENTOR, sizeof(struct rte_flow_action_ethdev)),
	MK_FLOW_ACTION(REPRESENTED_PORT, sizeof(struct rte_flow_action_ethdev)),
	MK_FLOW_ACTION(METER_MARK, sizeof(struct rte_flow_action_meter_mark)),
	MK_FLOW_ACTION(SEND_TO_KERNEL, 0),
	MK_FLOW_ACTION(QUOTA, sizeof(struct rte_flow_action_quota)),
	MK_FLOW_ACTION(IPV6_EXT_PUSH, sizeof(struct rte_flow_action_ipv6_ext_push)),
	MK_FLOW_ACTION(IPV6_EXT_REMOVE, sizeof(struct rte_flow_action_ipv6_ext_remove)),
	MK_FLOW_ACTION(INDIRECT_LIST,
		       sizeof(struct rte_flow_action_indirect_list)),
	MK_FLOW_ACTION(PROG,
		       sizeof(struct rte_flow_action_prog)),
	MK_FLOW_ACTION(NAT64, sizeof(struct rte_flow_action_nat64)),
	MK_FLOW_ACTION(JUMP_TO_TABLE_INDEX, sizeof(struct rte_flow_action_jump_to_table_index)),
};
280 
281 int
282 rte_flow_dynf_metadata_register(void)
283 {
284 	int offset;
285 	int flag;
286 
287 	static const struct rte_mbuf_dynfield desc_offs = {
288 		.name = RTE_MBUF_DYNFIELD_METADATA_NAME,
289 		.size = sizeof(uint32_t),
290 		.align = alignof(uint32_t),
291 	};
292 	static const struct rte_mbuf_dynflag desc_flag = {
293 		.name = RTE_MBUF_DYNFLAG_METADATA_NAME,
294 	};
295 
296 	offset = rte_mbuf_dynfield_register(&desc_offs);
297 	if (offset < 0)
298 		goto error;
299 	flag = rte_mbuf_dynflag_register(&desc_flag);
300 	if (flag < 0)
301 		goto error;
302 	rte_flow_dynf_metadata_offs = offset;
303 	rte_flow_dynf_metadata_mask = RTE_BIT64(flag);
304 
305 	rte_flow_trace_dynf_metadata_register(offset, RTE_BIT64(flag));
306 
307 	return 0;
308 
309 error:
310 	rte_flow_dynf_metadata_offs = -1;
311 	rte_flow_dynf_metadata_mask = UINT64_C(0);
312 	return -rte_errno;
313 }
314 
315 static inline void
316 fts_enter(struct rte_eth_dev *dev)
317 {
318 	if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
319 		pthread_mutex_lock(&dev->data->flow_ops_mutex);
320 }
321 
322 static inline void
323 fts_exit(struct rte_eth_dev *dev)
324 {
325 	if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
326 		pthread_mutex_unlock(&dev->data->flow_ops_mutex);
327 }
328 
329 static int
330 flow_err(uint16_t port_id, int ret, struct rte_flow_error *error)
331 {
332 	if (ret == 0)
333 		return 0;
334 	if (rte_eth_dev_is_removed(port_id))
335 		return rte_flow_error_set(error, EIO,
336 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
337 					  NULL, rte_strerror(EIO));
338 	return ret;
339 }
340 
341 /* Get generic flow operations structure from a port. */
342 const struct rte_flow_ops *
343 rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error)
344 {
345 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
346 	const struct rte_flow_ops *ops;
347 	int code;
348 
349 	if (unlikely(!rte_eth_dev_is_valid_port(port_id)))
350 		code = ENODEV;
351 	else if (unlikely(dev->dev_ops->flow_ops_get == NULL))
352 		/* flow API not supported with this driver dev_ops */
353 		code = ENOSYS;
354 	else
355 		code = dev->dev_ops->flow_ops_get(dev, &ops);
356 	if (code == 0 && ops == NULL)
357 		/* flow API not supported with this device */
358 		code = ENOSYS;
359 
360 	if (code != 0) {
361 		rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
362 				   NULL, rte_strerror(code));
363 		return NULL;
364 	}
365 	return ops;
366 }
367 
368 /* Check whether a flow rule can be created on a given port. */
369 int
370 rte_flow_validate(uint16_t port_id,
371 		  const struct rte_flow_attr *attr,
372 		  const struct rte_flow_item pattern[],
373 		  const struct rte_flow_action actions[],
374 		  struct rte_flow_error *error)
375 {
376 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
377 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
378 	int ret;
379 
380 	if (likely(!!attr) && attr->transfer &&
381 	    (attr->ingress || attr->egress)) {
382 		return rte_flow_error_set(error, EINVAL,
383 					  RTE_FLOW_ERROR_TYPE_ATTR,
384 					  attr, "cannot use attr ingress/egress with attr transfer");
385 	}
386 
387 	if (unlikely(!ops))
388 		return -rte_errno;
389 	if (likely(!!ops->validate)) {
390 		fts_enter(dev);
391 		ret = ops->validate(dev, attr, pattern, actions, error);
392 		fts_exit(dev);
393 		ret = flow_err(port_id, ret, error);
394 
395 		rte_flow_trace_validate(port_id, attr, pattern, actions, ret);
396 
397 		return ret;
398 	}
399 	return rte_flow_error_set(error, ENOSYS,
400 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
401 				  NULL, rte_strerror(ENOSYS));
402 }
403 
404 /* Create a flow rule on a given port. */
405 struct rte_flow *
406 rte_flow_create(uint16_t port_id,
407 		const struct rte_flow_attr *attr,
408 		const struct rte_flow_item pattern[],
409 		const struct rte_flow_action actions[],
410 		struct rte_flow_error *error)
411 {
412 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
413 	struct rte_flow *flow;
414 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
415 
416 	if (unlikely(!ops))
417 		return NULL;
418 	if (likely(!!ops->create)) {
419 		fts_enter(dev);
420 		flow = ops->create(dev, attr, pattern, actions, error);
421 		fts_exit(dev);
422 		if (flow == NULL)
423 			flow_err(port_id, -rte_errno, error);
424 
425 		rte_flow_trace_create(port_id, attr, pattern, actions, flow);
426 
427 		return flow;
428 	}
429 	rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
430 			   NULL, rte_strerror(ENOSYS));
431 	return NULL;
432 }
433 
434 /* Destroy a flow rule on a given port. */
435 int
436 rte_flow_destroy(uint16_t port_id,
437 		 struct rte_flow *flow,
438 		 struct rte_flow_error *error)
439 {
440 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
441 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
442 	int ret;
443 
444 	if (unlikely(!ops))
445 		return -rte_errno;
446 	if (likely(!!ops->destroy)) {
447 		fts_enter(dev);
448 		ret = ops->destroy(dev, flow, error);
449 		fts_exit(dev);
450 		ret = flow_err(port_id, ret, error);
451 
452 		rte_flow_trace_destroy(port_id, flow, ret);
453 
454 		return ret;
455 	}
456 	return rte_flow_error_set(error, ENOSYS,
457 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
458 				  NULL, rte_strerror(ENOSYS));
459 }
460 
461 int
462 rte_flow_actions_update(uint16_t port_id,
463 			struct rte_flow *flow,
464 			const struct rte_flow_action actions[],
465 			struct rte_flow_error *error)
466 {
467 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
468 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
469 	int ret;
470 
471 	if (unlikely(!ops))
472 		return -rte_errno;
473 	if (likely(!!ops->actions_update)) {
474 		fts_enter(dev);
475 		ret = ops->actions_update(dev, flow, actions, error);
476 		fts_exit(dev);
477 
478 		rte_flow_trace_actions_update(port_id, flow, actions, ret);
479 
480 		return flow_err(port_id, ret, error);
481 	}
482 	return rte_flow_error_set(error, ENOSYS,
483 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
484 				  NULL, rte_strerror(ENOSYS));
485 }
486 
487 /* Destroy all flow rules associated with a port. */
488 int
489 rte_flow_flush(uint16_t port_id,
490 	       struct rte_flow_error *error)
491 {
492 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
493 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
494 	int ret;
495 
496 	if (unlikely(!ops))
497 		return -rte_errno;
498 	if (likely(!!ops->flush)) {
499 		fts_enter(dev);
500 		ret = ops->flush(dev, error);
501 		fts_exit(dev);
502 		ret = flow_err(port_id, ret, error);
503 
504 		rte_flow_trace_flush(port_id, ret);
505 
506 		return ret;
507 	}
508 	return rte_flow_error_set(error, ENOSYS,
509 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
510 				  NULL, rte_strerror(ENOSYS));
511 }
512 
513 /* Query an existing flow rule. */
514 int
515 rte_flow_query(uint16_t port_id,
516 	       struct rte_flow *flow,
517 	       const struct rte_flow_action *action,
518 	       void *data,
519 	       struct rte_flow_error *error)
520 {
521 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
522 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
523 	int ret;
524 
525 	if (!ops)
526 		return -rte_errno;
527 	if (likely(!!ops->query)) {
528 		fts_enter(dev);
529 		ret = ops->query(dev, flow, action, data, error);
530 		fts_exit(dev);
531 		ret = flow_err(port_id, ret, error);
532 
533 		rte_flow_trace_query(port_id, flow, action, data, ret);
534 
535 		return ret;
536 	}
537 	return rte_flow_error_set(error, ENOSYS,
538 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
539 				  NULL, rte_strerror(ENOSYS));
540 }
541 
542 /* Restrict ingress traffic to the defined flow rules. */
543 int
544 rte_flow_isolate(uint16_t port_id,
545 		 int set,
546 		 struct rte_flow_error *error)
547 {
548 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
549 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
550 	int ret;
551 
552 	if (!ops)
553 		return -rte_errno;
554 	if (likely(!!ops->isolate)) {
555 		fts_enter(dev);
556 		ret = ops->isolate(dev, set, error);
557 		fts_exit(dev);
558 		ret = flow_err(port_id, ret, error);
559 
560 		rte_flow_trace_isolate(port_id, set, ret);
561 
562 		return ret;
563 	}
564 	return rte_flow_error_set(error, ENOSYS,
565 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
566 				  NULL, rte_strerror(ENOSYS));
567 }
568 
569 /* Initialize flow error structure. */
570 int
571 rte_flow_error_set(struct rte_flow_error *error,
572 		   int code,
573 		   enum rte_flow_error_type type,
574 		   const void *cause,
575 		   const char *message)
576 {
577 	if (error) {
578 		*error = (struct rte_flow_error){
579 			.type = type,
580 			.cause = cause,
581 			.message = message,
582 		};
583 	}
584 	rte_errno = code;
585 	return -code;
586 }
587 
/** Pattern item specification types. */
enum rte_flow_conv_item_spec_type {
	RTE_FLOW_CONV_ITEM_SPEC, /**< Copy the item's spec pointer. */
	RTE_FLOW_CONV_ITEM_LAST, /**< Copy the item's last pointer. */
	RTE_FLOW_CONV_ITEM_MASK, /**< Copy the item's mask pointer. */
};
594 
/**
 * Copy pattern item specification.
 *
 * Items with variable-length payloads (RAW, GENEVE_OPT) get their trailing
 * data deep-copied after the fixed structure; all other item types are
 * copied through the rte_flow_desc_item table.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] item
 *   Pattern item to copy specification from.
 * @param type
 *   Specification selector for either @p spec, @p last or @p mask.
 *
 * @return
 *   Number of bytes needed to store pattern item specification regardless
 *   of @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_item_spec(void *buf, const size_t size,
			const struct rte_flow_item *item,
			enum rte_flow_conv_item_spec_type type)
{
	size_t off;
	/* Pick the source pointer matching the requested selector. */
	const void *data =
		type == RTE_FLOW_CONV_ITEM_SPEC ? item->spec :
		type == RTE_FLOW_CONV_ITEM_LAST ? item->last :
		type == RTE_FLOW_CONV_ITEM_MASK ? item->mask :
		NULL;

	switch (item->type) {
		union {
			const struct rte_flow_item_raw *raw;
			const struct rte_flow_item_geneve_opt *geneve_opt;
		} spec;
		union {
			const struct rte_flow_item_raw *raw;
		} last;
		union {
			const struct rte_flow_item_raw *raw;
		} mask;
		union {
			const struct rte_flow_item_raw *raw;
			const struct rte_flow_item_geneve_opt *geneve_opt;
		} src;
		union {
			struct rte_flow_item_raw *raw;
			struct rte_flow_item_geneve_opt *geneve_opt;
		} dst;
		void *deep_src;
		size_t tmp;

	case RTE_FLOW_ITEM_TYPE_RAW:
		spec.raw = item->spec;
		/* Fall back to spec/default mask when last/mask are absent. */
		last.raw = item->last ? item->last : item->spec;
		mask.raw = item->mask ? item->mask : &rte_flow_item_raw_mask;
		src.raw = data;
		dst.raw = buf;
		/* Copy the fixed header but leave the pattern pointer unset
		 * until the deep copy below fixes it up.
		 */
		rte_memcpy(dst.raw,
			   (&(struct rte_flow_item_raw){
				.relative = src.raw->relative,
				.search = src.raw->search,
				.reserved = src.raw->reserved,
				.offset = src.raw->offset,
				.limit = src.raw->limit,
				.length = src.raw->length,
			   }),
			   size > sizeof(*dst.raw) ? sizeof(*dst.raw) : size);
		off = sizeof(*dst.raw);
		/* Effective pattern length: spec for SPEC, and for MASK the
		 * larger of masked spec/last lengths; otherwise last.
		 */
		if (type == RTE_FLOW_CONV_ITEM_SPEC ||
		    (type == RTE_FLOW_CONV_ITEM_MASK &&
		     ((spec.raw->length & mask.raw->length) >=
		      (last.raw->length & mask.raw->length))))
			tmp = spec.raw->length & mask.raw->length;
		else
			tmp = last.raw->length & mask.raw->length;
		if (tmp) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.raw->pattern));
			/* Deep-copy only when the buffer can hold it; the
			 * required size is still accounted for either way.
			 */
			if (size >= off + tmp) {
				deep_src = (void *)((uintptr_t)dst.raw + off);
				dst.raw->pattern = rte_memcpy(deep_src,
							      src.raw->pattern,
							      tmp);
			}
			off += tmp;
		}
		break;
	case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
		off = rte_flow_conv_copy(buf, data, size,
					 rte_flow_desc_item, item->type);
		/* NOTE(review): option length is always taken from
		 * item->spec, even when copying last/mask — assumes spec is
		 * present whenever this item type is converted; confirm
		 * callers guarantee this.
		 */
		spec.geneve_opt = item->spec;
		src.geneve_opt = data;
		dst.geneve_opt = buf;
		tmp = spec.geneve_opt->option_len << 2;
		if (size > 0 && src.geneve_opt->data) {
			deep_src = (void *)((uintptr_t)(dst.geneve_opt + 1));
			dst.geneve_opt->data = rte_memcpy(deep_src,
							  src.geneve_opt->data,
							  tmp);
		}
		off += tmp;
		break;
	default:
		/* Fixed-size items: plain table-driven copy. */
		off = rte_flow_conv_copy(buf, data, size,
					 rte_flow_desc_item, item->type);
		break;
	}
	return off;
}
703 
/**
 * Copy action configuration.
 *
 * RSS and the tunnel encapsulation actions carry out-of-line data (key,
 * queue list, item definition) which is deep-copied after the fixed
 * structure; all other action types are copied through the
 * rte_flow_desc_action table.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] action
 *   Action to copy configuration from.
 *
 * @return
 *   Number of bytes needed to store pattern item specification regardless
 *   of @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_action_conf(void *buf, const size_t size,
			  const struct rte_flow_action *action)
{
	size_t off;

	switch (action->type) {
		union {
			const struct rte_flow_action_rss *rss;
			const struct rte_flow_action_vxlan_encap *vxlan_encap;
			const struct rte_flow_action_nvgre_encap *nvgre_encap;
		} src;
		union {
			struct rte_flow_action_rss *rss;
			struct rte_flow_action_vxlan_encap *vxlan_encap;
			struct rte_flow_action_nvgre_encap *nvgre_encap;
		} dst;
		size_t tmp;
		int ret;

	case RTE_FLOW_ACTION_TYPE_RSS:
		src.rss = action->conf;
		dst.rss = buf;
		/* Copy scalar fields; key/queue pointers are fixed up by the
		 * deep copies below.
		 */
		rte_memcpy(dst.rss,
			   (&(struct rte_flow_action_rss){
				.func = src.rss->func,
				.level = src.rss->level,
				.types = src.rss->types,
				.key_len = src.rss->key_len,
				.queue_num = src.rss->queue_num,
			   }),
			   size > sizeof(*dst.rss) ? sizeof(*dst.rss) : size);
		off = sizeof(*dst.rss);
		if (src.rss->key_len && src.rss->key) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->key));
			tmp = sizeof(*src.rss->key) * src.rss->key_len;
			/* Copy only when it fits; size is counted anyway. */
			if (size >= (uint64_t)off + (uint64_t)tmp)
				dst.rss->key = rte_memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->key, tmp);
			off += tmp;
		}
		if (src.rss->queue_num) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->queue));
			tmp = sizeof(*src.rss->queue) * src.rss->queue_num;
			if (size >= (uint64_t)off + (uint64_t)tmp)
				dst.rss->queue = rte_memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->queue, tmp);
			off += tmp;
		}
		break;
	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
	case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
		src.vxlan_encap = action->conf;
		dst.vxlan_encap = buf;
		/* Both encap structs must stay layout-compatible for the
		 * shared handling below to be valid.
		 */
		RTE_BUILD_BUG_ON(sizeof(*src.vxlan_encap) !=
				 sizeof(*src.nvgre_encap) ||
				 offsetof(struct rte_flow_action_vxlan_encap,
					  definition) !=
				 offsetof(struct rte_flow_action_nvgre_encap,
					  definition));
		off = sizeof(*dst.vxlan_encap);
		if (src.vxlan_encap->definition) {
			off = RTE_ALIGN_CEIL
				(off, sizeof(*dst.vxlan_encap->definition));
			/* Recursively convert the embedded pattern list. */
			ret = rte_flow_conv
				(RTE_FLOW_CONV_OP_PATTERN,
				 (void *)((uintptr_t)dst.vxlan_encap + off),
				 size > off ? size - off : 0,
				 src.vxlan_encap->definition, NULL);
			if (ret < 0)
				return 0;
			if (size >= off + ret)
				dst.vxlan_encap->definition =
					(void *)((uintptr_t)dst.vxlan_encap +
						 off);
			off += ret;
		}
		break;
	default:
		/* Fixed-size actions: plain table-driven copy. */
		off = rte_flow_conv_copy(buf, action->conf, size,
					 rte_flow_desc_action, action->type);
		break;
	}
	return off;
}
806 
/**
 * Copy a list of pattern items.
 *
 * Works in two passes: the first copies the flat item array (validating
 * types and locating END), the second deep-copies each item's spec, last
 * and mask after the array, aligned for any scalar type.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source pattern items.
 * @param num
 *   Maximum number of pattern items to process from @p src or 0 to process
 *   the entire list. In both cases, processing stops after
 *   RTE_FLOW_ITEM_TYPE_END is encountered.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store
 *   pattern items regardless of @p size on success (@p buf contents are
 *   truncated to @p size if not large enough), a negative errno value
 *   otherwise and rte_errno is set.
 */
static int
rte_flow_conv_pattern(struct rte_flow_item *dst,
		      const size_t size,
		      const struct rte_flow_item *src,
		      unsigned int num,
		      struct rte_flow_error *error)
{
	uintptr_t data = (uintptr_t)dst;
	size_t off;
	size_t ret;
	unsigned int i;

	/* Pass 1: shallow-copy the item array, stop at END or num. */
	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
		/**
		 * allow PMD private flow item
		 */
		if (((int)src->type >= 0) &&
			((size_t)src->type >= RTE_DIM(rte_flow_desc_item) ||
		    !rte_flow_desc_item[src->type].name))
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, src,
				 "cannot convert unknown item type");
		if (size >= off + sizeof(*dst))
			*dst = (struct rte_flow_item){
				.type = src->type,
			};
		off += sizeof(*dst);
		/* END has value 0; include it and terminate the loop. */
		if (!src->type)
			num = i + 1;
	}
	/* Rewind both cursors for the deep-copy pass. */
	num = i;
	src -= num;
	dst -= num;
	/* Pass 2: deep-copy spec/last/mask after the array. */
	do {
		if (src->spec) {
			/* double alignment is safe for any field type. */
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_SPEC);
			if (size && size >= off + ret)
				dst->spec = (void *)(data + off);
			off += ret;

		}
		if (src->last) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_LAST);
			if (size && size >= off + ret)
				dst->last = (void *)(data + off);
			off += ret;
		}
		if (src->mask) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_MASK);
			if (size && size >= off + ret)
				dst->mask = (void *)(data + off);
			off += ret;
		}
		++src;
		++dst;
	} while (--num);
	return off;
}
899 
/**
 * Copy a list of actions.
 *
 * Works in two passes like rte_flow_conv_pattern(): the first copies the
 * flat action array, the second deep-copies each configuration. INDIRECT
 * actions are the exception: their conf is an opaque handle copied by
 * value.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source actions.
 * @param num
 *   Maximum number of actions to process from @p src or 0 to process the
 *   entire list. In both cases, processing stops after
 *   RTE_FLOW_ACTION_TYPE_END is encountered.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store
 *   actions regardless of @p size on success (@p buf contents are truncated
 *   to @p size if not large enough), a negative errno value otherwise and
 *   rte_errno is set.
 */
static int
rte_flow_conv_actions(struct rte_flow_action *dst,
		      const size_t size,
		      const struct rte_flow_action *src,
		      unsigned int num,
		      struct rte_flow_error *error)
{
	uintptr_t data = (uintptr_t)dst;
	size_t off;
	size_t ret;
	unsigned int i;

	/* Pass 1: shallow-copy the action array, stop at END or num. */
	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
		/**
		 * allow PMD private flow action
		 */
		if (((int)src->type >= 0) &&
		    ((size_t)src->type >= RTE_DIM(rte_flow_desc_action) ||
		    !rte_flow_desc_action[src->type].name))
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
				 src, "cannot convert unknown action type");
		if (size >= off + sizeof(*dst))
			*dst = (struct rte_flow_action){
				.type = src->type,
			};
		off += sizeof(*dst);
		/* END has value 0; include it and terminate the loop. */
		if (!src->type)
			num = i + 1;
	}
	/* Rewind both cursors for the deep-copy pass. */
	num = i;
	src -= num;
	dst -= num;
	do {
		if (src->type == RTE_FLOW_ACTION_TYPE_INDIRECT) {
			/*
			 * Indirect action conf fills the indirect action
			 * handler. Copy the action handle directly instead
			 * of duplicating the pointer memory.
			 */
			if (size)
				dst->conf = src->conf;
		} else if (src->conf) {
			/* double alignment is safe for any field type. */
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_action_conf
				((void *)(data + off),
				 size > off ? size - off : 0, src);
			if (size && size >= off + ret)
				dst->conf = (void *)(data + off);
			off += ret;
		}
		++src;
		++dst;
	} while (--num);
	return off;
}
978 
979 /**
980  * Copy flow rule components.
981  *
982  * This comprises the flow rule descriptor itself, attributes, pattern and
983  * actions list. NULL components in @p src are skipped.
984  *
985  * @param[out] dst
986  *   Destination buffer. Can be NULL if @p size is zero.
987  * @param size
988  *   Size of @p dst in bytes.
989  * @param[in] src
990  *   Source flow rule descriptor.
991  * @param[out] error
992  *   Perform verbose error reporting if not NULL.
993  *
994  * @return
995  *   A positive value representing the number of bytes needed to store all
996  *   components including the descriptor regardless of @p size on success
997  *   (@p buf contents are truncated to @p size if not large enough), a
998  *   negative errno value otherwise and rte_errno is set.
999  */
1000 static int
1001 rte_flow_conv_rule(struct rte_flow_conv_rule *dst,
1002 		   const size_t size,
1003 		   const struct rte_flow_conv_rule *src,
1004 		   struct rte_flow_error *error)
1005 {
1006 	size_t off;
1007 	int ret;
1008 
1009 	rte_memcpy(dst,
1010 		   (&(struct rte_flow_conv_rule){
1011 			.attr = NULL,
1012 			.pattern = NULL,
1013 			.actions = NULL,
1014 		   }),
1015 		   size > sizeof(*dst) ? sizeof(*dst) : size);
1016 	off = sizeof(*dst);
1017 	if (src->attr_ro) {
1018 		off = RTE_ALIGN_CEIL(off, sizeof(double));
1019 		if (size && size >= off + sizeof(*dst->attr))
1020 			dst->attr = rte_memcpy
1021 				((void *)((uintptr_t)dst + off),
1022 				 src->attr_ro, sizeof(*dst->attr));
1023 		off += sizeof(*dst->attr);
1024 	}
1025 	if (src->pattern_ro) {
1026 		off = RTE_ALIGN_CEIL(off, sizeof(double));
1027 		ret = rte_flow_conv_pattern((void *)((uintptr_t)dst + off),
1028 					    size > off ? size - off : 0,
1029 					    src->pattern_ro, 0, error);
1030 		if (ret < 0)
1031 			return ret;
1032 		if (size && size >= off + (size_t)ret)
1033 			dst->pattern = (void *)((uintptr_t)dst + off);
1034 		off += ret;
1035 	}
1036 	if (src->actions_ro) {
1037 		off = RTE_ALIGN_CEIL(off, sizeof(double));
1038 		ret = rte_flow_conv_actions((void *)((uintptr_t)dst + off),
1039 					    size > off ? size - off : 0,
1040 					    src->actions_ro, 0, error);
1041 		if (ret < 0)
1042 			return ret;
1043 		if (size >= off + (size_t)ret)
1044 			dst->actions = (void *)((uintptr_t)dst + off);
1045 		off += ret;
1046 	}
1047 	return off;
1048 }
1049 
1050 /**
1051  * Retrieve the name of a pattern item/action type.
1052  *
1053  * @param is_action
1054  *   Nonzero when @p src represents an action type instead of a pattern item
1055  *   type.
1056  * @param is_ptr
1057  *   Nonzero to write string address instead of contents into @p dst.
1058  * @param[out] dst
1059  *   Destination buffer. Can be NULL if @p size is zero.
1060  * @param size
1061  *   Size of @p dst in bytes.
1062  * @param[in] src
1063  *   Depending on @p is_action, source pattern item or action type cast as a
1064  *   pointer.
1065  * @param[out] error
1066  *   Perform verbose error reporting if not NULL.
1067  *
1068  * @return
1069  *   A positive value representing the number of bytes needed to store the
1070  *   name or its address regardless of @p size on success (@p buf contents
1071  *   are truncated to @p size if not large enough), a negative errno value
1072  *   otherwise and rte_errno is set.
1073  */
1074 static int
1075 rte_flow_conv_name(int is_action,
1076 		   int is_ptr,
1077 		   char *dst,
1078 		   const size_t size,
1079 		   const void *src,
1080 		   struct rte_flow_error *error)
1081 {
1082 	struct desc_info {
1083 		const struct rte_flow_desc_data *data;
1084 		size_t num;
1085 	};
1086 	static const struct desc_info info_rep[2] = {
1087 		{ rte_flow_desc_item, RTE_DIM(rte_flow_desc_item), },
1088 		{ rte_flow_desc_action, RTE_DIM(rte_flow_desc_action), },
1089 	};
1090 	const struct desc_info *const info = &info_rep[!!is_action];
1091 	unsigned int type = (uintptr_t)src;
1092 
1093 	if (type >= info->num)
1094 		return rte_flow_error_set
1095 			(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1096 			 "unknown object type to retrieve the name of");
1097 	if (!is_ptr)
1098 		return strlcpy(dst, info->data[type].name, size);
1099 	if (size >= sizeof(const char **))
1100 		*((const char **)dst) = info->data[type].name;
1101 	return sizeof(const char **);
1102 }
1103 
1104 /** Helper function to convert flow API objects. */
1105 int
1106 rte_flow_conv(enum rte_flow_conv_op op,
1107 	      void *dst,
1108 	      size_t size,
1109 	      const void *src,
1110 	      struct rte_flow_error *error)
1111 {
1112 	int ret;
1113 
1114 	switch (op) {
1115 		const struct rte_flow_attr *attr;
1116 		const struct rte_flow_item *item;
1117 
1118 	case RTE_FLOW_CONV_OP_NONE:
1119 		ret = 0;
1120 		break;
1121 	case RTE_FLOW_CONV_OP_ATTR:
1122 		attr = src;
1123 		if (size > sizeof(*attr))
1124 			size = sizeof(*attr);
1125 		rte_memcpy(dst, attr, size);
1126 		ret = sizeof(*attr);
1127 		break;
1128 	case RTE_FLOW_CONV_OP_ITEM:
1129 		ret = rte_flow_conv_pattern(dst, size, src, 1, error);
1130 		break;
1131 	case RTE_FLOW_CONV_OP_ITEM_MASK:
1132 		item = src;
1133 		if (item->mask == NULL) {
1134 			ret = rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_MASK,
1135 						 item, "Mask not provided");
1136 			break;
1137 		}
1138 		ret = rte_flow_conv_item_spec(dst, size, src, RTE_FLOW_CONV_ITEM_MASK);
1139 		break;
1140 	case RTE_FLOW_CONV_OP_ACTION:
1141 		ret = rte_flow_conv_actions(dst, size, src, 1, error);
1142 		break;
1143 	case RTE_FLOW_CONV_OP_PATTERN:
1144 		ret = rte_flow_conv_pattern(dst, size, src, 0, error);
1145 		break;
1146 	case RTE_FLOW_CONV_OP_ACTIONS:
1147 		ret = rte_flow_conv_actions(dst, size, src, 0, error);
1148 		break;
1149 	case RTE_FLOW_CONV_OP_RULE:
1150 		ret = rte_flow_conv_rule(dst, size, src, error);
1151 		break;
1152 	case RTE_FLOW_CONV_OP_ITEM_NAME:
1153 		ret = rte_flow_conv_name(0, 0, dst, size, src, error);
1154 		break;
1155 	case RTE_FLOW_CONV_OP_ACTION_NAME:
1156 		ret = rte_flow_conv_name(1, 0, dst, size, src, error);
1157 		break;
1158 	case RTE_FLOW_CONV_OP_ITEM_NAME_PTR:
1159 		ret = rte_flow_conv_name(0, 1, dst, size, src, error);
1160 		break;
1161 	case RTE_FLOW_CONV_OP_ACTION_NAME_PTR:
1162 		ret = rte_flow_conv_name(1, 1, dst, size, src, error);
1163 		break;
1164 	default:
1165 		ret = rte_flow_error_set
1166 		(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1167 		 "unknown object conversion operation");
1168 	}
1169 
1170 	rte_flow_trace_conv(op, dst, size, src, ret);
1171 
1172 	return ret;
1173 }
1174 
1175 /** Store a full rte_flow description. */
size_t
rte_flow_copy(struct rte_flow_desc *desc, size_t len,
	      const struct rte_flow_attr *attr,
	      const struct rte_flow_item *items,
	      const struct rte_flow_action *actions)
{
	/*
	 * Overlap struct rte_flow_conv with struct rte_flow_desc in order
	 * to convert the former to the latter without wasting space.
	 */
	/* dst points inside desc so that the conv_rule's pattern/actions
	 * fields line up with desc->items/desc->actions; NULL when the
	 * caller only asks for the required size (len == 0). */
	struct rte_flow_conv_rule *dst =
		len ?
		(void *)((uintptr_t)desc +
			 (offsetof(struct rte_flow_desc, actions) -
			  offsetof(struct rte_flow_conv_rule, actions))) :
		NULL;
	/* Space usable by the conversion, after accounting for the part of
	 * desc that precedes the overlapped region. */
	size_t dst_size =
		len > sizeof(*desc) - sizeof(*dst) ?
		len - (sizeof(*desc) - sizeof(*dst)) :
		0;
	/* attr_ro is NULL: the attributes are copied into desc->attr below
	 * rather than serialized by rte_flow_conv(). */
	struct rte_flow_conv_rule src = {
		.attr_ro = NULL,
		.pattern_ro = items,
		.actions_ro = actions,
	};
	int ret;

	RTE_BUILD_BUG_ON(sizeof(struct rte_flow_desc) <
			 sizeof(struct rte_flow_conv_rule));
	/* Sanity-check that the two structures actually overlap as the
	 * pointer arithmetic above assumes; bail out otherwise. */
	if (dst_size &&
	    (&dst->pattern != &desc->items ||
	     &dst->actions != &desc->actions ||
	     (uintptr_t)(dst + 1) != (uintptr_t)(desc + 1))) {
		rte_errno = EINVAL;
		return 0;
	}
	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, dst, dst_size, &src, NULL);
	if (ret < 0)
		return 0;
	/* Report the full size including the non-overlapped desc prefix. */
	ret += sizeof(*desc) - sizeof(*dst);
	rte_memcpy(desc,
		   (&(struct rte_flow_desc){
			.size = ret,
			.attr = *attr,
			.items = dst_size ? dst->pattern : NULL,
			.actions = dst_size ? dst->actions : NULL,
		   }),
		   len > sizeof(*desc) ? sizeof(*desc) : len);

	rte_flow_trace_copy(desc, len, attr, items, actions, ret);

	return ret;
}
1229 
1230 int
1231 rte_flow_dev_dump(uint16_t port_id, struct rte_flow *flow,
1232 			FILE *file, struct rte_flow_error *error)
1233 {
1234 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1235 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1236 	int ret;
1237 
1238 	if (unlikely(!ops))
1239 		return -rte_errno;
1240 	if (likely(!!ops->dev_dump)) {
1241 		fts_enter(dev);
1242 		ret = ops->dev_dump(dev, flow, file, error);
1243 		fts_exit(dev);
1244 		return flow_err(port_id, ret, error);
1245 	}
1246 	return rte_flow_error_set(error, ENOSYS,
1247 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1248 				  NULL, rte_strerror(ENOSYS));
1249 }
1250 
1251 int
1252 rte_flow_get_aged_flows(uint16_t port_id, void **contexts,
1253 		    uint32_t nb_contexts, struct rte_flow_error *error)
1254 {
1255 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1256 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1257 	int ret;
1258 
1259 	if (unlikely(!ops))
1260 		return -rte_errno;
1261 	if (likely(!!ops->get_aged_flows)) {
1262 		fts_enter(dev);
1263 		ret = ops->get_aged_flows(dev, contexts, nb_contexts, error);
1264 		fts_exit(dev);
1265 		ret = flow_err(port_id, ret, error);
1266 
1267 		rte_flow_trace_get_aged_flows(port_id, contexts, nb_contexts, ret);
1268 
1269 		return ret;
1270 	}
1271 	return rte_flow_error_set(error, ENOTSUP,
1272 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1273 				  NULL, rte_strerror(ENOTSUP));
1274 }
1275 
1276 int
1277 rte_flow_get_q_aged_flows(uint16_t port_id, uint32_t queue_id, void **contexts,
1278 			  uint32_t nb_contexts, struct rte_flow_error *error)
1279 {
1280 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1281 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1282 	int ret;
1283 
1284 	if (unlikely(!ops))
1285 		return -rte_errno;
1286 	if (likely(!!ops->get_q_aged_flows)) {
1287 		fts_enter(dev);
1288 		ret = ops->get_q_aged_flows(dev, queue_id, contexts,
1289 					    nb_contexts, error);
1290 		fts_exit(dev);
1291 		ret = flow_err(port_id, ret, error);
1292 
1293 		rte_flow_trace_get_q_aged_flows(port_id, queue_id, contexts,
1294 						nb_contexts, ret);
1295 
1296 		return ret;
1297 	}
1298 	return rte_flow_error_set(error, ENOTSUP,
1299 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1300 				  NULL, rte_strerror(ENOTSUP));
1301 }
1302 
1303 struct rte_flow_action_handle *
1304 rte_flow_action_handle_create(uint16_t port_id,
1305 			      const struct rte_flow_indir_action_conf *conf,
1306 			      const struct rte_flow_action *action,
1307 			      struct rte_flow_error *error)
1308 {
1309 	struct rte_flow_action_handle *handle;
1310 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1311 
1312 	if (unlikely(!ops))
1313 		return NULL;
1314 	if (unlikely(!ops->action_handle_create)) {
1315 		rte_flow_error_set(error, ENOSYS,
1316 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
1317 				   rte_strerror(ENOSYS));
1318 		return NULL;
1319 	}
1320 	handle = ops->action_handle_create(&rte_eth_devices[port_id],
1321 					   conf, action, error);
1322 	if (handle == NULL)
1323 		flow_err(port_id, -rte_errno, error);
1324 
1325 	rte_flow_trace_action_handle_create(port_id, conf, action, handle);
1326 
1327 	return handle;
1328 }
1329 
1330 int
1331 rte_flow_action_handle_destroy(uint16_t port_id,
1332 			       struct rte_flow_action_handle *handle,
1333 			       struct rte_flow_error *error)
1334 {
1335 	int ret;
1336 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1337 
1338 	if (unlikely(!ops))
1339 		return -rte_errno;
1340 	if (unlikely(!ops->action_handle_destroy))
1341 		return rte_flow_error_set(error, ENOSYS,
1342 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1343 					  NULL, rte_strerror(ENOSYS));
1344 	ret = ops->action_handle_destroy(&rte_eth_devices[port_id],
1345 					 handle, error);
1346 	ret = flow_err(port_id, ret, error);
1347 
1348 	rte_flow_trace_action_handle_destroy(port_id, handle, ret);
1349 
1350 	return ret;
1351 }
1352 
1353 int
1354 rte_flow_action_handle_update(uint16_t port_id,
1355 			      struct rte_flow_action_handle *handle,
1356 			      const void *update,
1357 			      struct rte_flow_error *error)
1358 {
1359 	int ret;
1360 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1361 
1362 	if (unlikely(!ops))
1363 		return -rte_errno;
1364 	if (unlikely(!ops->action_handle_update))
1365 		return rte_flow_error_set(error, ENOSYS,
1366 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1367 					  NULL, rte_strerror(ENOSYS));
1368 	ret = ops->action_handle_update(&rte_eth_devices[port_id], handle,
1369 					update, error);
1370 	ret = flow_err(port_id, ret, error);
1371 
1372 	rte_flow_trace_action_handle_update(port_id, handle, update, ret);
1373 
1374 	return ret;
1375 }
1376 
1377 int
1378 rte_flow_action_handle_query(uint16_t port_id,
1379 			     const struct rte_flow_action_handle *handle,
1380 			     void *data,
1381 			     struct rte_flow_error *error)
1382 {
1383 	int ret;
1384 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1385 
1386 	if (unlikely(!ops))
1387 		return -rte_errno;
1388 	if (unlikely(!ops->action_handle_query))
1389 		return rte_flow_error_set(error, ENOSYS,
1390 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1391 					  NULL, rte_strerror(ENOSYS));
1392 	ret = ops->action_handle_query(&rte_eth_devices[port_id], handle,
1393 				       data, error);
1394 	ret = flow_err(port_id, ret, error);
1395 
1396 	rte_flow_trace_action_handle_query(port_id, handle, data, ret);
1397 
1398 	return ret;
1399 }
1400 
1401 int
1402 rte_flow_tunnel_decap_set(uint16_t port_id,
1403 			  struct rte_flow_tunnel *tunnel,
1404 			  struct rte_flow_action **actions,
1405 			  uint32_t *num_of_actions,
1406 			  struct rte_flow_error *error)
1407 {
1408 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1409 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1410 	int ret;
1411 
1412 	if (unlikely(!ops))
1413 		return -rte_errno;
1414 	if (likely(!!ops->tunnel_decap_set)) {
1415 		ret = flow_err(port_id,
1416 			       ops->tunnel_decap_set(dev, tunnel, actions,
1417 						     num_of_actions, error),
1418 			       error);
1419 
1420 		rte_flow_trace_tunnel_decap_set(port_id, tunnel, actions,
1421 						num_of_actions, ret);
1422 
1423 		return ret;
1424 	}
1425 	return rte_flow_error_set(error, ENOTSUP,
1426 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1427 				  NULL, rte_strerror(ENOTSUP));
1428 }
1429 
1430 int
1431 rte_flow_tunnel_match(uint16_t port_id,
1432 		      struct rte_flow_tunnel *tunnel,
1433 		      struct rte_flow_item **items,
1434 		      uint32_t *num_of_items,
1435 		      struct rte_flow_error *error)
1436 {
1437 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1438 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1439 	int ret;
1440 
1441 	if (unlikely(!ops))
1442 		return -rte_errno;
1443 	if (likely(!!ops->tunnel_match)) {
1444 		ret = flow_err(port_id,
1445 			       ops->tunnel_match(dev, tunnel, items,
1446 						 num_of_items, error),
1447 			       error);
1448 
1449 		rte_flow_trace_tunnel_match(port_id, tunnel, items, num_of_items,
1450 					    ret);
1451 
1452 		return ret;
1453 	}
1454 	return rte_flow_error_set(error, ENOTSUP,
1455 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1456 				  NULL, rte_strerror(ENOTSUP));
1457 }
1458 
1459 int
1460 rte_flow_get_restore_info(uint16_t port_id,
1461 			  struct rte_mbuf *m,
1462 			  struct rte_flow_restore_info *restore_info,
1463 			  struct rte_flow_error *error)
1464 {
1465 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1466 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1467 	int ret;
1468 
1469 	if (unlikely(!ops))
1470 		return -rte_errno;
1471 	if (likely(!!ops->get_restore_info)) {
1472 		ret = flow_err(port_id,
1473 			       ops->get_restore_info(dev, m, restore_info,
1474 						     error),
1475 			       error);
1476 
1477 		rte_flow_trace_get_restore_info(port_id, m, restore_info, ret);
1478 
1479 		return ret;
1480 	}
1481 	return rte_flow_error_set(error, ENOTSUP,
1482 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1483 				  NULL, rte_strerror(ENOTSUP));
1484 }
1485 
/*
 * Registration state for the "restore info" dynamic mbuf flag.
 * @value stays 0 until rte_flow_restore_info_dynflag_register() succeeds,
 * after which it holds the single-bit mask derived from the registered
 * flag offset; @desc is the request passed to rte_mbuf_dynflag_register().
 */
static struct {
	const struct rte_mbuf_dynflag desc;
	uint64_t value;
} flow_restore_info_dynflag = {
	.desc = { .name = "RTE_MBUF_F_RX_RESTORE_INFO", },
};
1492 
1493 uint64_t
1494 rte_flow_restore_info_dynflag(void)
1495 {
1496 	return flow_restore_info_dynflag.value;
1497 }
1498 
1499 int
1500 rte_flow_restore_info_dynflag_register(void)
1501 {
1502 	if (flow_restore_info_dynflag.value == 0) {
1503 		int offset = rte_mbuf_dynflag_register(&flow_restore_info_dynflag.desc);
1504 
1505 		if (offset < 0)
1506 			return -1;
1507 		flow_restore_info_dynflag.value = RTE_BIT64(offset);
1508 	}
1509 
1510 	return 0;
1511 }
1512 
1513 int
1514 rte_flow_tunnel_action_decap_release(uint16_t port_id,
1515 				     struct rte_flow_action *actions,
1516 				     uint32_t num_of_actions,
1517 				     struct rte_flow_error *error)
1518 {
1519 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1520 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1521 	int ret;
1522 
1523 	if (unlikely(!ops))
1524 		return -rte_errno;
1525 	if (likely(!!ops->tunnel_action_decap_release)) {
1526 		ret = flow_err(port_id,
1527 			       ops->tunnel_action_decap_release(dev, actions,
1528 								num_of_actions,
1529 								error),
1530 			       error);
1531 
1532 		rte_flow_trace_tunnel_action_decap_release(port_id, actions,
1533 							   num_of_actions, ret);
1534 
1535 		return ret;
1536 	}
1537 	return rte_flow_error_set(error, ENOTSUP,
1538 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1539 				  NULL, rte_strerror(ENOTSUP));
1540 }
1541 
1542 int
1543 rte_flow_tunnel_item_release(uint16_t port_id,
1544 			     struct rte_flow_item *items,
1545 			     uint32_t num_of_items,
1546 			     struct rte_flow_error *error)
1547 {
1548 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1549 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1550 	int ret;
1551 
1552 	if (unlikely(!ops))
1553 		return -rte_errno;
1554 	if (likely(!!ops->tunnel_item_release)) {
1555 		ret = flow_err(port_id,
1556 			       ops->tunnel_item_release(dev, items,
1557 							num_of_items, error),
1558 			       error);
1559 
1560 		rte_flow_trace_tunnel_item_release(port_id, items, num_of_items, ret);
1561 
1562 		return ret;
1563 	}
1564 	return rte_flow_error_set(error, ENOTSUP,
1565 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1566 				  NULL, rte_strerror(ENOTSUP));
1567 }
1568 
1569 int
1570 rte_flow_pick_transfer_proxy(uint16_t port_id, uint16_t *proxy_port_id,
1571 			     struct rte_flow_error *error)
1572 {
1573 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1574 	struct rte_eth_dev *dev;
1575 	int ret;
1576 
1577 	if (unlikely(ops == NULL))
1578 		return -rte_errno;
1579 
1580 	if (ops->pick_transfer_proxy == NULL) {
1581 		*proxy_port_id = port_id;
1582 		return 0;
1583 	}
1584 
1585 	dev = &rte_eth_devices[port_id];
1586 
1587 	ret = flow_err(port_id,
1588 		       ops->pick_transfer_proxy(dev, proxy_port_id, error),
1589 		       error);
1590 
1591 	rte_flow_trace_pick_transfer_proxy(port_id, proxy_port_id, ret);
1592 
1593 	return ret;
1594 }
1595 
1596 struct rte_flow_item_flex_handle *
1597 rte_flow_flex_item_create(uint16_t port_id,
1598 			  const struct rte_flow_item_flex_conf *conf,
1599 			  struct rte_flow_error *error)
1600 {
1601 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1602 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1603 	struct rte_flow_item_flex_handle *handle;
1604 
1605 	if (unlikely(!ops))
1606 		return NULL;
1607 	if (unlikely(!ops->flex_item_create)) {
1608 		rte_flow_error_set(error, ENOTSUP,
1609 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1610 				   NULL, rte_strerror(ENOTSUP));
1611 		return NULL;
1612 	}
1613 	handle = ops->flex_item_create(dev, conf, error);
1614 	if (handle == NULL)
1615 		flow_err(port_id, -rte_errno, error);
1616 
1617 	rte_flow_trace_flex_item_create(port_id, conf, handle);
1618 
1619 	return handle;
1620 }
1621 
1622 int
1623 rte_flow_flex_item_release(uint16_t port_id,
1624 			   const struct rte_flow_item_flex_handle *handle,
1625 			   struct rte_flow_error *error)
1626 {
1627 	int ret;
1628 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1629 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1630 
1631 	if (unlikely(!ops || !ops->flex_item_release))
1632 		return rte_flow_error_set(error, ENOTSUP,
1633 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1634 					  NULL, rte_strerror(ENOTSUP));
1635 	ret = ops->flex_item_release(dev, handle, error);
1636 	ret = flow_err(port_id, ret, error);
1637 
1638 	rte_flow_trace_flex_item_release(port_id, handle, ret);
1639 
1640 	return ret;
1641 }
1642 
1643 int
1644 rte_flow_info_get(uint16_t port_id,
1645 		  struct rte_flow_port_info *port_info,
1646 		  struct rte_flow_queue_info *queue_info,
1647 		  struct rte_flow_error *error)
1648 {
1649 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1650 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1651 	int ret;
1652 
1653 	if (unlikely(!ops))
1654 		return -rte_errno;
1655 	if (dev->data->dev_configured == 0) {
1656 		FLOW_LOG(INFO,
1657 			"Device with port_id=%"PRIu16" is not configured.",
1658 			port_id);
1659 		return -EINVAL;
1660 	}
1661 	if (port_info == NULL) {
1662 		FLOW_LOG(ERR, "Port %"PRIu16" info is NULL.", port_id);
1663 		return -EINVAL;
1664 	}
1665 	if (likely(!!ops->info_get)) {
1666 		ret = flow_err(port_id,
1667 			       ops->info_get(dev, port_info, queue_info, error),
1668 			       error);
1669 
1670 		rte_flow_trace_info_get(port_id, port_info, queue_info, ret);
1671 
1672 		return ret;
1673 	}
1674 	return rte_flow_error_set(error, ENOTSUP,
1675 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1676 				  NULL, rte_strerror(ENOTSUP));
1677 }
1678 
1679 int
1680 rte_flow_configure(uint16_t port_id,
1681 		   const struct rte_flow_port_attr *port_attr,
1682 		   uint16_t nb_queue,
1683 		   const struct rte_flow_queue_attr *queue_attr[],
1684 		   struct rte_flow_error *error)
1685 {
1686 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1687 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1688 	int ret;
1689 
1690 	if (unlikely(!ops))
1691 		return -rte_errno;
1692 	if (dev->data->dev_configured == 0) {
1693 		FLOW_LOG(INFO,
1694 			"Device with port_id=%"PRIu16" is not configured.",
1695 			port_id);
1696 		return -EINVAL;
1697 	}
1698 	if (dev->data->dev_started != 0) {
1699 		FLOW_LOG(INFO,
1700 			"Device with port_id=%"PRIu16" already started.",
1701 			port_id);
1702 		return -EINVAL;
1703 	}
1704 	if (port_attr == NULL) {
1705 		FLOW_LOG(ERR, "Port %"PRIu16" info is NULL.", port_id);
1706 		return -EINVAL;
1707 	}
1708 	if (queue_attr == NULL) {
1709 		FLOW_LOG(ERR, "Port %"PRIu16" queue info is NULL.", port_id);
1710 		return -EINVAL;
1711 	}
1712 	if ((port_attr->flags & RTE_FLOW_PORT_FLAG_SHARE_INDIRECT) &&
1713 	     !rte_eth_dev_is_valid_port(port_attr->host_port_id)) {
1714 		return rte_flow_error_set(error, ENODEV,
1715 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1716 					  NULL, rte_strerror(ENODEV));
1717 	}
1718 	if (likely(!!ops->configure)) {
1719 		ret = ops->configure(dev, port_attr, nb_queue, queue_attr, error);
1720 		if (ret == 0)
1721 			dev->data->flow_configured = 1;
1722 		ret = flow_err(port_id, ret, error);
1723 
1724 		rte_flow_trace_configure(port_id, port_attr, nb_queue, queue_attr, ret);
1725 
1726 		return ret;
1727 	}
1728 	return rte_flow_error_set(error, ENOTSUP,
1729 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1730 				  NULL, rte_strerror(ENOTSUP));
1731 }
1732 
1733 struct rte_flow_pattern_template *
1734 rte_flow_pattern_template_create(uint16_t port_id,
1735 		const struct rte_flow_pattern_template_attr *template_attr,
1736 		const struct rte_flow_item pattern[],
1737 		struct rte_flow_error *error)
1738 {
1739 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1740 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1741 	struct rte_flow_pattern_template *template;
1742 
1743 	if (unlikely(!ops))
1744 		return NULL;
1745 	if (dev->data->flow_configured == 0) {
1746 		FLOW_LOG(INFO,
1747 			"Flow engine on port_id=%"PRIu16" is not configured.",
1748 			port_id);
1749 		rte_flow_error_set(error, EINVAL,
1750 				RTE_FLOW_ERROR_TYPE_STATE,
1751 				NULL, rte_strerror(EINVAL));
1752 		return NULL;
1753 	}
1754 	if (template_attr == NULL) {
1755 		FLOW_LOG(ERR,
1756 			     "Port %"PRIu16" template attr is NULL.",
1757 			     port_id);
1758 		rte_flow_error_set(error, EINVAL,
1759 				   RTE_FLOW_ERROR_TYPE_ATTR,
1760 				   NULL, rte_strerror(EINVAL));
1761 		return NULL;
1762 	}
1763 	if (pattern == NULL) {
1764 		FLOW_LOG(ERR,
1765 			     "Port %"PRIu16" pattern is NULL.",
1766 			     port_id);
1767 		rte_flow_error_set(error, EINVAL,
1768 				   RTE_FLOW_ERROR_TYPE_ATTR,
1769 				   NULL, rte_strerror(EINVAL));
1770 		return NULL;
1771 	}
1772 	if (likely(!!ops->pattern_template_create)) {
1773 		template = ops->pattern_template_create(dev, template_attr,
1774 							pattern, error);
1775 		if (template == NULL)
1776 			flow_err(port_id, -rte_errno, error);
1777 
1778 		rte_flow_trace_pattern_template_create(port_id, template_attr,
1779 						       pattern, template);
1780 
1781 		return template;
1782 	}
1783 	rte_flow_error_set(error, ENOTSUP,
1784 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1785 			   NULL, rte_strerror(ENOTSUP));
1786 	return NULL;
1787 }
1788 
1789 int
1790 rte_flow_pattern_template_destroy(uint16_t port_id,
1791 		struct rte_flow_pattern_template *pattern_template,
1792 		struct rte_flow_error *error)
1793 {
1794 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1795 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1796 	int ret;
1797 
1798 	if (unlikely(!ops))
1799 		return -rte_errno;
1800 	if (unlikely(pattern_template == NULL))
1801 		return 0;
1802 	if (likely(!!ops->pattern_template_destroy)) {
1803 		ret = flow_err(port_id,
1804 			       ops->pattern_template_destroy(dev,
1805 							     pattern_template,
1806 							     error),
1807 			       error);
1808 
1809 		rte_flow_trace_pattern_template_destroy(port_id, pattern_template,
1810 							ret);
1811 
1812 		return ret;
1813 	}
1814 	return rte_flow_error_set(error, ENOTSUP,
1815 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1816 				  NULL, rte_strerror(ENOTSUP));
1817 }
1818 
1819 struct rte_flow_actions_template *
1820 rte_flow_actions_template_create(uint16_t port_id,
1821 			const struct rte_flow_actions_template_attr *template_attr,
1822 			const struct rte_flow_action actions[],
1823 			const struct rte_flow_action masks[],
1824 			struct rte_flow_error *error)
1825 {
1826 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1827 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1828 	struct rte_flow_actions_template *template;
1829 
1830 	if (unlikely(!ops))
1831 		return NULL;
1832 	if (dev->data->flow_configured == 0) {
1833 		FLOW_LOG(INFO,
1834 			"Flow engine on port_id=%"PRIu16" is not configured.",
1835 			port_id);
1836 		rte_flow_error_set(error, EINVAL,
1837 				   RTE_FLOW_ERROR_TYPE_STATE,
1838 				   NULL, rte_strerror(EINVAL));
1839 		return NULL;
1840 	}
1841 	if (template_attr == NULL) {
1842 		FLOW_LOG(ERR,
1843 			     "Port %"PRIu16" template attr is NULL.",
1844 			     port_id);
1845 		rte_flow_error_set(error, EINVAL,
1846 				   RTE_FLOW_ERROR_TYPE_ATTR,
1847 				   NULL, rte_strerror(EINVAL));
1848 		return NULL;
1849 	}
1850 	if (actions == NULL) {
1851 		FLOW_LOG(ERR,
1852 			     "Port %"PRIu16" actions is NULL.",
1853 			     port_id);
1854 		rte_flow_error_set(error, EINVAL,
1855 				   RTE_FLOW_ERROR_TYPE_ATTR,
1856 				   NULL, rte_strerror(EINVAL));
1857 		return NULL;
1858 	}
1859 	if (masks == NULL) {
1860 		FLOW_LOG(ERR,
1861 			     "Port %"PRIu16" masks is NULL.",
1862 			     port_id);
1863 		rte_flow_error_set(error, EINVAL,
1864 				   RTE_FLOW_ERROR_TYPE_ATTR,
1865 				   NULL, rte_strerror(EINVAL));
1866 
1867 	}
1868 	if (likely(!!ops->actions_template_create)) {
1869 		template = ops->actions_template_create(dev, template_attr,
1870 							actions, masks, error);
1871 		if (template == NULL)
1872 			flow_err(port_id, -rte_errno, error);
1873 
1874 		rte_flow_trace_actions_template_create(port_id, template_attr, actions,
1875 						       masks, template);
1876 
1877 		return template;
1878 	}
1879 	rte_flow_error_set(error, ENOTSUP,
1880 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1881 			   NULL, rte_strerror(ENOTSUP));
1882 	return NULL;
1883 }
1884 
1885 int
1886 rte_flow_actions_template_destroy(uint16_t port_id,
1887 			struct rte_flow_actions_template *actions_template,
1888 			struct rte_flow_error *error)
1889 {
1890 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1891 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1892 	int ret;
1893 
1894 	if (unlikely(!ops))
1895 		return -rte_errno;
1896 	if (unlikely(actions_template == NULL))
1897 		return 0;
1898 	if (likely(!!ops->actions_template_destroy)) {
1899 		ret = flow_err(port_id,
1900 			       ops->actions_template_destroy(dev,
1901 							     actions_template,
1902 							     error),
1903 			       error);
1904 
1905 		rte_flow_trace_actions_template_destroy(port_id, actions_template,
1906 							ret);
1907 
1908 		return ret;
1909 	}
1910 	return rte_flow_error_set(error, ENOTSUP,
1911 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1912 				  NULL, rte_strerror(ENOTSUP));
1913 }
1914 
1915 struct rte_flow_template_table *
1916 rte_flow_template_table_create(uint16_t port_id,
1917 			const struct rte_flow_template_table_attr *table_attr,
1918 			struct rte_flow_pattern_template *pattern_templates[],
1919 			uint8_t nb_pattern_templates,
1920 			struct rte_flow_actions_template *actions_templates[],
1921 			uint8_t nb_actions_templates,
1922 			struct rte_flow_error *error)
1923 {
1924 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1925 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1926 	struct rte_flow_template_table *table;
1927 
1928 	if (unlikely(!ops))
1929 		return NULL;
1930 	if (dev->data->flow_configured == 0) {
1931 		FLOW_LOG(INFO,
1932 			"Flow engine on port_id=%"PRIu16" is not configured.",
1933 			port_id);
1934 		rte_flow_error_set(error, EINVAL,
1935 				   RTE_FLOW_ERROR_TYPE_STATE,
1936 				   NULL, rte_strerror(EINVAL));
1937 		return NULL;
1938 	}
1939 	if (table_attr == NULL) {
1940 		FLOW_LOG(ERR,
1941 			     "Port %"PRIu16" table attr is NULL.",
1942 			     port_id);
1943 		rte_flow_error_set(error, EINVAL,
1944 				   RTE_FLOW_ERROR_TYPE_ATTR,
1945 				   NULL, rte_strerror(EINVAL));
1946 		return NULL;
1947 	}
1948 	if (pattern_templates == NULL) {
1949 		FLOW_LOG(ERR,
1950 			     "Port %"PRIu16" pattern templates is NULL.",
1951 			     port_id);
1952 		rte_flow_error_set(error, EINVAL,
1953 				   RTE_FLOW_ERROR_TYPE_ATTR,
1954 				   NULL, rte_strerror(EINVAL));
1955 		return NULL;
1956 	}
1957 	if (actions_templates == NULL) {
1958 		FLOW_LOG(ERR,
1959 			     "Port %"PRIu16" actions templates is NULL.",
1960 			     port_id);
1961 		rte_flow_error_set(error, EINVAL,
1962 				   RTE_FLOW_ERROR_TYPE_ATTR,
1963 				   NULL, rte_strerror(EINVAL));
1964 		return NULL;
1965 	}
1966 	if (likely(!!ops->template_table_create)) {
1967 		table = ops->template_table_create(dev, table_attr,
1968 					pattern_templates, nb_pattern_templates,
1969 					actions_templates, nb_actions_templates,
1970 					error);
1971 		if (table == NULL)
1972 			flow_err(port_id, -rte_errno, error);
1973 
1974 		rte_flow_trace_template_table_create(port_id, table_attr,
1975 						     pattern_templates,
1976 						     nb_pattern_templates,
1977 						     actions_templates,
1978 						     nb_actions_templates, table);
1979 
1980 		return table;
1981 	}
1982 	rte_flow_error_set(error, ENOTSUP,
1983 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1984 			   NULL, rte_strerror(ENOTSUP));
1985 	return NULL;
1986 }
1987 
1988 int
1989 rte_flow_template_table_destroy(uint16_t port_id,
1990 				struct rte_flow_template_table *template_table,
1991 				struct rte_flow_error *error)
1992 {
1993 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1994 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1995 	int ret;
1996 
1997 	if (unlikely(!ops))
1998 		return -rte_errno;
1999 	if (unlikely(template_table == NULL))
2000 		return 0;
2001 	if (likely(!!ops->template_table_destroy)) {
2002 		ret = flow_err(port_id,
2003 			       ops->template_table_destroy(dev,
2004 							   template_table,
2005 							   error),
2006 			       error);
2007 
2008 		rte_flow_trace_template_table_destroy(port_id, template_table,
2009 						      ret);
2010 
2011 		return ret;
2012 	}
2013 	return rte_flow_error_set(error, ENOTSUP,
2014 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2015 				  NULL, rte_strerror(ENOTSUP));
2016 }
2017 
2018 int
2019 rte_flow_group_set_miss_actions(uint16_t port_id,
2020 				uint32_t group_id,
2021 				const struct rte_flow_group_attr *attr,
2022 				const struct rte_flow_action actions[],
2023 				struct rte_flow_error *error)
2024 {
2025 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2026 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2027 
2028 	if (unlikely(!ops))
2029 		return -rte_errno;
2030 	if (likely(!!ops->group_set_miss_actions)) {
2031 		return flow_err(port_id,
2032 				ops->group_set_miss_actions(dev, group_id, attr, actions, error),
2033 				error);
2034 	}
2035 	return rte_flow_error_set(error, ENOTSUP,
2036 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2037 				  NULL, rte_strerror(ENOTSUP));
2038 }
2039 
/*
 * Enqueue a flow rule creation on the given port/queue (template API).
 *
 * Fast path: dispatches directly through dev->flow_fp_ops.  Port validity
 * and callback checks are compiled in only under RTE_FLOW_DEBUG to keep
 * the release fast path lean.  Returns the new flow handle, or NULL on
 * failure with @error populated by the callee.
 */
struct rte_flow *
rte_flow_async_create(uint16_t port_id,
		      uint32_t queue_id,
		      const struct rte_flow_op_attr *op_attr,
		      struct rte_flow_template_table *template_table,
		      const struct rte_flow_item pattern[],
		      uint8_t pattern_template_index,
		      const struct rte_flow_action actions[],
		      uint8_t actions_template_index,
		      void *user_data,
		      struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct rte_flow *flow;

#ifdef RTE_FLOW_DEBUG
	/* Debug-only sanity checks; absent from release builds. */
	if (!rte_eth_dev_is_valid_port(port_id)) {
		rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   rte_strerror(ENODEV));
		return NULL;
	}
	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->async_create == NULL) {
		rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   rte_strerror(ENOSYS));
		return NULL;
	}
#endif

	flow = dev->flow_fp_ops->async_create(dev, queue_id,
					      op_attr, template_table,
					      pattern, pattern_template_index,
					      actions, actions_template_index,
					      user_data, error);

	rte_flow_trace_async_create(port_id, queue_id, op_attr, template_table,
				    pattern, pattern_template_index, actions,
				    actions_template_index, user_data, flow);

	return flow;
}
2080 
/*
 * Enqueue a flow rule creation at a fixed rule index of a template table
 * (no pattern; the index selects the slot directly).
 *
 * Fast path through dev->flow_fp_ops; validity checks compiled in only
 * under RTE_FLOW_DEBUG.  Returns the flow handle or NULL on failure.
 */
struct rte_flow *
rte_flow_async_create_by_index(uint16_t port_id,
			       uint32_t queue_id,
			       const struct rte_flow_op_attr *op_attr,
			       struct rte_flow_template_table *template_table,
			       uint32_t rule_index,
			       const struct rte_flow_action actions[],
			       uint8_t actions_template_index,
			       void *user_data,
			       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct rte_flow *flow;

#ifdef RTE_FLOW_DEBUG
	/* Debug-only sanity checks; absent from release builds. */
	if (!rte_eth_dev_is_valid_port(port_id)) {
		rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   rte_strerror(ENODEV));
		return NULL;
	}
	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->async_create_by_index == NULL) {
		rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   rte_strerror(ENOSYS));
		return NULL;
	}
#endif

	flow = dev->flow_fp_ops->async_create_by_index(dev, queue_id,
						       op_attr, template_table, rule_index,
						       actions, actions_template_index,
						       user_data, error);

	rte_flow_trace_async_create_by_index(port_id, queue_id, op_attr, template_table, rule_index,
					     actions, actions_template_index, user_data, flow);

	return flow;
}
2118 
/*
 * Enqueue a flow rule creation at a fixed rule index, additionally
 * supplying a pattern (combines the by-index and pattern variants).
 *
 * Fast path through dev->flow_fp_ops; validity checks compiled in only
 * under RTE_FLOW_DEBUG.  Returns the flow handle or NULL on failure.
 */
struct rte_flow *
rte_flow_async_create_by_index_with_pattern(uint16_t port_id,
					    uint32_t queue_id,
					    const struct rte_flow_op_attr *op_attr,
					    struct rte_flow_template_table *template_table,
					    uint32_t rule_index,
					    const struct rte_flow_item pattern[],
					    uint8_t pattern_template_index,
					    const struct rte_flow_action actions[],
					    uint8_t actions_template_index,
					    void *user_data,
					    struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct rte_flow *flow;

#ifdef RTE_FLOW_DEBUG
	/* Debug-only sanity checks; absent from release builds. */
	if (!rte_eth_dev_is_valid_port(port_id)) {
		rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   rte_strerror(ENODEV));
		return NULL;
	}
	if (dev->flow_fp_ops == NULL ||
	    dev->flow_fp_ops->async_create_by_index_with_pattern == NULL) {
		rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   rte_strerror(ENOSYS));
		return NULL;
	}
#endif

	flow = dev->flow_fp_ops->async_create_by_index_with_pattern(dev, queue_id, op_attr,
								    template_table, rule_index,
								    pattern, pattern_template_index,
								    actions, actions_template_index,
								    user_data, error);

	rte_flow_trace_async_create_by_index_with_pattern(port_id, queue_id, op_attr,
							  template_table, rule_index, pattern,
							  pattern_template_index, actions,
							  actions_template_index, user_data, flow);

	return flow;
}
2162 
/*
 * Enqueue destruction of a flow rule on the given port/queue.
 *
 * Fast path through dev->flow_fp_ops; validity checks compiled in only
 * under RTE_FLOW_DEBUG.  Returns 0 on success, a negative errno value
 * otherwise (with @error populated by the callee).
 */
int
rte_flow_async_destroy(uint16_t port_id,
		       uint32_t queue_id,
		       const struct rte_flow_op_attr *op_attr,
		       struct rte_flow *flow,
		       void *user_data,
		       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;

#ifdef RTE_FLOW_DEBUG
	/* Debug-only sanity checks; absent from release builds. */
	if (!rte_eth_dev_is_valid_port(port_id))
		return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  rte_strerror(ENODEV));
	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->async_destroy == NULL)
		return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  rte_strerror(ENOSYS));
#endif

	ret = dev->flow_fp_ops->async_destroy(dev, queue_id,
					      op_attr, flow,
					      user_data, error);

	rte_flow_trace_async_destroy(port_id, queue_id, op_attr, flow,
				     user_data, ret);

	return ret;
}
2192 
/*
 * Enqueue an update of the actions attached to an existing flow rule.
 *
 * Fast path through dev->flow_fp_ops; validity checks compiled in only
 * under RTE_FLOW_DEBUG.  Returns 0 on success, a negative errno value
 * otherwise (with @error populated by the callee).
 */
int
rte_flow_async_actions_update(uint16_t port_id,
			      uint32_t queue_id,
			      const struct rte_flow_op_attr *op_attr,
			      struct rte_flow *flow,
			      const struct rte_flow_action actions[],
			      uint8_t actions_template_index,
			      void *user_data,
			      struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;

#ifdef RTE_FLOW_DEBUG
	/* Debug-only sanity checks; absent from release builds. */
	if (!rte_eth_dev_is_valid_port(port_id))
		return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  rte_strerror(ENODEV));
	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->async_actions_update == NULL)
		return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  rte_strerror(ENOSYS));
#endif

	ret = dev->flow_fp_ops->async_actions_update(dev, queue_id, op_attr,
						     flow, actions,
						     actions_template_index,
						     user_data, error);

	rte_flow_trace_async_actions_update(port_id, queue_id, op_attr, flow,
					    actions, actions_template_index,
					    user_data, ret);

	return ret;
}
2226 
/*
 * Push all enqueued operations on the given flow queue to the hardware.
 *
 * Fast path through dev->flow_fp_ops; validity checks compiled in only
 * under RTE_FLOW_DEBUG.  Returns 0 on success, a negative errno value
 * otherwise.
 */
int
rte_flow_push(uint16_t port_id,
	      uint32_t queue_id,
	      struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;

#ifdef RTE_FLOW_DEBUG
	/* Debug-only sanity checks; absent from release builds. */
	if (!rte_eth_dev_is_valid_port(port_id))
		return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  rte_strerror(ENODEV));
	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->push == NULL)
		return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  rte_strerror(ENOSYS));
#endif

	ret = dev->flow_fp_ops->push(dev, queue_id, error);

	rte_flow_trace_push(port_id, queue_id, ret);

	return ret;
}
2250 
/*
 * Pull up to @n_res completed operation results from the given flow queue
 * into @res.
 *
 * Fast path through dev->flow_fp_ops; validity checks compiled in only
 * under RTE_FLOW_DEBUG.  Returns the driver result (per API, the number
 * of results retrieved on success, negative errno on failure).
 */
int
rte_flow_pull(uint16_t port_id,
	      uint32_t queue_id,
	      struct rte_flow_op_result res[],
	      uint16_t n_res,
	      struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;

#ifdef RTE_FLOW_DEBUG
	/* Debug-only sanity checks; absent from release builds. */
	if (!rte_eth_dev_is_valid_port(port_id))
		return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  rte_strerror(ENODEV));
	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->pull == NULL)
		return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  rte_strerror(ENOSYS));
#endif

	ret = dev->flow_fp_ops->pull(dev, queue_id, res, n_res, error);

	rte_flow_trace_pull(port_id, queue_id, res, n_res, ret);

	return ret;
}
2276 
/*
 * Enqueue creation of an indirect action handle on the given port/queue.
 *
 * Fast path through dev->flow_fp_ops; validity checks compiled in only
 * under RTE_FLOW_DEBUG.  Returns the new handle, or NULL on failure with
 * @error populated by the callee.
 */
struct rte_flow_action_handle *
rte_flow_async_action_handle_create(uint16_t port_id,
		uint32_t queue_id,
		const struct rte_flow_op_attr *op_attr,
		const struct rte_flow_indir_action_conf *indir_action_conf,
		const struct rte_flow_action *action,
		void *user_data,
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct rte_flow_action_handle *handle;

#ifdef RTE_FLOW_DEBUG
	/* Debug-only sanity checks; absent from release builds. */
	if (!rte_eth_dev_is_valid_port(port_id)) {
		rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   rte_strerror(ENODEV));
		return NULL;
	}
	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->async_action_handle_create == NULL) {
		rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   rte_strerror(ENOSYS));
		return NULL;
	}
#endif

	handle = dev->flow_fp_ops->async_action_handle_create(dev, queue_id, op_attr,
							      indir_action_conf, action,
							      user_data, error);

	rte_flow_trace_async_action_handle_create(port_id, queue_id, op_attr,
						  indir_action_conf, action,
						  user_data, handle);

	return handle;
}
2312 
/*
 * Enqueue destruction of an indirect action handle.
 *
 * Fast path through dev->flow_fp_ops; validity checks compiled in only
 * under RTE_FLOW_DEBUG.  Returns 0 on success, a negative errno value
 * otherwise.
 */
int
rte_flow_async_action_handle_destroy(uint16_t port_id,
		uint32_t queue_id,
		const struct rte_flow_op_attr *op_attr,
		struct rte_flow_action_handle *action_handle,
		void *user_data,
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;

#ifdef RTE_FLOW_DEBUG
	/* Debug-only sanity checks; absent from release builds. */
	if (!rte_eth_dev_is_valid_port(port_id))
		return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  rte_strerror(ENODEV));
	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->async_action_handle_destroy == NULL)
		return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  rte_strerror(ENOSYS));
#endif

	ret = dev->flow_fp_ops->async_action_handle_destroy(dev, queue_id, op_attr,
							    action_handle, user_data, error);

	rte_flow_trace_async_action_handle_destroy(port_id, queue_id, op_attr,
						   action_handle, user_data, ret);

	return ret;
}
2341 
/*
 * Enqueue an update of an indirect action handle's configuration.
 * @update is an action-type-specific blob interpreted by the driver.
 *
 * Fast path through dev->flow_fp_ops; validity checks compiled in only
 * under RTE_FLOW_DEBUG.  Returns 0 on success, a negative errno value
 * otherwise.
 */
int
rte_flow_async_action_handle_update(uint16_t port_id,
		uint32_t queue_id,
		const struct rte_flow_op_attr *op_attr,
		struct rte_flow_action_handle *action_handle,
		const void *update,
		void *user_data,
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;

#ifdef RTE_FLOW_DEBUG
	/* Debug-only sanity checks; absent from release builds. */
	if (!rte_eth_dev_is_valid_port(port_id))
		return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  rte_strerror(ENODEV));
	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->async_action_handle_update == NULL)
		return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  rte_strerror(ENOSYS));
#endif

	ret = dev->flow_fp_ops->async_action_handle_update(dev, queue_id, op_attr,
							   action_handle, update, user_data, error);

	rte_flow_trace_async_action_handle_update(port_id, queue_id, op_attr,
						  action_handle, update,
						  user_data, ret);

	return ret;
}
2372 
/*
 * Enqueue a query of an indirect action handle; results are written to
 * @data (an action-type-specific structure interpreted by the driver).
 *
 * Fast path through dev->flow_fp_ops; validity checks compiled in only
 * under RTE_FLOW_DEBUG.  Returns 0 on success, a negative errno value
 * otherwise.
 */
int
rte_flow_async_action_handle_query(uint16_t port_id,
		uint32_t queue_id,
		const struct rte_flow_op_attr *op_attr,
		const struct rte_flow_action_handle *action_handle,
		void *data,
		void *user_data,
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;

#ifdef RTE_FLOW_DEBUG
	/* Debug-only sanity checks; absent from release builds. */
	if (!rte_eth_dev_is_valid_port(port_id))
		return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  rte_strerror(ENODEV));
	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->async_action_handle_query == NULL)
		return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  rte_strerror(ENOSYS));
#endif

	ret = dev->flow_fp_ops->async_action_handle_query(dev, queue_id, op_attr,
							  action_handle, data, user_data, error);

	rte_flow_trace_async_action_handle_query(port_id, queue_id, op_attr,
						 action_handle, data, user_data,
						 ret);

	return ret;
}
2403 
2404 int
2405 rte_flow_action_handle_query_update(uint16_t port_id,
2406 				    struct rte_flow_action_handle *handle,
2407 				    const void *update, void *query,
2408 				    enum rte_flow_query_update_mode mode,
2409 				    struct rte_flow_error *error)
2410 {
2411 	int ret;
2412 	struct rte_eth_dev *dev;
2413 	const struct rte_flow_ops *ops;
2414 
2415 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2416 	if (!handle)
2417 		return -EINVAL;
2418 	if (!update && !query)
2419 		return -EINVAL;
2420 	dev = &rte_eth_devices[port_id];
2421 	ops = rte_flow_ops_get(port_id, error);
2422 	if (!ops || !ops->action_handle_query_update)
2423 		return -ENOTSUP;
2424 	ret = ops->action_handle_query_update(dev, handle, update,
2425 					      query, mode, error);
2426 	return flow_err(port_id, ret, error);
2427 }
2428 
/*
 * Enqueue an atomic query-and/or-update of an indirect action handle.
 *
 * Fast path through dev->flow_fp_ops; validity checks compiled in only
 * under RTE_FLOW_DEBUG.  The driver's return value is passed through
 * unchanged.  NOTE(review): unlike sibling wrappers, no tracepoint is
 * emitted here.
 */
int
rte_flow_async_action_handle_query_update(uint16_t port_id, uint32_t queue_id,
					  const struct rte_flow_op_attr *attr,
					  struct rte_flow_action_handle *handle,
					  const void *update, void *query,
					  enum rte_flow_query_update_mode mode,
					  void *user_data,
					  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];

#ifdef RTE_FLOW_DEBUG
	/* Debug-only sanity checks; absent from release builds. */
	if (!rte_eth_dev_is_valid_port(port_id))
		return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  rte_strerror(ENODEV));
	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->async_action_handle_query_update == NULL)
		return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  rte_strerror(ENOSYS));
#endif

	return dev->flow_fp_ops->async_action_handle_query_update(dev, queue_id, attr,
								  handle, update,
								  query, mode,
								  user_data, error);
}
2454 
2455 struct rte_flow_action_list_handle *
2456 rte_flow_action_list_handle_create(uint16_t port_id,
2457 				   const
2458 				   struct rte_flow_indir_action_conf *conf,
2459 				   const struct rte_flow_action *actions,
2460 				   struct rte_flow_error *error)
2461 {
2462 	int ret;
2463 	struct rte_eth_dev *dev;
2464 	const struct rte_flow_ops *ops;
2465 	struct rte_flow_action_list_handle *handle;
2466 
2467 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
2468 	ops = rte_flow_ops_get(port_id, error);
2469 	if (!ops || !ops->action_list_handle_create) {
2470 		rte_flow_error_set(error, ENOTSUP,
2471 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2472 				   "action_list handle not supported");
2473 		return NULL;
2474 	}
2475 	dev = &rte_eth_devices[port_id];
2476 	handle = ops->action_list_handle_create(dev, conf, actions, error);
2477 	ret = flow_err(port_id, -rte_errno, error);
2478 	rte_flow_trace_action_list_handle_create(port_id, conf, actions, ret);
2479 	return handle;
2480 }
2481 
2482 int
2483 rte_flow_action_list_handle_destroy(uint16_t port_id,
2484 				    struct rte_flow_action_list_handle *handle,
2485 				    struct rte_flow_error *error)
2486 {
2487 	int ret;
2488 	struct rte_eth_dev *dev;
2489 	const struct rte_flow_ops *ops;
2490 
2491 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2492 	ops = rte_flow_ops_get(port_id, error);
2493 	if (!ops || !ops->action_list_handle_destroy)
2494 		return rte_flow_error_set(error, ENOTSUP,
2495 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2496 					  "action_list handle not supported");
2497 	dev = &rte_eth_devices[port_id];
2498 	ret = ops->action_list_handle_destroy(dev, handle, error);
2499 	ret = flow_err(port_id, ret, error);
2500 	rte_flow_trace_action_list_handle_destroy(port_id, handle, ret);
2501 	return ret;
2502 }
2503 
2504 struct rte_flow_action_list_handle *
2505 rte_flow_async_action_list_handle_create(uint16_t port_id, uint32_t queue_id,
2506 					 const struct rte_flow_op_attr *attr,
2507 					 const struct rte_flow_indir_action_conf *conf,
2508 					 const struct rte_flow_action *actions,
2509 					 void *user_data,
2510 					 struct rte_flow_error *error)
2511 {
2512 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2513 	struct rte_flow_action_list_handle *handle;
2514 	int ret;
2515 
2516 #ifdef RTE_FLOW_DEBUG
2517 	if (!rte_eth_dev_is_valid_port(port_id)) {
2518 		rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2519 				   rte_strerror(ENODEV));
2520 		return NULL;
2521 	}
2522 	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->async_action_list_handle_create == NULL) {
2523 		rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2524 				   rte_strerror(ENOSYS));
2525 		return NULL;
2526 	}
2527 #endif
2528 
2529 	handle = dev->flow_fp_ops->async_action_list_handle_create(dev, queue_id, attr, conf,
2530 								   actions, user_data,
2531 								   error);
2532 	ret = flow_err(port_id, -rte_errno, error);
2533 
2534 	rte_flow_trace_async_action_list_handle_create(port_id, queue_id, attr,
2535 						       conf, actions, user_data,
2536 						       ret);
2537 	return handle;
2538 }
2539 
/*
 * Enqueue destruction of an indirect action list handle (fast path).
 *
 * Validity checks compiled in only under RTE_FLOW_DEBUG.  Returns 0 on
 * success, a negative errno value otherwise.
 */
int
rte_flow_async_action_list_handle_destroy(uint16_t port_id, uint32_t queue_id,
				 const struct rte_flow_op_attr *op_attr,
				 struct rte_flow_action_list_handle *handle,
				 void *user_data, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;

#ifdef RTE_FLOW_DEBUG
	/* Debug-only sanity checks; absent from release builds. */
	if (!rte_eth_dev_is_valid_port(port_id))
		return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  rte_strerror(ENODEV));
	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->async_action_list_handle_destroy == NULL)
		return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  rte_strerror(ENOSYS));
#endif

	ret = dev->flow_fp_ops->async_action_list_handle_destroy(dev, queue_id, op_attr,
								 handle, user_data, error);

	rte_flow_trace_async_action_list_handle_destroy(port_id, queue_id,
							op_attr, handle,
							user_data, ret);
	return ret;
}
2566 
2567 int
2568 rte_flow_action_list_handle_query_update(uint16_t port_id,
2569 			 const struct rte_flow_action_list_handle *handle,
2570 			 const void **update, void **query,
2571 			 enum rte_flow_query_update_mode mode,
2572 			 struct rte_flow_error *error)
2573 {
2574 	int ret;
2575 	struct rte_eth_dev *dev;
2576 	const struct rte_flow_ops *ops;
2577 
2578 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2579 	ops = rte_flow_ops_get(port_id, error);
2580 	if (!ops || !ops->action_list_handle_query_update)
2581 		return rte_flow_error_set(error, ENOTSUP,
2582 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2583 					  "action_list query_update not supported");
2584 	dev = &rte_eth_devices[port_id];
2585 	ret = ops->action_list_handle_query_update(dev, handle, update, query,
2586 						   mode, error);
2587 	ret = flow_err(port_id, ret, error);
2588 	rte_flow_trace_action_list_handle_query_update(port_id, handle, update,
2589 						       query, mode, ret);
2590 	return ret;
2591 }
2592 
/*
 * Enqueue an atomic query-and/or-update of an indirect action list handle
 * (fast path).
 *
 * Validity checks compiled in only under RTE_FLOW_DEBUG.  Returns 0 on
 * success, a negative errno value otherwise.
 */
int
rte_flow_async_action_list_handle_query_update(uint16_t port_id, uint32_t queue_id,
			 const struct rte_flow_op_attr *attr,
			 const struct rte_flow_action_list_handle *handle,
			 const void **update, void **query,
			 enum rte_flow_query_update_mode mode,
			 void *user_data, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;

#ifdef RTE_FLOW_DEBUG
	/* Debug-only sanity checks; absent from release builds. */
	if (!rte_eth_dev_is_valid_port(port_id))
		return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  rte_strerror(ENODEV));
	if (dev->flow_fp_ops == NULL ||
	    dev->flow_fp_ops->async_action_list_handle_query_update == NULL)
		return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
					  rte_strerror(ENOSYS));
#endif

	ret = dev->flow_fp_ops->async_action_list_handle_query_update(dev, queue_id, attr,
								      handle, update, query,
								      mode, user_data,
								      error);

	rte_flow_trace_async_action_list_handle_query_update(port_id, queue_id,
							     attr, handle,
							     update, query,
							     mode, user_data,
							     ret);
	return ret;
}
2626 
2627 int
2628 rte_flow_calc_table_hash(uint16_t port_id, const struct rte_flow_template_table *table,
2629 			 const struct rte_flow_item pattern[], uint8_t pattern_template_index,
2630 			 uint32_t *hash, struct rte_flow_error *error)
2631 {
2632 	int ret;
2633 	struct rte_eth_dev *dev;
2634 	const struct rte_flow_ops *ops;
2635 
2636 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2637 	ops = rte_flow_ops_get(port_id, error);
2638 	if (!ops || !ops->flow_calc_table_hash)
2639 		return rte_flow_error_set(error, ENOTSUP,
2640 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2641 					  "action_list async query_update not supported");
2642 	dev = &rte_eth_devices[port_id];
2643 	ret = ops->flow_calc_table_hash(dev, table, pattern, pattern_template_index,
2644 					hash, error);
2645 	return flow_err(port_id, ret, error);
2646 }
2647 
2648 int
2649 rte_flow_calc_encap_hash(uint16_t port_id, const struct rte_flow_item pattern[],
2650 			 enum rte_flow_encap_hash_field dest_field, uint8_t hash_len,
2651 			 uint8_t *hash, struct rte_flow_error *error)
2652 {
2653 	int ret;
2654 	struct rte_eth_dev *dev;
2655 	const struct rte_flow_ops *ops;
2656 
2657 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2658 	ops = rte_flow_ops_get(port_id, error);
2659 	if (!ops || !ops->flow_calc_encap_hash)
2660 		return rte_flow_error_set(error, ENOTSUP,
2661 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2662 					  "calc encap hash is not supported");
2663 	if (dest_field > RTE_FLOW_ENCAP_HASH_FIELD_NVGRE_FLOW_ID)
2664 		return rte_flow_error_set(error, EINVAL,
2665 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2666 					  "hash dest field is not defined");
2667 	if ((dest_field == RTE_FLOW_ENCAP_HASH_FIELD_SRC_PORT && hash_len != 2) ||
2668 	    (dest_field == RTE_FLOW_ENCAP_HASH_FIELD_NVGRE_FLOW_ID && hash_len != 1))
2669 		return rte_flow_error_set(error, EINVAL,
2670 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2671 					  "hash len doesn't match the requested field len");
2672 	dev = &rte_eth_devices[port_id];
2673 	ret = ops->flow_calc_encap_hash(dev, pattern, dest_field, hash, error);
2674 	return flow_err(port_id, ret, error);
2675 }
2676 
2677 bool
2678 rte_flow_template_table_resizable(__rte_unused uint16_t port_id,
2679 				  const struct rte_flow_template_table_attr *tbl_attr)
2680 {
2681 	return (tbl_attr->specialize &
2682 		RTE_FLOW_TABLE_SPECIALIZE_RESIZABLE) != 0;
2683 }
2684 
2685 int
2686 rte_flow_template_table_resize(uint16_t port_id,
2687 			       struct rte_flow_template_table *table,
2688 			       uint32_t nb_rules,
2689 			       struct rte_flow_error *error)
2690 {
2691 	int ret;
2692 	struct rte_eth_dev *dev;
2693 	const struct rte_flow_ops *ops;
2694 
2695 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2696 	ops = rte_flow_ops_get(port_id, error);
2697 	if (!ops || !ops->flow_template_table_resize)
2698 		return rte_flow_error_set(error, ENOTSUP,
2699 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2700 					  "flow_template_table_resize not supported");
2701 	dev = &rte_eth_devices[port_id];
2702 	ret = ops->flow_template_table_resize(dev, table, nb_rules, error);
2703 	ret = flow_err(port_id, ret, error);
2704 	rte_flow_trace_template_table_resize(port_id, table, nb_rules, ret);
2705 	return ret;
2706 }
2707 
2708 int
2709 rte_flow_async_update_resized(uint16_t port_id, uint32_t queue,
2710 			      const struct rte_flow_op_attr *attr,
2711 			      struct rte_flow *rule, void *user_data,
2712 			      struct rte_flow_error *error)
2713 {
2714 	int ret;
2715 	struct rte_eth_dev *dev;
2716 	const struct rte_flow_ops *ops;
2717 
2718 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2719 	ops = rte_flow_ops_get(port_id, error);
2720 	if (!ops || !ops->flow_update_resized)
2721 		return rte_flow_error_set(error, ENOTSUP,
2722 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2723 					  "async_flow_async_transfer not supported");
2724 	dev = &rte_eth_devices[port_id];
2725 	ret = ops->flow_update_resized(dev, queue, attr, rule, user_data, error);
2726 	ret = flow_err(port_id, ret, error);
2727 	rte_flow_trace_async_update_resized(port_id, queue, attr,
2728 					    rule, user_data, ret);
2729 	return ret;
2730 }
2731 
2732 int
2733 rte_flow_template_table_resize_complete(uint16_t port_id,
2734 					struct rte_flow_template_table *table,
2735 					struct rte_flow_error *error)
2736 {
2737 	int ret;
2738 	struct rte_eth_dev *dev;
2739 	const struct rte_flow_ops *ops;
2740 
2741 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2742 	ops = rte_flow_ops_get(port_id, error);
2743 	if (!ops || !ops->flow_template_table_resize_complete)
2744 		return rte_flow_error_set(error, ENOTSUP,
2745 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2746 					  "flow_template_table_transfer_complete not supported");
2747 	dev = &rte_eth_devices[port_id];
2748 	ret = ops->flow_template_table_resize_complete(dev, table, error);
2749 	ret = flow_err(port_id, ret, error);
2750 	rte_flow_trace_table_resize_complete(port_id, table, ret);
2751 	return ret;
2752 }
2753 
/*
 * Placeholder async_create fast-path callback: unconditionally fails
 * with ENOSYS and returns NULL.
 */
static struct rte_flow *
rte_flow_dummy_async_create(struct rte_eth_dev *dev __rte_unused,
			    uint32_t queue __rte_unused,
			    const struct rte_flow_op_attr *attr __rte_unused,
			    struct rte_flow_template_table *table __rte_unused,
			    const struct rte_flow_item items[] __rte_unused,
			    uint8_t pattern_template_index __rte_unused,
			    const struct rte_flow_action actions[] __rte_unused,
			    uint8_t action_template_index __rte_unused,
			    void *user_data __rte_unused,
			    struct rte_flow_error *error)
{
	rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			   rte_strerror(ENOSYS));
	return NULL;
}
2770 
/*
 * Placeholder async_create_by_index fast-path callback: unconditionally
 * fails with ENOSYS and returns NULL.
 */
static struct rte_flow *
rte_flow_dummy_async_create_by_index(struct rte_eth_dev *dev __rte_unused,
				     uint32_t queue __rte_unused,
				     const struct rte_flow_op_attr *attr __rte_unused,
				     struct rte_flow_template_table *table __rte_unused,
				     uint32_t rule_index __rte_unused,
				     const struct rte_flow_action actions[] __rte_unused,
				     uint8_t action_template_index __rte_unused,
				     void *user_data __rte_unused,
				     struct rte_flow_error *error)
{
	rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			   rte_strerror(ENOSYS));
	return NULL;
}
2786 
/*
 * Placeholder async_create_by_index_with_pattern fast-path callback:
 * unconditionally fails with ENOSYS and returns NULL.
 */
static struct rte_flow *
rte_flow_dummy_async_create_by_index_with_pattern(struct rte_eth_dev *dev __rte_unused,
						uint32_t queue __rte_unused,
						const struct rte_flow_op_attr *attr __rte_unused,
						struct rte_flow_template_table *table __rte_unused,
						uint32_t rule_index __rte_unused,
						const struct rte_flow_item items[] __rte_unused,
						uint8_t pattern_template_index __rte_unused,
						const struct rte_flow_action actions[] __rte_unused,
						uint8_t action_template_index __rte_unused,
						void *user_data __rte_unused,
						struct rte_flow_error *error)
{
	rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			   rte_strerror(ENOSYS));
	return NULL;
}
2804 
2805 static int
2806 rte_flow_dummy_async_actions_update(struct rte_eth_dev *dev __rte_unused,
2807 				    uint32_t queue_id __rte_unused,
2808 				    const struct rte_flow_op_attr *op_attr __rte_unused,
2809 				    struct rte_flow *flow __rte_unused,
2810 				    const struct rte_flow_action actions[] __rte_unused,
2811 				    uint8_t actions_template_index __rte_unused,
2812 				    void *user_data __rte_unused,
2813 				    struct rte_flow_error *error)
2814 {
2815 	return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2816 				  rte_strerror(ENOSYS));
2817 }
2818 
2819 static int
2820 rte_flow_dummy_async_destroy(struct rte_eth_dev *dev __rte_unused,
2821 			     uint32_t queue_id __rte_unused,
2822 			     const struct rte_flow_op_attr *op_attr __rte_unused,
2823 			     struct rte_flow *flow __rte_unused,
2824 			     void *user_data __rte_unused,
2825 			     struct rte_flow_error *error)
2826 {
2827 	return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2828 				  rte_strerror(ENOSYS));
2829 }
2830 
2831 static int
2832 rte_flow_dummy_push(struct rte_eth_dev *dev __rte_unused,
2833 		    uint32_t queue_id __rte_unused,
2834 		    struct rte_flow_error *error)
2835 {
2836 	return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2837 				  rte_strerror(ENOSYS));
2838 }
2839 
2840 static int
2841 rte_flow_dummy_pull(struct rte_eth_dev *dev __rte_unused,
2842 		    uint32_t queue_id __rte_unused,
2843 		    struct rte_flow_op_result res[] __rte_unused,
2844 		    uint16_t n_res __rte_unused,
2845 		    struct rte_flow_error *error)
2846 {
2847 	return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2848 				  rte_strerror(ENOSYS));
2849 }
2850 
2851 static struct rte_flow_action_handle *
2852 rte_flow_dummy_async_action_handle_create(
2853 	struct rte_eth_dev *dev __rte_unused,
2854 	uint32_t queue_id __rte_unused,
2855 	const struct rte_flow_op_attr *op_attr __rte_unused,
2856 	const struct rte_flow_indir_action_conf *indir_action_conf __rte_unused,
2857 	const struct rte_flow_action *action __rte_unused,
2858 	void *user_data __rte_unused,
2859 	struct rte_flow_error *error)
2860 {
2861 	rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2862 			   rte_strerror(ENOSYS));
2863 	return NULL;
2864 }
2865 
2866 static int
2867 rte_flow_dummy_async_action_handle_destroy(
2868 	struct rte_eth_dev *dev __rte_unused,
2869 	uint32_t queue_id __rte_unused,
2870 	const struct rte_flow_op_attr *op_attr __rte_unused,
2871 	struct rte_flow_action_handle *action_handle __rte_unused,
2872 	void *user_data __rte_unused,
2873 	struct rte_flow_error *error)
2874 {
2875 	return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2876 				  rte_strerror(ENOSYS));
2877 }
2878 
2879 static int
2880 rte_flow_dummy_async_action_handle_update(
2881 	struct rte_eth_dev *dev __rte_unused,
2882 	uint32_t queue_id __rte_unused,
2883 	const struct rte_flow_op_attr *op_attr __rte_unused,
2884 	struct rte_flow_action_handle *action_handle __rte_unused,
2885 	const void *update __rte_unused,
2886 	void *user_data __rte_unused,
2887 	struct rte_flow_error *error)
2888 {
2889 	return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2890 				  rte_strerror(ENOSYS));
2891 }
2892 
2893 static int
2894 rte_flow_dummy_async_action_handle_query(
2895 	struct rte_eth_dev *dev __rte_unused,
2896 	uint32_t queue_id __rte_unused,
2897 	const struct rte_flow_op_attr *op_attr __rte_unused,
2898 	const struct rte_flow_action_handle *action_handle __rte_unused,
2899 	void *data __rte_unused,
2900 	void *user_data __rte_unused,
2901 	struct rte_flow_error *error)
2902 {
2903 	return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2904 				  rte_strerror(ENOSYS));
2905 }
2906 
2907 static int
2908 rte_flow_dummy_async_action_handle_query_update(
2909 	struct rte_eth_dev *dev __rte_unused,
2910 	uint32_t queue_id __rte_unused,
2911 	const struct rte_flow_op_attr *attr __rte_unused,
2912 	struct rte_flow_action_handle *handle __rte_unused,
2913 	const void *update __rte_unused,
2914 	void *query __rte_unused,
2915 	enum rte_flow_query_update_mode mode __rte_unused,
2916 	void *user_data __rte_unused,
2917 	struct rte_flow_error *error)
2918 {
2919 	return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2920 				  rte_strerror(ENOSYS));
2921 }
2922 
2923 static struct rte_flow_action_list_handle *
2924 rte_flow_dummy_async_action_list_handle_create(
2925 	struct rte_eth_dev *dev __rte_unused,
2926 	uint32_t queue_id __rte_unused,
2927 	const struct rte_flow_op_attr *attr __rte_unused,
2928 	const struct rte_flow_indir_action_conf *conf __rte_unused,
2929 	const struct rte_flow_action *actions __rte_unused,
2930 	void *user_data __rte_unused,
2931 	struct rte_flow_error *error)
2932 {
2933 	rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2934 			   rte_strerror(ENOSYS));
2935 	return NULL;
2936 }
2937 
2938 static int
2939 rte_flow_dummy_async_action_list_handle_destroy(
2940 	struct rte_eth_dev *dev __rte_unused,
2941 	uint32_t queue_id __rte_unused,
2942 	const struct rte_flow_op_attr *op_attr __rte_unused,
2943 	struct rte_flow_action_list_handle *handle __rte_unused,
2944 	void *user_data __rte_unused,
2945 	struct rte_flow_error *error)
2946 {
2947 	return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2948 				  rte_strerror(ENOSYS));
2949 }
2950 
2951 static int
2952 rte_flow_dummy_async_action_list_handle_query_update(
2953 	struct rte_eth_dev *dev __rte_unused,
2954 	uint32_t queue_id __rte_unused,
2955 	const struct rte_flow_op_attr *attr __rte_unused,
2956 	const struct rte_flow_action_list_handle *handle __rte_unused,
2957 	const void **update __rte_unused,
2958 	void **query __rte_unused,
2959 	enum rte_flow_query_update_mode mode __rte_unused,
2960 	void *user_data __rte_unused,
2961 	struct rte_flow_error *error)
2962 {
2963 	return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2964 				  rte_strerror(ENOSYS));
2965 }
2966 
2967 struct rte_flow_fp_ops rte_flow_fp_default_ops = {
2968 	.async_create = rte_flow_dummy_async_create,
2969 	.async_create_by_index = rte_flow_dummy_async_create_by_index,
2970 	.async_actions_update = rte_flow_dummy_async_actions_update,
2971 	.async_create_by_index_with_pattern = rte_flow_dummy_async_create_by_index_with_pattern,
2972 	.async_destroy = rte_flow_dummy_async_destroy,
2973 	.push = rte_flow_dummy_push,
2974 	.pull = rte_flow_dummy_pull,
2975 	.async_action_handle_create = rte_flow_dummy_async_action_handle_create,
2976 	.async_action_handle_destroy = rte_flow_dummy_async_action_handle_destroy,
2977 	.async_action_handle_update = rte_flow_dummy_async_action_handle_update,
2978 	.async_action_handle_query = rte_flow_dummy_async_action_handle_query,
2979 	.async_action_handle_query_update = rte_flow_dummy_async_action_handle_query_update,
2980 	.async_action_list_handle_create = rte_flow_dummy_async_action_list_handle_create,
2981 	.async_action_list_handle_destroy = rte_flow_dummy_async_action_list_handle_destroy,
2982 	.async_action_list_handle_query_update =
2983 		rte_flow_dummy_async_action_list_handle_query_update,
2984 };
2985