/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <errno.h>
#include <stddef.h>
#include <stdint.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_branch_prediction.h>
#include <rte_string_fns.h>
#include <rte_mbuf_dyn.h>
#include "rte_ethdev.h"
#include "rte_flow_driver.h"
#include "rte_flow.h"

#include "ethdev_trace.h"

/* Mbuf dynamic field name for metadata. */
int32_t rte_flow_dynf_metadata_offs = -1;

/* Mbuf dynamic field flag bit number for metadata. */
uint64_t rte_flow_dynf_metadata_mask;

/**
 * Flow elements description tables.
 */
struct rte_flow_desc_data {
	const char *name;
	size_t size;
	size_t (*desc_fn)(void *dst, const void *src);
};

/**
 * Copy flow element data into a destination buffer.
 *
 * @param buf
 *   Destination memory.
 * @param data
 *   Source memory.
 * @param size
 *   Requested copy size.
 * @param desc
 *   rte_flow_desc_item - for flow item conversion.
 *   rte_flow_desc_action - for flow action conversion.
 * @param type
 *   Offset into the desc param or negative value for private flow elements.
 * @return
 *   Number of bytes needed to store the flow element regardless of @p size.
 */
static inline size_t
rte_flow_conv_copy(void *buf, const void *data, const size_t size,
		   const struct rte_flow_desc_data *desc, int type)
{
	/* Allow PMD private flow item. */
	bool rte_type = type >= 0;

	size_t sz = rte_type ? desc[type].size : sizeof(void *);
	if (buf == NULL || data == NULL)
		return 0;
	rte_memcpy(buf, data, (size > sz ? sz : size));
	if (rte_type && desc[type].desc_fn)
		sz += desc[type].desc_fn(size > 0 ? buf : NULL, data);
	return sz;
}

static size_t
rte_flow_item_flex_conv(void *buf, const void *data)
{
	struct rte_flow_item_flex *dst = buf;
	const struct rte_flow_item_flex *src = data;
	if (buf) {
		dst->pattern = rte_memcpy
			((void *)((uintptr_t)(dst + 1)), src->pattern,
			 src->length);
	}
	return src->length;
}

/** Generate flow_item[] entry. */
#define MK_FLOW_ITEM(t, s) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
		.desc_fn = NULL, \
	}

#define MK_FLOW_ITEM_FN(t, s, fn) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
		.desc_fn = fn, \
	}

/** Information about known flow pattern items. */
static const struct rte_flow_desc_data rte_flow_desc_item[] = {
	MK_FLOW_ITEM(END, 0),
	MK_FLOW_ITEM(VOID, 0),
	MK_FLOW_ITEM(INVERT, 0),
	MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
	MK_FLOW_ITEM(PORT_ID, sizeof(struct rte_flow_item_port_id)),
	MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)),
	MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
	MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
	MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
	MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
	MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
	MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
	MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
	MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
	MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
	MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
	MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
	MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
	MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
	MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)),
	MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(ESP, sizeof(struct rte_flow_item_esp)),
	MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
	MK_FLOW_ITEM(VXLAN_GPE, sizeof(struct rte_flow_item_vxlan_gpe)),
	MK_FLOW_ITEM(ARP_ETH_IPV4, sizeof(struct rte_flow_item_arp_eth_ipv4)),
	MK_FLOW_ITEM(IPV6_EXT, sizeof(struct rte_flow_item_ipv6_ext)),
	MK_FLOW_ITEM(IPV6_FRAG_EXT, sizeof(struct rte_flow_item_ipv6_frag_ext)),
	MK_FLOW_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
	MK_FLOW_ITEM(ICMP6_ECHO_REQUEST, sizeof(struct rte_flow_item_icmp6_echo)),
	MK_FLOW_ITEM(ICMP6_ECHO_REPLY, sizeof(struct rte_flow_item_icmp6_echo)),
	MK_FLOW_ITEM(ICMP6_ND_NS, sizeof(struct rte_flow_item_icmp6_nd_ns)),
	MK_FLOW_ITEM(ICMP6_ND_NA, sizeof(struct rte_flow_item_icmp6_nd_na)),
	MK_FLOW_ITEM(ICMP6_ND_OPT, sizeof(struct rte_flow_item_icmp6_nd_opt)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_SLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_TLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
	MK_FLOW_ITEM(MARK, sizeof(struct rte_flow_item_mark)),
	MK_FLOW_ITEM(META, sizeof(struct rte_flow_item_meta)),
	MK_FLOW_ITEM(TAG, sizeof(struct rte_flow_item_tag)),
	MK_FLOW_ITEM(GRE_KEY, sizeof(rte_be32_t)),
	MK_FLOW_ITEM(GRE_OPTION, sizeof(struct rte_flow_item_gre_opt)),
	MK_FLOW_ITEM(GTP_PSC, sizeof(struct rte_flow_item_gtp_psc)),
	MK_FLOW_ITEM(PPPOES, sizeof(struct rte_flow_item_pppoe)),
	MK_FLOW_ITEM(PPPOED, sizeof(struct rte_flow_item_pppoe)),
	MK_FLOW_ITEM(PPPOE_PROTO_ID,
		     sizeof(struct rte_flow_item_pppoe_proto_id)),
	MK_FLOW_ITEM(NSH, sizeof(struct rte_flow_item_nsh)),
	MK_FLOW_ITEM(IGMP, sizeof(struct rte_flow_item_igmp)),
	MK_FLOW_ITEM(AH, sizeof(struct rte_flow_item_ah)),
	MK_FLOW_ITEM(HIGIG2, sizeof(struct rte_flow_item_higig2_hdr)),
	MK_FLOW_ITEM(L2TPV3OIP, sizeof(struct rte_flow_item_l2tpv3oip)),
	MK_FLOW_ITEM(PFCP, sizeof(struct rte_flow_item_pfcp)),
	MK_FLOW_ITEM(ECPRI, sizeof(struct rte_flow_item_ecpri)),
	MK_FLOW_ITEM(GENEVE_OPT, sizeof(struct rte_flow_item_geneve_opt)),
	MK_FLOW_ITEM(INTEGRITY, sizeof(struct rte_flow_item_integrity)),
	MK_FLOW_ITEM(CONNTRACK, sizeof(uint32_t)),
	MK_FLOW_ITEM(PORT_REPRESENTOR, sizeof(struct rte_flow_item_ethdev)),
	MK_FLOW_ITEM(REPRESENTED_PORT, sizeof(struct rte_flow_item_ethdev)),
	MK_FLOW_ITEM_FN(FLEX, sizeof(struct rte_flow_item_flex),
			rte_flow_item_flex_conv),
	MK_FLOW_ITEM(L2TPV2, sizeof(struct rte_flow_item_l2tpv2)),
	MK_FLOW_ITEM(PPP, sizeof(struct rte_flow_item_ppp)),
	MK_FLOW_ITEM(METER_COLOR, sizeof(struct rte_flow_item_meter_color)),
	MK_FLOW_ITEM(IPV6_ROUTING_EXT, sizeof(struct rte_flow_item_ipv6_routing_ext)),
};

/** Generate flow_action[] entry. */
#define MK_FLOW_ACTION(t, s) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
		.desc_fn = NULL, \
	}

#define MK_FLOW_ACTION_FN(t, fn) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = 0, \
		.desc_fn = fn, \
	}

/** Information about known flow actions. */
static const struct rte_flow_desc_data rte_flow_desc_action[] = {
	MK_FLOW_ACTION(END, 0),
	MK_FLOW_ACTION(VOID, 0),
	MK_FLOW_ACTION(PASSTHRU, 0),
	MK_FLOW_ACTION(JUMP, sizeof(struct rte_flow_action_jump)),
	MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
	MK_FLOW_ACTION(FLAG, 0),
	MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
	MK_FLOW_ACTION(DROP, 0),
	MK_FLOW_ACTION(COUNT, sizeof(struct rte_flow_action_count)),
	MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)),
	MK_FLOW_ACTION(PF, 0),
	MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
	MK_FLOW_ACTION(PORT_ID, sizeof(struct rte_flow_action_port_id)),
	MK_FLOW_ACTION(METER, sizeof(struct rte_flow_action_meter)),
	MK_FLOW_ACTION(SECURITY, sizeof(struct rte_flow_action_security)),
	MK_FLOW_ACTION(OF_DEC_NW_TTL, 0),
	MK_FLOW_ACTION(OF_POP_VLAN, 0),
	MK_FLOW_ACTION(OF_PUSH_VLAN,
		       sizeof(struct rte_flow_action_of_push_vlan)),
	MK_FLOW_ACTION(OF_SET_VLAN_VID,
		       sizeof(struct rte_flow_action_of_set_vlan_vid)),
	MK_FLOW_ACTION(OF_SET_VLAN_PCP,
		       sizeof(struct rte_flow_action_of_set_vlan_pcp)),
	MK_FLOW_ACTION(OF_POP_MPLS,
		       sizeof(struct rte_flow_action_of_pop_mpls)),
	MK_FLOW_ACTION(OF_PUSH_MPLS,
		       sizeof(struct rte_flow_action_of_push_mpls)),
	MK_FLOW_ACTION(VXLAN_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
	MK_FLOW_ACTION(VXLAN_DECAP, 0),
	MK_FLOW_ACTION(NVGRE_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
	MK_FLOW_ACTION(NVGRE_DECAP, 0),
	MK_FLOW_ACTION(RAW_ENCAP, sizeof(struct rte_flow_action_raw_encap)),
	MK_FLOW_ACTION(RAW_DECAP, sizeof(struct rte_flow_action_raw_decap)),
	MK_FLOW_ACTION(SET_IPV4_SRC,
		       sizeof(struct rte_flow_action_set_ipv4)),
	MK_FLOW_ACTION(SET_IPV4_DST,
		       sizeof(struct rte_flow_action_set_ipv4)),
	MK_FLOW_ACTION(SET_IPV6_SRC,
		       sizeof(struct rte_flow_action_set_ipv6)),
	MK_FLOW_ACTION(SET_IPV6_DST,
		       sizeof(struct rte_flow_action_set_ipv6)),
	MK_FLOW_ACTION(SET_TP_SRC,
		       sizeof(struct rte_flow_action_set_tp)),
	MK_FLOW_ACTION(SET_TP_DST,
		       sizeof(struct rte_flow_action_set_tp)),
	MK_FLOW_ACTION(MAC_SWAP, 0),
	MK_FLOW_ACTION(DEC_TTL, 0),
	MK_FLOW_ACTION(SET_TTL, sizeof(struct rte_flow_action_set_ttl)),
	MK_FLOW_ACTION(SET_MAC_SRC, sizeof(struct rte_flow_action_set_mac)),
	MK_FLOW_ACTION(SET_MAC_DST, sizeof(struct rte_flow_action_set_mac)),
	MK_FLOW_ACTION(INC_TCP_SEQ, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(DEC_TCP_SEQ, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(INC_TCP_ACK, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(DEC_TCP_ACK, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(SET_TAG, sizeof(struct rte_flow_action_set_tag)),
	MK_FLOW_ACTION(SET_META, sizeof(struct rte_flow_action_set_meta)),
	MK_FLOW_ACTION(SET_IPV4_DSCP, sizeof(struct rte_flow_action_set_dscp)),
	MK_FLOW_ACTION(SET_IPV6_DSCP, sizeof(struct rte_flow_action_set_dscp)),
	MK_FLOW_ACTION(AGE, sizeof(struct rte_flow_action_age)),
	MK_FLOW_ACTION(SAMPLE, sizeof(struct rte_flow_action_sample)),
	MK_FLOW_ACTION(MODIFY_FIELD,
		       sizeof(struct rte_flow_action_modify_field)),
	/**
	 * Indirect action represented as a handle of type
	 * (struct rte_flow_action_handle *) stored in the conf field (see
	 * struct rte_flow_action); no additional structure is needed to
	 * store the indirect action handle.
	 */
	MK_FLOW_ACTION(INDIRECT, 0),
	MK_FLOW_ACTION(CONNTRACK, sizeof(struct rte_flow_action_conntrack)),
	MK_FLOW_ACTION(PORT_REPRESENTOR, sizeof(struct rte_flow_action_ethdev)),
	MK_FLOW_ACTION(REPRESENTED_PORT, sizeof(struct rte_flow_action_ethdev)),
	MK_FLOW_ACTION(METER_MARK, sizeof(struct rte_flow_action_meter_mark)),
	MK_FLOW_ACTION(SEND_TO_KERNEL, 0),
};

int
rte_flow_dynf_metadata_register(void)
{
	int offset;
	int flag;

	static const struct rte_mbuf_dynfield desc_offs = {
		.name = RTE_MBUF_DYNFIELD_METADATA_NAME,
		.size = sizeof(uint32_t),
		.align = __alignof__(uint32_t),
	};
	static const struct rte_mbuf_dynflag desc_flag = {
		.name = RTE_MBUF_DYNFLAG_METADATA_NAME,
	};

	offset = rte_mbuf_dynfield_register(&desc_offs);
	if (offset < 0)
		goto error;
	flag = rte_mbuf_dynflag_register(&desc_flag);
	if (flag < 0)
		goto error;
	rte_flow_dynf_metadata_offs = offset;
	rte_flow_dynf_metadata_mask = RTE_BIT64(flag);

	rte_flow_trace_dynf_metadata_register(offset, RTE_BIT64(flag));

	return 0;

error:
	rte_flow_dynf_metadata_offs = -1;
	rte_flow_dynf_metadata_mask = UINT64_C(0);
	return -rte_errno;
}
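
/*
 * Example: registering the metadata dynamic field and reading it back from
 * a received mbuf (illustrative sketch, not part of the library; assumes
 * registration happens before device start and "m" is an mbuf matched by a
 * rule carrying a metadata action; handle_metadata() is a hypothetical
 * application hook):
 *
 *	if (rte_flow_dynf_metadata_register() < 0)
 *		rte_panic("cannot register metadata dynfield\n");
 *	...
 *	if (m->ol_flags & rte_flow_dynf_metadata_mask)
 *		handle_metadata(*RTE_FLOW_DYNF_METADATA(m));
 */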

static inline void
fts_enter(struct rte_eth_dev *dev)
{
	if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
		pthread_mutex_lock(&dev->data->flow_ops_mutex);
}

static inline void
fts_exit(struct rte_eth_dev *dev)
{
	if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
		pthread_mutex_unlock(&dev->data->flow_ops_mutex);
}

static int
flow_err(uint16_t port_id, int ret, struct rte_flow_error *error)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return rte_flow_error_set(error, EIO,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(EIO));
	return ret;
}

/* Get generic flow operations structure from a port. */
const struct rte_flow_ops *
rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops;
	int code;

	if (unlikely(!rte_eth_dev_is_valid_port(port_id)))
		code = ENODEV;
	else if (unlikely(dev->dev_ops->flow_ops_get == NULL))
		/* flow API not supported with this driver dev_ops */
		code = ENOSYS;
	else
		code = dev->dev_ops->flow_ops_get(dev, &ops);
	if (code == 0 && ops == NULL)
		/* flow API not supported with this device */
		code = ENOSYS;

	if (code != 0) {
		rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, rte_strerror(code));
		return NULL;
	}
	return ops;
}

/* Check whether a flow rule can be created on a given port. */
int
rte_flow_validate(uint16_t port_id,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;

	if (likely(!!attr) && attr->transfer &&
	    (attr->ingress || attr->egress)) {
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ATTR,
					  attr, "cannot use attr ingress/egress with attr transfer");
	}

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->validate)) {
		fts_enter(dev);
		ret = ops->validate(dev, attr, pattern, actions, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_validate(port_id, attr, pattern, actions, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Create a flow rule on a given port. */
struct rte_flow *
rte_flow_create(uint16_t port_id,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct rte_flow *flow;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return NULL;
	if (likely(!!ops->create)) {
		fts_enter(dev);
		flow = ops->create(dev, attr, pattern, actions, error);
		fts_exit(dev);
		if (flow == NULL)
			flow_err(port_id, -rte_errno, error);

		rte_flow_trace_create(port_id, attr, pattern, actions, flow);

		return flow;
	}
	rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(ENOSYS));
	return NULL;
}
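
/*
 * Example: validating a rule before creating it (illustrative sketch, not
 * part of the library; assumes "port_id" refers to a configured and started
 * port and that Rx queue 0 exists):
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *flow = NULL;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 */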

/* Destroy a flow rule on a given port. */
int
rte_flow_destroy(uint16_t port_id,
		 struct rte_flow *flow,
		 struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->destroy)) {
		fts_enter(dev);
		ret = ops->destroy(dev, flow, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_destroy(port_id, flow, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Destroy all flow rules associated with a port. */
int
rte_flow_flush(uint16_t port_id,
	       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->flush)) {
		fts_enter(dev);
		ret = ops->flush(dev, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_flush(port_id, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Query an existing flow rule. */
int
rte_flow_query(uint16_t port_id,
	       struct rte_flow *flow,
	       const struct rte_flow_action *action,
	       void *data,
	       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (!ops)
		return -rte_errno;
	if (likely(!!ops->query)) {
		fts_enter(dev);
		ret = ops->query(dev, flow, action, data, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_query(port_id, flow, action, data, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Restrict ingress traffic to the defined flow rules. */
int
rte_flow_isolate(uint16_t port_id,
		 int set,
		 struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (!ops)
		return -rte_errno;
	if (likely(!!ops->isolate)) {
		fts_enter(dev);
		ret = ops->isolate(dev, set, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_isolate(port_id, set, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Initialize flow error structure. */
int
rte_flow_error_set(struct rte_flow_error *error,
		   int code,
		   enum rte_flow_error_type type,
		   const void *cause,
		   const char *message)
{
	if (error) {
		*error = (struct rte_flow_error){
			.type = type,
			.cause = cause,
			.message = message,
		};
	}
	rte_errno = code;
	return -code;
}
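
/*
 * Example: surfacing the verbose error filled in by rte_flow_error_set()
 * (illustrative sketch; assumes "err", "flow" and "rte_errno" come from a
 * failed rte_flow_create() call as in the example above):
 *
 *	if (flow == NULL)
 *		printf("flow rule rejected (errno %d): %s\n", rte_errno,
 *		       err.message ? err.message : rte_strerror(rte_errno));
 */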

/** Pattern item specification types. */
enum rte_flow_conv_item_spec_type {
	RTE_FLOW_CONV_ITEM_SPEC,
	RTE_FLOW_CONV_ITEM_LAST,
	RTE_FLOW_CONV_ITEM_MASK,
};

/**
 * Copy pattern item specification.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] item
 *   Pattern item to copy specification from.
 * @param type
 *   Specification selector for either @p spec, @p last or @p mask.
 *
 * @return
 *   Number of bytes needed to store pattern item specification regardless
 *   of @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_item_spec(void *buf, const size_t size,
			const struct rte_flow_item *item,
			enum rte_flow_conv_item_spec_type type)
{
	size_t off;
	const void *data =
		type == RTE_FLOW_CONV_ITEM_SPEC ? item->spec :
		type == RTE_FLOW_CONV_ITEM_LAST ? item->last :
		type == RTE_FLOW_CONV_ITEM_MASK ? item->mask :
		NULL;

	switch (item->type) {
		union {
			const struct rte_flow_item_raw *raw;
		} spec;
		union {
			const struct rte_flow_item_raw *raw;
		} last;
		union {
			const struct rte_flow_item_raw *raw;
		} mask;
		union {
			const struct rte_flow_item_raw *raw;
		} src;
		union {
			struct rte_flow_item_raw *raw;
		} dst;
		size_t tmp;

	case RTE_FLOW_ITEM_TYPE_RAW:
		spec.raw = item->spec;
		last.raw = item->last ? item->last : item->spec;
		mask.raw = item->mask ? item->mask : &rte_flow_item_raw_mask;
		src.raw = data;
		dst.raw = buf;
		rte_memcpy(dst.raw,
			   (&(struct rte_flow_item_raw){
				.relative = src.raw->relative,
				.search = src.raw->search,
				.reserved = src.raw->reserved,
				.offset = src.raw->offset,
				.limit = src.raw->limit,
				.length = src.raw->length,
			   }),
			   size > sizeof(*dst.raw) ? sizeof(*dst.raw) : size);
		off = sizeof(*dst.raw);
		if (type == RTE_FLOW_CONV_ITEM_SPEC ||
		    (type == RTE_FLOW_CONV_ITEM_MASK &&
		     ((spec.raw->length & mask.raw->length) >=
		      (last.raw->length & mask.raw->length))))
			tmp = spec.raw->length & mask.raw->length;
		else
			tmp = last.raw->length & mask.raw->length;
		if (tmp) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.raw->pattern));
			if (size >= off + tmp)
				dst.raw->pattern = rte_memcpy
					((void *)((uintptr_t)dst.raw + off),
					 src.raw->pattern, tmp);
			off += tmp;
		}
		break;
	default:
		off = rte_flow_conv_copy(buf, data, size,
					 rte_flow_desc_item, item->type);
		break;
	}
	return off;
}

/**
 * Copy action configuration.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] action
 *   Action to copy configuration from.
 *
 * @return
 *   Number of bytes needed to store the action configuration regardless
 *   of @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_action_conf(void *buf, const size_t size,
			  const struct rte_flow_action *action)
{
	size_t off;

	switch (action->type) {
		union {
			const struct rte_flow_action_rss *rss;
			const struct rte_flow_action_vxlan_encap *vxlan_encap;
			const struct rte_flow_action_nvgre_encap *nvgre_encap;
		} src;
		union {
			struct rte_flow_action_rss *rss;
			struct rte_flow_action_vxlan_encap *vxlan_encap;
			struct rte_flow_action_nvgre_encap *nvgre_encap;
		} dst;
		size_t tmp;
		int ret;

	case RTE_FLOW_ACTION_TYPE_RSS:
		src.rss = action->conf;
		dst.rss = buf;
		rte_memcpy(dst.rss,
			   (&(struct rte_flow_action_rss){
				.func = src.rss->func,
				.level = src.rss->level,
				.types = src.rss->types,
				.key_len = src.rss->key_len,
				.queue_num = src.rss->queue_num,
			   }),
			   size > sizeof(*dst.rss) ? sizeof(*dst.rss) : size);
		off = sizeof(*dst.rss);
		if (src.rss->key_len && src.rss->key) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->key));
			tmp = sizeof(*src.rss->key) * src.rss->key_len;
			if (size >= off + tmp)
				dst.rss->key = rte_memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->key, tmp);
			off += tmp;
		}
		if (src.rss->queue_num) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->queue));
			tmp = sizeof(*src.rss->queue) * src.rss->queue_num;
			if (size >= off + tmp)
				dst.rss->queue = rte_memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->queue, tmp);
			off += tmp;
		}
		break;
	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
	case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
		src.vxlan_encap = action->conf;
		dst.vxlan_encap = buf;
		RTE_BUILD_BUG_ON(sizeof(*src.vxlan_encap) !=
				 sizeof(*src.nvgre_encap) ||
				 offsetof(struct rte_flow_action_vxlan_encap,
					  definition) !=
				 offsetof(struct rte_flow_action_nvgre_encap,
					  definition));
		off = sizeof(*dst.vxlan_encap);
		if (src.vxlan_encap->definition) {
			off = RTE_ALIGN_CEIL
				(off, sizeof(*dst.vxlan_encap->definition));
			ret = rte_flow_conv
				(RTE_FLOW_CONV_OP_PATTERN,
				 (void *)((uintptr_t)dst.vxlan_encap + off),
				 size > off ? size - off : 0,
				 src.vxlan_encap->definition, NULL);
			if (ret < 0)
				return 0;
			if (size >= off + ret)
				dst.vxlan_encap->definition =
					(void *)((uintptr_t)dst.vxlan_encap +
						 off);
			off += ret;
		}
		break;
	default:
		off = rte_flow_conv_copy(buf, action->conf, size,
					 rte_flow_desc_action, action->type);
		break;
	}
	return off;
}

/**
 * Copy a list of pattern items.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source pattern items.
 * @param num
 *   Maximum number of pattern items to process from @p src or 0 to process
 *   the entire list. In both cases, processing stops after
 *   RTE_FLOW_ITEM_TYPE_END is encountered.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store
 *   pattern items regardless of @p size on success (@p dst contents are
 *   truncated to @p size if not large enough), a negative errno value
 *   otherwise and rte_errno is set.
 */
static int
rte_flow_conv_pattern(struct rte_flow_item *dst,
		      const size_t size,
		      const struct rte_flow_item *src,
		      unsigned int num,
		      struct rte_flow_error *error)
{
	uintptr_t data = (uintptr_t)dst;
	size_t off;
	size_t ret;
	unsigned int i;

	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
		/* Allow PMD private flow item. */
		if (((int)src->type >= 0) &&
		    ((size_t)src->type >= RTE_DIM(rte_flow_desc_item) ||
		     !rte_flow_desc_item[src->type].name))
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, src,
				 "cannot convert unknown item type");
		if (size >= off + sizeof(*dst))
			*dst = (struct rte_flow_item){
				.type = src->type,
			};
		off += sizeof(*dst);
		if (!src->type)
			num = i + 1;
	}
	num = i;
	src -= num;
	dst -= num;
	do {
		if (src->spec) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_SPEC);
			if (size && size >= off + ret)
				dst->spec = (void *)(data + off);
			off += ret;
		}
		if (src->last) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_LAST);
			if (size && size >= off + ret)
				dst->last = (void *)(data + off);
			off += ret;
		}
		if (src->mask) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_MASK);
			if (size && size >= off + ret)
				dst->mask = (void *)(data + off);
			off += ret;
		}
		++src;
		++dst;
	} while (--num);
	return off;
}

/**
 * Copy a list of actions.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source actions.
 * @param num
 *   Maximum number of actions to process from @p src or 0 to process the
 *   entire list. In both cases, processing stops after
 *   RTE_FLOW_ACTION_TYPE_END is encountered.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store
 *   actions regardless of @p size on success (@p dst contents are truncated
 *   to @p size if not large enough), a negative errno value otherwise and
 *   rte_errno is set.
 */
static int
rte_flow_conv_actions(struct rte_flow_action *dst,
		      const size_t size,
		      const struct rte_flow_action *src,
		      unsigned int num,
		      struct rte_flow_error *error)
{
	uintptr_t data = (uintptr_t)dst;
	size_t off;
	size_t ret;
	unsigned int i;

	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
		/* Allow PMD private flow action. */
		if (((int)src->type >= 0) &&
		    ((size_t)src->type >= RTE_DIM(rte_flow_desc_action) ||
		     !rte_flow_desc_action[src->type].name))
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
				 src, "cannot convert unknown action type");
		if (size >= off + sizeof(*dst))
			*dst = (struct rte_flow_action){
				.type = src->type,
			};
		off += sizeof(*dst);
		if (!src->type)
			num = i + 1;
	}
	num = i;
	src -= num;
	dst -= num;
	do {
		if (src->conf) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_action_conf
				((void *)(data + off),
				 size > off ? size - off : 0, src);
			if (size && size >= off + ret)
				dst->conf = (void *)(data + off);
			off += ret;
		}
		++src;
		++dst;
	} while (--num);
	return off;
}

/**
 * Copy flow rule components.
 *
 * This comprises the flow rule descriptor itself, attributes, pattern and
 * actions list. NULL components in @p src are skipped.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source flow rule descriptor.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store all
 *   components including the descriptor regardless of @p size on success
 *   (@p dst contents are truncated to @p size if not large enough), a
 *   negative errno value otherwise and rte_errno is set.
 */
static int
rte_flow_conv_rule(struct rte_flow_conv_rule *dst,
		   const size_t size,
		   const struct rte_flow_conv_rule *src,
		   struct rte_flow_error *error)
{
	size_t off;
	int ret;

	rte_memcpy(dst,
		   (&(struct rte_flow_conv_rule){
			.attr = NULL,
			.pattern = NULL,
			.actions = NULL,
		   }),
		   size > sizeof(*dst) ? sizeof(*dst) : size);
	off = sizeof(*dst);
	if (src->attr_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		if (size && size >= off + sizeof(*dst->attr))
			dst->attr = rte_memcpy
				((void *)((uintptr_t)dst + off),
				 src->attr_ro, sizeof(*dst->attr));
		off += sizeof(*dst->attr);
	}
	if (src->pattern_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		ret = rte_flow_conv_pattern((void *)((uintptr_t)dst + off),
					    size > off ? size - off : 0,
					    src->pattern_ro, 0, error);
		if (ret < 0)
			return ret;
		if (size && size >= off + (size_t)ret)
			dst->pattern = (void *)((uintptr_t)dst + off);
		off += ret;
	}
	if (src->actions_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		ret = rte_flow_conv_actions((void *)((uintptr_t)dst + off),
					    size > off ? size - off : 0,
					    src->actions_ro, 0, error);
		if (ret < 0)
			return ret;
		if (size >= off + (size_t)ret)
			dst->actions = (void *)((uintptr_t)dst + off);
		off += ret;
	}
	return off;
}

/**
 * Retrieve the name of a pattern item/action type.
 *
 * @param is_action
 *   Nonzero when @p src represents an action type instead of a pattern item
 *   type.
 * @param is_ptr
 *   Nonzero to write string address instead of contents into @p dst.
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Depending on @p is_action, source pattern item or action type cast as a
 *   pointer.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store the
 *   name or its address regardless of @p size on success (@p dst contents
 *   are truncated to @p size if not large enough), a negative errno value
 *   otherwise and rte_errno is set.
 */
static int
rte_flow_conv_name(int is_action,
		   int is_ptr,
		   char *dst,
		   const size_t size,
		   const void *src,
		   struct rte_flow_error *error)
{
	struct desc_info {
		const struct rte_flow_desc_data *data;
		size_t num;
	};
	static const struct desc_info info_rep[2] = {
		{ rte_flow_desc_item, RTE_DIM(rte_flow_desc_item), },
		{ rte_flow_desc_action, RTE_DIM(rte_flow_desc_action), },
	};
	const struct desc_info *const info = &info_rep[!!is_action];
	unsigned int type = (uintptr_t)src;

	if (type >= info->num)
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			 "unknown object type to retrieve the name of");
	if (!is_ptr)
		return strlcpy(dst, info->data[type].name, size);
	if (size >= sizeof(const char **))
		*((const char **)dst) = info->data[type].name;
	return sizeof(const char **);
}

/** Helper function to convert flow API objects. */
int
rte_flow_conv(enum rte_flow_conv_op op,
	      void *dst,
	      size_t size,
	      const void *src,
	      struct rte_flow_error *error)
{
	int ret;

	switch (op) {
		const struct rte_flow_attr *attr;

	case RTE_FLOW_CONV_OP_NONE:
		ret = 0;
		break;
	case RTE_FLOW_CONV_OP_ATTR:
		attr = src;
		if (size > sizeof(*attr))
			size = sizeof(*attr);
		rte_memcpy(dst, attr, size);
		ret = sizeof(*attr);
		break;
	case RTE_FLOW_CONV_OP_ITEM:
		ret = rte_flow_conv_pattern(dst, size, src, 1, error);
		break;
	case RTE_FLOW_CONV_OP_ACTION:
		ret = rte_flow_conv_actions(dst, size, src, 1, error);
		break;
	case RTE_FLOW_CONV_OP_PATTERN:
		ret = rte_flow_conv_pattern(dst, size, src, 0, error);
		break;
	case RTE_FLOW_CONV_OP_ACTIONS:
		ret = rte_flow_conv_actions(dst, size, src, 0, error);
		break;
	case RTE_FLOW_CONV_OP_RULE:
		ret = rte_flow_conv_rule(dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ITEM_NAME:
		ret = rte_flow_conv_name(0, 0, dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ACTION_NAME:
		ret = rte_flow_conv_name(1, 0, dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ITEM_NAME_PTR:
		ret = rte_flow_conv_name(0, 1, dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ACTION_NAME_PTR:
		ret = rte_flow_conv_name(1, 1, dst, size, src, error);
		break;
	default:
		ret = rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			 "unknown object conversion operation");
	}

	rte_flow_trace_conv(op, dst, size, src, ret);

	return ret;
}
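
/*
 * Example: the usual two-pass use of rte_flow_conv() - query the required
 * size with a zero-sized destination, then convert for real (illustrative
 * sketch; assumes "attr", "pattern" and "actions" describe an existing
 * rule):
 *
 *	struct rte_flow_conv_rule src = {
 *		.attr_ro = &attr,
 *		.pattern_ro = pattern,
 *		.actions_ro = actions,
 *	};
 *	int len = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &src, NULL);
 *
 *	if (len > 0) {
 *		struct rte_flow_conv_rule *dst = malloc(len);
 *
 *		if (dst != NULL)
 *			rte_flow_conv(RTE_FLOW_CONV_OP_RULE, dst, len,
 *				      &src, NULL);
 *	}
 */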

/** Store a full rte_flow description. */
size_t
rte_flow_copy(struct rte_flow_desc *desc, size_t len,
	      const struct rte_flow_attr *attr,
	      const struct rte_flow_item *items,
	      const struct rte_flow_action *actions)
{
	/*
	 * Overlap struct rte_flow_conv_rule with struct rte_flow_desc in
	 * order to convert the former to the latter without wasting space.
	 */
	struct rte_flow_conv_rule *dst =
		len ?
		(void *)((uintptr_t)desc +
			 (offsetof(struct rte_flow_desc, actions) -
			  offsetof(struct rte_flow_conv_rule, actions))) :
		NULL;
	size_t dst_size =
		len > sizeof(*desc) - sizeof(*dst) ?
		len - (sizeof(*desc) - sizeof(*dst)) :
		0;
	struct rte_flow_conv_rule src = {
		.attr_ro = NULL,
		.pattern_ro = items,
		.actions_ro = actions,
	};
	int ret;

	RTE_BUILD_BUG_ON(sizeof(struct rte_flow_desc) <
			 sizeof(struct rte_flow_conv_rule));
	if (dst_size &&
	    (&dst->pattern != &desc->items ||
	     &dst->actions != &desc->actions ||
	     (uintptr_t)(dst + 1) != (uintptr_t)(desc + 1))) {
		rte_errno = EINVAL;
		return 0;
	}
	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, dst, dst_size, &src, NULL);
	if (ret < 0)
		return 0;
	ret += sizeof(*desc) - sizeof(*dst);
	rte_memcpy(desc,
		   (&(struct rte_flow_desc){
			.size = ret,
			.attr = *attr,
			.items = dst_size ? dst->pattern : NULL,
			.actions = dst_size ? dst->actions : NULL,
		   }),
		   len > sizeof(*desc) ? sizeof(*desc) : len);

	rte_flow_trace_copy(desc, len, attr, items, actions, ret);

	return ret;
}
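
/*
 * Example: sizing and storing a self-contained rule description with
 * rte_flow_copy() (illustrative sketch; a first call with len == 0 only
 * returns the required size):
 *
 *	size_t len = rte_flow_copy(NULL, 0, &attr, pattern, actions);
 *	struct rte_flow_desc *desc = len ? malloc(len) : NULL;
 *
 *	if (desc != NULL)
 *		rte_flow_copy(desc, len, &attr, pattern, actions);
 */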

int
rte_flow_dev_dump(uint16_t port_id, struct rte_flow *flow,
			FILE *file, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->dev_dump)) {
		fts_enter(dev);
		ret = ops->dev_dump(dev, flow, file, error);
		fts_exit(dev);
		return flow_err(port_id, ret, error);
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

int
rte_flow_get_aged_flows(uint16_t port_id, void **contexts,
		    uint32_t nb_contexts, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->get_aged_flows)) {
		fts_enter(dev);
		ret = ops->get_aged_flows(dev, contexts, nb_contexts, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_get_aged_flows(port_id, contexts, nb_contexts, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_get_q_aged_flows(uint16_t port_id, uint32_t queue_id, void **contexts,
			  uint32_t nb_contexts, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->get_q_aged_flows)) {
		fts_enter(dev);
		ret = ops->get_q_aged_flows(dev, queue_id, contexts,
					    nb_contexts, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_get_q_aged_flows(port_id, queue_id, contexts,
						nb_contexts, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

struct rte_flow_action_handle *
rte_flow_action_handle_create(uint16_t port_id,
			      const struct rte_flow_indir_action_conf *conf,
			      const struct rte_flow_action *action,
			      struct rte_flow_error *error)
{
	struct rte_flow_action_handle *handle;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return NULL;
	if (unlikely(!ops->action_handle_create)) {
		rte_flow_error_set(error, ENOSYS,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   rte_strerror(ENOSYS));
		return NULL;
	}
	handle = ops->action_handle_create(&rte_eth_devices[port_id],
					   conf, action, error);
	if (handle == NULL)
		flow_err(port_id, -rte_errno, error);

	rte_flow_trace_action_handle_create(port_id, conf, action, handle);

	return handle;
}
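
/*
 * Example: sharing one counter between several rules through an indirect
 * action (illustrative sketch; the returned handle goes into the conf field
 * of an RTE_FLOW_ACTION_TYPE_INDIRECT action, "err" is a caller-provided
 * struct rte_flow_error):
 *
 *	struct rte_flow_indir_action_conf ia_conf = { .ingress = 1 };
 *	struct rte_flow_action count = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *	};
 *	struct rte_flow_action_handle *handle =
 *		rte_flow_action_handle_create(port_id, &ia_conf, &count, &err);
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_INDIRECT, .conf = handle },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 */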

int
rte_flow_action_handle_destroy(uint16_t port_id,
			       struct rte_flow_action_handle *handle,
			       struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(!ops->action_handle_destroy))
		return rte_flow_error_set(error, ENOSYS,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOSYS));
	ret = ops->action_handle_destroy(&rte_eth_devices[port_id],
					 handle, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_action_handle_destroy(port_id, handle, ret);

	return ret;
}

int
rte_flow_action_handle_update(uint16_t port_id,
			      struct rte_flow_action_handle *handle,
			      const void *update,
			      struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(!ops->action_handle_update))
		return rte_flow_error_set(error, ENOSYS,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOSYS));
	ret = ops->action_handle_update(&rte_eth_devices[port_id], handle,
					update, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_action_handle_update(port_id, handle, update, ret);

	return ret;
}

int
rte_flow_action_handle_query(uint16_t port_id,
			     const struct rte_flow_action_handle *handle,
			     void *data,
			     struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(!ops->action_handle_query))
		return rte_flow_error_set(error, ENOSYS,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOSYS));
	ret = ops->action_handle_query(&rte_eth_devices[port_id], handle,
				       data, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_action_handle_query(port_id, handle, data, ret);

	return ret;
}

int
rte_flow_tunnel_decap_set(uint16_t port_id,
			  struct rte_flow_tunnel *tunnel,
			  struct rte_flow_action **actions,
			  uint32_t *num_of_actions,
			  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_decap_set)) {
		ret = flow_err(port_id,
			       ops->tunnel_decap_set(dev, tunnel, actions,
						     num_of_actions, error),
			       error);

		rte_flow_trace_tunnel_decap_set(port_id, tunnel, actions,
						num_of_actions, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_tunnel_match(uint16_t port_id,
		      struct rte_flow_tunnel *tunnel,
		      struct rte_flow_item **items,
		      uint32_t *num_of_items,
		      struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_match)) {
		ret = flow_err(port_id,
			       ops->tunnel_match(dev, tunnel, items,
						 num_of_items, error),
			       error);

		rte_flow_trace_tunnel_match(port_id, tunnel, items, num_of_items,
					    ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_get_restore_info(uint16_t port_id,
			  struct rte_mbuf *m,
			  struct rte_flow_restore_info *restore_info,
			  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->get_restore_info)) {
		ret = flow_err(port_id,
			       ops->get_restore_info(dev, m, restore_info,
						     error),
			       error);

		rte_flow_trace_get_restore_info(port_id, m, restore_info, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_tunnel_action_decap_release(uint16_t port_id,
				     struct rte_flow_action *actions,
				     uint32_t num_of_actions,
				     struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_action_decap_release)) {
		ret = flow_err(port_id,
			       ops->tunnel_action_decap_release(dev, actions,
								num_of_actions,
								error),
			       error);

		rte_flow_trace_tunnel_action_decap_release(port_id, actions,
							   num_of_actions, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_tunnel_item_release(uint16_t port_id,
			     struct rte_flow_item *items,
			     uint32_t num_of_items,
			     struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_item_release)) {
		ret = flow_err(port_id,
			       ops->tunnel_item_release(dev, items,
							num_of_items, error),
			       error);

		rte_flow_trace_tunnel_item_release(port_id, items, num_of_items, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_pick_transfer_proxy(uint16_t port_id, uint16_t *proxy_port_id,
			     struct rte_flow_error *error)
{
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_eth_dev *dev;
	int ret;

	if (unlikely(ops == NULL))
		return -rte_errno;

	if (ops->pick_transfer_proxy == NULL) {
		*proxy_port_id = port_id;
		return 0;
	}

	dev = &rte_eth_devices[port_id];

	ret = flow_err(port_id,
		       ops->pick_transfer_proxy(dev, proxy_port_id, error),
		       error);

	rte_flow_trace_pick_transfer_proxy(port_id, proxy_port_id, ret);

	return ret;
}

struct rte_flow_item_flex_handle *
rte_flow_flex_item_create(uint16_t port_id,
			  const struct rte_flow_item_flex_conf *conf,
			  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow_item_flex_handle *handle;

	if (unlikely(!ops))
		return NULL;
	if (unlikely(!ops->flex_item_create)) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, rte_strerror(ENOTSUP));
		return NULL;
	}
	handle = ops->flex_item_create(dev, conf, error);
	if (handle == NULL)
		flow_err(port_id, -rte_errno, error);

	rte_flow_trace_flex_item_create(port_id, conf, handle);

	return handle;
}

int
rte_flow_flex_item_release(uint16_t port_id,
			   const struct rte_flow_item_flex_handle *handle,
			   struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops || !ops->flex_item_release))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOTSUP));
	ret = ops->flex_item_release(dev, handle, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_flex_item_release(port_id, handle, ret);

	return ret;
}

int
rte_flow_info_get(uint16_t port_id,
		  struct rte_flow_port_info *port_info,
		  struct rte_flow_queue_info *queue_info,
		  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (dev->data->dev_configured == 0) {
		RTE_FLOW_LOG(INFO,
			"Device with port_id=%"PRIu16" is not configured.\n",
			port_id);
		return -EINVAL;
	}
	if (port_info == NULL) {
		RTE_FLOW_LOG(ERR, "Port %"PRIu16" info is NULL.\n", port_id);
		return -EINVAL;
	}
	if (likely(!!ops->info_get)) {
		ret = flow_err(port_id,
			       ops->info_get(dev, port_info, queue_info, error),
			       error);

		rte_flow_trace_info_get(port_id, port_info, queue_info, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_configure(uint16_t port_id,
		   const struct rte_flow_port_attr *port_attr,
		   uint16_t nb_queue,
		   const struct rte_flow_queue_attr *queue_attr[],
		   struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (dev->data->dev_configured == 0) {
		RTE_FLOW_LOG(INFO,
			"Device with port_id=%"PRIu16" is not configured.\n",
			port_id);
		return -EINVAL;
	}
	if (dev->data->dev_started != 0) {
		RTE_FLOW_LOG(INFO,
			"Device with port_id=%"PRIu16" already started.\n",
			port_id);
		return -EINVAL;
	}
	if (port_attr == NULL) {
		RTE_FLOW_LOG(ERR, "Port %"PRIu16" port attr is NULL.\n", port_id);
		return -EINVAL;
	}
	if (queue_attr == NULL) {
		RTE_FLOW_LOG(ERR, "Port %"PRIu16" queue attr is NULL.\n", port_id);
		return -EINVAL;
	}
	if (likely(!!ops->configure)) {
		ret = ops->configure(dev, port_attr, nb_queue, queue_attr, error);
		if (ret == 0)
			dev->data->flow_configured = 1;
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_configure(port_id, port_attr, nb_queue, queue_attr, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}
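
/*
 * Example: pre-allocating flow engine resources before rte_eth_dev_start()
 * (illustrative sketch; one flow queue with 64 entries, and the counter
 * count is an arbitrary assumption):
 *
 *	struct rte_flow_port_attr port_attr = { .nb_counters = 128 };
 *	struct rte_flow_queue_attr q_attr = { .size = 64 };
 *	const struct rte_flow_queue_attr *q_attrs[] = { &q_attr };
 *	struct rte_flow_error err;
 *
 *	if (rte_flow_configure(port_id, &port_attr, 1, q_attrs, &err) != 0)
 *		rte_exit(EXIT_FAILURE, "rte_flow_configure: %s\n",
 *			 err.message ? err.message : "unknown cause");
 */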

struct rte_flow_pattern_template *
rte_flow_pattern_template_create(uint16_t port_id,
		const struct rte_flow_pattern_template_attr *template_attr,
		const struct rte_flow_item pattern[],
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow_pattern_template *template;

	if (unlikely(!ops))
		return NULL;
	if (dev->data->flow_configured == 0) {
		RTE_FLOW_LOG(INFO,
			"Flow engine on port_id=%"PRIu16" is not configured.\n",
			port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_STATE,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (template_attr == NULL) {
		RTE_FLOW_LOG(ERR,
			     "Port %"PRIu16" template attr is NULL.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (pattern == NULL) {
		RTE_FLOW_LOG(ERR,
			     "Port %"PRIu16" pattern is NULL.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (likely(!!ops->pattern_template_create)) {
		template = ops->pattern_template_create(dev, template_attr,
							pattern, error);
		if (template == NULL)
			flow_err(port_id, -rte_errno, error);

		rte_flow_trace_pattern_template_create(port_id, template_attr,
						       pattern, template);

		return template;
	}
	rte_flow_error_set(error, ENOTSUP,
			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(ENOTSUP));
	return NULL;
}
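
/*
 * Example: creating an ingress pattern template for ETH / IPV4 matching
 * (illustrative sketch; requires a prior successful rte_flow_configure()
 * on the port):
 *
 *	struct rte_flow_pattern_template_attr pt_attr = { .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_pattern_template *pt =
 *		rte_flow_pattern_template_create(port_id, &pt_attr,
 *						 pattern, &err);
 */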

int
rte_flow_pattern_template_destroy(uint16_t port_id,
		struct rte_flow_pattern_template *pattern_template,
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(pattern_template == NULL))
		return 0;
	if (likely(!!ops->pattern_template_destroy)) {
		ret = flow_err(port_id,
			       ops->pattern_template_destroy(dev,
							     pattern_template,
							     error),
			       error);

		rte_flow_trace_pattern_template_destroy(port_id, pattern_template,
							ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

struct rte_flow_actions_template *
rte_flow_actions_template_create(uint16_t port_id,
			const struct rte_flow_actions_template_attr *template_attr,
			const struct rte_flow_action actions[],
			const struct rte_flow_action masks[],
			struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow_actions_template *template;

	if (unlikely(!ops))
		return NULL;
	if (dev->data->flow_configured == 0) {
		RTE_FLOW_LOG(INFO,
			"Flow engine on port_id=%"PRIu16" is not configured.\n",
			port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_STATE,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (template_attr == NULL) {
		RTE_FLOW_LOG(ERR,
			     "Port %"PRIu16" template attr is NULL.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (actions == NULL) {
		RTE_FLOW_LOG(ERR,
			     "Port %"PRIu16" actions is NULL.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
1741 	if (masks == NULL) {
1742 		RTE_FLOW_LOG(ERR,
1743 			     "Port %"PRIu16" masks is NULL.\n",
1744 			     port_id);
1745 		rte_flow_error_set(error, EINVAL,
1746 				   RTE_FLOW_ERROR_TYPE_ATTR,
1747 				   NULL, rte_strerror(EINVAL));
1748 
1749 	}
1750 	if (likely(!!ops->actions_template_create)) {
1751 		template = ops->actions_template_create(dev, template_attr,
1752 							actions, masks, error);
1753 		if (template == NULL)
1754 			flow_err(port_id, -rte_errno, error);
1755 
1756 		rte_flow_trace_actions_template_create(port_id, template_attr, actions,
1757 						       masks, template);
1758 
1759 		return template;
1760 	}
1761 	rte_flow_error_set(error, ENOTSUP,
1762 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1763 			   NULL, rte_strerror(ENOTSUP));
1764 	return NULL;
1765 }
1766 
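/*
 * Destroy an actions template. A NULL template is a successful no-op.
 */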
int
rte_flow_actions_template_destroy(uint16_t port_id,
			struct rte_flow_actions_template *actions_template,
			struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(actions_template == NULL))
		return 0;
	if (likely(!!ops->actions_template_destroy)) {
		ret = flow_err(port_id,
			       ops->actions_template_destroy(dev,
							     actions_template,
							     error),
			       error);

		rte_flow_trace_actions_template_destroy(port_id, actions_template,
							ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

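/*
 * Combine pattern and actions templates into a template table. Flows are
 * then enqueued against the table with rte_flow_async_create().
 */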
struct rte_flow_template_table *
rte_flow_template_table_create(uint16_t port_id,
			const struct rte_flow_template_table_attr *table_attr,
			struct rte_flow_pattern_template *pattern_templates[],
			uint8_t nb_pattern_templates,
			struct rte_flow_actions_template *actions_templates[],
			uint8_t nb_actions_templates,
			struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow_template_table *table;

	if (unlikely(!ops))
		return NULL;
	if (dev->data->flow_configured == 0) {
		RTE_FLOW_LOG(INFO,
			"Flow engine on port_id=%"PRIu16" is not configured.\n",
			port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_STATE,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (table_attr == NULL) {
		RTE_FLOW_LOG(ERR,
			     "Port %"PRIu16" table attr is NULL.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (pattern_templates == NULL) {
		RTE_FLOW_LOG(ERR,
			     "Port %"PRIu16" pattern templates is NULL.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (actions_templates == NULL) {
		RTE_FLOW_LOG(ERR,
			     "Port %"PRIu16" actions templates is NULL.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (likely(!!ops->template_table_create)) {
		table = ops->template_table_create(dev, table_attr,
					pattern_templates, nb_pattern_templates,
					actions_templates, nb_actions_templates,
					error);
		if (table == NULL)
			flow_err(port_id, -rte_errno, error);

		rte_flow_trace_template_table_create(port_id, table_attr,
						     pattern_templates,
						     nb_pattern_templates,
						     actions_templates,
						     nb_actions_templates, table);

		return table;
	}
	rte_flow_error_set(error, ENOTSUP,
			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(ENOTSUP));
	return NULL;
}

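/*
 * Destroy a template table. A NULL table is a successful no-op; all flows
 * created from the table are expected to have been destroyed beforehand.
 */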
int
rte_flow_template_table_destroy(uint16_t port_id,
				struct rte_flow_template_table *template_table,
				struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(template_table == NULL))
		return 0;
	if (likely(!!ops->template_table_destroy)) {
		ret = flow_err(port_id,
			       ops->template_table_destroy(dev,
							   template_table,
							   error),
			       error);

		rte_flow_trace_template_table_destroy(port_id, template_table,
						      ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

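/*
 * Enqueue a flow creation on the given queue. This is a fast-path call:
 * unlike the template/table setup functions above, it performs no parameter
 * validation and assumes the port, templates and table are already set up.
 *
 * Illustrative sequence (error handling omitted; op_attr, table, pattern,
 * actions and results are assumed to have been prepared by the caller with
 * the functions in this file):
 *
 *	flow = rte_flow_async_create(port_id, queue_id, &op_attr, table,
 *				     pattern, 0, actions, 0, NULL, &error);
 *	rte_flow_push(port_id, queue_id, &error);
 *	n = rte_flow_pull(port_id, queue_id, results, RTE_DIM(results), &error);
 */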
struct rte_flow *
rte_flow_async_create(uint16_t port_id,
		      uint32_t queue_id,
		      const struct rte_flow_op_attr *op_attr,
		      struct rte_flow_template_table *template_table,
		      const struct rte_flow_item pattern[],
		      uint8_t pattern_template_index,
		      const struct rte_flow_action actions[],
		      uint8_t actions_template_index,
		      void *user_data,
		      struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow *flow;

	flow = ops->async_create(dev, queue_id,
				 op_attr, template_table,
				 pattern, pattern_template_index,
				 actions, actions_template_index,
				 user_data, error);
	if (flow == NULL)
		flow_err(port_id, -rte_errno, error);

	rte_flow_trace_async_create(port_id, queue_id, op_attr, template_table,
				    pattern, pattern_template_index, actions,
				    actions_template_index, user_data, flow);

	return flow;
}

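/*
 * Enqueue a flow destruction on the given queue; completion is reported
 * through rte_flow_pull(), like the other asynchronous operations here.
 */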
int
rte_flow_async_destroy(uint16_t port_id,
		       uint32_t queue_id,
		       const struct rte_flow_op_attr *op_attr,
		       struct rte_flow *flow,
		       void *user_data,
		       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	ret = flow_err(port_id,
		       ops->async_destroy(dev, queue_id,
					  op_attr, flow,
					  user_data, error),
		       error);

	rte_flow_trace_async_destroy(port_id, queue_id, op_attr, flow,
				     user_data, ret);

	return ret;
}

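/*
 * Push all operations enqueued so far on the given queue to the hardware.
 */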
int
rte_flow_push(uint16_t port_id,
	      uint32_t queue_id,
	      struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	ret = flow_err(port_id,
		       ops->push(dev, queue_id, error),
		       error);

	rte_flow_trace_push(port_id, queue_id, ret);

	return ret;
}

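/*
 * Poll a queue for completed operations. On success the return value is
 * the number of results stored in res[] (at most n_res); on failure it is
 * a negative errno.
 */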
int
rte_flow_pull(uint16_t port_id,
	      uint32_t queue_id,
	      struct rte_flow_op_result res[],
	      uint16_t n_res,
	      struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;
	int rc;

	ret = ops->pull(dev, queue_id, res, n_res, error);
	rc = ret ? ret : flow_err(port_id, ret, error);

	rte_flow_trace_pull(port_id, queue_id, res, n_res, rc);

	return rc;
}

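/*
 * Enqueue creation of an indirect action handle; the asynchronous
 * counterpart of rte_flow_action_handle_create().
 */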
struct rte_flow_action_handle *
rte_flow_async_action_handle_create(uint16_t port_id,
		uint32_t queue_id,
		const struct rte_flow_op_attr *op_attr,
		const struct rte_flow_indir_action_conf *indir_action_conf,
		const struct rte_flow_action *action,
		void *user_data,
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow_action_handle *handle;

	handle = ops->async_action_handle_create(dev, queue_id, op_attr,
					     indir_action_conf, action, user_data, error);
	if (handle == NULL)
		flow_err(port_id, -rte_errno, error);

	rte_flow_trace_async_action_handle_create(port_id, queue_id, op_attr,
						  indir_action_conf, action,
						  user_data, handle);

	return handle;
}

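/*
 * Enqueue destruction of an indirect action handle.
 */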
int
rte_flow_async_action_handle_destroy(uint16_t port_id,
		uint32_t queue_id,
		const struct rte_flow_op_attr *op_attr,
		struct rte_flow_action_handle *action_handle,
		void *user_data,
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	ret = ops->async_action_handle_destroy(dev, queue_id, op_attr,
					   action_handle, user_data, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_async_action_handle_destroy(port_id, queue_id, op_attr,
						   action_handle, user_data, ret);

	return ret;
}

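/*
 * Enqueue an update of an indirect action handle; the layout of the update
 * argument depends on the action type.
 */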
int
rte_flow_async_action_handle_update(uint16_t port_id,
		uint32_t queue_id,
		const struct rte_flow_op_attr *op_attr,
		struct rte_flow_action_handle *action_handle,
		const void *update,
		void *user_data,
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	ret = ops->async_action_handle_update(dev, queue_id, op_attr,
					  action_handle, update, user_data, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_async_action_handle_update(port_id, queue_id, op_attr,
						  action_handle, update,
						  user_data, ret);

	return ret;
}

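/*
 * Enqueue a query of an indirect action handle; the result is written to
 * data once the operation completes.
 */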
int
rte_flow_async_action_handle_query(uint16_t port_id,
		uint32_t queue_id,
		const struct rte_flow_op_attr *op_attr,
		const struct rte_flow_action_handle *action_handle,
		void *data,
		void *user_data,
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	ret = ops->async_action_handle_query(dev, queue_id, op_attr,
					  action_handle, data, user_data, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_async_action_handle_query(port_id, queue_id, op_attr,
						 action_handle, data, user_data,
						 ret);

	return ret;
}
