/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <errno.h>
#include <stddef.h>
#include <stdint.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_branch_prediction.h>
#include <rte_string_fns.h>
#include <rte_mbuf_dyn.h>
#include "rte_ethdev.h"
#include "rte_flow_driver.h"
#include "rte_flow.h"

#include "ethdev_trace.h"

/* Mbuf dynamic field name for metadata. */
int32_t rte_flow_dynf_metadata_offs = -1;

/* Mbuf dynamic field flag bit number for metadata. */
uint64_t rte_flow_dynf_metadata_mask;

/**
 * Flow elements description tables.
 */
struct rte_flow_desc_data {
	const char *name;
	size_t size;
	size_t (*desc_fn)(void *dst, const void *src);
};

/**
 * Copy flow element data into a destination buffer.
 *
 * @param buf
 * Destination memory.
 * @param data
 * Source memory.
 * @param size
 * Requested copy size.
 * @param desc
 * rte_flow_desc_item - for flow item conversion.
 * rte_flow_desc_action - for flow action conversion.
 * @param type
 * Offset into the desc param or negative value for private flow elements.
 *
 * @return
 * Number of bytes needed to store the element, regardless of @p size.
 */
static inline size_t
rte_flow_conv_copy(void *buf, const void *data, const size_t size,
		   const struct rte_flow_desc_data *desc, int type)
{
	/**
	 * Allow PMD private flow item
	 */
	bool rte_type = type >= 0;

	size_t sz = rte_type ? desc[type].size : sizeof(void *);
	if (buf == NULL || data == NULL)
		return 0;
	rte_memcpy(buf, data, (size > sz ? sz : size));
	if (rte_type && desc[type].desc_fn)
		sz += desc[type].desc_fn(size > 0 ? buf : NULL, data);
	return sz;
}

static size_t
rte_flow_item_flex_conv(void *buf, const void *data)
{
	struct rte_flow_item_flex *dst = buf;
	const struct rte_flow_item_flex *src = data;
	if (buf) {
		dst->pattern = rte_memcpy
			((void *)((uintptr_t)(dst + 1)), src->pattern,
			 src->length);
	}
	return src->length;
}

/** Generate flow_item[] entry. */
#define MK_FLOW_ITEM(t, s) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s,               \
		.desc_fn = NULL,\
	}

#define MK_FLOW_ITEM_FN(t, s, fn) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = {\
		.name = # t,                 \
		.size = s,                   \
		.desc_fn = fn,               \
	}

/** Information about known flow pattern items. */
static const struct rte_flow_desc_data rte_flow_desc_item[] = {
	MK_FLOW_ITEM(END, 0),
	MK_FLOW_ITEM(VOID, 0),
	MK_FLOW_ITEM(INVERT, 0),
	MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
	MK_FLOW_ITEM(PORT_ID, sizeof(struct rte_flow_item_port_id)),
	MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)),
	MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
	MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
	MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
	MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
	MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
	MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
	MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
	MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
	MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
	MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
	MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
	MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
	MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
	MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)),
	MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(ESP, sizeof(struct rte_flow_item_esp)),
	MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
	MK_FLOW_ITEM(VXLAN_GPE, sizeof(struct rte_flow_item_vxlan_gpe)),
	MK_FLOW_ITEM(ARP_ETH_IPV4, sizeof(struct rte_flow_item_arp_eth_ipv4)),
	MK_FLOW_ITEM(IPV6_EXT, sizeof(struct rte_flow_item_ipv6_ext)),
	MK_FLOW_ITEM(IPV6_FRAG_EXT, sizeof(struct rte_flow_item_ipv6_frag_ext)),
	MK_FLOW_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
	MK_FLOW_ITEM(ICMP6_ECHO_REQUEST, sizeof(struct rte_flow_item_icmp6_echo)),
	MK_FLOW_ITEM(ICMP6_ECHO_REPLY, sizeof(struct rte_flow_item_icmp6_echo)),
	MK_FLOW_ITEM(ICMP6_ND_NS, sizeof(struct rte_flow_item_icmp6_nd_ns)),
	MK_FLOW_ITEM(ICMP6_ND_NA, sizeof(struct rte_flow_item_icmp6_nd_na)),
	MK_FLOW_ITEM(ICMP6_ND_OPT, sizeof(struct rte_flow_item_icmp6_nd_opt)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_SLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_TLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
	MK_FLOW_ITEM(MARK, sizeof(struct rte_flow_item_mark)),
	MK_FLOW_ITEM(META, sizeof(struct rte_flow_item_meta)),
	MK_FLOW_ITEM(TAG, sizeof(struct rte_flow_item_tag)),
	MK_FLOW_ITEM(GRE_KEY, sizeof(rte_be32_t)),
	MK_FLOW_ITEM(GRE_OPTION, sizeof(struct rte_flow_item_gre_opt)),
	MK_FLOW_ITEM(GTP_PSC, sizeof(struct rte_flow_item_gtp_psc)),
	MK_FLOW_ITEM(PPPOES, sizeof(struct rte_flow_item_pppoe)),
	MK_FLOW_ITEM(PPPOED, sizeof(struct rte_flow_item_pppoe)),
	MK_FLOW_ITEM(PPPOE_PROTO_ID,
			sizeof(struct rte_flow_item_pppoe_proto_id)),
	MK_FLOW_ITEM(NSH, sizeof(struct rte_flow_item_nsh)),
	MK_FLOW_ITEM(IGMP, sizeof(struct rte_flow_item_igmp)),
	MK_FLOW_ITEM(AH, sizeof(struct rte_flow_item_ah)),
	MK_FLOW_ITEM(HIGIG2, sizeof(struct rte_flow_item_higig2_hdr)),
	MK_FLOW_ITEM(L2TPV3OIP, sizeof(struct rte_flow_item_l2tpv3oip)),
	MK_FLOW_ITEM(PFCP, sizeof(struct rte_flow_item_pfcp)),
	MK_FLOW_ITEM(ECPRI, sizeof(struct rte_flow_item_ecpri)),
	MK_FLOW_ITEM(GENEVE_OPT, sizeof(struct rte_flow_item_geneve_opt)),
	MK_FLOW_ITEM(INTEGRITY, sizeof(struct rte_flow_item_integrity)),
	MK_FLOW_ITEM(CONNTRACK, sizeof(uint32_t)),
	MK_FLOW_ITEM(PORT_REPRESENTOR, sizeof(struct rte_flow_item_ethdev)),
	MK_FLOW_ITEM(REPRESENTED_PORT, sizeof(struct rte_flow_item_ethdev)),
	MK_FLOW_ITEM_FN(FLEX, sizeof(struct rte_flow_item_flex),
			rte_flow_item_flex_conv),
	MK_FLOW_ITEM(L2TPV2, sizeof(struct rte_flow_item_l2tpv2)),
	MK_FLOW_ITEM(PPP, sizeof(struct rte_flow_item_ppp)),
	MK_FLOW_ITEM(METER_COLOR, sizeof(struct rte_flow_item_meter_color)),
	MK_FLOW_ITEM(IPV6_ROUTING_EXT, sizeof(struct rte_flow_item_ipv6_routing_ext)),
	MK_FLOW_ITEM(QUOTA, sizeof(struct rte_flow_item_quota)),
	MK_FLOW_ITEM(AGGR_AFFINITY, sizeof(struct rte_flow_item_aggr_affinity)),
};

/** Generate flow_action[] entry. */
#define MK_FLOW_ACTION(t, s) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
		.desc_fn = NULL,\
	}

#define MK_FLOW_ACTION_FN(t, fn) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = 0, \
		.desc_fn = fn,\
	}


/** Information about known flow actions. */
static const struct rte_flow_desc_data rte_flow_desc_action[] = {
	MK_FLOW_ACTION(END, 0),
	MK_FLOW_ACTION(VOID, 0),
	MK_FLOW_ACTION(PASSTHRU, 0),
	MK_FLOW_ACTION(JUMP, sizeof(struct rte_flow_action_jump)),
	MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
	MK_FLOW_ACTION(FLAG, 0),
	MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
	MK_FLOW_ACTION(DROP, 0),
	MK_FLOW_ACTION(COUNT, sizeof(struct rte_flow_action_count)),
	MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)),
	MK_FLOW_ACTION(PF, 0),
	MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
	MK_FLOW_ACTION(PORT_ID, sizeof(struct rte_flow_action_port_id)),
	MK_FLOW_ACTION(METER, sizeof(struct rte_flow_action_meter)),
	MK_FLOW_ACTION(SECURITY, sizeof(struct rte_flow_action_security)),
	MK_FLOW_ACTION(OF_DEC_NW_TTL, 0),
	MK_FLOW_ACTION(OF_POP_VLAN, 0),
	MK_FLOW_ACTION(OF_PUSH_VLAN,
		       sizeof(struct rte_flow_action_of_push_vlan)),
	MK_FLOW_ACTION(OF_SET_VLAN_VID,
		       sizeof(struct rte_flow_action_of_set_vlan_vid)),
	MK_FLOW_ACTION(OF_SET_VLAN_PCP,
		       sizeof(struct rte_flow_action_of_set_vlan_pcp)),
	MK_FLOW_ACTION(OF_POP_MPLS,
		       sizeof(struct rte_flow_action_of_pop_mpls)),
	MK_FLOW_ACTION(OF_PUSH_MPLS,
		       sizeof(struct rte_flow_action_of_push_mpls)),
	MK_FLOW_ACTION(VXLAN_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
	MK_FLOW_ACTION(VXLAN_DECAP, 0),
	MK_FLOW_ACTION(NVGRE_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
	MK_FLOW_ACTION(NVGRE_DECAP, 0),
	MK_FLOW_ACTION(RAW_ENCAP, sizeof(struct rte_flow_action_raw_encap)),
	MK_FLOW_ACTION(RAW_DECAP, sizeof(struct rte_flow_action_raw_decap)),
	MK_FLOW_ACTION(SET_IPV4_SRC,
		       sizeof(struct rte_flow_action_set_ipv4)),
	MK_FLOW_ACTION(SET_IPV4_DST,
		       sizeof(struct rte_flow_action_set_ipv4)),
	MK_FLOW_ACTION(SET_IPV6_SRC,
		       sizeof(struct rte_flow_action_set_ipv6)),
	MK_FLOW_ACTION(SET_IPV6_DST,
		       sizeof(struct rte_flow_action_set_ipv6)),
	MK_FLOW_ACTION(SET_TP_SRC,
		       sizeof(struct rte_flow_action_set_tp)),
	MK_FLOW_ACTION(SET_TP_DST,
		       sizeof(struct rte_flow_action_set_tp)),
	MK_FLOW_ACTION(MAC_SWAP, 0),
	MK_FLOW_ACTION(DEC_TTL, 0),
	MK_FLOW_ACTION(SET_TTL, sizeof(struct rte_flow_action_set_ttl)),
	MK_FLOW_ACTION(SET_MAC_SRC, sizeof(struct rte_flow_action_set_mac)),
	MK_FLOW_ACTION(SET_MAC_DST, sizeof(struct rte_flow_action_set_mac)),
	MK_FLOW_ACTION(INC_TCP_SEQ, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(DEC_TCP_SEQ, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(INC_TCP_ACK, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(DEC_TCP_ACK, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(SET_TAG, sizeof(struct rte_flow_action_set_tag)),
	MK_FLOW_ACTION(SET_META, sizeof(struct rte_flow_action_set_meta)),
	MK_FLOW_ACTION(SET_IPV4_DSCP, sizeof(struct rte_flow_action_set_dscp)),
	MK_FLOW_ACTION(SET_IPV6_DSCP, sizeof(struct rte_flow_action_set_dscp)),
	MK_FLOW_ACTION(AGE, sizeof(struct rte_flow_action_age)),
	MK_FLOW_ACTION(SAMPLE, sizeof(struct rte_flow_action_sample)),
	MK_FLOW_ACTION(MODIFY_FIELD,
		       sizeof(struct rte_flow_action_modify_field)),
	/**
	 * Indirect action represented as handle of type
	 * (struct rte_flow_action_handle *) stored in conf field (see
	 * struct rte_flow_action); no need for additional structure to
	 * store indirect action handle.
	 */
	MK_FLOW_ACTION(INDIRECT, 0),
	MK_FLOW_ACTION(CONNTRACK, sizeof(struct rte_flow_action_conntrack)),
	MK_FLOW_ACTION(PORT_REPRESENTOR, sizeof(struct rte_flow_action_ethdev)),
	MK_FLOW_ACTION(REPRESENTED_PORT, sizeof(struct rte_flow_action_ethdev)),
	MK_FLOW_ACTION(METER_MARK, sizeof(struct rte_flow_action_meter_mark)),
	MK_FLOW_ACTION(SEND_TO_KERNEL, 0),
	MK_FLOW_ACTION(QUOTA, sizeof(struct rte_flow_action_quota)),
};

int
rte_flow_dynf_metadata_register(void)
{
	int offset;
	int flag;

	static const struct rte_mbuf_dynfield desc_offs = {
		.name = RTE_MBUF_DYNFIELD_METADATA_NAME,
		.size = sizeof(uint32_t),
		.align = __alignof__(uint32_t),
	};
	static const struct rte_mbuf_dynflag desc_flag = {
		.name = RTE_MBUF_DYNFLAG_METADATA_NAME,
	};

	offset = rte_mbuf_dynfield_register(&desc_offs);
	if (offset < 0)
		goto error;
	flag = rte_mbuf_dynflag_register(&desc_flag);
	if (flag < 0)
		goto error;
	rte_flow_dynf_metadata_offs = offset;
	rte_flow_dynf_metadata_mask = RTE_BIT64(flag);

	rte_flow_trace_dynf_metadata_register(offset, RTE_BIT64(flag));

	return 0;

error:
	rte_flow_dynf_metadata_offs = -1;
	rte_flow_dynf_metadata_mask = UINT64_C(0);
	return -rte_errno;
}
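
/*
 * Illustrative application-side use (a hypothetical snippet, not part of
 * this file): the dynamic field must be registered before the META item or
 * SET_META action is used, after which received mbufs can carry the value.
 *
 * @code{.c}
 * if (rte_flow_dynf_metadata_register() < 0)
 *	rte_panic("cannot register metadata dynfield\n");
 * ...
 * if (m->ol_flags & rte_flow_dynf_metadata_mask)
 *	printf("metadata: %u\n", rte_flow_dynf_metadata_get(m));
 * @endcode
 */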

static inline void
fts_enter(struct rte_eth_dev *dev)
{
	if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
		pthread_mutex_lock(&dev->data->flow_ops_mutex);
}

static inline void
fts_exit(struct rte_eth_dev *dev)
{
	if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
		pthread_mutex_unlock(&dev->data->flow_ops_mutex);
}

static int
flow_err(uint16_t port_id, int ret, struct rte_flow_error *error)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return rte_flow_error_set(error, EIO,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(EIO));
	return ret;
}

/* Get generic flow operations structure from a port. */
const struct rte_flow_ops *
rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops;
	int code;

	if (unlikely(!rte_eth_dev_is_valid_port(port_id)))
		code = ENODEV;
	else if (unlikely(dev->dev_ops->flow_ops_get == NULL))
		/* flow API not supported with this driver dev_ops */
		code = ENOSYS;
	else
		code = dev->dev_ops->flow_ops_get(dev, &ops);
	if (code == 0 && ops == NULL)
		/* flow API not supported with this device */
		code = ENOSYS;

	if (code != 0) {
		rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, rte_strerror(code));
		return NULL;
	}
	return ops;
}

/* Check whether a flow rule can be created on a given port. */
int
rte_flow_validate(uint16_t port_id,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;

	if (likely(!!attr) && attr->transfer &&
	    (attr->ingress || attr->egress)) {
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ATTR,
					  attr, "cannot use attr ingress/egress with attr transfer");
	}

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->validate)) {
		fts_enter(dev);
		ret = ops->validate(dev, attr, pattern, actions, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_validate(port_id, attr, pattern, actions, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Create a flow rule on a given port. */
struct rte_flow *
rte_flow_create(uint16_t port_id,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct rte_flow *flow;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return NULL;
	if (likely(!!ops->create)) {
		fts_enter(dev);
		flow = ops->create(dev, attr, pattern, actions, error);
		fts_exit(dev);
		if (flow == NULL)
			flow_err(port_id, -rte_errno, error);

		rte_flow_trace_create(port_id, attr, pattern, actions, flow);

		return flow;
	}
	rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(ENOSYS));
	return NULL;
}
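
/*
 * Illustrative call sequence from an application (hypothetical snippet,
 * not part of this file): validate a rule first, then create it.
 *
 * @code{.c}
 * struct rte_flow_attr attr = { .ingress = 1 };
 * struct rte_flow_item pattern[] = {
 *	{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *	{ .type = RTE_FLOW_ITEM_TYPE_END },
 * };
 * struct rte_flow_action_queue queue = { .index = 1 };
 * struct rte_flow_action actions[] = {
 *	{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *	{ .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 * struct rte_flow_error err;
 * struct rte_flow *flow = NULL;
 *
 * if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *	flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 * @endcode
 */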

/* Destroy a flow rule on a given port. */
int
rte_flow_destroy(uint16_t port_id,
		 struct rte_flow *flow,
		 struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->destroy)) {
		fts_enter(dev);
		ret = ops->destroy(dev, flow, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_destroy(port_id, flow, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Destroy all flow rules associated with a port. */
int
rte_flow_flush(uint16_t port_id,
	       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->flush)) {
		fts_enter(dev);
		ret = ops->flush(dev, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_flush(port_id, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Query an existing flow rule. */
int
rte_flow_query(uint16_t port_id,
	       struct rte_flow *flow,
	       const struct rte_flow_action *action,
	       void *data,
	       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (!ops)
		return -rte_errno;
	if (likely(!!ops->query)) {
		fts_enter(dev);
		ret = ops->query(dev, flow, action, data, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_query(port_id, flow, action, data, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Restrict ingress traffic to the defined flow rules. */
int
rte_flow_isolate(uint16_t port_id,
		 int set,
		 struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (!ops)
		return -rte_errno;
	if (likely(!!ops->isolate)) {
		fts_enter(dev);
		ret = ops->isolate(dev, set, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_isolate(port_id, set, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Initialize flow error structure. */
int
rte_flow_error_set(struct rte_flow_error *error,
		   int code,
		   enum rte_flow_error_type type,
		   const void *cause,
		   const char *message)
{
	if (error) {
		*error = (struct rte_flow_error){
			.type = type,
			.cause = cause,
			.message = message,
		};
	}
	rte_errno = code;
	return -code;
}
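
/*
 * Illustrative PMD-side use (hypothetical driver snippet, not part of this
 * file): driver callbacks report failures through rte_flow_error_set() so
 * that rte_errno and the verbose error structure stay consistent.
 *
 * @code{.c}
 * static int
 * foo_validate_item(const struct rte_flow_item *item,
 *		     struct rte_flow_error *error)
 * {
 *	if (item->type != RTE_FLOW_ITEM_TYPE_ETH)
 *		return rte_flow_error_set(error, ENOTSUP,
 *					  RTE_FLOW_ERROR_TYPE_ITEM,
 *					  item, "only ethernet is matched");
 *	return 0;
 * }
 * @endcode
 */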

/** Pattern item specification types. */
enum rte_flow_conv_item_spec_type {
	RTE_FLOW_CONV_ITEM_SPEC,
	RTE_FLOW_CONV_ITEM_LAST,
	RTE_FLOW_CONV_ITEM_MASK,
};

/**
 * Copy pattern item specification.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] item
 *   Pattern item to copy specification from.
 * @param type
 *   Specification selector for either @p spec, @p last or @p mask.
 *
 * @return
 *   Number of bytes needed to store pattern item specification regardless
 *   of @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_item_spec(void *buf, const size_t size,
			const struct rte_flow_item *item,
			enum rte_flow_conv_item_spec_type type)
{
	size_t off;
	const void *data =
		type == RTE_FLOW_CONV_ITEM_SPEC ? item->spec :
		type == RTE_FLOW_CONV_ITEM_LAST ? item->last :
		type == RTE_FLOW_CONV_ITEM_MASK ? item->mask :
		NULL;

	switch (item->type) {
		union {
			const struct rte_flow_item_raw *raw;
		} spec;
		union {
			const struct rte_flow_item_raw *raw;
		} last;
		union {
			const struct rte_flow_item_raw *raw;
		} mask;
		union {
			const struct rte_flow_item_raw *raw;
		} src;
		union {
			struct rte_flow_item_raw *raw;
		} dst;
		size_t tmp;

	case RTE_FLOW_ITEM_TYPE_RAW:
		spec.raw = item->spec;
		last.raw = item->last ? item->last : item->spec;
		mask.raw = item->mask ? item->mask : &rte_flow_item_raw_mask;
		src.raw = data;
		dst.raw = buf;
		rte_memcpy(dst.raw,
			   (&(struct rte_flow_item_raw){
				.relative = src.raw->relative,
				.search = src.raw->search,
				.reserved = src.raw->reserved,
				.offset = src.raw->offset,
				.limit = src.raw->limit,
				.length = src.raw->length,
			   }),
			   size > sizeof(*dst.raw) ? sizeof(*dst.raw) : size);
		off = sizeof(*dst.raw);
		if (type == RTE_FLOW_CONV_ITEM_SPEC ||
		    (type == RTE_FLOW_CONV_ITEM_MASK &&
		     ((spec.raw->length & mask.raw->length) >=
		      (last.raw->length & mask.raw->length))))
			tmp = spec.raw->length & mask.raw->length;
		else
			tmp = last.raw->length & mask.raw->length;
		if (tmp) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.raw->pattern));
			if (size >= off + tmp)
				dst.raw->pattern = rte_memcpy
					((void *)((uintptr_t)dst.raw + off),
					 src.raw->pattern, tmp);
			off += tmp;
		}
		break;
	default:
		off = rte_flow_conv_copy(buf, data, size,
					 rte_flow_desc_item, item->type);
		break;
	}
	return off;
}

/**
 * Copy action configuration.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] action
 *   Action to copy configuration from.
 *
 * @return
 *   Number of bytes needed to store the action configuration regardless
 *   of @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_action_conf(void *buf, const size_t size,
			  const struct rte_flow_action *action)
{
	size_t off;

	switch (action->type) {
		union {
			const struct rte_flow_action_rss *rss;
			const struct rte_flow_action_vxlan_encap *vxlan_encap;
			const struct rte_flow_action_nvgre_encap *nvgre_encap;
		} src;
		union {
			struct rte_flow_action_rss *rss;
			struct rte_flow_action_vxlan_encap *vxlan_encap;
			struct rte_flow_action_nvgre_encap *nvgre_encap;
		} dst;
		size_t tmp;
		int ret;

	case RTE_FLOW_ACTION_TYPE_RSS:
		src.rss = action->conf;
		dst.rss = buf;
		rte_memcpy(dst.rss,
			   (&(struct rte_flow_action_rss){
				.func = src.rss->func,
				.level = src.rss->level,
				.types = src.rss->types,
				.key_len = src.rss->key_len,
				.queue_num = src.rss->queue_num,
			   }),
			   size > sizeof(*dst.rss) ? sizeof(*dst.rss) : size);
		off = sizeof(*dst.rss);
		if (src.rss->key_len && src.rss->key) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->key));
			tmp = sizeof(*src.rss->key) * src.rss->key_len;
			if (size >= off + tmp)
				dst.rss->key = rte_memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->key, tmp);
			off += tmp;
		}
		if (src.rss->queue_num) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->queue));
			tmp = sizeof(*src.rss->queue) * src.rss->queue_num;
			if (size >= off + tmp)
				dst.rss->queue = rte_memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->queue, tmp);
			off += tmp;
		}
		break;
	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
	case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
		src.vxlan_encap = action->conf;
		dst.vxlan_encap = buf;
		RTE_BUILD_BUG_ON(sizeof(*src.vxlan_encap) !=
				 sizeof(*src.nvgre_encap) ||
				 offsetof(struct rte_flow_action_vxlan_encap,
					  definition) !=
				 offsetof(struct rte_flow_action_nvgre_encap,
					  definition));
		off = sizeof(*dst.vxlan_encap);
		if (src.vxlan_encap->definition) {
			off = RTE_ALIGN_CEIL
				(off, sizeof(*dst.vxlan_encap->definition));
			ret = rte_flow_conv
				(RTE_FLOW_CONV_OP_PATTERN,
				 (void *)((uintptr_t)dst.vxlan_encap + off),
				 size > off ? size - off : 0,
				 src.vxlan_encap->definition, NULL);
			if (ret < 0)
				return 0;
			if (size >= off + ret)
				dst.vxlan_encap->definition =
					(void *)((uintptr_t)dst.vxlan_encap +
						 off);
			off += ret;
		}
		break;
	default:
		off = rte_flow_conv_copy(buf, action->conf, size,
					 rte_flow_desc_action, action->type);
		break;
	}
	return off;
}

/**
 * Copy a list of pattern items.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source pattern items.
 * @param num
 *   Maximum number of pattern items to process from @p src or 0 to process
 *   the entire list. In both cases, processing stops after
 *   RTE_FLOW_ITEM_TYPE_END is encountered.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store
 *   pattern items regardless of @p size on success (@p dst contents are
 *   truncated to @p size if not large enough), a negative errno value
 *   otherwise and rte_errno is set.
 */
static int
rte_flow_conv_pattern(struct rte_flow_item *dst,
		      const size_t size,
		      const struct rte_flow_item *src,
		      unsigned int num,
		      struct rte_flow_error *error)
{
	uintptr_t data = (uintptr_t)dst;
	size_t off;
	size_t ret;
	unsigned int i;

	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
		/**
		 * allow PMD private flow item
		 */
		if (((int)src->type >= 0) &&
		    ((size_t)src->type >= RTE_DIM(rte_flow_desc_item) ||
		     !rte_flow_desc_item[src->type].name))
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, src,
				 "cannot convert unknown item type");
		if (size >= off + sizeof(*dst))
			*dst = (struct rte_flow_item){
				.type = src->type,
			};
		off += sizeof(*dst);
		if (!src->type)
			num = i + 1;
	}
	num = i;
	src -= num;
	dst -= num;
	do {
		if (src->spec) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_SPEC);
			if (size && size >= off + ret)
				dst->spec = (void *)(data + off);
			off += ret;
		}
		if (src->last) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_LAST);
			if (size && size >= off + ret)
				dst->last = (void *)(data + off);
			off += ret;
		}
		if (src->mask) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_MASK);
			if (size && size >= off + ret)
				dst->mask = (void *)(data + off);
			off += ret;
		}
		++src;
		++dst;
	} while (--num);
	return off;
}

/**
 * Copy a list of actions.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source actions.
 * @param num
 *   Maximum number of actions to process from @p src or 0 to process the
 *   entire list. In both cases, processing stops after
 *   RTE_FLOW_ACTION_TYPE_END is encountered.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store
 *   actions regardless of @p size on success (@p dst contents are truncated
 *   to @p size if not large enough), a negative errno value otherwise and
 *   rte_errno is set.
 */
static int
rte_flow_conv_actions(struct rte_flow_action *dst,
		      const size_t size,
		      const struct rte_flow_action *src,
		      unsigned int num,
		      struct rte_flow_error *error)
{
	uintptr_t data = (uintptr_t)dst;
	size_t off;
	size_t ret;
	unsigned int i;

	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
		/**
		 * allow PMD private flow action
		 */
		if (((int)src->type >= 0) &&
		    ((size_t)src->type >= RTE_DIM(rte_flow_desc_action) ||
		     !rte_flow_desc_action[src->type].name))
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
				 src, "cannot convert unknown action type");
		if (size >= off + sizeof(*dst))
			*dst = (struct rte_flow_action){
				.type = src->type,
			};
		off += sizeof(*dst);
		if (!src->type)
			num = i + 1;
	}
	num = i;
	src -= num;
	dst -= num;
	do {
		if (src->conf) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_action_conf
				((void *)(data + off),
				 size > off ? size - off : 0, src);
			if (size && size >= off + ret)
				dst->conf = (void *)(data + off);
			off += ret;
		}
		++src;
		++dst;
	} while (--num);
	return off;
}

/**
 * Copy flow rule components.
 *
 * This comprises the flow rule descriptor itself, attributes, pattern and
 * actions list. NULL components in @p src are skipped.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source flow rule descriptor.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store all
 *   components including the descriptor regardless of @p size on success
 *   (@p dst contents are truncated to @p size if not large enough), a
 *   negative errno value otherwise and rte_errno is set.
 */
static int
rte_flow_conv_rule(struct rte_flow_conv_rule *dst,
		   const size_t size,
		   const struct rte_flow_conv_rule *src,
		   struct rte_flow_error *error)
{
	size_t off;
	int ret;

	rte_memcpy(dst,
		   (&(struct rte_flow_conv_rule){
			.attr = NULL,
			.pattern = NULL,
			.actions = NULL,
		   }),
		   size > sizeof(*dst) ? sizeof(*dst) : size);
	off = sizeof(*dst);
	if (src->attr_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		if (size && size >= off + sizeof(*dst->attr))
			dst->attr = rte_memcpy
				((void *)((uintptr_t)dst + off),
				 src->attr_ro, sizeof(*dst->attr));
		off += sizeof(*dst->attr);
	}
	if (src->pattern_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		ret = rte_flow_conv_pattern((void *)((uintptr_t)dst + off),
					    size > off ? size - off : 0,
					    src->pattern_ro, 0, error);
		if (ret < 0)
			return ret;
		if (size && size >= off + (size_t)ret)
			dst->pattern = (void *)((uintptr_t)dst + off);
		off += ret;
	}
	if (src->actions_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		ret = rte_flow_conv_actions((void *)((uintptr_t)dst + off),
					    size > off ? size - off : 0,
					    src->actions_ro, 0, error);
		if (ret < 0)
			return ret;
		if (size >= off + (size_t)ret)
			dst->actions = (void *)((uintptr_t)dst + off);
		off += ret;
	}
	return off;
}

/**
 * Retrieve the name of a pattern item/action type.
 *
 * @param is_action
 *   Nonzero when @p src represents an action type instead of a pattern item
 *   type.
 * @param is_ptr
 *   Nonzero to write string address instead of contents into @p dst.
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Depending on @p is_action, source pattern item or action type cast as a
 *   pointer.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store the
 *   name or its address regardless of @p size on success (@p dst contents
 *   are truncated to @p size if not large enough), a negative errno value
 *   otherwise and rte_errno is set.
 */
static int
rte_flow_conv_name(int is_action,
		   int is_ptr,
		   char *dst,
		   const size_t size,
		   const void *src,
		   struct rte_flow_error *error)
{
	struct desc_info {
		const struct rte_flow_desc_data *data;
		size_t num;
	};
	static const struct desc_info info_rep[2] = {
		{ rte_flow_desc_item, RTE_DIM(rte_flow_desc_item), },
		{ rte_flow_desc_action, RTE_DIM(rte_flow_desc_action), },
	};
	const struct desc_info *const info = &info_rep[!!is_action];
	unsigned int type = (uintptr_t)src;

	if (type >= info->num)
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			 "unknown object type to retrieve the name of");
	if (!is_ptr)
		return strlcpy(dst, info->data[type].name, size);
	if (size >= sizeof(const char **))
		*((const char **)dst) = info->data[type].name;
	return sizeof(const char **);
}

/** Helper function to convert flow API objects. */
int
rte_flow_conv(enum rte_flow_conv_op op,
	      void *dst,
	      size_t size,
	      const void *src,
	      struct rte_flow_error *error)
{
	int ret;

	switch (op) {
		const struct rte_flow_attr *attr;

	case RTE_FLOW_CONV_OP_NONE:
		ret = 0;
		break;
	case RTE_FLOW_CONV_OP_ATTR:
		attr = src;
		if (size > sizeof(*attr))
			size = sizeof(*attr);
		rte_memcpy(dst, attr, size);
		ret = sizeof(*attr);
		break;
	case RTE_FLOW_CONV_OP_ITEM:
		ret = rte_flow_conv_pattern(dst, size, src, 1, error);
		break;
	case RTE_FLOW_CONV_OP_ACTION:
		ret = rte_flow_conv_actions(dst, size, src, 1, error);
		break;
	case RTE_FLOW_CONV_OP_PATTERN:
		ret = rte_flow_conv_pattern(dst, size, src, 0, error);
		break;
	case RTE_FLOW_CONV_OP_ACTIONS:
		ret = rte_flow_conv_actions(dst, size, src, 0, error);
		break;
	case RTE_FLOW_CONV_OP_RULE:
		ret = rte_flow_conv_rule(dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ITEM_NAME:
		ret = rte_flow_conv_name(0, 0, dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ACTION_NAME:
		ret = rte_flow_conv_name(1, 0, dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ITEM_NAME_PTR:
		ret = rte_flow_conv_name(0, 1, dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ACTION_NAME_PTR:
		ret = rte_flow_conv_name(1, 1, dst, size, src, error);
		break;
	default:
		ret = rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			 "unknown object conversion operation");
	}

	rte_flow_trace_conv(op, dst, size, src, ret);

	return ret;
}
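
/*
 * Illustrative two-pass use (hypothetical snippet, not part of this file):
 * the conversion helpers return the size needed regardless of the buffer
 * size, so a caller can query first and then convert for real.
 *
 * @code{.c}
 * struct rte_flow_error err;
 * int need = rte_flow_conv(RTE_FLOW_CONV_OP_PATTERN, NULL, 0, pattern, &err);
 *
 * if (need > 0) {
 *	struct rte_flow_item *copy = malloc(need);
 *
 *	if (copy != NULL)
 *		rte_flow_conv(RTE_FLOW_CONV_OP_PATTERN, copy, need,
 *			      pattern, &err);
 * }
 * @endcode
 */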

/** Store a full rte_flow description. */
size_t
rte_flow_copy(struct rte_flow_desc *desc, size_t len,
	      const struct rte_flow_attr *attr,
	      const struct rte_flow_item *items,
	      const struct rte_flow_action *actions)
{
	/*
	 * Overlap struct rte_flow_conv with struct rte_flow_desc in order
	 * to convert the former to the latter without wasting space.
	 */
	struct rte_flow_conv_rule *dst =
		len ?
		(void *)((uintptr_t)desc +
			 (offsetof(struct rte_flow_desc, actions) -
			  offsetof(struct rte_flow_conv_rule, actions))) :
		NULL;
	size_t dst_size =
		len > sizeof(*desc) - sizeof(*dst) ?
		len - (sizeof(*desc) - sizeof(*dst)) :
		0;
	struct rte_flow_conv_rule src = {
		.attr_ro = NULL,
		.pattern_ro = items,
		.actions_ro = actions,
	};
	int ret;

	RTE_BUILD_BUG_ON(sizeof(struct rte_flow_desc) <
			 sizeof(struct rte_flow_conv_rule));
	if (dst_size &&
	    (&dst->pattern != &desc->items ||
	     &dst->actions != &desc->actions ||
	     (uintptr_t)(dst + 1) != (uintptr_t)(desc + 1))) {
		rte_errno = EINVAL;
		return 0;
	}
	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, dst, dst_size, &src, NULL);
	if (ret < 0)
		return 0;
	ret += sizeof(*desc) - sizeof(*dst);
	rte_memcpy(desc,
		   (&(struct rte_flow_desc){
			.size = ret,
			.attr = *attr,
			.items = dst_size ? dst->pattern : NULL,
			.actions = dst_size ? dst->actions : NULL,
		   }),
		   len > sizeof(*desc) ? sizeof(*desc) : len);

	rte_flow_trace_copy(desc, len, attr, items, actions, ret);

	return ret;
}

int
rte_flow_dev_dump(uint16_t port_id, struct rte_flow *flow,
			FILE *file, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->dev_dump)) {
		fts_enter(dev);
		ret = ops->dev_dump(dev, flow, file, error);
		fts_exit(dev);
		return flow_err(port_id, ret, error);
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

int
rte_flow_get_aged_flows(uint16_t port_id, void **contexts,
		    uint32_t nb_contexts, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->get_aged_flows)) {
		fts_enter(dev);
		ret = ops->get_aged_flows(dev, contexts, nb_contexts, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_get_aged_flows(port_id, contexts, nb_contexts, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}
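
/*
 * Illustrative aging workflow (hypothetical snippet, not part of this
 * file): rules carry an AGE action and the application collects expired
 * contexts, typically from the RTE_ETH_EVENT_FLOW_AGED callback.
 *
 * @code{.c}
 * void *contexts[32];
 * int n = rte_flow_get_aged_flows(port_id, contexts,
 *				   RTE_DIM(contexts), &err);
 *
 * for (int i = 0; i < n; i++)
 *	handle_expired_rule(contexts[i]); // application-defined
 * @endcode
 */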

int
rte_flow_get_q_aged_flows(uint16_t port_id, uint32_t queue_id, void **contexts,
			  uint32_t nb_contexts, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->get_q_aged_flows)) {
		fts_enter(dev);
		ret = ops->get_q_aged_flows(dev, queue_id, contexts,
					    nb_contexts, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_get_q_aged_flows(port_id, queue_id, contexts,
						nb_contexts, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

struct rte_flow_action_handle *
rte_flow_action_handle_create(uint16_t port_id,
			      const struct rte_flow_indir_action_conf *conf,
			      const struct rte_flow_action *action,
			      struct rte_flow_error *error)
{
	struct rte_flow_action_handle *handle;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return NULL;
	if (unlikely(!ops->action_handle_create)) {
		rte_flow_error_set(error, ENOSYS,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   rte_strerror(ENOSYS));
		return NULL;
	}
	handle = ops->action_handle_create(&rte_eth_devices[port_id],
					   conf, action, error);
	if (handle == NULL)
		flow_err(port_id, -rte_errno, error);

	rte_flow_trace_action_handle_create(port_id, conf, action, handle);

	return handle;
}
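
/*
 * Illustrative indirect action use (hypothetical snippet, not part of this
 * file): create a shared counter once, then reference the handle from any
 * number of rules through RTE_FLOW_ACTION_TYPE_INDIRECT.
 *
 * @code{.c}
 * struct rte_flow_indir_action_conf iconf = { .ingress = 1 };
 * struct rte_flow_action_count count = { 0 };
 * struct rte_flow_action action = {
 *	.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *	.conf = &count,
 * };
 * struct rte_flow_action_handle *handle =
 *	rte_flow_action_handle_create(port_id, &iconf, &action, &err);
 *
 * struct rte_flow_action actions[] = {
 *	{ .type = RTE_FLOW_ACTION_TYPE_INDIRECT, .conf = handle },
 *	{ .type = RTE_FLOW_ACTION_TYPE_END },
 * };
 * @endcode
 */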

int
rte_flow_action_handle_destroy(uint16_t port_id,
			       struct rte_flow_action_handle *handle,
			       struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(!ops->action_handle_destroy))
		return rte_flow_error_set(error, ENOSYS,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOSYS));
	ret = ops->action_handle_destroy(&rte_eth_devices[port_id],
					 handle, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_action_handle_destroy(port_id, handle, ret);

	return ret;
}

int
rte_flow_action_handle_update(uint16_t port_id,
			      struct rte_flow_action_handle *handle,
			      const void *update,
			      struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(!ops->action_handle_update))
		return rte_flow_error_set(error, ENOSYS,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOSYS));
	ret = ops->action_handle_update(&rte_eth_devices[port_id], handle,
					update, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_action_handle_update(port_id, handle, update, ret);

	return ret;
}

int
rte_flow_action_handle_query(uint16_t port_id,
			     const struct rte_flow_action_handle *handle,
			     void *data,
			     struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(!ops->action_handle_query))
		return rte_flow_error_set(error, ENOSYS,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOSYS));
	ret = ops->action_handle_query(&rte_eth_devices[port_id], handle,
				       data, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_action_handle_query(port_id, handle, data, ret);

	return ret;
}

int
rte_flow_tunnel_decap_set(uint16_t port_id,
			  struct rte_flow_tunnel *tunnel,
			  struct rte_flow_action **actions,
			  uint32_t *num_of_actions,
			  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_decap_set)) {
		ret = flow_err(port_id,
			       ops->tunnel_decap_set(dev, tunnel, actions,
						     num_of_actions, error),
			       error);

		rte_flow_trace_tunnel_decap_set(port_id, tunnel, actions,
						num_of_actions, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_tunnel_match(uint16_t port_id,
		      struct rte_flow_tunnel *tunnel,
		      struct rte_flow_item **items,
		      uint32_t *num_of_items,
		      struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_match)) {
		ret = flow_err(port_id,
			       ops->tunnel_match(dev, tunnel, items,
						 num_of_items, error),
			       error);

		rte_flow_trace_tunnel_match(port_id, tunnel, items, num_of_items,
					    ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_get_restore_info(uint16_t port_id,
			  struct rte_mbuf *m,
			  struct rte_flow_restore_info *restore_info,
			  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->get_restore_info)) {
		ret = flow_err(port_id,
			       ops->get_restore_info(dev, m, restore_info,
						     error),
			       error);

		rte_flow_trace_get_restore_info(port_id, m, restore_info, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_tunnel_action_decap_release(uint16_t port_id,
				     struct rte_flow_action *actions,
				     uint32_t num_of_actions,
				     struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_action_decap_release)) {
		ret = flow_err(port_id,
			       ops->tunnel_action_decap_release(dev, actions,
								num_of_actions,
								error),
			       error);

		rte_flow_trace_tunnel_action_decap_release(port_id, actions,
							   num_of_actions, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_tunnel_item_release(uint16_t port_id,
			     struct rte_flow_item *items,
			     uint32_t num_of_items,
			     struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_item_release)) {
		ret = flow_err(port_id,
			       ops->tunnel_item_release(dev, items,
							num_of_items, error),
			       error);

		rte_flow_trace_tunnel_item_release(port_id, items, num_of_items, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_pick_transfer_proxy(uint16_t port_id, uint16_t *proxy_port_id,
			     struct rte_flow_error *error)
{
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_eth_dev *dev;
	int ret;

	if (unlikely(ops == NULL))
		return -rte_errno;

	if (ops->pick_transfer_proxy == NULL) {
		*proxy_port_id = port_id;
		return 0;
	}

	dev = &rte_eth_devices[port_id];

	ret = flow_err(port_id,
		       ops->pick_transfer_proxy(dev, proxy_port_id, error),
		       error);

	rte_flow_trace_pick_transfer_proxy(port_id, proxy_port_id, ret);

	return ret;
}

struct rte_flow_item_flex_handle *
rte_flow_flex_item_create(uint16_t port_id,
			  const struct rte_flow_item_flex_conf *conf,
			  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow_item_flex_handle *handle;

	if (unlikely(!ops))
		return NULL;
	if (unlikely(!ops->flex_item_create)) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, rte_strerror(ENOTSUP));
		return NULL;
	}
	handle = ops->flex_item_create(dev, conf, error);
	if (handle == NULL)
		flow_err(port_id, -rte_errno, error);

	rte_flow_trace_flex_item_create(port_id, conf, handle);

	return handle;
}

int
rte_flow_flex_item_release(uint16_t port_id,
			   const struct rte_flow_item_flex_handle *handle,
			   struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops || !ops->flex_item_release))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOTSUP));
	ret = ops->flex_item_release(dev, handle, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_flex_item_release(port_id, handle, ret);

	return ret;
}

int
rte_flow_info_get(uint16_t port_id,
		  struct rte_flow_port_info *port_info,
		  struct rte_flow_queue_info *queue_info,
		  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (dev->data->dev_configured == 0) {
		RTE_FLOW_LOG(INFO,
			"Device with port_id=%"PRIu16" is not configured.\n",
			port_id);
		return -EINVAL;
	}
	if (port_info == NULL) {
		RTE_FLOW_LOG(ERR, "Port %"PRIu16" info is NULL.\n", port_id);
		return -EINVAL;
	}
	if (likely(!!ops->info_get)) {
		ret = flow_err(port_id,
			       ops->info_get(dev, port_info, queue_info, error),
			       error);

		rte_flow_trace_info_get(port_id, port_info, queue_info, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_configure(uint16_t port_id,
		   const struct rte_flow_port_attr *port_attr,
		   uint16_t nb_queue,
		   const struct rte_flow_queue_attr *queue_attr[],
		   struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (dev->data->dev_configured == 0) {
		RTE_FLOW_LOG(INFO,
			"Device with port_id=%"PRIu16" is not configured.\n",
			port_id);
		return -EINVAL;
	}
	if (dev->data->dev_started != 0) {
		RTE_FLOW_LOG(INFO,
			"Device with port_id=%"PRIu16" already started.\n",
			port_id);
		return -EINVAL;
	}
	if (port_attr == NULL) {
		RTE_FLOW_LOG(ERR, "Port %"PRIu16" port attr is NULL.\n", port_id);
		return -EINVAL;
	}
	if (queue_attr == NULL) {
		RTE_FLOW_LOG(ERR, "Port %"PRIu16" queue info is NULL.\n", port_id);
		return -EINVAL;
	}
	if ((port_attr->flags & RTE_FLOW_PORT_FLAG_SHARE_INDIRECT) &&
	     !rte_eth_dev_is_valid_port(port_attr->host_port_id)) {
		return rte_flow_error_set(error, ENODEV,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENODEV));
	}
	if (likely(!!ops->configure)) {
		ret = ops->configure(dev, port_attr, nb_queue, queue_attr, error);
		if (ret == 0)
			dev->data->flow_configured = 1;
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_configure(port_id, port_attr, nb_queue, queue_attr, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}
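
/*
 * Illustrative async flow engine setup (hypothetical sizes, not part of
 * this file): configure flow queues after rte_eth_dev_configure() and
 * before rte_eth_dev_start().
 *
 * @code{.c}
 * const struct rte_flow_port_attr port_attr = { .nb_counters = 1024 };
 * const struct rte_flow_queue_attr qattr = { .size = 64 };
 * const struct rte_flow_queue_attr *queue_attr[] = { &qattr };
 *
 * if (rte_flow_configure(port_id, &port_attr, 1, queue_attr, &err) != 0)
 *	rte_panic("flow engine configuration failed\n");
 * @endcode
 */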

struct rte_flow_pattern_template *
rte_flow_pattern_template_create(uint16_t port_id,
		const struct rte_flow_pattern_template_attr *template_attr,
		const struct rte_flow_item pattern[],
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow_pattern_template *template;

	if (unlikely(!ops))
		return NULL;
	if (dev->data->flow_configured == 0) {
		RTE_FLOW_LOG(INFO,
			"Flow engine on port_id=%"PRIu16" is not configured.\n",
			port_id);
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_STATE,
				NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (template_attr == NULL) {
		RTE_FLOW_LOG(ERR,
			     "Port %"PRIu16" template attr is NULL.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (pattern == NULL) {
		RTE_FLOW_LOG(ERR,
			     "Port %"PRIu16" pattern is NULL.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (likely(!!ops->pattern_template_create)) {
		template = ops->pattern_template_create(dev, template_attr,
							pattern, error);
		if (template == NULL)
			flow_err(port_id, -rte_errno, error);

		rte_flow_trace_pattern_template_create(port_id, template_attr,
						       pattern, template);

		return template;
	}
	rte_flow_error_set(error, ENOTSUP,
			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(ENOTSUP));
	return NULL;
}
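
/*
 * Illustrative pattern template (hypothetical snippet, not part of this
 * file): masks in the template select which fields later rules match on.
 *
 * @code{.c}
 * const struct rte_flow_pattern_template_attr tattr = { .ingress = 1 };
 * struct rte_flow_item_eth eth_mask = {
 *	.hdr.ether_type = RTE_BE16(0xffff),
 * };
 * struct rte_flow_item pattern[] = {
 *	{ .type = RTE_FLOW_ITEM_TYPE_ETH, .mask = &eth_mask },
 *	{ .type = RTE_FLOW_ITEM_TYPE_END },
 * };
 * struct rte_flow_pattern_template *pt =
 *	rte_flow_pattern_template_create(port_id, &tattr, pattern, &err);
 * @endcode
 */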

int
rte_flow_pattern_template_destroy(uint16_t port_id,
		struct rte_flow_pattern_template *pattern_template,
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(pattern_template == NULL))
		return 0;
	if (likely(!!ops->pattern_template_destroy)) {
		ret = flow_err(port_id,
			       ops->pattern_template_destroy(dev,
							     pattern_template,
							     error),
			       error);

		rte_flow_trace_pattern_template_destroy(port_id, pattern_template,
							ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

struct rte_flow_actions_template *
rte_flow_actions_template_create(uint16_t port_id,
			const struct rte_flow_actions_template_attr *template_attr,
			const struct rte_flow_action actions[],
			const struct rte_flow_action masks[],
			struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow_actions_template *template;

	if (unlikely(!ops))
		return NULL;
	if (dev->data->flow_configured == 0) {
		RTE_FLOW_LOG(INFO,
			"Flow engine on port_id=%"PRIu16" is not configured.\n",
			port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_STATE,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (template_attr == NULL) {
		RTE_FLOW_LOG(ERR,
			     "Port %"PRIu16" template attr is NULL.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (actions == NULL) {
		RTE_FLOW_LOG(ERR,
			     "Port %"PRIu16" actions is NULL.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (masks == NULL) {
		RTE_FLOW_LOG(ERR,
			     "Port %"PRIu16" masks is NULL.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		/* Fix: return here like the sibling checks; the original
		 * fell through and passed a NULL masks to the PMD.
		 */
		return NULL;
	}
	if (likely(!!ops->actions_template_create)) {
		template = ops->actions_template_create(dev, template_attr,
							actions, masks, error);
		if (template == NULL)
			flow_err(port_id, -rte_errno, error);

		rte_flow_trace_actions_template_create(port_id, template_attr, actions,
						       masks, template);

		return template;
	}
	rte_flow_error_set(error, ENOTSUP,
			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(ENOTSUP));
	return NULL;
}

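/*
 * Illustrative sketch (not part of this file): the masks array mirrors
 * actions[] and selects which action fields are fixed in the template
 * versus supplied per rule at enqueue time.  Here the QUEUE index is
 * left unmasked so each rule chooses its own queue:
 *
 *	static const struct rte_flow_actions_template_attr at_attr = {
 *		.ingress = 1,
 *	};
 *	static const struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	static const struct rte_flow_action masks[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_actions_template *at;
 *
 *	at = rte_flow_actions_template_create(port_id, &at_attr,
 *					      actions, masks, &error);
 */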
int
rte_flow_actions_template_destroy(uint16_t port_id,
			struct rte_flow_actions_template *actions_template,
			struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(actions_template == NULL))
		return 0;
	if (likely(!!ops->actions_template_destroy)) {
		ret = flow_err(port_id,
			       ops->actions_template_destroy(dev,
							     actions_template,
							     error),
			       error);

		rte_flow_trace_actions_template_destroy(port_id, actions_template,
							ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

struct rte_flow_template_table *
rte_flow_template_table_create(uint16_t port_id,
			const struct rte_flow_template_table_attr *table_attr,
			struct rte_flow_pattern_template *pattern_templates[],
			uint8_t nb_pattern_templates,
			struct rte_flow_actions_template *actions_templates[],
			uint8_t nb_actions_templates,
			struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow_template_table *table;

	if (unlikely(!ops))
		return NULL;
	if (dev->data->flow_configured == 0) {
		RTE_FLOW_LOG(INFO,
			"Flow engine on port_id=%"PRIu16" is not configured.\n",
			port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_STATE,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (table_attr == NULL) {
		RTE_FLOW_LOG(ERR,
			     "Port %"PRIu16" table attr is NULL.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (pattern_templates == NULL) {
		RTE_FLOW_LOG(ERR,
			     "Port %"PRIu16" pattern templates is NULL.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (actions_templates == NULL) {
		RTE_FLOW_LOG(ERR,
			     "Port %"PRIu16" actions templates is NULL.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (likely(!!ops->template_table_create)) {
		table = ops->template_table_create(dev, table_attr,
					pattern_templates, nb_pattern_templates,
					actions_templates, nb_actions_templates,
					error);
		if (table == NULL)
			flow_err(port_id, -rte_errno, error);

		rte_flow_trace_template_table_create(port_id, table_attr,
						     pattern_templates,
						     nb_pattern_templates,
						     actions_templates,
						     nb_actions_templates, table);

		return table;
	}
	rte_flow_error_set(error, ENOTSUP,
			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(ENOTSUP));
	return NULL;
}

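/*
 * Illustrative sketch (not part of this file): combine the templates
 * from the sketches above into a table sized for the expected rule
 * count (values are examples only):
 *
 *	const struct rte_flow_template_table_attr table_attr = {
 *		.flow_attr = { .ingress = 1 },
 *		.nb_flows = 64 * 1024,
 *	};
 *	struct rte_flow_pattern_template *pt_list[] = { pt };
 *	struct rte_flow_actions_template *at_list[] = { at };
 *	struct rte_flow_template_table *tbl;
 *
 *	tbl = rte_flow_template_table_create(port_id, &table_attr,
 *					     pt_list, 1, at_list, 1, &error);
 */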
int
rte_flow_template_table_destroy(uint16_t port_id,
				struct rte_flow_template_table *template_table,
				struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(template_table == NULL))
		return 0;
	if (likely(!!ops->template_table_destroy)) {
		ret = flow_err(port_id,
			       ops->template_table_destroy(dev,
							   template_table,
							   error),
			       error);

		rte_flow_trace_template_table_destroy(port_id, template_table,
						      ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

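/*
 * The queue-based ("async") operations below are data-path functions.
 * Unlike the template setup above they perform only minimal validation:
 * ops is dereferenced without a NULL check and the flow_configured flag
 * is not consulted, on the assumption that rte_flow_configure()
 * succeeded before anything is enqueued.
 */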
struct rte_flow *
rte_flow_async_create(uint16_t port_id,
		      uint32_t queue_id,
		      const struct rte_flow_op_attr *op_attr,
		      struct rte_flow_template_table *template_table,
		      const struct rte_flow_item pattern[],
		      uint8_t pattern_template_index,
		      const struct rte_flow_action actions[],
		      uint8_t actions_template_index,
		      void *user_data,
		      struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow *flow;

	flow = ops->async_create(dev, queue_id,
				 op_attr, template_table,
				 pattern, pattern_template_index,
				 actions, actions_template_index,
				 user_data, error);
	if (flow == NULL)
		flow_err(port_id, -rte_errno, error);

	rte_flow_trace_async_create(port_id, queue_id, op_attr, template_table,
				    pattern, pattern_template_index, actions,
				    actions_template_index, user_data, flow);

	return flow;
}

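/*
 * Illustrative enqueue sketch (not part of this file): insert a rule
 * through queue 0 using template index 0 of the table sketched above.
 * rule_pattern/rule_actions are hypothetical application arrays filling
 * the unmasked template fields; .postpone = 1 keeps the operation
 * buffered until rte_flow_push():
 *
 *	const struct rte_flow_op_attr op_attr = { .postpone = 1 };
 *	struct rte_flow *flow;
 *
 *	flow = rte_flow_async_create(port_id, 0, &op_attr, tbl,
 *				     rule_pattern, 0, rule_actions, 0,
 *				     NULL, &error);
 */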
struct rte_flow *
rte_flow_async_create_by_index(uint16_t port_id,
			       uint32_t queue_id,
			       const struct rte_flow_op_attr *op_attr,
			       struct rte_flow_template_table *template_table,
			       uint32_t rule_index,
			       const struct rte_flow_action actions[],
			       uint8_t actions_template_index,
			       void *user_data,
			       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow *flow;

	flow = ops->async_create_by_index(dev, queue_id,
					  op_attr, template_table, rule_index,
					  actions, actions_template_index,
					  user_data, error);
	if (flow == NULL)
		flow_err(port_id, -rte_errno, error);
	return flow;
}

int
rte_flow_async_destroy(uint16_t port_id,
		       uint32_t queue_id,
		       const struct rte_flow_op_attr *op_attr,
		       struct rte_flow *flow,
		       void *user_data,
		       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	ret = flow_err(port_id,
		       ops->async_destroy(dev, queue_id,
					  op_attr, flow,
					  user_data, error),
		       error);

	rte_flow_trace_async_destroy(port_id, queue_id, op_attr, flow,
				     user_data, ret);

	return ret;
}

int
rte_flow_push(uint16_t port_id,
	      uint32_t queue_id,
	      struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	ret = flow_err(port_id,
		       ops->push(dev, queue_id, error),
		       error);

	rte_flow_trace_push(port_id, queue_id, ret);

	return ret;
}

int
rte_flow_pull(uint16_t port_id,
	      uint32_t queue_id,
	      struct rte_flow_op_result res[],
	      uint16_t n_res,
	      struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;
	int rc;

	ret = ops->pull(dev, queue_id, res, n_res, error);
	rc = ret ? ret : flow_err(port_id, ret, error);

	rte_flow_trace_pull(port_id, queue_id, res, n_res, rc);

	return rc;
}

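/*
 * Illustrative completion loop (not part of this file): flush any
 * postponed operations, then poll queue 0 until results arrive.
 * rte_flow_pull() returns the number of results read or a negative
 * errno; BURST and handle_failure() are hypothetical:
 *
 *	struct rte_flow_op_result results[BURST];
 *	int i, n;
 *
 *	rte_flow_push(port_id, 0, &error);
 *	do {
 *		n = rte_flow_pull(port_id, 0, results, BURST, &error);
 *	} while (n == 0);
 *	for (i = 0; i < n; i++)
 *		if (results[i].status != RTE_FLOW_OP_SUCCESS)
 *			handle_failure(results[i].user_data);
 */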
struct rte_flow_action_handle *
rte_flow_async_action_handle_create(uint16_t port_id,
		uint32_t queue_id,
		const struct rte_flow_op_attr *op_attr,
		const struct rte_flow_indir_action_conf *indir_action_conf,
		const struct rte_flow_action *action,
		void *user_data,
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow_action_handle *handle;

	handle = ops->async_action_handle_create(dev, queue_id, op_attr,
					     indir_action_conf, action, user_data, error);
	if (handle == NULL)
		flow_err(port_id, -rte_errno, error);

	rte_flow_trace_async_action_handle_create(port_id, queue_id, op_attr,
						  indir_action_conf, action,
						  user_data, handle);

	return handle;
}

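/*
 * Illustrative sketch (not part of this file): create a shared COUNT
 * action through queue 0, reusing the op_attr/error variables from the
 * sketches above:
 *
 *	const struct rte_flow_indir_action_conf conf = { .ingress = 1 };
 *	const struct rte_flow_action action = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *	};
 *	struct rte_flow_action_handle *handle;
 *
 *	handle = rte_flow_async_action_handle_create(port_id, 0, &op_attr,
 *						     &conf, &action,
 *						     NULL, &error);
 */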
int
rte_flow_async_action_handle_destroy(uint16_t port_id,
		uint32_t queue_id,
		const struct rte_flow_op_attr *op_attr,
		struct rte_flow_action_handle *action_handle,
		void *user_data,
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	ret = ops->async_action_handle_destroy(dev, queue_id, op_attr,
					   action_handle, user_data, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_async_action_handle_destroy(port_id, queue_id, op_attr,
						   action_handle, user_data, ret);

	return ret;
}

int
rte_flow_async_action_handle_update(uint16_t port_id,
		uint32_t queue_id,
		const struct rte_flow_op_attr *op_attr,
		struct rte_flow_action_handle *action_handle,
		const void *update,
		void *user_data,
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	ret = ops->async_action_handle_update(dev, queue_id, op_attr,
					  action_handle, update, user_data, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_async_action_handle_update(port_id, queue_id, op_attr,
						  action_handle, update,
						  user_data, ret);

	return ret;
}

int
rte_flow_async_action_handle_query(uint16_t port_id,
		uint32_t queue_id,
		const struct rte_flow_op_attr *op_attr,
		const struct rte_flow_action_handle *action_handle,
		void *data,
		void *user_data,
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	ret = ops->async_action_handle_query(dev, queue_id, op_attr,
					  action_handle, data, user_data, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_async_action_handle_query(port_id, queue_id, op_attr,
						 action_handle, data, user_data,
						 ret);

	return ret;
}

int
rte_flow_action_handle_query_update(uint16_t port_id,
				    struct rte_flow_action_handle *handle,
				    const void *update, void *query,
				    enum rte_flow_query_update_mode mode,
				    struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_dev *dev;
	const struct rte_flow_ops *ops;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if (!handle)
		return -EINVAL;
	if (!update && !query)
		return -EINVAL;
	dev = &rte_eth_devices[port_id];
	ops = rte_flow_ops_get(port_id, error);
	if (!ops || !ops->action_handle_query_update)
		return -ENOTSUP;
	ret = ops->action_handle_query_update(dev, handle, update,
					      query, mode, error);
	return flow_err(port_id, ret, error);
}

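/*
 * Note: mode selects the ordering of the combined operation.  As
 * documented for enum rte_flow_query_update_mode in rte_flow.h,
 * RTE_FLOW_QU_QUERY_FIRST queries the current state before applying
 * the update, while RTE_FLOW_QU_UPDATE_FIRST applies the update first
 * and queries the result.  The same semantics apply to the queued
 * variant below.
 */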
int
rte_flow_async_action_handle_query_update(uint16_t port_id, uint32_t queue_id,
					  const struct rte_flow_op_attr *attr,
					  struct rte_flow_action_handle *handle,
					  const void *update, void *query,
					  enum rte_flow_query_update_mode mode,
					  void *user_data,
					  struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_dev *dev;
	const struct rte_flow_ops *ops;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if (!handle)
		return -EINVAL;
	if (!update && !query)
		return -EINVAL;
	dev = &rte_eth_devices[port_id];
	ops = rte_flow_ops_get(port_id, error);
	if (!ops || !ops->async_action_handle_query_update)
		return -ENOTSUP;
	ret = ops->async_action_handle_query_update(dev, queue_id, attr,
						    handle, update,
						    query, mode,
						    user_data, error);
	return flow_err(port_id, ret, error);
}