xref: /dpdk/lib/ethdev/rte_flow.c (revision 06ea547943c373be9e80a55d587d73ece06d289e)
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <errno.h>
#include <stddef.h>
#include <stdint.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_branch_prediction.h>
#include <rte_string_fns.h>
#include <rte_mbuf_dyn.h>
#include "rte_ethdev.h"
#include "rte_flow_driver.h"
#include "rte_flow.h"

#include "ethdev_trace.h"

/* Mbuf dynamic field name for metadata. */
int32_t rte_flow_dynf_metadata_offs = -1;

/* Mbuf dynamic field flag bit number for metadata. */
uint64_t rte_flow_dynf_metadata_mask;

/**
 * Flow elements description tables.
 */
struct rte_flow_desc_data {
	const char *name;
	size_t size;
	size_t (*desc_fn)(void *dst, const void *src);
};
/**
 * Copy flow item or action data according to its description table entry.
 *
 * @param buf
 *   Destination memory.
 * @param data
 *   Source memory.
 * @param size
 *   Requested copy size.
 * @param desc
 *   rte_flow_desc_item - for flow item conversion.
 *   rte_flow_desc_action - for flow action conversion.
 * @param type
 *   Offset into the @p desc table, or a negative value for PMD-private
 *   flow elements.
 *
 * @return
 *   Number of bytes needed to store the element regardless of @p size,
 *   or 0 when @p buf or @p data is NULL.
 */
static inline size_t
rte_flow_conv_copy(void *buf, const void *data, const size_t size,
		   const struct rte_flow_desc_data *desc, int type)
{
	/* Allow PMD private flow item. */
	bool rte_type = type >= 0;

	size_t sz = rte_type ? desc[type].size : sizeof(void *);
	if (buf == NULL || data == NULL)
		return 0;
	rte_memcpy(buf, data, (size > sz ? sz : size));
	if (rte_type && desc[type].desc_fn)
		sz += desc[type].desc_fn(size > 0 ? buf : NULL, data);
	return sz;
}

static size_t
rte_flow_item_flex_conv(void *buf, const void *data)
{
	struct rte_flow_item_flex *dst = buf;
	const struct rte_flow_item_flex *src = data;
	if (buf) {
		dst->pattern = rte_memcpy
			((void *)((uintptr_t)(dst + 1)), src->pattern,
			 src->length);
	}
	return src->length;
}

/** Generate flow_item[] entry. */
#define MK_FLOW_ITEM(t, s) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
		.desc_fn = NULL, \
	}

#define MK_FLOW_ITEM_FN(t, s, fn) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
		.desc_fn = fn, \
	}
/** Information about known flow pattern items. */
static const struct rte_flow_desc_data rte_flow_desc_item[] = {
	MK_FLOW_ITEM(END, 0),
	MK_FLOW_ITEM(VOID, 0),
	MK_FLOW_ITEM(INVERT, 0),
	MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
	MK_FLOW_ITEM(PORT_ID, sizeof(struct rte_flow_item_port_id)),
	MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)),
	MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
	MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
	MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
	MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
	MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
	MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
	MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
	MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
	MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
	MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
	MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
	MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
	MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
	MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)),
	MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(ESP, sizeof(struct rte_flow_item_esp)),
	MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
	MK_FLOW_ITEM(VXLAN_GPE, sizeof(struct rte_flow_item_vxlan_gpe)),
	MK_FLOW_ITEM(ARP_ETH_IPV4, sizeof(struct rte_flow_item_arp_eth_ipv4)),
	MK_FLOW_ITEM(IPV6_EXT, sizeof(struct rte_flow_item_ipv6_ext)),
	MK_FLOW_ITEM(IPV6_FRAG_EXT, sizeof(struct rte_flow_item_ipv6_frag_ext)),
	MK_FLOW_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
	MK_FLOW_ITEM(ICMP6_ECHO_REQUEST, sizeof(struct rte_flow_item_icmp6_echo)),
	MK_FLOW_ITEM(ICMP6_ECHO_REPLY, sizeof(struct rte_flow_item_icmp6_echo)),
	MK_FLOW_ITEM(ICMP6_ND_NS, sizeof(struct rte_flow_item_icmp6_nd_ns)),
	MK_FLOW_ITEM(ICMP6_ND_NA, sizeof(struct rte_flow_item_icmp6_nd_na)),
	MK_FLOW_ITEM(ICMP6_ND_OPT, sizeof(struct rte_flow_item_icmp6_nd_opt)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_SLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_TLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
	MK_FLOW_ITEM(MARK, sizeof(struct rte_flow_item_mark)),
	MK_FLOW_ITEM(META, sizeof(struct rte_flow_item_meta)),
	MK_FLOW_ITEM(TAG, sizeof(struct rte_flow_item_tag)),
	MK_FLOW_ITEM(GRE_KEY, sizeof(rte_be32_t)),
	MK_FLOW_ITEM(GRE_OPTION, sizeof(struct rte_flow_item_gre_opt)),
	MK_FLOW_ITEM(GTP_PSC, sizeof(struct rte_flow_item_gtp_psc)),
	MK_FLOW_ITEM(PPPOES, sizeof(struct rte_flow_item_pppoe)),
	MK_FLOW_ITEM(PPPOED, sizeof(struct rte_flow_item_pppoe)),
	MK_FLOW_ITEM(PPPOE_PROTO_ID,
		     sizeof(struct rte_flow_item_pppoe_proto_id)),
	MK_FLOW_ITEM(NSH, sizeof(struct rte_flow_item_nsh)),
	MK_FLOW_ITEM(IGMP, sizeof(struct rte_flow_item_igmp)),
	MK_FLOW_ITEM(AH, sizeof(struct rte_flow_item_ah)),
	MK_FLOW_ITEM(HIGIG2, sizeof(struct rte_flow_item_higig2_hdr)),
	MK_FLOW_ITEM(L2TPV3OIP, sizeof(struct rte_flow_item_l2tpv3oip)),
	MK_FLOW_ITEM(PFCP, sizeof(struct rte_flow_item_pfcp)),
	MK_FLOW_ITEM(ECPRI, sizeof(struct rte_flow_item_ecpri)),
	MK_FLOW_ITEM(GENEVE_OPT, sizeof(struct rte_flow_item_geneve_opt)),
	MK_FLOW_ITEM(INTEGRITY, sizeof(struct rte_flow_item_integrity)),
	MK_FLOW_ITEM(CONNTRACK, sizeof(uint32_t)),
	MK_FLOW_ITEM(PORT_REPRESENTOR, sizeof(struct rte_flow_item_ethdev)),
	MK_FLOW_ITEM(REPRESENTED_PORT, sizeof(struct rte_flow_item_ethdev)),
	MK_FLOW_ITEM_FN(FLEX, sizeof(struct rte_flow_item_flex),
			rte_flow_item_flex_conv),
	MK_FLOW_ITEM(L2TPV2, sizeof(struct rte_flow_item_l2tpv2)),
	MK_FLOW_ITEM(PPP, sizeof(struct rte_flow_item_ppp)),
	MK_FLOW_ITEM(METER_COLOR, sizeof(struct rte_flow_item_meter_color)),
	MK_FLOW_ITEM(IPV6_ROUTING_EXT, sizeof(struct rte_flow_item_ipv6_routing_ext)),
	MK_FLOW_ITEM(QUOTA, sizeof(struct rte_flow_item_quota)),
};

/** Generate flow_action[] entry. */
#define MK_FLOW_ACTION(t, s) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
		.desc_fn = NULL, \
	}

#define MK_FLOW_ACTION_FN(t, fn) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = 0, \
		.desc_fn = fn, \
	}

/** Information about known flow actions. */
static const struct rte_flow_desc_data rte_flow_desc_action[] = {
	MK_FLOW_ACTION(END, 0),
	MK_FLOW_ACTION(VOID, 0),
	MK_FLOW_ACTION(PASSTHRU, 0),
	MK_FLOW_ACTION(JUMP, sizeof(struct rte_flow_action_jump)),
	MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
	MK_FLOW_ACTION(FLAG, 0),
	MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
	MK_FLOW_ACTION(DROP, 0),
	MK_FLOW_ACTION(COUNT, sizeof(struct rte_flow_action_count)),
	MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)),
	MK_FLOW_ACTION(PF, 0),
	MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
	MK_FLOW_ACTION(PORT_ID, sizeof(struct rte_flow_action_port_id)),
	MK_FLOW_ACTION(METER, sizeof(struct rte_flow_action_meter)),
	MK_FLOW_ACTION(SECURITY, sizeof(struct rte_flow_action_security)),
	MK_FLOW_ACTION(OF_DEC_NW_TTL, 0),
	MK_FLOW_ACTION(OF_POP_VLAN, 0),
	MK_FLOW_ACTION(OF_PUSH_VLAN,
		       sizeof(struct rte_flow_action_of_push_vlan)),
	MK_FLOW_ACTION(OF_SET_VLAN_VID,
		       sizeof(struct rte_flow_action_of_set_vlan_vid)),
	MK_FLOW_ACTION(OF_SET_VLAN_PCP,
		       sizeof(struct rte_flow_action_of_set_vlan_pcp)),
	MK_FLOW_ACTION(OF_POP_MPLS,
		       sizeof(struct rte_flow_action_of_pop_mpls)),
	MK_FLOW_ACTION(OF_PUSH_MPLS,
		       sizeof(struct rte_flow_action_of_push_mpls)),
	MK_FLOW_ACTION(VXLAN_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
	MK_FLOW_ACTION(VXLAN_DECAP, 0),
	MK_FLOW_ACTION(NVGRE_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
	MK_FLOW_ACTION(NVGRE_DECAP, 0),
	MK_FLOW_ACTION(RAW_ENCAP, sizeof(struct rte_flow_action_raw_encap)),
	MK_FLOW_ACTION(RAW_DECAP, sizeof(struct rte_flow_action_raw_decap)),
	MK_FLOW_ACTION(SET_IPV4_SRC,
		       sizeof(struct rte_flow_action_set_ipv4)),
	MK_FLOW_ACTION(SET_IPV4_DST,
		       sizeof(struct rte_flow_action_set_ipv4)),
	MK_FLOW_ACTION(SET_IPV6_SRC,
		       sizeof(struct rte_flow_action_set_ipv6)),
	MK_FLOW_ACTION(SET_IPV6_DST,
		       sizeof(struct rte_flow_action_set_ipv6)),
	MK_FLOW_ACTION(SET_TP_SRC,
		       sizeof(struct rte_flow_action_set_tp)),
	MK_FLOW_ACTION(SET_TP_DST,
		       sizeof(struct rte_flow_action_set_tp)),
	MK_FLOW_ACTION(MAC_SWAP, 0),
	MK_FLOW_ACTION(DEC_TTL, 0),
	MK_FLOW_ACTION(SET_TTL, sizeof(struct rte_flow_action_set_ttl)),
	MK_FLOW_ACTION(SET_MAC_SRC, sizeof(struct rte_flow_action_set_mac)),
	MK_FLOW_ACTION(SET_MAC_DST, sizeof(struct rte_flow_action_set_mac)),
	MK_FLOW_ACTION(INC_TCP_SEQ, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(DEC_TCP_SEQ, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(INC_TCP_ACK, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(DEC_TCP_ACK, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(SET_TAG, sizeof(struct rte_flow_action_set_tag)),
	MK_FLOW_ACTION(SET_META, sizeof(struct rte_flow_action_set_meta)),
	MK_FLOW_ACTION(SET_IPV4_DSCP, sizeof(struct rte_flow_action_set_dscp)),
	MK_FLOW_ACTION(SET_IPV6_DSCP, sizeof(struct rte_flow_action_set_dscp)),
	MK_FLOW_ACTION(AGE, sizeof(struct rte_flow_action_age)),
	MK_FLOW_ACTION(SAMPLE, sizeof(struct rte_flow_action_sample)),
	MK_FLOW_ACTION(MODIFY_FIELD,
		       sizeof(struct rte_flow_action_modify_field)),
	/**
	 * Indirect action represented as handle of type
	 * (struct rte_flow_action_handle *) stored in conf field (see
	 * struct rte_flow_action); no need for additional structure to
	 * store indirect action handle.
	 */
	MK_FLOW_ACTION(INDIRECT, 0),
	MK_FLOW_ACTION(CONNTRACK, sizeof(struct rte_flow_action_conntrack)),
	MK_FLOW_ACTION(PORT_REPRESENTOR, sizeof(struct rte_flow_action_ethdev)),
	MK_FLOW_ACTION(REPRESENTED_PORT, sizeof(struct rte_flow_action_ethdev)),
	MK_FLOW_ACTION(METER_MARK, sizeof(struct rte_flow_action_meter_mark)),
	MK_FLOW_ACTION(SEND_TO_KERNEL, 0),
	MK_FLOW_ACTION(QUOTA, sizeof(struct rte_flow_action_quota)),
};

int
rte_flow_dynf_metadata_register(void)
{
	int offset;
	int flag;

	static const struct rte_mbuf_dynfield desc_offs = {
		.name = RTE_MBUF_DYNFIELD_METADATA_NAME,
		.size = sizeof(uint32_t),
		.align = __alignof__(uint32_t),
	};
	static const struct rte_mbuf_dynflag desc_flag = {
		.name = RTE_MBUF_DYNFLAG_METADATA_NAME,
	};

	offset = rte_mbuf_dynfield_register(&desc_offs);
	if (offset < 0)
		goto error;
	flag = rte_mbuf_dynflag_register(&desc_flag);
	if (flag < 0)
		goto error;
	rte_flow_dynf_metadata_offs = offset;
	rte_flow_dynf_metadata_mask = RTE_BIT64(flag);

	rte_flow_trace_dynf_metadata_register(offset, RTE_BIT64(flag));

	return 0;

error:
	rte_flow_dynf_metadata_offs = -1;
	rte_flow_dynf_metadata_mask = UINT64_C(0);
	return -rte_errno;
}

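/*
 * Illustrative usage sketch (editor's addition, not part of this file):
 * applications typically register the metadata dynamic field once during
 * initialization, then read it from received mbufs. The helpers
 * RTE_FLOW_DYNF_METADATA() and rte_flow_dynf_metadata_avail() are
 * declared in rte_flow.h.
 *
 *     uint32_t metadata;
 *
 *     if (rte_flow_dynf_metadata_register() < 0)
 *         rte_exit(EXIT_FAILURE, "metadata dynfield: %s\n",
 *                  rte_strerror(rte_errno));
 *     ...
 *     if (rte_flow_dynf_metadata_avail() &&
 *         (mb->ol_flags & rte_flow_dynf_metadata_mask))
 *         metadata = *RTE_FLOW_DYNF_METADATA(mb);
 */
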
static inline void
fts_enter(struct rte_eth_dev *dev)
{
	if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
		pthread_mutex_lock(&dev->data->flow_ops_mutex);
}

static inline void
fts_exit(struct rte_eth_dev *dev)
{
	if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
		pthread_mutex_unlock(&dev->data->flow_ops_mutex);
}

static int
flow_err(uint16_t port_id, int ret, struct rte_flow_error *error)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return rte_flow_error_set(error, EIO,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(EIO));
	return ret;
}

/* Get generic flow operations structure from a port. */
const struct rte_flow_ops *
rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops;
	int code;

	if (unlikely(!rte_eth_dev_is_valid_port(port_id)))
		code = ENODEV;
	else if (unlikely(dev->dev_ops->flow_ops_get == NULL))
		/* flow API not supported with this driver dev_ops */
		code = ENOSYS;
	else
		code = dev->dev_ops->flow_ops_get(dev, &ops);
	if (code == 0 && ops == NULL)
		/* flow API not supported with this device */
		code = ENOSYS;

	if (code != 0) {
		rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, rte_strerror(code));
		return NULL;
	}
	return ops;
}

/* Check whether a flow rule can be created on a given port. */
int
rte_flow_validate(uint16_t port_id,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;

	if (likely(!!attr) && attr->transfer &&
	    (attr->ingress || attr->egress)) {
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ATTR,
					  attr, "cannot use attr ingress/egress with attr transfer");
	}

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->validate)) {
		fts_enter(dev);
		ret = ops->validate(dev, attr, pattern, actions, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_validate(port_id, attr, pattern, actions, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Create a flow rule on a given port. */
struct rte_flow *
rte_flow_create(uint16_t port_id,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct rte_flow *flow;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return NULL;
	if (likely(!!ops->create)) {
		fts_enter(dev);
		flow = ops->create(dev, attr, pattern, actions, error);
		fts_exit(dev);
		if (flow == NULL)
			flow_err(port_id, -rte_errno, error);

		rte_flow_trace_create(port_id, attr, pattern, actions, flow);

		return flow;
	}
	rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(ENOSYS));
	return NULL;
}

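/*
 * Illustrative usage sketch (editor's addition, not part of this file):
 * the common application pattern validates a rule before creating it;
 * port_id, attr, pattern and actions below are placeholders.
 *
 *     struct rte_flow_error err = { 0 };
 *     struct rte_flow *flow = NULL;
 *
 *     if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *         flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 *     if (flow == NULL)
 *         printf("flow error: %s\n", err.message ? err.message : "(none)");
 */
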
/* Destroy a flow rule on a given port. */
int
rte_flow_destroy(uint16_t port_id,
		 struct rte_flow *flow,
		 struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->destroy)) {
		fts_enter(dev);
		ret = ops->destroy(dev, flow, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_destroy(port_id, flow, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Destroy all flow rules associated with a port. */
int
rte_flow_flush(uint16_t port_id,
	       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->flush)) {
		fts_enter(dev);
		ret = ops->flush(dev, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_flush(port_id, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Query an existing flow rule. */
int
rte_flow_query(uint16_t port_id,
	       struct rte_flow *flow,
	       const struct rte_flow_action *action,
	       void *data,
	       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (!ops)
		return -rte_errno;
	if (likely(!!ops->query)) {
		fts_enter(dev);
		ret = ops->query(dev, flow, action, data, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_query(port_id, flow, action, data, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Restrict ingress traffic to the defined flow rules. */
int
rte_flow_isolate(uint16_t port_id,
		 int set,
		 struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (!ops)
		return -rte_errno;
	if (likely(!!ops->isolate)) {
		fts_enter(dev);
		ret = ops->isolate(dev, set, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_isolate(port_id, set, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Initialize flow error structure. */
int
rte_flow_error_set(struct rte_flow_error *error,
		   int code,
		   enum rte_flow_error_type type,
		   const void *cause,
		   const char *message)
{
	if (error) {
		*error = (struct rte_flow_error){
			.type = type,
			.cause = cause,
			.message = message,
		};
	}
	rte_errno = code;
	return -code;
}

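/*
 * Illustrative usage sketch (editor's addition, not part of this file):
 * PMD callbacks report failures through this helper so that rte_errno
 * and the caller's error structure stay consistent. The callback name
 * and the item_not_supported() check below are hypothetical.
 *
 *     static int
 *     example_pmd_validate(..., const struct rte_flow_item *item,
 *                          struct rte_flow_error *error)
 *     {
 *         if (item_not_supported(item))
 *             return rte_flow_error_set(error, ENOTSUP,
 *                                       RTE_FLOW_ERROR_TYPE_ITEM,
 *                                       item, "item not supported");
 *         return 0;
 *     }
 */
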
/** Pattern item specification types. */
enum rte_flow_conv_item_spec_type {
	RTE_FLOW_CONV_ITEM_SPEC,
	RTE_FLOW_CONV_ITEM_LAST,
	RTE_FLOW_CONV_ITEM_MASK,
};

/**
 * Copy pattern item specification.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] item
 *   Pattern item to copy specification from.
 * @param type
 *   Specification selector for either @p spec, @p last or @p mask.
 *
 * @return
 *   Number of bytes needed to store pattern item specification regardless
 *   of @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_item_spec(void *buf, const size_t size,
			const struct rte_flow_item *item,
			enum rte_flow_conv_item_spec_type type)
{
	size_t off;
	const void *data =
		type == RTE_FLOW_CONV_ITEM_SPEC ? item->spec :
		type == RTE_FLOW_CONV_ITEM_LAST ? item->last :
		type == RTE_FLOW_CONV_ITEM_MASK ? item->mask :
		NULL;

	switch (item->type) {
		union {
			const struct rte_flow_item_raw *raw;
		} spec;
		union {
			const struct rte_flow_item_raw *raw;
		} last;
		union {
			const struct rte_flow_item_raw *raw;
		} mask;
		union {
			const struct rte_flow_item_raw *raw;
		} src;
		union {
			struct rte_flow_item_raw *raw;
		} dst;
		size_t tmp;

	case RTE_FLOW_ITEM_TYPE_RAW:
		spec.raw = item->spec;
		last.raw = item->last ? item->last : item->spec;
		mask.raw = item->mask ? item->mask : &rte_flow_item_raw_mask;
		src.raw = data;
		dst.raw = buf;
		rte_memcpy(dst.raw,
			   (&(struct rte_flow_item_raw){
				.relative = src.raw->relative,
				.search = src.raw->search,
				.reserved = src.raw->reserved,
				.offset = src.raw->offset,
				.limit = src.raw->limit,
				.length = src.raw->length,
			   }),
			   size > sizeof(*dst.raw) ? sizeof(*dst.raw) : size);
		off = sizeof(*dst.raw);
		if (type == RTE_FLOW_CONV_ITEM_SPEC ||
		    (type == RTE_FLOW_CONV_ITEM_MASK &&
		     ((spec.raw->length & mask.raw->length) >=
		      (last.raw->length & mask.raw->length))))
			tmp = spec.raw->length & mask.raw->length;
		else
			tmp = last.raw->length & mask.raw->length;
		if (tmp) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.raw->pattern));
			if (size >= off + tmp)
				dst.raw->pattern = rte_memcpy
					((void *)((uintptr_t)dst.raw + off),
					 src.raw->pattern, tmp);
			off += tmp;
		}
		break;
	default:
		off = rte_flow_conv_copy(buf, data, size,
					 rte_flow_desc_item, item->type);
		break;
	}
	return off;
}

/**
 * Copy action configuration.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] action
 *   Action to copy configuration from.
 *
 * @return
 *   Number of bytes needed to store the action configuration regardless
 *   of @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_action_conf(void *buf, const size_t size,
			  const struct rte_flow_action *action)
{
	size_t off;

	switch (action->type) {
		union {
			const struct rte_flow_action_rss *rss;
			const struct rte_flow_action_vxlan_encap *vxlan_encap;
			const struct rte_flow_action_nvgre_encap *nvgre_encap;
		} src;
		union {
			struct rte_flow_action_rss *rss;
			struct rte_flow_action_vxlan_encap *vxlan_encap;
			struct rte_flow_action_nvgre_encap *nvgre_encap;
		} dst;
		size_t tmp;
		int ret;

	case RTE_FLOW_ACTION_TYPE_RSS:
		src.rss = action->conf;
		dst.rss = buf;
		rte_memcpy(dst.rss,
			   (&(struct rte_flow_action_rss){
				.func = src.rss->func,
				.level = src.rss->level,
				.types = src.rss->types,
				.key_len = src.rss->key_len,
				.queue_num = src.rss->queue_num,
			   }),
			   size > sizeof(*dst.rss) ? sizeof(*dst.rss) : size);
		off = sizeof(*dst.rss);
		if (src.rss->key_len && src.rss->key) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->key));
			tmp = sizeof(*src.rss->key) * src.rss->key_len;
			if (size >= off + tmp)
				dst.rss->key = rte_memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->key, tmp);
			off += tmp;
		}
		if (src.rss->queue_num) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->queue));
			tmp = sizeof(*src.rss->queue) * src.rss->queue_num;
			if (size >= off + tmp)
				dst.rss->queue = rte_memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->queue, tmp);
			off += tmp;
		}
		break;
	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
	case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
		src.vxlan_encap = action->conf;
		dst.vxlan_encap = buf;
		RTE_BUILD_BUG_ON(sizeof(*src.vxlan_encap) !=
				 sizeof(*src.nvgre_encap) ||
				 offsetof(struct rte_flow_action_vxlan_encap,
					  definition) !=
				 offsetof(struct rte_flow_action_nvgre_encap,
					  definition));
		off = sizeof(*dst.vxlan_encap);
		if (src.vxlan_encap->definition) {
			off = RTE_ALIGN_CEIL
				(off, sizeof(*dst.vxlan_encap->definition));
			ret = rte_flow_conv
				(RTE_FLOW_CONV_OP_PATTERN,
				 (void *)((uintptr_t)dst.vxlan_encap + off),
				 size > off ? size - off : 0,
				 src.vxlan_encap->definition, NULL);
			if (ret < 0)
				return 0;
			if (size >= off + ret)
				dst.vxlan_encap->definition =
					(void *)((uintptr_t)dst.vxlan_encap +
						 off);
			off += ret;
		}
		break;
	default:
		off = rte_flow_conv_copy(buf, action->conf, size,
					 rte_flow_desc_action, action->type);
		break;
	}
	return off;
}

/**
 * Copy a list of pattern items.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source pattern items.
 * @param num
 *   Maximum number of pattern items to process from @p src or 0 to process
 *   the entire list. In both cases, processing stops after
 *   RTE_FLOW_ITEM_TYPE_END is encountered.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store
 *   pattern items regardless of @p size on success (@p dst contents are
 *   truncated to @p size if not large enough), a negative errno value
 *   otherwise and rte_errno is set.
 */
static int
rte_flow_conv_pattern(struct rte_flow_item *dst,
		      const size_t size,
		      const struct rte_flow_item *src,
		      unsigned int num,
		      struct rte_flow_error *error)
{
	uintptr_t data = (uintptr_t)dst;
	size_t off;
	size_t ret;
	unsigned int i;

	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
		/* Allow PMD private flow item. */
		if (((int)src->type >= 0) &&
		    ((size_t)src->type >= RTE_DIM(rte_flow_desc_item) ||
		     !rte_flow_desc_item[src->type].name))
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, src,
				 "cannot convert unknown item type");
		if (size >= off + sizeof(*dst))
			*dst = (struct rte_flow_item){
				.type = src->type,
			};
		off += sizeof(*dst);
		if (!src->type)
			num = i + 1;
	}
	num = i;
	src -= num;
	dst -= num;
	do {
		if (src->spec) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_SPEC);
			if (size && size >= off + ret)
				dst->spec = (void *)(data + off);
			off += ret;
		}
		if (src->last) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_LAST);
			if (size && size >= off + ret)
				dst->last = (void *)(data + off);
			off += ret;
		}
		if (src->mask) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_MASK);
			if (size && size >= off + ret)
				dst->mask = (void *)(data + off);
			off += ret;
		}
		++src;
		++dst;
	} while (--num);
	return off;
}

/**
 * Copy a list of actions.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source actions.
 * @param num
 *   Maximum number of actions to process from @p src or 0 to process the
 *   entire list. In both cases, processing stops after
 *   RTE_FLOW_ACTION_TYPE_END is encountered.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store
 *   actions regardless of @p size on success (@p dst contents are truncated
 *   to @p size if not large enough), a negative errno value otherwise and
 *   rte_errno is set.
 */
static int
rte_flow_conv_actions(struct rte_flow_action *dst,
		      const size_t size,
		      const struct rte_flow_action *src,
		      unsigned int num,
		      struct rte_flow_error *error)
{
	uintptr_t data = (uintptr_t)dst;
	size_t off;
	size_t ret;
	unsigned int i;

	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
		/* Allow PMD private flow action. */
		if (((int)src->type >= 0) &&
		    ((size_t)src->type >= RTE_DIM(rte_flow_desc_action) ||
		     !rte_flow_desc_action[src->type].name))
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
				 src, "cannot convert unknown action type");
		if (size >= off + sizeof(*dst))
			*dst = (struct rte_flow_action){
				.type = src->type,
			};
		off += sizeof(*dst);
		if (!src->type)
			num = i + 1;
	}
	num = i;
	src -= num;
	dst -= num;
	do {
		if (src->conf) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_action_conf
				((void *)(data + off),
				 size > off ? size - off : 0, src);
			if (size && size >= off + ret)
				dst->conf = (void *)(data + off);
			off += ret;
		}
		++src;
		++dst;
	} while (--num);
	return off;
}

/**
 * Copy flow rule components.
 *
 * This comprises the flow rule descriptor itself, attributes, pattern and
 * actions list. NULL components in @p src are skipped.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source flow rule descriptor.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store all
 *   components including the descriptor regardless of @p size on success
 *   (@p dst contents are truncated to @p size if not large enough), a
 *   negative errno value otherwise and rte_errno is set.
 */
static int
rte_flow_conv_rule(struct rte_flow_conv_rule *dst,
		   const size_t size,
		   const struct rte_flow_conv_rule *src,
		   struct rte_flow_error *error)
{
	size_t off;
	int ret;

	rte_memcpy(dst,
		   (&(struct rte_flow_conv_rule){
			.attr = NULL,
			.pattern = NULL,
			.actions = NULL,
		   }),
		   size > sizeof(*dst) ? sizeof(*dst) : size);
	off = sizeof(*dst);
	if (src->attr_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		if (size && size >= off + sizeof(*dst->attr))
			dst->attr = rte_memcpy
				((void *)((uintptr_t)dst + off),
				 src->attr_ro, sizeof(*dst->attr));
		off += sizeof(*dst->attr);
	}
	if (src->pattern_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		ret = rte_flow_conv_pattern((void *)((uintptr_t)dst + off),
					    size > off ? size - off : 0,
					    src->pattern_ro, 0, error);
		if (ret < 0)
			return ret;
		if (size && size >= off + (size_t)ret)
			dst->pattern = (void *)((uintptr_t)dst + off);
		off += ret;
	}
	if (src->actions_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		ret = rte_flow_conv_actions((void *)((uintptr_t)dst + off),
					    size > off ? size - off : 0,
					    src->actions_ro, 0, error);
		if (ret < 0)
			return ret;
		if (size >= off + (size_t)ret)
			dst->actions = (void *)((uintptr_t)dst + off);
		off += ret;
	}
	return off;
}

/**
 * Retrieve the name of a pattern item/action type.
 *
 * @param is_action
 *   Nonzero when @p src represents an action type instead of a pattern item
 *   type.
 * @param is_ptr
 *   Nonzero to write string address instead of contents into @p dst.
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Depending on @p is_action, source pattern item or action type cast as a
 *   pointer.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store the
 *   name or its address regardless of @p size on success (@p dst contents
 *   are truncated to @p size if not large enough), a negative errno value
 *   otherwise and rte_errno is set.
 */
static int
rte_flow_conv_name(int is_action,
		   int is_ptr,
		   char *dst,
		   const size_t size,
		   const void *src,
		   struct rte_flow_error *error)
{
	struct desc_info {
		const struct rte_flow_desc_data *data;
		size_t num;
	};
	static const struct desc_info info_rep[2] = {
		{ rte_flow_desc_item, RTE_DIM(rte_flow_desc_item), },
		{ rte_flow_desc_action, RTE_DIM(rte_flow_desc_action), },
	};
	const struct desc_info *const info = &info_rep[!!is_action];
	unsigned int type = (uintptr_t)src;

	if (type >= info->num)
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			 "unknown object type to retrieve the name of");
	if (!is_ptr)
		return strlcpy(dst, info->data[type].name, size);
	if (size >= sizeof(const char **))
		*((const char **)dst) = info->data[type].name;
	return sizeof(const char **);
}

/** Helper function to convert flow API objects. */
int
rte_flow_conv(enum rte_flow_conv_op op,
	      void *dst,
	      size_t size,
	      const void *src,
	      struct rte_flow_error *error)
{
	int ret;

	switch (op) {
		const struct rte_flow_attr *attr;

	case RTE_FLOW_CONV_OP_NONE:
		ret = 0;
		break;
	case RTE_FLOW_CONV_OP_ATTR:
		attr = src;
		if (size > sizeof(*attr))
			size = sizeof(*attr);
		rte_memcpy(dst, attr, size);
		ret = sizeof(*attr);
		break;
	case RTE_FLOW_CONV_OP_ITEM:
		ret = rte_flow_conv_pattern(dst, size, src, 1, error);
		break;
	case RTE_FLOW_CONV_OP_ACTION:
		ret = rte_flow_conv_actions(dst, size, src, 1, error);
		break;
	case RTE_FLOW_CONV_OP_PATTERN:
		ret = rte_flow_conv_pattern(dst, size, src, 0, error);
		break;
	case RTE_FLOW_CONV_OP_ACTIONS:
		ret = rte_flow_conv_actions(dst, size, src, 0, error);
		break;
	case RTE_FLOW_CONV_OP_RULE:
		ret = rte_flow_conv_rule(dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ITEM_NAME:
		ret = rte_flow_conv_name(0, 0, dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ACTION_NAME:
		ret = rte_flow_conv_name(1, 0, dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ITEM_NAME_PTR:
		ret = rte_flow_conv_name(0, 1, dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ACTION_NAME_PTR:
		ret = rte_flow_conv_name(1, 1, dst, size, src, error);
		break;
	default:
		ret = rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			 "unknown object conversion operation");
	}

	rte_flow_trace_conv(op, dst, size, src, ret);

	return ret;
}

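/*
 * Illustrative usage sketch (editor's addition, not part of this file):
 * fetching the name of an action type through the conversion helper,
 * as applications such as testpmd do.
 *
 *     const char *name;
 *
 *     if (rte_flow_conv(RTE_FLOW_CONV_OP_ACTION_NAME_PTR, &name,
 *                       sizeof(name),
 *                       (void *)(uintptr_t)RTE_FLOW_ACTION_TYPE_RSS,
 *                       NULL) < 0)
 *         name = "unknown";
 */
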
/** Store a full rte_flow description. */
size_t
rte_flow_copy(struct rte_flow_desc *desc, size_t len,
	      const struct rte_flow_attr *attr,
	      const struct rte_flow_item *items,
	      const struct rte_flow_action *actions)
{
	/*
	 * Overlap struct rte_flow_conv_rule with struct rte_flow_desc in
	 * order to convert the former to the latter without wasting space.
	 */
	struct rte_flow_conv_rule *dst =
		len ?
		(void *)((uintptr_t)desc +
			 (offsetof(struct rte_flow_desc, actions) -
			  offsetof(struct rte_flow_conv_rule, actions))) :
		NULL;
	size_t dst_size =
		len > sizeof(*desc) - sizeof(*dst) ?
		len - (sizeof(*desc) - sizeof(*dst)) :
		0;
	struct rte_flow_conv_rule src = {
		.attr_ro = NULL,
		.pattern_ro = items,
		.actions_ro = actions,
	};
	int ret;

	RTE_BUILD_BUG_ON(sizeof(struct rte_flow_desc) <
			 sizeof(struct rte_flow_conv_rule));
	if (dst_size &&
	    (&dst->pattern != &desc->items ||
	     &dst->actions != &desc->actions ||
	     (uintptr_t)(dst + 1) != (uintptr_t)(desc + 1))) {
		rte_errno = EINVAL;
		return 0;
	}
	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, dst, dst_size, &src, NULL);
	if (ret < 0)
		return 0;
	ret += sizeof(*desc) - sizeof(*dst);
	rte_memcpy(desc,
		   (&(struct rte_flow_desc){
			.size = ret,
			.attr = *attr,
			.items = dst_size ? dst->pattern : NULL,
			.actions = dst_size ? dst->actions : NULL,
		   }),
		   len > sizeof(*desc) ? sizeof(*desc) : len);

	rte_flow_trace_copy(desc, len, attr, items, actions, ret);

	return ret;
}

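/*
 * Illustrative usage sketch (editor's addition, assuming the usual
 * two-pass sizing convention of this API): a first call with a zero
 * length returns the space needed for the expanded description.
 *
 *     size_t need = rte_flow_copy(NULL, 0, &attr, items, actions);
 *     struct rte_flow_desc *desc = malloc(need);
 *
 *     if (desc == NULL ||
 *         rte_flow_copy(desc, need, &attr, items, actions) != need)
 *         ... handle the error ...
 */
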
int
rte_flow_dev_dump(uint16_t port_id, struct rte_flow *flow,
			FILE *file, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->dev_dump)) {
		fts_enter(dev);
		ret = ops->dev_dump(dev, flow, file, error);
		fts_exit(dev);
		return flow_err(port_id, ret, error);
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

int
rte_flow_get_aged_flows(uint16_t port_id, void **contexts,
		    uint32_t nb_contexts, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->get_aged_flows)) {
		fts_enter(dev);
		ret = ops->get_aged_flows(dev, contexts, nb_contexts, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_get_aged_flows(port_id, contexts, nb_contexts, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_get_q_aged_flows(uint16_t port_id, uint32_t queue_id, void **contexts,
			  uint32_t nb_contexts, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->get_q_aged_flows)) {
		fts_enter(dev);
		ret = ops->get_q_aged_flows(dev, queue_id, contexts,
					    nb_contexts, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_get_q_aged_flows(port_id, queue_id, contexts,
						nb_contexts, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

struct rte_flow_action_handle *
rte_flow_action_handle_create(uint16_t port_id,
			      const struct rte_flow_indir_action_conf *conf,
			      const struct rte_flow_action *action,
			      struct rte_flow_error *error)
{
	struct rte_flow_action_handle *handle;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return NULL;
	if (unlikely(!ops->action_handle_create)) {
		rte_flow_error_set(error, ENOSYS,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   rte_strerror(ENOSYS));
		return NULL;
	}
	handle = ops->action_handle_create(&rte_eth_devices[port_id],
					   conf, action, error);
	if (handle == NULL)
		flow_err(port_id, -rte_errno, error);

	rte_flow_trace_action_handle_create(port_id, conf, action, handle);

	return handle;
}

int
rte_flow_action_handle_destroy(uint16_t port_id,
			       struct rte_flow_action_handle *handle,
			       struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(!ops->action_handle_destroy))
		return rte_flow_error_set(error, ENOSYS,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOSYS));
	ret = ops->action_handle_destroy(&rte_eth_devices[port_id],
					 handle, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_action_handle_destroy(port_id, handle, ret);

	return ret;
}

int
rte_flow_action_handle_update(uint16_t port_id,
			      struct rte_flow_action_handle *handle,
			      const void *update,
			      struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(!ops->action_handle_update))
		return rte_flow_error_set(error, ENOSYS,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOSYS));
	ret = ops->action_handle_update(&rte_eth_devices[port_id], handle,
					update, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_action_handle_update(port_id, handle, update, ret);

	return ret;
}

int
rte_flow_action_handle_query(uint16_t port_id,
			     const struct rte_flow_action_handle *handle,
			     void *data,
			     struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(!ops->action_handle_query))
		return rte_flow_error_set(error, ENOSYS,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOSYS));
	ret = ops->action_handle_query(&rte_eth_devices[port_id], handle,
				       data, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_action_handle_query(port_id, handle, data, ret);

	return ret;
}

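/*
 * Illustrative usage sketch (editor's addition, not part of this file):
 * one counter shared by several rules through an indirect action; error
 * handling and rule creation are elided, port_id is a placeholder.
 *
 *     struct rte_flow_error err;
 *     struct rte_flow_indir_action_conf conf = { .ingress = 1 };
 *     struct rte_flow_action action = {
 *         .type = RTE_FLOW_ACTION_TYPE_COUNT,
 *     };
 *     struct rte_flow_query_count counters;
 *     struct rte_flow_action_handle *h;
 *
 *     h = rte_flow_action_handle_create(port_id, &conf, &action, &err);
 *     ... reference h from the conf field of RTE_FLOW_ACTION_TYPE_INDIRECT ...
 *     rte_flow_action_handle_query(port_id, h, &counters, &err);
 *     rte_flow_action_handle_destroy(port_id, h, &err);
 */
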
int
rte_flow_tunnel_decap_set(uint16_t port_id,
			  struct rte_flow_tunnel *tunnel,
			  struct rte_flow_action **actions,
			  uint32_t *num_of_actions,
			  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_decap_set)) {
		ret = flow_err(port_id,
			       ops->tunnel_decap_set(dev, tunnel, actions,
						     num_of_actions, error),
			       error);

		rte_flow_trace_tunnel_decap_set(port_id, tunnel, actions,
						num_of_actions, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_tunnel_match(uint16_t port_id,
		      struct rte_flow_tunnel *tunnel,
		      struct rte_flow_item **items,
		      uint32_t *num_of_items,
		      struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_match)) {
		ret = flow_err(port_id,
			       ops->tunnel_match(dev, tunnel, items,
						 num_of_items, error),
			       error);

		rte_flow_trace_tunnel_match(port_id, tunnel, items, num_of_items,
					    ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_get_restore_info(uint16_t port_id,
			  struct rte_mbuf *m,
			  struct rte_flow_restore_info *restore_info,
			  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->get_restore_info)) {
		ret = flow_err(port_id,
			       ops->get_restore_info(dev, m, restore_info,
						     error),
			       error);

		rte_flow_trace_get_restore_info(port_id, m, restore_info, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_tunnel_action_decap_release(uint16_t port_id,
				     struct rte_flow_action *actions,
				     uint32_t num_of_actions,
				     struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_action_decap_release)) {
		ret = flow_err(port_id,
			       ops->tunnel_action_decap_release(dev, actions,
								num_of_actions,
								error),
			       error);

		rte_flow_trace_tunnel_action_decap_release(port_id, actions,
							   num_of_actions, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_tunnel_item_release(uint16_t port_id,
			     struct rte_flow_item *items,
			     uint32_t num_of_items,
			     struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_item_release)) {
		ret = flow_err(port_id,
			       ops->tunnel_item_release(dev, items,
							num_of_items, error),
			       error);

		rte_flow_trace_tunnel_item_release(port_id, items, num_of_items, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_pick_transfer_proxy(uint16_t port_id, uint16_t *proxy_port_id,
			     struct rte_flow_error *error)
{
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_eth_dev *dev;
	int ret;

	if (unlikely(ops == NULL))
		return -rte_errno;

	if (ops->pick_transfer_proxy == NULL) {
		*proxy_port_id = port_id;
		return 0;
	}

	dev = &rte_eth_devices[port_id];

	ret = flow_err(port_id,
		       ops->pick_transfer_proxy(dev, proxy_port_id, error),
		       error);

	rte_flow_trace_pick_transfer_proxy(port_id, proxy_port_id, ret);

	return ret;
}

struct rte_flow_item_flex_handle *
rte_flow_flex_item_create(uint16_t port_id,
			  const struct rte_flow_item_flex_conf *conf,
			  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow_item_flex_handle *handle;

	if (unlikely(!ops))
		return NULL;
	if (unlikely(!ops->flex_item_create)) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, rte_strerror(ENOTSUP));
		return NULL;
	}
	handle = ops->flex_item_create(dev, conf, error);
	if (handle == NULL)
		flow_err(port_id, -rte_errno, error);

	rte_flow_trace_flex_item_create(port_id, conf, handle);

	return handle;
}

int
rte_flow_flex_item_release(uint16_t port_id,
			   const struct rte_flow_item_flex_handle *handle,
			   struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops || !ops->flex_item_release))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOTSUP));
	ret = ops->flex_item_release(dev, handle, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_flex_item_release(port_id, handle, ret);

	return ret;
}

int
rte_flow_info_get(uint16_t port_id,
		  struct rte_flow_port_info *port_info,
		  struct rte_flow_queue_info *queue_info,
		  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (dev->data->dev_configured == 0) {
		RTE_FLOW_LOG(INFO,
			"Device with port_id=%"PRIu16" is not configured.\n",
			port_id);
		return -EINVAL;
	}
	if (port_info == NULL) {
		RTE_FLOW_LOG(ERR, "Port %"PRIu16" info is NULL.\n", port_id);
		return -EINVAL;
	}
	if (likely(!!ops->info_get)) {
		ret = flow_err(port_id,
			       ops->info_get(dev, port_info, queue_info, error),
			       error);

		rte_flow_trace_info_get(port_id, port_info, queue_info, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_configure(uint16_t port_id,
		   const struct rte_flow_port_attr *port_attr,
		   uint16_t nb_queue,
		   const struct rte_flow_queue_attr *queue_attr[],
		   struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (dev->data->dev_configured == 0) {
		RTE_FLOW_LOG(INFO,
			"Device with port_id=%"PRIu16" is not configured.\n",
			port_id);
		return -EINVAL;
	}
	if (dev->data->dev_started != 0) {
		RTE_FLOW_LOG(INFO,
			"Device with port_id=%"PRIu16" already started.\n",
			port_id);
		return -EINVAL;
	}
	if (port_attr == NULL) {
		RTE_FLOW_LOG(ERR, "Port %"PRIu16" info is NULL.\n", port_id);
		return -EINVAL;
	}
	if (queue_attr == NULL) {
		RTE_FLOW_LOG(ERR, "Port %"PRIu16" queue info is NULL.\n", port_id);
		return -EINVAL;
	}
	if ((port_attr->flags & RTE_FLOW_PORT_FLAG_SHARE_INDIRECT) &&
	    !rte_eth_dev_is_valid_port(port_attr->host_port_id)) {
		return rte_flow_error_set(error, ENODEV,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENODEV));
	}
	if (likely(!!ops->configure)) {
		ret = ops->configure(dev, port_attr, nb_queue, queue_attr, error);
		if (ret == 0)
			dev->data->flow_configured = 1;
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_configure(port_id, port_attr, nb_queue, queue_attr, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

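/*
 * Illustrative usage sketch (editor's addition, not part of this file):
 * query capabilities, then pre-allocate flow resources before device
 * start; the sizes below are placeholders.
 *
 *     struct rte_flow_error err;
 *     struct rte_flow_port_info info;
 *     struct rte_flow_queue_info qinfo;
 *     struct rte_flow_port_attr pattr = { .nb_counters = 1024 };
 *     struct rte_flow_queue_attr qattr = { .size = 64 };
 *     const struct rte_flow_queue_attr *qattrs[] = { &qattr };
 *
 *     rte_flow_info_get(port_id, &info, &qinfo, &err);
 *     rte_flow_configure(port_id, &pattr, 1, qattrs, &err);
 */
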
struct rte_flow_pattern_template *
rte_flow_pattern_template_create(uint16_t port_id,
		const struct rte_flow_pattern_template_attr *template_attr,
		const struct rte_flow_item pattern[],
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow_pattern_template *template;

	if (unlikely(!ops))
		return NULL;
	if (dev->data->flow_configured == 0) {
		RTE_FLOW_LOG(INFO,
			"Flow engine on port_id=%"PRIu16" is not configured.\n",
			port_id);
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_STATE,
				NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (template_attr == NULL) {
		RTE_FLOW_LOG(ERR,
			     "Port %"PRIu16" template attr is NULL.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (pattern == NULL) {
		RTE_FLOW_LOG(ERR,
			     "Port %"PRIu16" pattern is NULL.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (likely(!!ops->pattern_template_create)) {
		template = ops->pattern_template_create(dev, template_attr,
							pattern, error);
		if (template == NULL)
			flow_err(port_id, -rte_errno, error);

		rte_flow_trace_pattern_template_create(port_id, template_attr,
						       pattern, template);

		return template;
	}
	rte_flow_error_set(error, ENOTSUP,
			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(ENOTSUP));
	return NULL;
}

int
rte_flow_pattern_template_destroy(uint16_t port_id,
		struct rte_flow_pattern_template *pattern_template,
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(pattern_template == NULL))
		return 0;
	if (likely(!!ops->pattern_template_destroy)) {
		ret = flow_err(port_id,
			       ops->pattern_template_destroy(dev,
							     pattern_template,
							     error),
			       error);

		rte_flow_trace_pattern_template_destroy(port_id, pattern_template,
							ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

struct rte_flow_actions_template *
rte_flow_actions_template_create(uint16_t port_id,
			const struct rte_flow_actions_template_attr *template_attr,
			const struct rte_flow_action actions[],
			const struct rte_flow_action masks[],
			struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow_actions_template *template;

	if (unlikely(!ops))
		return NULL;
	if (dev->data->flow_configured == 0) {
		RTE_FLOW_LOG(INFO,
			"Flow engine on port_id=%"PRIu16" is not configured.\n",
			port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_STATE,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (template_attr == NULL) {
		RTE_FLOW_LOG(ERR,
			     "Port %"PRIu16" template attr is NULL.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (actions == NULL) {
		RTE_FLOW_LOG(ERR,
			     "Port %"PRIu16" actions is NULL.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (masks == NULL) {
		RTE_FLOW_LOG(ERR,
			     "Port %"PRIu16" masks is NULL.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (likely(!!ops->actions_template_create)) {
		template = ops->actions_template_create(dev, template_attr,
							actions, masks, error);
		if (template == NULL)
			flow_err(port_id, -rte_errno, error);

		rte_flow_trace_actions_template_create(port_id, template_attr, actions,
						       masks, template);

		return template;
	}
	rte_flow_error_set(error, ENOTSUP,
			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(ENOTSUP));
	return NULL;
}

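/**
 * Template API: destroy an actions template.
 *
 * As with pattern templates, a NULL handle is a successful no-op.
 */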
int
rte_flow_actions_template_destroy(uint16_t port_id,
			struct rte_flow_actions_template *actions_template,
			struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(actions_template == NULL))
		return 0;
	if (likely(!!ops->actions_template_destroy)) {
		ret = flow_err(port_id,
			       ops->actions_template_destroy(dev,
							     actions_template,
							     error),
			       error);

		rte_flow_trace_actions_template_destroy(port_id, actions_template,
							ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

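/**
 * Template API: combine pattern and actions templates into a table.
 *
 * A table pre-allocates resources for up to table_attr->nb_flows rules,
 * each instantiated from one of the given pattern templates and one of
 * the given actions templates.
 *
 * Illustrative usage sketch (not part of this file), reusing the 'pt'
 * and 'at' handles from the sketches above:
 *
 *	const struct rte_flow_template_table_attr tbl_attr = {
 *		.flow_attr = { .ingress = 1 },
 *		.nb_flows = 1024,
 *	};
 *	struct rte_flow_pattern_template *pts[] = { pt };
 *	struct rte_flow_actions_template *ats[] = { at };
 *	struct rte_flow_template_table *tbl =
 *		rte_flow_template_table_create(port_id, &tbl_attr,
 *					       pts, 1, ats, 1, &err);
 */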
struct rte_flow_template_table *
rte_flow_template_table_create(uint16_t port_id,
			const struct rte_flow_template_table_attr *table_attr,
			struct rte_flow_pattern_template *pattern_templates[],
			uint8_t nb_pattern_templates,
			struct rte_flow_actions_template *actions_templates[],
			uint8_t nb_actions_templates,
			struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow_template_table *table;

	if (unlikely(!ops))
		return NULL;
	if (dev->data->flow_configured == 0) {
		RTE_FLOW_LOG(INFO,
			"Flow engine on port_id=%"PRIu16" is not configured.\n",
			port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_STATE,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (table_attr == NULL) {
		RTE_FLOW_LOG(ERR,
			     "Port %"PRIu16" table attr is NULL.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (pattern_templates == NULL) {
		RTE_FLOW_LOG(ERR,
			     "Port %"PRIu16" pattern templates is NULL.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (actions_templates == NULL) {
		RTE_FLOW_LOG(ERR,
			     "Port %"PRIu16" actions templates is NULL.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (likely(!!ops->template_table_create)) {
		table = ops->template_table_create(dev, table_attr,
					pattern_templates, nb_pattern_templates,
					actions_templates, nb_actions_templates,
					error);
		if (table == NULL)
			flow_err(port_id, -rte_errno, error);

		rte_flow_trace_template_table_create(port_id, table_attr,
						     pattern_templates,
						     nb_pattern_templates,
						     actions_templates,
						     nb_actions_templates, table);

		return table;
	}
	rte_flow_error_set(error, ENOTSUP,
			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(ENOTSUP));
	return NULL;
}

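/**
 * Template API: destroy a template table.
 *
 * A NULL table handle is a successful no-op; flow rules created from
 * the table are expected to be destroyed beforehand.
 */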
int
rte_flow_template_table_destroy(uint16_t port_id,
				struct rte_flow_template_table *template_table,
				struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(template_table == NULL))
		return 0;
	if (likely(!!ops->template_table_destroy)) {
		ret = flow_err(port_id,
			       ops->template_table_destroy(dev,
							   template_table,
							   error),
			       error);

		rte_flow_trace_template_table_destroy(port_id, template_table,
						      ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

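/**
 * Enqueue-based flow API: create a rule from a template table.
 *
 * Unlike the template setup functions above, the async functions below
 * sit on the insertion fast path: they perform no 'ops' or argument
 * validation and assume a port already configured via
 * rte_flow_configure(). The operation is only enqueued on 'queue_id';
 * its result must be retrieved with rte_flow_pull(), after an explicit
 * rte_flow_push() if the operation was postponed.
 *
 * Illustrative usage sketch (not part of this file):
 *
 *	struct rte_flow_op_result res[4];
 *	const struct rte_flow_op_attr op_attr = { .postpone = 0 };
 *	struct rte_flow *flow =
 *		rte_flow_async_create(port_id, queue_id, &op_attr, tbl,
 *				      pattern, 0, actions, 0, NULL, &err);
 *	int n;
 *	do
 *		n = rte_flow_pull(port_id, queue_id, res, RTE_DIM(res), &err);
 *	while (n == 0);
 */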
struct rte_flow *
rte_flow_async_create(uint16_t port_id,
		      uint32_t queue_id,
		      const struct rte_flow_op_attr *op_attr,
		      struct rte_flow_template_table *template_table,
		      const struct rte_flow_item pattern[],
		      uint8_t pattern_template_index,
		      const struct rte_flow_action actions[],
		      uint8_t actions_template_index,
		      void *user_data,
		      struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow *flow;

	flow = ops->async_create(dev, queue_id,
				 op_attr, template_table,
				 pattern, pattern_template_index,
				 actions, actions_template_index,
				 user_data, error);
	if (flow == NULL)
		flow_err(port_id, -rte_errno, error);

	rte_flow_trace_async_create(port_id, queue_id, op_attr, template_table,
				    pattern, pattern_template_index, actions,
				    actions_template_index, user_data, flow);

	return flow;
}

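/**
 * Enqueue-based flow API: create a rule at a fixed index in a table,
 * taking a rule_index instead of a pattern and pattern template index.
 */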
struct rte_flow *
rte_flow_async_create_by_index(uint16_t port_id,
			       uint32_t queue_id,
			       const struct rte_flow_op_attr *op_attr,
			       struct rte_flow_template_table *template_table,
			       uint32_t rule_index,
			       const struct rte_flow_action actions[],
			       uint8_t actions_template_index,
			       void *user_data,
			       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow *flow;

	flow = ops->async_create_by_index(dev, queue_id,
					  op_attr, template_table, rule_index,
					  actions, actions_template_index,
					  user_data, error);
	if (flow == NULL)
		flow_err(port_id, -rte_errno, error);
	return flow;
}

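/**
 * Enqueue-based flow API: destroy a rule previously returned by one of
 * the async create functions.
 */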
int
rte_flow_async_destroy(uint16_t port_id,
		       uint32_t queue_id,
		       const struct rte_flow_op_attr *op_attr,
		       struct rte_flow *flow,
		       void *user_data,
		       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	ret = flow_err(port_id,
		       ops->async_destroy(dev, queue_id,
					  op_attr, flow,
					  user_data, error),
		       error);

	rte_flow_trace_async_destroy(port_id, queue_id, op_attr, flow,
				     user_data, ret);

	return ret;
}

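/**
 * Enqueue-based flow API: push all postponed operations on a queue to
 * the underlying device for processing.
 */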
int
rte_flow_push(uint16_t port_id,
	      uint32_t queue_id,
	      struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	ret = flow_err(port_id,
		       ops->push(dev, queue_id, error),
		       error);

	rte_flow_trace_push(port_id, queue_id, ret);

	return ret;
}

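/**
 * Enqueue-based flow API: pull completed operation results from a
 * queue into res[]. Returns the number of results read (at most n_res)
 * or a negative errno value on failure.
 */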
int
rte_flow_pull(uint16_t port_id,
	      uint32_t queue_id,
	      struct rte_flow_op_result res[],
	      uint16_t n_res,
	      struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;
	int rc;

	ret = ops->pull(dev, queue_id, res, n_res, error);
	rc = ret ? ret : flow_err(port_id, ret, error);

	rte_flow_trace_pull(port_id, queue_id, res, n_res, rc);

	return rc;
}

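/**
 * Enqueue-based indirect action API: create an indirect action handle
 * asynchronously. The handle is usable once the creation result has
 * been pulled successfully.
 */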
struct rte_flow_action_handle *
rte_flow_async_action_handle_create(uint16_t port_id,
		uint32_t queue_id,
		const struct rte_flow_op_attr *op_attr,
		const struct rte_flow_indir_action_conf *indir_action_conf,
		const struct rte_flow_action *action,
		void *user_data,
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow_action_handle *handle;

	handle = ops->async_action_handle_create(dev, queue_id, op_attr,
					     indir_action_conf, action, user_data, error);
	if (handle == NULL)
		flow_err(port_id, -rte_errno, error);

	rte_flow_trace_async_action_handle_create(port_id, queue_id, op_attr,
						  indir_action_conf, action,
						  user_data, handle);

	return handle;
}

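/**
 * Enqueue-based indirect action API: destroy an indirect action
 * handle. Destruction fails while the handle is still referenced by
 * flow rules.
 */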
int
rte_flow_async_action_handle_destroy(uint16_t port_id,
		uint32_t queue_id,
		const struct rte_flow_op_attr *op_attr,
		struct rte_flow_action_handle *action_handle,
		void *user_data,
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	ret = ops->async_action_handle_destroy(dev, queue_id, op_attr,
					   action_handle, user_data, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_async_action_handle_destroy(port_id, queue_id, op_attr,
						   action_handle, user_data, ret);

	return ret;
}

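/**
 * Enqueue-based indirect action API: update the configuration behind
 * an indirect action handle; rules referencing the handle pick up the
 * new behavior without being reinserted.
 */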
int
rte_flow_async_action_handle_update(uint16_t port_id,
		uint32_t queue_id,
		const struct rte_flow_op_attr *op_attr,
		struct rte_flow_action_handle *action_handle,
		const void *update,
		void *user_data,
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	ret = ops->async_action_handle_update(dev, queue_id, op_attr,
					  action_handle, update, user_data, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_async_action_handle_update(port_id, queue_id, op_attr,
						  action_handle, update,
						  user_data, ret);

	return ret;
}

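/**
 * Enqueue-based indirect action API: query the state of an indirect
 * action (counters, aging, etc.); 'data' must point to storage
 * suitable for the queried action type.
 */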
int
rte_flow_async_action_handle_query(uint16_t port_id,
		uint32_t queue_id,
		const struct rte_flow_op_attr *op_attr,
		const struct rte_flow_action_handle *action_handle,
		void *data,
		void *user_data,
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	ret = ops->async_action_handle_query(dev, queue_id, op_attr,
					  action_handle, data, user_data, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_async_action_handle_query(port_id, queue_id, op_attr,
						 action_handle, data, user_data,
						 ret);

	return ret;
}

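/**
 * Synchronous combined query-and-update of an indirect action. 'mode'
 * selects whether the query is performed before or after the update;
 * at least one of 'update' and 'query' must be non-NULL, as enforced
 * below.
 */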
int
rte_flow_action_handle_query_update(uint16_t port_id,
				    struct rte_flow_action_handle *handle,
				    const void *update, void *query,
				    enum rte_flow_query_update_mode mode,
				    struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_dev *dev;
	const struct rte_flow_ops *ops;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if (!handle)
		return -EINVAL;
	if (!update && !query)
		return -EINVAL;
	dev = &rte_eth_devices[port_id];
	ops = rte_flow_ops_get(port_id, error);
	if (!ops || !ops->action_handle_query_update)
		return -ENOTSUP;
	ret = ops->action_handle_query_update(dev, handle, update,
					      query, mode, error);
	return flow_err(port_id, ret, error);
}

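/**
 * Enqueue-based counterpart of rte_flow_action_handle_query_update():
 * same argument rules, with the operation enqueued on 'queue_id' and
 * completed through rte_flow_push()/rte_flow_pull().
 */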
int
rte_flow_async_action_handle_query_update(uint16_t port_id, uint32_t queue_id,
					  const struct rte_flow_op_attr *attr,
					  struct rte_flow_action_handle *handle,
					  const void *update, void *query,
					  enum rte_flow_query_update_mode mode,
					  void *user_data,
					  struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_dev *dev;
	const struct rte_flow_ops *ops;

	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
	if (!handle)
		return -EINVAL;
	if (!update && !query)
		return -EINVAL;
	dev = &rte_eth_devices[port_id];
	ops = rte_flow_ops_get(port_id, error);
	if (!ops || !ops->async_action_handle_query_update)
		return -ENOTSUP;
	ret = ops->async_action_handle_query_update(dev, queue_id, attr,
						    handle, update,
						    query, mode,
						    user_data, error);
	return flow_err(port_id, ret, error);
}