/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <errno.h>
#include <stddef.h>
#include <stdint.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_branch_prediction.h>
#include <rte_string_fns.h>
#include <rte_mbuf_dyn.h>
#include "rte_ethdev.h"
#include "rte_flow_driver.h"
#include "rte_flow.h"

#include "ethdev_trace.h"

/* Mbuf dynamic field name for metadata. */
int32_t rte_flow_dynf_metadata_offs = -1;

/* Mbuf dynamic field flag bit number for metadata. */
uint64_t rte_flow_dynf_metadata_mask;

/**
 * Flow elements description tables.
 */
struct rte_flow_desc_data {
	const char *name;
	size_t size;
	size_t (*desc_fn)(void *dst, const void *src);
};

/**
 * Copy an item or action description into a user buffer.
 *
 * @param buf
 *   Destination memory.
 * @param data
 *   Source memory.
 * @param size
 *   Requested copy size.
 * @param desc
 *   rte_flow_desc_item - for flow item conversion.
 *   rte_flow_desc_action - for flow action conversion.
 * @param type
 *   Offset into @p desc or a negative value for PMD private flow elements.
 *
 * @return
 *   Number of bytes needed to store the full description, or 0 when either
 *   pointer is NULL.
 */
static inline size_t
rte_flow_conv_copy(void *buf, const void *data, const size_t size,
		   const struct rte_flow_desc_data *desc, int type)
{
	/*
	 * Allow PMD private flow items and actions, identified by
	 * negative type values.
	 */
	bool rte_type = type >= 0;

	size_t sz = rte_type ? desc[type].size : sizeof(void *);
	if (buf == NULL || data == NULL)
		return 0;
	rte_memcpy(buf, data, (size > sz ? sz : size));
	if (rte_type && desc[type].desc_fn)
		sz += desc[type].desc_fn(size > 0 ? buf : NULL, data);
	return sz;
}

static size_t
rte_flow_item_flex_conv(void *buf, const void *data)
{
	struct rte_flow_item_flex *dst = buf;
	const struct rte_flow_item_flex *src = data;
	if (buf) {
		dst->pattern = rte_memcpy
			((void *)((uintptr_t)(dst + 1)), src->pattern,
			 src->length);
	}
	return src->length;
}

/** Generate flow_item[] entry. */
#define MK_FLOW_ITEM(t, s) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
		.desc_fn = NULL, \
	}

#define MK_FLOW_ITEM_FN(t, s, fn) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
		.desc_fn = fn, \
	}

/** Information about known flow pattern items. */
static const struct rte_flow_desc_data rte_flow_desc_item[] = {
	MK_FLOW_ITEM(END, 0),
	MK_FLOW_ITEM(VOID, 0),
	MK_FLOW_ITEM(INVERT, 0),
	MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
	MK_FLOW_ITEM(PORT_ID, sizeof(struct rte_flow_item_port_id)),
	MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)),
	MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
	MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
	MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
	MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
	MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
	MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
	MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
	MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
	MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
	MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
	MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
	MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
	MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
	MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)),
	MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(ESP, sizeof(struct rte_flow_item_esp)),
	MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
	MK_FLOW_ITEM(VXLAN_GPE, sizeof(struct rte_flow_item_vxlan_gpe)),
	MK_FLOW_ITEM(ARP_ETH_IPV4, sizeof(struct rte_flow_item_arp_eth_ipv4)),
	MK_FLOW_ITEM(IPV6_EXT, sizeof(struct rte_flow_item_ipv6_ext)),
	MK_FLOW_ITEM(IPV6_FRAG_EXT, sizeof(struct rte_flow_item_ipv6_frag_ext)),
	MK_FLOW_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
	MK_FLOW_ITEM(ICMP6_ND_NS, sizeof(struct rte_flow_item_icmp6_nd_ns)),
	MK_FLOW_ITEM(ICMP6_ND_NA, sizeof(struct rte_flow_item_icmp6_nd_na)),
	MK_FLOW_ITEM(ICMP6_ND_OPT, sizeof(struct rte_flow_item_icmp6_nd_opt)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_SLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_TLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
	MK_FLOW_ITEM(MARK, sizeof(struct rte_flow_item_mark)),
	MK_FLOW_ITEM(META, sizeof(struct rte_flow_item_meta)),
	MK_FLOW_ITEM(TAG, sizeof(struct rte_flow_item_tag)),
	MK_FLOW_ITEM(GRE_KEY, sizeof(rte_be32_t)),
	MK_FLOW_ITEM(GRE_OPTION, sizeof(struct rte_flow_item_gre_opt)),
	MK_FLOW_ITEM(GTP_PSC, sizeof(struct rte_flow_item_gtp_psc)),
	MK_FLOW_ITEM(PPPOES, sizeof(struct rte_flow_item_pppoe)),
	MK_FLOW_ITEM(PPPOED, sizeof(struct rte_flow_item_pppoe)),
	MK_FLOW_ITEM(PPPOE_PROTO_ID,
			sizeof(struct rte_flow_item_pppoe_proto_id)),
	MK_FLOW_ITEM(NSH, sizeof(struct rte_flow_item_nsh)),
	MK_FLOW_ITEM(IGMP, sizeof(struct rte_flow_item_igmp)),
	MK_FLOW_ITEM(AH, sizeof(struct rte_flow_item_ah)),
	MK_FLOW_ITEM(HIGIG2, sizeof(struct rte_flow_item_higig2_hdr)),
	MK_FLOW_ITEM(L2TPV3OIP, sizeof(struct rte_flow_item_l2tpv3oip)),
	MK_FLOW_ITEM(PFCP, sizeof(struct rte_flow_item_pfcp)),
	MK_FLOW_ITEM(ECPRI, sizeof(struct rte_flow_item_ecpri)),
	MK_FLOW_ITEM(GENEVE_OPT, sizeof(struct rte_flow_item_geneve_opt)),
	MK_FLOW_ITEM(INTEGRITY, sizeof(struct rte_flow_item_integrity)),
	MK_FLOW_ITEM(CONNTRACK, sizeof(uint32_t)),
	MK_FLOW_ITEM(PORT_REPRESENTOR, sizeof(struct rte_flow_item_ethdev)),
	MK_FLOW_ITEM(REPRESENTED_PORT, sizeof(struct rte_flow_item_ethdev)),
	MK_FLOW_ITEM_FN(FLEX, sizeof(struct rte_flow_item_flex),
			rte_flow_item_flex_conv),
	MK_FLOW_ITEM(L2TPV2, sizeof(struct rte_flow_item_l2tpv2)),
	MK_FLOW_ITEM(PPP, sizeof(struct rte_flow_item_ppp)),
	MK_FLOW_ITEM(METER_COLOR, sizeof(struct rte_flow_item_meter_color)),
	MK_FLOW_ITEM(IPV6_ROUTING_EXT, sizeof(struct rte_flow_item_ipv6_routing_ext)),
};

/** Generate flow_action[] entry. */
#define MK_FLOW_ACTION(t, s) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
		.desc_fn = NULL, \
	}

#define MK_FLOW_ACTION_FN(t, fn) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = 0, \
		.desc_fn = fn, \
	}

/** Information about known flow actions. */
static const struct rte_flow_desc_data rte_flow_desc_action[] = {
	MK_FLOW_ACTION(END, 0),
	MK_FLOW_ACTION(VOID, 0),
	MK_FLOW_ACTION(PASSTHRU, 0),
	MK_FLOW_ACTION(JUMP, sizeof(struct rte_flow_action_jump)),
	MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
	MK_FLOW_ACTION(FLAG, 0),
	MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
	MK_FLOW_ACTION(DROP, 0),
	MK_FLOW_ACTION(COUNT, sizeof(struct rte_flow_action_count)),
	MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)),
	MK_FLOW_ACTION(PF, 0),
	MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
	MK_FLOW_ACTION(PORT_ID, sizeof(struct rte_flow_action_port_id)),
	MK_FLOW_ACTION(METER, sizeof(struct rte_flow_action_meter)),
	MK_FLOW_ACTION(SECURITY, sizeof(struct rte_flow_action_security)),
	MK_FLOW_ACTION(OF_DEC_NW_TTL, 0),
	MK_FLOW_ACTION(OF_POP_VLAN, 0),
	MK_FLOW_ACTION(OF_PUSH_VLAN,
		       sizeof(struct rte_flow_action_of_push_vlan)),
	MK_FLOW_ACTION(OF_SET_VLAN_VID,
		       sizeof(struct rte_flow_action_of_set_vlan_vid)),
	MK_FLOW_ACTION(OF_SET_VLAN_PCP,
		       sizeof(struct rte_flow_action_of_set_vlan_pcp)),
	MK_FLOW_ACTION(OF_POP_MPLS,
		       sizeof(struct rte_flow_action_of_pop_mpls)),
	MK_FLOW_ACTION(OF_PUSH_MPLS,
		       sizeof(struct rte_flow_action_of_push_mpls)),
	MK_FLOW_ACTION(VXLAN_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
	MK_FLOW_ACTION(VXLAN_DECAP, 0),
	MK_FLOW_ACTION(NVGRE_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
	MK_FLOW_ACTION(NVGRE_DECAP, 0),
	MK_FLOW_ACTION(RAW_ENCAP, sizeof(struct rte_flow_action_raw_encap)),
	MK_FLOW_ACTION(RAW_DECAP, sizeof(struct rte_flow_action_raw_decap)),
	MK_FLOW_ACTION(SET_IPV4_SRC,
		       sizeof(struct rte_flow_action_set_ipv4)),
	MK_FLOW_ACTION(SET_IPV4_DST,
		       sizeof(struct rte_flow_action_set_ipv4)),
	MK_FLOW_ACTION(SET_IPV6_SRC,
		       sizeof(struct rte_flow_action_set_ipv6)),
	MK_FLOW_ACTION(SET_IPV6_DST,
		       sizeof(struct rte_flow_action_set_ipv6)),
	MK_FLOW_ACTION(SET_TP_SRC,
		       sizeof(struct rte_flow_action_set_tp)),
	MK_FLOW_ACTION(SET_TP_DST,
		       sizeof(struct rte_flow_action_set_tp)),
	MK_FLOW_ACTION(MAC_SWAP, 0),
	MK_FLOW_ACTION(DEC_TTL, 0),
	MK_FLOW_ACTION(SET_TTL, sizeof(struct rte_flow_action_set_ttl)),
	MK_FLOW_ACTION(SET_MAC_SRC, sizeof(struct rte_flow_action_set_mac)),
	MK_FLOW_ACTION(SET_MAC_DST, sizeof(struct rte_flow_action_set_mac)),
	MK_FLOW_ACTION(INC_TCP_SEQ, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(DEC_TCP_SEQ, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(INC_TCP_ACK, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(DEC_TCP_ACK, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(SET_TAG, sizeof(struct rte_flow_action_set_tag)),
	MK_FLOW_ACTION(SET_META, sizeof(struct rte_flow_action_set_meta)),
	MK_FLOW_ACTION(SET_IPV4_DSCP, sizeof(struct rte_flow_action_set_dscp)),
	MK_FLOW_ACTION(SET_IPV6_DSCP, sizeof(struct rte_flow_action_set_dscp)),
	MK_FLOW_ACTION(AGE, sizeof(struct rte_flow_action_age)),
	MK_FLOW_ACTION(SAMPLE, sizeof(struct rte_flow_action_sample)),
	MK_FLOW_ACTION(MODIFY_FIELD,
		       sizeof(struct rte_flow_action_modify_field)),
	/**
	 * Indirect action represented as handle of type
	 * (struct rte_flow_action_handle *) stored in conf field (see
	 * struct rte_flow_action); no need for additional structure to
	 * store indirect action handle.
	 */
	MK_FLOW_ACTION(INDIRECT, 0),
	MK_FLOW_ACTION(CONNTRACK, sizeof(struct rte_flow_action_conntrack)),
	MK_FLOW_ACTION(PORT_REPRESENTOR, sizeof(struct rte_flow_action_ethdev)),
	MK_FLOW_ACTION(REPRESENTED_PORT, sizeof(struct rte_flow_action_ethdev)),
	MK_FLOW_ACTION(METER_MARK, sizeof(struct rte_flow_action_meter_mark)),
	MK_FLOW_ACTION(SEND_TO_KERNEL, 0),
};

int
rte_flow_dynf_metadata_register(void)
{
	int offset;
	int flag;

	static const struct rte_mbuf_dynfield desc_offs = {
		.name = RTE_MBUF_DYNFIELD_METADATA_NAME,
		.size = sizeof(uint32_t),
		.align = __alignof__(uint32_t),
	};
	static const struct rte_mbuf_dynflag desc_flag = {
		.name = RTE_MBUF_DYNFLAG_METADATA_NAME,
	};

	offset = rte_mbuf_dynfield_register(&desc_offs);
	if (offset < 0)
		goto error;
	flag = rte_mbuf_dynflag_register(&desc_flag);
	if (flag < 0)
		goto error;
	rte_flow_dynf_metadata_offs = offset;
	rte_flow_dynf_metadata_mask = RTE_BIT64(flag);

	rte_flow_trace_dynf_metadata_register(offset, RTE_BIT64(flag));

	return 0;

error:
	rte_flow_dynf_metadata_offs = -1;
	rte_flow_dynf_metadata_mask = UINT64_C(0);
	return -rte_errno;
}
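
/*
 * Usage sketch (illustrative only, not compiled here): an application
 * registers the metadata dynamic field once before creating flow rules
 * that produce metadata, then reads it from received mbufs; the mbuf
 * variable "m" is a placeholder.
 *
 *	if (rte_flow_dynf_metadata_register() < 0)
 *		rte_exit(EXIT_FAILURE, "cannot register metadata dynfield");
 *	...
 *	if (m->ol_flags & rte_flow_dynf_metadata_mask) {
 *		uint32_t meta = *RTE_FLOW_DYNF_METADATA(m);
 *		...
 *	}
 */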

static inline void
fts_enter(struct rte_eth_dev *dev)
{
	if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
		pthread_mutex_lock(&dev->data->flow_ops_mutex);
}

static inline void
fts_exit(struct rte_eth_dev *dev)
{
	if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
		pthread_mutex_unlock(&dev->data->flow_ops_mutex);
}

static int
flow_err(uint16_t port_id, int ret, struct rte_flow_error *error)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return rte_flow_error_set(error, EIO,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(EIO));
	return ret;
}

/* Get generic flow operations structure from a port. */
const struct rte_flow_ops *
rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops;
	int code;

	if (unlikely(!rte_eth_dev_is_valid_port(port_id)))
		code = ENODEV;
	else if (unlikely(dev->dev_ops->flow_ops_get == NULL))
		/* flow API not supported with this driver dev_ops */
		code = ENOSYS;
	else
		code = dev->dev_ops->flow_ops_get(dev, &ops);
	if (code == 0 && ops == NULL)
		/* flow API not supported with this device */
		code = ENOSYS;

	if (code != 0) {
		rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, rte_strerror(code));
		return NULL;
	}
	return ops;
}

/* Check whether a flow rule can be created on a given port. */
int
rte_flow_validate(uint16_t port_id,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;

	if (likely(!!attr) && attr->transfer &&
	    (attr->ingress || attr->egress)) {
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ATTR,
					  attr, "cannot use attr ingress/egress with attr transfer");
	}

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->validate)) {
		fts_enter(dev);
		ret = ops->validate(dev, attr, pattern, actions, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_validate(port_id, attr, pattern, actions, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Create a flow rule on a given port. */
struct rte_flow *
rte_flow_create(uint16_t port_id,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct rte_flow *flow;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return NULL;
	if (likely(!!ops->create)) {
		fts_enter(dev);
		flow = ops->create(dev, attr, pattern, actions, error);
		fts_exit(dev);
		if (flow == NULL)
			flow_err(port_id, -rte_errno, error);

		rte_flow_trace_create(port_id, attr, pattern, actions, flow);

		return flow;
	}
	rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(ENOSYS));
	return NULL;
}
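
/*
 * Usage sketch (minimal, error handling trimmed, port_id and queue index
 * are placeholders): validate a rule first, then create it.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *flow = NULL;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern, actions, &err);
 */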

/* Destroy a flow rule on a given port. */
int
rte_flow_destroy(uint16_t port_id,
		 struct rte_flow *flow,
		 struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->destroy)) {
		fts_enter(dev);
		ret = ops->destroy(dev, flow, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_destroy(port_id, flow, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Destroy all flow rules associated with a port. */
int
rte_flow_flush(uint16_t port_id,
	       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->flush)) {
		fts_enter(dev);
		ret = ops->flush(dev, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_flush(port_id, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Query an existing flow rule. */
int
rte_flow_query(uint16_t port_id,
	       struct rte_flow *flow,
	       const struct rte_flow_action *action,
	       void *data,
	       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (!ops)
		return -rte_errno;
	if (likely(!!ops->query)) {
		fts_enter(dev);
		ret = ops->query(dev, flow, action, data, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_query(port_id, flow, action, data, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Restrict ingress traffic to the defined flow rules. */
int
rte_flow_isolate(uint16_t port_id,
		 int set,
		 struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (!ops)
		return -rte_errno;
	if (likely(!!ops->isolate)) {
		fts_enter(dev);
		ret = ops->isolate(dev, set, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_isolate(port_id, set, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}
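
/*
 * Usage sketch: enter isolated mode so that only traffic matching
 * explicit flow rules reaches the application (PMDs may refuse the
 * switch once rules exist or the port is started); pass 0 to leave it.
 *
 *	struct rte_flow_error err;
 *
 *	if (rte_flow_isolate(port_id, 1, &err) != 0)
 *		printf("isolated mode rejected: %s\n", err.message);
 */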

/* Initialize flow error structure. */
int
rte_flow_error_set(struct rte_flow_error *error,
		   int code,
		   enum rte_flow_error_type type,
		   const void *cause,
		   const char *message)
{
	if (error) {
		*error = (struct rte_flow_error){
			.type = type,
			.cause = cause,
			.message = message,
		};
	}
	rte_errno = code;
	return -code;
}

/** Pattern item specification types. */
enum rte_flow_conv_item_spec_type {
	RTE_FLOW_CONV_ITEM_SPEC,
	RTE_FLOW_CONV_ITEM_LAST,
	RTE_FLOW_CONV_ITEM_MASK,
};

/**
 * Copy pattern item specification.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] item
 *   Pattern item to copy specification from.
 * @param type
 *   Specification selector for either @p spec, @p last or @p mask.
 *
 * @return
 *   Number of bytes needed to store pattern item specification regardless
 *   of @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_item_spec(void *buf, const size_t size,
			const struct rte_flow_item *item,
			enum rte_flow_conv_item_spec_type type)
{
	size_t off;
	const void *data =
		type == RTE_FLOW_CONV_ITEM_SPEC ? item->spec :
		type == RTE_FLOW_CONV_ITEM_LAST ? item->last :
		type == RTE_FLOW_CONV_ITEM_MASK ? item->mask :
		NULL;

	switch (item->type) {
		union {
			const struct rte_flow_item_raw *raw;
		} spec;
		union {
			const struct rte_flow_item_raw *raw;
		} last;
		union {
			const struct rte_flow_item_raw *raw;
		} mask;
		union {
			const struct rte_flow_item_raw *raw;
		} src;
		union {
			struct rte_flow_item_raw *raw;
		} dst;
		size_t tmp;

	case RTE_FLOW_ITEM_TYPE_RAW:
		spec.raw = item->spec;
		last.raw = item->last ? item->last : item->spec;
		mask.raw = item->mask ? item->mask : &rte_flow_item_raw_mask;
		src.raw = data;
		dst.raw = buf;
		rte_memcpy(dst.raw,
			   (&(struct rte_flow_item_raw){
				.relative = src.raw->relative,
				.search = src.raw->search,
				.reserved = src.raw->reserved,
				.offset = src.raw->offset,
				.limit = src.raw->limit,
				.length = src.raw->length,
			   }),
			   size > sizeof(*dst.raw) ? sizeof(*dst.raw) : size);
		off = sizeof(*dst.raw);
		if (type == RTE_FLOW_CONV_ITEM_SPEC ||
		    (type == RTE_FLOW_CONV_ITEM_MASK &&
		     ((spec.raw->length & mask.raw->length) >=
		      (last.raw->length & mask.raw->length))))
			tmp = spec.raw->length & mask.raw->length;
		else
			tmp = last.raw->length & mask.raw->length;
		if (tmp) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.raw->pattern));
			if (size >= off + tmp)
				dst.raw->pattern = rte_memcpy
					((void *)((uintptr_t)dst.raw + off),
					 src.raw->pattern, tmp);
			off += tmp;
		}
		break;
	default:
		off = rte_flow_conv_copy(buf, data, size,
					 rte_flow_desc_item, item->type);
		break;
	}
	return off;
}

/**
 * Copy action configuration.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] action
 *   Action to copy configuration from.
 *
 * @return
 *   Number of bytes needed to store the action configuration regardless
 *   of @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_action_conf(void *buf, const size_t size,
			  const struct rte_flow_action *action)
{
	size_t off;

	switch (action->type) {
		union {
			const struct rte_flow_action_rss *rss;
			const struct rte_flow_action_vxlan_encap *vxlan_encap;
			const struct rte_flow_action_nvgre_encap *nvgre_encap;
		} src;
		union {
			struct rte_flow_action_rss *rss;
			struct rte_flow_action_vxlan_encap *vxlan_encap;
			struct rte_flow_action_nvgre_encap *nvgre_encap;
		} dst;
		size_t tmp;
		int ret;

	case RTE_FLOW_ACTION_TYPE_RSS:
		src.rss = action->conf;
		dst.rss = buf;
		rte_memcpy(dst.rss,
			   (&(struct rte_flow_action_rss){
				.func = src.rss->func,
				.level = src.rss->level,
				.types = src.rss->types,
				.key_len = src.rss->key_len,
				.queue_num = src.rss->queue_num,
			   }),
			   size > sizeof(*dst.rss) ? sizeof(*dst.rss) : size);
		off = sizeof(*dst.rss);
		if (src.rss->key_len && src.rss->key) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->key));
			tmp = sizeof(*src.rss->key) * src.rss->key_len;
			if (size >= off + tmp)
				dst.rss->key = rte_memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->key, tmp);
			off += tmp;
		}
		if (src.rss->queue_num) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->queue));
			tmp = sizeof(*src.rss->queue) * src.rss->queue_num;
			if (size >= off + tmp)
				dst.rss->queue = rte_memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->queue, tmp);
			off += tmp;
		}
		break;
	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
	case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
		src.vxlan_encap = action->conf;
		dst.vxlan_encap = buf;
		RTE_BUILD_BUG_ON(sizeof(*src.vxlan_encap) !=
				 sizeof(*src.nvgre_encap) ||
				 offsetof(struct rte_flow_action_vxlan_encap,
					  definition) !=
				 offsetof(struct rte_flow_action_nvgre_encap,
					  definition));
		off = sizeof(*dst.vxlan_encap);
		if (src.vxlan_encap->definition) {
			off = RTE_ALIGN_CEIL
				(off, sizeof(*dst.vxlan_encap->definition));
			ret = rte_flow_conv
				(RTE_FLOW_CONV_OP_PATTERN,
				 (void *)((uintptr_t)dst.vxlan_encap + off),
				 size > off ? size - off : 0,
				 src.vxlan_encap->definition, NULL);
			if (ret < 0)
				return 0;
			if (size >= off + ret)
				dst.vxlan_encap->definition =
					(void *)((uintptr_t)dst.vxlan_encap +
						 off);
			off += ret;
		}
		break;
	default:
		off = rte_flow_conv_copy(buf, action->conf, size,
					 rte_flow_desc_action, action->type);
		break;
	}
	return off;
}

/**
 * Copy a list of pattern items.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source pattern items.
 * @param num
 *   Maximum number of pattern items to process from @p src or 0 to process
 *   the entire list. In both cases, processing stops after
 *   RTE_FLOW_ITEM_TYPE_END is encountered.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store
 *   pattern items regardless of @p size on success (@p dst contents are
 *   truncated to @p size if not large enough), a negative errno value
 *   otherwise and rte_errno is set.
 */
static int
rte_flow_conv_pattern(struct rte_flow_item *dst,
		      const size_t size,
		      const struct rte_flow_item *src,
		      unsigned int num,
		      struct rte_flow_error *error)
{
	uintptr_t data = (uintptr_t)dst;
	size_t off;
	size_t ret;
	unsigned int i;

	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
		/* Allow PMD private flow item. */
		if (((int)src->type >= 0) &&
		    ((size_t)src->type >= RTE_DIM(rte_flow_desc_item) ||
		     !rte_flow_desc_item[src->type].name))
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, src,
				 "cannot convert unknown item type");
		if (size >= off + sizeof(*dst))
			*dst = (struct rte_flow_item){
				.type = src->type,
			};
		off += sizeof(*dst);
		if (!src->type)
			num = i + 1;
	}
	num = i;
	src -= num;
	dst -= num;
	do {
		if (src->spec) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_SPEC);
			if (size && size >= off + ret)
				dst->spec = (void *)(data + off);
			off += ret;
		}
		if (src->last) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_LAST);
			if (size && size >= off + ret)
				dst->last = (void *)(data + off);
			off += ret;
		}
		if (src->mask) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_MASK);
			if (size && size >= off + ret)
				dst->mask = (void *)(data + off);
			off += ret;
		}
		++src;
		++dst;
	} while (--num);
	return off;
}

/**
 * Copy a list of actions.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source actions.
 * @param num
 *   Maximum number of actions to process from @p src or 0 to process the
 *   entire list. In both cases, processing stops after
 *   RTE_FLOW_ACTION_TYPE_END is encountered.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store
 *   actions regardless of @p size on success (@p dst contents are truncated
 *   to @p size if not large enough), a negative errno value otherwise and
 *   rte_errno is set.
 */
static int
rte_flow_conv_actions(struct rte_flow_action *dst,
		      const size_t size,
		      const struct rte_flow_action *src,
		      unsigned int num,
		      struct rte_flow_error *error)
{
	uintptr_t data = (uintptr_t)dst;
	size_t off;
	size_t ret;
	unsigned int i;

	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
		/* Allow PMD private flow action. */
		if (((int)src->type >= 0) &&
		    ((size_t)src->type >= RTE_DIM(rte_flow_desc_action) ||
		     !rte_flow_desc_action[src->type].name))
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
				 src, "cannot convert unknown action type");
		if (size >= off + sizeof(*dst))
			*dst = (struct rte_flow_action){
				.type = src->type,
			};
		off += sizeof(*dst);
		if (!src->type)
			num = i + 1;
	}
	num = i;
	src -= num;
	dst -= num;
	do {
		if (src->conf) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_action_conf
				((void *)(data + off),
				 size > off ? size - off : 0, src);
			if (size && size >= off + ret)
				dst->conf = (void *)(data + off);
			off += ret;
		}
		++src;
		++dst;
	} while (--num);
	return off;
}

/**
 * Copy flow rule components.
 *
 * This comprises the flow rule descriptor itself, attributes, pattern and
 * actions list. NULL components in @p src are skipped.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source flow rule descriptor.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store all
 *   components including the descriptor regardless of @p size on success
 *   (@p dst contents are truncated to @p size if not large enough), a
 *   negative errno value otherwise and rte_errno is set.
 */
static int
rte_flow_conv_rule(struct rte_flow_conv_rule *dst,
		   const size_t size,
		   const struct rte_flow_conv_rule *src,
		   struct rte_flow_error *error)
{
	size_t off;
	int ret;

	rte_memcpy(dst,
		   (&(struct rte_flow_conv_rule){
			.attr = NULL,
			.pattern = NULL,
			.actions = NULL,
		   }),
		   size > sizeof(*dst) ? sizeof(*dst) : size);
	off = sizeof(*dst);
	if (src->attr_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		if (size && size >= off + sizeof(*dst->attr))
			dst->attr = rte_memcpy
				((void *)((uintptr_t)dst + off),
				 src->attr_ro, sizeof(*dst->attr));
		off += sizeof(*dst->attr);
	}
	if (src->pattern_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		ret = rte_flow_conv_pattern((void *)((uintptr_t)dst + off),
					    size > off ? size - off : 0,
					    src->pattern_ro, 0, error);
		if (ret < 0)
			return ret;
		if (size && size >= off + (size_t)ret)
			dst->pattern = (void *)((uintptr_t)dst + off);
		off += ret;
	}
	if (src->actions_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		ret = rte_flow_conv_actions((void *)((uintptr_t)dst + off),
					    size > off ? size - off : 0,
					    src->actions_ro, 0, error);
		if (ret < 0)
			return ret;
		if (size >= off + (size_t)ret)
			dst->actions = (void *)((uintptr_t)dst + off);
		off += ret;
	}
	return off;
}

/**
 * Retrieve the name of a pattern item/action type.
 *
 * @param is_action
 *   Nonzero when @p src represents an action type instead of a pattern item
 *   type.
 * @param is_ptr
 *   Nonzero to write string address instead of contents into @p dst.
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Depending on @p is_action, source pattern item or action type cast as a
 *   pointer.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store the
 *   name or its address regardless of @p size on success (@p dst contents
 *   are truncated to @p size if not large enough), a negative errno value
 *   otherwise and rte_errno is set.
 */
static int
rte_flow_conv_name(int is_action,
		   int is_ptr,
		   char *dst,
		   const size_t size,
		   const void *src,
		   struct rte_flow_error *error)
{
	struct desc_info {
		const struct rte_flow_desc_data *data;
		size_t num;
	};
	static const struct desc_info info_rep[2] = {
		{ rte_flow_desc_item, RTE_DIM(rte_flow_desc_item), },
		{ rte_flow_desc_action, RTE_DIM(rte_flow_desc_action), },
	};
	const struct desc_info *const info = &info_rep[!!is_action];
	unsigned int type = (uintptr_t)src;

	if (type >= info->num)
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			 "unknown object type to retrieve the name of");
	if (!is_ptr)
		return strlcpy(dst, info->data[type].name, size);
	if (size >= sizeof(const char **))
		*((const char **)dst) = info->data[type].name;
	return sizeof(const char **);
}

/** Helper function to convert flow API objects. */
int
rte_flow_conv(enum rte_flow_conv_op op,
	      void *dst,
	      size_t size,
	      const void *src,
	      struct rte_flow_error *error)
{
	int ret;

	switch (op) {
		const struct rte_flow_attr *attr;

	case RTE_FLOW_CONV_OP_NONE:
		ret = 0;
		break;
	case RTE_FLOW_CONV_OP_ATTR:
		attr = src;
		if (size > sizeof(*attr))
			size = sizeof(*attr);
		rte_memcpy(dst, attr, size);
		ret = sizeof(*attr);
		break;
	case RTE_FLOW_CONV_OP_ITEM:
		ret = rte_flow_conv_pattern(dst, size, src, 1, error);
		break;
	case RTE_FLOW_CONV_OP_ACTION:
		ret = rte_flow_conv_actions(dst, size, src, 1, error);
		break;
	case RTE_FLOW_CONV_OP_PATTERN:
		ret = rte_flow_conv_pattern(dst, size, src, 0, error);
		break;
	case RTE_FLOW_CONV_OP_ACTIONS:
		ret = rte_flow_conv_actions(dst, size, src, 0, error);
		break;
	case RTE_FLOW_CONV_OP_RULE:
		ret = rte_flow_conv_rule(dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ITEM_NAME:
		ret = rte_flow_conv_name(0, 0, dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ACTION_NAME:
		ret = rte_flow_conv_name(1, 0, dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ITEM_NAME_PTR:
		ret = rte_flow_conv_name(0, 1, dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ACTION_NAME_PTR:
		ret = rte_flow_conv_name(1, 1, dst, size, src, error);
		break;
	default:
		ret = rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			 "unknown object conversion operation");
	}

	rte_flow_trace_conv(op, dst, size, src, ret);

	return ret;
}
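
/*
 * Usage sketch: conversion operations follow a two-pass pattern. A first
 * call with a zero-sized buffer (dst may be NULL then) returns the number
 * of bytes required, a second call fills a buffer of that size. The
 * attr/pattern/actions variables are as in the rte_flow_create() example
 * above.
 *
 *	struct rte_flow_conv_rule rule = {
 *		.attr_ro = &attr,
 *		.pattern_ro = pattern,
 *		.actions_ro = actions,
 *	};
 *	int len = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, &err);
 *
 *	if (len > 0) {
 *		struct rte_flow_conv_rule *copy = malloc(len);
 *
 *		if (copy != NULL)
 *			rte_flow_conv(RTE_FLOW_CONV_OP_RULE, copy, len,
 *				      &rule, &err);
 *	}
 */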

/** Store a full rte_flow description. */
size_t
rte_flow_copy(struct rte_flow_desc *desc, size_t len,
	      const struct rte_flow_attr *attr,
	      const struct rte_flow_item *items,
	      const struct rte_flow_action *actions)
{
	/*
	 * Overlap struct rte_flow_conv_rule with struct rte_flow_desc in
	 * order to convert the former to the latter without wasting space.
	 */
	struct rte_flow_conv_rule *dst =
		len ?
		(void *)((uintptr_t)desc +
			 (offsetof(struct rte_flow_desc, actions) -
			  offsetof(struct rte_flow_conv_rule, actions))) :
		NULL;
	size_t dst_size =
		len > sizeof(*desc) - sizeof(*dst) ?
		len - (sizeof(*desc) - sizeof(*dst)) :
		0;
	struct rte_flow_conv_rule src = {
		.attr_ro = NULL,
		.pattern_ro = items,
		.actions_ro = actions,
	};
	int ret;

	RTE_BUILD_BUG_ON(sizeof(struct rte_flow_desc) <
			 sizeof(struct rte_flow_conv_rule));
	if (dst_size &&
	    (&dst->pattern != &desc->items ||
	     &dst->actions != &desc->actions ||
	     (uintptr_t)(dst + 1) != (uintptr_t)(desc + 1))) {
		rte_errno = EINVAL;
		return 0;
	}
	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, dst, dst_size, &src, NULL);
	if (ret < 0)
		return 0;
	ret += sizeof(*desc) - sizeof(*dst);
	rte_memcpy(desc,
		   (&(struct rte_flow_desc){
			.size = ret,
			.attr = *attr,
			.items = dst_size ? dst->pattern : NULL,
			.actions = dst_size ? dst->actions : NULL,
		   }),
		   len > sizeof(*desc) ? sizeof(*desc) : len);

	rte_flow_trace_copy(desc, len, attr, items, actions, ret);

	return ret;
}
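
/*
 * Usage sketch: like rte_flow_conv(), rte_flow_copy() returns the size
 * needed regardless of @p len, so a first call against a small buffer can
 * size the final allocation; a zero return signals an error.
 *
 *	struct rte_flow_desc head;
 *	struct rte_flow_desc *copy;
 *	size_t need = rte_flow_copy(&head, sizeof(head),
 *				    &attr, pattern, actions);
 *
 *	if (need && (copy = malloc(need)) != NULL)
 *		rte_flow_copy(copy, need, &attr, pattern, actions);
 */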

int
rte_flow_dev_dump(uint16_t port_id, struct rte_flow *flow,
			FILE *file, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->dev_dump)) {
		fts_enter(dev);
		ret = ops->dev_dump(dev, flow, file, error);
		fts_exit(dev);
		return flow_err(port_id, ret, error);
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

int
rte_flow_get_aged_flows(uint16_t port_id, void **contexts,
		    uint32_t nb_contexts, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->get_aged_flows)) {
		fts_enter(dev);
		ret = ops->get_aged_flows(dev, contexts, nb_contexts, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_get_aged_flows(port_id, contexts, nb_contexts, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}
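
/*
 * Usage sketch: poll flows that expired through an AGE action; each
 * returned pointer is the context the application supplied in
 * struct rte_flow_action_age. The array size and the
 * handle_expired_flow() callback are hypothetical placeholders.
 *
 *	void *contexts[64];
 *	int n = rte_flow_get_aged_flows(port_id, contexts, 64, &err);
 *
 *	for (int i = 0; i < n; i++)
 *		handle_expired_flow(contexts[i]);
 */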

int
rte_flow_get_q_aged_flows(uint16_t port_id, uint32_t queue_id, void **contexts,
			  uint32_t nb_contexts, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->get_q_aged_flows)) {
		fts_enter(dev);
		ret = ops->get_q_aged_flows(dev, queue_id, contexts,
					    nb_contexts, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_get_q_aged_flows(port_id, queue_id, contexts,
						nb_contexts, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

struct rte_flow_action_handle *
rte_flow_action_handle_create(uint16_t port_id,
			      const struct rte_flow_indir_action_conf *conf,
			      const struct rte_flow_action *action,
			      struct rte_flow_error *error)
{
	struct rte_flow_action_handle *handle;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return NULL;
	if (unlikely(!ops->action_handle_create)) {
		rte_flow_error_set(error, ENOSYS,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   rte_strerror(ENOSYS));
		return NULL;
	}
	handle = ops->action_handle_create(&rte_eth_devices[port_id],
					   conf, action, error);
	if (handle == NULL)
		flow_err(port_id, -rte_errno, error);

	rte_flow_trace_action_handle_create(port_id, conf, action, handle);

	return handle;
}

int
rte_flow_action_handle_destroy(uint16_t port_id,
			       struct rte_flow_action_handle *handle,
			       struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(!ops->action_handle_destroy))
		return rte_flow_error_set(error, ENOSYS,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOSYS));
	ret = ops->action_handle_destroy(&rte_eth_devices[port_id],
					 handle, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_action_handle_destroy(port_id, handle, ret);

	return ret;
}

int
rte_flow_action_handle_update(uint16_t port_id,
			      struct rte_flow_action_handle *handle,
			      const void *update,
			      struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(!ops->action_handle_update))
		return rte_flow_error_set(error, ENOSYS,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOSYS));
	ret = ops->action_handle_update(&rte_eth_devices[port_id], handle,
					update, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_action_handle_update(port_id, handle, update, ret);

	return ret;
}

int
rte_flow_action_handle_query(uint16_t port_id,
			     const struct rte_flow_action_handle *handle,
			     void *data,
			     struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(!ops->action_handle_query))
		return rte_flow_error_set(error, ENOSYS,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOSYS));
	ret = ops->action_handle_query(&rte_eth_devices[port_id], handle,
				       data, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_action_handle_query(port_id, handle, data, ret);

	return ret;
}
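
/*
 * Usage sketch: an indirect (shared) counter created once, referenced
 * from rules through RTE_FLOW_ACTION_TYPE_INDIRECT with the handle in
 * the conf field, then queried and destroyed. Error handling trimmed.
 *
 *	struct rte_flow_indir_action_conf iconf = { .ingress = 1 };
 *	struct rte_flow_action count = { .type = RTE_FLOW_ACTION_TYPE_COUNT };
 *	struct rte_flow_action_handle *h =
 *		rte_flow_action_handle_create(port_id, &iconf, &count, &err);
 *
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_INDIRECT, .conf = h },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	...
 *	struct rte_flow_query_count out = { .reset = 1 };
 *	rte_flow_action_handle_query(port_id, h, &out, &err);
 *	rte_flow_action_handle_destroy(port_id, h, &err);
 */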

int
rte_flow_tunnel_decap_set(uint16_t port_id,
			  struct rte_flow_tunnel *tunnel,
			  struct rte_flow_action **actions,
			  uint32_t *num_of_actions,
			  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_decap_set)) {
		ret = flow_err(port_id,
			       ops->tunnel_decap_set(dev, tunnel, actions,
						     num_of_actions, error),
			       error);

		rte_flow_trace_tunnel_decap_set(port_id, tunnel, actions,
						num_of_actions, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_tunnel_match(uint16_t port_id,
		      struct rte_flow_tunnel *tunnel,
		      struct rte_flow_item **items,
		      uint32_t *num_of_items,
		      struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_match)) {
		ret = flow_err(port_id,
			       ops->tunnel_match(dev, tunnel, items,
						 num_of_items, error),
			       error);

		rte_flow_trace_tunnel_match(port_id, tunnel, items, num_of_items,
					    ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_get_restore_info(uint16_t port_id,
			  struct rte_mbuf *m,
			  struct rte_flow_restore_info *restore_info,
			  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->get_restore_info)) {
		ret = flow_err(port_id,
			       ops->get_restore_info(dev, m, restore_info,
						     error),
			       error);

		rte_flow_trace_get_restore_info(port_id, m, restore_info, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_tunnel_action_decap_release(uint16_t port_id,
				     struct rte_flow_action *actions,
				     uint32_t num_of_actions,
				     struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_action_decap_release)) {
		ret = flow_err(port_id,
			       ops->tunnel_action_decap_release(dev, actions,
								num_of_actions,
								error),
			       error);

		rte_flow_trace_tunnel_action_decap_release(port_id, actions,
							   num_of_actions, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_tunnel_item_release(uint16_t port_id,
			     struct rte_flow_item *items,
			     uint32_t num_of_items,
			     struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_item_release)) {
		ret = flow_err(port_id,
			       ops->tunnel_item_release(dev, items,
							num_of_items, error),
			       error);

		rte_flow_trace_tunnel_item_release(port_id, items, num_of_items, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}
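
/*
 * Usage sketch of the tunnel offload helpers: the application describes
 * a tunnel, borrows PMD-specific actions for its decap rules, and
 * releases them once the rules are destroyed. Tunnel fields beyond
 * .type are omitted for brevity.
 *
 *	struct rte_flow_tunnel tunnel = { .type = RTE_FLOW_ITEM_TYPE_VXLAN };
 *	struct rte_flow_action *pmd_actions;
 *	uint32_t n_actions;
 *
 *	if (rte_flow_tunnel_decap_set(port_id, &tunnel, &pmd_actions,
 *				      &n_actions, &err) == 0) {
 *		... prepend pmd_actions to the rule's action list ...
 *		rte_flow_tunnel_action_decap_release(port_id, pmd_actions,
 *						     n_actions, &err);
 *	}
 */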

int
rte_flow_pick_transfer_proxy(uint16_t port_id, uint16_t *proxy_port_id,
			     struct rte_flow_error *error)
{
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_eth_dev *dev;
	int ret;

	if (unlikely(ops == NULL))
		return -rte_errno;

	if (ops->pick_transfer_proxy == NULL) {
		*proxy_port_id = port_id;
		return 0;
	}

	dev = &rte_eth_devices[port_id];

	ret = flow_err(port_id,
		       ops->pick_transfer_proxy(dev, proxy_port_id, error),
		       error);

	rte_flow_trace_pick_transfer_proxy(port_id, proxy_port_id, ret);

	return ret;
}
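
/*
 * Usage sketch: transfer rules are managed through the proxy port; when
 * the PMD does not implement the callback, the port is its own proxy
 * (see above). The transfer_attr variable is a placeholder for an
 * rte_flow_attr with .transfer set.
 *
 *	uint16_t proxy_id = port_id;
 *
 *	if (rte_flow_pick_transfer_proxy(port_id, &proxy_id, &err) == 0)
 *		flow = rte_flow_create(proxy_id, &transfer_attr, pattern,
 *				       actions, &err);
 */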

struct rte_flow_item_flex_handle *
rte_flow_flex_item_create(uint16_t port_id,
			  const struct rte_flow_item_flex_conf *conf,
			  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow_item_flex_handle *handle;

	if (unlikely(!ops))
		return NULL;
	if (unlikely(!ops->flex_item_create)) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, rte_strerror(ENOTSUP));
		return NULL;
	}
	handle = ops->flex_item_create(dev, conf, error);
	if (handle == NULL)
		flow_err(port_id, -rte_errno, error);

	rte_flow_trace_flex_item_create(port_id, conf, handle);

	return handle;
}

int
rte_flow_flex_item_release(uint16_t port_id,
			   const struct rte_flow_item_flex_handle *handle,
			   struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops || !ops->flex_item_release))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOTSUP));
	ret = ops->flex_item_release(dev, handle, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_flex_item_release(port_id, handle, ret);

	return ret;
}

int
rte_flow_info_get(uint16_t port_id,
		  struct rte_flow_port_info *port_info,
		  struct rte_flow_queue_info *queue_info,
		  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (dev->data->dev_configured == 0) {
		RTE_FLOW_LOG(INFO,
			"Device with port_id=%"PRIu16" is not configured.\n",
			port_id);
		return -EINVAL;
	}
	if (port_info == NULL) {
		RTE_FLOW_LOG(ERR, "Port %"PRIu16" info is NULL.\n", port_id);
		return -EINVAL;
	}
	if (likely(!!ops->info_get)) {
		ret = flow_err(port_id,
			       ops->info_get(dev, port_info, queue_info, error),
			       error);

		rte_flow_trace_info_get(port_id, port_info, queue_info, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_configure(uint16_t port_id,
		   const struct rte_flow_port_attr *port_attr,
		   uint16_t nb_queue,
		   const struct rte_flow_queue_attr *queue_attr[],
		   struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (dev->data->dev_configured == 0) {
		RTE_FLOW_LOG(INFO,
			"Device with port_id=%"PRIu16" is not configured.\n",
			port_id);
		return -EINVAL;
	}
	if (dev->data->dev_started != 0) {
		RTE_FLOW_LOG(INFO,
			"Device with port_id=%"PRIu16" already started.\n",
			port_id);
		return -EINVAL;
	}
	if (port_attr == NULL) {
		RTE_FLOW_LOG(ERR, "Port %"PRIu16" info is NULL.\n", port_id);
		return -EINVAL;
	}
	if (queue_attr == NULL) {
		RTE_FLOW_LOG(ERR, "Port %"PRIu16" queue info is NULL.\n", port_id);
		return -EINVAL;
	}
	if (likely(!!ops->configure)) {
		ret = ops->configure(dev, port_attr, nb_queue, queue_attr, error);
		if (ret == 0)
			dev->data->flow_configured = 1;
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_configure(port_id, port_attr, nb_queue, queue_attr, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}
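
/*
 * Usage sketch: query the pre-configuration limits, then size the flow
 * engine after rte_eth_dev_configure() and before rte_eth_dev_start().
 * One flow queue of 64 entries and 128 counters are placeholder choices.
 *
 *	struct rte_flow_port_info pinfo;
 *	struct rte_flow_queue_info qinfo;
 *	struct rte_flow_port_attr pattr = { .nb_counters = 128 };
 *	struct rte_flow_queue_attr qattr = { .size = 64 };
 *	const struct rte_flow_queue_attr *qattrs[] = { &qattr };
 *
 *	if (rte_flow_info_get(port_id, &pinfo, &qinfo, &err) == 0)
 *		rte_flow_configure(port_id, &pattr, 1, qattrs, &err);
 */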

struct rte_flow_pattern_template *
rte_flow_pattern_template_create(uint16_t port_id,
		const struct rte_flow_pattern_template_attr *template_attr,
		const struct rte_flow_item pattern[],
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow_pattern_template *template;

	if (unlikely(!ops))
		return NULL;
	if (dev->data->flow_configured == 0) {
		RTE_FLOW_LOG(INFO,
			"Flow engine on port_id=%"PRIu16" is not configured.\n",
			port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_STATE,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (template_attr == NULL) {
		RTE_FLOW_LOG(ERR,
			     "Port %"PRIu16" template attr is NULL.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (pattern == NULL) {
		RTE_FLOW_LOG(ERR,
			     "Port %"PRIu16" pattern is NULL.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (likely(!!ops->pattern_template_create)) {
		template = ops->pattern_template_create(dev, template_attr,
							pattern, error);
		if (template == NULL)
			flow_err(port_id, -rte_errno, error);

		rte_flow_trace_pattern_template_create(port_id, template_attr,
						       pattern, template);

		return template;
	}
	rte_flow_error_set(error, ENOTSUP,
			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(ENOTSUP));
	return NULL;
}

int
rte_flow_pattern_template_destroy(uint16_t port_id,
		struct rte_flow_pattern_template *pattern_template,
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(pattern_template == NULL))
		return 0;
	if (likely(!!ops->pattern_template_destroy)) {
		ret = flow_err(port_id,
			       ops->pattern_template_destroy(dev,
							     pattern_template,
							     error),
			       error);

		rte_flow_trace_pattern_template_destroy(port_id, pattern_template,
							ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

struct rte_flow_actions_template *
rte_flow_actions_template_create(uint16_t port_id,
			const struct rte_flow_actions_template_attr *template_attr,
			const struct rte_flow_action actions[],
			const struct rte_flow_action masks[],
			struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow_actions_template *template;

	if (unlikely(!ops))
		return NULL;
	if (dev->data->flow_configured == 0) {
		RTE_FLOW_LOG(INFO,
			"Flow engine on port_id=%"PRIu16" is not configured.\n",
			port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_STATE,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (template_attr == NULL) {
		RTE_FLOW_LOG(ERR,
			     "Port %"PRIu16" template attr is NULL.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (actions == NULL) {
		RTE_FLOW_LOG(ERR,
			     "Port %"PRIu16" actions is NULL.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
1739 	if (masks == NULL) {
1740 		RTE_FLOW_LOG(ERR,
1741 			     "Port %"PRIu16" masks is NULL.\n",
1742 			     port_id);
1743 		rte_flow_error_set(error, EINVAL,
1744 				   RTE_FLOW_ERROR_TYPE_ATTR,
1745 				   NULL, rte_strerror(EINVAL));
1746 
1747 	}
1748 	if (likely(!!ops->actions_template_create)) {
1749 		template = ops->actions_template_create(dev, template_attr,
1750 							actions, masks, error);
1751 		if (template == NULL)
1752 			flow_err(port_id, -rte_errno, error);
1753 
1754 		rte_flow_trace_actions_template_create(port_id, template_attr, actions,
1755 						       masks, template);
1756 
1757 		return template;
1758 	}
1759 	rte_flow_error_set(error, ENOTSUP,
1760 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1761 			   NULL, rte_strerror(ENOTSUP));
1762 	return NULL;
1763 }
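
/*
 * Usage sketch for rte_flow_actions_template_create() (illustrative only;
 * names are hypothetical).  masks[] mirrors actions[]: a non-zero field in
 * a mask entry fixes that part of the configuration at template level,
 * while zeroed fields are supplied per rule at flow creation time.  Here
 * the QUEUE action type is fixed but the queue index stays open:
 *
 *	struct rte_flow_actions_template_attr attr = { .ingress = 1 };
 *	struct rte_flow_action_queue queue = { .index = 0 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_action masks[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_actions_template *at =
 *		rte_flow_actions_template_create(port_id, &attr, actions,
 *						 masks, &err);
 */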

int
rte_flow_actions_template_destroy(uint16_t port_id,
			struct rte_flow_actions_template *actions_template,
			struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(actions_template == NULL))
		return 0;
	if (likely(!!ops->actions_template_destroy)) {
		ret = flow_err(port_id,
			       ops->actions_template_destroy(dev,
							     actions_template,
							     error),
			       error);

		rte_flow_trace_actions_template_destroy(port_id, actions_template,
							ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

struct rte_flow_template_table *
rte_flow_template_table_create(uint16_t port_id,
			const struct rte_flow_template_table_attr *table_attr,
			struct rte_flow_pattern_template *pattern_templates[],
			uint8_t nb_pattern_templates,
			struct rte_flow_actions_template *actions_templates[],
			uint8_t nb_actions_templates,
			struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow_template_table *table;

	if (unlikely(!ops))
		return NULL;
	if (dev->data->flow_configured == 0) {
		RTE_FLOW_LOG(INFO,
			"Flow engine on port_id=%"PRIu16" is not configured.\n",
			port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_STATE,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (table_attr == NULL) {
		RTE_FLOW_LOG(ERR,
			     "Port %"PRIu16" table attr is NULL.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (pattern_templates == NULL) {
		RTE_FLOW_LOG(ERR,
			     "Port %"PRIu16" pattern templates is NULL.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (actions_templates == NULL) {
		RTE_FLOW_LOG(ERR,
			     "Port %"PRIu16" actions templates is NULL.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (likely(!!ops->template_table_create)) {
		table = ops->template_table_create(dev, table_attr,
					pattern_templates, nb_pattern_templates,
					actions_templates, nb_actions_templates,
					error);
		if (table == NULL)
			flow_err(port_id, -rte_errno, error);

		rte_flow_trace_template_table_create(port_id, table_attr,
						     pattern_templates,
						     nb_pattern_templates,
						     actions_templates,
						     nb_actions_templates, table);

		return table;
	}
	rte_flow_error_set(error, ENOTSUP,
			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(ENOTSUP));
	return NULL;
}
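
/*
 * Usage sketch for rte_flow_template_table_create() (illustrative only;
 * "pt" and "at" are the hypothetical templates from the sketches above).
 * The table is pre-sized so rule insertion on the data path needs no
 * further allocation:
 *
 *	struct rte_flow_template_table_attr tbl_attr = {
 *		.flow_attr = { .ingress = 1 },
 *		.nb_flows = 1 << 16,
 *	};
 *	struct rte_flow_template_table *tbl =
 *		rte_flow_template_table_create(port_id, &tbl_attr,
 *					       &pt, 1, &at, 1, &err);
 */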

int
rte_flow_template_table_destroy(uint16_t port_id,
				struct rte_flow_template_table *template_table,
				struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(template_table == NULL))
		return 0;
	if (likely(!!ops->template_table_destroy)) {
		ret = flow_err(port_id,
			       ops->template_table_destroy(dev,
							   template_table,
							   error),
			       error);

		rte_flow_trace_template_table_destroy(port_id, template_table,
						      ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

struct rte_flow *
rte_flow_async_create(uint16_t port_id,
		      uint32_t queue_id,
		      const struct rte_flow_op_attr *op_attr,
		      struct rte_flow_template_table *template_table,
		      const struct rte_flow_item pattern[],
		      uint8_t pattern_template_index,
		      const struct rte_flow_action actions[],
		      uint8_t actions_template_index,
		      void *user_data,
		      struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow *flow;

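	/*
	 * Fast-path entry point: unlike the template/table functions above,
	 * the async functions do not validate ops or their arguments; the
	 * caller must have configured the flow engine and pass a valid
	 * queue and table.
	 */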
	flow = ops->async_create(dev, queue_id,
				 op_attr, template_table,
				 pattern, pattern_template_index,
				 actions, actions_template_index,
				 user_data, error);
	if (flow == NULL)
		flow_err(port_id, -rte_errno, error);

	rte_flow_trace_async_create(port_id, queue_id, op_attr, template_table,
				    pattern, pattern_template_index, actions,
				    actions_template_index, user_data, flow);

	return flow;
}
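
/*
 * Enqueue/commit sketch (illustrative only; names are hypothetical).
 * With op_attr.postpone set, the insertion may be buffered until it is
 * flushed to hardware with rte_flow_push(); completion is later drained
 * with rte_flow_pull():
 *
 *	struct rte_flow_op_attr op_attr = { .postpone = 1 };
 *	struct rte_flow *flow =
 *		rte_flow_async_create(port_id, queue_id, &op_attr, tbl,
 *				      pattern, 0, actions, 0, NULL, &err);
 *	rte_flow_push(port_id, queue_id, &err);
 */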

int
rte_flow_async_destroy(uint16_t port_id,
		       uint32_t queue_id,
		       const struct rte_flow_op_attr *op_attr,
		       struct rte_flow *flow,
		       void *user_data,
		       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	ret = flow_err(port_id,
		       ops->async_destroy(dev, queue_id,
					  op_attr, flow,
					  user_data, error),
		       error);

	rte_flow_trace_async_destroy(port_id, queue_id, op_attr, flow,
				     user_data, ret);

	return ret;
}

int
rte_flow_push(uint16_t port_id,
	      uint32_t queue_id,
	      struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	ret = flow_err(port_id,
		       ops->push(dev, queue_id, error),
		       error);

	rte_flow_trace_push(port_id, queue_id, ret);

	return ret;
}

int
rte_flow_pull(uint16_t port_id,
	      uint32_t queue_id,
	      struct rte_flow_op_result res[],
	      uint16_t n_res,
	      struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;
	int rc;

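	/*
	 * A non-zero return from ops->pull() (a completion count or a
	 * negative errno) is passed back unchanged; only a zero result
	 * goes through flow_err().
	 */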
	ret = ops->pull(dev, queue_id, res, n_res, error);
	rc = ret ? ret : flow_err(port_id, ret, error);

	rte_flow_trace_pull(port_id, queue_id, res, n_res, rc);

	return rc;
}
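
/*
 * Drain-loop sketch (illustrative only; names are hypothetical): poll the
 * queue until the previously pushed operations complete, then check each
 * result status:
 *
 *	struct rte_flow_op_result res[32];
 *	int n;
 *
 *	do {
 *		n = rte_flow_pull(port_id, queue_id, res, RTE_DIM(res), &err);
 *	} while (n == 0);
 *	if (n > 0 && res[0].status == RTE_FLOW_OP_SUCCESS)
 *		handle_completed_rule(res[0].user_data);
 */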

struct rte_flow_action_handle *
rte_flow_async_action_handle_create(uint16_t port_id,
		uint32_t queue_id,
		const struct rte_flow_op_attr *op_attr,
		const struct rte_flow_indir_action_conf *indir_action_conf,
		const struct rte_flow_action *action,
		void *user_data,
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow_action_handle *handle;

	handle = ops->async_action_handle_create(dev, queue_id, op_attr,
						 indir_action_conf, action,
						 user_data, error);
	if (handle == NULL)
		flow_err(port_id, -rte_errno, error);

	rte_flow_trace_async_action_handle_create(port_id, queue_id, op_attr,
						  indir_action_conf, action,
						  user_data, handle);

	return handle;
}
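
/*
 * Usage sketch for rte_flow_async_action_handle_create() (illustrative
 * only; names are hypothetical): a shared counter created asynchronously,
 * whose handle can then be referenced by many rules through
 * RTE_FLOW_ACTION_TYPE_INDIRECT:
 *
 *	struct rte_flow_indir_action_conf conf = { .ingress = 1 };
 *	struct rte_flow_action action = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *	};
 *	struct rte_flow_action_handle *h =
 *		rte_flow_async_action_handle_create(port_id, queue_id,
 *						    &op_attr, &conf, &action,
 *						    NULL, &err);
 */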

int
rte_flow_async_action_handle_destroy(uint16_t port_id,
		uint32_t queue_id,
		const struct rte_flow_op_attr *op_attr,
		struct rte_flow_action_handle *action_handle,
		void *user_data,
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	ret = ops->async_action_handle_destroy(dev, queue_id, op_attr,
					       action_handle, user_data, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_async_action_handle_destroy(port_id, queue_id, op_attr,
						   action_handle, user_data, ret);

	return ret;
}

int
rte_flow_async_action_handle_update(uint16_t port_id,
		uint32_t queue_id,
		const struct rte_flow_op_attr *op_attr,
		struct rte_flow_action_handle *action_handle,
		const void *update,
		void *user_data,
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	ret = ops->async_action_handle_update(dev, queue_id, op_attr,
					      action_handle, update, user_data,
					      error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_async_action_handle_update(port_id, queue_id, op_attr,
						  action_handle, update,
						  user_data, ret);

	return ret;
}

int
rte_flow_async_action_handle_query(uint16_t port_id,
		uint32_t queue_id,
		const struct rte_flow_op_attr *op_attr,
		const struct rte_flow_action_handle *action_handle,
		void *data,
		void *user_data,
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	ret = ops->async_action_handle_query(dev, queue_id, op_attr,
					     action_handle, data, user_data,
					     error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_async_action_handle_query(port_id, queue_id, op_attr,
						 action_handle, data, user_data,
						 ret);

	return ret;
}
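
/*
 * Query sketch (illustrative only; "h" is the hypothetical counter handle
 * from the sketch above).  The data buffer is filled asynchronously, so it
 * must stay valid until the operation is drained with rte_flow_pull():
 *
 *	struct rte_flow_query_count data = { 0 };
 *
 *	rte_flow_async_action_handle_query(port_id, queue_id, &op_attr, h,
 *					   &data, NULL, &err);
 *	rte_flow_push(port_id, queue_id, &err);
 */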