/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2016 6WIND S.A.
 * Copyright 2016 Mellanox Technologies, Ltd
 */

#include <errno.h>
#include <stddef.h>
#include <stdint.h>

#include <rte_common.h>
#include <rte_errno.h>
#include <rte_branch_prediction.h>
#include <rte_string_fns.h>
#include <rte_mbuf_dyn.h>
#include "rte_ethdev.h"
#include "rte_flow_driver.h"
#include "rte_flow.h"

#include "ethdev_trace.h"

/* Mbuf dynamic field name for metadata. */
int32_t rte_flow_dynf_metadata_offs = -1;

/* Mbuf dynamic field flag bit number for metadata. */
uint64_t rte_flow_dynf_metadata_mask;

/**
 * Flow elements description tables.
 */
struct rte_flow_desc_data {
	const char *name;
	size_t size;
	size_t (*desc_fn)(void *dst, const void *src);
};

/**
 * Copy flow element data into a destination buffer.
 *
 * @param buf
 *   Destination memory.
 * @param data
 *   Source memory.
 * @param size
 *   Requested copy size.
 * @param desc
 *   rte_flow_desc_item - for flow item conversion.
 *   rte_flow_desc_action - for flow action conversion.
 * @param type
 *   Offset into the desc table or a negative value for PMD-private flow
 *   elements.
 *
 * @return
 *   Number of bytes needed to store the element regardless of @p size.
 */
static inline size_t
rte_flow_conv_copy(void *buf, const void *data, const size_t size,
		   const struct rte_flow_desc_data *desc, int type)
{
	/*
	 * Allow PMD private flow item: a negative type is stored as an
	 * opaque pointer.
	 */
	bool rte_type = type >= 0;

	size_t sz = rte_type ? desc[type].size : sizeof(void *);
	if (buf == NULL || data == NULL)
		return 0;
	rte_memcpy(buf, data, (size > sz ? sz : size));
	if (rte_type && desc[type].desc_fn)
		sz += desc[type].desc_fn(size > 0 ? buf : NULL, data);
	return sz;
}

static size_t
rte_flow_item_flex_conv(void *buf, const void *data)
{
	struct rte_flow_item_flex *dst = buf;
	const struct rte_flow_item_flex *src = data;
	if (buf) {
		dst->pattern = rte_memcpy
			((void *)((uintptr_t)(dst + 1)), src->pattern,
			 src->length);
	}
	return src->length;
}

/** Generate flow_item[] entry. */
#define MK_FLOW_ITEM(t, s) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
		.desc_fn = NULL, \
	}

#define MK_FLOW_ITEM_FN(t, s, fn) \
	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
		.desc_fn = fn, \
	}

/** Information about known flow pattern items. */
static const struct rte_flow_desc_data rte_flow_desc_item[] = {
	MK_FLOW_ITEM(END, 0),
	MK_FLOW_ITEM(VOID, 0),
	MK_FLOW_ITEM(INVERT, 0),
	MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
	MK_FLOW_ITEM(PORT_ID, sizeof(struct rte_flow_item_port_id)),
	MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)),
	MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
	MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
	MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
	MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
	MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
	MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
	MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
	MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
	MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
	MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
	MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
	MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
	MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
	MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)),
	MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
	MK_FLOW_ITEM(ESP, sizeof(struct rte_flow_item_esp)),
	MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
	MK_FLOW_ITEM(VXLAN_GPE, sizeof(struct rte_flow_item_vxlan_gpe)),
	MK_FLOW_ITEM(ARP_ETH_IPV4, sizeof(struct rte_flow_item_arp_eth_ipv4)),
	MK_FLOW_ITEM(IPV6_EXT, sizeof(struct rte_flow_item_ipv6_ext)),
	MK_FLOW_ITEM(IPV6_FRAG_EXT, sizeof(struct rte_flow_item_ipv6_frag_ext)),
	MK_FLOW_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
	MK_FLOW_ITEM(ICMP6_ECHO_REQUEST, sizeof(struct rte_flow_item_icmp6_echo)),
	MK_FLOW_ITEM(ICMP6_ECHO_REPLY, sizeof(struct rte_flow_item_icmp6_echo)),
	MK_FLOW_ITEM(ICMP6_ND_NS, sizeof(struct rte_flow_item_icmp6_nd_ns)),
	MK_FLOW_ITEM(ICMP6_ND_NA, sizeof(struct rte_flow_item_icmp6_nd_na)),
	MK_FLOW_ITEM(ICMP6_ND_OPT, sizeof(struct rte_flow_item_icmp6_nd_opt)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_SLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
	MK_FLOW_ITEM(ICMP6_ND_OPT_TLA_ETH,
		     sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
	MK_FLOW_ITEM(MARK, sizeof(struct rte_flow_item_mark)),
	MK_FLOW_ITEM(META, sizeof(struct rte_flow_item_meta)),
	MK_FLOW_ITEM(TAG, sizeof(struct rte_flow_item_tag)),
	MK_FLOW_ITEM(GRE_KEY, sizeof(rte_be32_t)),
	MK_FLOW_ITEM(GRE_OPTION, sizeof(struct rte_flow_item_gre_opt)),
	MK_FLOW_ITEM(GTP_PSC, sizeof(struct rte_flow_item_gtp_psc)),
	MK_FLOW_ITEM(PPPOES, sizeof(struct rte_flow_item_pppoe)),
	MK_FLOW_ITEM(PPPOED, sizeof(struct rte_flow_item_pppoe)),
	MK_FLOW_ITEM(PPPOE_PROTO_ID,
			sizeof(struct rte_flow_item_pppoe_proto_id)),
	MK_FLOW_ITEM(NSH, sizeof(struct rte_flow_item_nsh)),
	MK_FLOW_ITEM(IGMP, sizeof(struct rte_flow_item_igmp)),
	MK_FLOW_ITEM(AH, sizeof(struct rte_flow_item_ah)),
	MK_FLOW_ITEM(HIGIG2, sizeof(struct rte_flow_item_higig2_hdr)),
	MK_FLOW_ITEM(L2TPV3OIP, sizeof(struct rte_flow_item_l2tpv3oip)),
	MK_FLOW_ITEM(PFCP, sizeof(struct rte_flow_item_pfcp)),
	MK_FLOW_ITEM(ECPRI, sizeof(struct rte_flow_item_ecpri)),
	MK_FLOW_ITEM(GENEVE_OPT, sizeof(struct rte_flow_item_geneve_opt)),
	MK_FLOW_ITEM(INTEGRITY, sizeof(struct rte_flow_item_integrity)),
	MK_FLOW_ITEM(CONNTRACK, sizeof(uint32_t)),
	MK_FLOW_ITEM(PORT_REPRESENTOR, sizeof(struct rte_flow_item_ethdev)),
	MK_FLOW_ITEM(REPRESENTED_PORT, sizeof(struct rte_flow_item_ethdev)),
	MK_FLOW_ITEM_FN(FLEX, sizeof(struct rte_flow_item_flex),
			rte_flow_item_flex_conv),
	MK_FLOW_ITEM(L2TPV2, sizeof(struct rte_flow_item_l2tpv2)),
	MK_FLOW_ITEM(PPP, sizeof(struct rte_flow_item_ppp)),
	MK_FLOW_ITEM(METER_COLOR, sizeof(struct rte_flow_item_meter_color)),
	MK_FLOW_ITEM(IPV6_ROUTING_EXT, sizeof(struct rte_flow_item_ipv6_routing_ext)),
	MK_FLOW_ITEM(QUOTA, sizeof(struct rte_flow_item_quota)),
	MK_FLOW_ITEM(AGGR_AFFINITY, sizeof(struct rte_flow_item_aggr_affinity)),
	MK_FLOW_ITEM(TX_QUEUE, sizeof(struct rte_flow_item_tx_queue)),
	MK_FLOW_ITEM(IB_BTH, sizeof(struct rte_flow_item_ib_bth)),
};

/** Generate flow_action[] entry. */
#define MK_FLOW_ACTION(t, s) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = s, \
		.desc_fn = NULL, \
	}

#define MK_FLOW_ACTION_FN(t, fn) \
	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
		.name = # t, \
		.size = 0, \
		.desc_fn = fn, \
	}

/** Information about known flow actions. */
static const struct rte_flow_desc_data rte_flow_desc_action[] = {
	MK_FLOW_ACTION(END, 0),
	MK_FLOW_ACTION(VOID, 0),
	MK_FLOW_ACTION(PASSTHRU, 0),
	MK_FLOW_ACTION(JUMP, sizeof(struct rte_flow_action_jump)),
	MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
	MK_FLOW_ACTION(FLAG, 0),
	MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
	MK_FLOW_ACTION(DROP, 0),
	MK_FLOW_ACTION(COUNT, sizeof(struct rte_flow_action_count)),
	MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)),
	MK_FLOW_ACTION(PF, 0),
	MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
	MK_FLOW_ACTION(PORT_ID, sizeof(struct rte_flow_action_port_id)),
	MK_FLOW_ACTION(METER, sizeof(struct rte_flow_action_meter)),
	MK_FLOW_ACTION(SECURITY, sizeof(struct rte_flow_action_security)),
	MK_FLOW_ACTION(OF_DEC_NW_TTL, 0),
	MK_FLOW_ACTION(OF_POP_VLAN, 0),
	MK_FLOW_ACTION(OF_PUSH_VLAN,
		       sizeof(struct rte_flow_action_of_push_vlan)),
	MK_FLOW_ACTION(OF_SET_VLAN_VID,
		       sizeof(struct rte_flow_action_of_set_vlan_vid)),
	MK_FLOW_ACTION(OF_SET_VLAN_PCP,
		       sizeof(struct rte_flow_action_of_set_vlan_pcp)),
	MK_FLOW_ACTION(OF_POP_MPLS,
		       sizeof(struct rte_flow_action_of_pop_mpls)),
	MK_FLOW_ACTION(OF_PUSH_MPLS,
		       sizeof(struct rte_flow_action_of_push_mpls)),
	MK_FLOW_ACTION(VXLAN_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
	MK_FLOW_ACTION(VXLAN_DECAP, 0),
	MK_FLOW_ACTION(NVGRE_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
	MK_FLOW_ACTION(NVGRE_DECAP, 0),
	MK_FLOW_ACTION(RAW_ENCAP, sizeof(struct rte_flow_action_raw_encap)),
	MK_FLOW_ACTION(RAW_DECAP, sizeof(struct rte_flow_action_raw_decap)),
	MK_FLOW_ACTION(SET_IPV4_SRC,
		       sizeof(struct rte_flow_action_set_ipv4)),
	MK_FLOW_ACTION(SET_IPV4_DST,
		       sizeof(struct rte_flow_action_set_ipv4)),
	MK_FLOW_ACTION(SET_IPV6_SRC,
		       sizeof(struct rte_flow_action_set_ipv6)),
	MK_FLOW_ACTION(SET_IPV6_DST,
		       sizeof(struct rte_flow_action_set_ipv6)),
	MK_FLOW_ACTION(SET_TP_SRC,
		       sizeof(struct rte_flow_action_set_tp)),
	MK_FLOW_ACTION(SET_TP_DST,
		       sizeof(struct rte_flow_action_set_tp)),
	MK_FLOW_ACTION(MAC_SWAP, 0),
	MK_FLOW_ACTION(DEC_TTL, 0),
	MK_FLOW_ACTION(SET_TTL, sizeof(struct rte_flow_action_set_ttl)),
	MK_FLOW_ACTION(SET_MAC_SRC, sizeof(struct rte_flow_action_set_mac)),
	MK_FLOW_ACTION(SET_MAC_DST, sizeof(struct rte_flow_action_set_mac)),
	MK_FLOW_ACTION(INC_TCP_SEQ, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(DEC_TCP_SEQ, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(INC_TCP_ACK, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(DEC_TCP_ACK, sizeof(rte_be32_t)),
	MK_FLOW_ACTION(SET_TAG, sizeof(struct rte_flow_action_set_tag)),
	MK_FLOW_ACTION(SET_META, sizeof(struct rte_flow_action_set_meta)),
	MK_FLOW_ACTION(SET_IPV4_DSCP, sizeof(struct rte_flow_action_set_dscp)),
	MK_FLOW_ACTION(SET_IPV6_DSCP, sizeof(struct rte_flow_action_set_dscp)),
	MK_FLOW_ACTION(AGE, sizeof(struct rte_flow_action_age)),
	MK_FLOW_ACTION(SAMPLE, sizeof(struct rte_flow_action_sample)),
	MK_FLOW_ACTION(MODIFY_FIELD,
		       sizeof(struct rte_flow_action_modify_field)),
	/*
	 * Indirect action represented as a handle of type
	 * (struct rte_flow_action_handle *) stored in the conf field (see
	 * struct rte_flow_action); no need for an additional structure to
	 * store the indirect action handle.
	 */
	MK_FLOW_ACTION(INDIRECT, 0),
	MK_FLOW_ACTION(CONNTRACK, sizeof(struct rte_flow_action_conntrack)),
	MK_FLOW_ACTION(PORT_REPRESENTOR, sizeof(struct rte_flow_action_ethdev)),
	MK_FLOW_ACTION(REPRESENTED_PORT, sizeof(struct rte_flow_action_ethdev)),
	MK_FLOW_ACTION(METER_MARK, sizeof(struct rte_flow_action_meter_mark)),
	MK_FLOW_ACTION(SEND_TO_KERNEL, 0),
	MK_FLOW_ACTION(QUOTA, sizeof(struct rte_flow_action_quota)),
};

int
rte_flow_dynf_metadata_register(void)
{
	int offset;
	int flag;

	static const struct rte_mbuf_dynfield desc_offs = {
		.name = RTE_MBUF_DYNFIELD_METADATA_NAME,
		.size = sizeof(uint32_t),
		.align = __alignof__(uint32_t),
	};
	static const struct rte_mbuf_dynflag desc_flag = {
		.name = RTE_MBUF_DYNFLAG_METADATA_NAME,
	};

	offset = rte_mbuf_dynfield_register(&desc_offs);
	if (offset < 0)
		goto error;
	flag = rte_mbuf_dynflag_register(&desc_flag);
	if (flag < 0)
		goto error;
	rte_flow_dynf_metadata_offs = offset;
	rte_flow_dynf_metadata_mask = RTE_BIT64(flag);

	rte_flow_trace_dynf_metadata_register(offset, RTE_BIT64(flag));

	return 0;

error:
	rte_flow_dynf_metadata_offs = -1;
	rte_flow_dynf_metadata_mask = UINT64_C(0);
	return -rte_errno;
}
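
/*
 * Usage sketch (illustrative, not part of this file): an application that
 * wants to match or carry metadata registers the dynamic field once at
 * init time, then accesses it through the RTE_FLOW_DYNF_METADATA() helper
 * from rte_flow.h. Error handling is abridged.
 *
 *	if (rte_flow_dynf_metadata_register() < 0)
 *		rte_panic("cannot register metadata dynfield: %d\n",
 *			  rte_errno);
 *	...
 *	if (mbuf->ol_flags & rte_flow_dynf_metadata_mask)
 *		meta = *RTE_FLOW_DYNF_METADATA(mbuf);
 */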

static inline void
fts_enter(struct rte_eth_dev *dev)
{
	if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
		pthread_mutex_lock(&dev->data->flow_ops_mutex);
}

static inline void
fts_exit(struct rte_eth_dev *dev)
{
	if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
		pthread_mutex_unlock(&dev->data->flow_ops_mutex);
}

static int
flow_err(uint16_t port_id, int ret, struct rte_flow_error *error)
{
	if (ret == 0)
		return 0;
	if (rte_eth_dev_is_removed(port_id))
		return rte_flow_error_set(error, EIO,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(EIO));
	return ret;
}

/* Get generic flow operations structure from a port. */
const struct rte_flow_ops *
rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops;
	int code;

	if (unlikely(!rte_eth_dev_is_valid_port(port_id)))
		code = ENODEV;
	else if (unlikely(dev->dev_ops->flow_ops_get == NULL))
		/* flow API not supported with this driver dev_ops */
		code = ENOSYS;
	else
		code = dev->dev_ops->flow_ops_get(dev, &ops);
	if (code == 0 && ops == NULL)
		/* flow API not supported with this device */
		code = ENOSYS;

	if (code != 0) {
		rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, rte_strerror(code));
		return NULL;
	}
	return ops;
}
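
/*
 * Driver-side sketch (illustrative): a PMD exposes its flow ops through
 * the flow_ops_get callback in struct eth_dev_ops. The driver and callback
 * names below are hypothetical; only the callback signature follows from
 * the call above.
 *
 *	static const struct rte_flow_ops foo_flow_ops = {
 *		.validate = foo_flow_validate,
 *		.create = foo_flow_create,
 *		.destroy = foo_flow_destroy,
 *	};
 *
 *	static int
 *	foo_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
 *			 const struct rte_flow_ops **ops)
 *	{
 *		*ops = &foo_flow_ops;
 *		return 0;
 *	}
 */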

/* Check whether a flow rule can be created on a given port. */
int
rte_flow_validate(uint16_t port_id,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item pattern[],
		  const struct rte_flow_action actions[],
		  struct rte_flow_error *error)
{
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	int ret;

	if (likely(!!attr) && attr->transfer &&
	    (attr->ingress || attr->egress)) {
		return rte_flow_error_set(error, EINVAL,
					  RTE_FLOW_ERROR_TYPE_ATTR,
					  attr, "cannot use attr ingress/egress with attr transfer");
	}

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->validate)) {
		fts_enter(dev);
		ret = ops->validate(dev, attr, pattern, actions, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_validate(port_id, attr, pattern, actions, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Create a flow rule on a given port. */
struct rte_flow *
rte_flow_create(uint16_t port_id,
		const struct rte_flow_attr *attr,
		const struct rte_flow_item pattern[],
		const struct rte_flow_action actions[],
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	struct rte_flow *flow;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return NULL;
	if (likely(!!ops->create)) {
		fts_enter(dev);
		flow = ops->create(dev, attr, pattern, actions, error);
		fts_exit(dev);
		if (flow == NULL)
			flow_err(port_id, -rte_errno, error);

		rte_flow_trace_create(port_id, attr, pattern, actions, flow);

		return flow;
	}
	rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(ENOSYS));
	return NULL;
}
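
/*
 * Usage sketch (illustrative): validate a rule before creating it. The
 * pattern and action arrays follow the usual rte_flow conventions; error
 * handling is abridged.
 *
 *	struct rte_flow_attr attr = { .ingress = 1 };
 *	struct rte_flow_item pattern[] = {
 *		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
 *		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
 *		{ .type = RTE_FLOW_ITEM_TYPE_END },
 *	};
 *	struct rte_flow_action_queue queue = { .index = 1 };
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	struct rte_flow_error err;
 *	struct rte_flow *flow = NULL;
 *
 *	if (rte_flow_validate(port_id, &attr, pattern, actions, &err) == 0)
 *		flow = rte_flow_create(port_id, &attr, pattern, actions,
 *				       &err);
 */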

/* Destroy a flow rule on a given port. */
int
rte_flow_destroy(uint16_t port_id,
		 struct rte_flow *flow,
		 struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->destroy)) {
		fts_enter(dev);
		ret = ops->destroy(dev, flow, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_destroy(port_id, flow, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

int
rte_flow_actions_update(uint16_t port_id,
			struct rte_flow *flow,
			const struct rte_flow_action actions[],
			struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->actions_update)) {
		fts_enter(dev);
		ret = ops->actions_update(dev, flow, actions, error);
		fts_exit(dev);

		rte_flow_trace_actions_update(port_id, flow, actions, ret);

		return flow_err(port_id, ret, error);
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Destroy all flow rules associated with a port. */
int
rte_flow_flush(uint16_t port_id,
	       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->flush)) {
		fts_enter(dev);
		ret = ops->flush(dev, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_flush(port_id, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Query an existing flow rule. */
int
rte_flow_query(uint16_t port_id,
	       struct rte_flow *flow,
	       const struct rte_flow_action *action,
	       void *data,
	       struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (!ops)
		return -rte_errno;
	if (likely(!!ops->query)) {
		fts_enter(dev);
		ret = ops->query(dev, flow, action, data, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_query(port_id, flow, action, data, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}

/* Restrict ingress traffic to the defined flow rules. */
int
rte_flow_isolate(uint16_t port_id,
		 int set,
		 struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (!ops)
		return -rte_errno;
	if (likely(!!ops->isolate)) {
		fts_enter(dev);
		ret = ops->isolate(dev, set, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_isolate(port_id, set, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}
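
/*
 * Usage sketch (illustrative): isolated mode is best requested early,
 * before the port is configured and started, so the PMD can set resources
 * up accordingly.
 *
 *	struct rte_flow_error err;
 *
 *	if (rte_flow_isolate(port_id, 1, &err) < 0)
 *		printf("isolated mode not supported: %s\n",
 *		       err.message ? err.message : "(no message)");
 */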

/* Initialize flow error structure. */
int
rte_flow_error_set(struct rte_flow_error *error,
		   int code,
		   enum rte_flow_error_type type,
		   const void *cause,
		   const char *message)
{
	if (error) {
		*error = (struct rte_flow_error){
			.type = type,
			.cause = cause,
			.message = message,
		};
	}
	rte_errno = code;
	return -code;
}
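
/*
 * Usage sketch (illustrative): PMD callbacks report failures through this
 * helper, which fills *error, sets rte_errno and returns the negated code
 * in one statement:
 *
 *	if (attr->egress)
 *		return rte_flow_error_set(error, ENOTSUP,
 *					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
 *					  NULL, "egress is not supported");
 */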

/** Pattern item specification types. */
enum rte_flow_conv_item_spec_type {
	RTE_FLOW_CONV_ITEM_SPEC,
	RTE_FLOW_CONV_ITEM_LAST,
	RTE_FLOW_CONV_ITEM_MASK,
};

/**
 * Copy pattern item specification.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] item
 *   Pattern item to copy specification from.
 * @param type
 *   Specification selector for either @p spec, @p last or @p mask.
 *
 * @return
 *   Number of bytes needed to store pattern item specification regardless
 *   of @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_item_spec(void *buf, const size_t size,
			const struct rte_flow_item *item,
			enum rte_flow_conv_item_spec_type type)
{
	size_t off;
	const void *data =
		type == RTE_FLOW_CONV_ITEM_SPEC ? item->spec :
		type == RTE_FLOW_CONV_ITEM_LAST ? item->last :
		type == RTE_FLOW_CONV_ITEM_MASK ? item->mask :
		NULL;

	switch (item->type) {
		union {
			const struct rte_flow_item_raw *raw;
		} spec;
		union {
			const struct rte_flow_item_raw *raw;
		} last;
		union {
			const struct rte_flow_item_raw *raw;
		} mask;
		union {
			const struct rte_flow_item_raw *raw;
		} src;
		union {
			struct rte_flow_item_raw *raw;
		} dst;
		size_t tmp;

	case RTE_FLOW_ITEM_TYPE_RAW:
		spec.raw = item->spec;
		last.raw = item->last ? item->last : item->spec;
		mask.raw = item->mask ? item->mask : &rte_flow_item_raw_mask;
		src.raw = data;
		dst.raw = buf;
		rte_memcpy(dst.raw,
			   (&(struct rte_flow_item_raw){
				.relative = src.raw->relative,
				.search = src.raw->search,
				.reserved = src.raw->reserved,
				.offset = src.raw->offset,
				.limit = src.raw->limit,
				.length = src.raw->length,
			   }),
			   size > sizeof(*dst.raw) ? sizeof(*dst.raw) : size);
		off = sizeof(*dst.raw);
		if (type == RTE_FLOW_CONV_ITEM_SPEC ||
		    (type == RTE_FLOW_CONV_ITEM_MASK &&
		     ((spec.raw->length & mask.raw->length) >=
		      (last.raw->length & mask.raw->length))))
			tmp = spec.raw->length & mask.raw->length;
		else
			tmp = last.raw->length & mask.raw->length;
		if (tmp) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.raw->pattern));
			if (size >= off + tmp)
				dst.raw->pattern = rte_memcpy
					((void *)((uintptr_t)dst.raw + off),
					 src.raw->pattern, tmp);
			off += tmp;
		}
		break;
	default:
		off = rte_flow_conv_copy(buf, data, size,
					 rte_flow_desc_item, item->type);
		break;
	}
	return off;
}

/**
 * Copy action configuration.
 *
 * @param[out] buf
 *   Output buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p buf in bytes.
 * @param[in] action
 *   Action to copy configuration from.
 *
 * @return
 *   Number of bytes needed to store the action configuration regardless
 *   of @p size. @p buf contents are truncated to @p size if not large
 *   enough.
 */
static size_t
rte_flow_conv_action_conf(void *buf, const size_t size,
			  const struct rte_flow_action *action)
{
	size_t off;

	switch (action->type) {
		union {
			const struct rte_flow_action_rss *rss;
			const struct rte_flow_action_vxlan_encap *vxlan_encap;
			const struct rte_flow_action_nvgre_encap *nvgre_encap;
		} src;
		union {
			struct rte_flow_action_rss *rss;
			struct rte_flow_action_vxlan_encap *vxlan_encap;
			struct rte_flow_action_nvgre_encap *nvgre_encap;
		} dst;
		size_t tmp;
		int ret;

	case RTE_FLOW_ACTION_TYPE_RSS:
		src.rss = action->conf;
		dst.rss = buf;
		rte_memcpy(dst.rss,
			   (&(struct rte_flow_action_rss){
				.func = src.rss->func,
				.level = src.rss->level,
				.types = src.rss->types,
				.key_len = src.rss->key_len,
				.queue_num = src.rss->queue_num,
			   }),
			   size > sizeof(*dst.rss) ? sizeof(*dst.rss) : size);
		off = sizeof(*dst.rss);
		if (src.rss->key_len && src.rss->key) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->key));
			tmp = sizeof(*src.rss->key) * src.rss->key_len;
			if (size >= off + tmp)
				dst.rss->key = rte_memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->key, tmp);
			off += tmp;
		}
		if (src.rss->queue_num) {
			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->queue));
			tmp = sizeof(*src.rss->queue) * src.rss->queue_num;
			if (size >= off + tmp)
				dst.rss->queue = rte_memcpy
					((void *)((uintptr_t)dst.rss + off),
					 src.rss->queue, tmp);
			off += tmp;
		}
		break;
	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
	case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
		src.vxlan_encap = action->conf;
		dst.vxlan_encap = buf;
		RTE_BUILD_BUG_ON(sizeof(*src.vxlan_encap) !=
				 sizeof(*src.nvgre_encap) ||
				 offsetof(struct rte_flow_action_vxlan_encap,
					  definition) !=
				 offsetof(struct rte_flow_action_nvgre_encap,
					  definition));
		off = sizeof(*dst.vxlan_encap);
		if (src.vxlan_encap->definition) {
			off = RTE_ALIGN_CEIL
				(off, sizeof(*dst.vxlan_encap->definition));
			ret = rte_flow_conv
				(RTE_FLOW_CONV_OP_PATTERN,
				 (void *)((uintptr_t)dst.vxlan_encap + off),
				 size > off ? size - off : 0,
				 src.vxlan_encap->definition, NULL);
			if (ret < 0)
				return 0;
			if (size >= off + ret)
				dst.vxlan_encap->definition =
					(void *)((uintptr_t)dst.vxlan_encap +
						 off);
			off += ret;
		}
		break;
	default:
		off = rte_flow_conv_copy(buf, action->conf, size,
					 rte_flow_desc_action, action->type);
		break;
	}
	return off;
}

/**
 * Copy a list of pattern items.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source pattern items.
 * @param num
 *   Maximum number of pattern items to process from @p src or 0 to process
 *   the entire list. In both cases, processing stops after
 *   RTE_FLOW_ITEM_TYPE_END is encountered.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store
 *   pattern items regardless of @p size on success (@p dst contents are
 *   truncated to @p size if not large enough), a negative errno value
 *   otherwise and rte_errno is set.
 */
static int
rte_flow_conv_pattern(struct rte_flow_item *dst,
		      const size_t size,
		      const struct rte_flow_item *src,
		      unsigned int num,
		      struct rte_flow_error *error)
{
	uintptr_t data = (uintptr_t)dst;
	size_t off;
	size_t ret;
	unsigned int i;

	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
		/* Allow PMD private flow item. */
		if (((int)src->type >= 0) &&
		    ((size_t)src->type >= RTE_DIM(rte_flow_desc_item) ||
		     !rte_flow_desc_item[src->type].name))
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, src,
				 "cannot convert unknown item type");
		if (size >= off + sizeof(*dst))
			*dst = (struct rte_flow_item){
				.type = src->type,
			};
		off += sizeof(*dst);
		if (!src->type)
			num = i + 1;
	}
	num = i;
	src -= num;
	dst -= num;
	do {
		if (src->spec) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_SPEC);
			if (size && size >= off + ret)
				dst->spec = (void *)(data + off);
			off += ret;
		}
		if (src->last) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_LAST);
			if (size && size >= off + ret)
				dst->last = (void *)(data + off);
			off += ret;
		}
		if (src->mask) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_item_spec
				((void *)(data + off),
				 size > off ? size - off : 0, src,
				 RTE_FLOW_CONV_ITEM_MASK);
			if (size && size >= off + ret)
				dst->mask = (void *)(data + off);
			off += ret;
		}
		++src;
		++dst;
	} while (--num);
	return off;
}

/**
 * Copy a list of actions.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source actions.
 * @param num
 *   Maximum number of actions to process from @p src or 0 to process the
 *   entire list. In both cases, processing stops after
 *   RTE_FLOW_ACTION_TYPE_END is encountered.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store
 *   actions regardless of @p size on success (@p dst contents are truncated
 *   to @p size if not large enough), a negative errno value otherwise and
 *   rte_errno is set.
 */
static int
rte_flow_conv_actions(struct rte_flow_action *dst,
		      const size_t size,
		      const struct rte_flow_action *src,
		      unsigned int num,
		      struct rte_flow_error *error)
{
	uintptr_t data = (uintptr_t)dst;
	size_t off;
	size_t ret;
	unsigned int i;

	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
		/* Allow PMD private flow action. */
		if (((int)src->type >= 0) &&
		    ((size_t)src->type >= RTE_DIM(rte_flow_desc_action) ||
		     !rte_flow_desc_action[src->type].name))
			return rte_flow_error_set
				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
				 src, "cannot convert unknown action type");
		if (size >= off + sizeof(*dst))
			*dst = (struct rte_flow_action){
				.type = src->type,
			};
		off += sizeof(*dst);
		if (!src->type)
			num = i + 1;
	}
	num = i;
	src -= num;
	dst -= num;
	do {
		if (src->conf) {
			off = RTE_ALIGN_CEIL(off, sizeof(double));
			ret = rte_flow_conv_action_conf
				((void *)(data + off),
				 size > off ? size - off : 0, src);
			if (size && size >= off + ret)
				dst->conf = (void *)(data + off);
			off += ret;
		}
		++src;
		++dst;
	} while (--num);
	return off;
}

/**
 * Copy flow rule components.
 *
 * This comprises the flow rule descriptor itself, attributes, pattern and
 * actions list. NULL components in @p src are skipped.
 *
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Source flow rule descriptor.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store all
 *   components including the descriptor regardless of @p size on success
 *   (@p dst contents are truncated to @p size if not large enough), a
 *   negative errno value otherwise and rte_errno is set.
 */
static int
rte_flow_conv_rule(struct rte_flow_conv_rule *dst,
		   const size_t size,
		   const struct rte_flow_conv_rule *src,
		   struct rte_flow_error *error)
{
	size_t off;
	int ret;

	rte_memcpy(dst,
		   (&(struct rte_flow_conv_rule){
			.attr = NULL,
			.pattern = NULL,
			.actions = NULL,
		   }),
		   size > sizeof(*dst) ? sizeof(*dst) : size);
	off = sizeof(*dst);
	if (src->attr_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		if (size && size >= off + sizeof(*dst->attr))
			dst->attr = rte_memcpy
				((void *)((uintptr_t)dst + off),
				 src->attr_ro, sizeof(*dst->attr));
		off += sizeof(*dst->attr);
	}
	if (src->pattern_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		ret = rte_flow_conv_pattern((void *)((uintptr_t)dst + off),
					    size > off ? size - off : 0,
					    src->pattern_ro, 0, error);
		if (ret < 0)
			return ret;
		if (size && size >= off + (size_t)ret)
			dst->pattern = (void *)((uintptr_t)dst + off);
		off += ret;
	}
	if (src->actions_ro) {
		off = RTE_ALIGN_CEIL(off, sizeof(double));
		ret = rte_flow_conv_actions((void *)((uintptr_t)dst + off),
					    size > off ? size - off : 0,
					    src->actions_ro, 0, error);
		if (ret < 0)
			return ret;
		if (size >= off + (size_t)ret)
			dst->actions = (void *)((uintptr_t)dst + off);
		off += ret;
	}
	return off;
}

/**
 * Retrieve the name of a pattern item/action type.
 *
 * @param is_action
 *   Nonzero when @p src represents an action type instead of a pattern item
 *   type.
 * @param is_ptr
 *   Nonzero to write string address instead of contents into @p dst.
 * @param[out] dst
 *   Destination buffer. Can be NULL if @p size is zero.
 * @param size
 *   Size of @p dst in bytes.
 * @param[in] src
 *   Depending on @p is_action, source pattern item or action type cast as a
 *   pointer.
 * @param[out] error
 *   Perform verbose error reporting if not NULL.
 *
 * @return
 *   A positive value representing the number of bytes needed to store the
 *   name or its address regardless of @p size on success (@p dst contents
 *   are truncated to @p size if not large enough), a negative errno value
 *   otherwise and rte_errno is set.
 */
static int
rte_flow_conv_name(int is_action,
		   int is_ptr,
		   char *dst,
		   const size_t size,
		   const void *src,
		   struct rte_flow_error *error)
{
	struct desc_info {
		const struct rte_flow_desc_data *data;
		size_t num;
	};
	static const struct desc_info info_rep[2] = {
		{ rte_flow_desc_item, RTE_DIM(rte_flow_desc_item), },
		{ rte_flow_desc_action, RTE_DIM(rte_flow_desc_action), },
	};
	const struct desc_info *const info = &info_rep[!!is_action];
	unsigned int type = (uintptr_t)src;

	if (type >= info->num)
		return rte_flow_error_set
			(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			 "unknown object type to retrieve the name of");
	if (!is_ptr)
		return strlcpy(dst, info->data[type].name, size);
	if (size >= sizeof(const char **))
		*((const char **)dst) = info->data[type].name;
	return sizeof(const char **);
}

/** Helper function to convert flow API objects. */
int
rte_flow_conv(enum rte_flow_conv_op op,
	      void *dst,
	      size_t size,
	      const void *src,
	      struct rte_flow_error *error)
{
	int ret;

	switch (op) {
		const struct rte_flow_attr *attr;

	case RTE_FLOW_CONV_OP_NONE:
		ret = 0;
		break;
	case RTE_FLOW_CONV_OP_ATTR:
		attr = src;
		if (size > sizeof(*attr))
			size = sizeof(*attr);
		rte_memcpy(dst, attr, size);
		ret = sizeof(*attr);
		break;
	case RTE_FLOW_CONV_OP_ITEM:
		ret = rte_flow_conv_pattern(dst, size, src, 1, error);
		break;
	case RTE_FLOW_CONV_OP_ACTION:
		ret = rte_flow_conv_actions(dst, size, src, 1, error);
		break;
	case RTE_FLOW_CONV_OP_PATTERN:
		ret = rte_flow_conv_pattern(dst, size, src, 0, error);
		break;
	case RTE_FLOW_CONV_OP_ACTIONS:
		ret = rte_flow_conv_actions(dst, size, src, 0, error);
		break;
	case RTE_FLOW_CONV_OP_RULE:
		ret = rte_flow_conv_rule(dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ITEM_NAME:
		ret = rte_flow_conv_name(0, 0, dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ACTION_NAME:
		ret = rte_flow_conv_name(1, 0, dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ITEM_NAME_PTR:
		ret = rte_flow_conv_name(0, 1, dst, size, src, error);
		break;
	case RTE_FLOW_CONV_OP_ACTION_NAME_PTR:
		ret = rte_flow_conv_name(1, 1, dst, size, src, error);
		break;
	default:
		ret = rte_flow_error_set
			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
			 "unknown object conversion operation");
	}

	rte_flow_trace_conv(op, dst, size, src, ret);

	return ret;
}
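
/*
 * Usage sketch (illustrative): rte_flow_conv() returns the number of bytes
 * needed regardless of the buffer size, which enables the usual two-pass
 * pattern of sizing first, then converting:
 *
 *	struct rte_flow_error err;
 *	int len = rte_flow_conv(RTE_FLOW_CONV_OP_PATTERN, NULL, 0,
 *				pattern, &err);
 *	if (len > 0) {
 *		struct rte_flow_item *copy = malloc(len);
 *		if (copy != NULL)
 *			rte_flow_conv(RTE_FLOW_CONV_OP_PATTERN, copy, len,
 *				      pattern, &err);
 *	}
 */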

/** Store a full rte_flow description. */
size_t
rte_flow_copy(struct rte_flow_desc *desc, size_t len,
	      const struct rte_flow_attr *attr,
	      const struct rte_flow_item *items,
	      const struct rte_flow_action *actions)
{
	/*
	 * Overlap struct rte_flow_conv_rule with struct rte_flow_desc in
	 * order to convert the former to the latter without wasting space.
	 */
	struct rte_flow_conv_rule *dst =
		len ?
		(void *)((uintptr_t)desc +
			 (offsetof(struct rte_flow_desc, actions) -
			  offsetof(struct rte_flow_conv_rule, actions))) :
		NULL;
	size_t dst_size =
		len > sizeof(*desc) - sizeof(*dst) ?
		len - (sizeof(*desc) - sizeof(*dst)) :
		0;
	struct rte_flow_conv_rule src = {
		.attr_ro = NULL,
		.pattern_ro = items,
		.actions_ro = actions,
	};
	int ret;

	RTE_BUILD_BUG_ON(sizeof(struct rte_flow_desc) <
			 sizeof(struct rte_flow_conv_rule));
	if (dst_size &&
	    (&dst->pattern != &desc->items ||
	     &dst->actions != &desc->actions ||
	     (uintptr_t)(dst + 1) != (uintptr_t)(desc + 1))) {
		rte_errno = EINVAL;
		return 0;
	}
	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, dst, dst_size, &src, NULL);
	if (ret < 0)
		return 0;
	ret += sizeof(*desc) - sizeof(*dst);
	rte_memcpy(desc,
		   (&(struct rte_flow_desc){
			.size = ret,
			.attr = *attr,
			.items = dst_size ? dst->pattern : NULL,
			.actions = dst_size ? dst->actions : NULL,
		   }),
		   len > sizeof(*desc) ? sizeof(*desc) : len);

	rte_flow_trace_copy(desc, len, attr, items, actions, ret);

	return ret;
}
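
/*
 * Usage sketch (illustrative): like rte_flow_conv(), rte_flow_copy()
 * reports the required size when the buffer is too small, so a flow rule
 * can be snapshotted in two calls, passing a zero length to size the
 * buffer first:
 *
 *	size_t len = rte_flow_copy(NULL, 0, &attr, pattern, actions);
 *	struct rte_flow_desc *desc = malloc(len);
 *
 *	if (desc != NULL)
 *		rte_flow_copy(desc, len, &attr, pattern, actions);
 */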

int
rte_flow_dev_dump(uint16_t port_id, struct rte_flow *flow,
			FILE *file, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->dev_dump)) {
		fts_enter(dev);
		ret = ops->dev_dump(dev, flow, file, error);
		fts_exit(dev);
		return flow_err(port_id, ret, error);
	}
	return rte_flow_error_set(error, ENOSYS,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOSYS));
}
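
/*
 * Usage sketch (illustrative): passing a NULL flow pointer asks the PMD to
 * dump all flows on the port, when the driver supports it:
 *
 *	FILE *f = fopen("/tmp/flow_dump.txt", "w");
 *
 *	if (f != NULL) {
 *		rte_flow_dev_dump(port_id, NULL, f, &err);
 *		fclose(f);
 *	}
 */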

int
rte_flow_get_aged_flows(uint16_t port_id, void **contexts,
		    uint32_t nb_contexts, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->get_aged_flows)) {
		fts_enter(dev);
		ret = ops->get_aged_flows(dev, contexts, nb_contexts, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_get_aged_flows(port_id, contexts, nb_contexts, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}
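
/*
 * Usage sketch (illustrative): calling with nb_contexts == 0 returns the
 * number of aged-out flows without fetching their contexts, which sizes
 * the array for a second call:
 *
 *	int nb = rte_flow_get_aged_flows(port_id, NULL, 0, &err);
 *
 *	if (nb > 0) {
 *		void **ctx = calloc(nb, sizeof(*ctx));
 *		if (ctx != NULL)
 *			nb = rte_flow_get_aged_flows(port_id, ctx, nb, &err);
 *	}
 */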

int
rte_flow_get_q_aged_flows(uint16_t port_id, uint32_t queue_id, void **contexts,
			  uint32_t nb_contexts, struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->get_q_aged_flows)) {
		fts_enter(dev);
		ret = ops->get_q_aged_flows(dev, queue_id, contexts,
					    nb_contexts, error);
		fts_exit(dev);
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_get_q_aged_flows(port_id, queue_id, contexts,
						nb_contexts, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

struct rte_flow_action_handle *
rte_flow_action_handle_create(uint16_t port_id,
			      const struct rte_flow_indir_action_conf *conf,
			      const struct rte_flow_action *action,
			      struct rte_flow_error *error)
{
	struct rte_flow_action_handle *handle;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return NULL;
	if (unlikely(!ops->action_handle_create)) {
		rte_flow_error_set(error, ENOSYS,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
				   rte_strerror(ENOSYS));
		return NULL;
	}
	handle = ops->action_handle_create(&rte_eth_devices[port_id],
					   conf, action, error);
	if (handle == NULL)
		flow_err(port_id, -rte_errno, error);

	rte_flow_trace_action_handle_create(port_id, conf, action, handle);

	return handle;
}

int
rte_flow_action_handle_destroy(uint16_t port_id,
			       struct rte_flow_action_handle *handle,
			       struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(!ops->action_handle_destroy))
		return rte_flow_error_set(error, ENOSYS,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOSYS));
	ret = ops->action_handle_destroy(&rte_eth_devices[port_id],
					 handle, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_action_handle_destroy(port_id, handle, ret);

	return ret;
}

int
rte_flow_action_handle_update(uint16_t port_id,
			      struct rte_flow_action_handle *handle,
			      const void *update,
			      struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(!ops->action_handle_update))
		return rte_flow_error_set(error, ENOSYS,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOSYS));
	ret = ops->action_handle_update(&rte_eth_devices[port_id], handle,
					update, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_action_handle_update(port_id, handle, update, ret);

	return ret;
}

int
rte_flow_action_handle_query(uint16_t port_id,
			     const struct rte_flow_action_handle *handle,
			     void *data,
			     struct rte_flow_error *error)
{
	int ret;
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(!ops->action_handle_query))
		return rte_flow_error_set(error, ENOSYS,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOSYS));
	ret = ops->action_handle_query(&rte_eth_devices[port_id], handle,
				       data, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_action_handle_query(port_id, handle, data, ret);

	return ret;
}
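
/*
 * Usage sketch (illustrative): create an indirect counter once, reference
 * it from a rule through RTE_FLOW_ACTION_TYPE_INDIRECT (whose conf is the
 * handle itself, as noted above), then query it independently of the rule:
 *
 *	struct rte_flow_indir_action_conf iconf = { .ingress = 1 };
 *	struct rte_flow_action count = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *	};
 *	struct rte_flow_action_handle *h =
 *		rte_flow_action_handle_create(port_id, &iconf, &count, &err);
 *
 *	struct rte_flow_action actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_INDIRECT, .conf = h },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	...
 *	struct rte_flow_query_count out = { 0 };
 *	rte_flow_action_handle_query(port_id, h, &out, &err);
 */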

int
rte_flow_tunnel_decap_set(uint16_t port_id,
			  struct rte_flow_tunnel *tunnel,
			  struct rte_flow_action **actions,
			  uint32_t *num_of_actions,
			  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_decap_set)) {
		ret = flow_err(port_id,
			       ops->tunnel_decap_set(dev, tunnel, actions,
						     num_of_actions, error),
			       error);

		rte_flow_trace_tunnel_decap_set(port_id, tunnel, actions,
						num_of_actions, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_tunnel_match(uint16_t port_id,
		      struct rte_flow_tunnel *tunnel,
		      struct rte_flow_item **items,
		      uint32_t *num_of_items,
		      struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_match)) {
		ret = flow_err(port_id,
			       ops->tunnel_match(dev, tunnel, items,
						 num_of_items, error),
			       error);

		rte_flow_trace_tunnel_match(port_id, tunnel, items, num_of_items,
					    ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_get_restore_info(uint16_t port_id,
			  struct rte_mbuf *m,
			  struct rte_flow_restore_info *restore_info,
			  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->get_restore_info)) {
		ret = flow_err(port_id,
			       ops->get_restore_info(dev, m, restore_info,
						     error),
			       error);

		rte_flow_trace_get_restore_info(port_id, m, restore_info, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_tunnel_action_decap_release(uint16_t port_id,
				     struct rte_flow_action *actions,
				     uint32_t num_of_actions,
				     struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_action_decap_release)) {
		ret = flow_err(port_id,
			       ops->tunnel_action_decap_release(dev, actions,
								num_of_actions,
								error),
			       error);

		rte_flow_trace_tunnel_action_decap_release(port_id, actions,
							   num_of_actions, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_tunnel_item_release(uint16_t port_id,
			     struct rte_flow_item *items,
			     uint32_t num_of_items,
			     struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (likely(!!ops->tunnel_item_release)) {
		ret = flow_err(port_id,
			       ops->tunnel_item_release(dev, items,
							num_of_items, error),
			       error);

		rte_flow_trace_tunnel_item_release(port_id, items, num_of_items, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_pick_transfer_proxy(uint16_t port_id, uint16_t *proxy_port_id,
			     struct rte_flow_error *error)
{
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_eth_dev *dev;
	int ret;

	if (unlikely(ops == NULL))
		return -rte_errno;

	if (ops->pick_transfer_proxy == NULL) {
		*proxy_port_id = port_id;
		return 0;
	}

	dev = &rte_eth_devices[port_id];

	ret = flow_err(port_id,
		       ops->pick_transfer_proxy(dev, proxy_port_id, error),
		       error);

	rte_flow_trace_pick_transfer_proxy(port_id, proxy_port_id, ret);

	return ret;
}
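
/*
 * Usage sketch (illustrative): transfer rules must be created through the
 * proxy port, which may differ from the port whose traffic is matched:
 *
 *	uint16_t proxy_id;
 *
 *	if (rte_flow_pick_transfer_proxy(port_id, &proxy_id, &err) == 0)
 *		flow = rte_flow_create(proxy_id, &transfer_attr,
 *				       pattern, actions, &err);
 */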

struct rte_flow_item_flex_handle *
rte_flow_flex_item_create(uint16_t port_id,
			  const struct rte_flow_item_flex_conf *conf,
			  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow_item_flex_handle *handle;

	if (unlikely(!ops))
		return NULL;
	if (unlikely(!ops->flex_item_create)) {
		rte_flow_error_set(error, ENOTSUP,
				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				   NULL, rte_strerror(ENOTSUP));
		return NULL;
	}
	handle = ops->flex_item_create(dev, conf, error);
	if (handle == NULL)
		flow_err(port_id, -rte_errno, error);

	rte_flow_trace_flex_item_create(port_id, conf, handle);

	return handle;
}

int
rte_flow_flex_item_release(uint16_t port_id,
			   const struct rte_flow_item_flex_handle *handle,
			   struct rte_flow_error *error)
{
	int ret;
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);

	if (unlikely(!ops || !ops->flex_item_release))
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENOTSUP));
	ret = ops->flex_item_release(dev, handle, error);
	ret = flow_err(port_id, ret, error);

	rte_flow_trace_flex_item_release(port_id, handle, ret);

	return ret;
}

int
rte_flow_info_get(uint16_t port_id,
		  struct rte_flow_port_info *port_info,
		  struct rte_flow_queue_info *queue_info,
		  struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (dev->data->dev_configured == 0) {
		RTE_FLOW_LOG(INFO,
			"Device with port_id=%"PRIu16" is not configured.\n",
			port_id);
		return -EINVAL;
	}
	if (port_info == NULL) {
		RTE_FLOW_LOG(ERR, "Port %"PRIu16" info is NULL.\n", port_id);
		return -EINVAL;
	}
	if (likely(!!ops->info_get)) {
		ret = flow_err(port_id,
			       ops->info_get(dev, port_info, queue_info, error),
			       error);

		rte_flow_trace_info_get(port_id, port_info, queue_info, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

int
rte_flow_configure(uint16_t port_id,
		   const struct rte_flow_port_attr *port_attr,
		   uint16_t nb_queue,
		   const struct rte_flow_queue_attr *queue_attr[],
		   struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (dev->data->dev_configured == 0) {
		RTE_FLOW_LOG(INFO,
			"Device with port_id=%"PRIu16" is not configured.\n",
			port_id);
		return -EINVAL;
	}
	if (dev->data->dev_started != 0) {
		RTE_FLOW_LOG(INFO,
			"Device with port_id=%"PRIu16" already started.\n",
			port_id);
		return -EINVAL;
	}
	if (port_attr == NULL) {
		RTE_FLOW_LOG(ERR, "Port %"PRIu16" port attr is NULL.\n", port_id);
		return -EINVAL;
	}
	if (queue_attr == NULL) {
		RTE_FLOW_LOG(ERR, "Port %"PRIu16" queue attr is NULL.\n", port_id);
		return -EINVAL;
	}
	if ((port_attr->flags & RTE_FLOW_PORT_FLAG_SHARE_INDIRECT) &&
	     !rte_eth_dev_is_valid_port(port_attr->host_port_id)) {
		return rte_flow_error_set(error, ENODEV,
					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
					  NULL, rte_strerror(ENODEV));
	}
	if (likely(!!ops->configure)) {
		ret = ops->configure(dev, port_attr, nb_queue, queue_attr, error);
		if (ret == 0)
			dev->data->flow_configured = 1;
		ret = flow_err(port_id, ret, error);

		rte_flow_trace_configure(port_id, port_attr, nb_queue, queue_attr, ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}
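
/*
 * Usage sketch (illustrative): query the port limits first, then size the
 * pre-allocated flow resources within them before starting the port:
 *
 *	struct rte_flow_port_info pinfo;
 *	struct rte_flow_queue_info qinfo;
 *
 *	rte_flow_info_get(port_id, &pinfo, &qinfo, &err);
 *
 *	struct rte_flow_port_attr pattr = { .nb_counters = 1024 };
 *	struct rte_flow_queue_attr qattr = { .size = 64 };
 *	const struct rte_flow_queue_attr *qlist[] = { &qattr };
 *
 *	rte_flow_configure(port_id, &pattr, 1, qlist, &err);
 */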

struct rte_flow_pattern_template *
rte_flow_pattern_template_create(uint16_t port_id,
		const struct rte_flow_pattern_template_attr *template_attr,
		const struct rte_flow_item pattern[],
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow_pattern_template *template;

	if (unlikely(!ops))
		return NULL;
	if (dev->data->flow_configured == 0) {
		RTE_FLOW_LOG(INFO,
			"Flow engine on port_id=%"PRIu16" is not configured.\n",
			port_id);
		rte_flow_error_set(error, EINVAL,
				RTE_FLOW_ERROR_TYPE_STATE,
				NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (template_attr == NULL) {
		RTE_FLOW_LOG(ERR,
			     "Port %"PRIu16" template attr is NULL.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (pattern == NULL) {
		RTE_FLOW_LOG(ERR,
			     "Port %"PRIu16" pattern is NULL.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (likely(!!ops->pattern_template_create)) {
		template = ops->pattern_template_create(dev, template_attr,
							pattern, error);
		if (template == NULL)
			flow_err(port_id, -rte_errno, error);

		rte_flow_trace_pattern_template_create(port_id, template_attr,
						       pattern, template);

		return template;
	}
	rte_flow_error_set(error, ENOTSUP,
			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
			   NULL, rte_strerror(ENOTSUP));
	return NULL;
}

int
rte_flow_pattern_template_destroy(uint16_t port_id,
		struct rte_flow_pattern_template *pattern_template,
		struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	int ret;

	if (unlikely(!ops))
		return -rte_errno;
	if (unlikely(pattern_template == NULL))
		return 0;
	if (likely(!!ops->pattern_template_destroy)) {
		ret = flow_err(port_id,
			       ops->pattern_template_destroy(dev,
							     pattern_template,
							     error),
			       error);

		rte_flow_trace_pattern_template_destroy(port_id, pattern_template,
							ret);

		return ret;
	}
	return rte_flow_error_set(error, ENOTSUP,
				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
				  NULL, rte_strerror(ENOTSUP));
}

struct rte_flow_actions_template *
rte_flow_actions_template_create(uint16_t port_id,
			const struct rte_flow_actions_template_attr *template_attr,
			const struct rte_flow_action actions[],
			const struct rte_flow_action masks[],
			struct rte_flow_error *error)
{
	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
	struct rte_flow_actions_template *template;

	if (unlikely(!ops))
		return NULL;
	if (dev->data->flow_configured == 0) {
		RTE_FLOW_LOG(INFO,
			"Flow engine on port_id=%"PRIu16" is not configured.\n",
			port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_STATE,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (template_attr == NULL) {
		RTE_FLOW_LOG(ERR,
			     "Port %"PRIu16" template attr is NULL.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
	if (actions == NULL) {
		RTE_FLOW_LOG(ERR,
			     "Port %"PRIu16" actions is NULL.\n",
			     port_id);
		rte_flow_error_set(error, EINVAL,
				   RTE_FLOW_ERROR_TYPE_ATTR,
				   NULL, rte_strerror(EINVAL));
		return NULL;
	}
1778 	if (masks == NULL) {
1779 		RTE_FLOW_LOG(ERR,
1780 			     "Port %"PRIu16" masks is NULL.\n",
1781 			     port_id);
1782 		rte_flow_error_set(error, EINVAL,
1783 				   RTE_FLOW_ERROR_TYPE_ATTR,
1784 				   NULL, rte_strerror(EINVAL));
1785 		return NULL;
1786 	}
1787 	if (likely(!!ops->actions_template_create)) {
1788 		template = ops->actions_template_create(dev, template_attr,
1789 							actions, masks, error);
1790 		if (template == NULL)
1791 			flow_err(port_id, -rte_errno, error);
1792 
1793 		rte_flow_trace_actions_template_create(port_id, template_attr, actions,
1794 						       masks, template);
1795 
1796 		return template;
1797 	}
1798 	rte_flow_error_set(error, ENOTSUP,
1799 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1800 			   NULL, rte_strerror(ENOTSUP));
1801 	return NULL;
1802 }
1803 
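/*
 * Illustrative sketch: in an actions template, "masks" mirrors "actions"
 * and marks which action fields are fixed now versus supplied per rule.
 * Here the QUEUE index is left open (mask conf omitted, i.e. zeroed),
 * so every rule provides its own destination queue.
 *
 *	const struct rte_flow_action tmpl_actions[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	const struct rte_flow_action tmpl_masks[] = {
 *		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE },
 *		{ .type = RTE_FLOW_ACTION_TYPE_END },
 *	};
 *	const struct rte_flow_actions_template_attr at_attr = {
 *		.ingress = 1,
 *	};
 *	struct rte_flow_actions_template *at =
 *		rte_flow_actions_template_create(port_id, &at_attr,
 *						 tmpl_actions, tmpl_masks,
 *						 &err);
 */
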
1804 int
1805 rte_flow_actions_template_destroy(uint16_t port_id,
1806 			struct rte_flow_actions_template *actions_template,
1807 			struct rte_flow_error *error)
1808 {
1809 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1810 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1811 	int ret;
1812 
1813 	if (unlikely(!ops))
1814 		return -rte_errno;
1815 	if (unlikely(actions_template == NULL))
1816 		return 0;
1817 	if (likely(!!ops->actions_template_destroy)) {
1818 		ret = flow_err(port_id,
1819 			       ops->actions_template_destroy(dev,
1820 							     actions_template,
1821 							     error),
1822 			       error);
1823 
1824 		rte_flow_trace_actions_template_destroy(port_id, actions_template,
1825 							ret);
1826 
1827 		return ret;
1828 	}
1829 	return rte_flow_error_set(error, ENOTSUP,
1830 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1831 				  NULL, rte_strerror(ENOTSUP));
1832 }
1833 
1834 struct rte_flow_template_table *
1835 rte_flow_template_table_create(uint16_t port_id,
1836 			const struct rte_flow_template_table_attr *table_attr,
1837 			struct rte_flow_pattern_template *pattern_templates[],
1838 			uint8_t nb_pattern_templates,
1839 			struct rte_flow_actions_template *actions_templates[],
1840 			uint8_t nb_actions_templates,
1841 			struct rte_flow_error *error)
1842 {
1843 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1844 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1845 	struct rte_flow_template_table *table;
1846 
1847 	if (unlikely(!ops))
1848 		return NULL;
1849 	if (dev->data->flow_configured == 0) {
1850 		RTE_FLOW_LOG(INFO,
1851 			"Flow engine on port_id=%"PRIu16" is not configured.\n",
1852 			port_id);
1853 		rte_flow_error_set(error, EINVAL,
1854 				   RTE_FLOW_ERROR_TYPE_STATE,
1855 				   NULL, rte_strerror(EINVAL));
1856 		return NULL;
1857 	}
1858 	if (table_attr == NULL) {
1859 		RTE_FLOW_LOG(ERR,
1860 			     "Port %"PRIu16" table attr is NULL.\n",
1861 			     port_id);
1862 		rte_flow_error_set(error, EINVAL,
1863 				   RTE_FLOW_ERROR_TYPE_ATTR,
1864 				   NULL, rte_strerror(EINVAL));
1865 		return NULL;
1866 	}
1867 	if (pattern_templates == NULL) {
1868 		RTE_FLOW_LOG(ERR,
1869 			     "Port %"PRIu16" pattern templates is NULL.\n",
1870 			     port_id);
1871 		rte_flow_error_set(error, EINVAL,
1872 				   RTE_FLOW_ERROR_TYPE_ATTR,
1873 				   NULL, rte_strerror(EINVAL));
1874 		return NULL;
1875 	}
1876 	if (actions_templates == NULL) {
1877 		RTE_FLOW_LOG(ERR,
1878 			     "Port %"PRIu16" actions templates is NULL.\n",
1879 			     port_id);
1880 		rte_flow_error_set(error, EINVAL,
1881 				   RTE_FLOW_ERROR_TYPE_ATTR,
1882 				   NULL, rte_strerror(EINVAL));
1883 		return NULL;
1884 	}
1885 	if (likely(!!ops->template_table_create)) {
1886 		table = ops->template_table_create(dev, table_attr,
1887 					pattern_templates, nb_pattern_templates,
1888 					actions_templates, nb_actions_templates,
1889 					error);
1890 		if (table == NULL)
1891 			flow_err(port_id, -rte_errno, error);
1892 
1893 		rte_flow_trace_template_table_create(port_id, table_attr,
1894 						     pattern_templates,
1895 						     nb_pattern_templates,
1896 						     actions_templates,
1897 						     nb_actions_templates, table);
1898 
1899 		return table;
1900 	}
1901 	rte_flow_error_set(error, ENOTSUP,
1902 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1903 			   NULL, rte_strerror(ENOTSUP));
1904 	return NULL;
1905 }
1906 
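/*
 * Illustrative sketch, reusing pt and at from the examples above: a
 * template table binds pattern and actions templates together and
 * pre-allocates capacity for a fixed number of rules (nb_flows is an
 * arbitrary example value).
 *
 *	const struct rte_flow_template_table_attr table_attr = {
 *		.flow_attr = { .ingress = 1 },
 *		.nb_flows = 1 << 16,
 *	};
 *	struct rte_flow_pattern_template *pts[] = { pt };
 *	struct rte_flow_actions_template *ats[] = { at };
 *	struct rte_flow_template_table *tbl =
 *		rte_flow_template_table_create(port_id, &table_attr,
 *					       pts, 1, ats, 1, &err);
 */
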
1907 int
1908 rte_flow_template_table_destroy(uint16_t port_id,
1909 				struct rte_flow_template_table *template_table,
1910 				struct rte_flow_error *error)
1911 {
1912 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1913 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1914 	int ret;
1915 
1916 	if (unlikely(!ops))
1917 		return -rte_errno;
1918 	if (unlikely(template_table == NULL))
1919 		return 0;
1920 	if (likely(!!ops->template_table_destroy)) {
1921 		ret = flow_err(port_id,
1922 			       ops->template_table_destroy(dev,
1923 							   template_table,
1924 							   error),
1925 			       error);
1926 
1927 		rte_flow_trace_template_table_destroy(port_id, template_table,
1928 						      ret);
1929 
1930 		return ret;
1931 	}
1932 	return rte_flow_error_set(error, ENOTSUP,
1933 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1934 				  NULL, rte_strerror(ENOTSUP));
1935 }
1936 
1937 struct rte_flow *
1938 rte_flow_async_create(uint16_t port_id,
1939 		      uint32_t queue_id,
1940 		      const struct rte_flow_op_attr *op_attr,
1941 		      struct rte_flow_template_table *template_table,
1942 		      const struct rte_flow_item pattern[],
1943 		      uint8_t pattern_template_index,
1944 		      const struct rte_flow_action actions[],
1945 		      uint8_t actions_template_index,
1946 		      void *user_data,
1947 		      struct rte_flow_error *error)
1948 {
1949 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1950 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1951 	struct rte_flow *flow;
1952 
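	/*
	 * Fast path: unlike the checked template APIs above, no ops or
	 * engine-state validation is performed before calling the PMD.
	 */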
1953 	flow = ops->async_create(dev, queue_id,
1954 				 op_attr, template_table,
1955 				 pattern, pattern_template_index,
1956 				 actions, actions_template_index,
1957 				 user_data, error);
1958 	if (flow == NULL)
1959 		flow_err(port_id, -rte_errno, error);
1960 
1961 	rte_flow_trace_async_create(port_id, queue_id, op_attr, template_table,
1962 				    pattern, pattern_template_index, actions,
1963 				    actions_template_index, user_data, flow);
1964 
1965 	return flow;
1966 }
1967 
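/*
 * Illustrative sketch (tbl, rule_pattern, rule_actions and cookie are
 * assumed from context): insertion is asynchronous and only enqueues an
 * operation on flow queue 0.  The two indexes select entries in the
 * table's pattern/actions template arrays; spec values for the masked
 * fields and the per-rule action configuration are supplied here.
 *
 *	const struct rte_flow_op_attr op_attr = { .postpone = 1 };
 *	struct rte_flow *flow =
 *		rte_flow_async_create(port_id, 0, &op_attr, tbl,
 *				      rule_pattern, 0, rule_actions, 0,
 *				      cookie, &err);
 *
 * With .postpone set, nothing is flushed to the hardware until
 * rte_flow_push(); the completion surfaces later through
 * rte_flow_pull().  rte_flow_async_destroy() follows the same
 * enqueue/push/pull pattern.
 */
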
1968 struct rte_flow *
1969 rte_flow_async_create_by_index(uint16_t port_id,
1970 			       uint32_t queue_id,
1971 			       const struct rte_flow_op_attr *op_attr,
1972 			       struct rte_flow_template_table *template_table,
1973 			       uint32_t rule_index,
1974 			       const struct rte_flow_action actions[],
1975 			       uint8_t actions_template_index,
1976 			       void *user_data,
1977 			       struct rte_flow_error *error)
1978 {
1979 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1980 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1981 	struct rte_flow *flow;
1982 
1983 	flow = ops->async_create_by_index(dev, queue_id,
1984 					  op_attr, template_table, rule_index,
1985 					  actions, actions_template_index,
1986 					  user_data, error);
1987 	if (flow == NULL)
1988 		flow_err(port_id, -rte_errno, error);
1989 	return flow;
1990 }
1991 
1992 int
1993 rte_flow_async_destroy(uint16_t port_id,
1994 		       uint32_t queue_id,
1995 		       const struct rte_flow_op_attr *op_attr,
1996 		       struct rte_flow *flow,
1997 		       void *user_data,
1998 		       struct rte_flow_error *error)
1999 {
2000 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2001 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2002 	int ret;
2003 
2004 	ret = flow_err(port_id,
2005 		       ops->async_destroy(dev, queue_id,
2006 					  op_attr, flow,
2007 					  user_data, error),
2008 		       error);
2009 
2010 	rte_flow_trace_async_destroy(port_id, queue_id, op_attr, flow,
2011 				     user_data, ret);
2012 
2013 	return ret;
2014 }
2015 
2016 int
2017 rte_flow_async_actions_update(uint16_t port_id,
2018 			      uint32_t queue_id,
2019 			      const struct rte_flow_op_attr *op_attr,
2020 			      struct rte_flow *flow,
2021 			      const struct rte_flow_action actions[],
2022 			      uint8_t actions_template_index,
2023 			      void *user_data,
2024 			      struct rte_flow_error *error)
2025 {
2026 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2027 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2028 	int ret;
2029 
2030 	ret = flow_err(port_id,
2031 		       ops->async_actions_update(dev, queue_id, op_attr,
2032 						 flow, actions,
2033 						 actions_template_index,
2034 						 user_data, error),
2035 		       error);
2036 
2037 	rte_flow_trace_async_actions_update(port_id, queue_id, op_attr, flow,
2038 					    actions, actions_template_index,
2039 					    user_data, ret);
2040 
2041 	return ret;
2042 }
2043 
2044 int
2045 rte_flow_push(uint16_t port_id,
2046 	      uint32_t queue_id,
2047 	      struct rte_flow_error *error)
2048 {
2049 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2050 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2051 	int ret;
2052 
2053 	ret = flow_err(port_id,
2054 		       ops->push(dev, queue_id, error),
2055 		       error);
2056 
2057 	rte_flow_trace_push(port_id, queue_id, ret);
2058 
2059 	return ret;
2060 }
2061 
2062 int
2063 rte_flow_pull(uint16_t port_id,
2064 	      uint32_t queue_id,
2065 	      struct rte_flow_op_result res[],
2066 	      uint16_t n_res,
2067 	      struct rte_flow_error *error)
2068 {
2069 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2070 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2071 	int ret;
2072 	int rc;
2073 
2074 	ret = ops->pull(dev, queue_id, res, n_res, error);
2075 	rc = ret ? ret : flow_err(port_id, ret, error);
2076 
2077 	rte_flow_trace_pull(port_id, queue_id, res, n_res, rc);
2078 
2079 	return rc;
2080 }
2081 
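/*
 * Illustrative sketch: push flushes postponed operations on one queue,
 * pull then retrieves their completions.  Each result carries back the
 * user_data passed at enqueue time; handle_failure() is a hypothetical
 * application callback.
 *
 *	struct rte_flow_op_result res[32];
 *	int i, n;
 *
 *	rte_flow_push(port_id, 0, &err);
 *	n = rte_flow_pull(port_id, 0, res, RTE_DIM(res), &err);
 *	for (i = 0; i < n; i++)
 *		if (res[i].status != RTE_FLOW_OP_SUCCESS)
 *			handle_failure(res[i].user_data);
 */
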
2082 struct rte_flow_action_handle *
2083 rte_flow_async_action_handle_create(uint16_t port_id,
2084 		uint32_t queue_id,
2085 		const struct rte_flow_op_attr *op_attr,
2086 		const struct rte_flow_indir_action_conf *indir_action_conf,
2087 		const struct rte_flow_action *action,
2088 		void *user_data,
2089 		struct rte_flow_error *error)
2090 {
2091 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2092 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2093 	struct rte_flow_action_handle *handle;
2094 
2095 	handle = ops->async_action_handle_create(dev, queue_id, op_attr,
2096 					     indir_action_conf, action, user_data, error);
2097 	if (handle == NULL)
2098 		flow_err(port_id, -rte_errno, error);
2099 
2100 	rte_flow_trace_async_action_handle_create(port_id, queue_id, op_attr,
2101 						  indir_action_conf, action,
2102 						  user_data, handle);
2103 
2104 	return handle;
2105 }
2106 
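/*
 * Illustrative sketch: an indirect action is created once and can then
 * be referenced by many rules through its handle.  A shared counter is
 * a typical case; the action's conf is left unset here since this is
 * only a sketch and per-PMD requirements vary.
 *
 *	const struct rte_flow_indir_action_conf indir_conf = {
 *		.ingress = 1,
 *	};
 *	const struct rte_flow_action count_action = {
 *		.type = RTE_FLOW_ACTION_TYPE_COUNT,
 *	};
 *	struct rte_flow_action_handle *h =
 *		rte_flow_async_action_handle_create(port_id, 0, &op_attr,
 *						    &indir_conf,
 *						    &count_action,
 *						    NULL, &err);
 */
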
2107 int
2108 rte_flow_async_action_handle_destroy(uint16_t port_id,
2109 		uint32_t queue_id,
2110 		const struct rte_flow_op_attr *op_attr,
2111 		struct rte_flow_action_handle *action_handle,
2112 		void *user_data,
2113 		struct rte_flow_error *error)
2114 {
2115 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2116 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2117 	int ret;
2118 
2119 	ret = ops->async_action_handle_destroy(dev, queue_id, op_attr,
2120 					   action_handle, user_data, error);
2121 	ret = flow_err(port_id, ret, error);
2122 
2123 	rte_flow_trace_async_action_handle_destroy(port_id, queue_id, op_attr,
2124 						   action_handle, user_data, ret);
2125 
2126 	return ret;
2127 }
2128 
2129 int
2130 rte_flow_async_action_handle_update(uint16_t port_id,
2131 		uint32_t queue_id,
2132 		const struct rte_flow_op_attr *op_attr,
2133 		struct rte_flow_action_handle *action_handle,
2134 		const void *update,
2135 		void *user_data,
2136 		struct rte_flow_error *error)
2137 {
2138 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2139 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2140 	int ret;
2141 
2142 	ret = ops->async_action_handle_update(dev, queue_id, op_attr,
2143 					  action_handle, update, user_data, error);
2144 	ret = flow_err(port_id, ret, error);
2145 
2146 	rte_flow_trace_async_action_handle_update(port_id, queue_id, op_attr,
2147 						  action_handle, update,
2148 						  user_data, ret);
2149 
2150 	return ret;
2151 }
2152 
2153 int
2154 rte_flow_async_action_handle_query(uint16_t port_id,
2155 		uint32_t queue_id,
2156 		const struct rte_flow_op_attr *op_attr,
2157 		const struct rte_flow_action_handle *action_handle,
2158 		void *data,
2159 		void *user_data,
2160 		struct rte_flow_error *error)
2161 {
2162 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2163 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
2164 	int ret;
2165 
2166 	ret = ops->async_action_handle_query(dev, queue_id, op_attr,
2167 					  action_handle, data, user_data, error);
2168 	ret = flow_err(port_id, ret, error);
2169 
2170 	rte_flow_trace_async_action_handle_query(port_id, queue_id, op_attr,
2171 						 action_handle, data, user_data,
2172 						 ret);
2173 
2174 	return ret;
2175 }
2176 
2177 int
2178 rte_flow_action_handle_query_update(uint16_t port_id,
2179 				    struct rte_flow_action_handle *handle,
2180 				    const void *update, void *query,
2181 				    enum rte_flow_query_update_mode mode,
2182 				    struct rte_flow_error *error)
2183 {
2184 	int ret;
2185 	struct rte_eth_dev *dev;
2186 	const struct rte_flow_ops *ops;
2187 
2188 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2189 	if (!handle)
2190 		return -EINVAL;
2191 	if (!update && !query)
2192 		return -EINVAL;
2193 	dev = &rte_eth_devices[port_id];
2194 	ops = rte_flow_ops_get(port_id, error);
2195 	if (!ops || !ops->action_handle_query_update)
2196 		return -ENOTSUP;
2197 	ret = ops->action_handle_query_update(dev, handle, update,
2198 					      query, mode, error);
2199 	return flow_err(port_id, ret, error);
2200 }
2201 
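/*
 * Illustrative sketch, reusing handle h from the indirect-action
 * example: query and update can be combined in one call, with the mode
 * choosing which side runs first.  Here only the query side is used
 * (update is NULL), so the mode is nominal; a typical case reads a
 * shared counter and resets it in the same operation.
 *
 *	struct rte_flow_query_count count = { .reset = 1 };
 *
 *	ret = rte_flow_action_handle_query_update(port_id, h, NULL,
 *						  &count,
 *						  RTE_FLOW_QU_QUERY_FIRST,
 *						  &err);
 */
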
2202 int
2203 rte_flow_async_action_handle_query_update(uint16_t port_id, uint32_t queue_id,
2204 					  const struct rte_flow_op_attr *attr,
2205 					  struct rte_flow_action_handle *handle,
2206 					  const void *update, void *query,
2207 					  enum rte_flow_query_update_mode mode,
2208 					  void *user_data,
2209 					  struct rte_flow_error *error)
2210 {
2211 	int ret;
2212 	struct rte_eth_dev *dev;
2213 	const struct rte_flow_ops *ops;
2214 
2215 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2216 	if (!handle)
2217 		return -EINVAL;
2218 	if (!update && !query)
2219 		return -EINVAL;
2220 	dev = &rte_eth_devices[port_id];
2221 	ops = rte_flow_ops_get(port_id, error);
2222 	if (!ops || !ops->async_action_handle_query_update)
2223 		return -ENOTSUP;
2224 	ret = ops->async_action_handle_query_update(dev, queue_id, attr,
2225 						    handle, update,
2226 						    query, mode,
2227 						    user_data, error);
2228 	return flow_err(port_id, ret, error);
2229 }
2230