xref: /dpdk/lib/ethdev/rte_flow.c (revision be5ded2f96072e887d5155516f8bbe69d1fb07ad)
199a2dd95SBruce Richardson /* SPDX-License-Identifier: BSD-3-Clause
299a2dd95SBruce Richardson  * Copyright 2016 6WIND S.A.
399a2dd95SBruce Richardson  * Copyright 2016 Mellanox Technologies, Ltd
499a2dd95SBruce Richardson  */
599a2dd95SBruce Richardson 
608966fe7STyler Retzlaff #include <stdalign.h>
799a2dd95SBruce Richardson #include <errno.h>
899a2dd95SBruce Richardson #include <stddef.h>
999a2dd95SBruce Richardson #include <stdint.h>
102744cb6eSThomas Monjalon #include <pthread.h>
1199a2dd95SBruce Richardson 
1299a2dd95SBruce Richardson #include <rte_common.h>
1399a2dd95SBruce Richardson #include <rte_errno.h>
1499a2dd95SBruce Richardson #include <rte_branch_prediction.h>
1599a2dd95SBruce Richardson #include <rte_string_fns.h>
1699a2dd95SBruce Richardson #include <rte_mbuf_dyn.h>
1799a2dd95SBruce Richardson #include "rte_flow_driver.h"
1899a2dd95SBruce Richardson #include "rte_flow.h"
1999a2dd95SBruce Richardson 
20ed04fd40SAnkur Dwivedi #include "ethdev_trace.h"
21ed04fd40SAnkur Dwivedi 
220e21c7c0SDavid Marchand #define FLOW_LOG RTE_ETHDEV_LOG_LINE
230e21c7c0SDavid Marchand 
2499a2dd95SBruce Richardson /* Mbuf dynamic field name for metadata. */
2599a2dd95SBruce Richardson int32_t rte_flow_dynf_metadata_offs = -1;
2699a2dd95SBruce Richardson 
2799a2dd95SBruce Richardson /* Mbuf dynamic field flag bit number for metadata. */
2899a2dd95SBruce Richardson uint64_t rte_flow_dynf_metadata_mask;
2999a2dd95SBruce Richardson 
3099a2dd95SBruce Richardson /**
3199a2dd95SBruce Richardson  * Flow elements description tables.
3299a2dd95SBruce Richardson  */
3399a2dd95SBruce Richardson struct rte_flow_desc_data {
3499a2dd95SBruce Richardson 	const char *name;
3599a2dd95SBruce Richardson 	size_t size;
366cf72047SGregory Etelson 	size_t (*desc_fn)(void *dst, const void *src);
3799a2dd95SBruce Richardson };
3899a2dd95SBruce Richardson 
396cf72047SGregory Etelson /**
406cf72047SGregory Etelson  * Copy a flow element description (pattern item or action configuration).
416cf72047SGregory Etelson  * @param buf
426cf72047SGregory Etelson  * Destination memory.
436cf72047SGregory Etelson  * @param data
446cf72047SGregory Etelson  * Source memory.
456cf72047SGregory Etelson  * @param size
466cf72047SGregory Etelson  * Requested copy size.
476cf72047SGregory Etelson  * @param desc
486cf72047SGregory Etelson  * rte_flow_desc_item - for flow item conversion.
496cf72047SGregory Etelson  * rte_flow_desc_action - for flow action conversion.
506cf72047SGregory Etelson  * @param type
516cf72047SGregory Etelson  * Index into @p desc, or a negative value for PMD private flow elements.
526cf72047SGregory Etelson  */
536cf72047SGregory Etelson static inline size_t
546cf72047SGregory Etelson rte_flow_conv_copy(void *buf, const void *data, const size_t size,
556cf72047SGregory Etelson 		   const struct rte_flow_desc_data *desc, int type)
566cf72047SGregory Etelson {
576cf72047SGregory Etelson 	/*
586cf72047SGregory Etelson 	 * Allow PMD private flow item or action.
596cf72047SGregory Etelson 	 */
60de39080bSGregory Etelson 	bool rte_type = type >= 0;
61de39080bSGregory Etelson 
62de39080bSGregory Etelson 	size_t sz = rte_type ? desc[type].size : sizeof(void *);
63bd6c8808SDariusz Sosnowski 	if (data == NULL)
646cf72047SGregory Etelson 		return 0;
65bd6c8808SDariusz Sosnowski 	if (buf != NULL)
666cf72047SGregory Etelson 		rte_memcpy(buf, data, (size > sz ? sz : size));
67de39080bSGregory Etelson 	if (rte_type && desc[type].desc_fn)
686cf72047SGregory Etelson 		sz += desc[type].desc_fn(size > 0 ? buf : NULL, data);
696cf72047SGregory Etelson 	return sz;
706cf72047SGregory Etelson }
716cf72047SGregory Etelson 
72dc4d860eSViacheslav Ovsiienko static size_t
73dc4d860eSViacheslav Ovsiienko rte_flow_item_flex_conv(void *buf, const void *data)
74dc4d860eSViacheslav Ovsiienko {
75dc4d860eSViacheslav Ovsiienko 	struct rte_flow_item_flex *dst = buf;
76dc4d860eSViacheslav Ovsiienko 	const struct rte_flow_item_flex *src = data;
77dc4d860eSViacheslav Ovsiienko 	if (buf) {
78dc4d860eSViacheslav Ovsiienko 		dst->pattern = rte_memcpy
79dc4d860eSViacheslav Ovsiienko 			((void *)((uintptr_t)(dst + 1)), src->pattern,
80dc4d860eSViacheslav Ovsiienko 			 src->length);
81dc4d860eSViacheslav Ovsiienko 	}
82dc4d860eSViacheslav Ovsiienko 	return src->length;
83dc4d860eSViacheslav Ovsiienko }
84dc4d860eSViacheslav Ovsiienko 
8599a2dd95SBruce Richardson /** Generate flow_item[] entry. */
8699a2dd95SBruce Richardson #define MK_FLOW_ITEM(t, s) \
8799a2dd95SBruce Richardson 	[RTE_FLOW_ITEM_TYPE_ ## t] = { \
8899a2dd95SBruce Richardson 		.name = # t, \
8999a2dd95SBruce Richardson 		.size = s,               \
906cf72047SGregory Etelson 		.desc_fn = NULL,\
916cf72047SGregory Etelson 	}
926cf72047SGregory Etelson 
936cf72047SGregory Etelson #define MK_FLOW_ITEM_FN(t, s, fn) \
946cf72047SGregory Etelson 	[RTE_FLOW_ITEM_TYPE_ ## t] = {\
956cf72047SGregory Etelson 		.name = # t,                 \
966cf72047SGregory Etelson 		.size = s,                   \
976cf72047SGregory Etelson 		.desc_fn = fn,               \
9899a2dd95SBruce Richardson 	}
9999a2dd95SBruce Richardson 
10099a2dd95SBruce Richardson /** Information about known flow pattern items. */
10199a2dd95SBruce Richardson static const struct rte_flow_desc_data rte_flow_desc_item[] = {
10299a2dd95SBruce Richardson 	MK_FLOW_ITEM(END, 0),
10399a2dd95SBruce Richardson 	MK_FLOW_ITEM(VOID, 0),
10499a2dd95SBruce Richardson 	MK_FLOW_ITEM(INVERT, 0),
10599a2dd95SBruce Richardson 	MK_FLOW_ITEM(ANY, sizeof(struct rte_flow_item_any)),
10699a2dd95SBruce Richardson 	MK_FLOW_ITEM(PORT_ID, sizeof(struct rte_flow_item_port_id)),
10799a2dd95SBruce Richardson 	MK_FLOW_ITEM(RAW, sizeof(struct rte_flow_item_raw)),
10899a2dd95SBruce Richardson 	MK_FLOW_ITEM(ETH, sizeof(struct rte_flow_item_eth)),
10999a2dd95SBruce Richardson 	MK_FLOW_ITEM(VLAN, sizeof(struct rte_flow_item_vlan)),
11099a2dd95SBruce Richardson 	MK_FLOW_ITEM(IPV4, sizeof(struct rte_flow_item_ipv4)),
11199a2dd95SBruce Richardson 	MK_FLOW_ITEM(IPV6, sizeof(struct rte_flow_item_ipv6)),
11299a2dd95SBruce Richardson 	MK_FLOW_ITEM(ICMP, sizeof(struct rte_flow_item_icmp)),
11399a2dd95SBruce Richardson 	MK_FLOW_ITEM(UDP, sizeof(struct rte_flow_item_udp)),
11499a2dd95SBruce Richardson 	MK_FLOW_ITEM(TCP, sizeof(struct rte_flow_item_tcp)),
11599a2dd95SBruce Richardson 	MK_FLOW_ITEM(SCTP, sizeof(struct rte_flow_item_sctp)),
11699a2dd95SBruce Richardson 	MK_FLOW_ITEM(VXLAN, sizeof(struct rte_flow_item_vxlan)),
11799a2dd95SBruce Richardson 	MK_FLOW_ITEM(E_TAG, sizeof(struct rte_flow_item_e_tag)),
11899a2dd95SBruce Richardson 	MK_FLOW_ITEM(NVGRE, sizeof(struct rte_flow_item_nvgre)),
11999a2dd95SBruce Richardson 	MK_FLOW_ITEM(MPLS, sizeof(struct rte_flow_item_mpls)),
12099a2dd95SBruce Richardson 	MK_FLOW_ITEM(GRE, sizeof(struct rte_flow_item_gre)),
12199a2dd95SBruce Richardson 	MK_FLOW_ITEM(FUZZY, sizeof(struct rte_flow_item_fuzzy)),
12299a2dd95SBruce Richardson 	MK_FLOW_ITEM(GTP, sizeof(struct rte_flow_item_gtp)),
12399a2dd95SBruce Richardson 	MK_FLOW_ITEM(GTPC, sizeof(struct rte_flow_item_gtp)),
12499a2dd95SBruce Richardson 	MK_FLOW_ITEM(GTPU, sizeof(struct rte_flow_item_gtp)),
12599a2dd95SBruce Richardson 	MK_FLOW_ITEM(ESP, sizeof(struct rte_flow_item_esp)),
12699a2dd95SBruce Richardson 	MK_FLOW_ITEM(GENEVE, sizeof(struct rte_flow_item_geneve)),
12799a2dd95SBruce Richardson 	MK_FLOW_ITEM(VXLAN_GPE, sizeof(struct rte_flow_item_vxlan_gpe)),
12899a2dd95SBruce Richardson 	MK_FLOW_ITEM(ARP_ETH_IPV4, sizeof(struct rte_flow_item_arp_eth_ipv4)),
12999a2dd95SBruce Richardson 	MK_FLOW_ITEM(IPV6_EXT, sizeof(struct rte_flow_item_ipv6_ext)),
13099a2dd95SBruce Richardson 	MK_FLOW_ITEM(IPV6_FRAG_EXT, sizeof(struct rte_flow_item_ipv6_frag_ext)),
13199a2dd95SBruce Richardson 	MK_FLOW_ITEM(ICMP6, sizeof(struct rte_flow_item_icmp6)),
132750ee81dSLeo Xu 	MK_FLOW_ITEM(ICMP6_ECHO_REQUEST, sizeof(struct rte_flow_item_icmp6_echo)),
133750ee81dSLeo Xu 	MK_FLOW_ITEM(ICMP6_ECHO_REPLY, sizeof(struct rte_flow_item_icmp6_echo)),
13499a2dd95SBruce Richardson 	MK_FLOW_ITEM(ICMP6_ND_NS, sizeof(struct rte_flow_item_icmp6_nd_ns)),
13599a2dd95SBruce Richardson 	MK_FLOW_ITEM(ICMP6_ND_NA, sizeof(struct rte_flow_item_icmp6_nd_na)),
13699a2dd95SBruce Richardson 	MK_FLOW_ITEM(ICMP6_ND_OPT, sizeof(struct rte_flow_item_icmp6_nd_opt)),
13799a2dd95SBruce Richardson 	MK_FLOW_ITEM(ICMP6_ND_OPT_SLA_ETH,
13899a2dd95SBruce Richardson 		     sizeof(struct rte_flow_item_icmp6_nd_opt_sla_eth)),
13999a2dd95SBruce Richardson 	MK_FLOW_ITEM(ICMP6_ND_OPT_TLA_ETH,
14099a2dd95SBruce Richardson 		     sizeof(struct rte_flow_item_icmp6_nd_opt_tla_eth)),
14199a2dd95SBruce Richardson 	MK_FLOW_ITEM(MARK, sizeof(struct rte_flow_item_mark)),
14299a2dd95SBruce Richardson 	MK_FLOW_ITEM(META, sizeof(struct rte_flow_item_meta)),
143738ef8f7SMichael Baum 	MK_FLOW_ITEM(RANDOM, sizeof(struct rte_flow_item_random)),
14499a2dd95SBruce Richardson 	MK_FLOW_ITEM(TAG, sizeof(struct rte_flow_item_tag)),
14599a2dd95SBruce Richardson 	MK_FLOW_ITEM(GRE_KEY, sizeof(rte_be32_t)),
146f61490bdSSean Zhang 	MK_FLOW_ITEM(GRE_OPTION, sizeof(struct rte_flow_item_gre_opt)),
14799a2dd95SBruce Richardson 	MK_FLOW_ITEM(GTP_PSC, sizeof(struct rte_flow_item_gtp_psc)),
14899a2dd95SBruce Richardson 	MK_FLOW_ITEM(PPPOES, sizeof(struct rte_flow_item_pppoe)),
14999a2dd95SBruce Richardson 	MK_FLOW_ITEM(PPPOED, sizeof(struct rte_flow_item_pppoe)),
15099a2dd95SBruce Richardson 	MK_FLOW_ITEM(PPPOE_PROTO_ID,
15199a2dd95SBruce Richardson 			sizeof(struct rte_flow_item_pppoe_proto_id)),
15299a2dd95SBruce Richardson 	MK_FLOW_ITEM(NSH, sizeof(struct rte_flow_item_nsh)),
15399a2dd95SBruce Richardson 	MK_FLOW_ITEM(IGMP, sizeof(struct rte_flow_item_igmp)),
15499a2dd95SBruce Richardson 	MK_FLOW_ITEM(AH, sizeof(struct rte_flow_item_ah)),
15599a2dd95SBruce Richardson 	MK_FLOW_ITEM(HIGIG2, sizeof(struct rte_flow_item_higig2_hdr)),
15699a2dd95SBruce Richardson 	MK_FLOW_ITEM(L2TPV3OIP, sizeof(struct rte_flow_item_l2tpv3oip)),
15799a2dd95SBruce Richardson 	MK_FLOW_ITEM(PFCP, sizeof(struct rte_flow_item_pfcp)),
15899a2dd95SBruce Richardson 	MK_FLOW_ITEM(ECPRI, sizeof(struct rte_flow_item_ecpri)),
15999a2dd95SBruce Richardson 	MK_FLOW_ITEM(GENEVE_OPT, sizeof(struct rte_flow_item_geneve_opt)),
1601d0b9c7dSGregory Etelson 	MK_FLOW_ITEM(INTEGRITY, sizeof(struct rte_flow_item_integrity)),
1619847fd12SBing Zhao 	MK_FLOW_ITEM(CONNTRACK, sizeof(uint32_t)),
162081e42daSIvan Malov 	MK_FLOW_ITEM(PORT_REPRESENTOR, sizeof(struct rte_flow_item_ethdev)),
16349863ae2SIvan Malov 	MK_FLOW_ITEM(REPRESENTED_PORT, sizeof(struct rte_flow_item_ethdev)),
164dc4d860eSViacheslav Ovsiienko 	MK_FLOW_ITEM_FN(FLEX, sizeof(struct rte_flow_item_flex),
165dc4d860eSViacheslav Ovsiienko 			rte_flow_item_flex_conv),
1663a929df1SJie Wang 	MK_FLOW_ITEM(L2TPV2, sizeof(struct rte_flow_item_l2tpv2)),
1673a929df1SJie Wang 	MK_FLOW_ITEM(PPP, sizeof(struct rte_flow_item_ppp)),
1683af7a4afSAlexander Kozyrev 	MK_FLOW_ITEM(METER_COLOR, sizeof(struct rte_flow_item_meter_color)),
169be944d46SRongwei Liu 	MK_FLOW_ITEM(IPV6_ROUTING_EXT, sizeof(struct rte_flow_item_ipv6_routing_ext)),
1703e3edab5SGregory Etelson 	MK_FLOW_ITEM(QUOTA, sizeof(struct rte_flow_item_quota)),
1718ebc396bSJiawei Wang 	MK_FLOW_ITEM(AGGR_AFFINITY, sizeof(struct rte_flow_item_aggr_affinity)),
17241f6bdc7SKiran Kumar K 	MK_FLOW_ITEM(TX_QUEUE, sizeof(struct rte_flow_item_tx_queue)),
173e9b8532eSDong Zhou 	MK_FLOW_ITEM(IB_BTH, sizeof(struct rte_flow_item_ib_bth)),
174a3d2c697SAlexander Kozyrev 	MK_FLOW_ITEM(PTYPE, sizeof(struct rte_flow_item_ptype)),
17558143b7bSSuanming Mou 	MK_FLOW_ITEM(COMPARE, sizeof(struct rte_flow_item_compare)),
17699a2dd95SBruce Richardson };
17799a2dd95SBruce Richardson 
17899a2dd95SBruce Richardson /** Generate flow_action[] entry. */
17999a2dd95SBruce Richardson #define MK_FLOW_ACTION(t, s) \
18099a2dd95SBruce Richardson 	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
18199a2dd95SBruce Richardson 		.name = # t, \
18299a2dd95SBruce Richardson 		.size = s, \
1836cf72047SGregory Etelson 		.desc_fn = NULL,\
18499a2dd95SBruce Richardson 	}
18599a2dd95SBruce Richardson 
1866cf72047SGregory Etelson #define MK_FLOW_ACTION_FN(t, fn) \
1876cf72047SGregory Etelson 	[RTE_FLOW_ACTION_TYPE_ ## t] = { \
1886cf72047SGregory Etelson 		.name = # t, \
1896cf72047SGregory Etelson 		.size = 0, \
1906cf72047SGregory Etelson 		.desc_fn = fn,\
1916cf72047SGregory Etelson 	}
1926cf72047SGregory Etelson 
1936cf72047SGregory Etelson 
19499a2dd95SBruce Richardson /** Information about known flow actions. */
19599a2dd95SBruce Richardson static const struct rte_flow_desc_data rte_flow_desc_action[] = {
19699a2dd95SBruce Richardson 	MK_FLOW_ACTION(END, 0),
19799a2dd95SBruce Richardson 	MK_FLOW_ACTION(VOID, 0),
19899a2dd95SBruce Richardson 	MK_FLOW_ACTION(PASSTHRU, 0),
19999a2dd95SBruce Richardson 	MK_FLOW_ACTION(JUMP, sizeof(struct rte_flow_action_jump)),
20099a2dd95SBruce Richardson 	MK_FLOW_ACTION(MARK, sizeof(struct rte_flow_action_mark)),
20199a2dd95SBruce Richardson 	MK_FLOW_ACTION(FLAG, 0),
20299a2dd95SBruce Richardson 	MK_FLOW_ACTION(QUEUE, sizeof(struct rte_flow_action_queue)),
20399a2dd95SBruce Richardson 	MK_FLOW_ACTION(DROP, 0),
20499a2dd95SBruce Richardson 	MK_FLOW_ACTION(COUNT, sizeof(struct rte_flow_action_count)),
20599a2dd95SBruce Richardson 	MK_FLOW_ACTION(RSS, sizeof(struct rte_flow_action_rss)),
20699a2dd95SBruce Richardson 	MK_FLOW_ACTION(PF, 0),
20799a2dd95SBruce Richardson 	MK_FLOW_ACTION(VF, sizeof(struct rte_flow_action_vf)),
20899a2dd95SBruce Richardson 	MK_FLOW_ACTION(PORT_ID, sizeof(struct rte_flow_action_port_id)),
20999a2dd95SBruce Richardson 	MK_FLOW_ACTION(METER, sizeof(struct rte_flow_action_meter)),
21099a2dd95SBruce Richardson 	MK_FLOW_ACTION(SECURITY, sizeof(struct rte_flow_action_security)),
21199a2dd95SBruce Richardson 	MK_FLOW_ACTION(OF_DEC_NW_TTL, 0),
21299a2dd95SBruce Richardson 	MK_FLOW_ACTION(OF_POP_VLAN, 0),
21399a2dd95SBruce Richardson 	MK_FLOW_ACTION(OF_PUSH_VLAN,
21499a2dd95SBruce Richardson 		       sizeof(struct rte_flow_action_of_push_vlan)),
21599a2dd95SBruce Richardson 	MK_FLOW_ACTION(OF_SET_VLAN_VID,
21699a2dd95SBruce Richardson 		       sizeof(struct rte_flow_action_of_set_vlan_vid)),
21799a2dd95SBruce Richardson 	MK_FLOW_ACTION(OF_SET_VLAN_PCP,
21899a2dd95SBruce Richardson 		       sizeof(struct rte_flow_action_of_set_vlan_pcp)),
21999a2dd95SBruce Richardson 	MK_FLOW_ACTION(OF_POP_MPLS,
22099a2dd95SBruce Richardson 		       sizeof(struct rte_flow_action_of_pop_mpls)),
22199a2dd95SBruce Richardson 	MK_FLOW_ACTION(OF_PUSH_MPLS,
22299a2dd95SBruce Richardson 		       sizeof(struct rte_flow_action_of_push_mpls)),
22399a2dd95SBruce Richardson 	MK_FLOW_ACTION(VXLAN_ENCAP, sizeof(struct rte_flow_action_vxlan_encap)),
22499a2dd95SBruce Richardson 	MK_FLOW_ACTION(VXLAN_DECAP, 0),
225f59d78b5SSunyang Wu 	MK_FLOW_ACTION(NVGRE_ENCAP, sizeof(struct rte_flow_action_nvgre_encap)),
22699a2dd95SBruce Richardson 	MK_FLOW_ACTION(NVGRE_DECAP, 0),
22799a2dd95SBruce Richardson 	MK_FLOW_ACTION(RAW_ENCAP, sizeof(struct rte_flow_action_raw_encap)),
22899a2dd95SBruce Richardson 	MK_FLOW_ACTION(RAW_DECAP, sizeof(struct rte_flow_action_raw_decap)),
22999a2dd95SBruce Richardson 	MK_FLOW_ACTION(SET_IPV4_SRC,
23099a2dd95SBruce Richardson 		       sizeof(struct rte_flow_action_set_ipv4)),
23199a2dd95SBruce Richardson 	MK_FLOW_ACTION(SET_IPV4_DST,
23299a2dd95SBruce Richardson 		       sizeof(struct rte_flow_action_set_ipv4)),
23399a2dd95SBruce Richardson 	MK_FLOW_ACTION(SET_IPV6_SRC,
23499a2dd95SBruce Richardson 		       sizeof(struct rte_flow_action_set_ipv6)),
23599a2dd95SBruce Richardson 	MK_FLOW_ACTION(SET_IPV6_DST,
23699a2dd95SBruce Richardson 		       sizeof(struct rte_flow_action_set_ipv6)),
23799a2dd95SBruce Richardson 	MK_FLOW_ACTION(SET_TP_SRC,
23899a2dd95SBruce Richardson 		       sizeof(struct rte_flow_action_set_tp)),
23999a2dd95SBruce Richardson 	MK_FLOW_ACTION(SET_TP_DST,
24099a2dd95SBruce Richardson 		       sizeof(struct rte_flow_action_set_tp)),
24199a2dd95SBruce Richardson 	MK_FLOW_ACTION(MAC_SWAP, 0),
24299a2dd95SBruce Richardson 	MK_FLOW_ACTION(DEC_TTL, 0),
24399a2dd95SBruce Richardson 	MK_FLOW_ACTION(SET_TTL, sizeof(struct rte_flow_action_set_ttl)),
24499a2dd95SBruce Richardson 	MK_FLOW_ACTION(SET_MAC_SRC, sizeof(struct rte_flow_action_set_mac)),
24599a2dd95SBruce Richardson 	MK_FLOW_ACTION(SET_MAC_DST, sizeof(struct rte_flow_action_set_mac)),
24699a2dd95SBruce Richardson 	MK_FLOW_ACTION(INC_TCP_SEQ, sizeof(rte_be32_t)),
24799a2dd95SBruce Richardson 	MK_FLOW_ACTION(DEC_TCP_SEQ, sizeof(rte_be32_t)),
24899a2dd95SBruce Richardson 	MK_FLOW_ACTION(INC_TCP_ACK, sizeof(rte_be32_t)),
24999a2dd95SBruce Richardson 	MK_FLOW_ACTION(DEC_TCP_ACK, sizeof(rte_be32_t)),
25099a2dd95SBruce Richardson 	MK_FLOW_ACTION(SET_TAG, sizeof(struct rte_flow_action_set_tag)),
25199a2dd95SBruce Richardson 	MK_FLOW_ACTION(SET_META, sizeof(struct rte_flow_action_set_meta)),
25299a2dd95SBruce Richardson 	MK_FLOW_ACTION(SET_IPV4_DSCP, sizeof(struct rte_flow_action_set_dscp)),
25399a2dd95SBruce Richardson 	MK_FLOW_ACTION(SET_IPV6_DSCP, sizeof(struct rte_flow_action_set_dscp)),
25499a2dd95SBruce Richardson 	MK_FLOW_ACTION(AGE, sizeof(struct rte_flow_action_age)),
25599a2dd95SBruce Richardson 	MK_FLOW_ACTION(SAMPLE, sizeof(struct rte_flow_action_sample)),
25699a2dd95SBruce Richardson 	MK_FLOW_ACTION(MODIFY_FIELD,
25799a2dd95SBruce Richardson 		       sizeof(struct rte_flow_action_modify_field)),
25899a2dd95SBruce Richardson 	/**
2594b61b877SBing Zhao 	 * Indirect action represented as a handle of type
2604b61b877SBing Zhao 	 * (struct rte_flow_action_handle *) stored in the conf field (see
26199a2dd95SBruce Richardson 	 * struct rte_flow_action); no additional structure is needed to store
2624b61b877SBing Zhao 	 * the indirect action handle.
26399a2dd95SBruce Richardson 	 */
2644b61b877SBing Zhao 	MK_FLOW_ACTION(INDIRECT, 0),
2659847fd12SBing Zhao 	MK_FLOW_ACTION(CONNTRACK, sizeof(struct rte_flow_action_conntrack)),
2668edb6bc0SIvan Malov 	MK_FLOW_ACTION(PORT_REPRESENTOR, sizeof(struct rte_flow_action_ethdev)),
26788caad25SIvan Malov 	MK_FLOW_ACTION(REPRESENTED_PORT, sizeof(struct rte_flow_action_ethdev)),
2689c4a0c18SAlexander Kozyrev 	MK_FLOW_ACTION(METER_MARK, sizeof(struct rte_flow_action_meter_mark)),
2696838dd4bSMichael Savisko 	MK_FLOW_ACTION(SEND_TO_KERNEL, 0),
2703e3edab5SGregory Etelson 	MK_FLOW_ACTION(QUOTA, sizeof(struct rte_flow_action_quota)),
271fad1e8f5SRongwei Liu 	MK_FLOW_ACTION(IPV6_EXT_PUSH, sizeof(struct rte_flow_action_ipv6_ext_push)),
272fad1e8f5SRongwei Liu 	MK_FLOW_ACTION(IPV6_EXT_REMOVE, sizeof(struct rte_flow_action_ipv6_ext_remove)),
27372a3dec7SGregory Etelson 	MK_FLOW_ACTION(INDIRECT_LIST,
27472a3dec7SGregory Etelson 		       sizeof(struct rte_flow_action_indirect_list)),
275b6c3089dSWenjing Qiao 	MK_FLOW_ACTION(PROG,
276b6c3089dSWenjing Qiao 		       sizeof(struct rte_flow_action_prog)),
277eb704df7SBing Zhao 	MK_FLOW_ACTION(NAT64, sizeof(struct rte_flow_action_nat64)),
2782c52a2b3SAlexander Kozyrev 	MK_FLOW_ACTION(JUMP_TO_TABLE_INDEX, sizeof(struct rte_flow_action_jump_to_table_index)),
27999a2dd95SBruce Richardson };
28099a2dd95SBruce Richardson 
28199a2dd95SBruce Richardson int
28299a2dd95SBruce Richardson rte_flow_dynf_metadata_register(void)
28399a2dd95SBruce Richardson {
28499a2dd95SBruce Richardson 	int offset;
28599a2dd95SBruce Richardson 	int flag;
28699a2dd95SBruce Richardson 
28799a2dd95SBruce Richardson 	static const struct rte_mbuf_dynfield desc_offs = {
28899a2dd95SBruce Richardson 		.name = RTE_MBUF_DYNFIELD_METADATA_NAME,
28999a2dd95SBruce Richardson 		.size = sizeof(uint32_t),
29008966fe7STyler Retzlaff 		.align = alignof(uint32_t),
29199a2dd95SBruce Richardson 	};
29299a2dd95SBruce Richardson 	static const struct rte_mbuf_dynflag desc_flag = {
29399a2dd95SBruce Richardson 		.name = RTE_MBUF_DYNFLAG_METADATA_NAME,
29499a2dd95SBruce Richardson 	};
29599a2dd95SBruce Richardson 
29699a2dd95SBruce Richardson 	offset = rte_mbuf_dynfield_register(&desc_offs);
29799a2dd95SBruce Richardson 	if (offset < 0)
29899a2dd95SBruce Richardson 		goto error;
29999a2dd95SBruce Richardson 	flag = rte_mbuf_dynflag_register(&desc_flag);
30099a2dd95SBruce Richardson 	if (flag < 0)
30199a2dd95SBruce Richardson 		goto error;
30299a2dd95SBruce Richardson 	rte_flow_dynf_metadata_offs = offset;
303e1823e08SThomas Monjalon 	rte_flow_dynf_metadata_mask = RTE_BIT64(flag);
304ed04fd40SAnkur Dwivedi 
305ed04fd40SAnkur Dwivedi 	rte_flow_trace_dynf_metadata_register(offset, RTE_BIT64(flag));
306ed04fd40SAnkur Dwivedi 
30799a2dd95SBruce Richardson 	return 0;
30899a2dd95SBruce Richardson 
30999a2dd95SBruce Richardson error:
31099a2dd95SBruce Richardson 	rte_flow_dynf_metadata_offs = -1;
311e1823e08SThomas Monjalon 	rte_flow_dynf_metadata_mask = UINT64_C(0);
31299a2dd95SBruce Richardson 	return -rte_errno;
31399a2dd95SBruce Richardson }
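
/*
 * Illustrative sketch, not part of the original sources: how an application
 * is expected to consume the dynamic metadata field registered above.  The
 * helper name is hypothetical, and dereferencing m->ol_flags assumes
 * <rte_mbuf.h> is visible to the caller.
 */
static __rte_unused int
example_read_rx_metadata(const struct rte_mbuf *m, uint32_t *metadata)
{
	/* rte_flow_dynf_metadata_register() must have been called once. */
	if (rte_flow_dynf_metadata_offs < 0)
		return -EINVAL;
	/* The dynamic flag tells whether this particular mbuf carries it. */
	if (!(m->ol_flags & rte_flow_dynf_metadata_mask))
		return -ENOENT;
	*metadata = *RTE_MBUF_DYNFIELD(m, rte_flow_dynf_metadata_offs,
				       uint32_t *);
	return 0;
}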
31499a2dd95SBruce Richardson 
31599a2dd95SBruce Richardson static inline void
31699a2dd95SBruce Richardson fts_enter(struct rte_eth_dev *dev)
31799a2dd95SBruce Richardson {
31899a2dd95SBruce Richardson 	if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
31999a2dd95SBruce Richardson 		pthread_mutex_lock(&dev->data->flow_ops_mutex);
32099a2dd95SBruce Richardson }
32199a2dd95SBruce Richardson 
32299a2dd95SBruce Richardson static inline void
32399a2dd95SBruce Richardson fts_exit(struct rte_eth_dev *dev)
32499a2dd95SBruce Richardson {
32599a2dd95SBruce Richardson 	if (!(dev->data->dev_flags & RTE_ETH_DEV_FLOW_OPS_THREAD_SAFE))
32699a2dd95SBruce Richardson 		pthread_mutex_unlock(&dev->data->flow_ops_mutex);
32799a2dd95SBruce Richardson }
32899a2dd95SBruce Richardson 
32999a2dd95SBruce Richardson static int
33099a2dd95SBruce Richardson flow_err(uint16_t port_id, int ret, struct rte_flow_error *error)
33199a2dd95SBruce Richardson {
33299a2dd95SBruce Richardson 	if (ret == 0)
33399a2dd95SBruce Richardson 		return 0;
33499a2dd95SBruce Richardson 	if (rte_eth_dev_is_removed(port_id))
33599a2dd95SBruce Richardson 		return rte_flow_error_set(error, EIO,
33699a2dd95SBruce Richardson 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
33799a2dd95SBruce Richardson 					  NULL, rte_strerror(EIO));
33899a2dd95SBruce Richardson 	return ret;
33999a2dd95SBruce Richardson }
34099a2dd95SBruce Richardson 
34199a2dd95SBruce Richardson /* Get generic flow operations structure from a port. */
34299a2dd95SBruce Richardson const struct rte_flow_ops *
34399a2dd95SBruce Richardson rte_flow_ops_get(uint16_t port_id, struct rte_flow_error *error)
34499a2dd95SBruce Richardson {
34599a2dd95SBruce Richardson 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
34699a2dd95SBruce Richardson 	const struct rte_flow_ops *ops;
34799a2dd95SBruce Richardson 	int code;
34899a2dd95SBruce Richardson 
34999a2dd95SBruce Richardson 	if (unlikely(!rte_eth_dev_is_valid_port(port_id)))
35099a2dd95SBruce Richardson 		code = ENODEV;
35199a2dd95SBruce Richardson 	else if (unlikely(dev->dev_ops->flow_ops_get == NULL))
35299a2dd95SBruce Richardson 		/* flow API not supported with this driver dev_ops */
35399a2dd95SBruce Richardson 		code = ENOSYS;
35499a2dd95SBruce Richardson 	else
35599a2dd95SBruce Richardson 		code = dev->dev_ops->flow_ops_get(dev, &ops);
35699a2dd95SBruce Richardson 	if (code == 0 && ops == NULL)
35799a2dd95SBruce Richardson 		/* flow API not supported with this device */
35899a2dd95SBruce Richardson 		code = ENOSYS;
35999a2dd95SBruce Richardson 
36099a2dd95SBruce Richardson 	if (code != 0) {
36199a2dd95SBruce Richardson 		rte_flow_error_set(error, code, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
36299a2dd95SBruce Richardson 				   NULL, rte_strerror(code));
36399a2dd95SBruce Richardson 		return NULL;
36499a2dd95SBruce Richardson 	}
36599a2dd95SBruce Richardson 	return ops;
36699a2dd95SBruce Richardson }
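
/*
 * Illustrative sketch, not part of the original sources: the driver side of
 * the lookup above.  A PMD keeps a static struct rte_flow_ops and returns it
 * from the flow_ops_get callback of its eth_dev_ops; every "example_" name
 * is hypothetical and the callbacks are left NULL for brevity.
 */
static __rte_unused int
example_pmd_flow_ops_get(struct rte_eth_dev *dev __rte_unused,
			 const struct rte_flow_ops **ops)
{
	/* Unimplemented callbacks make the generic layer report ENOSYS. */
	static const struct rte_flow_ops example_flow_ops = {
		.validate = NULL,
		.create = NULL,
		.destroy = NULL,
		.flush = NULL,
	};

	*ops = &example_flow_ops;
	return 0;
}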
36799a2dd95SBruce Richardson 
36899a2dd95SBruce Richardson /* Check whether a flow rule can be created on a given port. */
36999a2dd95SBruce Richardson int
37099a2dd95SBruce Richardson rte_flow_validate(uint16_t port_id,
37199a2dd95SBruce Richardson 		  const struct rte_flow_attr *attr,
37299a2dd95SBruce Richardson 		  const struct rte_flow_item pattern[],
37399a2dd95SBruce Richardson 		  const struct rte_flow_action actions[],
37499a2dd95SBruce Richardson 		  struct rte_flow_error *error)
37599a2dd95SBruce Richardson {
37699a2dd95SBruce Richardson 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
37799a2dd95SBruce Richardson 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
37899a2dd95SBruce Richardson 	int ret;
37999a2dd95SBruce Richardson 
380bd2a4d4bSIvan Malov 	if (likely(!!attr) && attr->transfer &&
381bd2a4d4bSIvan Malov 	    (attr->ingress || attr->egress)) {
382bd2a4d4bSIvan Malov 		return rte_flow_error_set(error, EINVAL,
383bd2a4d4bSIvan Malov 					  RTE_FLOW_ERROR_TYPE_ATTR,
384bd2a4d4bSIvan Malov 					  attr, "cannot use attr ingress/egress with attr transfer");
385bd2a4d4bSIvan Malov 	}
386bd2a4d4bSIvan Malov 
38799a2dd95SBruce Richardson 	if (unlikely(!ops))
38899a2dd95SBruce Richardson 		return -rte_errno;
38999a2dd95SBruce Richardson 	if (likely(!!ops->validate)) {
39099a2dd95SBruce Richardson 		fts_enter(dev);
39199a2dd95SBruce Richardson 		ret = ops->validate(dev, attr, pattern, actions, error);
39299a2dd95SBruce Richardson 		fts_exit(dev);
393ed04fd40SAnkur Dwivedi 		ret = flow_err(port_id, ret, error);
394ed04fd40SAnkur Dwivedi 
395ed04fd40SAnkur Dwivedi 		rte_flow_trace_validate(port_id, attr, pattern, actions, ret);
396ed04fd40SAnkur Dwivedi 
397ed04fd40SAnkur Dwivedi 		return ret;
39899a2dd95SBruce Richardson 	}
39999a2dd95SBruce Richardson 	return rte_flow_error_set(error, ENOSYS,
40099a2dd95SBruce Richardson 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
40199a2dd95SBruce Richardson 				  NULL, rte_strerror(ENOSYS));
40299a2dd95SBruce Richardson }
40399a2dd95SBruce Richardson 
40499a2dd95SBruce Richardson /* Create a flow rule on a given port. */
40599a2dd95SBruce Richardson struct rte_flow *
40699a2dd95SBruce Richardson rte_flow_create(uint16_t port_id,
40799a2dd95SBruce Richardson 		const struct rte_flow_attr *attr,
40899a2dd95SBruce Richardson 		const struct rte_flow_item pattern[],
40999a2dd95SBruce Richardson 		const struct rte_flow_action actions[],
41099a2dd95SBruce Richardson 		struct rte_flow_error *error)
41199a2dd95SBruce Richardson {
41299a2dd95SBruce Richardson 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
41399a2dd95SBruce Richardson 	struct rte_flow *flow;
41499a2dd95SBruce Richardson 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
41599a2dd95SBruce Richardson 
41699a2dd95SBruce Richardson 	if (unlikely(!ops))
41799a2dd95SBruce Richardson 		return NULL;
41899a2dd95SBruce Richardson 	if (likely(!!ops->create)) {
41999a2dd95SBruce Richardson 		fts_enter(dev);
42099a2dd95SBruce Richardson 		flow = ops->create(dev, attr, pattern, actions, error);
42199a2dd95SBruce Richardson 		fts_exit(dev);
42299a2dd95SBruce Richardson 		if (flow == NULL)
42399a2dd95SBruce Richardson 			flow_err(port_id, -rte_errno, error);
424ed04fd40SAnkur Dwivedi 
425ed04fd40SAnkur Dwivedi 		rte_flow_trace_create(port_id, attr, pattern, actions, flow);
426ed04fd40SAnkur Dwivedi 
42799a2dd95SBruce Richardson 		return flow;
42899a2dd95SBruce Richardson 	}
42999a2dd95SBruce Richardson 	rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
43099a2dd95SBruce Richardson 			   NULL, rte_strerror(ENOSYS));
43199a2dd95SBruce Richardson 	return NULL;
43299a2dd95SBruce Richardson }
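
/*
 * Illustrative sketch, not part of the original sources: minimal application
 * usage of rte_flow_validate() and rte_flow_create(), steering every
 * IPv4/UDP packet received on @p port_id to @p rx_queue.  The helper name
 * and the rule layout are assumptions made only for this example.
 */
static __rte_unused struct rte_flow *
example_create_udp_to_queue_rule(uint16_t port_id, uint16_t rx_queue,
				 struct rte_flow_error *error)
{
	const struct rte_flow_attr attr = { .ingress = 1 };
	const struct rte_flow_item pattern[] = {
		{ .type = RTE_FLOW_ITEM_TYPE_ETH },
		{ .type = RTE_FLOW_ITEM_TYPE_IPV4 },
		{ .type = RTE_FLOW_ITEM_TYPE_UDP },
		{ .type = RTE_FLOW_ITEM_TYPE_END },
	};
	const struct rte_flow_action_queue queue = { .index = rx_queue };
	const struct rte_flow_action actions[] = {
		{ .type = RTE_FLOW_ACTION_TYPE_QUEUE, .conf = &queue },
		{ .type = RTE_FLOW_ACTION_TYPE_END },
	};

	/* Check the rule first so that creation failures are unexpected. */
	if (rte_flow_validate(port_id, &attr, pattern, actions, error) != 0)
		return NULL;
	return rte_flow_create(port_id, &attr, pattern, actions, error);
}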
43399a2dd95SBruce Richardson 
43499a2dd95SBruce Richardson /* Destroy a flow rule on a given port. */
43599a2dd95SBruce Richardson int
43699a2dd95SBruce Richardson rte_flow_destroy(uint16_t port_id,
43799a2dd95SBruce Richardson 		 struct rte_flow *flow,
43899a2dd95SBruce Richardson 		 struct rte_flow_error *error)
43999a2dd95SBruce Richardson {
44099a2dd95SBruce Richardson 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
44199a2dd95SBruce Richardson 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
44299a2dd95SBruce Richardson 	int ret;
44399a2dd95SBruce Richardson 
44499a2dd95SBruce Richardson 	if (unlikely(!ops))
44599a2dd95SBruce Richardson 		return -rte_errno;
44699a2dd95SBruce Richardson 	if (likely(!!ops->destroy)) {
44799a2dd95SBruce Richardson 		fts_enter(dev);
44899a2dd95SBruce Richardson 		ret = ops->destroy(dev, flow, error);
44999a2dd95SBruce Richardson 		fts_exit(dev);
450ed04fd40SAnkur Dwivedi 		ret = flow_err(port_id, ret, error);
451ed04fd40SAnkur Dwivedi 
452ed04fd40SAnkur Dwivedi 		rte_flow_trace_destroy(port_id, flow, ret);
453ed04fd40SAnkur Dwivedi 
454ed04fd40SAnkur Dwivedi 		return ret;
45599a2dd95SBruce Richardson 	}
45699a2dd95SBruce Richardson 	return rte_flow_error_set(error, ENOSYS,
45799a2dd95SBruce Richardson 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
45899a2dd95SBruce Richardson 				  NULL, rte_strerror(ENOSYS));
45999a2dd95SBruce Richardson }
46099a2dd95SBruce Richardson 
4618f257a48SAlexander Kozyrev int
4628f257a48SAlexander Kozyrev rte_flow_actions_update(uint16_t port_id,
4638f257a48SAlexander Kozyrev 			struct rte_flow *flow,
4648f257a48SAlexander Kozyrev 			const struct rte_flow_action actions[],
4658f257a48SAlexander Kozyrev 			struct rte_flow_error *error)
4668f257a48SAlexander Kozyrev {
4678f257a48SAlexander Kozyrev 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
4688f257a48SAlexander Kozyrev 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
4698f257a48SAlexander Kozyrev 	int ret;
4708f257a48SAlexander Kozyrev 
4718f257a48SAlexander Kozyrev 	if (unlikely(!ops))
4728f257a48SAlexander Kozyrev 		return -rte_errno;
4738f257a48SAlexander Kozyrev 	if (likely(!!ops->actions_update)) {
4748f257a48SAlexander Kozyrev 		fts_enter(dev);
4758f257a48SAlexander Kozyrev 		ret = ops->actions_update(dev, flow, actions, error);
4768f257a48SAlexander Kozyrev 		fts_exit(dev);
4778f257a48SAlexander Kozyrev 
4788f257a48SAlexander Kozyrev 		rte_flow_trace_actions_update(port_id, flow, actions, ret);
4798f257a48SAlexander Kozyrev 
4808f257a48SAlexander Kozyrev 		return flow_err(port_id, ret, error);
4818f257a48SAlexander Kozyrev 	}
4828f257a48SAlexander Kozyrev 	return rte_flow_error_set(error, ENOSYS,
4838f257a48SAlexander Kozyrev 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
4848f257a48SAlexander Kozyrev 				  NULL, rte_strerror(ENOSYS));
4858f257a48SAlexander Kozyrev }
4868f257a48SAlexander Kozyrev 
48799a2dd95SBruce Richardson /* Destroy all flow rules associated with a port. */
48899a2dd95SBruce Richardson int
48999a2dd95SBruce Richardson rte_flow_flush(uint16_t port_id,
49099a2dd95SBruce Richardson 	       struct rte_flow_error *error)
49199a2dd95SBruce Richardson {
49299a2dd95SBruce Richardson 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
49399a2dd95SBruce Richardson 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
49499a2dd95SBruce Richardson 	int ret;
49599a2dd95SBruce Richardson 
49699a2dd95SBruce Richardson 	if (unlikely(!ops))
49799a2dd95SBruce Richardson 		return -rte_errno;
49899a2dd95SBruce Richardson 	if (likely(!!ops->flush)) {
49999a2dd95SBruce Richardson 		fts_enter(dev);
50099a2dd95SBruce Richardson 		ret = ops->flush(dev, error);
50199a2dd95SBruce Richardson 		fts_exit(dev);
502ed04fd40SAnkur Dwivedi 		ret = flow_err(port_id, ret, error);
503ed04fd40SAnkur Dwivedi 
504ed04fd40SAnkur Dwivedi 		rte_flow_trace_flush(port_id, ret);
505ed04fd40SAnkur Dwivedi 
506ed04fd40SAnkur Dwivedi 		return ret;
50799a2dd95SBruce Richardson 	}
50899a2dd95SBruce Richardson 	return rte_flow_error_set(error, ENOSYS,
50999a2dd95SBruce Richardson 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
51099a2dd95SBruce Richardson 				  NULL, rte_strerror(ENOSYS));
51199a2dd95SBruce Richardson }
51299a2dd95SBruce Richardson 
51399a2dd95SBruce Richardson /* Query an existing flow rule. */
51499a2dd95SBruce Richardson int
51599a2dd95SBruce Richardson rte_flow_query(uint16_t port_id,
51699a2dd95SBruce Richardson 	       struct rte_flow *flow,
51799a2dd95SBruce Richardson 	       const struct rte_flow_action *action,
51899a2dd95SBruce Richardson 	       void *data,
51999a2dd95SBruce Richardson 	       struct rte_flow_error *error)
52099a2dd95SBruce Richardson {
52199a2dd95SBruce Richardson 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
52299a2dd95SBruce Richardson 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
52399a2dd95SBruce Richardson 	int ret;
52499a2dd95SBruce Richardson 
52599a2dd95SBruce Richardson 	if (!ops)
52699a2dd95SBruce Richardson 		return -rte_errno;
52799a2dd95SBruce Richardson 	if (likely(!!ops->query)) {
52899a2dd95SBruce Richardson 		fts_enter(dev);
52999a2dd95SBruce Richardson 		ret = ops->query(dev, flow, action, data, error);
53099a2dd95SBruce Richardson 		fts_exit(dev);
531ed04fd40SAnkur Dwivedi 		ret = flow_err(port_id, ret, error);
532ed04fd40SAnkur Dwivedi 
533ed04fd40SAnkur Dwivedi 		rte_flow_trace_query(port_id, flow, action, data, ret);
534ed04fd40SAnkur Dwivedi 
535ed04fd40SAnkur Dwivedi 		return ret;
53699a2dd95SBruce Richardson 	}
53799a2dd95SBruce Richardson 	return rte_flow_error_set(error, ENOSYS,
53899a2dd95SBruce Richardson 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
53999a2dd95SBruce Richardson 				  NULL, rte_strerror(ENOSYS));
54099a2dd95SBruce Richardson }
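
/*
 * Illustrative sketch, not part of the original sources: querying the COUNT
 * action of an existing rule through rte_flow_query().  It assumes @p flow
 * was created with a RTE_FLOW_ACTION_TYPE_COUNT action; the helper name is
 * hypothetical.
 */
static __rte_unused int
example_query_flow_hits(uint16_t port_id, struct rte_flow *flow,
			uint64_t *hits, struct rte_flow_error *error)
{
	const struct rte_flow_action count_action = {
		.type = RTE_FLOW_ACTION_TYPE_COUNT,
	};
	struct rte_flow_query_count counters = { .reset = 0 };
	int ret;

	ret = rte_flow_query(port_id, flow, &count_action, &counters, error);
	if (ret == 0)
		*hits = counters.hits;
	return ret;
}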
54199a2dd95SBruce Richardson 
54299a2dd95SBruce Richardson /* Restrict ingress traffic to the defined flow rules. */
54399a2dd95SBruce Richardson int
54499a2dd95SBruce Richardson rte_flow_isolate(uint16_t port_id,
54599a2dd95SBruce Richardson 		 int set,
54699a2dd95SBruce Richardson 		 struct rte_flow_error *error)
54799a2dd95SBruce Richardson {
54899a2dd95SBruce Richardson 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
54999a2dd95SBruce Richardson 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
55099a2dd95SBruce Richardson 	int ret;
55199a2dd95SBruce Richardson 
55299a2dd95SBruce Richardson 	if (!ops)
55399a2dd95SBruce Richardson 		return -rte_errno;
55499a2dd95SBruce Richardson 	if (likely(!!ops->isolate)) {
55599a2dd95SBruce Richardson 		fts_enter(dev);
55699a2dd95SBruce Richardson 		ret = ops->isolate(dev, set, error);
55799a2dd95SBruce Richardson 		fts_exit(dev);
558ed04fd40SAnkur Dwivedi 		ret = flow_err(port_id, ret, error);
559ed04fd40SAnkur Dwivedi 
560ed04fd40SAnkur Dwivedi 		rte_flow_trace_isolate(port_id, set, ret);
561ed04fd40SAnkur Dwivedi 
562ed04fd40SAnkur Dwivedi 		return ret;
56399a2dd95SBruce Richardson 	}
56499a2dd95SBruce Richardson 	return rte_flow_error_set(error, ENOSYS,
56599a2dd95SBruce Richardson 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
56699a2dd95SBruce Richardson 				  NULL, rte_strerror(ENOSYS));
56799a2dd95SBruce Richardson }
56899a2dd95SBruce Richardson 
56999a2dd95SBruce Richardson /* Initialize flow error structure. */
57099a2dd95SBruce Richardson int
57199a2dd95SBruce Richardson rte_flow_error_set(struct rte_flow_error *error,
57299a2dd95SBruce Richardson 		   int code,
57399a2dd95SBruce Richardson 		   enum rte_flow_error_type type,
57499a2dd95SBruce Richardson 		   const void *cause,
57599a2dd95SBruce Richardson 		   const char *message)
57699a2dd95SBruce Richardson {
57799a2dd95SBruce Richardson 	if (error) {
57899a2dd95SBruce Richardson 		*error = (struct rte_flow_error){
57999a2dd95SBruce Richardson 			.type = type,
58099a2dd95SBruce Richardson 			.cause = cause,
58199a2dd95SBruce Richardson 			.message = message,
58299a2dd95SBruce Richardson 		};
58399a2dd95SBruce Richardson 	}
58499a2dd95SBruce Richardson 	rte_errno = code;
58599a2dd95SBruce Richardson 	return -code;
58699a2dd95SBruce Richardson }
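
/*
 * Illustrative sketch, not part of the original sources: how a PMD callback
 * is expected to report failures through rte_flow_error_set(), returning a
 * negative errno value while filling the caller-provided error structure.
 * The callback name and the rejected attribute are arbitrary examples.
 */
static __rte_unused int
example_pmd_validate(struct rte_eth_dev *dev __rte_unused,
		     const struct rte_flow_attr *attr,
		     const struct rte_flow_item pattern[] __rte_unused,
		     const struct rte_flow_action actions[] __rte_unused,
		     struct rte_flow_error *error)
{
	if (attr->egress)
		return rte_flow_error_set(error, ENOTSUP,
					  RTE_FLOW_ERROR_TYPE_ATTR_EGRESS,
					  attr, "egress rules not supported");
	return 0;
}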
58799a2dd95SBruce Richardson 
58899a2dd95SBruce Richardson /** Pattern item specification types. */
58999a2dd95SBruce Richardson enum rte_flow_conv_item_spec_type {
59099a2dd95SBruce Richardson 	RTE_FLOW_CONV_ITEM_SPEC,
59199a2dd95SBruce Richardson 	RTE_FLOW_CONV_ITEM_LAST,
59299a2dd95SBruce Richardson 	RTE_FLOW_CONV_ITEM_MASK,
59399a2dd95SBruce Richardson };
59499a2dd95SBruce Richardson 
59599a2dd95SBruce Richardson /**
59699a2dd95SBruce Richardson  * Copy pattern item specification.
59799a2dd95SBruce Richardson  *
59899a2dd95SBruce Richardson  * @param[out] buf
59999a2dd95SBruce Richardson  *   Output buffer. Can be NULL if @p size is zero.
60099a2dd95SBruce Richardson  * @param size
60199a2dd95SBruce Richardson  *   Size of @p buf in bytes.
60299a2dd95SBruce Richardson  * @param[in] item
60399a2dd95SBruce Richardson  *   Pattern item to copy specification from.
60499a2dd95SBruce Richardson  * @param type
60599a2dd95SBruce Richardson  *   Specification selector for either @p spec, @p last or @p mask.
60699a2dd95SBruce Richardson  *
60799a2dd95SBruce Richardson  * @return
60899a2dd95SBruce Richardson  *   Number of bytes needed to store pattern item specification regardless
60999a2dd95SBruce Richardson  *   of @p size. @p buf contents are truncated to @p size if not large
61099a2dd95SBruce Richardson  *   enough.
61199a2dd95SBruce Richardson  */
61299a2dd95SBruce Richardson static size_t
61399a2dd95SBruce Richardson rte_flow_conv_item_spec(void *buf, const size_t size,
61499a2dd95SBruce Richardson 			const struct rte_flow_item *item,
61599a2dd95SBruce Richardson 			enum rte_flow_conv_item_spec_type type)
61699a2dd95SBruce Richardson {
61799a2dd95SBruce Richardson 	size_t off;
61899a2dd95SBruce Richardson 	const void *data =
61999a2dd95SBruce Richardson 		type == RTE_FLOW_CONV_ITEM_SPEC ? item->spec :
62099a2dd95SBruce Richardson 		type == RTE_FLOW_CONV_ITEM_LAST ? item->last :
62199a2dd95SBruce Richardson 		type == RTE_FLOW_CONV_ITEM_MASK ? item->mask :
62299a2dd95SBruce Richardson 		NULL;
62399a2dd95SBruce Richardson 
62499a2dd95SBruce Richardson 	switch (item->type) {
62599a2dd95SBruce Richardson 		union {
62699a2dd95SBruce Richardson 			const struct rte_flow_item_raw *raw;
627841a0445SMichael Baum 			const struct rte_flow_item_geneve_opt *geneve_opt;
62899a2dd95SBruce Richardson 		} spec;
62999a2dd95SBruce Richardson 		union {
63099a2dd95SBruce Richardson 			const struct rte_flow_item_raw *raw;
63199a2dd95SBruce Richardson 		} last;
63299a2dd95SBruce Richardson 		union {
63399a2dd95SBruce Richardson 			const struct rte_flow_item_raw *raw;
63499a2dd95SBruce Richardson 		} mask;
63599a2dd95SBruce Richardson 		union {
63699a2dd95SBruce Richardson 			const struct rte_flow_item_raw *raw;
637841a0445SMichael Baum 			const struct rte_flow_item_geneve_opt *geneve_opt;
63899a2dd95SBruce Richardson 		} src;
63999a2dd95SBruce Richardson 		union {
64099a2dd95SBruce Richardson 			struct rte_flow_item_raw *raw;
641841a0445SMichael Baum 			struct rte_flow_item_geneve_opt *geneve_opt;
64299a2dd95SBruce Richardson 		} dst;
643841a0445SMichael Baum 		void *deep_src;
64499a2dd95SBruce Richardson 		size_t tmp;
64599a2dd95SBruce Richardson 
64699a2dd95SBruce Richardson 	case RTE_FLOW_ITEM_TYPE_RAW:
64799a2dd95SBruce Richardson 		spec.raw = item->spec;
64899a2dd95SBruce Richardson 		last.raw = item->last ? item->last : item->spec;
64999a2dd95SBruce Richardson 		mask.raw = item->mask ? item->mask : &rte_flow_item_raw_mask;
65099a2dd95SBruce Richardson 		src.raw = data;
65199a2dd95SBruce Richardson 		dst.raw = buf;
65299a2dd95SBruce Richardson 		rte_memcpy(dst.raw,
65399a2dd95SBruce Richardson 			   (&(struct rte_flow_item_raw){
65499a2dd95SBruce Richardson 				.relative = src.raw->relative,
65599a2dd95SBruce Richardson 				.search = src.raw->search,
65699a2dd95SBruce Richardson 				.reserved = src.raw->reserved,
65799a2dd95SBruce Richardson 				.offset = src.raw->offset,
65899a2dd95SBruce Richardson 				.limit = src.raw->limit,
65999a2dd95SBruce Richardson 				.length = src.raw->length,
66099a2dd95SBruce Richardson 			   }),
66199a2dd95SBruce Richardson 			   size > sizeof(*dst.raw) ? sizeof(*dst.raw) : size);
66299a2dd95SBruce Richardson 		off = sizeof(*dst.raw);
66399a2dd95SBruce Richardson 		if (type == RTE_FLOW_CONV_ITEM_SPEC ||
66499a2dd95SBruce Richardson 		    (type == RTE_FLOW_CONV_ITEM_MASK &&
66599a2dd95SBruce Richardson 		     ((spec.raw->length & mask.raw->length) >=
66699a2dd95SBruce Richardson 		      (last.raw->length & mask.raw->length))))
66799a2dd95SBruce Richardson 			tmp = spec.raw->length & mask.raw->length;
66899a2dd95SBruce Richardson 		else
66999a2dd95SBruce Richardson 			tmp = last.raw->length & mask.raw->length;
67099a2dd95SBruce Richardson 		if (tmp) {
67199a2dd95SBruce Richardson 			off = RTE_ALIGN_CEIL(off, sizeof(*dst.raw->pattern));
672841a0445SMichael Baum 			if (size >= off + tmp) {
673841a0445SMichael Baum 				deep_src = (void *)((uintptr_t)dst.raw + off);
674841a0445SMichael Baum 				dst.raw->pattern = rte_memcpy(deep_src,
675841a0445SMichael Baum 							      src.raw->pattern,
676841a0445SMichael Baum 							      tmp);
677841a0445SMichael Baum 			}
67899a2dd95SBruce Richardson 			off += tmp;
67999a2dd95SBruce Richardson 		}
68099a2dd95SBruce Richardson 		break;
681841a0445SMichael Baum 	case RTE_FLOW_ITEM_TYPE_GENEVE_OPT:
682841a0445SMichael Baum 		off = rte_flow_conv_copy(buf, data, size,
683841a0445SMichael Baum 					 rte_flow_desc_item, item->type);
684841a0445SMichael Baum 		spec.geneve_opt = item->spec;
685841a0445SMichael Baum 		src.geneve_opt = data;
686841a0445SMichael Baum 		dst.geneve_opt = buf;
687841a0445SMichael Baum 		tmp = spec.geneve_opt->option_len << 2;
688841a0445SMichael Baum 		if (size > 0 && src.geneve_opt->data) {
689841a0445SMichael Baum 			deep_src = (void *)((uintptr_t)(dst.geneve_opt + 1));
690841a0445SMichael Baum 			dst.geneve_opt->data = rte_memcpy(deep_src,
691841a0445SMichael Baum 							  src.geneve_opt->data,
692841a0445SMichael Baum 							  tmp);
693841a0445SMichael Baum 		}
694841a0445SMichael Baum 		off += tmp;
695841a0445SMichael Baum 		break;
69699a2dd95SBruce Richardson 	default:
6976cf72047SGregory Etelson 		off = rte_flow_conv_copy(buf, data, size,
6986cf72047SGregory Etelson 					 rte_flow_desc_item, item->type);
69999a2dd95SBruce Richardson 		break;
70099a2dd95SBruce Richardson 	}
70199a2dd95SBruce Richardson 	return off;
70299a2dd95SBruce Richardson }
70399a2dd95SBruce Richardson 
70499a2dd95SBruce Richardson /**
70599a2dd95SBruce Richardson  * Copy action configuration.
70699a2dd95SBruce Richardson  *
70799a2dd95SBruce Richardson  * @param[out] buf
70899a2dd95SBruce Richardson  *   Output buffer. Can be NULL if @p size is zero.
70999a2dd95SBruce Richardson  * @param size
71099a2dd95SBruce Richardson  *   Size of @p buf in bytes.
71199a2dd95SBruce Richardson  * @param[in] action
71299a2dd95SBruce Richardson  *   Action to copy configuration from.
71399a2dd95SBruce Richardson  *
71499a2dd95SBruce Richardson  * @return
71599a2dd95SBruce Richardson  *   Number of bytes needed to store the action configuration regardless
71699a2dd95SBruce Richardson  *   of @p size. @p buf contents are truncated to @p size if not large
71799a2dd95SBruce Richardson  *   enough.
71899a2dd95SBruce Richardson  */
71999a2dd95SBruce Richardson static size_t
72099a2dd95SBruce Richardson rte_flow_conv_action_conf(void *buf, const size_t size,
72199a2dd95SBruce Richardson 			  const struct rte_flow_action *action)
72299a2dd95SBruce Richardson {
72399a2dd95SBruce Richardson 	size_t off;
72499a2dd95SBruce Richardson 
72599a2dd95SBruce Richardson 	switch (action->type) {
72699a2dd95SBruce Richardson 		union {
72799a2dd95SBruce Richardson 			const struct rte_flow_action_rss *rss;
72899a2dd95SBruce Richardson 			const struct rte_flow_action_vxlan_encap *vxlan_encap;
72999a2dd95SBruce Richardson 			const struct rte_flow_action_nvgre_encap *nvgre_encap;
73099a2dd95SBruce Richardson 		} src;
73199a2dd95SBruce Richardson 		union {
73299a2dd95SBruce Richardson 			struct rte_flow_action_rss *rss;
73399a2dd95SBruce Richardson 			struct rte_flow_action_vxlan_encap *vxlan_encap;
73499a2dd95SBruce Richardson 			struct rte_flow_action_nvgre_encap *nvgre_encap;
73599a2dd95SBruce Richardson 		} dst;
73699a2dd95SBruce Richardson 		size_t tmp;
73799a2dd95SBruce Richardson 		int ret;
73899a2dd95SBruce Richardson 
73999a2dd95SBruce Richardson 	case RTE_FLOW_ACTION_TYPE_RSS:
74099a2dd95SBruce Richardson 		src.rss = action->conf;
74199a2dd95SBruce Richardson 		dst.rss = buf;
74299a2dd95SBruce Richardson 		rte_memcpy(dst.rss,
74399a2dd95SBruce Richardson 			   (&(struct rte_flow_action_rss){
74499a2dd95SBruce Richardson 				.func = src.rss->func,
74599a2dd95SBruce Richardson 				.level = src.rss->level,
74699a2dd95SBruce Richardson 				.types = src.rss->types,
74799a2dd95SBruce Richardson 				.key_len = src.rss->key_len,
74899a2dd95SBruce Richardson 				.queue_num = src.rss->queue_num,
74999a2dd95SBruce Richardson 			   }),
75099a2dd95SBruce Richardson 			   size > sizeof(*dst.rss) ? sizeof(*dst.rss) : size);
75199a2dd95SBruce Richardson 		off = sizeof(*dst.rss);
75299a2dd95SBruce Richardson 		if (src.rss->key_len && src.rss->key) {
75399a2dd95SBruce Richardson 			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->key));
75499a2dd95SBruce Richardson 			tmp = sizeof(*src.rss->key) * src.rss->key_len;
7553d67012aSRuifeng Wang 			if (size >= (uint64_t)off + (uint64_t)tmp)
75699a2dd95SBruce Richardson 				dst.rss->key = rte_memcpy
75799a2dd95SBruce Richardson 					((void *)((uintptr_t)dst.rss + off),
75899a2dd95SBruce Richardson 					 src.rss->key, tmp);
75999a2dd95SBruce Richardson 			off += tmp;
76099a2dd95SBruce Richardson 		}
76199a2dd95SBruce Richardson 		if (src.rss->queue_num) {
76299a2dd95SBruce Richardson 			off = RTE_ALIGN_CEIL(off, sizeof(*dst.rss->queue));
76399a2dd95SBruce Richardson 			tmp = sizeof(*src.rss->queue) * src.rss->queue_num;
7643d67012aSRuifeng Wang 			if (size >= (uint64_t)off + (uint64_t)tmp)
76599a2dd95SBruce Richardson 				dst.rss->queue = rte_memcpy
76699a2dd95SBruce Richardson 					((void *)((uintptr_t)dst.rss + off),
76799a2dd95SBruce Richardson 					 src.rss->queue, tmp);
76899a2dd95SBruce Richardson 			off += tmp;
76999a2dd95SBruce Richardson 		}
77099a2dd95SBruce Richardson 		break;
77199a2dd95SBruce Richardson 	case RTE_FLOW_ACTION_TYPE_VXLAN_ENCAP:
77299a2dd95SBruce Richardson 	case RTE_FLOW_ACTION_TYPE_NVGRE_ENCAP:
77399a2dd95SBruce Richardson 		src.vxlan_encap = action->conf;
77499a2dd95SBruce Richardson 		dst.vxlan_encap = buf;
77599a2dd95SBruce Richardson 		RTE_BUILD_BUG_ON(sizeof(*src.vxlan_encap) !=
77699a2dd95SBruce Richardson 				 sizeof(*src.nvgre_encap) ||
77799a2dd95SBruce Richardson 				 offsetof(struct rte_flow_action_vxlan_encap,
77899a2dd95SBruce Richardson 					  definition) !=
77999a2dd95SBruce Richardson 				 offsetof(struct rte_flow_action_nvgre_encap,
78099a2dd95SBruce Richardson 					  definition));
78199a2dd95SBruce Richardson 		off = sizeof(*dst.vxlan_encap);
78299a2dd95SBruce Richardson 		if (src.vxlan_encap->definition) {
78399a2dd95SBruce Richardson 			off = RTE_ALIGN_CEIL
78499a2dd95SBruce Richardson 				(off, sizeof(*dst.vxlan_encap->definition));
78599a2dd95SBruce Richardson 			ret = rte_flow_conv
78699a2dd95SBruce Richardson 				(RTE_FLOW_CONV_OP_PATTERN,
78799a2dd95SBruce Richardson 				 (void *)((uintptr_t)dst.vxlan_encap + off),
78899a2dd95SBruce Richardson 				 size > off ? size - off : 0,
78999a2dd95SBruce Richardson 				 src.vxlan_encap->definition, NULL);
79099a2dd95SBruce Richardson 			if (ret < 0)
79199a2dd95SBruce Richardson 				return 0;
79299a2dd95SBruce Richardson 			if (size >= off + ret)
79399a2dd95SBruce Richardson 				dst.vxlan_encap->definition =
79499a2dd95SBruce Richardson 					(void *)((uintptr_t)dst.vxlan_encap +
79599a2dd95SBruce Richardson 						 off);
79699a2dd95SBruce Richardson 			off += ret;
79799a2dd95SBruce Richardson 		}
79899a2dd95SBruce Richardson 		break;
79999a2dd95SBruce Richardson 	default:
8006cf72047SGregory Etelson 		off = rte_flow_conv_copy(buf, action->conf, size,
8016cf72047SGregory Etelson 					 rte_flow_desc_action, action->type);
80299a2dd95SBruce Richardson 		break;
80399a2dd95SBruce Richardson 	}
80499a2dd95SBruce Richardson 	return off;
80599a2dd95SBruce Richardson }
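
/*
 * Illustrative sketch, not part of the original sources: the two-pass
 * size-query/copy convention used by the helpers above, driven through the
 * public rte_flow_conv() entry point.  The helper name and the
 * caller-provided destination buffer are assumptions for the example.
 */
static __rte_unused int
example_copy_rule(struct rte_flow_conv_rule *dst, size_t dst_size,
		  const struct rte_flow_attr *attr,
		  const struct rte_flow_item *pattern,
		  const struct rte_flow_action *actions,
		  struct rte_flow_error *error)
{
	const struct rte_flow_conv_rule rule = {
		.attr_ro = attr,
		.pattern_ro = pattern,
		.actions_ro = actions,
	};
	int size;

	/* First pass: a NULL/0 destination only reports the bytes needed. */
	size = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, error);
	if (size < 0)
		return size;
	if ((size_t)size > dst_size)
		return -ENOMEM;
	/* Second pass: copy the attribute, pattern and action lists. */
	return rte_flow_conv(RTE_FLOW_CONV_OP_RULE, dst, dst_size, &rule, error);
}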
80699a2dd95SBruce Richardson 
80799a2dd95SBruce Richardson /**
80899a2dd95SBruce Richardson  * Copy a list of pattern items.
80999a2dd95SBruce Richardson  *
81099a2dd95SBruce Richardson  * @param[out] dst
81199a2dd95SBruce Richardson  *   Destination buffer. Can be NULL if @p size is zero.
81299a2dd95SBruce Richardson  * @param size
81399a2dd95SBruce Richardson  *   Size of @p dst in bytes.
81499a2dd95SBruce Richardson  * @param[in] src
81599a2dd95SBruce Richardson  *   Source pattern items.
81699a2dd95SBruce Richardson  * @param num
81799a2dd95SBruce Richardson  *   Maximum number of pattern items to process from @p src or 0 to process
81899a2dd95SBruce Richardson  *   the entire list. In both cases, processing stops after
81999a2dd95SBruce Richardson  *   RTE_FLOW_ITEM_TYPE_END is encountered.
82099a2dd95SBruce Richardson  * @param[out] error
82199a2dd95SBruce Richardson  *   Perform verbose error reporting if not NULL.
82299a2dd95SBruce Richardson  *
82399a2dd95SBruce Richardson  * @return
82499a2dd95SBruce Richardson  *   A positive value representing the number of bytes needed to store
82599a2dd95SBruce Richardson  *   pattern items regardless of @p size on success (@p dst contents are
82699a2dd95SBruce Richardson  *   truncated to @p size if not large enough), a negative errno value
82799a2dd95SBruce Richardson  *   otherwise and rte_errno is set.
82899a2dd95SBruce Richardson  */
82999a2dd95SBruce Richardson static int
83099a2dd95SBruce Richardson rte_flow_conv_pattern(struct rte_flow_item *dst,
83199a2dd95SBruce Richardson 		      const size_t size,
83299a2dd95SBruce Richardson 		      const struct rte_flow_item *src,
83399a2dd95SBruce Richardson 		      unsigned int num,
83499a2dd95SBruce Richardson 		      struct rte_flow_error *error)
83599a2dd95SBruce Richardson {
83699a2dd95SBruce Richardson 	uintptr_t data = (uintptr_t)dst;
83799a2dd95SBruce Richardson 	size_t off;
83899a2dd95SBruce Richardson 	size_t ret;
83999a2dd95SBruce Richardson 	unsigned int i;
84099a2dd95SBruce Richardson 
84199a2dd95SBruce Richardson 	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
84299a2dd95SBruce Richardson 		/**
84399a2dd95SBruce Richardson 		 * allow PMD private flow item
84499a2dd95SBruce Richardson 		 */
84599a2dd95SBruce Richardson 		if (((int)src->type >= 0) &&
84699a2dd95SBruce Richardson 			((size_t)src->type >= RTE_DIM(rte_flow_desc_item) ||
84799a2dd95SBruce Richardson 		    !rte_flow_desc_item[src->type].name))
84899a2dd95SBruce Richardson 			return rte_flow_error_set
84999a2dd95SBruce Richardson 				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ITEM, src,
85099a2dd95SBruce Richardson 				 "cannot convert unknown item type");
85199a2dd95SBruce Richardson 		if (size >= off + sizeof(*dst))
85299a2dd95SBruce Richardson 			*dst = (struct rte_flow_item){
85399a2dd95SBruce Richardson 				.type = src->type,
85499a2dd95SBruce Richardson 			};
85599a2dd95SBruce Richardson 		off += sizeof(*dst);
85699a2dd95SBruce Richardson 		if (!src->type)
85799a2dd95SBruce Richardson 			num = i + 1;
85899a2dd95SBruce Richardson 	}
85999a2dd95SBruce Richardson 	num = i;
86099a2dd95SBruce Richardson 	src -= num;
86199a2dd95SBruce Richardson 	dst -= num;
86299a2dd95SBruce Richardson 	do {
86399a2dd95SBruce Richardson 		if (src->spec) {
86499a2dd95SBruce Richardson 			off = RTE_ALIGN_CEIL(off, sizeof(double));
86599a2dd95SBruce Richardson 			ret = rte_flow_conv_item_spec
86699a2dd95SBruce Richardson 				((void *)(data + off),
86799a2dd95SBruce Richardson 				 size > off ? size - off : 0, src,
86899a2dd95SBruce Richardson 				 RTE_FLOW_CONV_ITEM_SPEC);
86999a2dd95SBruce Richardson 			if (size && size >= off + ret)
87099a2dd95SBruce Richardson 				dst->spec = (void *)(data + off);
87199a2dd95SBruce Richardson 			off += ret;
87299a2dd95SBruce Richardson 
87399a2dd95SBruce Richardson 		}
87499a2dd95SBruce Richardson 		if (src->last) {
87599a2dd95SBruce Richardson 			off = RTE_ALIGN_CEIL(off, sizeof(double));
87699a2dd95SBruce Richardson 			ret = rte_flow_conv_item_spec
87799a2dd95SBruce Richardson 				((void *)(data + off),
87899a2dd95SBruce Richardson 				 size > off ? size - off : 0, src,
87999a2dd95SBruce Richardson 				 RTE_FLOW_CONV_ITEM_LAST);
88099a2dd95SBruce Richardson 			if (size && size >= off + ret)
88199a2dd95SBruce Richardson 				dst->last = (void *)(data + off);
88299a2dd95SBruce Richardson 			off += ret;
88399a2dd95SBruce Richardson 		}
88499a2dd95SBruce Richardson 		if (src->mask) {
88599a2dd95SBruce Richardson 			off = RTE_ALIGN_CEIL(off, sizeof(double));
88699a2dd95SBruce Richardson 			ret = rte_flow_conv_item_spec
88799a2dd95SBruce Richardson 				((void *)(data + off),
88899a2dd95SBruce Richardson 				 size > off ? size - off : 0, src,
88999a2dd95SBruce Richardson 				 RTE_FLOW_CONV_ITEM_MASK);
89099a2dd95SBruce Richardson 			if (size && size >= off + ret)
89199a2dd95SBruce Richardson 				dst->mask = (void *)(data + off);
89299a2dd95SBruce Richardson 			off += ret;
89399a2dd95SBruce Richardson 		}
89499a2dd95SBruce Richardson 		++src;
89599a2dd95SBruce Richardson 		++dst;
89699a2dd95SBruce Richardson 	} while (--num);
89799a2dd95SBruce Richardson 	return off;
89899a2dd95SBruce Richardson }
89999a2dd95SBruce Richardson 
90099a2dd95SBruce Richardson /**
90199a2dd95SBruce Richardson  * Copy a list of actions.
90299a2dd95SBruce Richardson  *
90399a2dd95SBruce Richardson  * @param[out] dst
90499a2dd95SBruce Richardson  *   Destination buffer. Can be NULL if @p size is zero.
90599a2dd95SBruce Richardson  * @param size
90699a2dd95SBruce Richardson  *   Size of @p dst in bytes.
90799a2dd95SBruce Richardson  * @param[in] src
90899a2dd95SBruce Richardson  *   Source actions.
90999a2dd95SBruce Richardson  * @param num
91099a2dd95SBruce Richardson  *   Maximum number of actions to process from @p src or 0 to process the
91199a2dd95SBruce Richardson  *   entire list. In both cases, processing stops after
91299a2dd95SBruce Richardson  *   RTE_FLOW_ACTION_TYPE_END is encountered.
91399a2dd95SBruce Richardson  * @param[out] error
91499a2dd95SBruce Richardson  *   Perform verbose error reporting if not NULL.
91599a2dd95SBruce Richardson  *
91699a2dd95SBruce Richardson  * @return
91799a2dd95SBruce Richardson  *   A positive value representing the number of bytes needed to store
91899a2dd95SBruce Richardson  *   actions regardless of @p size on success (@p dst contents are truncated
91999a2dd95SBruce Richardson  *   to @p size if not large enough), a negative errno value otherwise and
92099a2dd95SBruce Richardson  *   rte_errno is set.
92199a2dd95SBruce Richardson  */
92299a2dd95SBruce Richardson static int
92399a2dd95SBruce Richardson rte_flow_conv_actions(struct rte_flow_action *dst,
92499a2dd95SBruce Richardson 		      const size_t size,
92599a2dd95SBruce Richardson 		      const struct rte_flow_action *src,
92699a2dd95SBruce Richardson 		      unsigned int num,
92799a2dd95SBruce Richardson 		      struct rte_flow_error *error)
92899a2dd95SBruce Richardson {
92999a2dd95SBruce Richardson 	uintptr_t data = (uintptr_t)dst;
93099a2dd95SBruce Richardson 	size_t off;
93199a2dd95SBruce Richardson 	size_t ret;
93299a2dd95SBruce Richardson 	unsigned int i;
93399a2dd95SBruce Richardson 
93499a2dd95SBruce Richardson 	for (i = 0, off = 0; !num || i != num; ++i, ++src, ++dst) {
93599a2dd95SBruce Richardson 		/**
93699a2dd95SBruce Richardson 		 * allow PMD private flow action
93799a2dd95SBruce Richardson 		 */
93899a2dd95SBruce Richardson 		if (((int)src->type >= 0) &&
93999a2dd95SBruce Richardson 		    ((size_t)src->type >= RTE_DIM(rte_flow_desc_action) ||
94099a2dd95SBruce Richardson 		    !rte_flow_desc_action[src->type].name))
94199a2dd95SBruce Richardson 			return rte_flow_error_set
94299a2dd95SBruce Richardson 				(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_ACTION,
94399a2dd95SBruce Richardson 				 src, "cannot convert unknown action type");
94499a2dd95SBruce Richardson 		if (size >= off + sizeof(*dst))
94599a2dd95SBruce Richardson 			*dst = (struct rte_flow_action){
94699a2dd95SBruce Richardson 				.type = src->type,
94799a2dd95SBruce Richardson 			};
94899a2dd95SBruce Richardson 		off += sizeof(*dst);
94999a2dd95SBruce Richardson 		if (!src->type)
95099a2dd95SBruce Richardson 			num = i + 1;
95199a2dd95SBruce Richardson 	}
95299a2dd95SBruce Richardson 	num = i;
95399a2dd95SBruce Richardson 	src -= num;
95499a2dd95SBruce Richardson 	dst -= num;
95599a2dd95SBruce Richardson 	do {
956fb131e29SSuanming Mou 		if (src->type == RTE_FLOW_ACTION_TYPE_INDIRECT) {
957fb131e29SSuanming Mou 			/*
958fb131e29SSuanming Mou 			 * Indirect action conf fills the indirect action
959fb131e29SSuanming Mou 			 * handler. Copy the action handle directly instead
960fb131e29SSuanming Mou 			 * of duplicating the pointer memory.
961fb131e29SSuanming Mou 			 */
962fb131e29SSuanming Mou 			if (size)
963fb131e29SSuanming Mou 				dst->conf = src->conf;
964fb131e29SSuanming Mou 		} else if (src->conf) {
96599a2dd95SBruce Richardson 			off = RTE_ALIGN_CEIL(off, sizeof(double));
96699a2dd95SBruce Richardson 			ret = rte_flow_conv_action_conf
96799a2dd95SBruce Richardson 				((void *)(data + off),
96899a2dd95SBruce Richardson 				 size > off ? size - off : 0, src);
96999a2dd95SBruce Richardson 			if (size && size >= off + ret)
97099a2dd95SBruce Richardson 				dst->conf = (void *)(data + off);
97199a2dd95SBruce Richardson 			off += ret;
97299a2dd95SBruce Richardson 		}
97399a2dd95SBruce Richardson 		++src;
97499a2dd95SBruce Richardson 		++dst;
97599a2dd95SBruce Richardson 	} while (--num);
97699a2dd95SBruce Richardson 	return off;
97799a2dd95SBruce Richardson }
97899a2dd95SBruce Richardson 
97999a2dd95SBruce Richardson /**
98099a2dd95SBruce Richardson  * Copy flow rule components.
98199a2dd95SBruce Richardson  *
98299a2dd95SBruce Richardson  * This comprises the flow rule descriptor itself, attributes, pattern and
98399a2dd95SBruce Richardson  * actions list. NULL components in @p src are skipped.
98499a2dd95SBruce Richardson  *
98599a2dd95SBruce Richardson  * @param[out] dst
98699a2dd95SBruce Richardson  *   Destination buffer. Can be NULL if @p size is zero.
98799a2dd95SBruce Richardson  * @param size
98899a2dd95SBruce Richardson  *   Size of @p dst in bytes.
98999a2dd95SBruce Richardson  * @param[in] src
99099a2dd95SBruce Richardson  *   Source flow rule descriptor.
99199a2dd95SBruce Richardson  * @param[out] error
99299a2dd95SBruce Richardson  *   Perform verbose error reporting if not NULL.
99399a2dd95SBruce Richardson  *
99499a2dd95SBruce Richardson  * @return
99599a2dd95SBruce Richardson  *   A positive value representing the number of bytes needed to store all
99699a2dd95SBruce Richardson  *   components including the descriptor regardless of @p size on success
99799a2dd95SBruce Richardson  *   (@p dst contents are truncated to @p size if not large enough), a
99899a2dd95SBruce Richardson  *   negative errno value otherwise and rte_errno is set.
99999a2dd95SBruce Richardson  */
100099a2dd95SBruce Richardson static int
100199a2dd95SBruce Richardson rte_flow_conv_rule(struct rte_flow_conv_rule *dst,
100299a2dd95SBruce Richardson 		   const size_t size,
100399a2dd95SBruce Richardson 		   const struct rte_flow_conv_rule *src,
100499a2dd95SBruce Richardson 		   struct rte_flow_error *error)
100599a2dd95SBruce Richardson {
100699a2dd95SBruce Richardson 	size_t off;
100799a2dd95SBruce Richardson 	int ret;
100899a2dd95SBruce Richardson 
100999a2dd95SBruce Richardson 	rte_memcpy(dst,
101099a2dd95SBruce Richardson 		   (&(struct rte_flow_conv_rule){
101199a2dd95SBruce Richardson 			.attr = NULL,
101299a2dd95SBruce Richardson 			.pattern = NULL,
101399a2dd95SBruce Richardson 			.actions = NULL,
101499a2dd95SBruce Richardson 		   }),
101599a2dd95SBruce Richardson 		   size > sizeof(*dst) ? sizeof(*dst) : size);
101699a2dd95SBruce Richardson 	off = sizeof(*dst);
101799a2dd95SBruce Richardson 	if (src->attr_ro) {
101899a2dd95SBruce Richardson 		off = RTE_ALIGN_CEIL(off, sizeof(double));
101999a2dd95SBruce Richardson 		if (size && size >= off + sizeof(*dst->attr))
102099a2dd95SBruce Richardson 			dst->attr = rte_memcpy
102199a2dd95SBruce Richardson 				((void *)((uintptr_t)dst + off),
102299a2dd95SBruce Richardson 				 src->attr_ro, sizeof(*dst->attr));
102399a2dd95SBruce Richardson 		off += sizeof(*dst->attr);
102499a2dd95SBruce Richardson 	}
102599a2dd95SBruce Richardson 	if (src->pattern_ro) {
102699a2dd95SBruce Richardson 		off = RTE_ALIGN_CEIL(off, sizeof(double));
102799a2dd95SBruce Richardson 		ret = rte_flow_conv_pattern((void *)((uintptr_t)dst + off),
102899a2dd95SBruce Richardson 					    size > off ? size - off : 0,
102999a2dd95SBruce Richardson 					    src->pattern_ro, 0, error);
103099a2dd95SBruce Richardson 		if (ret < 0)
103199a2dd95SBruce Richardson 			return ret;
103299a2dd95SBruce Richardson 		if (size && size >= off + (size_t)ret)
103399a2dd95SBruce Richardson 			dst->pattern = (void *)((uintptr_t)dst + off);
103499a2dd95SBruce Richardson 		off += ret;
103599a2dd95SBruce Richardson 	}
103699a2dd95SBruce Richardson 	if (src->actions_ro) {
103799a2dd95SBruce Richardson 		off = RTE_ALIGN_CEIL(off, sizeof(double));
103899a2dd95SBruce Richardson 		ret = rte_flow_conv_actions((void *)((uintptr_t)dst + off),
103999a2dd95SBruce Richardson 					    size > off ? size - off : 0,
104099a2dd95SBruce Richardson 					    src->actions_ro, 0, error);
104199a2dd95SBruce Richardson 		if (ret < 0)
104299a2dd95SBruce Richardson 			return ret;
104399a2dd95SBruce Richardson 		if (size >= off + (size_t)ret)
104499a2dd95SBruce Richardson 			dst->actions = (void *)((uintptr_t)dst + off);
104599a2dd95SBruce Richardson 		off += ret;
104699a2dd95SBruce Richardson 	}
104799a2dd95SBruce Richardson 	return off;
104899a2dd95SBruce Richardson }
104999a2dd95SBruce Richardson 
105099a2dd95SBruce Richardson /**
105199a2dd95SBruce Richardson  * Retrieve the name of a pattern item/action type.
105299a2dd95SBruce Richardson  *
105399a2dd95SBruce Richardson  * @param is_action
105499a2dd95SBruce Richardson  *   Nonzero when @p src represents an action type instead of a pattern item
105599a2dd95SBruce Richardson  *   type.
105699a2dd95SBruce Richardson  * @param is_ptr
105799a2dd95SBruce Richardson  *   Nonzero to write string address instead of contents into @p dst.
105899a2dd95SBruce Richardson  * @param[out] dst
105999a2dd95SBruce Richardson  *   Destination buffer. Can be NULL if @p size is zero.
106099a2dd95SBruce Richardson  * @param size
106199a2dd95SBruce Richardson  *   Size of @p dst in bytes.
106299a2dd95SBruce Richardson  * @param[in] src
106399a2dd95SBruce Richardson  *   Depending on @p is_action, source pattern item or action type cast as a
106499a2dd95SBruce Richardson  *   pointer.
106599a2dd95SBruce Richardson  * @param[out] error
106699a2dd95SBruce Richardson  *   Perform verbose error reporting if not NULL.
106799a2dd95SBruce Richardson  *
106899a2dd95SBruce Richardson  * @return
106999a2dd95SBruce Richardson  *   A positive value representing the number of bytes needed to store the
107099a2dd95SBruce Richardson  *   name or its address regardless of @p size on success (@p dst contents
107199a2dd95SBruce Richardson  *   are truncated to @p size if not large enough), a negative errno value
107299a2dd95SBruce Richardson  *   otherwise and rte_errno is set.
107399a2dd95SBruce Richardson  */
107499a2dd95SBruce Richardson static int
107599a2dd95SBruce Richardson rte_flow_conv_name(int is_action,
107699a2dd95SBruce Richardson 		   int is_ptr,
107799a2dd95SBruce Richardson 		   char *dst,
107899a2dd95SBruce Richardson 		   const size_t size,
107999a2dd95SBruce Richardson 		   const void *src,
108099a2dd95SBruce Richardson 		   struct rte_flow_error *error)
108199a2dd95SBruce Richardson {
108299a2dd95SBruce Richardson 	struct desc_info {
108399a2dd95SBruce Richardson 		const struct rte_flow_desc_data *data;
108499a2dd95SBruce Richardson 		size_t num;
108599a2dd95SBruce Richardson 	};
108699a2dd95SBruce Richardson 	static const struct desc_info info_rep[2] = {
108799a2dd95SBruce Richardson 		{ rte_flow_desc_item, RTE_DIM(rte_flow_desc_item), },
108899a2dd95SBruce Richardson 		{ rte_flow_desc_action, RTE_DIM(rte_flow_desc_action), },
108999a2dd95SBruce Richardson 	};
109099a2dd95SBruce Richardson 	const struct desc_info *const info = &info_rep[!!is_action];
109199a2dd95SBruce Richardson 	unsigned int type = (uintptr_t)src;
109299a2dd95SBruce Richardson 
109399a2dd95SBruce Richardson 	if (type >= info->num)
109499a2dd95SBruce Richardson 		return rte_flow_error_set
109599a2dd95SBruce Richardson 			(error, EINVAL, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
109699a2dd95SBruce Richardson 			 "unknown object type to retrieve the name of");
109799a2dd95SBruce Richardson 	if (!is_ptr)
109899a2dd95SBruce Richardson 		return strlcpy(dst, info->data[type].name, size);
109999a2dd95SBruce Richardson 	if (size >= sizeof(const char **))
110099a2dd95SBruce Richardson 		*((const char **)dst) = info->data[type].name;
110199a2dd95SBruce Richardson 	return sizeof(const char **);
110299a2dd95SBruce Richardson }
110399a2dd95SBruce Richardson 
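/*
 * A minimal usage sketch for the size-query-then-convert pattern, assuming a
 * populated struct rte_flow_conv_rule named "rule" (all names below are
 * illustrative, not part of this file):
 *
 *	struct rte_flow_error err;
 *	int len = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, NULL, 0, &rule, &err);
 *	struct rte_flow_conv_rule *copy = len > 0 ? malloc(len) : NULL;
 *
 *	if (copy != NULL)
 *		rte_flow_conv(RTE_FLOW_CONV_OP_RULE, copy, len, &rule, &err);
 */
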
110499a2dd95SBruce Richardson /** Helper function to convert flow API objects. */
110599a2dd95SBruce Richardson int
110699a2dd95SBruce Richardson rte_flow_conv(enum rte_flow_conv_op op,
110799a2dd95SBruce Richardson 	      void *dst,
110899a2dd95SBruce Richardson 	      size_t size,
110999a2dd95SBruce Richardson 	      const void *src,
111099a2dd95SBruce Richardson 	      struct rte_flow_error *error)
111199a2dd95SBruce Richardson {
1112ed04fd40SAnkur Dwivedi 	int ret;
1113ed04fd40SAnkur Dwivedi 
111499a2dd95SBruce Richardson 	switch (op) {
111599a2dd95SBruce Richardson 		const struct rte_flow_attr *attr;
1116bd6c8808SDariusz Sosnowski 		const struct rte_flow_item *item;
111799a2dd95SBruce Richardson 
111899a2dd95SBruce Richardson 	case RTE_FLOW_CONV_OP_NONE:
1119ed04fd40SAnkur Dwivedi 		ret = 0;
1120ed04fd40SAnkur Dwivedi 		break;
112199a2dd95SBruce Richardson 	case RTE_FLOW_CONV_OP_ATTR:
112299a2dd95SBruce Richardson 		attr = src;
112399a2dd95SBruce Richardson 		if (size > sizeof(*attr))
112499a2dd95SBruce Richardson 			size = sizeof(*attr);
112599a2dd95SBruce Richardson 		rte_memcpy(dst, attr, size);
1126ed04fd40SAnkur Dwivedi 		ret = sizeof(*attr);
1127ed04fd40SAnkur Dwivedi 		break;
112899a2dd95SBruce Richardson 	case RTE_FLOW_CONV_OP_ITEM:
1129ed04fd40SAnkur Dwivedi 		ret = rte_flow_conv_pattern(dst, size, src, 1, error);
1130ed04fd40SAnkur Dwivedi 		break;
1131bd6c8808SDariusz Sosnowski 	case RTE_FLOW_CONV_OP_ITEM_MASK:
1132bd6c8808SDariusz Sosnowski 		item = src;
1133bd6c8808SDariusz Sosnowski 		if (item->mask == NULL) {
1134bd6c8808SDariusz Sosnowski 			ret = rte_flow_error_set(error, EINVAL, RTE_FLOW_ERROR_TYPE_ITEM_MASK,
1135bd6c8808SDariusz Sosnowski 						 item, "Mask not provided");
1136bd6c8808SDariusz Sosnowski 			break;
1137bd6c8808SDariusz Sosnowski 		}
1138bd6c8808SDariusz Sosnowski 		ret = rte_flow_conv_item_spec(dst, size, src, RTE_FLOW_CONV_ITEM_MASK);
1139bd6c8808SDariusz Sosnowski 		break;
114099a2dd95SBruce Richardson 	case RTE_FLOW_CONV_OP_ACTION:
1141ed04fd40SAnkur Dwivedi 		ret = rte_flow_conv_actions(dst, size, src, 1, error);
1142ed04fd40SAnkur Dwivedi 		break;
114399a2dd95SBruce Richardson 	case RTE_FLOW_CONV_OP_PATTERN:
1144ed04fd40SAnkur Dwivedi 		ret = rte_flow_conv_pattern(dst, size, src, 0, error);
1145ed04fd40SAnkur Dwivedi 		break;
114699a2dd95SBruce Richardson 	case RTE_FLOW_CONV_OP_ACTIONS:
1147ed04fd40SAnkur Dwivedi 		ret = rte_flow_conv_actions(dst, size, src, 0, error);
1148ed04fd40SAnkur Dwivedi 		break;
114999a2dd95SBruce Richardson 	case RTE_FLOW_CONV_OP_RULE:
1150ed04fd40SAnkur Dwivedi 		ret = rte_flow_conv_rule(dst, size, src, error);
1151ed04fd40SAnkur Dwivedi 		break;
115299a2dd95SBruce Richardson 	case RTE_FLOW_CONV_OP_ITEM_NAME:
1153ed04fd40SAnkur Dwivedi 		ret = rte_flow_conv_name(0, 0, dst, size, src, error);
1154ed04fd40SAnkur Dwivedi 		break;
115599a2dd95SBruce Richardson 	case RTE_FLOW_CONV_OP_ACTION_NAME:
1156ed04fd40SAnkur Dwivedi 		ret = rte_flow_conv_name(1, 0, dst, size, src, error);
1157ed04fd40SAnkur Dwivedi 		break;
115899a2dd95SBruce Richardson 	case RTE_FLOW_CONV_OP_ITEM_NAME_PTR:
1159ed04fd40SAnkur Dwivedi 		ret = rte_flow_conv_name(0, 1, dst, size, src, error);
1160ed04fd40SAnkur Dwivedi 		break;
116199a2dd95SBruce Richardson 	case RTE_FLOW_CONV_OP_ACTION_NAME_PTR:
1162ed04fd40SAnkur Dwivedi 		ret = rte_flow_conv_name(1, 1, dst, size, src, error);
1163ed04fd40SAnkur Dwivedi 		break;
1164ed04fd40SAnkur Dwivedi 	default:
1165ed04fd40SAnkur Dwivedi 		ret = rte_flow_error_set
116699a2dd95SBruce Richardson 			(error, ENOTSUP, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
116799a2dd95SBruce Richardson 			 "unknown object conversion operation");
116899a2dd95SBruce Richardson 	}
116999a2dd95SBruce Richardson 
1170ed04fd40SAnkur Dwivedi 	rte_flow_trace_conv(op, dst, size, src, ret);
1171ed04fd40SAnkur Dwivedi 
1172ed04fd40SAnkur Dwivedi 	return ret;
1173ed04fd40SAnkur Dwivedi }
1174ed04fd40SAnkur Dwivedi 
117599a2dd95SBruce Richardson /** Store a full rte_flow description. */
117699a2dd95SBruce Richardson size_t
117799a2dd95SBruce Richardson rte_flow_copy(struct rte_flow_desc *desc, size_t len,
117899a2dd95SBruce Richardson 	      const struct rte_flow_attr *attr,
117999a2dd95SBruce Richardson 	      const struct rte_flow_item *items,
118099a2dd95SBruce Richardson 	      const struct rte_flow_action *actions)
118199a2dd95SBruce Richardson {
118299a2dd95SBruce Richardson 	/*
118399a2dd95SBruce Richardson 	 * Overlap struct rte_flow_conv_rule with struct rte_flow_desc in order
118499a2dd95SBruce Richardson 	 * to convert the former to the latter without wasting space.
118599a2dd95SBruce Richardson 	 */
118699a2dd95SBruce Richardson 	struct rte_flow_conv_rule *dst =
118799a2dd95SBruce Richardson 		len ?
118899a2dd95SBruce Richardson 		(void *)((uintptr_t)desc +
118999a2dd95SBruce Richardson 			 (offsetof(struct rte_flow_desc, actions) -
119099a2dd95SBruce Richardson 			  offsetof(struct rte_flow_conv_rule, actions))) :
119199a2dd95SBruce Richardson 		NULL;
119299a2dd95SBruce Richardson 	size_t dst_size =
119399a2dd95SBruce Richardson 		len > sizeof(*desc) - sizeof(*dst) ?
119499a2dd95SBruce Richardson 		len - (sizeof(*desc) - sizeof(*dst)) :
119599a2dd95SBruce Richardson 		0;
119699a2dd95SBruce Richardson 	struct rte_flow_conv_rule src = {
119799a2dd95SBruce Richardson 		.attr_ro = NULL,
119899a2dd95SBruce Richardson 		.pattern_ro = items,
119999a2dd95SBruce Richardson 		.actions_ro = actions,
120099a2dd95SBruce Richardson 	};
120199a2dd95SBruce Richardson 	int ret;
120299a2dd95SBruce Richardson 
120399a2dd95SBruce Richardson 	RTE_BUILD_BUG_ON(sizeof(struct rte_flow_desc) <
120499a2dd95SBruce Richardson 			 sizeof(struct rte_flow_conv_rule));
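	/* Sanity check that the overlapped layouts actually line up. */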
120599a2dd95SBruce Richardson 	if (dst_size &&
120699a2dd95SBruce Richardson 	    (&dst->pattern != &desc->items ||
120799a2dd95SBruce Richardson 	     &dst->actions != &desc->actions ||
120899a2dd95SBruce Richardson 	     (uintptr_t)(dst + 1) != (uintptr_t)(desc + 1))) {
120999a2dd95SBruce Richardson 		rte_errno = EINVAL;
121099a2dd95SBruce Richardson 		return 0;
121199a2dd95SBruce Richardson 	}
121299a2dd95SBruce Richardson 	ret = rte_flow_conv(RTE_FLOW_CONV_OP_RULE, dst, dst_size, &src, NULL);
121399a2dd95SBruce Richardson 	if (ret < 0)
121499a2dd95SBruce Richardson 		return 0;
121599a2dd95SBruce Richardson 	ret += sizeof(*desc) - sizeof(*dst);
121699a2dd95SBruce Richardson 	rte_memcpy(desc,
121799a2dd95SBruce Richardson 		   (&(struct rte_flow_desc){
121899a2dd95SBruce Richardson 			.size = ret,
121999a2dd95SBruce Richardson 			.attr = *attr,
122099a2dd95SBruce Richardson 			.items = dst_size ? dst->pattern : NULL,
122199a2dd95SBruce Richardson 			.actions = dst_size ? dst->actions : NULL,
122299a2dd95SBruce Richardson 		   }),
122399a2dd95SBruce Richardson 		   len > sizeof(*desc) ? sizeof(*desc) : len);
1224ed04fd40SAnkur Dwivedi 
1225ed04fd40SAnkur Dwivedi 	rte_flow_trace_copy(desc, len, attr, items, actions, ret);
1226ed04fd40SAnkur Dwivedi 
122799a2dd95SBruce Richardson 	return ret;
122899a2dd95SBruce Richardson }
122999a2dd95SBruce Richardson 
123099a2dd95SBruce Richardson int
123199a2dd95SBruce Richardson rte_flow_dev_dump(uint16_t port_id, struct rte_flow *flow,
123299a2dd95SBruce Richardson 			FILE *file, struct rte_flow_error *error)
123399a2dd95SBruce Richardson {
123499a2dd95SBruce Richardson 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
123599a2dd95SBruce Richardson 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
123699a2dd95SBruce Richardson 	int ret;
123799a2dd95SBruce Richardson 
123899a2dd95SBruce Richardson 	if (unlikely(!ops))
123999a2dd95SBruce Richardson 		return -rte_errno;
124099a2dd95SBruce Richardson 	if (likely(!!ops->dev_dump)) {
124199a2dd95SBruce Richardson 		fts_enter(dev);
124299a2dd95SBruce Richardson 		ret = ops->dev_dump(dev, flow, file, error);
124399a2dd95SBruce Richardson 		fts_exit(dev);
124499a2dd95SBruce Richardson 		return flow_err(port_id, ret, error);
124599a2dd95SBruce Richardson 	}
124699a2dd95SBruce Richardson 	return rte_flow_error_set(error, ENOSYS,
124799a2dd95SBruce Richardson 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
124899a2dd95SBruce Richardson 				  NULL, rte_strerror(ENOSYS));
124999a2dd95SBruce Richardson }
125099a2dd95SBruce Richardson 
125199a2dd95SBruce Richardson int
125299a2dd95SBruce Richardson rte_flow_get_aged_flows(uint16_t port_id, void **contexts,
125399a2dd95SBruce Richardson 		    uint32_t nb_contexts, struct rte_flow_error *error)
125499a2dd95SBruce Richardson {
125599a2dd95SBruce Richardson 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
125699a2dd95SBruce Richardson 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
125799a2dd95SBruce Richardson 	int ret;
125899a2dd95SBruce Richardson 
125999a2dd95SBruce Richardson 	if (unlikely(!ops))
126099a2dd95SBruce Richardson 		return -rte_errno;
126199a2dd95SBruce Richardson 	if (likely(!!ops->get_aged_flows)) {
126299a2dd95SBruce Richardson 		fts_enter(dev);
126399a2dd95SBruce Richardson 		ret = ops->get_aged_flows(dev, contexts, nb_contexts, error);
126499a2dd95SBruce Richardson 		fts_exit(dev);
1265ed04fd40SAnkur Dwivedi 		ret = flow_err(port_id, ret, error);
1266ed04fd40SAnkur Dwivedi 
1267ed04fd40SAnkur Dwivedi 		rte_flow_trace_get_aged_flows(port_id, contexts, nb_contexts, ret);
1268ed04fd40SAnkur Dwivedi 
1269ed04fd40SAnkur Dwivedi 		return ret;
127099a2dd95SBruce Richardson 	}
127199a2dd95SBruce Richardson 	return rte_flow_error_set(error, ENOTSUP,
127299a2dd95SBruce Richardson 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
127399a2dd95SBruce Richardson 				  NULL, rte_strerror(ENOTSUP));
127499a2dd95SBruce Richardson }
127599a2dd95SBruce Richardson 
1276966eb55eSMichael Baum int
1277966eb55eSMichael Baum rte_flow_get_q_aged_flows(uint16_t port_id, uint32_t queue_id, void **contexts,
1278966eb55eSMichael Baum 			  uint32_t nb_contexts, struct rte_flow_error *error)
1279966eb55eSMichael Baum {
1280966eb55eSMichael Baum 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1281966eb55eSMichael Baum 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1282966eb55eSMichael Baum 	int ret;
1283966eb55eSMichael Baum 
1284966eb55eSMichael Baum 	if (unlikely(!ops))
1285966eb55eSMichael Baum 		return -rte_errno;
1286966eb55eSMichael Baum 	if (likely(!!ops->get_q_aged_flows)) {
1287966eb55eSMichael Baum 		fts_enter(dev);
1288966eb55eSMichael Baum 		ret = ops->get_q_aged_flows(dev, queue_id, contexts,
1289966eb55eSMichael Baum 					    nb_contexts, error);
1290966eb55eSMichael Baum 		fts_exit(dev);
1291ed04fd40SAnkur Dwivedi 		ret = flow_err(port_id, ret, error);
1292ed04fd40SAnkur Dwivedi 
1293ed04fd40SAnkur Dwivedi 		rte_flow_trace_get_q_aged_flows(port_id, queue_id, contexts,
1294ed04fd40SAnkur Dwivedi 						nb_contexts, ret);
1295ed04fd40SAnkur Dwivedi 
1296ed04fd40SAnkur Dwivedi 		return ret;
1297966eb55eSMichael Baum 	}
1298966eb55eSMichael Baum 	return rte_flow_error_set(error, ENOTSUP,
1299966eb55eSMichael Baum 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1300966eb55eSMichael Baum 				  NULL, rte_strerror(ENOTSUP));
1301966eb55eSMichael Baum }
1302966eb55eSMichael Baum 
13034b61b877SBing Zhao struct rte_flow_action_handle *
13044b61b877SBing Zhao rte_flow_action_handle_create(uint16_t port_id,
13054b61b877SBing Zhao 			      const struct rte_flow_indir_action_conf *conf,
130699a2dd95SBruce Richardson 			      const struct rte_flow_action *action,
130799a2dd95SBruce Richardson 			      struct rte_flow_error *error)
130899a2dd95SBruce Richardson {
13094b61b877SBing Zhao 	struct rte_flow_action_handle *handle;
131099a2dd95SBruce Richardson 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
131199a2dd95SBruce Richardson 
131299a2dd95SBruce Richardson 	if (unlikely(!ops))
131399a2dd95SBruce Richardson 		return NULL;
13144b61b877SBing Zhao 	if (unlikely(!ops->action_handle_create)) {
131599a2dd95SBruce Richardson 		rte_flow_error_set(error, ENOSYS,
131699a2dd95SBruce Richardson 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
131799a2dd95SBruce Richardson 				   rte_strerror(ENOSYS));
131899a2dd95SBruce Richardson 		return NULL;
131999a2dd95SBruce Richardson 	}
13204b61b877SBing Zhao 	handle = ops->action_handle_create(&rte_eth_devices[port_id],
132199a2dd95SBruce Richardson 					   conf, action, error);
13224b61b877SBing Zhao 	if (handle == NULL)
132399a2dd95SBruce Richardson 		flow_err(port_id, -rte_errno, error);
1324ed04fd40SAnkur Dwivedi 
1325ed04fd40SAnkur Dwivedi 	rte_flow_trace_action_handle_create(port_id, conf, action, handle);
1326ed04fd40SAnkur Dwivedi 
13274b61b877SBing Zhao 	return handle;
132899a2dd95SBruce Richardson }
132999a2dd95SBruce Richardson 
133099a2dd95SBruce Richardson int
13314b61b877SBing Zhao rte_flow_action_handle_destroy(uint16_t port_id,
13324b61b877SBing Zhao 			       struct rte_flow_action_handle *handle,
133399a2dd95SBruce Richardson 			       struct rte_flow_error *error)
133499a2dd95SBruce Richardson {
133599a2dd95SBruce Richardson 	int ret;
133699a2dd95SBruce Richardson 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
133799a2dd95SBruce Richardson 
133899a2dd95SBruce Richardson 	if (unlikely(!ops))
133999a2dd95SBruce Richardson 		return -rte_errno;
13404b61b877SBing Zhao 	if (unlikely(!ops->action_handle_destroy))
134199a2dd95SBruce Richardson 		return rte_flow_error_set(error, ENOSYS,
134299a2dd95SBruce Richardson 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
134399a2dd95SBruce Richardson 					  NULL, rte_strerror(ENOSYS));
13444b61b877SBing Zhao 	ret = ops->action_handle_destroy(&rte_eth_devices[port_id],
13454b61b877SBing Zhao 					 handle, error);
1346ed04fd40SAnkur Dwivedi 	ret = flow_err(port_id, ret, error);
1347ed04fd40SAnkur Dwivedi 
1348ed04fd40SAnkur Dwivedi 	rte_flow_trace_action_handle_destroy(port_id, handle, ret);
1349ed04fd40SAnkur Dwivedi 
1350ed04fd40SAnkur Dwivedi 	return ret;
135199a2dd95SBruce Richardson }
135299a2dd95SBruce Richardson 
135399a2dd95SBruce Richardson int
13544b61b877SBing Zhao rte_flow_action_handle_update(uint16_t port_id,
13554b61b877SBing Zhao 			      struct rte_flow_action_handle *handle,
13564b61b877SBing Zhao 			      const void *update,
135799a2dd95SBruce Richardson 			      struct rte_flow_error *error)
135899a2dd95SBruce Richardson {
135999a2dd95SBruce Richardson 	int ret;
136099a2dd95SBruce Richardson 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
136199a2dd95SBruce Richardson 
136299a2dd95SBruce Richardson 	if (unlikely(!ops))
136399a2dd95SBruce Richardson 		return -rte_errno;
13644b61b877SBing Zhao 	if (unlikely(!ops->action_handle_update))
136599a2dd95SBruce Richardson 		return rte_flow_error_set(error, ENOSYS,
136699a2dd95SBruce Richardson 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
136799a2dd95SBruce Richardson 					  NULL, rte_strerror(ENOSYS));
13684b61b877SBing Zhao 	ret = ops->action_handle_update(&rte_eth_devices[port_id], handle,
136999a2dd95SBruce Richardson 					update, error);
1370ed04fd40SAnkur Dwivedi 	ret = flow_err(port_id, ret, error);
1371ed04fd40SAnkur Dwivedi 
1372ed04fd40SAnkur Dwivedi 	rte_flow_trace_action_handle_update(port_id, handle, update, ret);
1373ed04fd40SAnkur Dwivedi 
1374ed04fd40SAnkur Dwivedi 	return ret;
137599a2dd95SBruce Richardson }
137699a2dd95SBruce Richardson 
137799a2dd95SBruce Richardson int
13784b61b877SBing Zhao rte_flow_action_handle_query(uint16_t port_id,
13794b61b877SBing Zhao 			     const struct rte_flow_action_handle *handle,
138099a2dd95SBruce Richardson 			     void *data,
138199a2dd95SBruce Richardson 			     struct rte_flow_error *error)
138299a2dd95SBruce Richardson {
138399a2dd95SBruce Richardson 	int ret;
138499a2dd95SBruce Richardson 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
138599a2dd95SBruce Richardson 
138699a2dd95SBruce Richardson 	if (unlikely(!ops))
138799a2dd95SBruce Richardson 		return -rte_errno;
13884b61b877SBing Zhao 	if (unlikely(!ops->action_handle_query))
138999a2dd95SBruce Richardson 		return rte_flow_error_set(error, ENOSYS,
139099a2dd95SBruce Richardson 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
139199a2dd95SBruce Richardson 					  NULL, rte_strerror(ENOSYS));
13924b61b877SBing Zhao 	ret = ops->action_handle_query(&rte_eth_devices[port_id], handle,
139399a2dd95SBruce Richardson 				       data, error);
1394ed04fd40SAnkur Dwivedi 	ret = flow_err(port_id, ret, error);
1395ed04fd40SAnkur Dwivedi 
1396ed04fd40SAnkur Dwivedi 	rte_flow_trace_action_handle_query(port_id, handle, data, ret);
1397ed04fd40SAnkur Dwivedi 
1398ed04fd40SAnkur Dwivedi 	return ret;
139999a2dd95SBruce Richardson }
140099a2dd95SBruce Richardson 
140199a2dd95SBruce Richardson int
140299a2dd95SBruce Richardson rte_flow_tunnel_decap_set(uint16_t port_id,
140399a2dd95SBruce Richardson 			  struct rte_flow_tunnel *tunnel,
140499a2dd95SBruce Richardson 			  struct rte_flow_action **actions,
140599a2dd95SBruce Richardson 			  uint32_t *num_of_actions,
140699a2dd95SBruce Richardson 			  struct rte_flow_error *error)
140799a2dd95SBruce Richardson {
140899a2dd95SBruce Richardson 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
140999a2dd95SBruce Richardson 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1410ed04fd40SAnkur Dwivedi 	int ret;
141199a2dd95SBruce Richardson 
141299a2dd95SBruce Richardson 	if (unlikely(!ops))
141399a2dd95SBruce Richardson 		return -rte_errno;
141499a2dd95SBruce Richardson 	if (likely(!!ops->tunnel_decap_set)) {
1415ed04fd40SAnkur Dwivedi 		ret = flow_err(port_id,
141699a2dd95SBruce Richardson 			       ops->tunnel_decap_set(dev, tunnel, actions,
141799a2dd95SBruce Richardson 						     num_of_actions, error),
141899a2dd95SBruce Richardson 			       error);
1419ed04fd40SAnkur Dwivedi 
1420ed04fd40SAnkur Dwivedi 		rte_flow_trace_tunnel_decap_set(port_id, tunnel, actions,
1421ed04fd40SAnkur Dwivedi 						num_of_actions, ret);
1422ed04fd40SAnkur Dwivedi 
1423ed04fd40SAnkur Dwivedi 		return ret;
142499a2dd95SBruce Richardson 	}
142599a2dd95SBruce Richardson 	return rte_flow_error_set(error, ENOTSUP,
142699a2dd95SBruce Richardson 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
142799a2dd95SBruce Richardson 				  NULL, rte_strerror(ENOTSUP));
142899a2dd95SBruce Richardson }
142999a2dd95SBruce Richardson 
143099a2dd95SBruce Richardson int
143199a2dd95SBruce Richardson rte_flow_tunnel_match(uint16_t port_id,
143299a2dd95SBruce Richardson 		      struct rte_flow_tunnel *tunnel,
143399a2dd95SBruce Richardson 		      struct rte_flow_item **items,
143499a2dd95SBruce Richardson 		      uint32_t *num_of_items,
143599a2dd95SBruce Richardson 		      struct rte_flow_error *error)
143699a2dd95SBruce Richardson {
143799a2dd95SBruce Richardson 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
143899a2dd95SBruce Richardson 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1439ed04fd40SAnkur Dwivedi 	int ret;
144099a2dd95SBruce Richardson 
144199a2dd95SBruce Richardson 	if (unlikely(!ops))
144299a2dd95SBruce Richardson 		return -rte_errno;
144399a2dd95SBruce Richardson 	if (likely(!!ops->tunnel_match)) {
1444ed04fd40SAnkur Dwivedi 		ret = flow_err(port_id,
144599a2dd95SBruce Richardson 			       ops->tunnel_match(dev, tunnel, items,
144699a2dd95SBruce Richardson 						 num_of_items, error),
144799a2dd95SBruce Richardson 			       error);
1448ed04fd40SAnkur Dwivedi 
1449ed04fd40SAnkur Dwivedi 		rte_flow_trace_tunnel_match(port_id, tunnel, items, num_of_items,
1450ed04fd40SAnkur Dwivedi 					    ret);
1451ed04fd40SAnkur Dwivedi 
1452ed04fd40SAnkur Dwivedi 		return ret;
145399a2dd95SBruce Richardson 	}
145499a2dd95SBruce Richardson 	return rte_flow_error_set(error, ENOTSUP,
145599a2dd95SBruce Richardson 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
145699a2dd95SBruce Richardson 				  NULL, rte_strerror(ENOTSUP));
145799a2dd95SBruce Richardson }
145899a2dd95SBruce Richardson 
145999a2dd95SBruce Richardson int
146099a2dd95SBruce Richardson rte_flow_get_restore_info(uint16_t port_id,
146199a2dd95SBruce Richardson 			  struct rte_mbuf *m,
146299a2dd95SBruce Richardson 			  struct rte_flow_restore_info *restore_info,
146399a2dd95SBruce Richardson 			  struct rte_flow_error *error)
146499a2dd95SBruce Richardson {
146599a2dd95SBruce Richardson 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
146699a2dd95SBruce Richardson 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1467ed04fd40SAnkur Dwivedi 	int ret;
146899a2dd95SBruce Richardson 
146999a2dd95SBruce Richardson 	if (unlikely(!ops))
147099a2dd95SBruce Richardson 		return -rte_errno;
147199a2dd95SBruce Richardson 	if (likely(!!ops->get_restore_info)) {
1472ed04fd40SAnkur Dwivedi 		ret = flow_err(port_id,
147399a2dd95SBruce Richardson 			       ops->get_restore_info(dev, m, restore_info,
147499a2dd95SBruce Richardson 						     error),
147599a2dd95SBruce Richardson 			       error);
1476ed04fd40SAnkur Dwivedi 
1477ed04fd40SAnkur Dwivedi 		rte_flow_trace_get_restore_info(port_id, m, restore_info, ret);
1478ed04fd40SAnkur Dwivedi 
1479ed04fd40SAnkur Dwivedi 		return ret;
148099a2dd95SBruce Richardson 	}
148199a2dd95SBruce Richardson 	return rte_flow_error_set(error, ENOTSUP,
148299a2dd95SBruce Richardson 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
148399a2dd95SBruce Richardson 				  NULL, rte_strerror(ENOTSUP));
148499a2dd95SBruce Richardson }
148599a2dd95SBruce Richardson 
1486fca8cba4SDavid Marchand static struct {
1487fca8cba4SDavid Marchand 	const struct rte_mbuf_dynflag desc;
1488fca8cba4SDavid Marchand 	uint64_t value;
1489fca8cba4SDavid Marchand } flow_restore_info_dynflag = {
1490fca8cba4SDavid Marchand 	.desc = { .name = "RTE_MBUF_F_RX_RESTORE_INFO", },
1491fca8cba4SDavid Marchand };
1492fca8cba4SDavid Marchand 
1493fca8cba4SDavid Marchand uint64_t
1494fca8cba4SDavid Marchand rte_flow_restore_info_dynflag(void)
1495fca8cba4SDavid Marchand {
1496fca8cba4SDavid Marchand 	return flow_restore_info_dynflag.value;
1497fca8cba4SDavid Marchand }
1498fca8cba4SDavid Marchand 
1499fca8cba4SDavid Marchand int
1500fca8cba4SDavid Marchand rte_flow_restore_info_dynflag_register(void)
1501fca8cba4SDavid Marchand {
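	/* Register the dynamic flag only once and cache the resulting bit. */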
1502fca8cba4SDavid Marchand 	if (flow_restore_info_dynflag.value == 0) {
1503fca8cba4SDavid Marchand 		int offset = rte_mbuf_dynflag_register(&flow_restore_info_dynflag.desc);
1504fca8cba4SDavid Marchand 
1505fca8cba4SDavid Marchand 		if (offset < 0)
1506fca8cba4SDavid Marchand 			return -1;
1507fca8cba4SDavid Marchand 		flow_restore_info_dynflag.value = RTE_BIT64(offset);
1508fca8cba4SDavid Marchand 	}
1509fca8cba4SDavid Marchand 
1510fca8cba4SDavid Marchand 	return 0;
1511fca8cba4SDavid Marchand }
1512fca8cba4SDavid Marchand 
151399a2dd95SBruce Richardson int
151499a2dd95SBruce Richardson rte_flow_tunnel_action_decap_release(uint16_t port_id,
151599a2dd95SBruce Richardson 				     struct rte_flow_action *actions,
151699a2dd95SBruce Richardson 				     uint32_t num_of_actions,
151799a2dd95SBruce Richardson 				     struct rte_flow_error *error)
151899a2dd95SBruce Richardson {
151999a2dd95SBruce Richardson 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
152099a2dd95SBruce Richardson 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1521ed04fd40SAnkur Dwivedi 	int ret;
152299a2dd95SBruce Richardson 
152399a2dd95SBruce Richardson 	if (unlikely(!ops))
152499a2dd95SBruce Richardson 		return -rte_errno;
152599a2dd95SBruce Richardson 	if (likely(!!ops->tunnel_action_decap_release)) {
1526ed04fd40SAnkur Dwivedi 		ret = flow_err(port_id,
152799a2dd95SBruce Richardson 			       ops->tunnel_action_decap_release(dev, actions,
152899a2dd95SBruce Richardson 								num_of_actions,
152999a2dd95SBruce Richardson 								error),
153099a2dd95SBruce Richardson 			       error);
1531ed04fd40SAnkur Dwivedi 
1532ed04fd40SAnkur Dwivedi 		rte_flow_trace_tunnel_action_decap_release(port_id, actions,
1533ed04fd40SAnkur Dwivedi 							   num_of_actions, ret);
1534ed04fd40SAnkur Dwivedi 
1535ed04fd40SAnkur Dwivedi 		return ret;
153699a2dd95SBruce Richardson 	}
153799a2dd95SBruce Richardson 	return rte_flow_error_set(error, ENOTSUP,
153899a2dd95SBruce Richardson 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
153999a2dd95SBruce Richardson 				  NULL, rte_strerror(ENOTSUP));
154099a2dd95SBruce Richardson }
154199a2dd95SBruce Richardson 
154299a2dd95SBruce Richardson int
154399a2dd95SBruce Richardson rte_flow_tunnel_item_release(uint16_t port_id,
154499a2dd95SBruce Richardson 			     struct rte_flow_item *items,
154599a2dd95SBruce Richardson 			     uint32_t num_of_items,
154699a2dd95SBruce Richardson 			     struct rte_flow_error *error)
154799a2dd95SBruce Richardson {
154899a2dd95SBruce Richardson 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
154999a2dd95SBruce Richardson 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1550ed04fd40SAnkur Dwivedi 	int ret;
155199a2dd95SBruce Richardson 
155299a2dd95SBruce Richardson 	if (unlikely(!ops))
155399a2dd95SBruce Richardson 		return -rte_errno;
155499a2dd95SBruce Richardson 	if (likely(!!ops->tunnel_item_release)) {
1555ed04fd40SAnkur Dwivedi 		ret = flow_err(port_id,
155699a2dd95SBruce Richardson 			       ops->tunnel_item_release(dev, items,
155799a2dd95SBruce Richardson 							num_of_items, error),
155899a2dd95SBruce Richardson 			       error);
1559ed04fd40SAnkur Dwivedi 
1560ed04fd40SAnkur Dwivedi 		rte_flow_trace_tunnel_item_release(port_id, items, num_of_items, ret);
1561ed04fd40SAnkur Dwivedi 
1562ed04fd40SAnkur Dwivedi 		return ret;
156399a2dd95SBruce Richardson 	}
156499a2dd95SBruce Richardson 	return rte_flow_error_set(error, ENOTSUP,
156599a2dd95SBruce Richardson 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
156699a2dd95SBruce Richardson 				  NULL, rte_strerror(ENOTSUP));
156799a2dd95SBruce Richardson }
15681179f05cSIvan Malov 
15691179f05cSIvan Malov int
15701179f05cSIvan Malov rte_flow_pick_transfer_proxy(uint16_t port_id, uint16_t *proxy_port_id,
15711179f05cSIvan Malov 			     struct rte_flow_error *error)
15721179f05cSIvan Malov {
15731179f05cSIvan Malov 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
15741179f05cSIvan Malov 	struct rte_eth_dev *dev;
1575ed04fd40SAnkur Dwivedi 	int ret;
15761179f05cSIvan Malov 
15771179f05cSIvan Malov 	if (unlikely(ops == NULL))
15781179f05cSIvan Malov 		return -rte_errno;
15791179f05cSIvan Malov 
15801179f05cSIvan Malov 	if (ops->pick_transfer_proxy == NULL) {
15811179f05cSIvan Malov 		*proxy_port_id = port_id;
15821179f05cSIvan Malov 		return 0;
15831179f05cSIvan Malov 	}
15841179f05cSIvan Malov 
15851179f05cSIvan Malov 	dev = &rte_eth_devices[port_id];
15861179f05cSIvan Malov 
1587ed04fd40SAnkur Dwivedi 	ret = flow_err(port_id,
15881179f05cSIvan Malov 		       ops->pick_transfer_proxy(dev, proxy_port_id, error),
15891179f05cSIvan Malov 		       error);
1590ed04fd40SAnkur Dwivedi 
1591ed04fd40SAnkur Dwivedi 	rte_flow_trace_pick_transfer_proxy(port_id, proxy_port_id, ret);
1592ed04fd40SAnkur Dwivedi 
1593ed04fd40SAnkur Dwivedi 	return ret;
15941179f05cSIvan Malov }
1595dc4d860eSViacheslav Ovsiienko 
1596dc4d860eSViacheslav Ovsiienko struct rte_flow_item_flex_handle *
1597dc4d860eSViacheslav Ovsiienko rte_flow_flex_item_create(uint16_t port_id,
1598dc4d860eSViacheslav Ovsiienko 			  const struct rte_flow_item_flex_conf *conf,
1599dc4d860eSViacheslav Ovsiienko 			  struct rte_flow_error *error)
1600dc4d860eSViacheslav Ovsiienko {
1601dc4d860eSViacheslav Ovsiienko 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1602dc4d860eSViacheslav Ovsiienko 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1603dc4d860eSViacheslav Ovsiienko 	struct rte_flow_item_flex_handle *handle;
1604dc4d860eSViacheslav Ovsiienko 
1605dc4d860eSViacheslav Ovsiienko 	if (unlikely(!ops))
1606dc4d860eSViacheslav Ovsiienko 		return NULL;
1607dc4d860eSViacheslav Ovsiienko 	if (unlikely(!ops->flex_item_create)) {
1608dc4d860eSViacheslav Ovsiienko 		rte_flow_error_set(error, ENOTSUP,
1609dc4d860eSViacheslav Ovsiienko 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1610dc4d860eSViacheslav Ovsiienko 				   NULL, rte_strerror(ENOTSUP));
1611dc4d860eSViacheslav Ovsiienko 		return NULL;
1612dc4d860eSViacheslav Ovsiienko 	}
1613dc4d860eSViacheslav Ovsiienko 	handle = ops->flex_item_create(dev, conf, error);
1614dc4d860eSViacheslav Ovsiienko 	if (handle == NULL)
1615dc4d860eSViacheslav Ovsiienko 		flow_err(port_id, -rte_errno, error);
1616ed04fd40SAnkur Dwivedi 
1617ed04fd40SAnkur Dwivedi 	rte_flow_trace_flex_item_create(port_id, conf, handle);
1618ed04fd40SAnkur Dwivedi 
1619dc4d860eSViacheslav Ovsiienko 	return handle;
1620dc4d860eSViacheslav Ovsiienko }
1621dc4d860eSViacheslav Ovsiienko 
1622dc4d860eSViacheslav Ovsiienko int
1623dc4d860eSViacheslav Ovsiienko rte_flow_flex_item_release(uint16_t port_id,
1624dc4d860eSViacheslav Ovsiienko 			   const struct rte_flow_item_flex_handle *handle,
1625dc4d860eSViacheslav Ovsiienko 			   struct rte_flow_error *error)
1626dc4d860eSViacheslav Ovsiienko {
1627dc4d860eSViacheslav Ovsiienko 	int ret;
1628dc4d860eSViacheslav Ovsiienko 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1629dc4d860eSViacheslav Ovsiienko 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1630dc4d860eSViacheslav Ovsiienko 
1631dc4d860eSViacheslav Ovsiienko 	if (unlikely(!ops || !ops->flex_item_release))
1632dc4d860eSViacheslav Ovsiienko 		return rte_flow_error_set(error, ENOTSUP,
1633dc4d860eSViacheslav Ovsiienko 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1634dc4d860eSViacheslav Ovsiienko 					  NULL, rte_strerror(ENOTSUP));
1635dc4d860eSViacheslav Ovsiienko 	ret = ops->flex_item_release(dev, handle, error);
1636ed04fd40SAnkur Dwivedi 	ret = flow_err(port_id, ret, error);
1637ed04fd40SAnkur Dwivedi 
1638ed04fd40SAnkur Dwivedi 	rte_flow_trace_flex_item_release(port_id, handle, ret);
1639ed04fd40SAnkur Dwivedi 
1640ed04fd40SAnkur Dwivedi 	return ret;
1641dc4d860eSViacheslav Ovsiienko }
16424ff58b73SAlexander Kozyrev 
16434ff58b73SAlexander Kozyrev int
16444ff58b73SAlexander Kozyrev rte_flow_info_get(uint16_t port_id,
16454ff58b73SAlexander Kozyrev 		  struct rte_flow_port_info *port_info,
1646197e820cSAlexander Kozyrev 		  struct rte_flow_queue_info *queue_info,
16474ff58b73SAlexander Kozyrev 		  struct rte_flow_error *error)
16484ff58b73SAlexander Kozyrev {
16494ff58b73SAlexander Kozyrev 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
16504ff58b73SAlexander Kozyrev 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1651ed04fd40SAnkur Dwivedi 	int ret;
16524ff58b73SAlexander Kozyrev 
16534ff58b73SAlexander Kozyrev 	if (unlikely(!ops))
16544ff58b73SAlexander Kozyrev 		return -rte_errno;
16554ff58b73SAlexander Kozyrev 	if (dev->data->dev_configured == 0) {
16560e21c7c0SDavid Marchand 		FLOW_LOG(INFO,
16570e21c7c0SDavid Marchand 			"Device with port_id=%"PRIu16" is not configured.",
16584ff58b73SAlexander Kozyrev 			port_id);
16594ff58b73SAlexander Kozyrev 		return -EINVAL;
16604ff58b73SAlexander Kozyrev 	}
16614ff58b73SAlexander Kozyrev 	if (port_info == NULL) {
16620e21c7c0SDavid Marchand 		FLOW_LOG(ERR, "Port %"PRIu16" info is NULL.", port_id);
16634ff58b73SAlexander Kozyrev 		return -EINVAL;
16644ff58b73SAlexander Kozyrev 	}
16654ff58b73SAlexander Kozyrev 	if (likely(!!ops->info_get)) {
1666ed04fd40SAnkur Dwivedi 		ret = flow_err(port_id,
1667197e820cSAlexander Kozyrev 			       ops->info_get(dev, port_info, queue_info, error),
16684ff58b73SAlexander Kozyrev 			       error);
1669ed04fd40SAnkur Dwivedi 
1670ed04fd40SAnkur Dwivedi 		rte_flow_trace_info_get(port_id, port_info, queue_info, ret);
1671ed04fd40SAnkur Dwivedi 
1672ed04fd40SAnkur Dwivedi 		return ret;
16734ff58b73SAlexander Kozyrev 	}
16744ff58b73SAlexander Kozyrev 	return rte_flow_error_set(error, ENOTSUP,
16754ff58b73SAlexander Kozyrev 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
16764ff58b73SAlexander Kozyrev 				  NULL, rte_strerror(ENOTSUP));
16774ff58b73SAlexander Kozyrev }
16784ff58b73SAlexander Kozyrev 
16794ff58b73SAlexander Kozyrev int
16804ff58b73SAlexander Kozyrev rte_flow_configure(uint16_t port_id,
16814ff58b73SAlexander Kozyrev 		   const struct rte_flow_port_attr *port_attr,
1682197e820cSAlexander Kozyrev 		   uint16_t nb_queue,
1683197e820cSAlexander Kozyrev 		   const struct rte_flow_queue_attr *queue_attr[],
16844ff58b73SAlexander Kozyrev 		   struct rte_flow_error *error)
16854ff58b73SAlexander Kozyrev {
16864ff58b73SAlexander Kozyrev 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
16874ff58b73SAlexander Kozyrev 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
16884ff58b73SAlexander Kozyrev 	int ret;
16894ff58b73SAlexander Kozyrev 
16904ff58b73SAlexander Kozyrev 	if (unlikely(!ops))
16914ff58b73SAlexander Kozyrev 		return -rte_errno;
16924ff58b73SAlexander Kozyrev 	if (dev->data->dev_configured == 0) {
16930e21c7c0SDavid Marchand 		FLOW_LOG(INFO,
16940e21c7c0SDavid Marchand 			"Device with port_id=%"PRIu16" is not configured.",
16954ff58b73SAlexander Kozyrev 			port_id);
16964ff58b73SAlexander Kozyrev 		return -EINVAL;
16974ff58b73SAlexander Kozyrev 	}
16984ff58b73SAlexander Kozyrev 	if (dev->data->dev_started != 0) {
16990e21c7c0SDavid Marchand 		FLOW_LOG(INFO,
17000e21c7c0SDavid Marchand 			"Device with port_id=%"PRIu16" already started.",
17014ff58b73SAlexander Kozyrev 			port_id);
17024ff58b73SAlexander Kozyrev 		return -EINVAL;
17034ff58b73SAlexander Kozyrev 	}
17044ff58b73SAlexander Kozyrev 	if (port_attr == NULL) {
17050e21c7c0SDavid Marchand 		FLOW_LOG(ERR, "Port %"PRIu16" info is NULL.", port_id);
17064ff58b73SAlexander Kozyrev 		return -EINVAL;
17074ff58b73SAlexander Kozyrev 	}
1708197e820cSAlexander Kozyrev 	if (queue_attr == NULL) {
17090e21c7c0SDavid Marchand 		FLOW_LOG(ERR, "Port %"PRIu16" queue info is NULL.", port_id);
1710197e820cSAlexander Kozyrev 		return -EINVAL;
1711197e820cSAlexander Kozyrev 	}
1712f5b2846dSViacheslav Ovsiienko 	if ((port_attr->flags & RTE_FLOW_PORT_FLAG_SHARE_INDIRECT) &&
1713f5b2846dSViacheslav Ovsiienko 	     !rte_eth_dev_is_valid_port(port_attr->host_port_id)) {
1714f5b2846dSViacheslav Ovsiienko 		return rte_flow_error_set(error, ENODEV,
1715f5b2846dSViacheslav Ovsiienko 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1716f5b2846dSViacheslav Ovsiienko 					  NULL, rte_strerror(ENODEV));
1717f5b2846dSViacheslav Ovsiienko 	}
17184ff58b73SAlexander Kozyrev 	if (likely(!!ops->configure)) {
1719197e820cSAlexander Kozyrev 		ret = ops->configure(dev, port_attr, nb_queue, queue_attr, error);
17204ff58b73SAlexander Kozyrev 		if (ret == 0)
17214ff58b73SAlexander Kozyrev 			dev->data->flow_configured = 1;
1722ed04fd40SAnkur Dwivedi 		ret = flow_err(port_id, ret, error);
1723ed04fd40SAnkur Dwivedi 
1724ed04fd40SAnkur Dwivedi 		rte_flow_trace_configure(port_id, port_attr, nb_queue, queue_attr, ret);
1725ed04fd40SAnkur Dwivedi 
1726ed04fd40SAnkur Dwivedi 		return ret;
17274ff58b73SAlexander Kozyrev 	}
17284ff58b73SAlexander Kozyrev 	return rte_flow_error_set(error, ENOTSUP,
17294ff58b73SAlexander Kozyrev 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
17304ff58b73SAlexander Kozyrev 				  NULL, rte_strerror(ENOTSUP));
17314ff58b73SAlexander Kozyrev }
1732f076bcfbSAlexander Kozyrev 
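/*
 * A rough call-ordering sketch for the template API, assuming the port is
 * already configured and the attr/template variables are filled in by the
 * caller (all names below are illustrative, not part of this file):
 *
 *	rte_flow_configure(port_id, &port_attr, nb_queue, queue_attr, &err);
 *	pt = rte_flow_pattern_template_create(port_id, &pt_attr, pattern, &err);
 *	at = rte_flow_actions_template_create(port_id, &at_attr, actions,
 *					      masks, &err);
 *	tbl = rte_flow_template_table_create(port_id, &tbl_attr, &pt, 1,
 *					     &at, 1, &err);
 */
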
1733f076bcfbSAlexander Kozyrev struct rte_flow_pattern_template *
1734f076bcfbSAlexander Kozyrev rte_flow_pattern_template_create(uint16_t port_id,
1735f076bcfbSAlexander Kozyrev 		const struct rte_flow_pattern_template_attr *template_attr,
1736f076bcfbSAlexander Kozyrev 		const struct rte_flow_item pattern[],
1737f076bcfbSAlexander Kozyrev 		struct rte_flow_error *error)
1738f076bcfbSAlexander Kozyrev {
1739f076bcfbSAlexander Kozyrev 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1740f076bcfbSAlexander Kozyrev 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1741f076bcfbSAlexander Kozyrev 	struct rte_flow_pattern_template *template;
1742f076bcfbSAlexander Kozyrev 
1743f076bcfbSAlexander Kozyrev 	if (unlikely(!ops))
1744f076bcfbSAlexander Kozyrev 		return NULL;
1745f076bcfbSAlexander Kozyrev 	if (dev->data->flow_configured == 0) {
17460e21c7c0SDavid Marchand 		FLOW_LOG(INFO,
17470e21c7c0SDavid Marchand 			"Flow engine on port_id=%"PRIu16" is not configured.",
1748f076bcfbSAlexander Kozyrev 			port_id);
1749f076bcfbSAlexander Kozyrev 		rte_flow_error_set(error, EINVAL,
1750f076bcfbSAlexander Kozyrev 				RTE_FLOW_ERROR_TYPE_STATE,
1751f076bcfbSAlexander Kozyrev 				NULL, rte_strerror(EINVAL));
1752f076bcfbSAlexander Kozyrev 		return NULL;
1753f076bcfbSAlexander Kozyrev 	}
1754f076bcfbSAlexander Kozyrev 	if (template_attr == NULL) {
17550e21c7c0SDavid Marchand 		FLOW_LOG(ERR,
17560e21c7c0SDavid Marchand 			     "Port %"PRIu16" template attr is NULL.",
1757f076bcfbSAlexander Kozyrev 			     port_id);
1758f076bcfbSAlexander Kozyrev 		rte_flow_error_set(error, EINVAL,
1759f076bcfbSAlexander Kozyrev 				   RTE_FLOW_ERROR_TYPE_ATTR,
1760f076bcfbSAlexander Kozyrev 				   NULL, rte_strerror(EINVAL));
1761f076bcfbSAlexander Kozyrev 		return NULL;
1762f076bcfbSAlexander Kozyrev 	}
1763f076bcfbSAlexander Kozyrev 	if (pattern == NULL) {
17640e21c7c0SDavid Marchand 		FLOW_LOG(ERR,
17650e21c7c0SDavid Marchand 			     "Port %"PRIu16" pattern is NULL.",
1766f076bcfbSAlexander Kozyrev 			     port_id);
1767f076bcfbSAlexander Kozyrev 		rte_flow_error_set(error, EINVAL,
1768f076bcfbSAlexander Kozyrev 				   RTE_FLOW_ERROR_TYPE_ATTR,
1769f076bcfbSAlexander Kozyrev 				   NULL, rte_strerror(EINVAL));
1770f076bcfbSAlexander Kozyrev 		return NULL;
1771f076bcfbSAlexander Kozyrev 	}
1772f076bcfbSAlexander Kozyrev 	if (likely(!!ops->pattern_template_create)) {
1773f076bcfbSAlexander Kozyrev 		template = ops->pattern_template_create(dev, template_attr,
1774f076bcfbSAlexander Kozyrev 							pattern, error);
1775f076bcfbSAlexander Kozyrev 		if (template == NULL)
1776f076bcfbSAlexander Kozyrev 			flow_err(port_id, -rte_errno, error);
1777ed04fd40SAnkur Dwivedi 
1778ed04fd40SAnkur Dwivedi 		rte_flow_trace_pattern_template_create(port_id, template_attr,
1779ed04fd40SAnkur Dwivedi 						       pattern, template);
1780ed04fd40SAnkur Dwivedi 
1781f076bcfbSAlexander Kozyrev 		return template;
1782f076bcfbSAlexander Kozyrev 	}
1783f076bcfbSAlexander Kozyrev 	rte_flow_error_set(error, ENOTSUP,
1784f076bcfbSAlexander Kozyrev 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1785f076bcfbSAlexander Kozyrev 			   NULL, rte_strerror(ENOTSUP));
1786f076bcfbSAlexander Kozyrev 	return NULL;
1787f076bcfbSAlexander Kozyrev }
1788f076bcfbSAlexander Kozyrev 
1789f076bcfbSAlexander Kozyrev int
1790f076bcfbSAlexander Kozyrev rte_flow_pattern_template_destroy(uint16_t port_id,
1791f076bcfbSAlexander Kozyrev 		struct rte_flow_pattern_template *pattern_template,
1792f076bcfbSAlexander Kozyrev 		struct rte_flow_error *error)
1793f076bcfbSAlexander Kozyrev {
1794f076bcfbSAlexander Kozyrev 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1795f076bcfbSAlexander Kozyrev 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1796ed04fd40SAnkur Dwivedi 	int ret;
1797f076bcfbSAlexander Kozyrev 
1798f076bcfbSAlexander Kozyrev 	if (unlikely(!ops))
1799f076bcfbSAlexander Kozyrev 		return -rte_errno;
1800f076bcfbSAlexander Kozyrev 	if (unlikely(pattern_template == NULL))
1801f076bcfbSAlexander Kozyrev 		return 0;
1802f076bcfbSAlexander Kozyrev 	if (likely(!!ops->pattern_template_destroy)) {
1803ed04fd40SAnkur Dwivedi 		ret = flow_err(port_id,
1804f076bcfbSAlexander Kozyrev 			       ops->pattern_template_destroy(dev,
1805f076bcfbSAlexander Kozyrev 							     pattern_template,
1806f076bcfbSAlexander Kozyrev 							     error),
1807f076bcfbSAlexander Kozyrev 			       error);
1808ed04fd40SAnkur Dwivedi 
1809ed04fd40SAnkur Dwivedi 		rte_flow_trace_pattern_template_destroy(port_id, pattern_template,
1810ed04fd40SAnkur Dwivedi 							ret);
1811ed04fd40SAnkur Dwivedi 
1812ed04fd40SAnkur Dwivedi 		return ret;
1813f076bcfbSAlexander Kozyrev 	}
1814f076bcfbSAlexander Kozyrev 	return rte_flow_error_set(error, ENOTSUP,
1815f076bcfbSAlexander Kozyrev 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1816f076bcfbSAlexander Kozyrev 				  NULL, rte_strerror(ENOTSUP));
1817f076bcfbSAlexander Kozyrev }
1818f076bcfbSAlexander Kozyrev 
1819f076bcfbSAlexander Kozyrev struct rte_flow_actions_template *
1820f076bcfbSAlexander Kozyrev rte_flow_actions_template_create(uint16_t port_id,
1821f076bcfbSAlexander Kozyrev 			const struct rte_flow_actions_template_attr *template_attr,
1822f076bcfbSAlexander Kozyrev 			const struct rte_flow_action actions[],
1823f076bcfbSAlexander Kozyrev 			const struct rte_flow_action masks[],
1824f076bcfbSAlexander Kozyrev 			struct rte_flow_error *error)
1825f076bcfbSAlexander Kozyrev {
1826f076bcfbSAlexander Kozyrev 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1827f076bcfbSAlexander Kozyrev 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1828f076bcfbSAlexander Kozyrev 	struct rte_flow_actions_template *template;
1829f076bcfbSAlexander Kozyrev 
1830f076bcfbSAlexander Kozyrev 	if (unlikely(!ops))
1831f076bcfbSAlexander Kozyrev 		return NULL;
1832f076bcfbSAlexander Kozyrev 	if (dev->data->flow_configured == 0) {
18330e21c7c0SDavid Marchand 		FLOW_LOG(INFO,
18340e21c7c0SDavid Marchand 			"Flow engine on port_id=%"PRIu16" is not configured.",
1835f076bcfbSAlexander Kozyrev 			port_id);
1836f076bcfbSAlexander Kozyrev 		rte_flow_error_set(error, EINVAL,
1837f076bcfbSAlexander Kozyrev 				   RTE_FLOW_ERROR_TYPE_STATE,
1838f076bcfbSAlexander Kozyrev 				   NULL, rte_strerror(EINVAL));
1839f076bcfbSAlexander Kozyrev 		return NULL;
1840f076bcfbSAlexander Kozyrev 	}
1841f076bcfbSAlexander Kozyrev 	if (template_attr == NULL) {
18420e21c7c0SDavid Marchand 		FLOW_LOG(ERR,
18430e21c7c0SDavid Marchand 			     "Port %"PRIu16" template attr is NULL.",
1844f076bcfbSAlexander Kozyrev 			     port_id);
1845f076bcfbSAlexander Kozyrev 		rte_flow_error_set(error, EINVAL,
1846f076bcfbSAlexander Kozyrev 				   RTE_FLOW_ERROR_TYPE_ATTR,
1847f076bcfbSAlexander Kozyrev 				   NULL, rte_strerror(EINVAL));
1848f076bcfbSAlexander Kozyrev 		return NULL;
1849f076bcfbSAlexander Kozyrev 	}
1850f076bcfbSAlexander Kozyrev 	if (actions == NULL) {
18510e21c7c0SDavid Marchand 		FLOW_LOG(ERR,
18520e21c7c0SDavid Marchand 			     "Port %"PRIu16" actions is NULL.",
1853f076bcfbSAlexander Kozyrev 			     port_id);
1854f076bcfbSAlexander Kozyrev 		rte_flow_error_set(error, EINVAL,
1855f076bcfbSAlexander Kozyrev 				   RTE_FLOW_ERROR_TYPE_ATTR,
1856f076bcfbSAlexander Kozyrev 				   NULL, rte_strerror(EINVAL));
1857f076bcfbSAlexander Kozyrev 		return NULL;
1858f076bcfbSAlexander Kozyrev 	}
1859f076bcfbSAlexander Kozyrev 	if (masks == NULL) {
18600e21c7c0SDavid Marchand 		FLOW_LOG(ERR,
18610e21c7c0SDavid Marchand 			     "Port %"PRIu16" masks is NULL.",
1862f076bcfbSAlexander Kozyrev 			     port_id);
1863f076bcfbSAlexander Kozyrev 		rte_flow_error_set(error, EINVAL,
1864f076bcfbSAlexander Kozyrev 				   RTE_FLOW_ERROR_TYPE_ATTR,
1865f076bcfbSAlexander Kozyrev 				   NULL, rte_strerror(EINVAL));
1866f076bcfbSAlexander Kozyrev 		return NULL;
1867f076bcfbSAlexander Kozyrev 	}
1868f076bcfbSAlexander Kozyrev 	if (likely(!!ops->actions_template_create)) {
1869f076bcfbSAlexander Kozyrev 		template = ops->actions_template_create(dev, template_attr,
1870f076bcfbSAlexander Kozyrev 							actions, masks, error);
1871f076bcfbSAlexander Kozyrev 		if (template == NULL)
1872f076bcfbSAlexander Kozyrev 			flow_err(port_id, -rte_errno, error);
1873ed04fd40SAnkur Dwivedi 
1874ed04fd40SAnkur Dwivedi 		rte_flow_trace_actions_template_create(port_id, template_attr, actions,
1875ed04fd40SAnkur Dwivedi 						       masks, template);
1876ed04fd40SAnkur Dwivedi 
1877f076bcfbSAlexander Kozyrev 		return template;
1878f076bcfbSAlexander Kozyrev 	}
1879f076bcfbSAlexander Kozyrev 	rte_flow_error_set(error, ENOTSUP,
1880f076bcfbSAlexander Kozyrev 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1881f076bcfbSAlexander Kozyrev 			   NULL, rte_strerror(ENOTSUP));
1882f076bcfbSAlexander Kozyrev 	return NULL;
1883f076bcfbSAlexander Kozyrev }
1884f076bcfbSAlexander Kozyrev 
1885f076bcfbSAlexander Kozyrev int
1886f076bcfbSAlexander Kozyrev rte_flow_actions_template_destroy(uint16_t port_id,
1887f076bcfbSAlexander Kozyrev 			struct rte_flow_actions_template *actions_template,
1888f076bcfbSAlexander Kozyrev 			struct rte_flow_error *error)
1889f076bcfbSAlexander Kozyrev {
1890f076bcfbSAlexander Kozyrev 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1891f076bcfbSAlexander Kozyrev 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1892ed04fd40SAnkur Dwivedi 	int ret;
1893f076bcfbSAlexander Kozyrev 
1894f076bcfbSAlexander Kozyrev 	if (unlikely(!ops))
1895f076bcfbSAlexander Kozyrev 		return -rte_errno;
1896f076bcfbSAlexander Kozyrev 	if (unlikely(actions_template == NULL))
1897f076bcfbSAlexander Kozyrev 		return 0;
1898f076bcfbSAlexander Kozyrev 	if (likely(!!ops->actions_template_destroy)) {
1899ed04fd40SAnkur Dwivedi 		ret = flow_err(port_id,
1900f076bcfbSAlexander Kozyrev 			       ops->actions_template_destroy(dev,
1901f076bcfbSAlexander Kozyrev 							     actions_template,
1902f076bcfbSAlexander Kozyrev 							     error),
1903f076bcfbSAlexander Kozyrev 			       error);
1904ed04fd40SAnkur Dwivedi 
1905ed04fd40SAnkur Dwivedi 		rte_flow_trace_actions_template_destroy(port_id, actions_template,
1906ed04fd40SAnkur Dwivedi 							ret);
1907ed04fd40SAnkur Dwivedi 
1908ed04fd40SAnkur Dwivedi 		return ret;
1909f076bcfbSAlexander Kozyrev 	}
1910f076bcfbSAlexander Kozyrev 	return rte_flow_error_set(error, ENOTSUP,
1911f076bcfbSAlexander Kozyrev 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1912f076bcfbSAlexander Kozyrev 				  NULL, rte_strerror(ENOTSUP));
1913f076bcfbSAlexander Kozyrev }
1914f076bcfbSAlexander Kozyrev 
1915f076bcfbSAlexander Kozyrev struct rte_flow_template_table *
1916f076bcfbSAlexander Kozyrev rte_flow_template_table_create(uint16_t port_id,
1917f076bcfbSAlexander Kozyrev 			const struct rte_flow_template_table_attr *table_attr,
1918f076bcfbSAlexander Kozyrev 			struct rte_flow_pattern_template *pattern_templates[],
1919f076bcfbSAlexander Kozyrev 			uint8_t nb_pattern_templates,
1920f076bcfbSAlexander Kozyrev 			struct rte_flow_actions_template *actions_templates[],
1921f076bcfbSAlexander Kozyrev 			uint8_t nb_actions_templates,
1922f076bcfbSAlexander Kozyrev 			struct rte_flow_error *error)
1923f076bcfbSAlexander Kozyrev {
1924f076bcfbSAlexander Kozyrev 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1925f076bcfbSAlexander Kozyrev 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1926f076bcfbSAlexander Kozyrev 	struct rte_flow_template_table *table;
1927f076bcfbSAlexander Kozyrev 
1928f076bcfbSAlexander Kozyrev 	if (unlikely(!ops))
1929f076bcfbSAlexander Kozyrev 		return NULL;
1930f076bcfbSAlexander Kozyrev 	if (dev->data->flow_configured == 0) {
19310e21c7c0SDavid Marchand 		FLOW_LOG(INFO,
19320e21c7c0SDavid Marchand 			"Flow engine on port_id=%"PRIu16" is not configured.",
1933f076bcfbSAlexander Kozyrev 			port_id);
1934f076bcfbSAlexander Kozyrev 		rte_flow_error_set(error, EINVAL,
1935f076bcfbSAlexander Kozyrev 				   RTE_FLOW_ERROR_TYPE_STATE,
1936f076bcfbSAlexander Kozyrev 				   NULL, rte_strerror(EINVAL));
1937f076bcfbSAlexander Kozyrev 		return NULL;
1938f076bcfbSAlexander Kozyrev 	}
1939f076bcfbSAlexander Kozyrev 	if (table_attr == NULL) {
19400e21c7c0SDavid Marchand 		FLOW_LOG(ERR,
19410e21c7c0SDavid Marchand 			     "Port %"PRIu16" table attr is NULL.",
1942f076bcfbSAlexander Kozyrev 			     port_id);
1943f076bcfbSAlexander Kozyrev 		rte_flow_error_set(error, EINVAL,
1944f076bcfbSAlexander Kozyrev 				   RTE_FLOW_ERROR_TYPE_ATTR,
1945f076bcfbSAlexander Kozyrev 				   NULL, rte_strerror(EINVAL));
1946f076bcfbSAlexander Kozyrev 		return NULL;
1947f076bcfbSAlexander Kozyrev 	}
1948f076bcfbSAlexander Kozyrev 	if (pattern_templates == NULL) {
19490e21c7c0SDavid Marchand 		FLOW_LOG(ERR,
19500e21c7c0SDavid Marchand 			     "Port %"PRIu16" pattern templates is NULL.",
1951f076bcfbSAlexander Kozyrev 			     port_id);
1952f076bcfbSAlexander Kozyrev 		rte_flow_error_set(error, EINVAL,
1953f076bcfbSAlexander Kozyrev 				   RTE_FLOW_ERROR_TYPE_ATTR,
1954f076bcfbSAlexander Kozyrev 				   NULL, rte_strerror(EINVAL));
1955f076bcfbSAlexander Kozyrev 		return NULL;
1956f076bcfbSAlexander Kozyrev 	}
1957f076bcfbSAlexander Kozyrev 	if (actions_templates == NULL) {
19580e21c7c0SDavid Marchand 		FLOW_LOG(ERR,
19590e21c7c0SDavid Marchand 			     "Port %"PRIu16" actions templates is NULL.",
1960f076bcfbSAlexander Kozyrev 			     port_id);
1961f076bcfbSAlexander Kozyrev 		rte_flow_error_set(error, EINVAL,
1962f076bcfbSAlexander Kozyrev 				   RTE_FLOW_ERROR_TYPE_ATTR,
1963f076bcfbSAlexander Kozyrev 				   NULL, rte_strerror(EINVAL));
1964f076bcfbSAlexander Kozyrev 		return NULL;
1965f076bcfbSAlexander Kozyrev 	}
1966f076bcfbSAlexander Kozyrev 	if (likely(!!ops->template_table_create)) {
1967f076bcfbSAlexander Kozyrev 		table = ops->template_table_create(dev, table_attr,
1968f076bcfbSAlexander Kozyrev 					pattern_templates, nb_pattern_templates,
1969f076bcfbSAlexander Kozyrev 					actions_templates, nb_actions_templates,
1970f076bcfbSAlexander Kozyrev 					error);
1971f076bcfbSAlexander Kozyrev 		if (table == NULL)
1972f076bcfbSAlexander Kozyrev 			flow_err(port_id, -rte_errno, error);
1973ed04fd40SAnkur Dwivedi 
1974ed04fd40SAnkur Dwivedi 		rte_flow_trace_template_table_create(port_id, table_attr,
1975ed04fd40SAnkur Dwivedi 						     pattern_templates,
1976ed04fd40SAnkur Dwivedi 						     nb_pattern_templates,
1977ed04fd40SAnkur Dwivedi 						     actions_templates,
1978ed04fd40SAnkur Dwivedi 						     nb_actions_templates, table);
1979ed04fd40SAnkur Dwivedi 
1980f076bcfbSAlexander Kozyrev 		return table;
1981f076bcfbSAlexander Kozyrev 	}
1982f076bcfbSAlexander Kozyrev 	rte_flow_error_set(error, ENOTSUP,
1983f076bcfbSAlexander Kozyrev 			   RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
1984f076bcfbSAlexander Kozyrev 			   NULL, rte_strerror(ENOTSUP));
1985f076bcfbSAlexander Kozyrev 	return NULL;
1986f076bcfbSAlexander Kozyrev }
1987f076bcfbSAlexander Kozyrev 
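/*
 * Destroying a NULL template table is treated as a successful no-op;
 * otherwise the request is forwarded to the PMD's template_table_destroy
 * callback and the result is reported through flow_err() and tracing.
 */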
1988f076bcfbSAlexander Kozyrev int
1989f076bcfbSAlexander Kozyrev rte_flow_template_table_destroy(uint16_t port_id,
1990f076bcfbSAlexander Kozyrev 				struct rte_flow_template_table *template_table,
1991f076bcfbSAlexander Kozyrev 				struct rte_flow_error *error)
1992f076bcfbSAlexander Kozyrev {
1993f076bcfbSAlexander Kozyrev 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
1994f076bcfbSAlexander Kozyrev 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
1995ed04fd40SAnkur Dwivedi 	int ret;
1996f076bcfbSAlexander Kozyrev 
1997f076bcfbSAlexander Kozyrev 	if (unlikely(!ops))
1998f076bcfbSAlexander Kozyrev 		return -rte_errno;
1999f076bcfbSAlexander Kozyrev 	if (unlikely(template_table == NULL))
2000f076bcfbSAlexander Kozyrev 		return 0;
2001f076bcfbSAlexander Kozyrev 	if (likely(!!ops->template_table_destroy)) {
2002ed04fd40SAnkur Dwivedi 		ret = flow_err(port_id,
2003f076bcfbSAlexander Kozyrev 			       ops->template_table_destroy(dev,
2004f076bcfbSAlexander Kozyrev 							   template_table,
2005f076bcfbSAlexander Kozyrev 							   error),
2006f076bcfbSAlexander Kozyrev 			       error);
2007ed04fd40SAnkur Dwivedi 
2008ed04fd40SAnkur Dwivedi 		rte_flow_trace_template_table_destroy(port_id, template_table,
2009ed04fd40SAnkur Dwivedi 						      ret);
2010ed04fd40SAnkur Dwivedi 
2011ed04fd40SAnkur Dwivedi 		return ret;
2012f076bcfbSAlexander Kozyrev 	}
2013f076bcfbSAlexander Kozyrev 	return rte_flow_error_set(error, ENOTSUP,
2014f076bcfbSAlexander Kozyrev 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
2015f076bcfbSAlexander Kozyrev 				  NULL, rte_strerror(ENOTSUP));
2016f076bcfbSAlexander Kozyrev }
2017197e820cSAlexander Kozyrev 
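/*
 * Group miss actions: forwarded directly to the PMD's group_set_miss_actions
 * callback, which (by its name) installs the actions applied to packets that
 * do not match any rule in the given group.  ENOTSUP is returned when the
 * driver does not implement the callback.
 */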
20188a26a658STomer Shmilovich int
20198a26a658STomer Shmilovich rte_flow_group_set_miss_actions(uint16_t port_id,
20208a26a658STomer Shmilovich 				uint32_t group_id,
20218a26a658STomer Shmilovich 				const struct rte_flow_group_attr *attr,
20228a26a658STomer Shmilovich 				const struct rte_flow_action actions[],
20238a26a658STomer Shmilovich 				struct rte_flow_error *error)
20248a26a658STomer Shmilovich {
20258a26a658STomer Shmilovich 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
20268a26a658STomer Shmilovich 	const struct rte_flow_ops *ops = rte_flow_ops_get(port_id, error);
20278a26a658STomer Shmilovich 
20288a26a658STomer Shmilovich 	if (unlikely(!ops))
20298a26a658STomer Shmilovich 		return -rte_errno;
20308a26a658STomer Shmilovich 	if (likely(!!ops->group_set_miss_actions)) {
20318a26a658STomer Shmilovich 		return flow_err(port_id,
20328a26a658STomer Shmilovich 				ops->group_set_miss_actions(dev, group_id, attr, actions, error),
20338a26a658STomer Shmilovich 				error);
20348a26a658STomer Shmilovich 	}
20358a26a658STomer Shmilovich 	return rte_flow_error_set(error, ENOTSUP,
20368a26a658STomer Shmilovich 				  RTE_FLOW_ERROR_TYPE_UNSPECIFIED,
20378a26a658STomer Shmilovich 				  NULL, rte_strerror(ENOTSUP));
20388a26a658STomer Shmilovich }
20398a26a658STomer Shmilovich 
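/*
 * The asynchronous (queue-based) flow API below dispatches through the
 * per-device fast-path table dev->flow_fp_ops instead of rte_flow_ops_get().
 * Port and callback validation is compiled in only when RTE_FLOW_DEBUG is
 * defined; otherwise the call goes straight to the driver.  A minimal
 * enqueue sketch, assuming a template table "tbl" created as above and
 * pattern/actions matching its templates at index 0 (the trailing
 * rte_flow_push() flushes operations postponed via op_attr):
 *
 *	struct rte_flow_op_attr op = { .postpone = 1 };
 *	struct rte_flow *f = rte_flow_async_create(port_id, queue_id, &op,
 *						    tbl, pattern, 0,
 *						    actions, 0, NULL, &err);
 *	rte_flow_push(port_id, queue_id, &err);
 */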
2040197e820cSAlexander Kozyrev struct rte_flow *
2041197e820cSAlexander Kozyrev rte_flow_async_create(uint16_t port_id,
2042197e820cSAlexander Kozyrev 		      uint32_t queue_id,
2043197e820cSAlexander Kozyrev 		      const struct rte_flow_op_attr *op_attr,
2044197e820cSAlexander Kozyrev 		      struct rte_flow_template_table *template_table,
2045197e820cSAlexander Kozyrev 		      const struct rte_flow_item pattern[],
2046197e820cSAlexander Kozyrev 		      uint8_t pattern_template_index,
2047197e820cSAlexander Kozyrev 		      const struct rte_flow_action actions[],
2048197e820cSAlexander Kozyrev 		      uint8_t actions_template_index,
2049197e820cSAlexander Kozyrev 		      void *user_data,
2050197e820cSAlexander Kozyrev 		      struct rte_flow_error *error)
2051197e820cSAlexander Kozyrev {
2052197e820cSAlexander Kozyrev 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2053197e820cSAlexander Kozyrev 	struct rte_flow *flow;
2054197e820cSAlexander Kozyrev 
2055537bfddaSDariusz Sosnowski #ifdef RTE_FLOW_DEBUG
2056537bfddaSDariusz Sosnowski 	if (!rte_eth_dev_is_valid_port(port_id)) {
2057537bfddaSDariusz Sosnowski 		rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2058537bfddaSDariusz Sosnowski 				   rte_strerror(ENODEV));
2059537bfddaSDariusz Sosnowski 		return NULL;
2060537bfddaSDariusz Sosnowski 	}
2061537bfddaSDariusz Sosnowski 	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->async_create == NULL) {
2062537bfddaSDariusz Sosnowski 		rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2063537bfddaSDariusz Sosnowski 				   rte_strerror(ENOSYS));
2064537bfddaSDariusz Sosnowski 		return NULL;
2065537bfddaSDariusz Sosnowski 	}
2066537bfddaSDariusz Sosnowski #endif
2067537bfddaSDariusz Sosnowski 
2068537bfddaSDariusz Sosnowski 	flow = dev->flow_fp_ops->async_create(dev, queue_id,
2069197e820cSAlexander Kozyrev 					      op_attr, template_table,
2070197e820cSAlexander Kozyrev 					      pattern, pattern_template_index,
2071197e820cSAlexander Kozyrev 					      actions, actions_template_index,
2072197e820cSAlexander Kozyrev 					      user_data, error);
2073ed04fd40SAnkur Dwivedi 
2074ed04fd40SAnkur Dwivedi 	rte_flow_trace_async_create(port_id, queue_id, op_attr, template_table,
2075ed04fd40SAnkur Dwivedi 				    pattern, pattern_template_index, actions,
2076ed04fd40SAnkur Dwivedi 				    actions_template_index, user_data, flow);
2077ed04fd40SAnkur Dwivedi 
2078197e820cSAlexander Kozyrev 	return flow;
2079197e820cSAlexander Kozyrev }
2080197e820cSAlexander Kozyrev 
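/*
 * Same as rte_flow_async_create() except that the rule is placed at an
 * explicit rule_index inside the table; no pattern is supplied in this
 * variant.
 */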
208160261a00SAlexander Kozyrev struct rte_flow *
208260261a00SAlexander Kozyrev rte_flow_async_create_by_index(uint16_t port_id,
208360261a00SAlexander Kozyrev 			       uint32_t queue_id,
208460261a00SAlexander Kozyrev 			       const struct rte_flow_op_attr *op_attr,
208560261a00SAlexander Kozyrev 			       struct rte_flow_template_table *template_table,
208660261a00SAlexander Kozyrev 			       uint32_t rule_index,
208760261a00SAlexander Kozyrev 			       const struct rte_flow_action actions[],
208860261a00SAlexander Kozyrev 			       uint8_t actions_template_index,
208960261a00SAlexander Kozyrev 			       void *user_data,
209060261a00SAlexander Kozyrev 			       struct rte_flow_error *error)
209160261a00SAlexander Kozyrev {
209260261a00SAlexander Kozyrev 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2093*be5ded2fSAlexander Kozyrev 	struct rte_flow *flow;
209460261a00SAlexander Kozyrev 
2095537bfddaSDariusz Sosnowski #ifdef RTE_FLOW_DEBUG
2096537bfddaSDariusz Sosnowski 	if (!rte_eth_dev_is_valid_port(port_id)) {
2097537bfddaSDariusz Sosnowski 		rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2098537bfddaSDariusz Sosnowski 				   rte_strerror(ENODEV));
2099537bfddaSDariusz Sosnowski 		return NULL;
2100537bfddaSDariusz Sosnowski 	}
2101537bfddaSDariusz Sosnowski 	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->async_create_by_index == NULL) {
2102537bfddaSDariusz Sosnowski 		rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2103537bfddaSDariusz Sosnowski 				   rte_strerror(ENOSYS));
2104537bfddaSDariusz Sosnowski 		return NULL;
2105537bfddaSDariusz Sosnowski 	}
2106537bfddaSDariusz Sosnowski #endif
2107537bfddaSDariusz Sosnowski 
2108*be5ded2fSAlexander Kozyrev 	flow = dev->flow_fp_ops->async_create_by_index(dev, queue_id,
210960261a00SAlexander Kozyrev 						       op_attr, template_table, rule_index,
211060261a00SAlexander Kozyrev 						       actions, actions_template_index,
211160261a00SAlexander Kozyrev 						       user_data, error);
2112*be5ded2fSAlexander Kozyrev 
2113*be5ded2fSAlexander Kozyrev 	rte_flow_trace_async_create_by_index(port_id, queue_id, op_attr, template_table, rule_index,
2114*be5ded2fSAlexander Kozyrev 					     actions, actions_template_index, user_data, flow);
2115*be5ded2fSAlexander Kozyrev 
2116*be5ded2fSAlexander Kozyrev 	return flow;
211760261a00SAlexander Kozyrev }
211860261a00SAlexander Kozyrev 
2119933f18dbSAlexander Kozyrev struct rte_flow *
2120933f18dbSAlexander Kozyrev rte_flow_async_create_by_index_with_pattern(uint16_t port_id,
2121933f18dbSAlexander Kozyrev 					    uint32_t queue_id,
2122933f18dbSAlexander Kozyrev 					    const struct rte_flow_op_attr *op_attr,
2123933f18dbSAlexander Kozyrev 					    struct rte_flow_template_table *template_table,
2124933f18dbSAlexander Kozyrev 					    uint32_t rule_index,
2125933f18dbSAlexander Kozyrev 					    const struct rte_flow_item pattern[],
2126933f18dbSAlexander Kozyrev 					    uint8_t pattern_template_index,
2127933f18dbSAlexander Kozyrev 					    const struct rte_flow_action actions[],
2128933f18dbSAlexander Kozyrev 					    uint8_t actions_template_index,
2129933f18dbSAlexander Kozyrev 					    void *user_data,
2130933f18dbSAlexander Kozyrev 					    struct rte_flow_error *error)
2131933f18dbSAlexander Kozyrev {
2132933f18dbSAlexander Kozyrev 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2133*be5ded2fSAlexander Kozyrev 	struct rte_flow *flow;
2134933f18dbSAlexander Kozyrev 
2135933f18dbSAlexander Kozyrev #ifdef RTE_FLOW_DEBUG
2136933f18dbSAlexander Kozyrev 	if (!rte_eth_dev_is_valid_port(port_id)) {
2137933f18dbSAlexander Kozyrev 		rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2138933f18dbSAlexander Kozyrev 				   rte_strerror(ENODEV));
2139933f18dbSAlexander Kozyrev 		return NULL;
2140933f18dbSAlexander Kozyrev 	}
2141933f18dbSAlexander Kozyrev 	if (dev->flow_fp_ops == NULL ||
2142933f18dbSAlexander Kozyrev 	    dev->flow_fp_ops->async_create_by_index_with_pattern == NULL) {
2143933f18dbSAlexander Kozyrev 		rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2144933f18dbSAlexander Kozyrev 				   rte_strerror(ENOSYS));
2145933f18dbSAlexander Kozyrev 		return NULL;
2146933f18dbSAlexander Kozyrev 	}
2147933f18dbSAlexander Kozyrev #endif
2148933f18dbSAlexander Kozyrev 
2149*be5ded2fSAlexander Kozyrev 	flow = dev->flow_fp_ops->async_create_by_index_with_pattern(dev, queue_id, op_attr,
2150933f18dbSAlexander Kozyrev 								    template_table, rule_index,
2151933f18dbSAlexander Kozyrev 								    pattern, pattern_template_index,
2152933f18dbSAlexander Kozyrev 								    actions, actions_template_index,
2153933f18dbSAlexander Kozyrev 								    user_data, error);
2154*be5ded2fSAlexander Kozyrev 
2155*be5ded2fSAlexander Kozyrev 	rte_flow_trace_async_create_by_index_with_pattern(port_id, queue_id, op_attr,
2156*be5ded2fSAlexander Kozyrev 							  template_table, rule_index, pattern,
2157*be5ded2fSAlexander Kozyrev 							  pattern_template_index, actions,
2158*be5ded2fSAlexander Kozyrev 							  actions_template_index, user_data, flow);
2159*be5ded2fSAlexander Kozyrev 
2160*be5ded2fSAlexander Kozyrev 	return flow;
2161933f18dbSAlexander Kozyrev }
2162933f18dbSAlexander Kozyrev 
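/*
 * Asynchronous destruction of a rule previously returned by one of the
 * rte_flow_async_create*() variants.  Like the other fast-path entry points,
 * parameter checks exist only under RTE_FLOW_DEBUG and completion is
 * reported later through rte_flow_pull().
 */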
2163197e820cSAlexander Kozyrev int
2164197e820cSAlexander Kozyrev rte_flow_async_destroy(uint16_t port_id,
2165197e820cSAlexander Kozyrev 		       uint32_t queue_id,
2166197e820cSAlexander Kozyrev 		       const struct rte_flow_op_attr *op_attr,
2167197e820cSAlexander Kozyrev 		       struct rte_flow *flow,
2168197e820cSAlexander Kozyrev 		       void *user_data,
2169197e820cSAlexander Kozyrev 		       struct rte_flow_error *error)
2170197e820cSAlexander Kozyrev {
2171197e820cSAlexander Kozyrev 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2172ed04fd40SAnkur Dwivedi 	int ret;
2173197e820cSAlexander Kozyrev 
2174537bfddaSDariusz Sosnowski #ifdef RTE_FLOW_DEBUG
2175537bfddaSDariusz Sosnowski 	if (!rte_eth_dev_is_valid_port(port_id))
2176537bfddaSDariusz Sosnowski 		return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2177537bfddaSDariusz Sosnowski 					  rte_strerror(ENODEV));
2178537bfddaSDariusz Sosnowski 	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->async_destroy == NULL)
2179537bfddaSDariusz Sosnowski 		return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2180537bfddaSDariusz Sosnowski 					  rte_strerror(ENOSYS));
2181537bfddaSDariusz Sosnowski #endif
2182537bfddaSDariusz Sosnowski 
2183537bfddaSDariusz Sosnowski 	ret = dev->flow_fp_ops->async_destroy(dev, queue_id,
2184197e820cSAlexander Kozyrev 					      op_attr, flow,
2185537bfddaSDariusz Sosnowski 					      user_data, error);
2186ed04fd40SAnkur Dwivedi 
2187ed04fd40SAnkur Dwivedi 	rte_flow_trace_async_destroy(port_id, queue_id, op_attr, flow,
2188ed04fd40SAnkur Dwivedi 				     user_data, ret);
2189ed04fd40SAnkur Dwivedi 
2190ed04fd40SAnkur Dwivedi 	return ret;
2191197e820cSAlexander Kozyrev }
2192197e820cSAlexander Kozyrev 
2193197e820cSAlexander Kozyrev int
21948f257a48SAlexander Kozyrev rte_flow_async_actions_update(uint16_t port_id,
21958f257a48SAlexander Kozyrev 			      uint32_t queue_id,
21968f257a48SAlexander Kozyrev 			      const struct rte_flow_op_attr *op_attr,
21978f257a48SAlexander Kozyrev 			      struct rte_flow *flow,
21988f257a48SAlexander Kozyrev 			      const struct rte_flow_action actions[],
21998f257a48SAlexander Kozyrev 			      uint8_t actions_template_index,
22008f257a48SAlexander Kozyrev 			      void *user_data,
22018f257a48SAlexander Kozyrev 			      struct rte_flow_error *error)
22028f257a48SAlexander Kozyrev {
22038f257a48SAlexander Kozyrev 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
22048f257a48SAlexander Kozyrev 	int ret;
22058f257a48SAlexander Kozyrev 
2206537bfddaSDariusz Sosnowski #ifdef RTE_FLOW_DEBUG
2207537bfddaSDariusz Sosnowski 	if (!rte_eth_dev_is_valid_port(port_id))
2208537bfddaSDariusz Sosnowski 		return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2209537bfddaSDariusz Sosnowski 					  rte_strerror(ENODEV));
2210537bfddaSDariusz Sosnowski 	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->async_actions_update == NULL)
2211537bfddaSDariusz Sosnowski 		return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2212537bfddaSDariusz Sosnowski 					  rte_strerror(ENOSYS));
2213537bfddaSDariusz Sosnowski #endif
2214537bfddaSDariusz Sosnowski 
2215537bfddaSDariusz Sosnowski 	ret = dev->flow_fp_ops->async_actions_update(dev, queue_id, op_attr,
22168f257a48SAlexander Kozyrev 						     flow, actions,
22178f257a48SAlexander Kozyrev 						     actions_template_index,
2218537bfddaSDariusz Sosnowski 						     user_data, error);
22198f257a48SAlexander Kozyrev 
22208f257a48SAlexander Kozyrev 	rte_flow_trace_async_actions_update(port_id, queue_id, op_attr, flow,
22218f257a48SAlexander Kozyrev 					    actions, actions_template_index,
22228f257a48SAlexander Kozyrev 					    user_data, ret);
22238f257a48SAlexander Kozyrev 
22248f257a48SAlexander Kozyrev 	return ret;
22258f257a48SAlexander Kozyrev }
22268f257a48SAlexander Kozyrev 
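/*
 * rte_flow_push() asks the driver to flush operations that were enqueued on
 * the given queue with the postpone bit set; it uses the same fast-path
 * dispatch and RTE_FLOW_DEBUG-only validation as the enqueue functions.
 */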
22278f257a48SAlexander Kozyrev int
2228197e820cSAlexander Kozyrev rte_flow_push(uint16_t port_id,
2229197e820cSAlexander Kozyrev 	      uint32_t queue_id,
2230197e820cSAlexander Kozyrev 	      struct rte_flow_error *error)
2231197e820cSAlexander Kozyrev {
2232197e820cSAlexander Kozyrev 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2233ed04fd40SAnkur Dwivedi 	int ret;
2234197e820cSAlexander Kozyrev 
2235537bfddaSDariusz Sosnowski #ifdef RTE_FLOW_DEBUG
2236537bfddaSDariusz Sosnowski 	if (!rte_eth_dev_is_valid_port(port_id))
2237537bfddaSDariusz Sosnowski 		return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2238537bfddaSDariusz Sosnowski 					  rte_strerror(ENODEV));
2239537bfddaSDariusz Sosnowski 	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->push == NULL)
2240537bfddaSDariusz Sosnowski 		return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2241537bfddaSDariusz Sosnowski 					  rte_strerror(ENOSYS));
2242537bfddaSDariusz Sosnowski #endif
2243537bfddaSDariusz Sosnowski 
2244537bfddaSDariusz Sosnowski 	ret = dev->flow_fp_ops->push(dev, queue_id, error);
2245ed04fd40SAnkur Dwivedi 
2246ed04fd40SAnkur Dwivedi 	rte_flow_trace_push(port_id, queue_id, ret);
2247ed04fd40SAnkur Dwivedi 
2248ed04fd40SAnkur Dwivedi 	return ret;
2249197e820cSAlexander Kozyrev }
2250197e820cSAlexander Kozyrev 
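/*
 * rte_flow_pull() retrieves completion results for previously enqueued
 * operations.  A small polling sketch, assuming the driver returns the
 * number of results written into res[] (negative errno on failure);
 * handle_failure() is a hypothetical application callback:
 *
 *	struct rte_flow_op_result res[32];
 *	int n = rte_flow_pull(port_id, queue_id, res, RTE_DIM(res), &err);
 *	for (int i = 0; i < n; i++)
 *		if (res[i].status != RTE_FLOW_OP_SUCCESS)
 *			handle_failure(res[i].user_data);
 */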
2251197e820cSAlexander Kozyrev int
2252197e820cSAlexander Kozyrev rte_flow_pull(uint16_t port_id,
2253197e820cSAlexander Kozyrev 	      uint32_t queue_id,
2254197e820cSAlexander Kozyrev 	      struct rte_flow_op_result res[],
2255197e820cSAlexander Kozyrev 	      uint16_t n_res,
2256197e820cSAlexander Kozyrev 	      struct rte_flow_error *error)
2257197e820cSAlexander Kozyrev {
2258197e820cSAlexander Kozyrev 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2259197e820cSAlexander Kozyrev 	int ret;
2260197e820cSAlexander Kozyrev 
2261537bfddaSDariusz Sosnowski #ifdef RTE_FLOW_DEBUG
2262537bfddaSDariusz Sosnowski 	if (!rte_eth_dev_is_valid_port(port_id))
2263537bfddaSDariusz Sosnowski 		return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2264537bfddaSDariusz Sosnowski 					  rte_strerror(ENODEV));
2265537bfddaSDariusz Sosnowski 	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->pull == NULL)
2266537bfddaSDariusz Sosnowski 		return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2267537bfddaSDariusz Sosnowski 					  rte_strerror(ENOSYS));
2268537bfddaSDariusz Sosnowski #endif
2269ed04fd40SAnkur Dwivedi 
2270537bfddaSDariusz Sosnowski 	ret = dev->flow_fp_ops->pull(dev, queue_id, res, n_res, error);
2271ed04fd40SAnkur Dwivedi 
2272537bfddaSDariusz Sosnowski 	rte_flow_trace_pull(port_id, queue_id, res, n_res, ret);
2273537bfddaSDariusz Sosnowski 
2274537bfddaSDariusz Sosnowski 	return ret;
2275197e820cSAlexander Kozyrev }
227613cd6d5cSAlexander Kozyrev 
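/*
 * Asynchronous indirect action handles: create/destroy/update/query are all
 * enqueued on a flow queue through dev->flow_fp_ops, mirroring the
 * synchronous rte_flow_action_handle_*() API but completing via
 * rte_flow_pull().
 */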
227713cd6d5cSAlexander Kozyrev struct rte_flow_action_handle *
227813cd6d5cSAlexander Kozyrev rte_flow_async_action_handle_create(uint16_t port_id,
227913cd6d5cSAlexander Kozyrev 		uint32_t queue_id,
228013cd6d5cSAlexander Kozyrev 		const struct rte_flow_op_attr *op_attr,
228113cd6d5cSAlexander Kozyrev 		const struct rte_flow_indir_action_conf *indir_action_conf,
228213cd6d5cSAlexander Kozyrev 		const struct rte_flow_action *action,
228313cd6d5cSAlexander Kozyrev 		void *user_data,
228413cd6d5cSAlexander Kozyrev 		struct rte_flow_error *error)
228513cd6d5cSAlexander Kozyrev {
228613cd6d5cSAlexander Kozyrev 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
228713cd6d5cSAlexander Kozyrev 	struct rte_flow_action_handle *handle;
228813cd6d5cSAlexander Kozyrev 
2289537bfddaSDariusz Sosnowski #ifdef RTE_FLOW_DEBUG
2290537bfddaSDariusz Sosnowski 	if (!rte_eth_dev_is_valid_port(port_id)) {
2291537bfddaSDariusz Sosnowski 		rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2292537bfddaSDariusz Sosnowski 				   rte_strerror(ENODEV));
2293537bfddaSDariusz Sosnowski 		return NULL;
2294537bfddaSDariusz Sosnowski 	}
2295537bfddaSDariusz Sosnowski 	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->async_action_handle_create == NULL) {
2296537bfddaSDariusz Sosnowski 		rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2297537bfddaSDariusz Sosnowski 				   rte_strerror(ENOSYS));
2298537bfddaSDariusz Sosnowski 		return NULL;
2299537bfddaSDariusz Sosnowski 	}
2300537bfddaSDariusz Sosnowski #endif
2301537bfddaSDariusz Sosnowski 
2302537bfddaSDariusz Sosnowski 	handle = dev->flow_fp_ops->async_action_handle_create(dev, queue_id, op_attr,
2303537bfddaSDariusz Sosnowski 							      indir_action_conf, action,
2304537bfddaSDariusz Sosnowski 							      user_data, error);
2305ed04fd40SAnkur Dwivedi 
2306ed04fd40SAnkur Dwivedi 	rte_flow_trace_async_action_handle_create(port_id, queue_id, op_attr,
2307ed04fd40SAnkur Dwivedi 						  indir_action_conf, action,
2308ed04fd40SAnkur Dwivedi 						  user_data, handle);
2309ed04fd40SAnkur Dwivedi 
231013cd6d5cSAlexander Kozyrev 	return handle;
231113cd6d5cSAlexander Kozyrev }
231213cd6d5cSAlexander Kozyrev 
231313cd6d5cSAlexander Kozyrev int
231413cd6d5cSAlexander Kozyrev rte_flow_async_action_handle_destroy(uint16_t port_id,
231513cd6d5cSAlexander Kozyrev 		uint32_t queue_id,
231613cd6d5cSAlexander Kozyrev 		const struct rte_flow_op_attr *op_attr,
231713cd6d5cSAlexander Kozyrev 		struct rte_flow_action_handle *action_handle,
231813cd6d5cSAlexander Kozyrev 		void *user_data,
231913cd6d5cSAlexander Kozyrev 		struct rte_flow_error *error)
232013cd6d5cSAlexander Kozyrev {
232113cd6d5cSAlexander Kozyrev 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
232213cd6d5cSAlexander Kozyrev 	int ret;
232313cd6d5cSAlexander Kozyrev 
2324537bfddaSDariusz Sosnowski #ifdef RTE_FLOW_DEBUG
2325537bfddaSDariusz Sosnowski 	if (!rte_eth_dev_is_valid_port(port_id))
2326537bfddaSDariusz Sosnowski 		return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2327537bfddaSDariusz Sosnowski 					  rte_strerror(ENODEV));
2328537bfddaSDariusz Sosnowski 	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->async_action_handle_destroy == NULL)
2329537bfddaSDariusz Sosnowski 		return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2330537bfddaSDariusz Sosnowski 					  rte_strerror(ENOSYS));
2331537bfddaSDariusz Sosnowski #endif
2332537bfddaSDariusz Sosnowski 
2333537bfddaSDariusz Sosnowski 	ret = dev->flow_fp_ops->async_action_handle_destroy(dev, queue_id, op_attr,
233413cd6d5cSAlexander Kozyrev 							    action_handle, user_data, error);
2335ed04fd40SAnkur Dwivedi 
2336ed04fd40SAnkur Dwivedi 	rte_flow_trace_async_action_handle_destroy(port_id, queue_id, op_attr,
2337ed04fd40SAnkur Dwivedi 						   action_handle, user_data, ret);
2338ed04fd40SAnkur Dwivedi 
2339ed04fd40SAnkur Dwivedi 	return ret;
234013cd6d5cSAlexander Kozyrev }
234113cd6d5cSAlexander Kozyrev 
234213cd6d5cSAlexander Kozyrev int
234313cd6d5cSAlexander Kozyrev rte_flow_async_action_handle_update(uint16_t port_id,
234413cd6d5cSAlexander Kozyrev 		uint32_t queue_id,
234513cd6d5cSAlexander Kozyrev 		const struct rte_flow_op_attr *op_attr,
234613cd6d5cSAlexander Kozyrev 		struct rte_flow_action_handle *action_handle,
234713cd6d5cSAlexander Kozyrev 		const void *update,
234813cd6d5cSAlexander Kozyrev 		void *user_data,
234913cd6d5cSAlexander Kozyrev 		struct rte_flow_error *error)
235013cd6d5cSAlexander Kozyrev {
235113cd6d5cSAlexander Kozyrev 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
235213cd6d5cSAlexander Kozyrev 	int ret;
235313cd6d5cSAlexander Kozyrev 
2354537bfddaSDariusz Sosnowski #ifdef RTE_FLOW_DEBUG
2355537bfddaSDariusz Sosnowski 	if (!rte_eth_dev_is_valid_port(port_id))
2356537bfddaSDariusz Sosnowski 		return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2357537bfddaSDariusz Sosnowski 					  rte_strerror(ENODEV));
2358537bfddaSDariusz Sosnowski 	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->async_action_handle_update == NULL)
2359537bfddaSDariusz Sosnowski 		return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2360537bfddaSDariusz Sosnowski 					  rte_strerror(ENOSYS));
2361537bfddaSDariusz Sosnowski #endif
2362537bfddaSDariusz Sosnowski 
2363537bfddaSDariusz Sosnowski 	ret = dev->flow_fp_ops->async_action_handle_update(dev, queue_id, op_attr,
236413cd6d5cSAlexander Kozyrev 							   action_handle, update, user_data, error);
2365ed04fd40SAnkur Dwivedi 
2366ed04fd40SAnkur Dwivedi 	rte_flow_trace_async_action_handle_update(port_id, queue_id, op_attr,
2367ed04fd40SAnkur Dwivedi 						  action_handle, update,
2368ed04fd40SAnkur Dwivedi 						  user_data, ret);
2369ed04fd40SAnkur Dwivedi 
2370ed04fd40SAnkur Dwivedi 	return ret;
237113cd6d5cSAlexander Kozyrev }
2372c9dc0384SSuanming Mou 
2373c9dc0384SSuanming Mou int
2374c9dc0384SSuanming Mou rte_flow_async_action_handle_query(uint16_t port_id,
2375c9dc0384SSuanming Mou 		uint32_t queue_id,
2376c9dc0384SSuanming Mou 		const struct rte_flow_op_attr *op_attr,
2377c9dc0384SSuanming Mou 		const struct rte_flow_action_handle *action_handle,
2378c9dc0384SSuanming Mou 		void *data,
2379c9dc0384SSuanming Mou 		void *user_data,
2380c9dc0384SSuanming Mou 		struct rte_flow_error *error)
2381c9dc0384SSuanming Mou {
2382c9dc0384SSuanming Mou 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2383c9dc0384SSuanming Mou 	int ret;
2384c9dc0384SSuanming Mou 
2385537bfddaSDariusz Sosnowski #ifdef RTE_FLOW_DEBUG
2386537bfddaSDariusz Sosnowski 	if (!rte_eth_dev_is_valid_port(port_id))
2387537bfddaSDariusz Sosnowski 		return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2388537bfddaSDariusz Sosnowski 					  rte_strerror(ENODEV));
2389537bfddaSDariusz Sosnowski 	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->async_action_handle_query == NULL)
2390537bfddaSDariusz Sosnowski 		return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2391537bfddaSDariusz Sosnowski 					  rte_strerror(ENOSYS));
2392537bfddaSDariusz Sosnowski #endif
2393537bfddaSDariusz Sosnowski 
2394537bfddaSDariusz Sosnowski 	ret = dev->flow_fp_ops->async_action_handle_query(dev, queue_id, op_attr,
2395c9dc0384SSuanming Mou 							  action_handle, data, user_data, error);
2396ed04fd40SAnkur Dwivedi 
2397ed04fd40SAnkur Dwivedi 	rte_flow_trace_async_action_handle_query(port_id, queue_id, op_attr,
2398ed04fd40SAnkur Dwivedi 						 action_handle, data, user_data,
2399ed04fd40SAnkur Dwivedi 						 ret);
2400ed04fd40SAnkur Dwivedi 
2401ed04fd40SAnkur Dwivedi 	return ret;
2402c9dc0384SSuanming Mou }
2403044c47b4SGregory Etelson 
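/*
 * Combined query-and-update of an indirect action handle: at least one of
 * update/query must be non-NULL, and rte_flow_query_update_mode selects
 * whether the query or the update is performed first.
 */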
2404044c47b4SGregory Etelson int
2405044c47b4SGregory Etelson rte_flow_action_handle_query_update(uint16_t port_id,
2406044c47b4SGregory Etelson 				    struct rte_flow_action_handle *handle,
2407044c47b4SGregory Etelson 				    const void *update, void *query,
2408044c47b4SGregory Etelson 				    enum rte_flow_query_update_mode mode,
2409044c47b4SGregory Etelson 				    struct rte_flow_error *error)
2410044c47b4SGregory Etelson {
2411044c47b4SGregory Etelson 	int ret;
2412044c47b4SGregory Etelson 	struct rte_eth_dev *dev;
2413044c47b4SGregory Etelson 	const struct rte_flow_ops *ops;
2414044c47b4SGregory Etelson 
2415044c47b4SGregory Etelson 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2416044c47b4SGregory Etelson 	if (!handle)
2417044c47b4SGregory Etelson 		return -EINVAL;
2418044c47b4SGregory Etelson 	if (!update && !query)
2419044c47b4SGregory Etelson 		return -EINVAL;
2420044c47b4SGregory Etelson 	dev = &rte_eth_devices[port_id];
2421044c47b4SGregory Etelson 	ops = rte_flow_ops_get(port_id, error);
2422044c47b4SGregory Etelson 	if (!ops || !ops->action_handle_query_update)
2423044c47b4SGregory Etelson 		return -ENOTSUP;
2424044c47b4SGregory Etelson 	ret = ops->action_handle_query_update(dev, handle, update,
2425044c47b4SGregory Etelson 					      query, mode, error);
2426044c47b4SGregory Etelson 	return flow_err(port_id, ret, error);
2427044c47b4SGregory Etelson }
2428044c47b4SGregory Etelson 
2429044c47b4SGregory Etelson int
2430044c47b4SGregory Etelson rte_flow_async_action_handle_query_update(uint16_t port_id, uint32_t queue_id,
2431044c47b4SGregory Etelson 					  const struct rte_flow_op_attr *attr,
2432044c47b4SGregory Etelson 					  struct rte_flow_action_handle *handle,
2433044c47b4SGregory Etelson 					  const void *update, void *query,
2434044c47b4SGregory Etelson 					  enum rte_flow_query_update_mode mode,
2435044c47b4SGregory Etelson 					  void *user_data,
2436044c47b4SGregory Etelson 					  struct rte_flow_error *error)
2437044c47b4SGregory Etelson {
2438537bfddaSDariusz Sosnowski 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
2439044c47b4SGregory Etelson 
2440537bfddaSDariusz Sosnowski #ifdef RTE_FLOW_DEBUG
2441537bfddaSDariusz Sosnowski 	if (!rte_eth_dev_is_valid_port(port_id))
2442537bfddaSDariusz Sosnowski 		return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2443537bfddaSDariusz Sosnowski 					  rte_strerror(ENODEV));
2444537bfddaSDariusz Sosnowski 	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->async_action_handle_query_update == NULL)
2445537bfddaSDariusz Sosnowski 		return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2446537bfddaSDariusz Sosnowski 					  rte_strerror(ENOSYS));
2447537bfddaSDariusz Sosnowski #endif
2448537bfddaSDariusz Sosnowski 
2449537bfddaSDariusz Sosnowski 	return dev->flow_fp_ops->async_action_handle_query_update(dev, queue_id, attr,
2450044c47b4SGregory Etelson 								  handle, update,
2451044c47b4SGregory Etelson 								  query, mode,
2452044c47b4SGregory Etelson 								  user_data, error);
2453044c47b4SGregory Etelson }
245472a3dec7SGregory Etelson 
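/*
 * Indirect action *list* handles group several actions behind a single
 * handle.  The synchronous create/destroy variants below go through
 * rte_flow_ops_get() and return ENOTSUP when the driver lacks the callback;
 * the asynchronous variants again use the dev->flow_fp_ops fast path.
 */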
245572a3dec7SGregory Etelson struct rte_flow_action_list_handle *
245672a3dec7SGregory Etelson rte_flow_action_list_handle_create(uint16_t port_id,
245772a3dec7SGregory Etelson 				   const
245872a3dec7SGregory Etelson 				   struct rte_flow_indir_action_conf *conf,
245972a3dec7SGregory Etelson 				   const struct rte_flow_action *actions,
246072a3dec7SGregory Etelson 				   struct rte_flow_error *error)
246172a3dec7SGregory Etelson {
246272a3dec7SGregory Etelson 	int ret;
246372a3dec7SGregory Etelson 	struct rte_eth_dev *dev;
246472a3dec7SGregory Etelson 	const struct rte_flow_ops *ops;
246572a3dec7SGregory Etelson 	struct rte_flow_action_list_handle *handle;
246672a3dec7SGregory Etelson 
246772a3dec7SGregory Etelson 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, NULL);
246872a3dec7SGregory Etelson 	ops = rte_flow_ops_get(port_id, error);
246972a3dec7SGregory Etelson 	if (!ops || !ops->action_list_handle_create) {
247072a3dec7SGregory Etelson 		rte_flow_error_set(error, ENOTSUP,
247172a3dec7SGregory Etelson 				   RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
247272a3dec7SGregory Etelson 				   "action_list handle not supported");
247372a3dec7SGregory Etelson 		return NULL;
247472a3dec7SGregory Etelson 	}
247572a3dec7SGregory Etelson 	dev = &rte_eth_devices[port_id];
247672a3dec7SGregory Etelson 	handle = ops->action_list_handle_create(dev, conf, actions, error);
247772a3dec7SGregory Etelson 	ret = flow_err(port_id, -rte_errno, error);
247872a3dec7SGregory Etelson 	rte_flow_trace_action_list_handle_create(port_id, conf, actions, ret);
247972a3dec7SGregory Etelson 	return handle;
248072a3dec7SGregory Etelson }
248172a3dec7SGregory Etelson 
248272a3dec7SGregory Etelson int
248372a3dec7SGregory Etelson rte_flow_action_list_handle_destroy(uint16_t port_id,
248472a3dec7SGregory Etelson 				    struct rte_flow_action_list_handle *handle,
248572a3dec7SGregory Etelson 				    struct rte_flow_error *error)
248672a3dec7SGregory Etelson {
248772a3dec7SGregory Etelson 	int ret;
248872a3dec7SGregory Etelson 	struct rte_eth_dev *dev;
248972a3dec7SGregory Etelson 	const struct rte_flow_ops *ops;
249072a3dec7SGregory Etelson 
249172a3dec7SGregory Etelson 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
249272a3dec7SGregory Etelson 	ops = rte_flow_ops_get(port_id, error);
249372a3dec7SGregory Etelson 	if (!ops || !ops->action_list_handle_destroy)
249472a3dec7SGregory Etelson 		return rte_flow_error_set(error, ENOTSUP,
249572a3dec7SGregory Etelson 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
249672a3dec7SGregory Etelson 					  "action_list handle not supported");
249772a3dec7SGregory Etelson 	dev = &rte_eth_devices[port_id];
249872a3dec7SGregory Etelson 	ret = ops->action_list_handle_destroy(dev, handle, error);
249972a3dec7SGregory Etelson 	ret = flow_err(port_id, ret, error);
250072a3dec7SGregory Etelson 	rte_flow_trace_action_list_handle_destroy(port_id, handle, ret);
250172a3dec7SGregory Etelson 	return ret;
250272a3dec7SGregory Etelson }
250372a3dec7SGregory Etelson 
250472a3dec7SGregory Etelson struct rte_flow_action_list_handle *
250572a3dec7SGregory Etelson rte_flow_async_action_list_handle_create(uint16_t port_id, uint32_t queue_id,
250672a3dec7SGregory Etelson 					 const struct rte_flow_op_attr *attr,
250772a3dec7SGregory Etelson 					 const struct rte_flow_indir_action_conf *conf,
250872a3dec7SGregory Etelson 					 const struct rte_flow_action *actions,
250972a3dec7SGregory Etelson 					 void *user_data,
251072a3dec7SGregory Etelson 					 struct rte_flow_error *error)
251172a3dec7SGregory Etelson {
2512537bfddaSDariusz Sosnowski 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
251372a3dec7SGregory Etelson 	struct rte_flow_action_list_handle *handle;
2514537bfddaSDariusz Sosnowski 	int ret;
251572a3dec7SGregory Etelson 
2516537bfddaSDariusz Sosnowski #ifdef RTE_FLOW_DEBUG
2517537bfddaSDariusz Sosnowski 	if (!rte_eth_dev_is_valid_port(port_id)) {
2518537bfddaSDariusz Sosnowski 		rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2519537bfddaSDariusz Sosnowski 				   rte_strerror(ENODEV));
252072a3dec7SGregory Etelson 		return NULL;
252172a3dec7SGregory Etelson 	}
2522537bfddaSDariusz Sosnowski 	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->async_action_list_handle_create == NULL) {
2523537bfddaSDariusz Sosnowski 		rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2524537bfddaSDariusz Sosnowski 				   rte_strerror(ENOSYS));
2525537bfddaSDariusz Sosnowski 		return NULL;
2526537bfddaSDariusz Sosnowski 	}
2527537bfddaSDariusz Sosnowski #endif
2528537bfddaSDariusz Sosnowski 
2529537bfddaSDariusz Sosnowski 	handle = dev->flow_fp_ops->async_action_list_handle_create(dev, queue_id, attr, conf,
253072a3dec7SGregory Etelson 								   actions, user_data,
253172a3dec7SGregory Etelson 								   error);
253272a3dec7SGregory Etelson 	ret = flow_err(port_id, -rte_errno, error);
2533537bfddaSDariusz Sosnowski 
253472a3dec7SGregory Etelson 	rte_flow_trace_async_action_list_handle_create(port_id, queue_id, attr,
253572a3dec7SGregory Etelson 						       conf, actions, user_data,
253672a3dec7SGregory Etelson 						       ret);
253772a3dec7SGregory Etelson 	return handle;
253872a3dec7SGregory Etelson }
253972a3dec7SGregory Etelson 
254072a3dec7SGregory Etelson int
254172a3dec7SGregory Etelson rte_flow_async_action_list_handle_destroy(uint16_t port_id, uint32_t queue_id,
254272a3dec7SGregory Etelson 				 const struct rte_flow_op_attr *op_attr,
254372a3dec7SGregory Etelson 				 struct rte_flow_action_list_handle *handle,
254472a3dec7SGregory Etelson 				 void *user_data, struct rte_flow_error *error)
254572a3dec7SGregory Etelson {
2546537bfddaSDariusz Sosnowski 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
254772a3dec7SGregory Etelson 	int ret;
254872a3dec7SGregory Etelson 
2549537bfddaSDariusz Sosnowski #ifdef RTE_FLOW_DEBUG
2550537bfddaSDariusz Sosnowski 	if (!rte_eth_dev_is_valid_port(port_id))
2551537bfddaSDariusz Sosnowski 		return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2552537bfddaSDariusz Sosnowski 					  rte_strerror(ENODEV));
2553537bfddaSDariusz Sosnowski 	if (dev->flow_fp_ops == NULL || dev->flow_fp_ops->async_action_list_handle_destroy == NULL)
2554537bfddaSDariusz Sosnowski 		return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2555537bfddaSDariusz Sosnowski 					  rte_strerror(ENOSYS));
2556537bfddaSDariusz Sosnowski #endif
2557537bfddaSDariusz Sosnowski 
2558537bfddaSDariusz Sosnowski 	ret = dev->flow_fp_ops->async_action_list_handle_destroy(dev, queue_id, op_attr,
255972a3dec7SGregory Etelson 								 handle, user_data, error);
2560537bfddaSDariusz Sosnowski 
256172a3dec7SGregory Etelson 	rte_flow_trace_async_action_list_handle_destroy(port_id, queue_id,
256272a3dec7SGregory Etelson 							op_attr, handle,
256372a3dec7SGregory Etelson 							user_data, ret);
256472a3dec7SGregory Etelson 	return ret;
256572a3dec7SGregory Etelson }
256672a3dec7SGregory Etelson 
256772a3dec7SGregory Etelson int
256872a3dec7SGregory Etelson rte_flow_action_list_handle_query_update(uint16_t port_id,
256972a3dec7SGregory Etelson 			 const struct rte_flow_action_list_handle *handle,
257072a3dec7SGregory Etelson 			 const void **update, void **query,
257172a3dec7SGregory Etelson 			 enum rte_flow_query_update_mode mode,
257272a3dec7SGregory Etelson 			 struct rte_flow_error *error)
257372a3dec7SGregory Etelson {
257472a3dec7SGregory Etelson 	int ret;
257572a3dec7SGregory Etelson 	struct rte_eth_dev *dev;
257672a3dec7SGregory Etelson 	const struct rte_flow_ops *ops;
257772a3dec7SGregory Etelson 
257872a3dec7SGregory Etelson 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
257972a3dec7SGregory Etelson 	ops = rte_flow_ops_get(port_id, error);
258072a3dec7SGregory Etelson 	if (!ops || !ops->action_list_handle_query_update)
258172a3dec7SGregory Etelson 		return rte_flow_error_set(error, ENOTSUP,
258272a3dec7SGregory Etelson 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
258372a3dec7SGregory Etelson 					  "action_list query_update not supported");
258472a3dec7SGregory Etelson 	dev = &rte_eth_devices[port_id];
258572a3dec7SGregory Etelson 	ret = ops->action_list_handle_query_update(dev, handle, update, query,
258672a3dec7SGregory Etelson 						   mode, error);
258772a3dec7SGregory Etelson 	ret = flow_err(port_id, ret, error);
258872a3dec7SGregory Etelson 	rte_flow_trace_action_list_handle_query_update(port_id, handle, update,
258972a3dec7SGregory Etelson 						       query, mode, ret);
259072a3dec7SGregory Etelson 	return ret;
259172a3dec7SGregory Etelson }
259272a3dec7SGregory Etelson 
259372a3dec7SGregory Etelson int
259472a3dec7SGregory Etelson rte_flow_async_action_list_handle_query_update(uint16_t port_id, uint32_t queue_id,
259572a3dec7SGregory Etelson 			 const struct rte_flow_op_attr *attr,
259672a3dec7SGregory Etelson 			 const struct rte_flow_action_list_handle *handle,
259772a3dec7SGregory Etelson 			 const void **update, void **query,
259872a3dec7SGregory Etelson 			 enum rte_flow_query_update_mode mode,
259972a3dec7SGregory Etelson 			 void *user_data, struct rte_flow_error *error)
260072a3dec7SGregory Etelson {
2601537bfddaSDariusz Sosnowski 	struct rte_eth_dev *dev = &rte_eth_devices[port_id];
260272a3dec7SGregory Etelson 	int ret;
260372a3dec7SGregory Etelson 
2604537bfddaSDariusz Sosnowski #ifdef RTE_FLOW_DEBUG
2605537bfddaSDariusz Sosnowski 	if (!rte_eth_dev_is_valid_port(port_id))
2606537bfddaSDariusz Sosnowski 		return rte_flow_error_set(error, ENODEV, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2607537bfddaSDariusz Sosnowski 					  rte_strerror(ENODEV));
2608537bfddaSDariusz Sosnowski 	if (dev->flow_fp_ops == NULL ||
2609537bfddaSDariusz Sosnowski 	    dev->flow_fp_ops->async_action_list_handle_query_update == NULL)
2610537bfddaSDariusz Sosnowski 		return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2611537bfddaSDariusz Sosnowski 					  rte_strerror(ENOSYS));
2612537bfddaSDariusz Sosnowski #endif
2613537bfddaSDariusz Sosnowski 
2614537bfddaSDariusz Sosnowski 	ret = dev->flow_fp_ops->async_action_list_handle_query_update(dev, queue_id, attr,
261572a3dec7SGregory Etelson 								      handle, update, query,
261672a3dec7SGregory Etelson 								      mode, user_data,
261772a3dec7SGregory Etelson 								      error);
2618537bfddaSDariusz Sosnowski 
261972a3dec7SGregory Etelson 	rte_flow_trace_async_action_list_handle_query_update(port_id, queue_id,
262072a3dec7SGregory Etelson 							     attr, handle,
262172a3dec7SGregory Etelson 							     update, query,
262272a3dec7SGregory Etelson 							     mode, user_data,
262372a3dec7SGregory Etelson 							     ret);
262472a3dec7SGregory Etelson 	return ret;
262572a3dec7SGregory Etelson }
2626ffe18b05SOri Kam 
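/*
 * rte_flow_calc_table_hash() asks the driver to compute the hash a given
 * pattern would produce for the specified template table and pattern
 * template index, writing the result into *hash.
 */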
2627ffe18b05SOri Kam int
2628ffe18b05SOri Kam rte_flow_calc_table_hash(uint16_t port_id, const struct rte_flow_template_table *table,
2629ffe18b05SOri Kam 			 const struct rte_flow_item pattern[], uint8_t pattern_template_index,
2630ffe18b05SOri Kam 			 uint32_t *hash, struct rte_flow_error *error)
2631ffe18b05SOri Kam {
2632ffe18b05SOri Kam 	int ret;
2633ffe18b05SOri Kam 	struct rte_eth_dev *dev;
2634ffe18b05SOri Kam 	const struct rte_flow_ops *ops;
2635ffe18b05SOri Kam 
2636ffe18b05SOri Kam 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
2637ffe18b05SOri Kam 	ops = rte_flow_ops_get(port_id, error);
2638ffe18b05SOri Kam 	if (!ops || !ops->flow_calc_table_hash)
2639ffe18b05SOri Kam 		return rte_flow_error_set(error, ENOTSUP,
2640ffe18b05SOri Kam 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2641ffe18b05SOri Kam 					  "flow_calc_table_hash not supported");
2642ffe18b05SOri Kam 	dev = &rte_eth_devices[port_id];
2643ffe18b05SOri Kam 	ret = ops->flow_calc_table_hash(dev, table, pattern, pattern_template_index,
2644ffe18b05SOri Kam 					hash, error);
2645ffe18b05SOri Kam 	return flow_err(port_id, ret, error);
2646ffe18b05SOri Kam }
2647537bfddaSDariusz Sosnowski 
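/*
 * rte_flow_calc_encap_hash() computes the hash value a driver would place in
 * the requested encapsulation field.  The destination field must be one of
 * the defined rte_flow_encap_hash_field values and hash_len must match it:
 * 2 bytes for RTE_FLOW_ENCAP_HASH_FIELD_SRC_PORT and 1 byte for
 * RTE_FLOW_ENCAP_HASH_FIELD_NVGRE_FLOW_ID.
 */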
26489733f099SOri Kam int
26499733f099SOri Kam rte_flow_calc_encap_hash(uint16_t port_id, const struct rte_flow_item pattern[],
26509733f099SOri Kam 			 enum rte_flow_encap_hash_field dest_field, uint8_t hash_len,
26519733f099SOri Kam 			 uint8_t *hash, struct rte_flow_error *error)
26529733f099SOri Kam {
26539733f099SOri Kam 	int ret;
26549733f099SOri Kam 	struct rte_eth_dev *dev;
26559733f099SOri Kam 	const struct rte_flow_ops *ops;
26569733f099SOri Kam 
26579733f099SOri Kam 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
26589733f099SOri Kam 	ops = rte_flow_ops_get(port_id, error);
26599733f099SOri Kam 	if (!ops || !ops->flow_calc_encap_hash)
26609733f099SOri Kam 		return rte_flow_error_set(error, ENOTSUP,
26619733f099SOri Kam 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
26629733f099SOri Kam 					  "calc encap hash is not supported");
26639733f099SOri Kam 	if (dest_field > RTE_FLOW_ENCAP_HASH_FIELD_NVGRE_FLOW_ID)
26649733f099SOri Kam 		return rte_flow_error_set(error, EINVAL,
26659733f099SOri Kam 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
26669733f099SOri Kam 					  "hash dest field is not defined");
26679733f099SOri Kam 	if ((dest_field == RTE_FLOW_ENCAP_HASH_FIELD_SRC_PORT && hash_len != 2) ||
26689733f099SOri Kam 	    (dest_field == RTE_FLOW_ENCAP_HASH_FIELD_NVGRE_FLOW_ID && hash_len != 1))
26699733f099SOri Kam 		return rte_flow_error_set(error, EINVAL,
26709733f099SOri Kam 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
26719733f099SOri Kam 					  "hash len doesn't match the requested field len");
26729733f099SOri Kam 	dev = &rte_eth_devices[port_id];
26739733f099SOri Kam 	ret = ops->flow_calc_encap_hash(dev, pattern, dest_field, hash, error);
26749733f099SOri Kam 	return flow_err(port_id, ret, error);
26759733f099SOri Kam }
26769733f099SOri Kam 
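/*
 * Template table resizing: a table is resizable only when it was created
 * with the RTE_FLOW_TABLE_SPECIALIZE_RESIZABLE bit in its specialize
 * attribute.  The intended sequence, as suggested by the API names, is to
 * call rte_flow_template_table_resize() with the new rule capacity, update
 * rules that predate the resize with rte_flow_async_update_resized(), and
 * finally call rte_flow_template_table_resize_complete().
 */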
267799231e48SGregory Etelson bool
267899231e48SGregory Etelson rte_flow_template_table_resizable(__rte_unused uint16_t port_id,
267999231e48SGregory Etelson 				  const struct rte_flow_template_table_attr *tbl_attr)
268099231e48SGregory Etelson {
268199231e48SGregory Etelson 	return (tbl_attr->specialize &
268299231e48SGregory Etelson 		RTE_FLOW_TABLE_SPECIALIZE_RESIZABLE) != 0;
268399231e48SGregory Etelson }
268499231e48SGregory Etelson 
268599231e48SGregory Etelson int
268699231e48SGregory Etelson rte_flow_template_table_resize(uint16_t port_id,
268799231e48SGregory Etelson 			       struct rte_flow_template_table *table,
268899231e48SGregory Etelson 			       uint32_t nb_rules,
268999231e48SGregory Etelson 			       struct rte_flow_error *error)
269099231e48SGregory Etelson {
269199231e48SGregory Etelson 	int ret;
269299231e48SGregory Etelson 	struct rte_eth_dev *dev;
269399231e48SGregory Etelson 	const struct rte_flow_ops *ops;
269499231e48SGregory Etelson 
269599231e48SGregory Etelson 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
269699231e48SGregory Etelson 	ops = rte_flow_ops_get(port_id, error);
269799231e48SGregory Etelson 	if (!ops || !ops->flow_template_table_resize)
269899231e48SGregory Etelson 		return rte_flow_error_set(error, ENOTSUP,
269999231e48SGregory Etelson 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
270099231e48SGregory Etelson 					  "flow_template_table_resize not supported");
270199231e48SGregory Etelson 	dev = &rte_eth_devices[port_id];
270299231e48SGregory Etelson 	ret = ops->flow_template_table_resize(dev, table, nb_rules, error);
270399231e48SGregory Etelson 	ret = flow_err(port_id, ret, error);
270499231e48SGregory Etelson 	rte_flow_trace_template_table_resize(port_id, table, nb_rules, ret);
270599231e48SGregory Etelson 	return ret;
270699231e48SGregory Etelson }
270799231e48SGregory Etelson 
270899231e48SGregory Etelson int
270999231e48SGregory Etelson rte_flow_async_update_resized(uint16_t port_id, uint32_t queue,
271099231e48SGregory Etelson 			      const struct rte_flow_op_attr *attr,
271199231e48SGregory Etelson 			      struct rte_flow *rule, void *user_data,
271299231e48SGregory Etelson 			      struct rte_flow_error *error)
271399231e48SGregory Etelson {
271499231e48SGregory Etelson 	int ret;
271599231e48SGregory Etelson 	struct rte_eth_dev *dev;
271699231e48SGregory Etelson 	const struct rte_flow_ops *ops;
271799231e48SGregory Etelson 
271899231e48SGregory Etelson 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
271999231e48SGregory Etelson 	ops = rte_flow_ops_get(port_id, error);
272099231e48SGregory Etelson 	if (!ops || !ops->flow_update_resized)
272199231e48SGregory Etelson 		return rte_flow_error_set(error, ENOTSUP,
272299231e48SGregory Etelson 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
272399231e48SGregory Etelson 					  "flow_update_resized not supported");
272499231e48SGregory Etelson 	dev = &rte_eth_devices[port_id];
272599231e48SGregory Etelson 	ret = ops->flow_update_resized(dev, queue, attr, rule, user_data, error);
272699231e48SGregory Etelson 	ret = flow_err(port_id, ret, error);
272799231e48SGregory Etelson 	rte_flow_trace_async_update_resized(port_id, queue, attr,
272899231e48SGregory Etelson 					    rule, user_data, ret);
272999231e48SGregory Etelson 	return ret;
273099231e48SGregory Etelson }
273199231e48SGregory Etelson 
273299231e48SGregory Etelson int
273399231e48SGregory Etelson rte_flow_template_table_resize_complete(uint16_t port_id,
273499231e48SGregory Etelson 					struct rte_flow_template_table *table,
273599231e48SGregory Etelson 					struct rte_flow_error *error)
273699231e48SGregory Etelson {
273799231e48SGregory Etelson 	int ret;
273899231e48SGregory Etelson 	struct rte_eth_dev *dev;
273999231e48SGregory Etelson 	const struct rte_flow_ops *ops;
274099231e48SGregory Etelson 
274199231e48SGregory Etelson 	RTE_ETH_VALID_PORTID_OR_ERR_RET(port_id, -ENODEV);
274299231e48SGregory Etelson 	ops = rte_flow_ops_get(port_id, error);
274399231e48SGregory Etelson 	if (!ops || !ops->flow_template_table_resize_complete)
274499231e48SGregory Etelson 		return rte_flow_error_set(error, ENOTSUP,
274599231e48SGregory Etelson 					  RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
274699231e48SGregory Etelson 					  "flow_template_table_resize_complete not supported");
274799231e48SGregory Etelson 	dev = &rte_eth_devices[port_id];
274899231e48SGregory Etelson 	ret = ops->flow_template_table_resize_complete(dev, table, error);
274999231e48SGregory Etelson 	ret = flow_err(port_id, ret, error);
275099231e48SGregory Etelson 	rte_flow_trace_table_resize_complete(port_id, table, ret);
275199231e48SGregory Etelson 	return ret;
275299231e48SGregory Etelson }
275399231e48SGregory Etelson 
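/*
 * The rte_flow_dummy_*() callbacks below fail with ENOSYS (returning NULL or
 * a negative value with ENOSYS set in the error structure) and are
 * presumably used as the default fast-path entries for ports whose driver
 * does not register its own asynchronous flow operations.
 */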
2754537bfddaSDariusz Sosnowski static struct rte_flow *
2755537bfddaSDariusz Sosnowski rte_flow_dummy_async_create(struct rte_eth_dev *dev __rte_unused,
2756537bfddaSDariusz Sosnowski 			    uint32_t queue __rte_unused,
2757537bfddaSDariusz Sosnowski 			    const struct rte_flow_op_attr *attr __rte_unused,
2758537bfddaSDariusz Sosnowski 			    struct rte_flow_template_table *table __rte_unused,
2759537bfddaSDariusz Sosnowski 			    const struct rte_flow_item items[] __rte_unused,
2760537bfddaSDariusz Sosnowski 			    uint8_t pattern_template_index __rte_unused,
2761537bfddaSDariusz Sosnowski 			    const struct rte_flow_action actions[] __rte_unused,
2762537bfddaSDariusz Sosnowski 			    uint8_t action_template_index __rte_unused,
2763537bfddaSDariusz Sosnowski 			    void *user_data __rte_unused,
2764537bfddaSDariusz Sosnowski 			    struct rte_flow_error *error)
2765537bfddaSDariusz Sosnowski {
2766537bfddaSDariusz Sosnowski 	rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2767537bfddaSDariusz Sosnowski 			   rte_strerror(ENOSYS));
2768537bfddaSDariusz Sosnowski 	return NULL;
2769537bfddaSDariusz Sosnowski }
2770537bfddaSDariusz Sosnowski 
2771537bfddaSDariusz Sosnowski static struct rte_flow *
2772537bfddaSDariusz Sosnowski rte_flow_dummy_async_create_by_index(struct rte_eth_dev *dev __rte_unused,
2773537bfddaSDariusz Sosnowski 				     uint32_t queue __rte_unused,
2774537bfddaSDariusz Sosnowski 				     const struct rte_flow_op_attr *attr __rte_unused,
2775537bfddaSDariusz Sosnowski 				     struct rte_flow_template_table *table __rte_unused,
2776537bfddaSDariusz Sosnowski 				     uint32_t rule_index __rte_unused,
2777537bfddaSDariusz Sosnowski 				     const struct rte_flow_action actions[] __rte_unused,
2778537bfddaSDariusz Sosnowski 				     uint8_t action_template_index __rte_unused,
2779537bfddaSDariusz Sosnowski 				     void *user_data __rte_unused,
2780537bfddaSDariusz Sosnowski 				     struct rte_flow_error *error)
2781537bfddaSDariusz Sosnowski {
2782537bfddaSDariusz Sosnowski 	rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2783537bfddaSDariusz Sosnowski 			   rte_strerror(ENOSYS));
2784537bfddaSDariusz Sosnowski 	return NULL;
2785537bfddaSDariusz Sosnowski }
2786537bfddaSDariusz Sosnowski 
2787933f18dbSAlexander Kozyrev static struct rte_flow *
2788933f18dbSAlexander Kozyrev rte_flow_dummy_async_create_by_index_with_pattern(struct rte_eth_dev *dev __rte_unused,
2789933f18dbSAlexander Kozyrev 						uint32_t queue __rte_unused,
2790933f18dbSAlexander Kozyrev 						const struct rte_flow_op_attr *attr __rte_unused,
2791933f18dbSAlexander Kozyrev 						struct rte_flow_template_table *table __rte_unused,
2792933f18dbSAlexander Kozyrev 						uint32_t rule_index __rte_unused,
2793933f18dbSAlexander Kozyrev 						const struct rte_flow_item items[] __rte_unused,
2794933f18dbSAlexander Kozyrev 						uint8_t pattern_template_index __rte_unused,
2795933f18dbSAlexander Kozyrev 						const struct rte_flow_action actions[] __rte_unused,
2796933f18dbSAlexander Kozyrev 						uint8_t action_template_index __rte_unused,
2797933f18dbSAlexander Kozyrev 						void *user_data __rte_unused,
2798933f18dbSAlexander Kozyrev 						struct rte_flow_error *error)
2799933f18dbSAlexander Kozyrev {
2800933f18dbSAlexander Kozyrev 	rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2801933f18dbSAlexander Kozyrev 			   rte_strerror(ENOSYS));
2802933f18dbSAlexander Kozyrev 	return NULL;
2803933f18dbSAlexander Kozyrev }
2804933f18dbSAlexander Kozyrev 
2805537bfddaSDariusz Sosnowski static int
2806537bfddaSDariusz Sosnowski rte_flow_dummy_async_actions_update(struct rte_eth_dev *dev __rte_unused,
2807537bfddaSDariusz Sosnowski 				    uint32_t queue_id __rte_unused,
2808537bfddaSDariusz Sosnowski 				    const struct rte_flow_op_attr *op_attr __rte_unused,
2809537bfddaSDariusz Sosnowski 				    struct rte_flow *flow __rte_unused,
2810537bfddaSDariusz Sosnowski 				    const struct rte_flow_action actions[] __rte_unused,
2811537bfddaSDariusz Sosnowski 				    uint8_t actions_template_index __rte_unused,
2812537bfddaSDariusz Sosnowski 				    void *user_data __rte_unused,
2813537bfddaSDariusz Sosnowski 				    struct rte_flow_error *error)
2814537bfddaSDariusz Sosnowski {
2815537bfddaSDariusz Sosnowski 	return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2816537bfddaSDariusz Sosnowski 				  rte_strerror(ENOSYS));
2817537bfddaSDariusz Sosnowski }
2818537bfddaSDariusz Sosnowski 
2819537bfddaSDariusz Sosnowski static int
2820537bfddaSDariusz Sosnowski rte_flow_dummy_async_destroy(struct rte_eth_dev *dev __rte_unused,
2821537bfddaSDariusz Sosnowski 			     uint32_t queue_id __rte_unused,
2822537bfddaSDariusz Sosnowski 			     const struct rte_flow_op_attr *op_attr __rte_unused,
2823537bfddaSDariusz Sosnowski 			     struct rte_flow *flow __rte_unused,
2824537bfddaSDariusz Sosnowski 			     void *user_data __rte_unused,
2825537bfddaSDariusz Sosnowski 			     struct rte_flow_error *error)
2826537bfddaSDariusz Sosnowski {
2827537bfddaSDariusz Sosnowski 	return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2828537bfddaSDariusz Sosnowski 				  rte_strerror(ENOSYS));
2829537bfddaSDariusz Sosnowski }
2830537bfddaSDariusz Sosnowski 
2831537bfddaSDariusz Sosnowski static int
2832537bfddaSDariusz Sosnowski rte_flow_dummy_push(struct rte_eth_dev *dev __rte_unused,
2833537bfddaSDariusz Sosnowski 		    uint32_t queue_id __rte_unused,
2834537bfddaSDariusz Sosnowski 		    struct rte_flow_error *error)
2835537bfddaSDariusz Sosnowski {
2836537bfddaSDariusz Sosnowski 	return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2837537bfddaSDariusz Sosnowski 				  rte_strerror(ENOSYS));
2838537bfddaSDariusz Sosnowski }
2839537bfddaSDariusz Sosnowski 
2840537bfddaSDariusz Sosnowski static int
2841537bfddaSDariusz Sosnowski rte_flow_dummy_pull(struct rte_eth_dev *dev __rte_unused,
2842537bfddaSDariusz Sosnowski 		    uint32_t queue_id __rte_unused,
2843537bfddaSDariusz Sosnowski 		    struct rte_flow_op_result res[] __rte_unused,
2844537bfddaSDariusz Sosnowski 		    uint16_t n_res __rte_unused,
2845537bfddaSDariusz Sosnowski 		    struct rte_flow_error *error)
2846537bfddaSDariusz Sosnowski {
2847537bfddaSDariusz Sosnowski 	return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2848537bfddaSDariusz Sosnowski 				  rte_strerror(ENOSYS));
2849537bfddaSDariusz Sosnowski }
2850537bfddaSDariusz Sosnowski 
2851537bfddaSDariusz Sosnowski static struct rte_flow_action_handle *
2852537bfddaSDariusz Sosnowski rte_flow_dummy_async_action_handle_create(
2853537bfddaSDariusz Sosnowski 	struct rte_eth_dev *dev __rte_unused,
2854537bfddaSDariusz Sosnowski 	uint32_t queue_id __rte_unused,
2855537bfddaSDariusz Sosnowski 	const struct rte_flow_op_attr *op_attr __rte_unused,
2856537bfddaSDariusz Sosnowski 	const struct rte_flow_indir_action_conf *indir_action_conf __rte_unused,
2857537bfddaSDariusz Sosnowski 	const struct rte_flow_action *action __rte_unused,
2858537bfddaSDariusz Sosnowski 	void *user_data __rte_unused,
2859537bfddaSDariusz Sosnowski 	struct rte_flow_error *error)
2860537bfddaSDariusz Sosnowski {
2861537bfddaSDariusz Sosnowski 	rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2862537bfddaSDariusz Sosnowski 			   rte_strerror(ENOSYS));
2863537bfddaSDariusz Sosnowski 	return NULL;
2864537bfddaSDariusz Sosnowski }
2865537bfddaSDariusz Sosnowski 
2866537bfddaSDariusz Sosnowski static int
2867537bfddaSDariusz Sosnowski rte_flow_dummy_async_action_handle_destroy(
2868537bfddaSDariusz Sosnowski 	struct rte_eth_dev *dev __rte_unused,
2869537bfddaSDariusz Sosnowski 	uint32_t queue_id __rte_unused,
2870537bfddaSDariusz Sosnowski 	const struct rte_flow_op_attr *op_attr __rte_unused,
2871537bfddaSDariusz Sosnowski 	struct rte_flow_action_handle *action_handle __rte_unused,
2872537bfddaSDariusz Sosnowski 	void *user_data __rte_unused,
2873537bfddaSDariusz Sosnowski 	struct rte_flow_error *error)
2874537bfddaSDariusz Sosnowski {
2875537bfddaSDariusz Sosnowski 	return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2876537bfddaSDariusz Sosnowski 				  rte_strerror(ENOSYS));
2877537bfddaSDariusz Sosnowski }
2878537bfddaSDariusz Sosnowski 
2879537bfddaSDariusz Sosnowski static int
2880537bfddaSDariusz Sosnowski rte_flow_dummy_async_action_handle_update(
2881537bfddaSDariusz Sosnowski 	struct rte_eth_dev *dev __rte_unused,
2882537bfddaSDariusz Sosnowski 	uint32_t queue_id __rte_unused,
2883537bfddaSDariusz Sosnowski 	const struct rte_flow_op_attr *op_attr __rte_unused,
2884537bfddaSDariusz Sosnowski 	struct rte_flow_action_handle *action_handle __rte_unused,
2885537bfddaSDariusz Sosnowski 	const void *update __rte_unused,
2886537bfddaSDariusz Sosnowski 	void *user_data __rte_unused,
2887537bfddaSDariusz Sosnowski 	struct rte_flow_error *error)
2888537bfddaSDariusz Sosnowski {
2889537bfddaSDariusz Sosnowski 	return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2890537bfddaSDariusz Sosnowski 				  rte_strerror(ENOSYS));
2891537bfddaSDariusz Sosnowski }
2892537bfddaSDariusz Sosnowski 
2893537bfddaSDariusz Sosnowski static int
2894537bfddaSDariusz Sosnowski rte_flow_dummy_async_action_handle_query(
2895537bfddaSDariusz Sosnowski 	struct rte_eth_dev *dev __rte_unused,
2896537bfddaSDariusz Sosnowski 	uint32_t queue_id __rte_unused,
2897537bfddaSDariusz Sosnowski 	const struct rte_flow_op_attr *op_attr __rte_unused,
2898537bfddaSDariusz Sosnowski 	const struct rte_flow_action_handle *action_handle __rte_unused,
2899537bfddaSDariusz Sosnowski 	void *data __rte_unused,
2900537bfddaSDariusz Sosnowski 	void *user_data __rte_unused,
2901537bfddaSDariusz Sosnowski 	struct rte_flow_error *error)
2902537bfddaSDariusz Sosnowski {
2903537bfddaSDariusz Sosnowski 	return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2904537bfddaSDariusz Sosnowski 				  rte_strerror(ENOSYS));
2905537bfddaSDariusz Sosnowski }
2906537bfddaSDariusz Sosnowski 
2907537bfddaSDariusz Sosnowski static int
2908537bfddaSDariusz Sosnowski rte_flow_dummy_async_action_handle_query_update(
2909537bfddaSDariusz Sosnowski 	struct rte_eth_dev *dev __rte_unused,
2910537bfddaSDariusz Sosnowski 	uint32_t queue_id __rte_unused,
2911537bfddaSDariusz Sosnowski 	const struct rte_flow_op_attr *attr __rte_unused,
2912537bfddaSDariusz Sosnowski 	struct rte_flow_action_handle *handle __rte_unused,
2913537bfddaSDariusz Sosnowski 	const void *update __rte_unused,
2914537bfddaSDariusz Sosnowski 	void *query __rte_unused,
2915537bfddaSDariusz Sosnowski 	enum rte_flow_query_update_mode mode __rte_unused,
2916537bfddaSDariusz Sosnowski 	void *user_data __rte_unused,
2917537bfddaSDariusz Sosnowski 	struct rte_flow_error *error)
2918537bfddaSDariusz Sosnowski {
2919537bfddaSDariusz Sosnowski 	return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2920537bfddaSDariusz Sosnowski 				  rte_strerror(ENOSYS));
2921537bfddaSDariusz Sosnowski }
2922537bfddaSDariusz Sosnowski 
2923537bfddaSDariusz Sosnowski static struct rte_flow_action_list_handle *
2924537bfddaSDariusz Sosnowski rte_flow_dummy_async_action_list_handle_create(
2925537bfddaSDariusz Sosnowski 	struct rte_eth_dev *dev __rte_unused,
2926537bfddaSDariusz Sosnowski 	uint32_t queue_id __rte_unused,
2927537bfddaSDariusz Sosnowski 	const struct rte_flow_op_attr *attr __rte_unused,
2928537bfddaSDariusz Sosnowski 	const struct rte_flow_indir_action_conf *conf __rte_unused,
2929537bfddaSDariusz Sosnowski 	const struct rte_flow_action *actions __rte_unused,
2930537bfddaSDariusz Sosnowski 	void *user_data __rte_unused,
2931537bfddaSDariusz Sosnowski 	struct rte_flow_error *error)
2932537bfddaSDariusz Sosnowski {
2933537bfddaSDariusz Sosnowski 	rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2934537bfddaSDariusz Sosnowski 			   rte_strerror(ENOSYS));
2935537bfddaSDariusz Sosnowski 	return NULL;
2936537bfddaSDariusz Sosnowski }
2937537bfddaSDariusz Sosnowski 
2938537bfddaSDariusz Sosnowski static int
2939537bfddaSDariusz Sosnowski rte_flow_dummy_async_action_list_handle_destroy(
2940537bfddaSDariusz Sosnowski 	struct rte_eth_dev *dev __rte_unused,
2941537bfddaSDariusz Sosnowski 	uint32_t queue_id __rte_unused,
2942537bfddaSDariusz Sosnowski 	const struct rte_flow_op_attr *op_attr __rte_unused,
2943537bfddaSDariusz Sosnowski 	struct rte_flow_action_list_handle *handle __rte_unused,
2944537bfddaSDariusz Sosnowski 	void *user_data __rte_unused,
2945537bfddaSDariusz Sosnowski 	struct rte_flow_error *error)
2946537bfddaSDariusz Sosnowski {
2947537bfddaSDariusz Sosnowski 	return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2948537bfddaSDariusz Sosnowski 				  rte_strerror(ENOSYS));
2949537bfddaSDariusz Sosnowski }
2950537bfddaSDariusz Sosnowski 
2951537bfddaSDariusz Sosnowski static int
2952537bfddaSDariusz Sosnowski rte_flow_dummy_async_action_list_handle_query_update(
2953537bfddaSDariusz Sosnowski 	struct rte_eth_dev *dev __rte_unused,
2954537bfddaSDariusz Sosnowski 	uint32_t queue_id __rte_unused,
2955537bfddaSDariusz Sosnowski 	const struct rte_flow_op_attr *attr __rte_unused,
2956537bfddaSDariusz Sosnowski 	const struct rte_flow_action_list_handle *handle __rte_unused,
2957537bfddaSDariusz Sosnowski 	const void **update __rte_unused,
2958537bfddaSDariusz Sosnowski 	void **query __rte_unused,
2959537bfddaSDariusz Sosnowski 	enum rte_flow_query_update_mode mode __rte_unused,
2960537bfddaSDariusz Sosnowski 	void *user_data __rte_unused,
2961537bfddaSDariusz Sosnowski 	struct rte_flow_error *error)
2962537bfddaSDariusz Sosnowski {
2963537bfddaSDariusz Sosnowski 	return rte_flow_error_set(error, ENOSYS, RTE_FLOW_ERROR_TYPE_UNSPECIFIED, NULL,
2964537bfddaSDariusz Sosnowski 				  rte_strerror(ENOSYS));
2965537bfddaSDariusz Sosnowski }
2966537bfddaSDariusz Sosnowski 
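/*
 * Default fast-path flow ops for a port. Every callback is a stub that
 * reports ENOSYS, so calling an asynchronous flow API on a port whose
 * driver has not installed its own ops fails gracefully instead of
 * dereferencing a NULL callback. Drivers are expected to override these
 * through the registration helpers declared in rte_flow_driver.h, and
 * ethdev falls back to these defaults when a port has no driver-specific ops.
 */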
2967537bfddaSDariusz Sosnowski struct rte_flow_fp_ops rte_flow_fp_default_ops = {
2968537bfddaSDariusz Sosnowski 	.async_create = rte_flow_dummy_async_create,
2969537bfddaSDariusz Sosnowski 	.async_create_by_index = rte_flow_dummy_async_create_by_index,
2970537bfddaSDariusz Sosnowski 	.async_actions_update = rte_flow_dummy_async_actions_update,
2971933f18dbSAlexander Kozyrev 	.async_create_by_index_with_pattern = rte_flow_dummy_async_create_by_index_with_pattern,
2972537bfddaSDariusz Sosnowski 	.async_destroy = rte_flow_dummy_async_destroy,
2973537bfddaSDariusz Sosnowski 	.push = rte_flow_dummy_push,
2974537bfddaSDariusz Sosnowski 	.pull = rte_flow_dummy_pull,
2975537bfddaSDariusz Sosnowski 	.async_action_handle_create = rte_flow_dummy_async_action_handle_create,
2976537bfddaSDariusz Sosnowski 	.async_action_handle_destroy = rte_flow_dummy_async_action_handle_destroy,
2977537bfddaSDariusz Sosnowski 	.async_action_handle_update = rte_flow_dummy_async_action_handle_update,
2978537bfddaSDariusz Sosnowski 	.async_action_handle_query = rte_flow_dummy_async_action_handle_query,
2979537bfddaSDariusz Sosnowski 	.async_action_handle_query_update = rte_flow_dummy_async_action_handle_query_update,
2980537bfddaSDariusz Sosnowski 	.async_action_list_handle_create = rte_flow_dummy_async_action_list_handle_create,
2981537bfddaSDariusz Sosnowski 	.async_action_list_handle_destroy = rte_flow_dummy_async_action_list_handle_destroy,
2982537bfddaSDariusz Sosnowski 	.async_action_list_handle_query_update =
2983537bfddaSDariusz Sosnowski 		rte_flow_dummy_async_action_list_handle_query_update,
2984537bfddaSDariusz Sosnowski };