/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2021 Broadcom
 * All rights reserved.
 */

#ifndef _BNXT_TUN_H_
#define _BNXT_TUN_H_

#include <inttypes.h>
#include <stdbool.h>
#include <sys/queue.h>

#include "rte_ethdev.h"

#include "ulp_template_db_enum.h"
#include "ulp_template_struct.h"

#define	BNXT_OUTER_TUN_SIGNATURE(l3_tun, params)		\
	((l3_tun) &&						\
	 ULP_BITMAP_ISSET((params)->act_bitmap.bits,		\
			  BNXT_ULP_ACTION_BIT_JUMP))
#define	BNXT_INNER_TUN_SIGNATURE(l3_tun, l3_tun_decap, params)	\
	((l3_tun) && (l3_tun_decap) &&				\
	 !ULP_BITMAP_ISSET((params)->hdr_bitmap.bits,		\
			   BNXT_ULP_HDR_BIT_O_ETH))

#define	BNXT_FIRST_INNER_TUN_FLOW(state, inner_tun_sig)	\
	((state) == BNXT_ULP_FLOW_STATE_NORMAL && (inner_tun_sig))
#define	BNXT_INNER_TUN_FLOW(state, inner_tun_sig)		\
	((state) == BNXT_ULP_FLOW_STATE_TUN_O_OFFLD && (inner_tun_sig))
#define	BNXT_OUTER_TUN_FLOW(outer_tun_sig)		((outer_tun_sig))
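/*
 * Illustrative sketch only (not part of this header's API): one way a
 * flow-create handler could combine the signature and classification
 * macros above. The variables l3_tun, l3_tun_decap, params, flow_info,
 * inner_tun_sig and outer_tun_sig are assumptions for the example.
 *
 *	inner_tun_sig = BNXT_INNER_TUN_SIGNATURE(l3_tun, l3_tun_decap,
 *						 params);
 *	outer_tun_sig = BNXT_OUTER_TUN_SIGNATURE(l3_tun, params);
 *
 *	if (BNXT_FIRST_INNER_TUN_FLOW(flow_info->state, inner_tun_sig)) {
 *		// inner flow arrived first: cache it until the outer
 *		// flow for the same tunnel is offloaded
 *	} else if (BNXT_INNER_TUN_FLOW(flow_info->state, inner_tun_sig)) {
 *		// outer flow already offloaded: program the inner flow
 *	} else if (BNXT_OUTER_TUN_FLOW(outer_tun_sig)) {
 *		// offload the outer flow
 *	}
 */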
/* It is invalid to receive another outer flow offload request
 * for the same tunnel while the outer flow is already offloaded.
 */
#define	BNXT_REJECT_OUTER_TUN_FLOW(state, outer_tun_sig)	\
	((state) == BNXT_ULP_FLOW_STATE_TUN_O_OFFLD && (outer_tun_sig))
/* It is invalid to receive another inner flow offload request
 * for the same tunnel while the outer flow has not yet been offloaded.
 */
#define	BNXT_REJECT_INNER_TUN_FLOW(state, inner_tun_sig)	\
	((state) == BNXT_ULP_FLOW_STATE_TUN_I_CACHED && (inner_tun_sig))
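/*
 * Note (inferred from the names): the indexes below select the outer
 * destination MAC and the outer IPv4/IPv6 destination IP entries in the
 * parser header field array; the values follow the template field layout.
 */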
#define	ULP_TUN_O_DMAC_HDR_FIELD_INDEX	1
#define	ULP_TUN_O_IPV4_DIP_INDEX	19
#define	ULP_TUN_O_IPV6_DIP_INDEX	17

/* When a flow offload request arrives, the following state transitions
 * happen based on the order in which the outer and inner flow offload
 * requests arrive.
 *
 * If the inner tunnel flow offload request arrives first, the flow state
 * changes from BNXT_ULP_FLOW_STATE_NORMAL to
 * BNXT_ULP_FLOW_STATE_TUN_I_CACHED, and the subsequent outer tunnel
 * flow offload request moves the flow from
 * BNXT_ULP_FLOW_STATE_TUN_I_CACHED to BNXT_ULP_FLOW_STATE_TUN_O_OFFLD.
 *
 * If the outer tunnel flow offload request arrives first, the flow state
 * changes from BNXT_ULP_FLOW_STATE_NORMAL to
 * BNXT_ULP_FLOW_STATE_TUN_O_OFFLD.
 *
 * Once the flow is in BNXT_ULP_FLOW_STATE_TUN_O_OFFLD, any subsequent
 * inner tunnel flow offload requests are treated as normal flows and the
 * tunnel flow state remains BNXT_ULP_FLOW_STATE_TUN_O_OFFLD.
 */
enum bnxt_ulp_tun_flow_state {
	BNXT_ULP_FLOW_STATE_NORMAL = 0,
	BNXT_ULP_FLOW_STATE_TUN_O_OFFLD,
	BNXT_ULP_FLOW_STATE_TUN_I_CACHED
};
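/*
 * Illustrative sketch only (not the driver's actual code): the state
 * transitions described above, written out as a hypothetical update step.
 * 'flow_info' is assumed to be the per-port flow state, and the *_sig
 * values come from the signature macros earlier in this file.
 *
 *	switch (flow_info->state) {
 *	case BNXT_ULP_FLOW_STATE_NORMAL:
 *		if (inner_tun_sig)	// inner flow seen first: cache it
 *			flow_info->state = BNXT_ULP_FLOW_STATE_TUN_I_CACHED;
 *		else if (outer_tun_sig)	// outer flow seen first: offload it
 *			flow_info->state = BNXT_ULP_FLOW_STATE_TUN_O_OFFLD;
 *		break;
 *	case BNXT_ULP_FLOW_STATE_TUN_I_CACHED:
 *		if (outer_tun_sig)	// outer flow completes the pair
 *			flow_info->state = BNXT_ULP_FLOW_STATE_TUN_O_OFFLD;
 *		break;
 *	case BNXT_ULP_FLOW_STATE_TUN_O_OFFLD:
 *		// later inner flows are handled as normal flows; the
 *		// state stays in BNXT_ULP_FLOW_STATE_TUN_O_OFFLD
 *		break;
 *	}
 */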
struct ulp_per_port_flow_info {
	enum bnxt_ulp_tun_flow_state	state;
	uint32_t			first_tun_i_fid;
	struct ulp_rte_parser_params	first_inner_tun_params;
};

struct bnxt_tun_cache_entry {
	bool				valid;
	bool				t_dst_ip_valid;
	uint8_t				t_dmac[RTE_ETHER_ADDR_LEN];
	union {
		rte_be32_t		t_dst_ip;
		uint8_t			t_dst_ip6[16];
	};
	uint32_t			outer_tun_flow_id;
	uint16_t			outer_tun_rej_cnt;
	uint16_t			inner_tun_rej_cnt;
	struct ulp_per_port_flow_info	tun_flow_info[RTE_MAX_ETHPORTS];
};
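/*
 * Summary note (inferred from the fields above): a bnxt_tun_cache_entry
 * identifies one tunnel by its outer destination MAC and IPv4/IPv6
 * destination address, records the flow id of the offloaded outer tunnel
 * flow along with the reject counters, and keeps per-port tunnel flow
 * state, including the fid and parser params of the first inner flow
 * cached while waiting for the outer flow.
 */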
/* Reset the tunnel cache entry at index 'tun_idx' in 'tun_tbl'. */
void
ulp_clear_tun_entry(struct bnxt_tun_cache_entry *tun_tbl, uint8_t tun_idx);

/* Clear the cached inner tunnel flow state associated with 'fid'. */
void
ulp_clear_tun_inner_entry(struct bnxt_tun_cache_entry *tun_tbl, uint32_t fid);

#endif