/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2023 Broadcom
 * All rights reserved.
 */

#include "bnxt.h"
#include "bnxt_ulp.h"
#include "bnxt_ulp_utils.h"
#include "ulp_tun.h"
#include "ulp_utils.h"

/* Search the app tunnel list for a matching tunnel descriptor.
 * Returns negative on error, 1 if a new entry was allocated, or 0 if an
 * existing entry matched and its reference count was incremented.
 */
int32_t
ulp_app_tun_search_entry(struct bnxt_ulp_context *ulp_ctx,
			 struct rte_flow_tunnel *app_tunnel,
			 struct bnxt_flow_app_tun_ent **tun_entry)
{
	struct bnxt_flow_app_tun_ent *tun_ent_list;
	int32_t i, rc = 0, free_entry = -1;

	tun_ent_list = bnxt_ulp_cntxt_ptr2_app_tun_list_get(ulp_ctx);
	if (!tun_ent_list) {
		BNXT_DRV_DBG(ERR, "unable to get the app tunnel list\n");
		return -EINVAL;
	}

	for (i = 0; i < BNXT_ULP_MAX_TUN_CACHE_ENTRIES; i++) {
		if (!tun_ent_list[i].ref_cnt) {
			if (free_entry < 0)
				free_entry = i;
		} else {
			if (!memcmp(&tun_ent_list[i].app_tunnel,
				    app_tunnel,
				    sizeof(struct rte_flow_tunnel))) {
				*tun_entry = &tun_ent_list[i];
				/* Take a reference on the matched entry;
				 * free_entry may still be -1 here.
				 */
				tun_ent_list[i].ref_cnt++;
				return rc;
			}
		}
	}
	if (free_entry >= 0) {
		*tun_entry = &tun_ent_list[free_entry];
		memcpy(&tun_ent_list[free_entry].app_tunnel, app_tunnel,
		       sizeof(struct rte_flow_tunnel));
		tun_ent_list[free_entry].ref_cnt = 1;
		rc = 1;
	} else {
		BNXT_DRV_DBG(ERR, "ulp app tunnel list is full\n");
		return -ENOMEM;
	}

	return rc;
}

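/* Release a reference to an app tunnel entry; the entry is zeroed once the
 * last reference is dropped.
 */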
void
ulp_app_tun_entry_delete(struct bnxt_flow_app_tun_ent *tun_entry)
{
	if (tun_entry) {
		if (tun_entry->ref_cnt) {
			tun_entry->ref_cnt--;
			if (!tun_entry->ref_cnt)
				memset(tun_entry, 0,
				       sizeof(struct bnxt_flow_app_tun_ent));
		}
	}
}

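/* Populate the cached VXLAN decap action for the given app tunnel entry;
 * the entry itself doubles as the action configuration pointer.
 */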
int32_t
ulp_app_tun_entry_set_decap_action(struct bnxt_flow_app_tun_ent *tun_entry)
{
	if (!tun_entry)
		return -EINVAL;

	tun_entry->action.type = (typeof(tun_entry->action.type))
			      BNXT_RTE_FLOW_ACTION_TYPE_VXLAN_DECAP;
	tun_entry->action.conf = tun_entry;
	return 0;
}

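/* Populate the cached VXLAN decap pattern item for the given app tunnel
 * entry; the entry itself doubles as the item spec.
 */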
int32_t
ulp_app_tun_entry_set_decap_item(struct bnxt_flow_app_tun_ent *tun_entry)
{
	if (!tun_entry)
		return -EINVAL;

	tun_entry->item.type = (typeof(tun_entry->item.type))
			      BNXT_RTE_FLOW_ITEM_TYPE_VXLAN_DECAP;
	tun_entry->item.spec = tun_entry;
	tun_entry->item.last = NULL;
	tun_entry->item.mask = NULL;
	return 0;
}

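/* Map an opaque action/item context pointer back to its app tunnel entry,
 * returning NULL if the pointer does not belong to the tunnel list.
 */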
struct bnxt_flow_app_tun_ent *
ulp_app_tun_match_entry(struct bnxt_ulp_context *ulp_ctx,
			const void *ctx)
{
	struct bnxt_flow_app_tun_ent *tun_ent_list;
	int32_t i;

	tun_ent_list = bnxt_ulp_cntxt_ptr2_app_tun_list_get(ulp_ctx);
	if (!tun_ent_list) {
		BNXT_DRV_DBG(ERR, "unable to get the app tunnel list\n");
		return NULL;
	}

	for (i = 0; i < BNXT_ULP_MAX_TUN_CACHE_ENTRIES; i++) {
		if (&tun_ent_list[i] == ctx)
			return &tun_ent_list[i];
	}
	return NULL;
}

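/* Look up the tunnel cache by outer destination IP (v4 or v6); if no entry
 * matches, claim the first free slot and record the outer destination IP and
 * MAC taken from the parsed header fields. Returns 0 on success, or
 * BNXT_TF_RC_ERROR if the table is unavailable or full.
 */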
static int32_t
ulp_get_tun_entry(struct ulp_rte_parser_params *params,
		  struct bnxt_tun_cache_entry **tun_entry,
		  uint16_t *tun_idx)
{
	int32_t i, first_free_entry = BNXT_ULP_TUN_ENTRY_INVALID;
	struct bnxt_tun_cache_entry *tun_tbl;
	uint32_t dip_idx, dmac_idx, use_ipv4 = 0;

	tun_tbl = bnxt_ulp_cntxt_ptr2_tun_tbl_get(params->ulp_ctx);
	if (!tun_tbl) {
		BNXT_DRV_DBG(ERR, "Error: could not get Tunnel table\n");
		return BNXT_TF_RC_ERROR;
	}

	/* get the outer destination ip field index */
	dip_idx = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_TUN_OFF_DIP_ID);
	dmac_idx = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_TUN_OFF_DMAC_ID);
	if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV4))
		use_ipv4 = 1;

	for (i = 0; i < BNXT_ULP_MAX_TUN_CACHE_ENTRIES; i++) {
		if (!tun_tbl[i].t_dst_ip_valid) {
			if (first_free_entry == BNXT_ULP_TUN_ENTRY_INVALID)
				first_free_entry = i;
			continue;
		}
		/* match on the destination ip of the tunnel */
		if ((use_ipv4 && !memcmp(&tun_tbl[i].t_dst_ip,
					 params->hdr_field[dip_idx].spec,
					 sizeof(rte_be32_t))) ||
		    (!use_ipv4 &&
		     !memcmp(tun_tbl[i].t_dst_ip6,
			     params->hdr_field[dip_idx].spec,
			     sizeof(((struct bnxt_tun_cache_entry *)
				     NULL)->t_dst_ip6)))) {
			*tun_entry = &tun_tbl[i];
			*tun_idx = i;
			return 0;
		}
	}
	if (first_free_entry == BNXT_ULP_TUN_ENTRY_INVALID) {
		BNXT_DRV_DBG(ERR,
			     "Error: No entry available in tunnel table\n");
		return BNXT_TF_RC_ERROR;
	}

	*tun_idx = first_free_entry;
	*tun_entry = &tun_tbl[first_free_entry];
	tun_tbl[first_free_entry].t_dst_ip_valid = true;

	/* Update the destination ip and mac */
	if (use_ipv4)
		memcpy(&tun_tbl[first_free_entry].t_dst_ip,
		       params->hdr_field[dip_idx].spec, sizeof(rte_be32_t));
	else
		memcpy(tun_tbl[first_free_entry].t_dst_ip6,
		       params->hdr_field[dip_idx].spec,
		       sizeof(((struct bnxt_tun_cache_entry *)
			       NULL)->t_dst_ip6));
	memcpy(tun_tbl[first_free_entry].t_dmac,
	       params->hdr_field[dmac_idx].spec, RTE_ETHER_ADDR_LEN);

	return 0;
}

/* Tunnel API to clear a tunnel table entry */
void
ulp_tunnel_offload_entry_clear(struct bnxt_tun_cache_entry *tun_tbl,
			       uint8_t tun_idx)
{
	memset(&tun_tbl[tun_idx], 0, sizeof(struct bnxt_tun_cache_entry));
}

/* Tunnel API to perform the tunnel offload process for F1 (outer tunnel)
 * and F2 (inner) flows
 */
int32_t
ulp_tunnel_offload_process(struct ulp_rte_parser_params *params)
{
	struct bnxt_tun_cache_entry *tun_entry;
	uint16_t tun_idx;
	int32_t rc = BNXT_TF_RC_SUCCESS;

	/* Perform the tunnel offload only for F1 and F2 flows */
	if (!ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			      BNXT_ULP_HDR_BIT_F1) &&
	    !ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			      BNXT_ULP_HDR_BIT_F2))
		return rc;

	/* search for the tunnel entry; if not found, create one */
	rc = ulp_get_tun_entry(params, &tun_entry, &tun_idx);
	if (rc == BNXT_TF_RC_ERROR)
		return rc;

	/* Tunnel offload for the outer Tunnel flow */
	if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
			     BNXT_ULP_HDR_BIT_F1)) {
		/* Reset the JUMP action bit in the action bitmap as we don't
		 * offload this action.
		 */
		ULP_BITMAP_RESET(params->act_bitmap.bits,
				 BNXT_ULP_ACT_BIT_JUMP);
		params->parent_flow = true;
		params->tun_idx = tun_idx;
		tun_entry->outer_tun_flow_id = params->fid;
	} else if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits,
				    BNXT_ULP_HDR_BIT_F2)) {
		/* add the vxlan decap action for F2 flows */
		ULP_BITMAP_SET(params->act_bitmap.bits,
			       BNXT_ULP_ACT_BIT_VXLAN_DECAP);
		params->child_flow = true;
		params->tun_idx = tun_idx;
		params->parent_flow = false;
	}
	ULP_COMP_FLD_IDX_WR(params, BNXT_ULP_CF_IDX_TUNNEL_ID, tun_idx);
	return rc;
}
231