/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2014-2021 Broadcom
 * All rights reserved.
 */

#include <rte_malloc.h>

#include "ulp_tun.h"
#include "ulp_rte_parser.h"
#include "ulp_template_db_enum.h"
#include "ulp_template_struct.h"
#include "ulp_matcher.h"
#include "ulp_mapper.h"
#include "ulp_flow_db.h"

/* This function programs the outer tunnel flow in the hardware. */
static int32_t
ulp_install_outer_tun_flow(struct ulp_rte_parser_params *params,
			   struct bnxt_tun_cache_entry *tun_entry,
			   uint16_t tun_idx)
{
	struct bnxt_ulp_mapper_create_parms mparms = { 0 };
	int ret;

	/* Reset the JUMP action bit in the action bitmap as we don't
	 * offload this action.
	 */
	ULP_BITMAP_RESET(params->act_bitmap.bits, BNXT_ULP_ACTION_BIT_JUMP);

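	/* Mark this as the tunnel outer (F1) flow in the header bitmap
	 * before running template matching below.
	 */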
	ULP_BITMAP_SET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_F1);

	ret = ulp_matcher_pattern_match(params, &params->class_id);
	if (ret != BNXT_TF_RC_SUCCESS)
		goto err;

	ret = ulp_matcher_action_match(params, &params->act_tmpl);
	if (ret != BNXT_TF_RC_SUCCESS)
		goto err;

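	/* F1 is created as a parent flow; the deferred F2 flows installed
	 * later are linked to it through parent_fid.
	 */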
	params->parent_flow = true;
	bnxt_ulp_init_mapper_params(&mparms, params,
				    BNXT_ULP_FDB_TYPE_REGULAR);
	mparms.tun_idx = tun_idx;

	/* Call the ulp mapper to create the flow in the hardware. */
	ret = ulp_mapper_flow_create(params->ulp_ctx, &mparms);
	if (ret)
		goto err;

	/* Store the tunnel dmac in the tunnel cache table and use it while
	 * programming tunnel flow F2.
	 */
	memcpy(tun_entry->t_dmac,
	       &params->hdr_field[ULP_TUN_O_DMAC_HDR_FIELD_INDEX].spec,
	       RTE_ETHER_ADDR_LEN);

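	/* Mark F1 as offloaded on this port and remember its flow id so
	 * that F2 flows can reference it as their parent.
	 */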
	tun_entry->valid = true;
	tun_entry->tun_flow_info[params->port_id].state =
				BNXT_ULP_FLOW_STATE_TUN_O_OFFLD;
	tun_entry->outer_tun_flow_id = params->fid;

	/* F1 and its related F2s are correlated based on
	 * Tunnel Destination IP Address.
	 */
	if (tun_entry->t_dst_ip_valid)
		goto done;
	if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV4))
		memcpy(&tun_entry->t_dst_ip,
		       &params->hdr_field[ULP_TUN_O_IPV4_DIP_INDEX].spec,
		       sizeof(rte_be32_t));
	else
		memcpy(tun_entry->t_dst_ip6,
		       &params->hdr_field[ULP_TUN_O_IPV6_DIP_INDEX].spec,
		       sizeof(tun_entry->t_dst_ip6));
	tun_entry->t_dst_ip_valid = true;

done:
	return BNXT_TF_RC_FID;

err:
	memset(tun_entry, 0, sizeof(struct bnxt_tun_cache_entry));
	return BNXT_TF_RC_ERROR;
}

/* This function programs the inner tunnel flow in the hardware. */
static void
ulp_install_inner_tun_flow(struct bnxt_tun_cache_entry *tun_entry,
			   struct ulp_rte_parser_params *tun_o_params)
{
	struct bnxt_ulp_mapper_create_parms mparms = { 0 };
	struct ulp_per_port_flow_info *flow_info;
	struct ulp_rte_parser_params *params;
	int ret;

	/* F2 doesn't carry the tunnel dmac; use the tunnel dmac that was
	 * stored during F1 programming.
	 */
	flow_info = &tun_entry->tun_flow_info[tun_o_params->port_id];
	params = &flow_info->first_inner_tun_params;
	memcpy(&params->hdr_field[ULP_TUN_O_DMAC_HDR_FIELD_INDEX].spec,
	       tun_entry->t_dmac, RTE_ETHER_ADDR_LEN);
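	/* Link the cached F2 flow to its parent F1 flow and reuse the fid
	 * that was allocated when F2 was first cached.
	 */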
	params->parent_fid = tun_entry->outer_tun_flow_id;
	params->fid = flow_info->first_tun_i_fid;

	bnxt_ulp_init_mapper_params(&mparms, params,
				    BNXT_ULP_FDB_TYPE_REGULAR);

	ret = ulp_mapper_flow_create(params->ulp_ctx, &mparms);
	if (ret)
		PMD_DRV_LOG(ERR, "Failed to create F2 flow.\n");
}

/* This function installs either both the outer and the inner tunnel flows
 * or just the outer tunnel flow, depending on the flow state.
 */
static int32_t
ulp_post_process_outer_tun_flow(struct ulp_rte_parser_params *params,
				struct bnxt_tun_cache_entry *tun_entry,
				uint16_t tun_idx)
{
	enum bnxt_ulp_tun_flow_state flow_state;
	int ret;

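	/* Capture the per-port state before F1 installation updates it to
	 * BNXT_ULP_FLOW_STATE_TUN_O_OFFLD; the old state tells us whether
	 * an F2 flow was cached and must be installed now.
	 */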
	flow_state = tun_entry->tun_flow_info[params->port_id].state;
	ret = ulp_install_outer_tun_flow(params, tun_entry, tun_idx);
	if (ret == BNXT_TF_RC_ERROR) {
		PMD_DRV_LOG(ERR, "Failed to create outer tunnel flow.\n");
		return ret;
	}

	/* If flow_state == BNXT_ULP_FLOW_STATE_NORMAL before installing
	 * F1, that means F2 is not deferred. Hence, no need to install F2.
	 */
	if (flow_state != BNXT_ULP_FLOW_STATE_NORMAL)
		ulp_install_inner_tun_flow(tun_entry, params);

	return BNXT_TF_RC_FID;
}

/* This function will be called if the inner tunnel flow request comes
 * before the outer tunnel flow request.
 */
static int32_t
ulp_post_process_first_inner_tun_flow(struct ulp_rte_parser_params *params,
				      struct bnxt_tun_cache_entry *tun_entry)
{
	struct ulp_per_port_flow_info *flow_info;
	int ret;

	ret = ulp_matcher_pattern_match(params, &params->class_id);
	if (ret != BNXT_TF_RC_SUCCESS)
		return BNXT_TF_RC_ERROR;

	ret = ulp_matcher_action_match(params, &params->act_tmpl);
	if (ret != BNXT_TF_RC_SUCCESS)
		return BNXT_TF_RC_ERROR;

	/* If the tunnel F2 flow comes first, it cannot be installed in the
	 * hardware because the F2 flow will not have the L2 context
	 * information. So just cache the F2 information and program it in
	 * the context of the F1 flow installation.
	 */
	flow_info = &tun_entry->tun_flow_info[params->port_id];
	memcpy(&flow_info->first_inner_tun_params, params,
	       sizeof(struct ulp_rte_parser_params));

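	/* Remember the fid allocated for this F2 flow so it can be reused
	 * when the cached flow is programmed after F1 arrives.
	 */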
	flow_info->first_tun_i_fid = params->fid;
	flow_info->state = BNXT_ULP_FLOW_STATE_TUN_I_CACHED;

	/* F1 and its related F2s are correlated based on
	 * Tunnel Destination IP Address. It could already be set, if
	 * the inner flow got offloaded first.
	 */
	if (tun_entry->t_dst_ip_valid)
		goto done;
	if (ULP_BITMAP_ISSET(params->hdr_bitmap.bits, BNXT_ULP_HDR_BIT_O_IPV4))
		memcpy(&tun_entry->t_dst_ip,
		       &params->hdr_field[ULP_TUN_O_IPV4_DIP_INDEX].spec,
		       sizeof(rte_be32_t));
	else
		memcpy(tun_entry->t_dst_ip6,
		       &params->hdr_field[ULP_TUN_O_IPV6_DIP_INDEX].spec,
		       sizeof(tun_entry->t_dst_ip6));
	tun_entry->t_dst_ip_valid = true;

done:
	return BNXT_TF_RC_FID;
}

/* This function will be called if the inner tunnel flow request comes
 * after the outer tunnel flow request.
 */
static int32_t
ulp_post_process_inner_tun_flow(struct ulp_rte_parser_params *params,
				struct bnxt_tun_cache_entry *tun_entry)
{
	/* F2 doesn't carry the tunnel dmac; use the one stored during F1
	 * programming and link F2 to its parent F1 flow.
	 */
	memcpy(&params->hdr_field[ULP_TUN_O_DMAC_HDR_FIELD_INDEX].spec,
	       tun_entry->t_dmac, RTE_ETHER_ADDR_LEN);

	params->parent_fid = tun_entry->outer_tun_flow_id;

	return BNXT_TF_RC_NORMAL;
}

static int32_t
ulp_get_tun_entry(struct ulp_rte_parser_params *params,
		  struct bnxt_tun_cache_entry **tun_entry,
		  uint16_t *tun_idx)
{
	int i, first_free_entry = BNXT_ULP_TUN_ENTRY_INVALID;
	struct bnxt_tun_cache_entry *tun_tbl;
	bool tun_entry_found = false, free_entry_found = false;

	tun_tbl = bnxt_ulp_cntxt_ptr2_tun_tbl_get(params->ulp_ctx);
	if (!tun_tbl)
		return BNXT_TF_RC_ERROR;

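	/* Look for an existing entry keyed by the tunnel destination IP;
	 * while scanning, remember the first free slot in case a new entry
	 * has to be allocated.
	 */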
	for (i = 0; i < BNXT_ULP_MAX_TUN_CACHE_ENTRIES; i++) {
		if (!memcmp(&tun_tbl[i].t_dst_ip,
			    &params->hdr_field[ULP_TUN_O_IPV4_DIP_INDEX].spec,
			    sizeof(rte_be32_t)) ||
		    !memcmp(&tun_tbl[i].t_dst_ip6,
			    &params->hdr_field[ULP_TUN_O_IPV6_DIP_INDEX].spec,
			    sizeof(tun_tbl[i].t_dst_ip6))) {
			tun_entry_found = true;
			break;
		}

		if (!tun_tbl[i].t_dst_ip_valid && !free_entry_found) {
			first_free_entry = i;
			free_entry_found = true;
		}
	}

	if (tun_entry_found) {
		*tun_entry = &tun_tbl[i];
		*tun_idx = i;
	} else {
		if (first_free_entry == BNXT_ULP_TUN_ENTRY_INVALID)
			return BNXT_TF_RC_ERROR;
		*tun_entry = &tun_tbl[first_free_entry];
		*tun_idx = first_free_entry;
	}

	return BNXT_TF_RC_SUCCESS;
}

int32_t
ulp_post_process_tun_flow(struct ulp_rte_parser_params *params)
{
	bool outer_tun_sig, inner_tun_sig, first_inner_tun_flow;
	bool outer_tun_reject, inner_tun_reject, outer_tun_flow, inner_tun_flow;
	enum bnxt_ulp_tun_flow_state flow_state;
	struct bnxt_tun_cache_entry *tun_entry;
	uint32_t l3_tun, l3_tun_decap;
	uint16_t tun_idx;
	int rc;

	/* Computational fields that indicate it's a TUNNEL DECAP flow */
	l3_tun = ULP_COMP_FLD_IDX_RD(params, BNXT_ULP_CF_IDX_L3_TUN);
	l3_tun_decap = ULP_COMP_FLD_IDX_RD(params,
					   BNXT_ULP_CF_IDX_L3_TUN_DECAP);
	if (!l3_tun)
		return BNXT_TF_RC_NORMAL;

	rc = ulp_get_tun_entry(params, &tun_entry, &tun_idx);
	if (rc == BNXT_TF_RC_ERROR)
		return rc;

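	/* Classify the request as an outer (F1) or inner (F2) tunnel flow
	 * based on its signature and the cached per-port flow state.
	 */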
	flow_state = tun_entry->tun_flow_info[params->port_id].state;
	/* Outer tunnel flow validation */
	outer_tun_sig = BNXT_OUTER_TUN_SIGNATURE(l3_tun, params);
	outer_tun_flow = BNXT_OUTER_TUN_FLOW(outer_tun_sig);
	outer_tun_reject = BNXT_REJECT_OUTER_TUN_FLOW(flow_state,
						      outer_tun_sig);

	/* Inner tunnel flow validation */
	inner_tun_sig = BNXT_INNER_TUN_SIGNATURE(l3_tun, l3_tun_decap, params);
	first_inner_tun_flow = BNXT_FIRST_INNER_TUN_FLOW(flow_state,
							 inner_tun_sig);
	inner_tun_flow = BNXT_INNER_TUN_FLOW(flow_state, inner_tun_sig);
	inner_tun_reject = BNXT_REJECT_INNER_TUN_FLOW(flow_state,
						      inner_tun_sig);

	if (outer_tun_reject) {
		tun_entry->outer_tun_rej_cnt++;
		BNXT_TF_DBG(ERR,
			    "Tunnel F1 flow rejected, COUNT: %d\n",
			    tun_entry->outer_tun_rej_cnt);
	/* An inner tunnel flow is rejected if it comes between the first
	 * inner tunnel flow request and the outer flow request.
	 */
	} else if (inner_tun_reject) {
		tun_entry->inner_tun_rej_cnt++;
		BNXT_TF_DBG(ERR,
			    "Tunnel F2 flow rejected, COUNT: %d\n",
			    tun_entry->inner_tun_rej_cnt);
	}

	if (outer_tun_reject || inner_tun_reject)
		return BNXT_TF_RC_ERROR;
	else if (first_inner_tun_flow)
		return ulp_post_process_first_inner_tun_flow(params, tun_entry);
	else if (outer_tun_flow)
		return ulp_post_process_outer_tun_flow(params, tun_entry,
						       tun_idx);
	else if (inner_tun_flow)
		return ulp_post_process_inner_tun_flow(params, tun_entry);
	else
		return BNXT_TF_RC_NORMAL;
}

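/* Clear the tunnel cache entry at the given index. */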
void
ulp_clear_tun_entry(struct bnxt_tun_cache_entry *tun_tbl, uint8_t tun_idx)
{
	memset(&tun_tbl[tun_idx], 0, sizeof(struct bnxt_tun_cache_entry));
}

/* When a DPDK application offloads the same tunnel inner flow
 * on all the uplink ports, a tunnel inner flow entry is cached
 * even if it is not for the right uplink port. Such tunnel
 * inner flows will eventually get aged out as there won't be
 * any traffic on these ports. When such a flow destroy is
 * called, clean up the tunnel inner flow entry.
 */
void
ulp_clear_tun_inner_entry(struct bnxt_tun_cache_entry *tun_tbl, uint32_t fid)
{
	struct ulp_per_port_flow_info *flow_info;
	int i, j;

	for (i = 0; i < BNXT_ULP_MAX_TUN_CACHE_ENTRIES; i++) {
		for (j = 0; j < RTE_MAX_ETHPORTS; j++) {
			flow_info = &tun_tbl[i].tun_flow_info[j];
			if (flow_info->first_tun_i_fid == fid &&
			    flow_info->state == BNXT_ULP_FLOW_STATE_TUN_I_CACHED)
				memset(flow_info, 0, sizeof(*flow_info));
		}
	}
}