/*
 * SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2023 Napatech A/S
 */

#include "flow_api_engine.h"
#include "flow_api_nic_setup.h"
#include "ntnic_mod_reg.h"

#include "flow_filter.h"

const char *dbg_res_descr[] = {
	/* RES_QUEUE */ "RES_QUEUE",
	/* RES_CAT_CFN */ "RES_CAT_CFN",
	/* RES_CAT_COT */ "RES_CAT_COT",
	/* RES_CAT_EXO */ "RES_CAT_EXO",
	/* RES_CAT_LEN */ "RES_CAT_LEN",
	/* RES_KM_FLOW_TYPE */ "RES_KM_FLOW_TYPE",
	/* RES_KM_CATEGORY */ "RES_KM_CATEGORY",
	/* RES_HSH_RCP */ "RES_HSH_RCP",
	/* RES_PDB_RCP */ "RES_PDB_RCP",
	/* RES_QSL_RCP */ "RES_QSL_RCP",
	/* RES_QSL_LTX */ "RES_QSL_LTX",
	/* RES_QSL_QST */ "RES_QSL_QST",
	/* RES_SLC_LR_RCP */ "RES_SLC_LR_RCP",
	/* RES_FLM_FLOW_TYPE */ "RES_FLM_FLOW_TYPE",
	/* RES_FLM_RCP */ "RES_FLM_RCP",
	/* RES_TPE_RCP */ "RES_TPE_RCP",
	/* RES_TPE_EXT */ "RES_TPE_EXT",
	/* RES_TPE_RPL */ "RES_TPE_RPL",
	/* RES_SCRUB_RCP */ "RES_SCRUB_RCP",
	/* RES_COUNT */ "RES_COUNT",
	/* RES_INVALID */ "RES_INVALID"
};

static struct flow_nic_dev *dev_base;
static pthread_mutex_t base_mtx = PTHREAD_MUTEX_INITIALIZER;

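/* Return a resource index to the pool by marking it unused again. */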
void flow_nic_free_resource(struct flow_nic_dev *ndev, enum res_type_e res_type, int idx)
{
	flow_nic_mark_resource_unused(ndev, res_type, idx);
}

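/*
 * Drop one reference on a resource index and free it when the count reaches
 * zero. Returns 1 while references remain, 0 when the resource has been freed.
 */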
int flow_nic_deref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type, int index)
{
	NT_LOG(DBG, FILTER, "De-reference resource %s idx %i (before ref cnt %i)",
		dbg_res_descr[res_type], index, ndev->res[res_type].ref[index]);
	assert(flow_nic_is_resource_used(ndev, res_type, index));
	assert(ndev->res[res_type].ref[index]);
	/* deref */
	ndev->res[res_type].ref[index]--;

	if (!ndev->res[res_type].ref[index])
		flow_nic_free_resource(ndev, res_type, index);

	return !!ndev->res[res_type].ref[index];	/* 0 means the resource has been freed */
}

/*
 * Device Management API
 */

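/*
 * Unlink an eth-port device from its owning NIC device's eth_base list.
 * Returns 0 on success, -1 if the device was not found in the list.
 */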
static int nic_remove_eth_port_dev(struct flow_nic_dev *ndev, struct flow_eth_dev *eth_dev)
{
	struct flow_eth_dev *dev = ndev->eth_base, *prev = NULL;

	while (dev) {
		if (dev == eth_dev) {
			if (prev)
				prev->next = dev->next;

			else
				ndev->eth_base = dev->next;

			return 0;
		}

		prev = dev;
		dev = dev->next;
	}

	return -1;
}

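/*
 * Reset a NIC device: delete all eth-port devices created on it and release
 * the KM/KCC resource management handles. In FLOW_DEBUG builds, also verify
 * that no resource entries are left referenced or marked as used.
 */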
static void flow_ndev_reset(struct flow_nic_dev *ndev)
{
	/* Delete all eth-port devices created on this NIC device */
	while (ndev->eth_base)
		flow_delete_eth_dev(ndev->eth_base);

	km_free_ndev_resource_management(&ndev->km_res_handle);
	kcc_free_ndev_resource_management(&ndev->kcc_res_handle);

	ndev->flow_unique_id_counter = 0;

#ifdef FLOW_DEBUG
	/*
	 * Check that all resources allocated by default for this NIC device
	 * have been released. Not strictly needed, since the bitmaps are freed
	 * shortly after; therefore only done in debug mode.
	 */
	NT_LOG(DBG, FILTER, "Delete NIC DEV Adaptor %i", ndev->adapter_no);

	for (unsigned int i = 0; i < RES_COUNT; i++) {
		int err = 0;
		NT_LOG(DBG, FILTER, "RES state for: %s", dbg_res_descr[i]);

		for (unsigned int ii = 0; ii < ndev->res[i].resource_count; ii++) {
			int ref = ndev->res[i].ref[ii];
			int used = flow_nic_is_resource_used(ndev, i, ii);

			if (ref || used) {
				NT_LOG(DBG, FILTER, "  [%i]: ref cnt %i, used %i", ii, ref,
					used);
				err = 1;
			}
		}

		if (err)
			NT_LOG(DBG, FILTER, "ERROR - some resources not freed");
	}

#endif
}

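/*
 * Delete an eth-port device: clear its unmatched-packet (UNMQ) setup in QSL,
 * release its RX queue resources (when scatter-gather is not used), unlink it
 * from the NIC device and free it.
 */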
int flow_delete_eth_dev(struct flow_eth_dev *eth_dev)
{
	struct flow_nic_dev *ndev = eth_dev->ndev;

	if (!ndev) {
		/* Error: invalid NIC device */
		return -1;
	}

	NT_LOG(DBG, FILTER, "Delete eth-port device %p, port %i", eth_dev, eth_dev->port);

#ifdef FLOW_DEBUG
	ndev->be.iface->set_debug_mode(ndev->be.be_dev, FLOW_BACKEND_DEBUG_MODE_WRITE);
#endif

	/* delete all created flows from this device */
	pthread_mutex_lock(&ndev->mtx);

	/*
	 * Remove the unmatched queue, if set up in QSL:
	 * clear the exception queue setting in QSL UNMQ
	 */
	hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_DEST_QUEUE, eth_dev->port, 0);
	hw_mod_qsl_unmq_set(&ndev->be, HW_QSL_UNMQ_EN, eth_dev->port, 0);
	hw_mod_qsl_unmq_flush(&ndev->be, eth_dev->port, 1);

#ifdef FLOW_DEBUG
	ndev->be.iface->set_debug_mode(ndev->be.be_dev, FLOW_BACKEND_DEBUG_MODE_NONE);
#endif

#ifndef SCATTER_GATHER

	/* free rx queues */
	for (int i = 0; i < eth_dev->num_queues; i++) {
		ndev->be.iface->free_rx_queue(ndev->be.be_dev, eth_dev->rx_queue[i].hw_id);
		flow_nic_deref_resource(ndev, RES_QUEUE, eth_dev->rx_queue[i].id);
	}

#endif

	/* take eth_dev out of ndev list */
	if (nic_remove_eth_port_dev(ndev, eth_dev) != 0)
		NT_LOG(ERR, FILTER, "ERROR: eth_dev %p not found", eth_dev);

	pthread_mutex_unlock(&ndev->mtx);

	/* free eth_dev */
	free(eth_dev);

	return 0;
}

/*
 * Flow API NIC Setup
 * Flow backend creation function - register and initialize common backend API to FPA modules
 */

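/*
 * Allocate the bookkeeping for one resource type: a single calloc holds both
 * the allocation bitmap and the per-index reference counters.
 */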
static int init_resource_elements(struct flow_nic_dev *ndev, enum res_type_e res_type,
	uint32_t count)
{
	assert(ndev->res[res_type].alloc_bm == NULL);
	/* allocate bitmap and ref counter */
	ndev->res[res_type].alloc_bm =
		calloc(1, BIT_CONTAINER_8_ALIGN(count) + count * sizeof(uint32_t));

	if (ndev->res[res_type].alloc_bm) {
		ndev->res[res_type].ref =
			(uint32_t *)&ndev->res[res_type].alloc_bm[BIT_CONTAINER_8_ALIGN(count)];
		ndev->res[res_type].resource_count = count;
		return 0;
	}

	return -1;
}

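/* Free the bitmap/ref-counter block allocated by init_resource_elements(). */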
static void done_resource_elements(struct flow_nic_dev *ndev, enum res_type_e res_type)
{
	assert(ndev);

	if (ndev->res[res_type].alloc_bm)
		free(ndev->res[res_type].alloc_bm);
}

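/* Insert a NIC device at the head of the global device list (dev_base). */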
static void list_insert_flow_nic(struct flow_nic_dev *ndev)
{
	pthread_mutex_lock(&base_mtx);
	ndev->next = dev_base;
	dev_base = ndev;
	pthread_mutex_unlock(&base_mtx);
}

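/* Remove a NIC device from the global device list; returns -1 if not found. */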
static int list_remove_flow_nic(struct flow_nic_dev *ndev)
{
	pthread_mutex_lock(&base_mtx);
	struct flow_nic_dev *nic_dev = dev_base, *prev = NULL;

	while (nic_dev) {
		if (nic_dev == ndev) {
			if (prev)
				prev->next = nic_dev->next;

			else
				dev_base = nic_dev->next;

			pthread_mutex_unlock(&base_mtx);
			return 0;
		}

		prev = nic_dev;
		nic_dev = nic_dev->next;
	}

	pthread_mutex_unlock(&base_mtx);
	return -1;
}

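/*
 * Create and initialize a flow NIC device on top of a backend interface
 * (version 1): initialize the backend, size and allocate every resource pool
 * from the backend capabilities, and insert the device into the global list.
 * Returns NULL on failure; partially initialized state is released through
 * flow_api_done().
 *
 * Illustrative pairing (sketch only; the backend interface and device handle
 * come from the adapter layer, not from this file):
 *
 *	struct flow_nic_dev *ndev = flow_api_create(0, be_if, be_dev);
 *
 *	if (ndev) {
 *		... use the flow API ...
 *		flow_api_done(ndev);
 *	}
 */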
struct flow_nic_dev *flow_api_create(uint8_t adapter_no, const struct flow_api_backend_ops *be_if,
	void *be_dev)
{
	if (!be_if || be_if->version != 1) {
		NT_LOG(DBG, FILTER, "ERR: %s", __func__);
		return NULL;
	}

	struct flow_nic_dev *ndev = calloc(1, sizeof(struct flow_nic_dev));

	if (!ndev) {
		NT_LOG(ERR, FILTER, "ERROR: calloc failed");
		return NULL;
	}

	/*
	 * To dump module initialization writes, use FLOW_BACKEND_DEBUG_MODE_WRITE
	 * here; remember to set it back to ...NONE afterwards.
	 */
	be_if->set_debug_mode(be_dev, FLOW_BACKEND_DEBUG_MODE_NONE);

	if (flow_api_backend_init(&ndev->be, be_if, be_dev) != 0)
		goto err_exit;

	ndev->adapter_no = adapter_no;

	ndev->ports = (uint16_t)((ndev->be.num_rx_ports > 256) ? 256 : ndev->be.num_rx_ports);

	/*
	 * Free resources in the NIC must be managed by this module.
	 * Get resource sizes and create the resource manager elements.
	 */
	if (init_resource_elements(ndev, RES_QUEUE, ndev->be.max_queues))
		goto err_exit;

	if (init_resource_elements(ndev, RES_CAT_CFN, ndev->be.cat.nb_cat_funcs))
		goto err_exit;

	if (init_resource_elements(ndev, RES_CAT_COT, ndev->be.max_categories))
		goto err_exit;

	if (init_resource_elements(ndev, RES_CAT_EXO, ndev->be.cat.nb_pm_ext))
		goto err_exit;

	if (init_resource_elements(ndev, RES_CAT_LEN, ndev->be.cat.nb_len))
		goto err_exit;

	if (init_resource_elements(ndev, RES_KM_FLOW_TYPE, ndev->be.cat.nb_flow_types))
		goto err_exit;

	if (init_resource_elements(ndev, RES_KM_CATEGORY, ndev->be.km.nb_categories))
		goto err_exit;

	if (init_resource_elements(ndev, RES_HSH_RCP, ndev->be.hsh.nb_rcp))
		goto err_exit;

	if (init_resource_elements(ndev, RES_PDB_RCP, ndev->be.pdb.nb_pdb_rcp_categories))
		goto err_exit;

	if (init_resource_elements(ndev, RES_QSL_RCP, ndev->be.qsl.nb_rcp_categories))
		goto err_exit;

	if (init_resource_elements(ndev, RES_QSL_QST, ndev->be.qsl.nb_qst_entries))
		goto err_exit;

	if (init_resource_elements(ndev, RES_SLC_LR_RCP, ndev->be.max_categories))
		goto err_exit;

	if (init_resource_elements(ndev, RES_FLM_FLOW_TYPE, ndev->be.cat.nb_flow_types))
		goto err_exit;

	if (init_resource_elements(ndev, RES_FLM_RCP, ndev->be.flm.nb_categories))
		goto err_exit;

	if (init_resource_elements(ndev, RES_TPE_RCP, ndev->be.tpe.nb_rcp_categories))
		goto err_exit;

	if (init_resource_elements(ndev, RES_TPE_EXT, ndev->be.tpe.nb_rpl_ext_categories))
		goto err_exit;

	if (init_resource_elements(ndev, RES_TPE_RPL, ndev->be.tpe.nb_rpl_depth))
		goto err_exit;

	if (init_resource_elements(ndev, RES_SCRUB_RCP, ndev->be.flm.nb_scrub_profiles))
		goto err_exit;

	/* may need IPF, COR */

	/* check that all defined resources have been initialized */
	for (int i = 0; i < RES_COUNT; i++)
		assert(ndev->res[i].alloc_bm);

	pthread_mutex_init(&ndev->mtx, NULL);
	list_insert_flow_nic(ndev);

	return ndev;

err_exit:

	if (ndev)
		flow_api_done(ndev);

	NT_LOG(DBG, FILTER, "ERR: %s", __func__);
	return NULL;
}

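/*
 * Counterpart to flow_api_create(): reset the NIC device, free all resource
 * pools, shut down the backend and remove the device from the global list.
 */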
int flow_api_done(struct flow_nic_dev *ndev)
{
	NT_LOG(DBG, FILTER, "FLOW API DONE");

	if (ndev) {
		flow_ndev_reset(ndev);

		/* delete resource management allocations for this ndev */
		for (int i = 0; i < RES_COUNT; i++)
			done_resource_elements(ndev, i);

		flow_api_backend_done(&ndev->be);
		list_remove_flow_nic(ndev);
		free(ndev);
	}

	return 0;
}

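/* Return the backend device handle associated with a NIC device. */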
void *flow_api_get_be_dev(struct flow_nic_dev *ndev)
{
	if (!ndev) {
		NT_LOG(DBG, FILTER, "ERR: %s", __func__);
		return NULL;
	}

	return ndev->be.be_dev;
}

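/* Flow filter operations exposed through the module registry. */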
static const struct flow_filter_ops ops = {
	.flow_filter_init = flow_filter_init,
	.flow_filter_done = flow_filter_done,
};

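/* Make the flow filter operations available to the rest of the driver. */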
void init_flow_filter(void)
{
	register_flow_filter_ops(&ops);
}