/*
 * SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2023 Napatech A/S
 */

#include "flow_api_engine.h"
#include "flow_api_nic_setup.h"
#include "ntnic_mod_reg.h"

#include "flow_filter.h"

const char *dbg_res_descr[] = {
	/* RES_QUEUE */ "RES_QUEUE",
	/* RES_CAT_CFN */ "RES_CAT_CFN",
	/* RES_CAT_COT */ "RES_CAT_COT",
	/* RES_CAT_EXO */ "RES_CAT_EXO",
	/* RES_CAT_LEN */ "RES_CAT_LEN",
	/* RES_KM_FLOW_TYPE */ "RES_KM_FLOW_TYPE",
	/* RES_KM_CATEGORY */ "RES_KM_CATEGORY",
	/* RES_HSH_RCP */ "RES_HSH_RCP",
	/* RES_PDB_RCP */ "RES_PDB_RCP",
	/* RES_QSL_RCP */ "RES_QSL_RCP",
	/* RES_QSL_LTX */ "RES_QSL_LTX",
	/* RES_QSL_QST */ "RES_QSL_QST",
	/* RES_SLC_LR_RCP */ "RES_SLC_LR_RCP",
	/* RES_FLM_FLOW_TYPE */ "RES_FLM_FLOW_TYPE",
	/* RES_FLM_RCP */ "RES_FLM_RCP",
	/* RES_TPE_RCP */ "RES_TPE_RCP",
	/* RES_TPE_EXT */ "RES_TPE_EXT",
	/* RES_TPE_RPL */ "RES_TPE_RPL",
	/* RES_COUNT */ "RES_COUNT",
	/* RES_INVALID */ "RES_INVALID"
};

static struct flow_nic_dev *dev_base;
static pthread_mutex_t base_mtx = PTHREAD_MUTEX_INITIALIZER;

void flow_nic_free_resource(struct flow_nic_dev *ndev, enum res_type_e res_type, int idx)
{
	flow_nic_mark_resource_unused(ndev, res_type, idx);
}

int flow_nic_deref_resource(struct flow_nic_dev *ndev, enum res_type_e res_type, int index)
{
	NT_LOG(DBG, FILTER, "De-reference resource %s idx %i (before ref cnt %i)",
		dbg_res_descr[res_type], index, ndev->res[res_type].ref[index]);
	assert(flow_nic_is_resource_used(ndev, res_type, index));
	assert(ndev->res[res_type].ref[index]);
	/* deref */
	ndev->res[res_type].ref[index]--;

	if (!ndev->res[res_type].ref[index])
		flow_nic_free_resource(ndev, res_type, index);

	return !!ndev->res[res_type].ref[index];	/* if 0, the resource has been freed */
}

/*
 * Device Management API
 */

static int nic_remove_eth_port_dev(struct flow_nic_dev *ndev, struct flow_eth_dev *eth_dev)
{
	struct flow_eth_dev *dev = ndev->eth_base, *prev = NULL;

	while (dev) {
		if (dev == eth_dev) {
			if (prev)
				prev->next = dev->next;

			else
				ndev->eth_base = dev->next;

			return 0;
		}

		prev = dev;
		dev = dev->next;
	}

	return -1;
}

static void flow_ndev_reset(struct flow_nic_dev *ndev)
{
	/* Delete all eth-port devices created on this NIC device */
	while (ndev->eth_base)
		flow_delete_eth_dev(ndev->eth_base);

	km_free_ndev_resource_management(&ndev->km_res_handle);
	kcc_free_ndev_resource_management(&ndev->kcc_res_handle);

	ndev->flow_unique_id_counter = 0;

#ifdef FLOW_DEBUG
	/*
	 * Walk all resources initially allocated by default for this NIC DEV.
	 * Not really needed, since the bitmaps are freed shortly afterwards;
	 * therefore only done in debug mode.
	 */

	/* Check that all resources have been released */
	NT_LOG(DBG, FILTER, "Delete NIC DEV Adaptor %i", ndev->adapter_no);

	for (unsigned int i = 0; i < RES_COUNT; i++) {
		int err = 0;
#if defined(FLOW_DEBUG)
		NT_LOG(DBG, FILTER, "RES state for: %s", dbg_res_descr[i]);
#endif

		for (unsigned int ii = 0; ii < ndev->res[i].resource_count; ii++) {
			int ref = ndev->res[i].ref[ii];
			int used = flow_nic_is_resource_used(ndev, i, ii);

			if (ref || used) {
				NT_LOG(DBG, FILTER, " [%i]: ref cnt %i, used %i", ii, ref,
					used);
				err = 1;
			}
		}

		if (err)
			NT_LOG(DBG, FILTER, "ERROR - some resources not freed");
	}

#endif
}

int flow_delete_eth_dev(struct flow_eth_dev *eth_dev)
{
	struct flow_nic_dev *ndev = eth_dev->ndev;

	if (!ndev) {
		/* Error: invalid NIC device */
		return -1;
	}

	NT_LOG(DBG, FILTER, "Delete eth-port device %p, port %i", eth_dev, eth_dev->port);

#ifdef FLOW_DEBUG
	ndev->be.iface->set_debug_mode(ndev->be.be_dev, FLOW_BACKEND_DEBUG_MODE_WRITE);
#endif

	/* delete all created flows from this device */
	pthread_mutex_lock(&ndev->mtx);

#ifdef FLOW_DEBUG
	ndev->be.iface->set_debug_mode(ndev->be.be_dev, FLOW_BACKEND_DEBUG_MODE_NONE);
#endif

#ifndef SCATTER_GATHER

	/* free rx queues */
	for (int i = 0; i < eth_dev->num_queues; i++) {
		ndev->be.iface->free_rx_queue(ndev->be.be_dev, eth_dev->rx_queue[i].hw_id);
		flow_nic_deref_resource(ndev, RES_QUEUE, eth_dev->rx_queue[i].id);
	}

#endif

	/* take eth_dev out of ndev list */
	if (nic_remove_eth_port_dev(ndev, eth_dev) != 0)
		NT_LOG(ERR, FILTER, "ERROR : eth_dev %p not found", eth_dev);

	pthread_mutex_unlock(&ndev->mtx);

	/* free eth_dev */
	free(eth_dev);

	return 0;
}

/*
 * Flow API NIC Setup
 * Flow backend creation function - register and initialize common backend API to FPA modules
 */

/*
 * Per-resource-type management table: a used/free bitmap followed by one
 * 32-bit reference counter per element, allocated as a single block.
 */
static int init_resource_elements(struct flow_nic_dev *ndev, enum res_type_e res_type,
	uint32_t count)
{
	assert(ndev->res[res_type].alloc_bm == NULL);
	/* allocate bitmap and ref counters */
	ndev->res[res_type].alloc_bm =
		calloc(1, BIT_CONTAINER_8_ALIGN(count) + count * sizeof(uint32_t));

	if (ndev->res[res_type].alloc_bm) {
		ndev->res[res_type].ref =
			(uint32_t *)&ndev->res[res_type].alloc_bm[BIT_CONTAINER_8_ALIGN(count)];
		ndev->res[res_type].resource_count = count;
		return 0;
	}

	return -1;
}

static void done_resource_elements(struct flow_nic_dev *ndev, enum res_type_e res_type)
{
	assert(ndev);

	if (ndev->res[res_type].alloc_bm)
		free(ndev->res[res_type].alloc_bm);
}

static void list_insert_flow_nic(struct flow_nic_dev *ndev)
{
	pthread_mutex_lock(&base_mtx);
	ndev->next = dev_base;
	dev_base = ndev;
	pthread_mutex_unlock(&base_mtx);
}

static int list_remove_flow_nic(struct flow_nic_dev *ndev)
{
	pthread_mutex_lock(&base_mtx);
	struct flow_nic_dev *nic_dev = dev_base, *prev = NULL;

	while (nic_dev) {
		if (nic_dev == ndev) {
			if (prev)
				prev->next = nic_dev->next;

			else
				dev_base = nic_dev->next;

			pthread_mutex_unlock(&base_mtx);
			return 0;
		}

		prev = nic_dev;
		nic_dev = nic_dev->next;
	}

	pthread_mutex_unlock(&base_mtx);
	return -1;
}

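/*
 * Create a flow NIC device on top of a backend (only backend API version 1
 * is accepted). Allocates the per-resource-type management tables, hooks the
 * device into the global NIC list and returns it; returns NULL on any failure.
 */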
struct flow_nic_dev *flow_api_create(uint8_t adapter_no, const struct flow_api_backend_ops *be_if,
	void *be_dev)
{
	(void)adapter_no;

	if (!be_if || be_if->version != 1) {
		NT_LOG(DBG, FILTER, "ERR: %s", __func__);
		return NULL;
	}

	struct flow_nic_dev *ndev = calloc(1, sizeof(struct flow_nic_dev));

	if (!ndev) {
		NT_LOG(ERR, FILTER, "ERROR: calloc failed");
		return NULL;
	}

	/*
	 * To dump module initialization writes, use FLOW_BACKEND_DEBUG_MODE_WRITE;
	 * remember to set it back to FLOW_BACKEND_DEBUG_MODE_NONE afterwards.
	 */
	be_if->set_debug_mode(be_dev, FLOW_BACKEND_DEBUG_MODE_NONE);

	if (flow_api_backend_init(&ndev->be, be_if, be_dev) != 0)
		goto err_exit;

	ndev->adapter_no = adapter_no;

	ndev->ports = (uint16_t)((ndev->be.num_rx_ports > 256) ? 256 : ndev->be.num_rx_ports);

	/*
	 * Free resources in the NIC must be managed by this module.
	 * Get resource sizes and create resource manager elements.
	 */
	if (init_resource_elements(ndev, RES_QUEUE, ndev->be.max_queues))
		goto err_exit;

	if (init_resource_elements(ndev, RES_CAT_CFN, ndev->be.cat.nb_cat_funcs))
		goto err_exit;

	if (init_resource_elements(ndev, RES_CAT_COT, ndev->be.max_categories))
		goto err_exit;

	if (init_resource_elements(ndev, RES_CAT_EXO, ndev->be.cat.nb_pm_ext))
		goto err_exit;

	if (init_resource_elements(ndev, RES_CAT_LEN, ndev->be.cat.nb_len))
		goto err_exit;

	if (init_resource_elements(ndev, RES_KM_FLOW_TYPE, ndev->be.cat.nb_flow_types))
		goto err_exit;

	if (init_resource_elements(ndev, RES_KM_CATEGORY, ndev->be.km.nb_categories))
		goto err_exit;

	if (init_resource_elements(ndev, RES_SLC_LR_RCP, ndev->be.max_categories))
		goto err_exit;

	if (init_resource_elements(ndev, RES_FLM_FLOW_TYPE, ndev->be.cat.nb_flow_types))
		goto err_exit;

	if (init_resource_elements(ndev, RES_FLM_RCP, ndev->be.flm.nb_categories))
		goto err_exit;

	if (init_resource_elements(ndev, RES_SCRUB_RCP, ndev->be.flm.nb_scrub_profiles))
		goto err_exit;

	/* may need IPF, COR */

	/* check that all defined resources have been initialized */
	for (int i = 0; i < RES_COUNT; i++)
		assert(ndev->res[i].alloc_bm);

	pthread_mutex_init(&ndev->mtx, NULL);
	list_insert_flow_nic(ndev);

	return ndev;

err_exit:

	if (ndev)
		flow_api_done(ndev);

	NT_LOG(DBG, FILTER, "ERR: %s", __func__);
	return NULL;
}

int flow_api_done(struct flow_nic_dev *ndev)
{
	NT_LOG(DBG, FILTER, "FLOW API DONE");

	if (ndev) {
		flow_ndev_reset(ndev);

		/* delete resource management allocations for this ndev */
		for (int i = 0; i < RES_COUNT; i++)
			done_resource_elements(ndev, i);

		flow_api_backend_done(&ndev->be);
		list_remove_flow_nic(ndev);
		free(ndev);
	}

	return 0;
}

void *flow_api_get_be_dev(struct flow_nic_dev *ndev)
{
	if (!ndev) {
		NT_LOG(DBG, FILTER, "ERR: %s", __func__);
		return NULL;
	}

	return ndev->be.be_dev;
}

static const struct flow_filter_ops ops = {
	.flow_filter_init = flow_filter_init,
	.flow_filter_done = flow_filter_done,
};

void init_flow_filter(void)
{
	register_flow_filter_ops(&ops);
}
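
/*
 * Illustrative lifecycle sketch (not part of the driver): one way an adapter
 * layer could drive the device-management API above. "my_be_ops" and
 * "my_be_dev" are placeholders for a concrete backend ops table and its
 * device handle; real callers live in the NIC setup code, not in this file.
 *
 *	struct flow_nic_dev *ndev = flow_api_create(0, &my_be_ops, my_be_dev);
 *
 *	if (ndev != NULL) {
 *		void *be_dev = flow_api_get_be_dev(ndev);  (backend handle back out)
 *		...
 *		flow_api_done(ndev);  (deletes eth-ports, resource tables, backend)
 *	}
 */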