1 /* 2 * SPDX-License-Identifier: BSD-3-Clause 3 * Copyright(c) 2023 Napatech A/S 4 */ 5 6 #include <stdint.h> 7 8 #include "flow_nthw_info.h" 9 #include "flow_nthw_ifr.h" 10 #include "flow_nthw_cat.h" 11 #include "flow_nthw_km.h" 12 #include "flow_nthw_flm.h" 13 #include "ntnic_mod_reg.h" 14 #include "nthw_fpga_model.h" 15 #include "hw_mod_backend.h" 16 17 /* 18 * Binary Flow API backend implementation into ntservice driver 19 * 20 * General note on this backend implementation: 21 * Maybe use shadow class to combine multiple writes. However, this backend is only for dev/testing 22 */ 23 24 static struct backend_dev_s { 25 uint8_t adapter_no; 26 enum debug_mode_e dmode; 27 struct info_nthw *p_info_nthw; 28 struct cat_nthw *p_cat_nthw; 29 struct km_nthw *p_km_nthw; 30 struct flm_nthw *p_flm_nthw; 31 struct ifr_nthw *p_ifr_nthw; /* TPE module */ 32 } be_devs[MAX_PHYS_ADAPTERS]; 33 34 #define CHECK_DEBUG_ON(be, mod, inst) \ 35 int __debug__ = 0; \ 36 if (((be)->dmode & FLOW_BACKEND_DEBUG_MODE_WRITE) || (mod)->debug) \ 37 do { \ 38 mod##_nthw_set_debug_mode((inst), 0xFF); \ 39 __debug__ = 1; \ 40 } while (0) 41 42 #define CHECK_DEBUG_OFF(mod, inst) \ 43 do { \ 44 if (__debug__) \ 45 mod##_nthw_set_debug_mode((inst), 0); \ 46 } while (0) 47 48 const struct flow_api_backend_ops *bin_flow_backend_init(nthw_fpga_t *p_fpga, void **be_dev); 49 static void bin_flow_backend_done(void *be_dev); 50 51 static int set_debug_mode(void *be_dev, enum debug_mode_e mode) 52 { 53 struct backend_dev_s *be = (struct backend_dev_s *)be_dev; 54 be->dmode = mode; 55 return 0; 56 } 57 58 /* 59 * INFO 60 */ 61 62 static int get_nb_phy_ports(void *be_dev) 63 { 64 struct backend_dev_s *be = (struct backend_dev_s *)be_dev; 65 return info_nthw_get_nb_phy_ports(be->p_info_nthw); 66 } 67 68 static int get_nb_rx_ports(void *be_dev) 69 { 70 struct backend_dev_s *be = (struct backend_dev_s *)be_dev; 71 return info_nthw_get_nb_rx_ports(be->p_info_nthw); 72 } 73 74 static int get_ltx_avail(void *be_dev) 
75 { 76 struct backend_dev_s *be = (struct backend_dev_s *)be_dev; 77 return info_nthw_get_ltx_avail(be->p_info_nthw); 78 } 79 80 static int get_nb_cat_funcs(void *be_dev) 81 { 82 struct backend_dev_s *be = (struct backend_dev_s *)be_dev; 83 return info_nthw_get_nb_cat_funcs(be->p_info_nthw); 84 } 85 86 static int get_nb_categories(void *be_dev) 87 { 88 struct backend_dev_s *be = (struct backend_dev_s *)be_dev; 89 return info_nthw_get_nb_categories(be->p_info_nthw); 90 } 91 92 static int get_nb_cat_km_if_cnt(void *be_dev) 93 { 94 struct backend_dev_s *be = (struct backend_dev_s *)be_dev; 95 return info_nthw_get_nb_cat_km_if_cnt(be->p_info_nthw); 96 } 97 98 static int get_nb_cat_km_if_m0(void *be_dev) 99 { 100 struct backend_dev_s *be = (struct backend_dev_s *)be_dev; 101 return info_nthw_get_nb_cat_km_if_m0(be->p_info_nthw); 102 } 103 104 static int get_nb_cat_km_if_m1(void *be_dev) 105 { 106 struct backend_dev_s *be = (struct backend_dev_s *)be_dev; 107 return info_nthw_get_nb_cat_km_if_m1(be->p_info_nthw); 108 } 109 110 static int get_nb_queues(void *be_dev) 111 { 112 struct backend_dev_s *be = (struct backend_dev_s *)be_dev; 113 return info_nthw_get_nb_queues(be->p_info_nthw); 114 } 115 116 static int get_nb_km_flow_types(void *be_dev) 117 { 118 struct backend_dev_s *be = (struct backend_dev_s *)be_dev; 119 return info_nthw_get_nb_km_flow_types(be->p_info_nthw); 120 } 121 122 static int get_nb_pm_ext(void *be_dev) 123 { 124 struct backend_dev_s *be = (struct backend_dev_s *)be_dev; 125 return info_nthw_get_nb_pm_ext(be->p_info_nthw); 126 } 127 128 static int get_nb_len(void *be_dev) 129 { 130 struct backend_dev_s *be = (struct backend_dev_s *)be_dev; 131 return info_nthw_get_nb_len(be->p_info_nthw); 132 } 133 134 static int get_kcc_size(void *be_dev) 135 { 136 struct backend_dev_s *be = (struct backend_dev_s *)be_dev; 137 return info_nthw_get_kcc_size(be->p_info_nthw); 138 } 139 140 static int get_kcc_banks(void *be_dev) 141 { 142 struct backend_dev_s *be = 
(struct backend_dev_s *)be_dev; 143 return info_nthw_get_kcc_banks(be->p_info_nthw); 144 } 145 146 static int get_nb_km_categories(void *be_dev) 147 { 148 struct backend_dev_s *be = (struct backend_dev_s *)be_dev; 149 return info_nthw_get_nb_km_categories(be->p_info_nthw); 150 } 151 152 static int get_nb_km_cam_banks(void *be_dev) 153 { 154 struct backend_dev_s *be = (struct backend_dev_s *)be_dev; 155 return info_nthw_get_nb_km_cam_banks(be->p_info_nthw); 156 } 157 158 static int get_nb_km_cam_record_words(void *be_dev) 159 { 160 struct backend_dev_s *be = (struct backend_dev_s *)be_dev; 161 return info_nthw_get_nb_km_cam_record_words(be->p_info_nthw); 162 } 163 164 static int get_nb_km_cam_records(void *be_dev) 165 { 166 struct backend_dev_s *be = (struct backend_dev_s *)be_dev; 167 return info_nthw_get_nb_km_cam_records(be->p_info_nthw); 168 } 169 170 static int get_nb_km_tcam_banks(void *be_dev) 171 { 172 struct backend_dev_s *be = (struct backend_dev_s *)be_dev; 173 return info_nthw_get_nb_km_tcam_banks(be->p_info_nthw); 174 } 175 176 static int get_nb_km_tcam_bank_width(void *be_dev) 177 { 178 struct backend_dev_s *be = (struct backend_dev_s *)be_dev; 179 return info_nthw_get_nb_km_tcam_bank_width(be->p_info_nthw); 180 } 181 182 static int get_nb_flm_categories(void *be_dev) 183 { 184 struct backend_dev_s *be = (struct backend_dev_s *)be_dev; 185 return info_nthw_get_nb_flm_categories(be->p_info_nthw); 186 } 187 188 static int get_nb_flm_size_mb(void *be_dev) 189 { 190 struct backend_dev_s *be = (struct backend_dev_s *)be_dev; 191 return info_nthw_get_nb_flm_size_mb(be->p_info_nthw); 192 } 193 194 static int get_nb_flm_entry_size(void *be_dev) 195 { 196 struct backend_dev_s *be = (struct backend_dev_s *)be_dev; 197 return info_nthw_get_nb_flm_entry_size(be->p_info_nthw); 198 } 199 200 static int get_nb_flm_variant(void *be_dev) 201 { 202 struct backend_dev_s *be = (struct backend_dev_s *)be_dev; 203 return info_nthw_get_nb_flm_variant(be->p_info_nthw); 204 } 
205 206 static int get_nb_flm_prios(void *be_dev) 207 { 208 struct backend_dev_s *be = (struct backend_dev_s *)be_dev; 209 return info_nthw_get_nb_flm_prios(be->p_info_nthw); 210 } 211 212 static int get_nb_flm_pst_profiles(void *be_dev) 213 { 214 struct backend_dev_s *be = (struct backend_dev_s *)be_dev; 215 return info_nthw_get_nb_flm_pst_profiles(be->p_info_nthw); 216 } 217 218 static int get_nb_flm_scrub_profiles(void *be_dev) 219 { 220 struct backend_dev_s *be = (struct backend_dev_s *)be_dev; 221 return info_nthw_get_nb_flm_scrub_profiles(be->p_info_nthw); 222 } 223 224 static int get_nb_flm_load_aps_max(void *be_dev) 225 { 226 struct backend_dev_s *be = (struct backend_dev_s *)be_dev; 227 return info_nthw_get_nb_flm_load_aps_max(be->p_info_nthw); 228 } 229 230 static int get_nb_qsl_categories(void *be_dev) 231 { 232 struct backend_dev_s *be = (struct backend_dev_s *)be_dev; 233 return info_nthw_get_nb_qsl_categories(be->p_info_nthw); 234 } 235 236 static int get_nb_qsl_qst_entries(void *be_dev) 237 { 238 struct backend_dev_s *be = (struct backend_dev_s *)be_dev; 239 return info_nthw_get_nb_qsl_qst_entries(be->p_info_nthw); 240 } 241 242 static int get_nb_pdb_categories(void *be_dev) 243 { 244 struct backend_dev_s *be = (struct backend_dev_s *)be_dev; 245 return info_nthw_get_nb_pdb_categories(be->p_info_nthw); 246 } 247 248 static int get_nb_roa_categories(void *be_dev) 249 { 250 struct backend_dev_s *be = (struct backend_dev_s *)be_dev; 251 return info_nthw_get_nb_roa_categories(be->p_info_nthw); 252 } 253 254 static int get_nb_tpe_categories(void *be_dev) 255 { 256 struct backend_dev_s *be = (struct backend_dev_s *)be_dev; 257 return info_nthw_get_nb_tpe_categories(be->p_info_nthw); 258 } 259 260 static int get_nb_tx_cpy_writers(void *be_dev) 261 { 262 struct backend_dev_s *be = (struct backend_dev_s *)be_dev; 263 return info_nthw_get_nb_tx_cpy_writers(be->p_info_nthw); 264 } 265 266 static int get_nb_tx_cpy_mask_mem(void *be_dev) 267 { 268 struct 
backend_dev_s *be = (struct backend_dev_s *)be_dev; 269 return info_nthw_get_nb_tx_cpy_mask_mem(be->p_info_nthw); 270 } 271 272 static int get_nb_tx_rpl_depth(void *be_dev) 273 { 274 struct backend_dev_s *be = (struct backend_dev_s *)be_dev; 275 return info_nthw_get_nb_tx_rpl_depth(be->p_info_nthw); 276 } 277 278 static int get_nb_tx_rpl_ext_categories(void *be_dev) 279 { 280 struct backend_dev_s *be = (struct backend_dev_s *)be_dev; 281 return info_nthw_get_nb_tx_rpl_ext_categories(be->p_info_nthw); 282 } 283 284 static int get_nb_tpe_ifr_categories(void *be_dev) 285 { 286 struct backend_dev_s *be = (struct backend_dev_s *)be_dev; 287 return info_nthw_get_nb_tpe_ifr_categories(be->p_info_nthw); 288 } 289 290 static int get_nb_rpp_per_ps(void *be_dev) 291 { 292 struct backend_dev_s *be = (struct backend_dev_s *)be_dev; 293 return info_nthw_get_nb_rpp_per_ps(be->p_info_nthw); 294 } 295 296 static int get_nb_hsh_categories(void *be_dev) 297 { 298 struct backend_dev_s *be = (struct backend_dev_s *)be_dev; 299 return info_nthw_get_nb_hsh_categories(be->p_info_nthw); 300 } 301 302 static int get_nb_hsh_toeplitz(void *be_dev) 303 { 304 struct backend_dev_s *be = (struct backend_dev_s *)be_dev; 305 return info_nthw_get_nb_hsh_toeplitz(be->p_info_nthw); 306 } 307 308 /* 309 * CAT 310 */ 311 312 static bool cat_get_present(void *be_dev) 313 { 314 struct backend_dev_s *be = (struct backend_dev_s *)be_dev; 315 return be->p_cat_nthw != NULL; 316 } 317 318 static uint32_t cat_get_version(void *be_dev) 319 { 320 struct backend_dev_s *be = (struct backend_dev_s *)be_dev; 321 return (uint32_t)((nthw_module_get_major_version(be->p_cat_nthw->m_cat) << 16) | 322 (nthw_module_get_minor_version(be->p_cat_nthw->m_cat) & 0xffff)); 323 } 324 325 static int cat_cfn_flush(void *be_dev, const struct cat_func_s *cat, int cat_func, int cnt) 326 { 327 struct backend_dev_s *be = (struct backend_dev_s *)be_dev; 328 329 CHECK_DEBUG_ON(be, cat, be->p_cat_nthw); 330 331 if (cat->ver == 18) { 332 
cat_nthw_cfn_cnt(be->p_cat_nthw, 1U); 333 334 for (int i = 0; i < cnt; i++) { 335 cat_nthw_cfn_select(be->p_cat_nthw, cat_func); 336 cat_nthw_cfn_enable(be->p_cat_nthw, cat->v18.cfn[cat_func].enable); 337 cat_nthw_cfn_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].inv); 338 cat_nthw_cfn_ptc_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_inv); 339 cat_nthw_cfn_ptc_isl(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_isl); 340 cat_nthw_cfn_ptc_cfp(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_cfp); 341 cat_nthw_cfn_ptc_mac(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_mac); 342 cat_nthw_cfn_ptc_l2(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_l2); 343 cat_nthw_cfn_ptc_vn_tag(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_vntag); 344 cat_nthw_cfn_ptc_vlan(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_vlan); 345 cat_nthw_cfn_ptc_mpls(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_mpls); 346 cat_nthw_cfn_ptc_l3(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_l3); 347 cat_nthw_cfn_ptc_frag(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_frag); 348 cat_nthw_cfn_ptc_ip_prot(be->p_cat_nthw, 349 cat->v18.cfn[cat_func].ptc_ip_prot); 350 cat_nthw_cfn_ptc_l4(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_l4); 351 cat_nthw_cfn_ptc_tunnel(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tunnel); 352 cat_nthw_cfn_ptc_tnl_l2(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_l2); 353 cat_nthw_cfn_ptc_tnl_vlan(be->p_cat_nthw, 354 cat->v18.cfn[cat_func].ptc_tnl_vlan); 355 cat_nthw_cfn_ptc_tnl_mpls(be->p_cat_nthw, 356 cat->v18.cfn[cat_func].ptc_tnl_mpls); 357 cat_nthw_cfn_ptc_tnl_l3(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_l3); 358 cat_nthw_cfn_ptc_tnl_frag(be->p_cat_nthw, 359 cat->v18.cfn[cat_func].ptc_tnl_frag); 360 cat_nthw_cfn_ptc_tnl_ip_prot(be->p_cat_nthw, 361 cat->v18.cfn[cat_func].ptc_tnl_ip_prot); 362 cat_nthw_cfn_ptc_tnl_l4(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_l4); 363 364 cat_nthw_cfn_err_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].err_inv); 365 cat_nthw_cfn_err_cv(be->p_cat_nthw, cat->v18.cfn[cat_func].err_cv); 
366 cat_nthw_cfn_err_fcs(be->p_cat_nthw, cat->v18.cfn[cat_func].err_fcs); 367 cat_nthw_cfn_err_trunc(be->p_cat_nthw, cat->v18.cfn[cat_func].err_trunc); 368 cat_nthw_cfn_err_l3_cs(be->p_cat_nthw, cat->v18.cfn[cat_func].err_l3_cs); 369 cat_nthw_cfn_err_l4_cs(be->p_cat_nthw, cat->v18.cfn[cat_func].err_l4_cs); 370 371 cat_nthw_cfn_mac_port(be->p_cat_nthw, cat->v18.cfn[cat_func].mac_port); 372 373 cat_nthw_cfn_pm_cmp(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_cmp); 374 cat_nthw_cfn_pm_dct(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_dct); 375 cat_nthw_cfn_pm_ext_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_ext_inv); 376 cat_nthw_cfn_pm_cmb(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_cmb); 377 cat_nthw_cfn_pm_and_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_and_inv); 378 cat_nthw_cfn_pm_or_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_or_inv); 379 cat_nthw_cfn_pm_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_inv); 380 381 cat_nthw_cfn_lc(be->p_cat_nthw, cat->v18.cfn[cat_func].lc); 382 cat_nthw_cfn_lc_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].lc_inv); 383 cat_nthw_cfn_km0_or(be->p_cat_nthw, cat->v18.cfn[cat_func].km_or); 384 cat_nthw_cfn_flush(be->p_cat_nthw); 385 cat_func++; 386 } 387 388 } else if (cat->ver == 21) { 389 cat_nthw_cfn_cnt(be->p_cat_nthw, 1U); 390 391 for (int i = 0; i < cnt; i++) { 392 cat_nthw_cfn_select(be->p_cat_nthw, cat_func); 393 cat_nthw_cfn_enable(be->p_cat_nthw, cat->v21.cfn[cat_func].enable); 394 cat_nthw_cfn_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].inv); 395 cat_nthw_cfn_ptc_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_inv); 396 cat_nthw_cfn_ptc_isl(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_isl); 397 cat_nthw_cfn_ptc_cfp(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_cfp); 398 cat_nthw_cfn_ptc_mac(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_mac); 399 cat_nthw_cfn_ptc_l2(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_l2); 400 cat_nthw_cfn_ptc_vn_tag(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_vntag); 401 cat_nthw_cfn_ptc_vlan(be->p_cat_nthw, 
cat->v21.cfn[cat_func].ptc_vlan); 402 cat_nthw_cfn_ptc_mpls(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_mpls); 403 cat_nthw_cfn_ptc_l3(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_l3); 404 cat_nthw_cfn_ptc_frag(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_frag); 405 cat_nthw_cfn_ptc_ip_prot(be->p_cat_nthw, 406 cat->v21.cfn[cat_func].ptc_ip_prot); 407 cat_nthw_cfn_ptc_l4(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_l4); 408 cat_nthw_cfn_ptc_tunnel(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tunnel); 409 cat_nthw_cfn_ptc_tnl_l2(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_l2); 410 cat_nthw_cfn_ptc_tnl_vlan(be->p_cat_nthw, 411 cat->v21.cfn[cat_func].ptc_tnl_vlan); 412 cat_nthw_cfn_ptc_tnl_mpls(be->p_cat_nthw, 413 cat->v21.cfn[cat_func].ptc_tnl_mpls); 414 cat_nthw_cfn_ptc_tnl_l3(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_l3); 415 cat_nthw_cfn_ptc_tnl_frag(be->p_cat_nthw, 416 cat->v21.cfn[cat_func].ptc_tnl_frag); 417 cat_nthw_cfn_ptc_tnl_ip_prot(be->p_cat_nthw, 418 cat->v21.cfn[cat_func].ptc_tnl_ip_prot); 419 cat_nthw_cfn_ptc_tnl_l4(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_l4); 420 421 cat_nthw_cfn_err_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].err_inv); 422 cat_nthw_cfn_err_cv(be->p_cat_nthw, cat->v21.cfn[cat_func].err_cv); 423 cat_nthw_cfn_err_fcs(be->p_cat_nthw, cat->v21.cfn[cat_func].err_fcs); 424 cat_nthw_cfn_err_trunc(be->p_cat_nthw, cat->v21.cfn[cat_func].err_trunc); 425 cat_nthw_cfn_err_l3_cs(be->p_cat_nthw, cat->v21.cfn[cat_func].err_l3_cs); 426 cat_nthw_cfn_err_l4_cs(be->p_cat_nthw, cat->v21.cfn[cat_func].err_l4_cs); 427 cat_nthw_cfn_err_tnl_l3_cs(be->p_cat_nthw, 428 cat->v21.cfn[cat_func].err_tnl_l3_cs); 429 cat_nthw_cfn_err_tnl_l4_cs(be->p_cat_nthw, 430 cat->v21.cfn[cat_func].err_tnl_l4_cs); 431 cat_nthw_cfn_err_ttl_exp(be->p_cat_nthw, 432 cat->v21.cfn[cat_func].err_ttl_exp); 433 cat_nthw_cfn_err_tnl_ttl_exp(be->p_cat_nthw, 434 cat->v21.cfn[cat_func].err_tnl_ttl_exp); 435 436 cat_nthw_cfn_mac_port(be->p_cat_nthw, cat->v21.cfn[cat_func].mac_port); 437 438 
cat_nthw_cfn_pm_cmp(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_cmp); 439 cat_nthw_cfn_pm_dct(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_dct); 440 cat_nthw_cfn_pm_ext_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_ext_inv); 441 cat_nthw_cfn_pm_cmb(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_cmb); 442 cat_nthw_cfn_pm_and_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_and_inv); 443 cat_nthw_cfn_pm_or_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_or_inv); 444 cat_nthw_cfn_pm_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_inv); 445 446 cat_nthw_cfn_lc(be->p_cat_nthw, cat->v21.cfn[cat_func].lc); 447 cat_nthw_cfn_lc_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].lc_inv); 448 cat_nthw_cfn_km0_or(be->p_cat_nthw, cat->v21.cfn[cat_func].km0_or); 449 450 if (be->p_cat_nthw->m_km_if_cnt > 1) 451 cat_nthw_cfn_km1_or(be->p_cat_nthw, cat->v21.cfn[cat_func].km1_or); 452 453 cat_nthw_cfn_flush(be->p_cat_nthw); 454 cat_func++; 455 } 456 } 457 458 CHECK_DEBUG_OFF(cat, be->p_cat_nthw); 459 return 0; 460 } 461 462 static int cat_kce_flush(void *be_dev, const struct cat_func_s *cat, int km_if_idx, int index, 463 int cnt) 464 { 465 struct backend_dev_s *be = (struct backend_dev_s *)be_dev; 466 CHECK_DEBUG_ON(be, cat, be->p_cat_nthw); 467 468 if (cat->ver == 18) { 469 cat_nthw_kce_cnt(be->p_cat_nthw, 0, 1U); 470 471 for (int i = 0; i < cnt; i++) { 472 cat_nthw_kce_select(be->p_cat_nthw, 0, index + i); 473 cat_nthw_kce_enable(be->p_cat_nthw, 0, cat->v18.kce[index + i].enable_bm); 474 cat_nthw_kce_flush(be->p_cat_nthw, 0); 475 } 476 477 } else if (cat->ver == 21) { 478 cat_nthw_kce_cnt(be->p_cat_nthw, km_if_idx, 1U); 479 480 for (int i = 0; i < cnt; i++) { 481 cat_nthw_kce_select(be->p_cat_nthw, km_if_idx, index + i); 482 cat_nthw_kce_enable(be->p_cat_nthw, km_if_idx, 483 cat->v21.kce[index + i].enable_bm[km_if_idx]); 484 cat_nthw_kce_flush(be->p_cat_nthw, km_if_idx); 485 } 486 } 487 488 CHECK_DEBUG_OFF(cat, be->p_cat_nthw); 489 return 0; 490 } 491 492 static int cat_kcs_flush(void *be_dev, const 
struct cat_func_s *cat, int km_if_idx, int cat_func, 493 int cnt) 494 { 495 struct backend_dev_s *be = (struct backend_dev_s *)be_dev; 496 CHECK_DEBUG_ON(be, cat, be->p_cat_nthw); 497 498 if (cat->ver == 18) { 499 cat_nthw_kcs_cnt(be->p_cat_nthw, 0, 1U); 500 501 for (int i = 0; i < cnt; i++) { 502 cat_nthw_kcs_select(be->p_cat_nthw, 0, cat_func); 503 cat_nthw_kcs_category(be->p_cat_nthw, 0, cat->v18.kcs[cat_func].category); 504 cat_nthw_kcs_flush(be->p_cat_nthw, 0); 505 cat_func++; 506 } 507 508 } else if (cat->ver == 21) { 509 cat_nthw_kcs_cnt(be->p_cat_nthw, km_if_idx, 1U); 510 511 for (int i = 0; i < cnt; i++) { 512 cat_nthw_kcs_select(be->p_cat_nthw, km_if_idx, cat_func); 513 cat_nthw_kcs_category(be->p_cat_nthw, km_if_idx, 514 cat->v21.kcs[cat_func].category[km_if_idx]); 515 cat_nthw_kcs_flush(be->p_cat_nthw, km_if_idx); 516 cat_func++; 517 } 518 } 519 520 CHECK_DEBUG_OFF(cat, be->p_cat_nthw); 521 return 0; 522 } 523 524 static int cat_fte_flush(void *be_dev, const struct cat_func_s *cat, int km_if_idx, int index, 525 int cnt) 526 { 527 struct backend_dev_s *be = (struct backend_dev_s *)be_dev; 528 CHECK_DEBUG_ON(be, cat, be->p_cat_nthw); 529 530 if (cat->ver == 18) { 531 cat_nthw_fte_cnt(be->p_cat_nthw, 0, 1); 532 533 for (int i = 0; i < cnt; i++) { 534 cat_nthw_fte_select(be->p_cat_nthw, 0, index + i); 535 cat_nthw_fte_enable(be->p_cat_nthw, 0, cat->v18.fte[index + i].enable_bm); 536 cat_nthw_fte_flush(be->p_cat_nthw, 0); 537 } 538 539 } else if (cat->ver == 21) { 540 cat_nthw_fte_cnt(be->p_cat_nthw, km_if_idx, 1); 541 542 for (int i = 0; i < cnt; i++) { 543 cat_nthw_fte_select(be->p_cat_nthw, km_if_idx, index + i); 544 cat_nthw_fte_enable(be->p_cat_nthw, km_if_idx, 545 cat->v21.fte[index + i].enable_bm[km_if_idx]); 546 cat_nthw_fte_flush(be->p_cat_nthw, km_if_idx); 547 } 548 } 549 550 CHECK_DEBUG_OFF(cat, be->p_cat_nthw); 551 return 0; 552 } 553 554 static int cat_cte_flush(void *be_dev, const struct cat_func_s *cat, int cat_func, int cnt) 555 { 556 struct 
backend_dev_s *be = (struct backend_dev_s *)be_dev; 557 CHECK_DEBUG_ON(be, cat, be->p_cat_nthw); 558 559 if (cat->ver == 18 || cat->ver == 21) { 560 cat_nthw_cte_cnt(be->p_cat_nthw, 1); 561 562 for (int i = 0; i < cnt; i++) { 563 cat_nthw_cte_select(be->p_cat_nthw, cat_func); 564 cat_nthw_cte_enable_col(be->p_cat_nthw, cat->v18.cte[cat_func].b.col); 565 cat_nthw_cte_enable_cor(be->p_cat_nthw, cat->v18.cte[cat_func].b.cor); 566 cat_nthw_cte_enable_hsh(be->p_cat_nthw, cat->v18.cte[cat_func].b.hsh); 567 cat_nthw_cte_enable_qsl(be->p_cat_nthw, cat->v18.cte[cat_func].b.qsl); 568 cat_nthw_cte_enable_ipf(be->p_cat_nthw, cat->v18.cte[cat_func].b.ipf); 569 cat_nthw_cte_enable_slc(be->p_cat_nthw, cat->v18.cte[cat_func].b.slc); 570 cat_nthw_cte_enable_pdb(be->p_cat_nthw, cat->v18.cte[cat_func].b.pdb); 571 cat_nthw_cte_enable_msk(be->p_cat_nthw, cat->v18.cte[cat_func].b.msk); 572 cat_nthw_cte_enable_hst(be->p_cat_nthw, cat->v18.cte[cat_func].b.hst); 573 cat_nthw_cte_enable_epp(be->p_cat_nthw, cat->v18.cte[cat_func].b.epp); 574 cat_nthw_cte_enable_tpe(be->p_cat_nthw, cat->v18.cte[cat_func].b.tpe); 575 576 cat_nthw_cte_flush(be->p_cat_nthw); 577 cat_func++; 578 } 579 } 580 581 CHECK_DEBUG_OFF(cat, be->p_cat_nthw); 582 return 0; 583 } 584 585 static int cat_cts_flush(void *be_dev, const struct cat_func_s *cat, int index, int cnt) 586 { 587 struct backend_dev_s *be = (struct backend_dev_s *)be_dev; 588 CHECK_DEBUG_ON(be, cat, be->p_cat_nthw); 589 590 if (cat->ver == 18 || cat->ver == 21) { 591 cat_nthw_cts_cnt(be->p_cat_nthw, 1); 592 593 for (int i = 0; i < cnt; i++) { 594 cat_nthw_cts_select(be->p_cat_nthw, index + i); 595 cat_nthw_cts_cat_a(be->p_cat_nthw, cat->v18.cts[index + i].cat_a); 596 cat_nthw_cts_cat_b(be->p_cat_nthw, cat->v18.cts[index + i].cat_b); 597 cat_nthw_cts_flush(be->p_cat_nthw); 598 } 599 } 600 601 CHECK_DEBUG_OFF(cat, be->p_cat_nthw); 602 return 0; 603 } 604 605 static int cat_cot_flush(void *be_dev, const struct cat_func_s *cat, int cat_func, int cnt) 606 { 
607 struct backend_dev_s *be = (struct backend_dev_s *)be_dev; 608 CHECK_DEBUG_ON(be, cat, be->p_cat_nthw); 609 610 if (cat->ver == 18 || cat->ver == 21) { 611 cat_nthw_cot_cnt(be->p_cat_nthw, 1); 612 613 for (int i = 0; i < cnt; i++) { 614 cat_nthw_cot_select(be->p_cat_nthw, cat_func + i); 615 cat_nthw_cot_color(be->p_cat_nthw, cat->v18.cot[cat_func + i].color); 616 cat_nthw_cot_km(be->p_cat_nthw, cat->v18.cot[cat_func + i].km); 617 cat_nthw_cot_flush(be->p_cat_nthw); 618 } 619 } 620 621 CHECK_DEBUG_OFF(cat, be->p_cat_nthw); 622 return 0; 623 } 624 625 static int cat_cct_flush(void *be_dev, const struct cat_func_s *cat, int index, int cnt) 626 { 627 struct backend_dev_s *be = (struct backend_dev_s *)be_dev; 628 CHECK_DEBUG_ON(be, cat, be->p_cat_nthw); 629 630 if (cat->ver == 18 || cat->ver == 21) { 631 cat_nthw_cct_cnt(be->p_cat_nthw, 1); 632 633 for (int i = 0; i < cnt; i++) { 634 cat_nthw_cct_select(be->p_cat_nthw, index + i); 635 cat_nthw_cct_color(be->p_cat_nthw, cat->v18.cct[index + i].color); 636 cat_nthw_cct_km(be->p_cat_nthw, cat->v18.cct[index + i].km); 637 cat_nthw_cct_flush(be->p_cat_nthw); 638 } 639 } 640 641 CHECK_DEBUG_OFF(cat, be->p_cat_nthw); 642 return 0; 643 } 644 645 static int cat_exo_flush(void *be_dev, const struct cat_func_s *cat, int ext_index, int cnt) 646 { 647 struct backend_dev_s *be = (struct backend_dev_s *)be_dev; 648 CHECK_DEBUG_ON(be, cat, be->p_cat_nthw); 649 650 if (cat->ver == 18 || cat->ver == 21) { 651 cat_nthw_exo_cnt(be->p_cat_nthw, 1); 652 653 for (int i = 0; i < cnt; i++) { 654 cat_nthw_exo_select(be->p_cat_nthw, ext_index + i); 655 cat_nthw_exo_dyn(be->p_cat_nthw, cat->v18.exo[ext_index + i].dyn); 656 cat_nthw_exo_ofs(be->p_cat_nthw, cat->v18.exo[ext_index + i].ofs); 657 cat_nthw_exo_flush(be->p_cat_nthw); 658 } 659 } 660 661 CHECK_DEBUG_OFF(cat, be->p_cat_nthw); 662 return 0; 663 } 664 665 static int cat_rck_flush(void *be_dev, const struct cat_func_s *cat, int index, int cnt) 666 { 667 struct backend_dev_s *be = (struct 
backend_dev_s *)be_dev; 668 CHECK_DEBUG_ON(be, cat, be->p_cat_nthw); 669 670 if (cat->ver == 18 || cat->ver == 21) { 671 cat_nthw_rck_cnt(be->p_cat_nthw, 1); 672 673 for (int i = 0; i < cnt; i++) { 674 cat_nthw_rck_select(be->p_cat_nthw, index + i); 675 cat_nthw_rck_data(be->p_cat_nthw, cat->v18.rck[index + i].rck_data); 676 cat_nthw_rck_flush(be->p_cat_nthw); 677 } 678 } 679 680 CHECK_DEBUG_OFF(cat, be->p_cat_nthw); 681 return 0; 682 } 683 684 static int cat_len_flush(void *be_dev, const struct cat_func_s *cat, int len_index, int cnt) 685 { 686 struct backend_dev_s *be = (struct backend_dev_s *)be_dev; 687 CHECK_DEBUG_ON(be, cat, be->p_cat_nthw); 688 689 if (cat->ver == 18 || cat->ver == 21) { 690 cat_nthw_len_cnt(be->p_cat_nthw, 1); 691 692 for (int i = 0; i < cnt; i++) { 693 cat_nthw_len_select(be->p_cat_nthw, len_index + i); 694 cat_nthw_len_lower(be->p_cat_nthw, cat->v18.len[len_index + i].lower); 695 cat_nthw_len_upper(be->p_cat_nthw, cat->v18.len[len_index + i].upper); 696 cat_nthw_len_dyn1(be->p_cat_nthw, cat->v18.len[len_index + i].dyn1); 697 cat_nthw_len_dyn2(be->p_cat_nthw, cat->v18.len[len_index + i].dyn2); 698 cat_nthw_len_inv(be->p_cat_nthw, cat->v18.len[len_index + i].inv); 699 cat_nthw_len_flush(be->p_cat_nthw); 700 } 701 } 702 703 CHECK_DEBUG_OFF(cat, be->p_cat_nthw); 704 return 0; 705 } 706 707 static int cat_kcc_flush(void *be_dev, const struct cat_func_s *cat, int len_index, int cnt) 708 { 709 struct backend_dev_s *be = (struct backend_dev_s *)be_dev; 710 CHECK_DEBUG_ON(be, cat, be->p_cat_nthw); 711 712 if (cat->ver == 18 || cat->ver == 21) { 713 cat_nthw_kcc_cnt(be->p_cat_nthw, 1); 714 715 for (int i = 0; i < cnt; i++) { 716 cat_nthw_kcc_select(be->p_cat_nthw, len_index + i); 717 cat_nthw_kcc_key(be->p_cat_nthw, cat->v18.kcc_cam[len_index + i].key); 718 cat_nthw_kcc_category(be->p_cat_nthw, 719 cat->v18.kcc_cam[len_index + i].category); 720 cat_nthw_kcc_id(be->p_cat_nthw, cat->v18.kcc_cam[len_index + i].id); 721 
cat_nthw_kcc_flush(be->p_cat_nthw); 722 } 723 } 724 725 CHECK_DEBUG_OFF(cat, be->p_cat_nthw); 726 return 0; 727 } 728 729 /* 730 * KM 731 */ 732 733 static bool km_get_present(void *be_dev) 734 { 735 struct backend_dev_s *be = (struct backend_dev_s *)be_dev; 736 return be->p_km_nthw != NULL; 737 } 738 739 static uint32_t km_get_version(void *be_dev) 740 { 741 struct backend_dev_s *be = (struct backend_dev_s *)be_dev; 742 return (uint32_t)((nthw_module_get_major_version(be->p_km_nthw->m_km) << 16) | 743 (nthw_module_get_minor_version(be->p_km_nthw->m_km) & 0xffff)); 744 } 745 746 static int km_rcp_flush(void *be_dev, const struct km_func_s *km, int category, int cnt) 747 { 748 struct backend_dev_s *be = (struct backend_dev_s *)be_dev; 749 750 CHECK_DEBUG_ON(be, km, be->p_km_nthw); 751 752 if (km->ver == 7) { 753 km_nthw_rcp_cnt(be->p_km_nthw, 1); 754 755 for (int i = 0; i < cnt; i++) { 756 km_nthw_rcp_select(be->p_km_nthw, category + i); 757 km_nthw_rcp_qw0_dyn(be->p_km_nthw, km->v7.rcp[category + i].qw0_dyn); 758 km_nthw_rcp_qw0_ofs(be->p_km_nthw, km->v7.rcp[category + i].qw0_ofs); 759 km_nthw_rcp_qw0_sel_a(be->p_km_nthw, km->v7.rcp[category + i].qw0_sel_a); 760 km_nthw_rcp_qw0_sel_b(be->p_km_nthw, km->v7.rcp[category + i].qw0_sel_b); 761 km_nthw_rcp_qw4_dyn(be->p_km_nthw, km->v7.rcp[category + i].qw4_dyn); 762 km_nthw_rcp_qw4_ofs(be->p_km_nthw, km->v7.rcp[category + i].qw4_ofs); 763 km_nthw_rcp_qw4_sel_a(be->p_km_nthw, km->v7.rcp[category + i].qw4_sel_a); 764 km_nthw_rcp_qw4_sel_b(be->p_km_nthw, km->v7.rcp[category + i].qw4_sel_b); 765 km_nthw_rcp_dw8_dyn(be->p_km_nthw, km->v7.rcp[category + i].dw8_dyn); 766 km_nthw_rcp_dw8_ofs(be->p_km_nthw, km->v7.rcp[category + i].dw8_ofs); 767 km_nthw_rcp_dw8_sel_a(be->p_km_nthw, km->v7.rcp[category + i].dw8_sel_a); 768 km_nthw_rcp_dw8_sel_b(be->p_km_nthw, km->v7.rcp[category + i].dw8_sel_b); 769 km_nthw_rcp_dw10_dyn(be->p_km_nthw, km->v7.rcp[category + i].dw10_dyn); 770 km_nthw_rcp_dw10_ofs(be->p_km_nthw, km->v7.rcp[category 
+ i].dw10_ofs); 771 km_nthw_rcp_dw10_sel_a(be->p_km_nthw, km->v7.rcp[category + i].dw10_sel_a); 772 km_nthw_rcp_dw10_sel_b(be->p_km_nthw, km->v7.rcp[category + i].dw10_sel_b); 773 km_nthw_rcp_swx_cch(be->p_km_nthw, km->v7.rcp[category + i].swx_cch); 774 km_nthw_rcp_swx_sel_a(be->p_km_nthw, km->v7.rcp[category + i].swx_sel_a); 775 km_nthw_rcp_swx_sel_b(be->p_km_nthw, km->v7.rcp[category + i].swx_sel_b); 776 km_nthw_rcp_mask_da(be->p_km_nthw, km->v7.rcp[category + i].mask_d_a); 777 km_nthw_rcp_mask_b(be->p_km_nthw, km->v7.rcp[category + i].mask_b); 778 km_nthw_rcp_dual(be->p_km_nthw, km->v7.rcp[category + i].dual); 779 km_nthw_rcp_paired(be->p_km_nthw, km->v7.rcp[category + i].paired); 780 km_nthw_rcp_el_a(be->p_km_nthw, km->v7.rcp[category + i].el_a); 781 km_nthw_rcp_el_b(be->p_km_nthw, km->v7.rcp[category + i].el_b); 782 km_nthw_rcp_info_a(be->p_km_nthw, km->v7.rcp[category + i].info_a); 783 km_nthw_rcp_info_b(be->p_km_nthw, km->v7.rcp[category + i].info_b); 784 km_nthw_rcp_ftm_a(be->p_km_nthw, km->v7.rcp[category + i].ftm_a); 785 km_nthw_rcp_ftm_b(be->p_km_nthw, km->v7.rcp[category + i].ftm_b); 786 km_nthw_rcp_bank_a(be->p_km_nthw, km->v7.rcp[category + i].bank_a); 787 km_nthw_rcp_bank_b(be->p_km_nthw, km->v7.rcp[category + i].bank_b); 788 km_nthw_rcp_kl_a(be->p_km_nthw, km->v7.rcp[category + i].kl_a); 789 km_nthw_rcp_kl_b(be->p_km_nthw, km->v7.rcp[category + i].kl_b); 790 km_nthw_rcp_keyway_a(be->p_km_nthw, km->v7.rcp[category + i].keyway_a); 791 km_nthw_rcp_keyway_b(be->p_km_nthw, km->v7.rcp[category + i].keyway_b); 792 km_nthw_rcp_synergy_mode(be->p_km_nthw, 793 km->v7.rcp[category + i].synergy_mode); 794 km_nthw_rcp_dw0_b_dyn(be->p_km_nthw, km->v7.rcp[category + i].dw0_b_dyn); 795 km_nthw_rcp_dw0_b_ofs(be->p_km_nthw, km->v7.rcp[category + i].dw0_b_ofs); 796 km_nthw_rcp_dw2_b_dyn(be->p_km_nthw, km->v7.rcp[category + i].dw2_b_dyn); 797 km_nthw_rcp_dw2_b_ofs(be->p_km_nthw, km->v7.rcp[category + i].dw2_b_ofs); 798 km_nthw_rcp_sw4_b_dyn(be->p_km_nthw, 
km->v7.rcp[category + i].sw4_b_dyn); 799 km_nthw_rcp_sw4_b_ofs(be->p_km_nthw, km->v7.rcp[category + i].sw4_b_ofs); 800 km_nthw_rcp_sw5_b_dyn(be->p_km_nthw, km->v7.rcp[category + i].sw5_b_dyn); 801 km_nthw_rcp_sw5_b_ofs(be->p_km_nthw, km->v7.rcp[category + i].sw5_b_ofs); 802 km_nthw_rcp_flush(be->p_km_nthw); 803 } 804 } 805 806 CHECK_DEBUG_OFF(km, be->p_km_nthw); 807 return 0; 808 } 809 810 static int km_cam_flush(void *be_dev, const struct km_func_s *km, int bank, int record, int cnt) 811 { 812 struct backend_dev_s *be = (struct backend_dev_s *)be_dev; 813 CHECK_DEBUG_ON(be, km, be->p_km_nthw); 814 815 if (km->ver == 7) { 816 km_nthw_cam_cnt(be->p_km_nthw, 1); 817 818 for (int i = 0; i < cnt; i++) { 819 km_nthw_cam_select(be->p_km_nthw, (bank << 11) + record + i); 820 km_nthw_cam_w0(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w0); 821 km_nthw_cam_w1(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w1); 822 km_nthw_cam_w2(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w2); 823 km_nthw_cam_w3(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w3); 824 km_nthw_cam_w4(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w4); 825 km_nthw_cam_w5(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w5); 826 km_nthw_cam_ft0(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft0); 827 km_nthw_cam_ft1(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft1); 828 km_nthw_cam_ft2(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft2); 829 km_nthw_cam_ft3(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft3); 830 km_nthw_cam_ft4(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft4); 831 km_nthw_cam_ft5(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft5); 832 km_nthw_cam_flush(be->p_km_nthw); 833 } 834 } 835 836 CHECK_DEBUG_OFF(km, be->p_km_nthw); 837 return 0; 838 } 839 840 static int km_tcam_flush(void *be_dev, const struct km_func_s *km, int bank, int byte, int value, 841 int cnt) 842 { 843 struct backend_dev_s *be = (struct 
backend_dev_s *)be_dev; 844 CHECK_DEBUG_ON(be, km, be->p_km_nthw); 845 846 if (km->ver == 7) { 847 int start_idx = bank * 4 * 256 + byte * 256 + value; 848 km_nthw_tcam_cnt(be->p_km_nthw, 1); 849 850 for (int i = 0; i < cnt; i++) { 851 if (km->v7.tcam[start_idx + i].dirty) { 852 km_nthw_tcam_select(be->p_km_nthw, start_idx + i); 853 km_nthw_tcam_t(be->p_km_nthw, km->v7.tcam[start_idx + i].t); 854 km_nthw_tcam_flush(be->p_km_nthw); 855 km->v7.tcam[start_idx + i].dirty = 0; 856 } 857 } 858 } 859 860 CHECK_DEBUG_OFF(km, be->p_km_nthw); 861 return 0; 862 } 863 864 /* 865 * bank is the TCAM bank, index is the index within the bank (0..71) 866 */ 867 static int km_tci_flush(void *be_dev, const struct km_func_s *km, int bank, int index, int cnt) 868 { 869 struct backend_dev_s *be = (struct backend_dev_s *)be_dev; 870 CHECK_DEBUG_ON(be, km, be->p_km_nthw); 871 872 if (km->ver == 7) { 873 /* TCAM bank width in version 3 = 72 */ 874 km_nthw_tci_cnt(be->p_km_nthw, 1); 875 876 for (int i = 0; i < cnt; i++) { 877 km_nthw_tci_select(be->p_km_nthw, bank * 72 + index + i); 878 km_nthw_tci_color(be->p_km_nthw, km->v7.tci[bank * 72 + index + i].color); 879 km_nthw_tci_ft(be->p_km_nthw, km->v7.tci[bank * 72 + index + i].ft); 880 km_nthw_tci_flush(be->p_km_nthw); 881 } 882 } 883 884 CHECK_DEBUG_OFF(km, be->p_km_nthw); 885 return 0; 886 } 887 888 /* 889 * bank is the TCAM bank, index is the index within the bank (0..71) 890 */ 891 static int km_tcq_flush(void *be_dev, const struct km_func_s *km, int bank, int index, int cnt) 892 { 893 struct backend_dev_s *be = (struct backend_dev_s *)be_dev; 894 CHECK_DEBUG_ON(be, km, be->p_km_nthw); 895 896 if (km->ver == 7) { 897 /* TCAM bank width in version 3 = 72 */ 898 km_nthw_tcq_cnt(be->p_km_nthw, 1); 899 900 for (int i = 0; i < cnt; i++) { 901 /* adr = lover 4 bits = bank, upper 7 bits = index */ 902 km_nthw_tcq_select(be->p_km_nthw, bank + (index << 4) + i); 903 km_nthw_tcq_bank_mask(be->p_km_nthw, 904 km->v7.tcq[bank + (index << 4) + 
i].bank_mask);
			km_nthw_tcq_qual(be->p_km_nthw, km->v7.tcq[bank + (index << 4) + i].qual);
			km_nthw_tcq_flush(be->p_km_nthw);
		}
	}

	CHECK_DEBUG_OFF(km, be->p_km_nthw);
	return 0;
}

/*
 * FLM
 */

/* Return true when the FLM module handle was created at backend init time. */
static bool flm_get_present(void *be_dev)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
	return be->p_flm_nthw != NULL;
}

/*
 * Return the FLM module version encoded as (major << 16) | (minor & 0xffff).
 * NOTE(review): dereferences p_flm_nthw without a NULL check; callers are
 * presumably expected to check flm_get_present() first -- verify.
 */
static uint32_t flm_get_version(void *be_dev)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
	return (uint32_t)((nthw_module_get_major_version(be->p_flm_nthw->m_flm) << 16) |
		(nthw_module_get_minor_version(be->p_flm_nthw->m_flm) & 0xffff));
}

/* Flush the FLM CONTROL register group from the shadow copy to hardware. */
static int flm_control_flush(void *be_dev, const struct flm_func_s *flm)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);

	if (flm->ver >= 25) {
		flm_nthw_control_enable(be->p_flm_nthw, flm->v25.control->enable);
		flm_nthw_control_init(be->p_flm_nthw, flm->v25.control->init);
		flm_nthw_control_lds(be->p_flm_nthw, flm->v25.control->lds);
		flm_nthw_control_lfs(be->p_flm_nthw, flm->v25.control->lfs);
		flm_nthw_control_lis(be->p_flm_nthw, flm->v25.control->lis);
		flm_nthw_control_uds(be->p_flm_nthw, flm->v25.control->uds);
		flm_nthw_control_uis(be->p_flm_nthw, flm->v25.control->uis);
		flm_nthw_control_rds(be->p_flm_nthw, flm->v25.control->rds);
		flm_nthw_control_ris(be->p_flm_nthw, flm->v25.control->ris);
		flm_nthw_control_pds(be->p_flm_nthw, flm->v25.control->pds);
		flm_nthw_control_pis(be->p_flm_nthw, flm->v25.control->pis);
		flm_nthw_control_crcwr(be->p_flm_nthw, flm->v25.control->crcwr);
		flm_nthw_control_crcrd(be->p_flm_nthw, flm->v25.control->crcrd);
		flm_nthw_control_rbl(be->p_flm_nthw, flm->v25.control->rbl);
		flm_nthw_control_eab(be->p_flm_nthw, flm->v25.control->eab);
		flm_nthw_control_split_sdram_usage(be->p_flm_nthw,
			flm->v25.control->split_sdram_usage);
flm_nthw_control_flush(be->p_flm_nthw);
	}

	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
	return 0;
}

/*
 * Write the writable FLM STATUS fields from the shadow copy to hardware.
 * The final argument (0 here, 1 in flm_status_update) presumably selects
 * write vs. read direction -- verify against the flm_nthw API.
 */
static int flm_status_flush(void *be_dev, const struct flm_func_s *flm)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);

	if (flm->ver >= 25) {
		/* CALIBDONE, INITDONE, IDLE, and EFT_BP is read only */
		flm_nthw_status_critical(be->p_flm_nthw, &flm->v25.status->critical, 0);
		flm_nthw_status_panic(be->p_flm_nthw, &flm->v25.status->panic, 0);
		flm_nthw_status_crcerr(be->p_flm_nthw, &flm->v25.status->crcerr, 0);
		flm_nthw_status_cache_buf_crit(be->p_flm_nthw,
			&flm->v25.status->cache_buf_critical, 0);
		flm_nthw_status_flush(be->p_flm_nthw);
	}

	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
	return 0;
}

/*
 * Refresh the STATUS register group from hardware, then copy each field
 * into the shadow copy (final argument 1 -- see note on flm_status_flush).
 */
static int flm_status_update(void *be_dev, const struct flm_func_s *flm)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);

	if (flm->ver >= 25) {
		flm_nthw_status_update(be->p_flm_nthw);
		flm_nthw_status_calib_success(be->p_flm_nthw, &flm->v25.status->calib_success, 1);
		flm_nthw_status_calib_fail(be->p_flm_nthw, &flm->v25.status->calib_fail, 1);
		flm_nthw_status_initdone(be->p_flm_nthw, &flm->v25.status->initdone, 1);
		flm_nthw_status_idle(be->p_flm_nthw, &flm->v25.status->idle, 1);
		flm_nthw_status_critical(be->p_flm_nthw, &flm->v25.status->critical, 1);
		flm_nthw_status_panic(be->p_flm_nthw, &flm->v25.status->panic, 1);
		flm_nthw_status_crcerr(be->p_flm_nthw, &flm->v25.status->crcerr, 1);
		flm_nthw_status_eft_bp(be->p_flm_nthw, &flm->v25.status->eft_bp, 1);
		flm_nthw_status_cache_buf_crit(be->p_flm_nthw,
			&flm->v25.status->cache_buf_critical, 1);
	}

	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
	return 0;
}

/* Flush the FLM SCAN interval register from the shadow copy to hardware. */
static int flm_scan_flush(void *be_dev, const struct flm_func_s *flm)
{
	struct backend_dev_s *be =
(struct backend_dev_s *)be_dev;
	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);

	if (flm->ver >= 25) {
		flm_nthw_scan_i(be->p_flm_nthw, flm->v25.scan->i);
		flm_nthw_scan_flush(be->p_flm_nthw);
	}

	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
	return 0;
}

/* Flush the FLM LOAD_BIN register from the shadow copy to hardware. */
static int flm_load_bin_flush(void *be_dev, const struct flm_func_s *flm)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);

	if (flm->ver >= 25) {
		flm_nthw_load_bin(be->p_flm_nthw, flm->v25.load_bin->bin);
		flm_nthw_load_bin_flush(be->p_flm_nthw);
	}

	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
	return 0;
}

/* Flush all four FLM priority limit/flow-type pairs to hardware. */
static int flm_prio_flush(void *be_dev, const struct flm_func_s *flm)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);

	if (flm->ver >= 25) {
		flm_nthw_prio_limit0(be->p_flm_nthw, flm->v25.prio->limit0);
		flm_nthw_prio_ft0(be->p_flm_nthw, flm->v25.prio->ft0);
		flm_nthw_prio_limit1(be->p_flm_nthw, flm->v25.prio->limit1);
		flm_nthw_prio_ft1(be->p_flm_nthw, flm->v25.prio->ft1);
		flm_nthw_prio_limit2(be->p_flm_nthw, flm->v25.prio->limit2);
		flm_nthw_prio_ft2(be->p_flm_nthw, flm->v25.prio->ft2);
		flm_nthw_prio_limit3(be->p_flm_nthw, flm->v25.prio->limit3);
		flm_nthw_prio_ft3(be->p_flm_nthw, flm->v25.prio->ft3);
		flm_nthw_prio_flush(be->p_flm_nthw);
	}

	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
	return 0;
}

/* Flush 'cnt' FLM PST records, starting at 'index', to hardware. */
static int flm_pst_flush(void *be_dev, const struct flm_func_s *flm, int index, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);

	if (flm->ver >= 25) {
		/* One record per register flush */
		flm_nthw_pst_cnt(be->p_flm_nthw, 1);

		for (int i = 0; i < cnt; i++) {
			flm_nthw_pst_select(be->p_flm_nthw, index + i);
			flm_nthw_pst_bp(be->p_flm_nthw, flm->v25.pst[index +
i].bp);
			flm_nthw_pst_pp(be->p_flm_nthw, flm->v25.pst[index + i].pp);
			flm_nthw_pst_tp(be->p_flm_nthw, flm->v25.pst[index + i].tp);
			flm_nthw_pst_flush(be->p_flm_nthw);
		}
	}

	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
	return 0;
}

/* Flush 'cnt' FLM RCP records, starting at 'index', to hardware. */
static int flm_rcp_flush(void *be_dev, const struct flm_func_s *flm, int index, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);

	if (flm->ver >= 25) {
		/* One record per register flush */
		flm_nthw_rcp_cnt(be->p_flm_nthw, 1);

		for (int i = 0; i < cnt; i++) {
			flm_nthw_rcp_select(be->p_flm_nthw, index + i);
			flm_nthw_rcp_lookup(be->p_flm_nthw, flm->v25.rcp[index + i].lookup);
			flm_nthw_rcp_qw0_dyn(be->p_flm_nthw, flm->v25.rcp[index + i].qw0_dyn);
			flm_nthw_rcp_qw0_ofs(be->p_flm_nthw, flm->v25.rcp[index + i].qw0_ofs);
			flm_nthw_rcp_qw0_sel(be->p_flm_nthw, flm->v25.rcp[index + i].qw0_sel);
			flm_nthw_rcp_qw4_dyn(be->p_flm_nthw, flm->v25.rcp[index + i].qw4_dyn);
			flm_nthw_rcp_qw4_ofs(be->p_flm_nthw, flm->v25.rcp[index + i].qw4_ofs);
			flm_nthw_rcp_sw8_dyn(be->p_flm_nthw, flm->v25.rcp[index + i].sw8_dyn);
			flm_nthw_rcp_sw8_ofs(be->p_flm_nthw, flm->v25.rcp[index + i].sw8_ofs);
			flm_nthw_rcp_sw8_sel(be->p_flm_nthw, flm->v25.rcp[index + i].sw8_sel);
			flm_nthw_rcp_sw9_dyn(be->p_flm_nthw, flm->v25.rcp[index + i].sw9_dyn);
			flm_nthw_rcp_sw9_ofs(be->p_flm_nthw, flm->v25.rcp[index + i].sw9_ofs);
			flm_nthw_rcp_mask(be->p_flm_nthw, flm->v25.rcp[index + i].mask);
			flm_nthw_rcp_kid(be->p_flm_nthw, flm->v25.rcp[index + i].kid);
			flm_nthw_rcp_opn(be->p_flm_nthw, flm->v25.rcp[index + i].opn);
			flm_nthw_rcp_ipn(be->p_flm_nthw, flm->v25.rcp[index + i].ipn);
			flm_nthw_rcp_byt_dyn(be->p_flm_nthw, flm->v25.rcp[index + i].byt_dyn);
			flm_nthw_rcp_byt_ofs(be->p_flm_nthw, flm->v25.rcp[index + i].byt_ofs);
			flm_nthw_rcp_txplm(be->p_flm_nthw, flm->v25.rcp[index + i].txplm);
flm_nthw_rcp_auto_ipv4_mask(be->p_flm_nthw,
				flm->v25.rcp[index + i].auto_ipv4_mask);
			flm_nthw_rcp_flush(be->p_flm_nthw);
		}
	}

	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
	return 0;
}

/* Flush 'cnt' FLM SCRUB profiles, starting at 'index', to hardware. */
static int flm_scrub_flush(void *be_dev, const struct flm_func_s *flm, int index, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);

	if (flm->ver >= 25) {
		/* One profile per register flush */
		flm_nthw_scrub_cnt(be->p_flm_nthw, 1);

		for (int i = 0; i < cnt; i++) {
			flm_nthw_scrub_select(be->p_flm_nthw, index + i);
			flm_nthw_scrub_t(be->p_flm_nthw, flm->v25.scrub[index + i].t);
			flm_nthw_scrub_r(be->p_flm_nthw, flm->v25.scrub[index + i].r);
			flm_nthw_scrub_del(be->p_flm_nthw, flm->v25.scrub[index + i].del);
			flm_nthw_scrub_inf(be->p_flm_nthw, flm->v25.scrub[index + i].inf);
			flm_nthw_scrub_flush(be->p_flm_nthw);
		}
	}

	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
	return 0;
}

/*
 * Read the FLM buffer control counters (learn free, info available,
 * status available) from hardware into the shadow copy.
 */
static int flm_buf_ctrl_update(void *be_dev, const struct flm_func_s *flm)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);

	if (flm->ver >= 25) {
		flm_nthw_buf_ctrl_update(be->p_flm_nthw,
			&flm->v25.buf_ctrl->lrn_free,
			&flm->v25.buf_ctrl->inf_avail,
			&flm->v25.buf_ctrl->sta_avail);
	}

	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
	return 0;
}

/*
 * Refresh all FLM statistics registers from hardware and read the counters
 * into the shadow copy.
 * NOTE(review): this function contains two consecutive blocks guarded by the
 * identical condition 'if (flm->ver >= 25)'; they could be merged into one.
 */
static int flm_stat_update(void *be_dev, const struct flm_func_s *flm)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);

	if (flm->ver >= 25) {
		flm_nthw_stat_lrn_done_update(be->p_flm_nthw);
		flm_nthw_stat_lrn_ignore_update(be->p_flm_nthw);
		flm_nthw_stat_lrn_fail_update(be->p_flm_nthw);
		flm_nthw_stat_unl_done_update(be->p_flm_nthw);
		flm_nthw_stat_unl_ignore_update(be->p_flm_nthw);
flm_nthw_stat_rel_done_update(be->p_flm_nthw);
		flm_nthw_stat_rel_ignore_update(be->p_flm_nthw);
		flm_nthw_stat_aul_done_update(be->p_flm_nthw);
		flm_nthw_stat_aul_ignore_update(be->p_flm_nthw);
		flm_nthw_stat_aul_fail_update(be->p_flm_nthw);
		flm_nthw_stat_tul_done_update(be->p_flm_nthw);
		flm_nthw_stat_flows_update(be->p_flm_nthw);
		flm_nthw_load_lps_update(be->p_flm_nthw);
		flm_nthw_load_aps_update(be->p_flm_nthw);

		/* Copy the refreshed counters into the shadow copy (final
		 * argument 1 presumably selects read -- matches flm_status_update)
		 */
		flm_nthw_stat_lrn_done_cnt(be->p_flm_nthw, &flm->v25.lrn_done->cnt, 1);
		flm_nthw_stat_lrn_ignore_cnt(be->p_flm_nthw, &flm->v25.lrn_ignore->cnt, 1);
		flm_nthw_stat_lrn_fail_cnt(be->p_flm_nthw, &flm->v25.lrn_fail->cnt, 1);
		flm_nthw_stat_unl_done_cnt(be->p_flm_nthw, &flm->v25.unl_done->cnt, 1);
		flm_nthw_stat_unl_ignore_cnt(be->p_flm_nthw, &flm->v25.unl_ignore->cnt, 1);
		flm_nthw_stat_rel_done_cnt(be->p_flm_nthw, &flm->v25.rel_done->cnt, 1);
		flm_nthw_stat_rel_ignore_cnt(be->p_flm_nthw, &flm->v25.rel_ignore->cnt, 1);
		flm_nthw_stat_aul_done_cnt(be->p_flm_nthw, &flm->v25.aul_done->cnt, 1);
		flm_nthw_stat_aul_ignore_cnt(be->p_flm_nthw, &flm->v25.aul_ignore->cnt, 1);
		flm_nthw_stat_aul_fail_cnt(be->p_flm_nthw, &flm->v25.aul_fail->cnt, 1);
		flm_nthw_stat_tul_done_cnt(be->p_flm_nthw, &flm->v25.tul_done->cnt, 1);
		flm_nthw_stat_flows_cnt(be->p_flm_nthw, &flm->v25.flows->cnt, 1);

		flm_nthw_stat_prb_done_update(be->p_flm_nthw);
		flm_nthw_stat_prb_ignore_update(be->p_flm_nthw);
		flm_nthw_stat_prb_done_cnt(be->p_flm_nthw, &flm->v25.prb_done->cnt, 1);
		flm_nthw_stat_prb_ignore_cnt(be->p_flm_nthw, &flm->v25.prb_ignore->cnt, 1);

		flm_nthw_load_lps_cnt(be->p_flm_nthw, &flm->v25.load_lps->lps, 1);
		flm_nthw_load_aps_cnt(be->p_flm_nthw, &flm->v25.load_aps->aps, 1);
	}

	if (flm->ver >= 25) {
		flm_nthw_stat_sta_done_update(be->p_flm_nthw);
		flm_nthw_stat_inf_done_update(be->p_flm_nthw);
flm_nthw_stat_inf_skip_update(be->p_flm_nthw);
		flm_nthw_stat_pck_hit_update(be->p_flm_nthw);
		flm_nthw_stat_pck_miss_update(be->p_flm_nthw);
		flm_nthw_stat_pck_unh_update(be->p_flm_nthw);
		flm_nthw_stat_pck_dis_update(be->p_flm_nthw);
		flm_nthw_stat_csh_hit_update(be->p_flm_nthw);
		flm_nthw_stat_csh_miss_update(be->p_flm_nthw);
		flm_nthw_stat_csh_unh_update(be->p_flm_nthw);
		flm_nthw_stat_cuc_start_update(be->p_flm_nthw);
		flm_nthw_stat_cuc_move_update(be->p_flm_nthw);

		flm_nthw_stat_sta_done_cnt(be->p_flm_nthw, &flm->v25.sta_done->cnt, 1);
		flm_nthw_stat_inf_done_cnt(be->p_flm_nthw, &flm->v25.inf_done->cnt, 1);
		flm_nthw_stat_inf_skip_cnt(be->p_flm_nthw, &flm->v25.inf_skip->cnt, 1);
		flm_nthw_stat_pck_hit_cnt(be->p_flm_nthw, &flm->v25.pck_hit->cnt, 1);
		flm_nthw_stat_pck_miss_cnt(be->p_flm_nthw, &flm->v25.pck_miss->cnt, 1);
		flm_nthw_stat_pck_unh_cnt(be->p_flm_nthw, &flm->v25.pck_unh->cnt, 1);
		flm_nthw_stat_pck_dis_cnt(be->p_flm_nthw, &flm->v25.pck_dis->cnt, 1);
		flm_nthw_stat_csh_hit_cnt(be->p_flm_nthw, &flm->v25.csh_hit->cnt, 1);
		flm_nthw_stat_csh_miss_cnt(be->p_flm_nthw, &flm->v25.csh_miss->cnt, 1);
		flm_nthw_stat_csh_unh_cnt(be->p_flm_nthw, &flm->v25.csh_unh->cnt, 1);
		flm_nthw_stat_cuc_start_cnt(be->p_flm_nthw, &flm->v25.cuc_start->cnt, 1);
		flm_nthw_stat_cuc_move_cnt(be->p_flm_nthw, &flm->v25.cuc_move->cnt, 1);
	}

	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
	return 0;
}

/*
 * Forward 'records' learn-data records (each 'words_per_record' 32-bit words)
 * from 'lrn_data' to the FLM module. On return, '*handled_records' holds the
 * number of records processed, and '*inf_word_cnt'/'*sta_word_cnt' report the
 * info/status words available per the refreshed buffer control counters.
 * Returns the flm_nthw_lrn_data_flush() result.
 */
static int flm_lrn_data_flush(void *be_dev, const struct flm_func_s *flm, const uint32_t *lrn_data,
	uint32_t records, uint32_t *handled_records,
	uint32_t words_per_record, uint32_t *inf_word_cnt,
	uint32_t *sta_word_cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);

	int ret = flm_nthw_lrn_data_flush(be->p_flm_nthw, lrn_data, records, words_per_record,
		handled_records,
&flm->v25.buf_ctrl->lrn_free,
		&flm->v25.buf_ctrl->inf_avail,
		&flm->v25.buf_ctrl->sta_avail);

	*inf_word_cnt = flm->v25.buf_ctrl->inf_avail;
	*sta_word_cnt = flm->v25.buf_ctrl->sta_avail;

	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
	return ret;
}

/*
 * Read up to 'inf_size'/'sta_size' words of FLM info/status data into
 * 'inf_data'/'sta_data'. The word counts actually available are returned
 * via '*inf_word_cnt'/'*sta_word_cnt' from the refreshed buffer control
 * counters. Returns the flm_nthw_inf_sta_data_update() result.
 */
static int flm_inf_sta_data_update(void *be_dev, const struct flm_func_s *flm, uint32_t *inf_data,
	uint32_t inf_size, uint32_t *inf_word_cnt, uint32_t *sta_data,
	uint32_t sta_size, uint32_t *sta_word_cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);

	int ret = flm_nthw_inf_sta_data_update(be->p_flm_nthw, inf_data, inf_size, sta_data,
		sta_size, &flm->v25.buf_ctrl->lrn_free,
		&flm->v25.buf_ctrl->inf_avail,
		&flm->v25.buf_ctrl->sta_avail);

	*inf_word_cnt = flm->v25.buf_ctrl->inf_avail;
	*sta_word_cnt = flm->v25.buf_ctrl->sta_avail;

	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
	return ret;
}

/*
 * DBS
 */

/* Rx queue allocation is not supported by this backend; always fails (-1). */
static int alloc_rx_queue(void *be_dev, int queue_id)
{
	(void)be_dev;
	(void)queue_id;
	NT_LOG(ERR, FILTER, "ERROR alloc Rx queue");
	return -1;
}

/*
 * Rx queue release is not supported by this backend.
 * NOTE(review): logs an error but returns 0 (success) while alloc_rx_queue
 * returns -1 -- verify the asymmetry is intentional.
 */
static int free_rx_queue(void *be_dev, int hw_queue)
{
	(void)be_dev;
	(void)hw_queue;
	NT_LOG(ERR, FILTER, "ERROR free Rx queue");
	return 0;
}

/*
 * Backend operations table handed to the flow API layer. Entries are
 * positional; the order must match struct flow_api_backend_ops.
 */
const struct flow_api_backend_ops flow_be_iface = {
	1,

	set_debug_mode,
	get_nb_phy_ports,
	get_nb_rx_ports,
	get_ltx_avail,
	get_nb_cat_funcs,
	get_nb_categories,
	get_nb_cat_km_if_cnt,
	get_nb_cat_km_if_m0,
	get_nb_cat_km_if_m1,
	get_nb_queues,
	get_nb_km_flow_types,
	get_nb_pm_ext,
	get_nb_len,
	get_kcc_size,
	get_kcc_banks,
	get_nb_km_categories,
	get_nb_km_cam_banks,
	get_nb_km_cam_record_words,
	get_nb_km_cam_records,
	get_nb_km_tcam_banks,
	get_nb_km_tcam_bank_width,
get_nb_flm_categories,
	get_nb_flm_size_mb,
	get_nb_flm_entry_size,
	get_nb_flm_variant,
	get_nb_flm_prios,
	get_nb_flm_pst_profiles,
	get_nb_flm_scrub_profiles,
	get_nb_flm_load_aps_max,
	get_nb_qsl_categories,
	get_nb_qsl_qst_entries,
	get_nb_pdb_categories,
	get_nb_roa_categories,
	get_nb_tpe_categories,
	get_nb_tx_cpy_writers,
	get_nb_tx_cpy_mask_mem,
	get_nb_tx_rpl_depth,
	get_nb_tx_rpl_ext_categories,
	get_nb_tpe_ifr_categories,
	get_nb_rpp_per_ps,
	get_nb_hsh_categories,
	get_nb_hsh_toeplitz,

	alloc_rx_queue,
	free_rx_queue,

	cat_get_present,
	cat_get_version,
	cat_cfn_flush,

	cat_kce_flush,
	cat_kcs_flush,
	cat_fte_flush,

	cat_cte_flush,
	cat_cts_flush,
	cat_cot_flush,
	cat_cct_flush,
	cat_exo_flush,
	cat_rck_flush,
	cat_len_flush,
	cat_kcc_flush,

	km_get_present,
	km_get_version,
	km_rcp_flush,
	km_cam_flush,
	km_tcam_flush,
	km_tci_flush,
	km_tcq_flush,

	flm_get_present,
	flm_get_version,
	flm_control_flush,
	flm_status_flush,
	flm_status_update,
	flm_scan_flush,
	flm_load_bin_flush,
	flm_prio_flush,
	flm_pst_flush,
	flm_rcp_flush,
	flm_scrub_flush,
	flm_buf_ctrl_update,
	flm_stat_update,
	flm_lrn_data_flush,
	flm_inf_sta_data_update,
};

/*
 * Create the backend instance for the adapter described by 'p_fpga': the
 * INFO module is set up unconditionally, while each optional module (CAT,
 * KM, FLM, IFR) is probed first -- an init call with a NULL instance
 * presumably returns 0 only when the module exists on the FPGA (verify
 * against the *_nthw_init implementations). The per-adapter slot in
 * 'be_devs' is returned through '*dev', along with the shared ops table.
 */
const struct flow_api_backend_ops *bin_flow_backend_init(nthw_fpga_t *p_fpga, void **dev)
{
	uint8_t physical_adapter_no = (uint8_t)p_fpga->p_fpga_info->adapter_no;

	/* INFO module: initialized unconditionally, unlike the modules below */
	struct info_nthw *pinfonthw = info_nthw_new();
	info_nthw_init(pinfonthw, p_fpga, physical_adapter_no);
	be_devs[physical_adapter_no].p_info_nthw = pinfonthw;

	/* Init nthw CAT */
	if (cat_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
		struct cat_nthw *pcatnthw = cat_nthw_new();
		cat_nthw_init(pcatnthw, p_fpga,
physical_adapter_no);
		be_devs[physical_adapter_no].p_cat_nthw = pcatnthw;

	} else {
		be_devs[physical_adapter_no].p_cat_nthw = NULL;
	}

	/* Init nthw KM */
	if (km_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
		struct km_nthw *pkmnthw = km_nthw_new();
		km_nthw_init(pkmnthw, p_fpga, physical_adapter_no);
		be_devs[physical_adapter_no].p_km_nthw = pkmnthw;

	} else {
		be_devs[physical_adapter_no].p_km_nthw = NULL;
	}

	/* Init nthw FLM */
	if (flm_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
		struct flm_nthw *pflmnthw = flm_nthw_new();
		flm_nthw_init(pflmnthw, p_fpga, physical_adapter_no);
		be_devs[physical_adapter_no].p_flm_nthw = pflmnthw;

	} else {
		be_devs[physical_adapter_no].p_flm_nthw = NULL;
	}

	/* Init nthw IFR */
	if (ifr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
		struct ifr_nthw *ifrnthw = ifr_nthw_new();
		ifr_nthw_init(ifrnthw, p_fpga, physical_adapter_no);
		be_devs[physical_adapter_no].p_ifr_nthw = ifrnthw;

	} else {
		be_devs[physical_adapter_no].p_ifr_nthw = NULL;
	}

	be_devs[physical_adapter_no].adapter_no = physical_adapter_no;
	*dev = (void *)&be_devs[physical_adapter_no];

	return &flow_be_iface;
}

/*
 * Tear down a backend instance created by bin_flow_backend_init().
 * NOTE(review): p_ifr_nthw allocated in bin_flow_backend_init() is never
 * released here -- possible resource leak; verify whether an
 * ifr_nthw_delete() counterpart exists and should be called.
 */
static void bin_flow_backend_done(void *dev)
{
	struct backend_dev_s *be_dev = (struct backend_dev_s *)dev;
	info_nthw_delete(be_dev->p_info_nthw);
	cat_nthw_delete(be_dev->p_cat_nthw);
	km_nthw_delete(be_dev->p_km_nthw);
	flm_nthw_delete(be_dev->p_flm_nthw);
}

/* Init/done hooks registered with the ntnic module registry. */
static const struct flow_backend_ops ops = {
	.bin_flow_backend_init = bin_flow_backend_init,
	.bin_flow_backend_done = bin_flow_backend_done,
};

/* Register this flow backend implementation with the module registry. */
void flow_backend_init(void)
{
	register_flow_backend_ops(&ops);
}