xref: /dpdk/drivers/net/ntnic/nthw/flow_api/flow_backend/flow_backend.c (revision 3005c75d6b55c73eeb2c25406b7901bac5b54d6d)
1 /*
2  * SPDX-License-Identifier: BSD-3-Clause
3  * Copyright(c) 2023 Napatech A/S
4  */
5 
6 #include <stdint.h>
7 
8 #include "flow_nthw_info.h"
9 #include "flow_nthw_cat.h"
10 #include "flow_nthw_km.h"
11 #include "ntnic_mod_reg.h"
12 #include "nthw_fpga_model.h"
13 #include "hw_mod_backend.h"
14 
15 /*
16  * Binary Flow API backend implementation into ntservice driver
17  *
18  * General note on this backend implementation:
19  * Maybe use shadow class to combine multiple writes. However, this backend is only for dev/testing
20  */
21 
/*
 * Per-adapter backend state.  One slot per physical adapter; each slot
 * holds the debug mode and the nthw driver instances for the flow modules
 * this backend programs (INFO, CAT, KM).
 */
static struct backend_dev_s {
	uint8_t adapter_no;	/* adapter this slot belongs to */
	enum debug_mode_e dmode;	/* debug flags, tested by CHECK_DEBUG_ON */
	struct info_nthw *p_info_nthw;	/* INFO module (capability queries) */
	struct cat_nthw *p_cat_nthw;	/* CAT module; NULL => not present */
	struct km_nthw *p_km_nthw;	/* KM module; NULL => not present */
} be_devs[MAX_PHYS_ADAPTERS];
29 
/*
 * Enter a backend write sequence: declares a local flag __debug__ and, when
 * write-debugging is enabled globally ((be)->dmode) or per-module
 * ((mod)->debug), switches the given nthw instance into full debug output
 * (0xFF) and records that in __debug__ so CHECK_DEBUG_OFF can undo it.
 *
 * NOTE(review): deliberately NOT wrapped in a single do/while — it must
 * declare __debug__ in the caller's scope for CHECK_DEBUG_OFF to read.
 * Use at most once per function and always pair with CHECK_DEBUG_OFF.
 * The name __debug__ lies in the implementation-reserved identifier space;
 * renaming it (in both macros together) would be cleaner.
 */
#define CHECK_DEBUG_ON(be, mod, inst)                                                             \
	int __debug__ = 0;                                                                        \
	if (((be)->dmode & FLOW_BACKEND_DEBUG_MODE_WRITE) || (mod)->debug)                        \
		do {                                                                              \
			mod##_nthw_set_debug_mode((inst), 0xFF);                                  \
			__debug__ = 1;                                                            \
	} while (0)
37 
/*
 * Leave a backend write sequence: if CHECK_DEBUG_ON turned debug mode on
 * for this nthw instance, switch it back off.  Relies on the __debug__
 * flag declared by CHECK_DEBUG_ON in the same scope.
 */
#define CHECK_DEBUG_OFF(mod, inst)                                                                \
	do {                                                                                      \
		if (__debug__)                                                                    \
			mod##_nthw_set_debug_mode((inst), 0);                                     \
	} while (0)
43 
44 const struct flow_api_backend_ops *bin_flow_backend_init(nthw_fpga_t *p_fpga, void **be_dev);
45 static void bin_flow_backend_done(void *be_dev);
46 
47 static int set_debug_mode(void *be_dev, enum debug_mode_e mode)
48 {
49 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
50 	be->dmode = mode;
51 	return 0;
52 }
53 
54 /*
55  * INFO
56  */
57 
58 static int get_nb_phy_ports(void *be_dev)
59 {
60 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
61 	return info_nthw_get_nb_phy_ports(be->p_info_nthw);
62 }
63 
64 static int get_nb_rx_ports(void *be_dev)
65 {
66 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
67 	return info_nthw_get_nb_rx_ports(be->p_info_nthw);
68 }
69 
70 static int get_ltx_avail(void *be_dev)
71 {
72 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
73 	return info_nthw_get_ltx_avail(be->p_info_nthw);
74 }
75 
76 static int get_nb_cat_funcs(void *be_dev)
77 {
78 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
79 	return info_nthw_get_nb_cat_funcs(be->p_info_nthw);
80 }
81 
82 static int get_nb_categories(void *be_dev)
83 {
84 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
85 	return info_nthw_get_nb_categories(be->p_info_nthw);
86 }
87 
88 static int get_nb_cat_km_if_cnt(void *be_dev)
89 {
90 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
91 	return info_nthw_get_nb_cat_km_if_cnt(be->p_info_nthw);
92 }
93 
94 static int get_nb_cat_km_if_m0(void *be_dev)
95 {
96 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
97 	return info_nthw_get_nb_cat_km_if_m0(be->p_info_nthw);
98 }
99 
100 static int get_nb_cat_km_if_m1(void *be_dev)
101 {
102 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
103 	return info_nthw_get_nb_cat_km_if_m1(be->p_info_nthw);
104 }
105 
106 static int get_nb_queues(void *be_dev)
107 {
108 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
109 	return info_nthw_get_nb_queues(be->p_info_nthw);
110 }
111 
112 static int get_nb_km_flow_types(void *be_dev)
113 {
114 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
115 	return info_nthw_get_nb_km_flow_types(be->p_info_nthw);
116 }
117 
118 static int get_nb_pm_ext(void *be_dev)
119 {
120 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
121 	return info_nthw_get_nb_pm_ext(be->p_info_nthw);
122 }
123 
124 static int get_nb_len(void *be_dev)
125 {
126 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
127 	return info_nthw_get_nb_len(be->p_info_nthw);
128 }
129 
130 static int get_kcc_size(void *be_dev)
131 {
132 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
133 	return info_nthw_get_kcc_size(be->p_info_nthw);
134 }
135 
136 static int get_kcc_banks(void *be_dev)
137 {
138 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
139 	return info_nthw_get_kcc_banks(be->p_info_nthw);
140 }
141 
142 static int get_nb_km_categories(void *be_dev)
143 {
144 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
145 	return info_nthw_get_nb_km_categories(be->p_info_nthw);
146 }
147 
148 static int get_nb_km_cam_banks(void *be_dev)
149 {
150 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
151 	return info_nthw_get_nb_km_cam_banks(be->p_info_nthw);
152 }
153 
154 static int get_nb_km_cam_record_words(void *be_dev)
155 {
156 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
157 	return info_nthw_get_nb_km_cam_record_words(be->p_info_nthw);
158 }
159 
160 static int get_nb_km_cam_records(void *be_dev)
161 {
162 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
163 	return info_nthw_get_nb_km_cam_records(be->p_info_nthw);
164 }
165 
166 static int get_nb_km_tcam_banks(void *be_dev)
167 {
168 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
169 	return info_nthw_get_nb_km_tcam_banks(be->p_info_nthw);
170 }
171 
172 static int get_nb_km_tcam_bank_width(void *be_dev)
173 {
174 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
175 	return info_nthw_get_nb_km_tcam_bank_width(be->p_info_nthw);
176 }
177 
178 static int get_nb_flm_categories(void *be_dev)
179 {
180 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
181 	return info_nthw_get_nb_flm_categories(be->p_info_nthw);
182 }
183 
184 static int get_nb_flm_size_mb(void *be_dev)
185 {
186 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
187 	return info_nthw_get_nb_flm_size_mb(be->p_info_nthw);
188 }
189 
190 static int get_nb_flm_entry_size(void *be_dev)
191 {
192 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
193 	return info_nthw_get_nb_flm_entry_size(be->p_info_nthw);
194 }
195 
196 static int get_nb_flm_variant(void *be_dev)
197 {
198 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
199 	return info_nthw_get_nb_flm_variant(be->p_info_nthw);
200 }
201 
202 static int get_nb_flm_prios(void *be_dev)
203 {
204 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
205 	return info_nthw_get_nb_flm_prios(be->p_info_nthw);
206 }
207 
208 static int get_nb_flm_pst_profiles(void *be_dev)
209 {
210 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
211 	return info_nthw_get_nb_flm_pst_profiles(be->p_info_nthw);
212 }
213 
214 static int get_nb_flm_scrub_profiles(void *be_dev)
215 {
216 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
217 	return info_nthw_get_nb_flm_scrub_profiles(be->p_info_nthw);
218 }
219 
220 static int get_nb_flm_load_aps_max(void *be_dev)
221 {
222 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
223 	return info_nthw_get_nb_flm_load_aps_max(be->p_info_nthw);
224 }
225 
226 static int get_nb_qsl_categories(void *be_dev)
227 {
228 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
229 	return info_nthw_get_nb_qsl_categories(be->p_info_nthw);
230 }
231 
232 static int get_nb_qsl_qst_entries(void *be_dev)
233 {
234 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
235 	return info_nthw_get_nb_qsl_qst_entries(be->p_info_nthw);
236 }
237 
238 static int get_nb_pdb_categories(void *be_dev)
239 {
240 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
241 	return info_nthw_get_nb_pdb_categories(be->p_info_nthw);
242 }
243 
244 static int get_nb_roa_categories(void *be_dev)
245 {
246 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
247 	return info_nthw_get_nb_roa_categories(be->p_info_nthw);
248 }
249 
250 static int get_nb_tpe_categories(void *be_dev)
251 {
252 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
253 	return info_nthw_get_nb_tpe_categories(be->p_info_nthw);
254 }
255 
256 static int get_nb_tx_cpy_writers(void *be_dev)
257 {
258 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
259 	return info_nthw_get_nb_tx_cpy_writers(be->p_info_nthw);
260 }
261 
262 static int get_nb_tx_cpy_mask_mem(void *be_dev)
263 {
264 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
265 	return info_nthw_get_nb_tx_cpy_mask_mem(be->p_info_nthw);
266 }
267 
268 static int get_nb_tx_rpl_depth(void *be_dev)
269 {
270 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
271 	return info_nthw_get_nb_tx_rpl_depth(be->p_info_nthw);
272 }
273 
274 static int get_nb_tx_rpl_ext_categories(void *be_dev)
275 {
276 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
277 	return info_nthw_get_nb_tx_rpl_ext_categories(be->p_info_nthw);
278 }
279 
280 static int get_nb_tpe_ifr_categories(void *be_dev)
281 {
282 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
283 	return info_nthw_get_nb_tpe_ifr_categories(be->p_info_nthw);
284 }
285 
286 static int get_nb_rpp_per_ps(void *be_dev)
287 {
288 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
289 	return info_nthw_get_nb_rpp_per_ps(be->p_info_nthw);
290 }
291 
292 static int get_nb_hsh_categories(void *be_dev)
293 {
294 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
295 	return info_nthw_get_nb_hsh_categories(be->p_info_nthw);
296 }
297 
298 static int get_nb_hsh_toeplitz(void *be_dev)
299 {
300 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
301 	return info_nthw_get_nb_hsh_toeplitz(be->p_info_nthw);
302 }
303 
304 /*
305  * CAT
306  */
307 
308 static bool cat_get_present(void *be_dev)
309 {
310 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
311 	return be->p_cat_nthw != NULL;
312 }
313 
314 static uint32_t cat_get_version(void *be_dev)
315 {
316 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
317 	return (uint32_t)((nthw_module_get_major_version(be->p_cat_nthw->m_cat) << 16) |
318 			(nthw_module_get_minor_version(be->p_cat_nthw->m_cat) & 0xffff));
319 }
320 
/*
 * Write 'cnt' consecutive CFN (categorizer function) records, starting at
 * 'cat_func', from the shadow state in 'cat' into hardware.  Handles
 * register layout versions 18 and 21; any other version is a no-op (still
 * returns 0).  Each record is selected, fully programmed, then flushed.
 */
static int cat_cfn_flush(void *be_dev, const struct cat_func_s *cat, int cat_func, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;

	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);

	if (cat->ver == 18) {
		/* One record programmed and flushed per iteration. */
		cat_nthw_cfn_cnt(be->p_cat_nthw, 1U);

		for (int i = 0; i < cnt; i++) {
			cat_nthw_cfn_select(be->p_cat_nthw, cat_func);
			cat_nthw_cfn_enable(be->p_cat_nthw, cat->v18.cfn[cat_func].enable);
			cat_nthw_cfn_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].inv);
			cat_nthw_cfn_ptc_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_inv);
			cat_nthw_cfn_ptc_isl(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_isl);
			cat_nthw_cfn_ptc_cfp(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_cfp);
			cat_nthw_cfn_ptc_mac(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_mac);
			cat_nthw_cfn_ptc_l2(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_l2);
			cat_nthw_cfn_ptc_vn_tag(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_vntag);
			cat_nthw_cfn_ptc_vlan(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_vlan);
			cat_nthw_cfn_ptc_mpls(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_mpls);
			cat_nthw_cfn_ptc_l3(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_l3);
			cat_nthw_cfn_ptc_frag(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_frag);
			cat_nthw_cfn_ptc_ip_prot(be->p_cat_nthw,
				cat->v18.cfn[cat_func].ptc_ip_prot);
			cat_nthw_cfn_ptc_l4(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_l4);
			cat_nthw_cfn_ptc_tunnel(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tunnel);
			cat_nthw_cfn_ptc_tnl_l2(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_l2);
			cat_nthw_cfn_ptc_tnl_vlan(be->p_cat_nthw,
				cat->v18.cfn[cat_func].ptc_tnl_vlan);
			cat_nthw_cfn_ptc_tnl_mpls(be->p_cat_nthw,
				cat->v18.cfn[cat_func].ptc_tnl_mpls);
			cat_nthw_cfn_ptc_tnl_l3(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_l3);
			cat_nthw_cfn_ptc_tnl_frag(be->p_cat_nthw,
				cat->v18.cfn[cat_func].ptc_tnl_frag);
			cat_nthw_cfn_ptc_tnl_ip_prot(be->p_cat_nthw,
				cat->v18.cfn[cat_func].ptc_tnl_ip_prot);
			cat_nthw_cfn_ptc_tnl_l4(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_l4);

			/* Error-condition match bits. */
			cat_nthw_cfn_err_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].err_inv);
			cat_nthw_cfn_err_cv(be->p_cat_nthw, cat->v18.cfn[cat_func].err_cv);
			cat_nthw_cfn_err_fcs(be->p_cat_nthw, cat->v18.cfn[cat_func].err_fcs);
			cat_nthw_cfn_err_trunc(be->p_cat_nthw, cat->v18.cfn[cat_func].err_trunc);
			cat_nthw_cfn_err_l3_cs(be->p_cat_nthw, cat->v18.cfn[cat_func].err_l3_cs);
			cat_nthw_cfn_err_l4_cs(be->p_cat_nthw, cat->v18.cfn[cat_func].err_l4_cs);

			cat_nthw_cfn_mac_port(be->p_cat_nthw, cat->v18.cfn[cat_func].mac_port);

			/* Pattern-matcher configuration. */
			cat_nthw_cfn_pm_cmp(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_cmp);
			cat_nthw_cfn_pm_dct(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_dct);
			cat_nthw_cfn_pm_ext_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_ext_inv);
			cat_nthw_cfn_pm_cmb(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_cmb);
			cat_nthw_cfn_pm_and_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_and_inv);
			cat_nthw_cfn_pm_or_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_or_inv);
			cat_nthw_cfn_pm_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_inv);

			cat_nthw_cfn_lc(be->p_cat_nthw, cat->v18.cfn[cat_func].lc);
			cat_nthw_cfn_lc_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].lc_inv);
			cat_nthw_cfn_km0_or(be->p_cat_nthw, cat->v18.cfn[cat_func].km_or);
			cat_nthw_cfn_flush(be->p_cat_nthw);
			cat_func++;
		}

	} else if (cat->ver == 21) {
		/*
		 * v21: same base layout as v18, plus tunnel checksum / TTL
		 * error bits and an optional second KM interface (km1_or).
		 */
		cat_nthw_cfn_cnt(be->p_cat_nthw, 1U);

		for (int i = 0; i < cnt; i++) {
			cat_nthw_cfn_select(be->p_cat_nthw, cat_func);
			cat_nthw_cfn_enable(be->p_cat_nthw, cat->v21.cfn[cat_func].enable);
			cat_nthw_cfn_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].inv);
			cat_nthw_cfn_ptc_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_inv);
			cat_nthw_cfn_ptc_isl(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_isl);
			cat_nthw_cfn_ptc_cfp(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_cfp);
			cat_nthw_cfn_ptc_mac(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_mac);
			cat_nthw_cfn_ptc_l2(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_l2);
			cat_nthw_cfn_ptc_vn_tag(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_vntag);
			cat_nthw_cfn_ptc_vlan(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_vlan);
			cat_nthw_cfn_ptc_mpls(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_mpls);
			cat_nthw_cfn_ptc_l3(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_l3);
			cat_nthw_cfn_ptc_frag(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_frag);
			cat_nthw_cfn_ptc_ip_prot(be->p_cat_nthw,
				cat->v21.cfn[cat_func].ptc_ip_prot);
			cat_nthw_cfn_ptc_l4(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_l4);
			cat_nthw_cfn_ptc_tunnel(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tunnel);
			cat_nthw_cfn_ptc_tnl_l2(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_l2);
			cat_nthw_cfn_ptc_tnl_vlan(be->p_cat_nthw,
				cat->v21.cfn[cat_func].ptc_tnl_vlan);
			cat_nthw_cfn_ptc_tnl_mpls(be->p_cat_nthw,
				cat->v21.cfn[cat_func].ptc_tnl_mpls);
			cat_nthw_cfn_ptc_tnl_l3(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_l3);
			cat_nthw_cfn_ptc_tnl_frag(be->p_cat_nthw,
				cat->v21.cfn[cat_func].ptc_tnl_frag);
			cat_nthw_cfn_ptc_tnl_ip_prot(be->p_cat_nthw,
				cat->v21.cfn[cat_func].ptc_tnl_ip_prot);
			cat_nthw_cfn_ptc_tnl_l4(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_l4);

			/* Error-condition match bits (v21 adds the tnl/ttl set). */
			cat_nthw_cfn_err_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].err_inv);
			cat_nthw_cfn_err_cv(be->p_cat_nthw, cat->v21.cfn[cat_func].err_cv);
			cat_nthw_cfn_err_fcs(be->p_cat_nthw, cat->v21.cfn[cat_func].err_fcs);
			cat_nthw_cfn_err_trunc(be->p_cat_nthw, cat->v21.cfn[cat_func].err_trunc);
			cat_nthw_cfn_err_l3_cs(be->p_cat_nthw, cat->v21.cfn[cat_func].err_l3_cs);
			cat_nthw_cfn_err_l4_cs(be->p_cat_nthw, cat->v21.cfn[cat_func].err_l4_cs);
			cat_nthw_cfn_err_tnl_l3_cs(be->p_cat_nthw,
				cat->v21.cfn[cat_func].err_tnl_l3_cs);
			cat_nthw_cfn_err_tnl_l4_cs(be->p_cat_nthw,
				cat->v21.cfn[cat_func].err_tnl_l4_cs);
			cat_nthw_cfn_err_ttl_exp(be->p_cat_nthw,
				cat->v21.cfn[cat_func].err_ttl_exp);
			cat_nthw_cfn_err_tnl_ttl_exp(be->p_cat_nthw,
				cat->v21.cfn[cat_func].err_tnl_ttl_exp);

			cat_nthw_cfn_mac_port(be->p_cat_nthw, cat->v21.cfn[cat_func].mac_port);

			/* Pattern-matcher configuration. */
			cat_nthw_cfn_pm_cmp(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_cmp);
			cat_nthw_cfn_pm_dct(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_dct);
			cat_nthw_cfn_pm_ext_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_ext_inv);
			cat_nthw_cfn_pm_cmb(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_cmb);
			cat_nthw_cfn_pm_and_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_and_inv);
			cat_nthw_cfn_pm_or_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_or_inv);
			cat_nthw_cfn_pm_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_inv);

			cat_nthw_cfn_lc(be->p_cat_nthw, cat->v21.cfn[cat_func].lc);
			cat_nthw_cfn_lc_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].lc_inv);
			cat_nthw_cfn_km0_or(be->p_cat_nthw, cat->v21.cfn[cat_func].km0_or);

			/* Second KM interface is only present on some builds. */
			if (be->p_cat_nthw->m_km_if_cnt > 1)
				cat_nthw_cfn_km1_or(be->p_cat_nthw, cat->v21.cfn[cat_func].km1_or);

			cat_nthw_cfn_flush(be->p_cat_nthw);
			cat_func++;
		}
	}

	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
	return 0;
}
457 
458 static int cat_kce_flush(void *be_dev, const struct cat_func_s *cat, int km_if_idx, int index,
459 	int cnt)
460 {
461 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
462 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
463 
464 	if (cat->ver == 18) {
465 		cat_nthw_kce_cnt(be->p_cat_nthw, 0, 1U);
466 
467 		for (int i = 0; i < cnt; i++) {
468 			cat_nthw_kce_select(be->p_cat_nthw, 0, index + i);
469 			cat_nthw_kce_enable(be->p_cat_nthw, 0, cat->v18.kce[index + i].enable_bm);
470 			cat_nthw_kce_flush(be->p_cat_nthw, 0);
471 		}
472 
473 	} else if (cat->ver == 21) {
474 		cat_nthw_kce_cnt(be->p_cat_nthw, km_if_idx, 1U);
475 
476 		for (int i = 0; i < cnt; i++) {
477 			cat_nthw_kce_select(be->p_cat_nthw, km_if_idx, index + i);
478 			cat_nthw_kce_enable(be->p_cat_nthw, km_if_idx,
479 				cat->v21.kce[index + i].enable_bm[km_if_idx]);
480 			cat_nthw_kce_flush(be->p_cat_nthw, km_if_idx);
481 		}
482 	}
483 
484 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
485 	return 0;
486 }
487 
488 static int cat_kcs_flush(void *be_dev, const struct cat_func_s *cat, int km_if_idx, int cat_func,
489 	int cnt)
490 {
491 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
492 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
493 
494 	if (cat->ver == 18) {
495 		cat_nthw_kcs_cnt(be->p_cat_nthw, 0, 1U);
496 
497 		for (int i = 0; i < cnt; i++) {
498 			cat_nthw_kcs_select(be->p_cat_nthw, 0, cat_func);
499 			cat_nthw_kcs_category(be->p_cat_nthw, 0, cat->v18.kcs[cat_func].category);
500 			cat_nthw_kcs_flush(be->p_cat_nthw, 0);
501 			cat_func++;
502 		}
503 
504 	} else if (cat->ver == 21) {
505 		cat_nthw_kcs_cnt(be->p_cat_nthw, km_if_idx, 1U);
506 
507 		for (int i = 0; i < cnt; i++) {
508 			cat_nthw_kcs_select(be->p_cat_nthw, km_if_idx, cat_func);
509 			cat_nthw_kcs_category(be->p_cat_nthw, km_if_idx,
510 				cat->v21.kcs[cat_func].category[km_if_idx]);
511 			cat_nthw_kcs_flush(be->p_cat_nthw, km_if_idx);
512 			cat_func++;
513 		}
514 	}
515 
516 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
517 	return 0;
518 }
519 
520 static int cat_fte_flush(void *be_dev, const struct cat_func_s *cat, int km_if_idx, int index,
521 	int cnt)
522 {
523 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
524 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
525 
526 	if (cat->ver == 18) {
527 		cat_nthw_fte_cnt(be->p_cat_nthw, 0, 1);
528 
529 		for (int i = 0; i < cnt; i++) {
530 			cat_nthw_fte_select(be->p_cat_nthw, 0, index + i);
531 			cat_nthw_fte_enable(be->p_cat_nthw, 0, cat->v18.fte[index + i].enable_bm);
532 			cat_nthw_fte_flush(be->p_cat_nthw, 0);
533 		}
534 
535 	} else if (cat->ver == 21) {
536 		cat_nthw_fte_cnt(be->p_cat_nthw, km_if_idx, 1);
537 
538 		for (int i = 0; i < cnt; i++) {
539 			cat_nthw_fte_select(be->p_cat_nthw, km_if_idx, index + i);
540 			cat_nthw_fte_enable(be->p_cat_nthw, km_if_idx,
541 				cat->v21.fte[index + i].enable_bm[km_if_idx]);
542 			cat_nthw_fte_flush(be->p_cat_nthw, km_if_idx);
543 		}
544 	}
545 
546 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
547 	return 0;
548 }
549 
550 static int cat_cte_flush(void *be_dev, const struct cat_func_s *cat, int cat_func, int cnt)
551 {
552 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
553 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
554 
555 	if (cat->ver == 18 || cat->ver == 21) {
556 		cat_nthw_cte_cnt(be->p_cat_nthw, 1);
557 
558 		for (int i = 0; i < cnt; i++) {
559 			cat_nthw_cte_select(be->p_cat_nthw, cat_func);
560 			cat_nthw_cte_enable_col(be->p_cat_nthw, cat->v18.cte[cat_func].b.col);
561 			cat_nthw_cte_enable_cor(be->p_cat_nthw, cat->v18.cte[cat_func].b.cor);
562 			cat_nthw_cte_enable_hsh(be->p_cat_nthw, cat->v18.cte[cat_func].b.hsh);
563 			cat_nthw_cte_enable_qsl(be->p_cat_nthw, cat->v18.cte[cat_func].b.qsl);
564 			cat_nthw_cte_enable_ipf(be->p_cat_nthw, cat->v18.cte[cat_func].b.ipf);
565 			cat_nthw_cte_enable_slc(be->p_cat_nthw, cat->v18.cte[cat_func].b.slc);
566 			cat_nthw_cte_enable_pdb(be->p_cat_nthw, cat->v18.cte[cat_func].b.pdb);
567 			cat_nthw_cte_enable_msk(be->p_cat_nthw, cat->v18.cte[cat_func].b.msk);
568 			cat_nthw_cte_enable_hst(be->p_cat_nthw, cat->v18.cte[cat_func].b.hst);
569 			cat_nthw_cte_enable_epp(be->p_cat_nthw, cat->v18.cte[cat_func].b.epp);
570 			cat_nthw_cte_enable_tpe(be->p_cat_nthw, cat->v18.cte[cat_func].b.tpe);
571 
572 			cat_nthw_cte_flush(be->p_cat_nthw);
573 			cat_func++;
574 		}
575 	}
576 
577 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
578 	return 0;
579 }
580 
581 static int cat_cts_flush(void *be_dev, const struct cat_func_s *cat, int index, int cnt)
582 {
583 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
584 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
585 
586 	if (cat->ver == 18 || cat->ver == 21) {
587 		cat_nthw_cts_cnt(be->p_cat_nthw, 1);
588 
589 		for (int i = 0; i < cnt; i++) {
590 			cat_nthw_cts_select(be->p_cat_nthw, index + i);
591 			cat_nthw_cts_cat_a(be->p_cat_nthw, cat->v18.cts[index + i].cat_a);
592 			cat_nthw_cts_cat_b(be->p_cat_nthw, cat->v18.cts[index + i].cat_b);
593 			cat_nthw_cts_flush(be->p_cat_nthw);
594 		}
595 	}
596 
597 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
598 	return 0;
599 }
600 
601 static int cat_cot_flush(void *be_dev, const struct cat_func_s *cat, int cat_func, int cnt)
602 {
603 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
604 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
605 
606 	if (cat->ver == 18 || cat->ver == 21) {
607 		cat_nthw_cot_cnt(be->p_cat_nthw, 1);
608 
609 		for (int i = 0; i < cnt; i++) {
610 			cat_nthw_cot_select(be->p_cat_nthw, cat_func + i);
611 			cat_nthw_cot_color(be->p_cat_nthw, cat->v18.cot[cat_func + i].color);
612 			cat_nthw_cot_km(be->p_cat_nthw, cat->v18.cot[cat_func + i].km);
613 			cat_nthw_cot_flush(be->p_cat_nthw);
614 		}
615 	}
616 
617 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
618 	return 0;
619 }
620 
621 static int cat_cct_flush(void *be_dev, const struct cat_func_s *cat, int index, int cnt)
622 {
623 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
624 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
625 
626 	if (cat->ver == 18 || cat->ver == 21) {
627 		cat_nthw_cct_cnt(be->p_cat_nthw, 1);
628 
629 		for (int i = 0; i < cnt; i++) {
630 			cat_nthw_cct_select(be->p_cat_nthw, index + i);
631 			cat_nthw_cct_color(be->p_cat_nthw, cat->v18.cct[index + i].color);
632 			cat_nthw_cct_km(be->p_cat_nthw, cat->v18.cct[index + i].km);
633 			cat_nthw_cct_flush(be->p_cat_nthw);
634 		}
635 	}
636 
637 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
638 	return 0;
639 }
640 
641 static int cat_exo_flush(void *be_dev, const struct cat_func_s *cat, int ext_index, int cnt)
642 {
643 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
644 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
645 
646 	if (cat->ver == 18 || cat->ver == 21) {
647 		cat_nthw_exo_cnt(be->p_cat_nthw, 1);
648 
649 		for (int i = 0; i < cnt; i++) {
650 			cat_nthw_exo_select(be->p_cat_nthw, ext_index + i);
651 			cat_nthw_exo_dyn(be->p_cat_nthw, cat->v18.exo[ext_index + i].dyn);
652 			cat_nthw_exo_ofs(be->p_cat_nthw, cat->v18.exo[ext_index + i].ofs);
653 			cat_nthw_exo_flush(be->p_cat_nthw);
654 		}
655 	}
656 
657 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
658 	return 0;
659 }
660 
661 static int cat_rck_flush(void *be_dev, const struct cat_func_s *cat, int index, int cnt)
662 {
663 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
664 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
665 
666 	if (cat->ver == 18 || cat->ver == 21) {
667 		cat_nthw_rck_cnt(be->p_cat_nthw, 1);
668 
669 		for (int i = 0; i < cnt; i++) {
670 			cat_nthw_rck_select(be->p_cat_nthw, index + i);
671 			cat_nthw_rck_data(be->p_cat_nthw, cat->v18.rck[index + i].rck_data);
672 			cat_nthw_rck_flush(be->p_cat_nthw);
673 		}
674 	}
675 
676 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
677 	return 0;
678 }
679 
680 static int cat_len_flush(void *be_dev, const struct cat_func_s *cat, int len_index, int cnt)
681 {
682 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
683 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
684 
685 	if (cat->ver == 18 || cat->ver == 21) {
686 		cat_nthw_len_cnt(be->p_cat_nthw, 1);
687 
688 		for (int i = 0; i < cnt; i++) {
689 			cat_nthw_len_select(be->p_cat_nthw, len_index + i);
690 			cat_nthw_len_lower(be->p_cat_nthw, cat->v18.len[len_index + i].lower);
691 			cat_nthw_len_upper(be->p_cat_nthw, cat->v18.len[len_index + i].upper);
692 			cat_nthw_len_dyn1(be->p_cat_nthw, cat->v18.len[len_index + i].dyn1);
693 			cat_nthw_len_dyn2(be->p_cat_nthw, cat->v18.len[len_index + i].dyn2);
694 			cat_nthw_len_inv(be->p_cat_nthw, cat->v18.len[len_index + i].inv);
695 			cat_nthw_len_flush(be->p_cat_nthw);
696 		}
697 	}
698 
699 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
700 	return 0;
701 }
702 
703 static int cat_kcc_flush(void *be_dev, const struct cat_func_s *cat, int len_index, int cnt)
704 {
705 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
706 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
707 
708 	if (cat->ver == 18 || cat->ver == 21) {
709 		cat_nthw_kcc_cnt(be->p_cat_nthw, 1);
710 
711 		for (int i = 0; i < cnt; i++) {
712 			cat_nthw_kcc_select(be->p_cat_nthw, len_index + i);
713 			cat_nthw_kcc_key(be->p_cat_nthw, cat->v18.kcc_cam[len_index + i].key);
714 			cat_nthw_kcc_category(be->p_cat_nthw,
715 				cat->v18.kcc_cam[len_index + i].category);
716 			cat_nthw_kcc_id(be->p_cat_nthw, cat->v18.kcc_cam[len_index + i].id);
717 			cat_nthw_kcc_flush(be->p_cat_nthw);
718 		}
719 	}
720 
721 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
722 	return 0;
723 }
724 
725 /*
726  * KM
727  */
728 
729 static bool km_get_present(void *be_dev)
730 {
731 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
732 	return be->p_km_nthw != NULL;
733 }
734 
735 static uint32_t km_get_version(void *be_dev)
736 {
737 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
738 	return (uint32_t)((nthw_module_get_major_version(be->p_km_nthw->m_km) << 16) |
739 			(nthw_module_get_minor_version(be->p_km_nthw->m_km) & 0xffff));
740 }
741 
/*
 * Write 'cnt' KM RCP (recipe) records starting at 'category' from the
 * shadow state in 'km' into hardware.  Only register layout version 7 is
 * handled; any other version is a no-op (still returns 0).  Each record is
 * selected, fully programmed, then flushed.
 */
static int km_rcp_flush(void *be_dev, const struct km_func_s *km, int category, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;

	CHECK_DEBUG_ON(be, km, be->p_km_nthw);

	if (km->ver == 7) {
		/* One record programmed and flushed per iteration. */
		km_nthw_rcp_cnt(be->p_km_nthw, 1);

		for (int i = 0; i < cnt; i++) {
			km_nthw_rcp_select(be->p_km_nthw, category + i);
			/* Key word/dword extractor selection (qw0/qw4/dw8/dw10). */
			km_nthw_rcp_qw0_dyn(be->p_km_nthw, km->v7.rcp[category + i].qw0_dyn);
			km_nthw_rcp_qw0_ofs(be->p_km_nthw, km->v7.rcp[category + i].qw0_ofs);
			km_nthw_rcp_qw0_sel_a(be->p_km_nthw, km->v7.rcp[category + i].qw0_sel_a);
			km_nthw_rcp_qw0_sel_b(be->p_km_nthw, km->v7.rcp[category + i].qw0_sel_b);
			km_nthw_rcp_qw4_dyn(be->p_km_nthw, km->v7.rcp[category + i].qw4_dyn);
			km_nthw_rcp_qw4_ofs(be->p_km_nthw, km->v7.rcp[category + i].qw4_ofs);
			km_nthw_rcp_qw4_sel_a(be->p_km_nthw, km->v7.rcp[category + i].qw4_sel_a);
			km_nthw_rcp_qw4_sel_b(be->p_km_nthw, km->v7.rcp[category + i].qw4_sel_b);
			km_nthw_rcp_dw8_dyn(be->p_km_nthw, km->v7.rcp[category + i].dw8_dyn);
			km_nthw_rcp_dw8_ofs(be->p_km_nthw, km->v7.rcp[category + i].dw8_ofs);
			km_nthw_rcp_dw8_sel_a(be->p_km_nthw, km->v7.rcp[category + i].dw8_sel_a);
			km_nthw_rcp_dw8_sel_b(be->p_km_nthw, km->v7.rcp[category + i].dw8_sel_b);
			km_nthw_rcp_dw10_dyn(be->p_km_nthw, km->v7.rcp[category + i].dw10_dyn);
			km_nthw_rcp_dw10_ofs(be->p_km_nthw, km->v7.rcp[category + i].dw10_ofs);
			km_nthw_rcp_dw10_sel_a(be->p_km_nthw, km->v7.rcp[category + i].dw10_sel_a);
			km_nthw_rcp_dw10_sel_b(be->p_km_nthw, km->v7.rcp[category + i].dw10_sel_b);
			km_nthw_rcp_swx_cch(be->p_km_nthw, km->v7.rcp[category + i].swx_cch);
			km_nthw_rcp_swx_sel_a(be->p_km_nthw, km->v7.rcp[category + i].swx_sel_a);
			km_nthw_rcp_swx_sel_b(be->p_km_nthw, km->v7.rcp[category + i].swx_sel_b);
			km_nthw_rcp_mask_da(be->p_km_nthw, km->v7.rcp[category + i].mask_d_a);
			km_nthw_rcp_mask_b(be->p_km_nthw, km->v7.rcp[category + i].mask_b);
			km_nthw_rcp_dual(be->p_km_nthw, km->v7.rcp[category + i].dual);
			km_nthw_rcp_paired(be->p_km_nthw, km->v7.rcp[category + i].paired);
			/* A/B lookup-side configuration. */
			km_nthw_rcp_el_a(be->p_km_nthw, km->v7.rcp[category + i].el_a);
			km_nthw_rcp_el_b(be->p_km_nthw, km->v7.rcp[category + i].el_b);
			km_nthw_rcp_info_a(be->p_km_nthw, km->v7.rcp[category + i].info_a);
			km_nthw_rcp_info_b(be->p_km_nthw, km->v7.rcp[category + i].info_b);
			km_nthw_rcp_ftm_a(be->p_km_nthw, km->v7.rcp[category + i].ftm_a);
			km_nthw_rcp_ftm_b(be->p_km_nthw, km->v7.rcp[category + i].ftm_b);
			km_nthw_rcp_bank_a(be->p_km_nthw, km->v7.rcp[category + i].bank_a);
			km_nthw_rcp_bank_b(be->p_km_nthw, km->v7.rcp[category + i].bank_b);
			km_nthw_rcp_kl_a(be->p_km_nthw, km->v7.rcp[category + i].kl_a);
			km_nthw_rcp_kl_b(be->p_km_nthw, km->v7.rcp[category + i].kl_b);
			km_nthw_rcp_keyway_a(be->p_km_nthw, km->v7.rcp[category + i].keyway_a);
			km_nthw_rcp_keyway_b(be->p_km_nthw, km->v7.rcp[category + i].keyway_b);
			km_nthw_rcp_synergy_mode(be->p_km_nthw,
				km->v7.rcp[category + i].synergy_mode);
			km_nthw_rcp_dw0_b_dyn(be->p_km_nthw, km->v7.rcp[category + i].dw0_b_dyn);
			km_nthw_rcp_dw0_b_ofs(be->p_km_nthw, km->v7.rcp[category + i].dw0_b_ofs);
			km_nthw_rcp_dw2_b_dyn(be->p_km_nthw, km->v7.rcp[category + i].dw2_b_dyn);
			km_nthw_rcp_dw2_b_ofs(be->p_km_nthw, km->v7.rcp[category + i].dw2_b_ofs);
			km_nthw_rcp_sw4_b_dyn(be->p_km_nthw, km->v7.rcp[category + i].sw4_b_dyn);
			km_nthw_rcp_sw4_b_ofs(be->p_km_nthw, km->v7.rcp[category + i].sw4_b_ofs);
			km_nthw_rcp_sw5_b_dyn(be->p_km_nthw, km->v7.rcp[category + i].sw5_b_dyn);
			km_nthw_rcp_sw5_b_ofs(be->p_km_nthw, km->v7.rcp[category + i].sw5_b_ofs);
			km_nthw_rcp_flush(be->p_km_nthw);
		}
	}

	CHECK_DEBUG_OFF(km, be->p_km_nthw);
	return 0;
}
805 
806 static int km_cam_flush(void *be_dev, const struct km_func_s *km, int bank, int record, int cnt)
807 {
808 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
809 	CHECK_DEBUG_ON(be, km, be->p_km_nthw);
810 
811 	if (km->ver == 7) {
812 		km_nthw_cam_cnt(be->p_km_nthw, 1);
813 
814 		for (int i = 0; i < cnt; i++) {
815 			km_nthw_cam_select(be->p_km_nthw, (bank << 11) + record + i);
816 			km_nthw_cam_w0(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w0);
817 			km_nthw_cam_w1(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w1);
818 			km_nthw_cam_w2(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w2);
819 			km_nthw_cam_w3(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w3);
820 			km_nthw_cam_w4(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w4);
821 			km_nthw_cam_w5(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w5);
822 			km_nthw_cam_ft0(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft0);
823 			km_nthw_cam_ft1(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft1);
824 			km_nthw_cam_ft2(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft2);
825 			km_nthw_cam_ft3(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft3);
826 			km_nthw_cam_ft4(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft4);
827 			km_nthw_cam_ft5(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft5);
828 			km_nthw_cam_flush(be->p_km_nthw);
829 		}
830 	}
831 
832 	CHECK_DEBUG_OFF(km, be->p_km_nthw);
833 	return 0;
834 }
835 
836 static int km_tcam_flush(void *be_dev, const struct km_func_s *km, int bank, int byte, int value,
837 	int cnt)
838 {
839 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
840 	CHECK_DEBUG_ON(be, km, be->p_km_nthw);
841 
842 	if (km->ver == 7) {
843 		int start_idx = bank * 4 * 256 + byte * 256 + value;
844 		km_nthw_tcam_cnt(be->p_km_nthw, 1);
845 
846 		for (int i = 0; i < cnt; i++) {
847 			if (km->v7.tcam[start_idx + i].dirty) {
848 				km_nthw_tcam_select(be->p_km_nthw, start_idx + i);
849 				km_nthw_tcam_t(be->p_km_nthw, km->v7.tcam[start_idx + i].t);
850 				km_nthw_tcam_flush(be->p_km_nthw);
851 				km->v7.tcam[start_idx + i].dirty = 0;
852 			}
853 		}
854 	}
855 
856 	CHECK_DEBUG_OFF(km, be->p_km_nthw);
857 	return 0;
858 }
859 
860 /*
861  * bank is the TCAM bank, index is the index within the bank (0..71)
862  */
863 static int km_tci_flush(void *be_dev, const struct km_func_s *km, int bank, int index, int cnt)
864 {
865 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
866 	CHECK_DEBUG_ON(be, km, be->p_km_nthw);
867 
868 	if (km->ver == 7) {
869 		/* TCAM bank width in version 3 = 72 */
870 		km_nthw_tci_cnt(be->p_km_nthw, 1);
871 
872 		for (int i = 0; i < cnt; i++) {
873 			km_nthw_tci_select(be->p_km_nthw, bank * 72 + index + i);
874 			km_nthw_tci_color(be->p_km_nthw, km->v7.tci[bank * 72 + index + i].color);
875 			km_nthw_tci_ft(be->p_km_nthw, km->v7.tci[bank * 72 + index + i].ft);
876 			km_nthw_tci_flush(be->p_km_nthw);
877 		}
878 	}
879 
880 	CHECK_DEBUG_OFF(km, be->p_km_nthw);
881 	return 0;
882 }
883 
884 /*
885  * bank is the TCAM bank, index is the index within the bank (0..71)
886  */
887 static int km_tcq_flush(void *be_dev, const struct km_func_s *km, int bank, int index, int cnt)
888 {
889 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
890 	CHECK_DEBUG_ON(be, km, be->p_km_nthw);
891 
892 	if (km->ver == 7) {
893 		/* TCAM bank width in version 3 = 72 */
894 		km_nthw_tcq_cnt(be->p_km_nthw, 1);
895 
896 		for (int i = 0; i < cnt; i++) {
897 			/* adr = lover 4 bits = bank, upper 7 bits = index */
898 			km_nthw_tcq_select(be->p_km_nthw, bank + (index << 4) + i);
899 			km_nthw_tcq_bank_mask(be->p_km_nthw,
900 				km->v7.tcq[bank + (index << 4) + i].bank_mask);
901 			km_nthw_tcq_qual(be->p_km_nthw, km->v7.tcq[bank + (index << 4) + i].qual);
902 			km_nthw_tcq_flush(be->p_km_nthw);
903 		}
904 	}
905 
906 	CHECK_DEBUG_OFF(km, be->p_km_nthw);
907 	return 0;
908 }
909 
910 /*
911  * DBS
912  */
913 
914 static int alloc_rx_queue(void *be_dev, int queue_id)
915 {
916 	(void)be_dev;
917 	(void)queue_id;
918 	NT_LOG(ERR, FILTER, "ERROR alloc Rx queue");
919 	return -1;
920 }
921 
922 static int free_rx_queue(void *be_dev, int hw_queue)
923 {
924 	(void)be_dev;
925 	(void)hw_queue;
926 	NT_LOG(ERR, FILTER, "ERROR free Rx queue");
927 	return 0;
928 }
929 
/*
 * Backend operations table handed to the flow API core.
 * NOTE: positional initializers — the order MUST match the field order of
 * struct flow_api_backend_ops; do not reorder entries.
 */
const struct flow_api_backend_ops flow_be_iface = {
	1,	/* backend interface version */

	set_debug_mode,

	/* capability/resource-count queries (read from the INFO module) */
	get_nb_phy_ports,
	get_nb_rx_ports,
	get_ltx_avail,
	get_nb_cat_funcs,
	get_nb_categories,
	get_nb_cat_km_if_cnt,
	get_nb_cat_km_if_m0,
	get_nb_cat_km_if_m1,
	get_nb_queues,
	get_nb_km_flow_types,
	get_nb_pm_ext,
	get_nb_len,
	get_kcc_size,
	get_kcc_banks,
	get_nb_km_categories,
	get_nb_km_cam_banks,
	get_nb_km_cam_record_words,
	get_nb_km_cam_records,
	get_nb_km_tcam_banks,
	get_nb_km_tcam_bank_width,
	get_nb_flm_categories,
	get_nb_flm_size_mb,
	get_nb_flm_entry_size,
	get_nb_flm_variant,
	get_nb_flm_prios,
	get_nb_flm_pst_profiles,
	get_nb_flm_scrub_profiles,
	get_nb_flm_load_aps_max,
	get_nb_qsl_categories,
	get_nb_qsl_qst_entries,
	get_nb_pdb_categories,
	get_nb_roa_categories,
	get_nb_tpe_categories,
	get_nb_tx_cpy_writers,
	get_nb_tx_cpy_mask_mem,
	get_nb_tx_rpl_depth,
	get_nb_tx_rpl_ext_categories,
	get_nb_tpe_ifr_categories,
	get_nb_rpp_per_ps,
	get_nb_hsh_categories,
	get_nb_hsh_toeplitz,

	/* DBS queue hooks (unsupported stubs in this backend) */
	alloc_rx_queue,
	free_rx_queue,

	/* CAT module */
	cat_get_present,
	cat_get_version,
	cat_cfn_flush,

	cat_kce_flush,
	cat_kcs_flush,
	cat_fte_flush,

	cat_cte_flush,
	cat_cts_flush,
	cat_cot_flush,
	cat_cct_flush,
	cat_exo_flush,
	cat_rck_flush,
	cat_len_flush,
	cat_kcc_flush,

	/* KM module */
	km_get_present,
	km_get_version,
	km_rcp_flush,
	km_cam_flush,
	km_tcam_flush,
	km_tci_flush,
	km_tcq_flush,
};
1004 
1005 const struct flow_api_backend_ops *bin_flow_backend_init(nthw_fpga_t *p_fpga, void **dev)
1006 {
1007 	uint8_t physical_adapter_no = (uint8_t)p_fpga->p_fpga_info->adapter_no;
1008 
1009 	struct info_nthw *pinfonthw = info_nthw_new();
1010 	info_nthw_init(pinfonthw, p_fpga, physical_adapter_no);
1011 	be_devs[physical_adapter_no].p_info_nthw = pinfonthw;
1012 
1013 	/* Init nthw CAT */
1014 	if (cat_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1015 		struct cat_nthw *pcatnthw = cat_nthw_new();
1016 		cat_nthw_init(pcatnthw, p_fpga, physical_adapter_no);
1017 		be_devs[physical_adapter_no].p_cat_nthw = pcatnthw;
1018 
1019 	} else {
1020 		be_devs[physical_adapter_no].p_cat_nthw = NULL;
1021 	}
1022 
1023 	/* Init nthw KM */
1024 	if (km_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1025 		struct km_nthw *pkmnthw = km_nthw_new();
1026 		km_nthw_init(pkmnthw, p_fpga, physical_adapter_no);
1027 		be_devs[physical_adapter_no].p_km_nthw = pkmnthw;
1028 
1029 	} else {
1030 		be_devs[physical_adapter_no].p_km_nthw = NULL;
1031 	}
1032 
1033 	be_devs[physical_adapter_no].adapter_no = physical_adapter_no;
1034 	*dev = (void *)&be_devs[physical_adapter_no];
1035 
1036 	return &flow_be_iface;
1037 }
1038 
1039 static void bin_flow_backend_done(void *dev)
1040 {
1041 	struct backend_dev_s *be_dev = (struct backend_dev_s *)dev;
1042 	info_nthw_delete(be_dev->p_info_nthw);
1043 	cat_nthw_delete(be_dev->p_cat_nthw);
1044 	km_nthw_delete(be_dev->p_km_nthw);
1045 }
1046 
/* Registration record exposing this backend's init/done entry points. */
static const struct flow_backend_ops ops = {
	.bin_flow_backend_init = bin_flow_backend_init,
	.bin_flow_backend_done = bin_flow_backend_done,
};
1051 
1052 void flow_backend_init(void)
1053 {
1054 	register_flow_backend_ops(&ops);
1055 }
1056