xref: /dpdk/drivers/net/ntnic/nthw/flow_api/flow_backend/flow_backend.c (revision 25a2a0dc3de31ca0a6fbc9371cf3dd85dfd74b07)
1 /*
2  * SPDX-License-Identifier: BSD-3-Clause
3  * Copyright(c) 2023 Napatech A/S
4  */
5 
6 #include <stdint.h>
7 
8 #include "flow_nthw_info.h"
9 #include "flow_nthw_ifr.h"
10 #include "flow_nthw_cat.h"
11 #include "flow_nthw_csu.h"
12 #include "flow_nthw_km.h"
13 #include "flow_nthw_flm.h"
14 #include "flow_nthw_hfu.h"
15 #include "flow_nthw_hsh.h"
16 #include "flow_nthw_qsl.h"
17 #include "flow_nthw_slc_lr.h"
18 #include "flow_nthw_pdb.h"
19 #include "flow_nthw_rpp_lr.h"
20 #include "flow_nthw_tx_cpy.h"
21 #include "flow_nthw_tx_ins.h"
22 #include "flow_nthw_tx_rpl.h"
23 #include "ntnic_mod_reg.h"
24 #include "nthw_fpga_model.h"
25 #include "hw_mod_backend.h"
26 
27 /*
28  * Binary Flow API backend implementation for the ntservice driver
29  *
30  * General note on this backend implementation:
31  * A shadow class could be used to combine multiple writes; however, this backend is only for dev/testing.
32  */
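
/*
 * The module backends below all follow the same select/write/flush pattern against the
 * nthw register shadow model. As a rough sketch (the mod_nthw_xxx_* names are generic
 * placeholders, not real API), each flush helper has this shape:
 *
 *	CHECK_DEBUG_ON(be, mod, be->p_mod_nthw);
 *	if (mod->ver == SUPPORTED_VERSION) {
 *		mod_nthw_xxx_cnt(be->p_mod_nthw, 1);
 *		for (int i = 0; i < cnt; i++) {
 *			mod_nthw_xxx_select(be->p_mod_nthw, index + i);
 *			... write each shadow field of entry index + i ...
 *			mod_nthw_xxx_flush(be->p_mod_nthw);
 *		}
 *	}
 *	CHECK_DEBUG_OFF(mod, be->p_mod_nthw);
 */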
33 
34 static struct backend_dev_s {
35 	uint8_t adapter_no;
36 	enum debug_mode_e dmode;
37 	struct info_nthw *p_info_nthw;
38 	struct cat_nthw *p_cat_nthw;
39 	struct km_nthw *p_km_nthw;
40 	struct flm_nthw *p_flm_nthw;
41 	struct hsh_nthw *p_hsh_nthw;
42 	struct qsl_nthw *p_qsl_nthw;
43 	struct slc_lr_nthw *p_slc_lr_nthw;
44 	struct pdb_nthw *p_pdb_nthw;
45 	struct hfu_nthw *p_hfu_nthw;    /* TPE module */
46 	struct rpp_lr_nthw *p_rpp_lr_nthw;      /* TPE module */
47 	struct tx_cpy_nthw *p_tx_cpy_nthw;      /* TPE module */
48 	struct tx_ins_nthw *p_tx_ins_nthw;      /* TPE module */
49 	struct tx_rpl_nthw *p_tx_rpl_nthw;      /* TPE module */
50 	struct csu_nthw *p_csu_nthw;    /* TPE module */
51 	struct ifr_nthw *p_ifr_nthw;    /* TPE module */
52 } be_devs[MAX_PHYS_ADAPTERS];
53 
54 #define CHECK_DEBUG_ON(be, mod, inst)                                                             \
55 	int __debug__ = 0;                                                                        \
56 	if (((be)->dmode & FLOW_BACKEND_DEBUG_MODE_WRITE) || (mod)->debug)                        \
57 		do {                                                                              \
58 			mod##_nthw_set_debug_mode((inst), 0xFF);                                  \
59 			__debug__ = 1;                                                            \
60 	} while (0)
61 
62 #define CHECK_DEBUG_OFF(mod, inst)                                                                \
63 	do {                                                                                      \
64 		if (__debug__)                                                                    \
65 			mod##_nthw_set_debug_mode((inst), 0);                                     \
66 	} while (0)
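
/*
 * CHECK_DEBUG_ON expands to a declaration followed by an if-statement (it is not wrapped
 * in a single do/while), so it cannot be used where exactly one statement is expected.
 * It records in the local __debug__ whether module register debugging was enabled, so
 * that CHECK_DEBUG_OFF can disable it again. As an example,
 * CHECK_DEBUG_ON(be, cat, be->p_cat_nthw); expands roughly to:
 *
 *	int __debug__ = 0;
 *	if ((be->dmode & FLOW_BACKEND_DEBUG_MODE_WRITE) || cat->debug)
 *		do {
 *			cat_nthw_set_debug_mode(be->p_cat_nthw, 0xFF);
 *			__debug__ = 1;
 *		} while (0);
 */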
67 
68 const struct flow_api_backend_ops *bin_flow_backend_init(nthw_fpga_t *p_fpga, void **be_dev);
69 static void bin_flow_backend_done(void *be_dev);
70 
71 static int set_debug_mode(void *be_dev, enum debug_mode_e mode)
72 {
73 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
74 	be->dmode = mode;
75 	return 0;
76 }
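
/* The stored debug mode is consulted by CHECK_DEBUG_ON() in every flush/update call below. */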
77 
78 /*
79  * INFO
80  */
81 
82 static int get_nb_phy_ports(void *be_dev)
83 {
84 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
85 	return info_nthw_get_nb_phy_ports(be->p_info_nthw);
86 }
87 
88 static int get_nb_rx_ports(void *be_dev)
89 {
90 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
91 	return info_nthw_get_nb_rx_ports(be->p_info_nthw);
92 }
93 
94 static int get_ltx_avail(void *be_dev)
95 {
96 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
97 	return info_nthw_get_ltx_avail(be->p_info_nthw);
98 }
99 
100 static int get_nb_cat_funcs(void *be_dev)
101 {
102 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
103 	return info_nthw_get_nb_cat_funcs(be->p_info_nthw);
104 }
105 
106 static int get_nb_categories(void *be_dev)
107 {
108 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
109 	return info_nthw_get_nb_categories(be->p_info_nthw);
110 }
111 
112 static int get_nb_cat_km_if_cnt(void *be_dev)
113 {
114 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
115 	return info_nthw_get_nb_cat_km_if_cnt(be->p_info_nthw);
116 }
117 
118 static int get_nb_cat_km_if_m0(void *be_dev)
119 {
120 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
121 	return info_nthw_get_nb_cat_km_if_m0(be->p_info_nthw);
122 }
123 
124 static int get_nb_cat_km_if_m1(void *be_dev)
125 {
126 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
127 	return info_nthw_get_nb_cat_km_if_m1(be->p_info_nthw);
128 }
129 
130 static int get_nb_queues(void *be_dev)
131 {
132 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
133 	return info_nthw_get_nb_queues(be->p_info_nthw);
134 }
135 
136 static int get_nb_km_flow_types(void *be_dev)
137 {
138 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
139 	return info_nthw_get_nb_km_flow_types(be->p_info_nthw);
140 }
141 
142 static int get_nb_pm_ext(void *be_dev)
143 {
144 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
145 	return info_nthw_get_nb_pm_ext(be->p_info_nthw);
146 }
147 
148 static int get_nb_len(void *be_dev)
149 {
150 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
151 	return info_nthw_get_nb_len(be->p_info_nthw);
152 }
153 
154 static int get_kcc_size(void *be_dev)
155 {
156 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
157 	return info_nthw_get_kcc_size(be->p_info_nthw);
158 }
159 
160 static int get_kcc_banks(void *be_dev)
161 {
162 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
163 	return info_nthw_get_kcc_banks(be->p_info_nthw);
164 }
165 
166 static int get_nb_km_categories(void *be_dev)
167 {
168 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
169 	return info_nthw_get_nb_km_categories(be->p_info_nthw);
170 }
171 
172 static int get_nb_km_cam_banks(void *be_dev)
173 {
174 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
175 	return info_nthw_get_nb_km_cam_banks(be->p_info_nthw);
176 }
177 
178 static int get_nb_km_cam_record_words(void *be_dev)
179 {
180 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
181 	return info_nthw_get_nb_km_cam_record_words(be->p_info_nthw);
182 }
183 
184 static int get_nb_km_cam_records(void *be_dev)
185 {
186 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
187 	return info_nthw_get_nb_km_cam_records(be->p_info_nthw);
188 }
189 
190 static int get_nb_km_tcam_banks(void *be_dev)
191 {
192 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
193 	return info_nthw_get_nb_km_tcam_banks(be->p_info_nthw);
194 }
195 
196 static int get_nb_km_tcam_bank_width(void *be_dev)
197 {
198 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
199 	return info_nthw_get_nb_km_tcam_bank_width(be->p_info_nthw);
200 }
201 
202 static int get_nb_flm_categories(void *be_dev)
203 {
204 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
205 	return info_nthw_get_nb_flm_categories(be->p_info_nthw);
206 }
207 
208 static int get_nb_flm_size_mb(void *be_dev)
209 {
210 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
211 	return info_nthw_get_nb_flm_size_mb(be->p_info_nthw);
212 }
213 
214 static int get_nb_flm_entry_size(void *be_dev)
215 {
216 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
217 	return info_nthw_get_nb_flm_entry_size(be->p_info_nthw);
218 }
219 
220 static int get_nb_flm_variant(void *be_dev)
221 {
222 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
223 	return info_nthw_get_nb_flm_variant(be->p_info_nthw);
224 }
225 
226 static int get_nb_flm_prios(void *be_dev)
227 {
228 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
229 	return info_nthw_get_nb_flm_prios(be->p_info_nthw);
230 }
231 
232 static int get_nb_flm_pst_profiles(void *be_dev)
233 {
234 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
235 	return info_nthw_get_nb_flm_pst_profiles(be->p_info_nthw);
236 }
237 
238 static int get_nb_flm_scrub_profiles(void *be_dev)
239 {
240 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
241 	return info_nthw_get_nb_flm_scrub_profiles(be->p_info_nthw);
242 }
243 
244 static int get_nb_flm_load_aps_max(void *be_dev)
245 {
246 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
247 	return info_nthw_get_nb_flm_load_aps_max(be->p_info_nthw);
248 }
249 
250 static int get_nb_qsl_categories(void *be_dev)
251 {
252 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
253 	return info_nthw_get_nb_qsl_categories(be->p_info_nthw);
254 }
255 
256 static int get_nb_qsl_qst_entries(void *be_dev)
257 {
258 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
259 	return info_nthw_get_nb_qsl_qst_entries(be->p_info_nthw);
260 }
261 
262 static int get_nb_pdb_categories(void *be_dev)
263 {
264 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
265 	return info_nthw_get_nb_pdb_categories(be->p_info_nthw);
266 }
267 
268 static int get_nb_roa_categories(void *be_dev)
269 {
270 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
271 	return info_nthw_get_nb_roa_categories(be->p_info_nthw);
272 }
273 
274 static int get_nb_tpe_categories(void *be_dev)
275 {
276 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
277 	return info_nthw_get_nb_tpe_categories(be->p_info_nthw);
278 }
279 
280 static int get_nb_tx_cpy_writers(void *be_dev)
281 {
282 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
283 	return info_nthw_get_nb_tx_cpy_writers(be->p_info_nthw);
284 }
285 
286 static int get_nb_tx_cpy_mask_mem(void *be_dev)
287 {
288 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
289 	return info_nthw_get_nb_tx_cpy_mask_mem(be->p_info_nthw);
290 }
291 
292 static int get_nb_tx_rpl_depth(void *be_dev)
293 {
294 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
295 	return info_nthw_get_nb_tx_rpl_depth(be->p_info_nthw);
296 }
297 
298 static int get_nb_tx_rpl_ext_categories(void *be_dev)
299 {
300 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
301 	return info_nthw_get_nb_tx_rpl_ext_categories(be->p_info_nthw);
302 }
303 
304 static int get_nb_tpe_ifr_categories(void *be_dev)
305 {
306 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
307 	return info_nthw_get_nb_tpe_ifr_categories(be->p_info_nthw);
308 }
309 
310 static int get_nb_rpp_per_ps(void *be_dev)
311 {
312 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
313 	return info_nthw_get_nb_rpp_per_ps(be->p_info_nthw);
314 }
315 
316 static int get_nb_hsh_categories(void *be_dev)
317 {
318 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
319 	return info_nthw_get_nb_hsh_categories(be->p_info_nthw);
320 }
321 
322 static int get_nb_hsh_toeplitz(void *be_dev)
323 {
324 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
325 	return info_nthw_get_nb_hsh_toeplitz(be->p_info_nthw);
326 }
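
/*
 * All INFO getters above are thin pass-through wrappers: each one only forwards the
 * capability query to the matching info_nthw_get_*() accessor of this adapter, e.g.
 * get_nb_queues() simply returns info_nthw_get_nb_queues(be->p_info_nthw).
 */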
327 
328 /*
329  * CAT
330  */
331 
332 static bool cat_get_present(void *be_dev)
333 {
334 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
335 	return be->p_cat_nthw != NULL;
336 }
337 
338 static uint32_t cat_get_version(void *be_dev)
339 {
340 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
341 	return (uint32_t)((nthw_module_get_major_version(be->p_cat_nthw->m_cat) << 16) |
342 			(nthw_module_get_minor_version(be->p_cat_nthw->m_cat) & 0xffff));
343 }
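
/*
 * Module versions are reported packed as (major << 16) | minor. For example, a CAT
 * register model with major version 21 and minor version 0 is returned as 0x00150000.
 * The km/flm/hsh/qsl/slc_lr version getters below use the same packing.
 */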
344 
345 static int cat_cfn_flush(void *be_dev, const struct cat_func_s *cat, int cat_func, int cnt)
346 {
347 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
348 
349 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
350 
351 	if (cat->ver == 18) {
352 		cat_nthw_cfn_cnt(be->p_cat_nthw, 1U);
353 
354 		for (int i = 0; i < cnt; i++) {
355 			cat_nthw_cfn_select(be->p_cat_nthw, cat_func);
356 			cat_nthw_cfn_enable(be->p_cat_nthw, cat->v18.cfn[cat_func].enable);
357 			cat_nthw_cfn_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].inv);
358 			cat_nthw_cfn_ptc_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_inv);
359 			cat_nthw_cfn_ptc_isl(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_isl);
360 			cat_nthw_cfn_ptc_cfp(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_cfp);
361 			cat_nthw_cfn_ptc_mac(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_mac);
362 			cat_nthw_cfn_ptc_l2(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_l2);
363 			cat_nthw_cfn_ptc_vn_tag(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_vntag);
364 			cat_nthw_cfn_ptc_vlan(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_vlan);
365 			cat_nthw_cfn_ptc_mpls(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_mpls);
366 			cat_nthw_cfn_ptc_l3(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_l3);
367 			cat_nthw_cfn_ptc_frag(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_frag);
368 			cat_nthw_cfn_ptc_ip_prot(be->p_cat_nthw,
369 				cat->v18.cfn[cat_func].ptc_ip_prot);
370 			cat_nthw_cfn_ptc_l4(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_l4);
371 			cat_nthw_cfn_ptc_tunnel(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tunnel);
372 			cat_nthw_cfn_ptc_tnl_l2(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_l2);
373 			cat_nthw_cfn_ptc_tnl_vlan(be->p_cat_nthw,
374 				cat->v18.cfn[cat_func].ptc_tnl_vlan);
375 			cat_nthw_cfn_ptc_tnl_mpls(be->p_cat_nthw,
376 				cat->v18.cfn[cat_func].ptc_tnl_mpls);
377 			cat_nthw_cfn_ptc_tnl_l3(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_l3);
378 			cat_nthw_cfn_ptc_tnl_frag(be->p_cat_nthw,
379 				cat->v18.cfn[cat_func].ptc_tnl_frag);
380 			cat_nthw_cfn_ptc_tnl_ip_prot(be->p_cat_nthw,
381 				cat->v18.cfn[cat_func].ptc_tnl_ip_prot);
382 			cat_nthw_cfn_ptc_tnl_l4(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_l4);
383 
384 			cat_nthw_cfn_err_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].err_inv);
385 			cat_nthw_cfn_err_cv(be->p_cat_nthw, cat->v18.cfn[cat_func].err_cv);
386 			cat_nthw_cfn_err_fcs(be->p_cat_nthw, cat->v18.cfn[cat_func].err_fcs);
387 			cat_nthw_cfn_err_trunc(be->p_cat_nthw, cat->v18.cfn[cat_func].err_trunc);
388 			cat_nthw_cfn_err_l3_cs(be->p_cat_nthw, cat->v18.cfn[cat_func].err_l3_cs);
389 			cat_nthw_cfn_err_l4_cs(be->p_cat_nthw, cat->v18.cfn[cat_func].err_l4_cs);
390 
391 			cat_nthw_cfn_mac_port(be->p_cat_nthw, cat->v18.cfn[cat_func].mac_port);
392 
393 			cat_nthw_cfn_pm_cmp(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_cmp);
394 			cat_nthw_cfn_pm_dct(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_dct);
395 			cat_nthw_cfn_pm_ext_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_ext_inv);
396 			cat_nthw_cfn_pm_cmb(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_cmb);
397 			cat_nthw_cfn_pm_and_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_and_inv);
398 			cat_nthw_cfn_pm_or_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_or_inv);
399 			cat_nthw_cfn_pm_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_inv);
400 
401 			cat_nthw_cfn_lc(be->p_cat_nthw, cat->v18.cfn[cat_func].lc);
402 			cat_nthw_cfn_lc_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].lc_inv);
403 			cat_nthw_cfn_km0_or(be->p_cat_nthw, cat->v18.cfn[cat_func].km_or);
404 			cat_nthw_cfn_flush(be->p_cat_nthw);
405 			cat_func++;
406 		}
407 
408 	} else if (cat->ver == 21) {
409 		cat_nthw_cfn_cnt(be->p_cat_nthw, 1U);
410 
411 		for (int i = 0; i < cnt; i++) {
412 			cat_nthw_cfn_select(be->p_cat_nthw, cat_func);
413 			cat_nthw_cfn_enable(be->p_cat_nthw, cat->v21.cfn[cat_func].enable);
414 			cat_nthw_cfn_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].inv);
415 			cat_nthw_cfn_ptc_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_inv);
416 			cat_nthw_cfn_ptc_isl(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_isl);
417 			cat_nthw_cfn_ptc_cfp(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_cfp);
418 			cat_nthw_cfn_ptc_mac(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_mac);
419 			cat_nthw_cfn_ptc_l2(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_l2);
420 			cat_nthw_cfn_ptc_vn_tag(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_vntag);
421 			cat_nthw_cfn_ptc_vlan(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_vlan);
422 			cat_nthw_cfn_ptc_mpls(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_mpls);
423 			cat_nthw_cfn_ptc_l3(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_l3);
424 			cat_nthw_cfn_ptc_frag(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_frag);
425 			cat_nthw_cfn_ptc_ip_prot(be->p_cat_nthw,
426 				cat->v21.cfn[cat_func].ptc_ip_prot);
427 			cat_nthw_cfn_ptc_l4(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_l4);
428 			cat_nthw_cfn_ptc_tunnel(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tunnel);
429 			cat_nthw_cfn_ptc_tnl_l2(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_l2);
430 			cat_nthw_cfn_ptc_tnl_vlan(be->p_cat_nthw,
431 				cat->v21.cfn[cat_func].ptc_tnl_vlan);
432 			cat_nthw_cfn_ptc_tnl_mpls(be->p_cat_nthw,
433 				cat->v21.cfn[cat_func].ptc_tnl_mpls);
434 			cat_nthw_cfn_ptc_tnl_l3(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_l3);
435 			cat_nthw_cfn_ptc_tnl_frag(be->p_cat_nthw,
436 				cat->v21.cfn[cat_func].ptc_tnl_frag);
437 			cat_nthw_cfn_ptc_tnl_ip_prot(be->p_cat_nthw,
438 				cat->v21.cfn[cat_func].ptc_tnl_ip_prot);
439 			cat_nthw_cfn_ptc_tnl_l4(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_l4);
440 
441 			cat_nthw_cfn_err_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].err_inv);
442 			cat_nthw_cfn_err_cv(be->p_cat_nthw, cat->v21.cfn[cat_func].err_cv);
443 			cat_nthw_cfn_err_fcs(be->p_cat_nthw, cat->v21.cfn[cat_func].err_fcs);
444 			cat_nthw_cfn_err_trunc(be->p_cat_nthw, cat->v21.cfn[cat_func].err_trunc);
445 			cat_nthw_cfn_err_l3_cs(be->p_cat_nthw, cat->v21.cfn[cat_func].err_l3_cs);
446 			cat_nthw_cfn_err_l4_cs(be->p_cat_nthw, cat->v21.cfn[cat_func].err_l4_cs);
447 			cat_nthw_cfn_err_tnl_l3_cs(be->p_cat_nthw,
448 				cat->v21.cfn[cat_func].err_tnl_l3_cs);
449 			cat_nthw_cfn_err_tnl_l4_cs(be->p_cat_nthw,
450 				cat->v21.cfn[cat_func].err_tnl_l4_cs);
451 			cat_nthw_cfn_err_ttl_exp(be->p_cat_nthw,
452 				cat->v21.cfn[cat_func].err_ttl_exp);
453 			cat_nthw_cfn_err_tnl_ttl_exp(be->p_cat_nthw,
454 				cat->v21.cfn[cat_func].err_tnl_ttl_exp);
455 
456 			cat_nthw_cfn_mac_port(be->p_cat_nthw, cat->v21.cfn[cat_func].mac_port);
457 
458 			cat_nthw_cfn_pm_cmp(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_cmp);
459 			cat_nthw_cfn_pm_dct(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_dct);
460 			cat_nthw_cfn_pm_ext_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_ext_inv);
461 			cat_nthw_cfn_pm_cmb(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_cmb);
462 			cat_nthw_cfn_pm_and_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_and_inv);
463 			cat_nthw_cfn_pm_or_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_or_inv);
464 			cat_nthw_cfn_pm_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_inv);
465 
466 			cat_nthw_cfn_lc(be->p_cat_nthw, cat->v21.cfn[cat_func].lc);
467 			cat_nthw_cfn_lc_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].lc_inv);
468 			cat_nthw_cfn_km0_or(be->p_cat_nthw, cat->v21.cfn[cat_func].km0_or);
469 
470 			if (be->p_cat_nthw->m_km_if_cnt > 1)
471 				cat_nthw_cfn_km1_or(be->p_cat_nthw, cat->v21.cfn[cat_func].km1_or);
472 
473 			cat_nthw_cfn_flush(be->p_cat_nthw);
474 			cat_func++;
475 		}
476 	}
477 
478 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
479 	return 0;
480 }
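
/*
 * The v18 and v21 CFN records are written almost identically; version 21 additionally
 * carries the tunnel checksum/TTL error bits (err_tnl_l3_cs, err_tnl_l4_cs, err_ttl_exp,
 * err_tnl_ttl_exp) and a second KM interface selector (km1_or), which is only written
 * when the FPGA exposes more than one KM interface (m_km_if_cnt > 1).
 */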
481 
482 static int cat_kce_flush(void *be_dev, const struct cat_func_s *cat, int km_if_idx, int index,
483 	int cnt)
484 {
485 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
486 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
487 
488 	if (cat->ver == 18) {
489 		cat_nthw_kce_cnt(be->p_cat_nthw, 0, 1U);
490 
491 		for (int i = 0; i < cnt; i++) {
492 			cat_nthw_kce_select(be->p_cat_nthw, 0, index + i);
493 			cat_nthw_kce_enable(be->p_cat_nthw, 0, cat->v18.kce[index + i].enable_bm);
494 			cat_nthw_kce_flush(be->p_cat_nthw, 0);
495 		}
496 
497 	} else if (cat->ver == 21) {
498 		cat_nthw_kce_cnt(be->p_cat_nthw, km_if_idx, 1U);
499 
500 		for (int i = 0; i < cnt; i++) {
501 			cat_nthw_kce_select(be->p_cat_nthw, km_if_idx, index + i);
502 			cat_nthw_kce_enable(be->p_cat_nthw, km_if_idx,
503 				cat->v21.kce[index + i].enable_bm[km_if_idx]);
504 			cat_nthw_kce_flush(be->p_cat_nthw, km_if_idx);
505 		}
506 	}
507 
508 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
509 	return 0;
510 }
511 
512 static int cat_kcs_flush(void *be_dev, const struct cat_func_s *cat, int km_if_idx, int cat_func,
513 	int cnt)
514 {
515 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
516 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
517 
518 	if (cat->ver == 18) {
519 		cat_nthw_kcs_cnt(be->p_cat_nthw, 0, 1U);
520 
521 		for (int i = 0; i < cnt; i++) {
522 			cat_nthw_kcs_select(be->p_cat_nthw, 0, cat_func);
523 			cat_nthw_kcs_category(be->p_cat_nthw, 0, cat->v18.kcs[cat_func].category);
524 			cat_nthw_kcs_flush(be->p_cat_nthw, 0);
525 			cat_func++;
526 		}
527 
528 	} else if (cat->ver == 21) {
529 		cat_nthw_kcs_cnt(be->p_cat_nthw, km_if_idx, 1U);
530 
531 		for (int i = 0; i < cnt; i++) {
532 			cat_nthw_kcs_select(be->p_cat_nthw, km_if_idx, cat_func);
533 			cat_nthw_kcs_category(be->p_cat_nthw, km_if_idx,
534 				cat->v21.kcs[cat_func].category[km_if_idx]);
535 			cat_nthw_kcs_flush(be->p_cat_nthw, km_if_idx);
536 			cat_func++;
537 		}
538 	}
539 
540 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
541 	return 0;
542 }
543 
544 static int cat_fte_flush(void *be_dev, const struct cat_func_s *cat, int km_if_idx, int index,
545 	int cnt)
546 {
547 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
548 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
549 
550 	if (cat->ver == 18) {
551 		cat_nthw_fte_cnt(be->p_cat_nthw, 0, 1);
552 
553 		for (int i = 0; i < cnt; i++) {
554 			cat_nthw_fte_select(be->p_cat_nthw, 0, index + i);
555 			cat_nthw_fte_enable(be->p_cat_nthw, 0, cat->v18.fte[index + i].enable_bm);
556 			cat_nthw_fte_flush(be->p_cat_nthw, 0);
557 		}
558 
559 	} else if (cat->ver == 21) {
560 		cat_nthw_fte_cnt(be->p_cat_nthw, km_if_idx, 1);
561 
562 		for (int i = 0; i < cnt; i++) {
563 			cat_nthw_fte_select(be->p_cat_nthw, km_if_idx, index + i);
564 			cat_nthw_fte_enable(be->p_cat_nthw, km_if_idx,
565 				cat->v21.fte[index + i].enable_bm[km_if_idx]);
566 			cat_nthw_fte_flush(be->p_cat_nthw, km_if_idx);
567 		}
568 	}
569 
570 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
571 	return 0;
572 }
573 
574 static int cat_cte_flush(void *be_dev, const struct cat_func_s *cat, int cat_func, int cnt)
575 {
576 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
577 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
578 
579 	if (cat->ver == 18 || cat->ver == 21) {
580 		cat_nthw_cte_cnt(be->p_cat_nthw, 1);
581 
582 		for (int i = 0; i < cnt; i++) {
583 			cat_nthw_cte_select(be->p_cat_nthw, cat_func);
584 			cat_nthw_cte_enable_col(be->p_cat_nthw, cat->v18.cte[cat_func].b.col);
585 			cat_nthw_cte_enable_cor(be->p_cat_nthw, cat->v18.cte[cat_func].b.cor);
586 			cat_nthw_cte_enable_hsh(be->p_cat_nthw, cat->v18.cte[cat_func].b.hsh);
587 			cat_nthw_cte_enable_qsl(be->p_cat_nthw, cat->v18.cte[cat_func].b.qsl);
588 			cat_nthw_cte_enable_ipf(be->p_cat_nthw, cat->v18.cte[cat_func].b.ipf);
589 			cat_nthw_cte_enable_slc(be->p_cat_nthw, cat->v18.cte[cat_func].b.slc);
590 			cat_nthw_cte_enable_pdb(be->p_cat_nthw, cat->v18.cte[cat_func].b.pdb);
591 			cat_nthw_cte_enable_msk(be->p_cat_nthw, cat->v18.cte[cat_func].b.msk);
592 			cat_nthw_cte_enable_hst(be->p_cat_nthw, cat->v18.cte[cat_func].b.hst);
593 			cat_nthw_cte_enable_epp(be->p_cat_nthw, cat->v18.cte[cat_func].b.epp);
594 			cat_nthw_cte_enable_tpe(be->p_cat_nthw, cat->v18.cte[cat_func].b.tpe);
595 
596 			cat_nthw_cte_flush(be->p_cat_nthw);
597 			cat_func++;
598 		}
599 	}
600 
601 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
602 	return 0;
603 }
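
/*
 * For CTE and the remaining CAT tables below (CTS, COT, CCT, EXO, RCK, LEN, KCC) the
 * v18 and v21 record layouts are treated as identical, so the v18 view of the union is
 * used for both versions.
 */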
604 
605 static int cat_cts_flush(void *be_dev, const struct cat_func_s *cat, int index, int cnt)
606 {
607 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
608 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
609 
610 	if (cat->ver == 18 || cat->ver == 21) {
611 		cat_nthw_cts_cnt(be->p_cat_nthw, 1);
612 
613 		for (int i = 0; i < cnt; i++) {
614 			cat_nthw_cts_select(be->p_cat_nthw, index + i);
615 			cat_nthw_cts_cat_a(be->p_cat_nthw, cat->v18.cts[index + i].cat_a);
616 			cat_nthw_cts_cat_b(be->p_cat_nthw, cat->v18.cts[index + i].cat_b);
617 			cat_nthw_cts_flush(be->p_cat_nthw);
618 		}
619 	}
620 
621 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
622 	return 0;
623 }
624 
625 static int cat_cot_flush(void *be_dev, const struct cat_func_s *cat, int cat_func, int cnt)
626 {
627 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
628 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
629 
630 	if (cat->ver == 18 || cat->ver == 21) {
631 		cat_nthw_cot_cnt(be->p_cat_nthw, 1);
632 
633 		for (int i = 0; i < cnt; i++) {
634 			cat_nthw_cot_select(be->p_cat_nthw, cat_func + i);
635 			cat_nthw_cot_color(be->p_cat_nthw, cat->v18.cot[cat_func + i].color);
636 			cat_nthw_cot_km(be->p_cat_nthw, cat->v18.cot[cat_func + i].km);
637 			cat_nthw_cot_flush(be->p_cat_nthw);
638 		}
639 	}
640 
641 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
642 	return 0;
643 }
644 
645 static int cat_cct_flush(void *be_dev, const struct cat_func_s *cat, int index, int cnt)
646 {
647 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
648 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
649 
650 	if (cat->ver == 18 || cat->ver == 21) {
651 		cat_nthw_cct_cnt(be->p_cat_nthw, 1);
652 
653 		for (int i = 0; i < cnt; i++) {
654 			cat_nthw_cct_select(be->p_cat_nthw, index + i);
655 			cat_nthw_cct_color(be->p_cat_nthw, cat->v18.cct[index + i].color);
656 			cat_nthw_cct_km(be->p_cat_nthw, cat->v18.cct[index + i].km);
657 			cat_nthw_cct_flush(be->p_cat_nthw);
658 		}
659 	}
660 
661 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
662 	return 0;
663 }
664 
665 static int cat_exo_flush(void *be_dev, const struct cat_func_s *cat, int ext_index, int cnt)
666 {
667 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
668 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
669 
670 	if (cat->ver == 18 || cat->ver == 21) {
671 		cat_nthw_exo_cnt(be->p_cat_nthw, 1);
672 
673 		for (int i = 0; i < cnt; i++) {
674 			cat_nthw_exo_select(be->p_cat_nthw, ext_index + i);
675 			cat_nthw_exo_dyn(be->p_cat_nthw, cat->v18.exo[ext_index + i].dyn);
676 			cat_nthw_exo_ofs(be->p_cat_nthw, cat->v18.exo[ext_index + i].ofs);
677 			cat_nthw_exo_flush(be->p_cat_nthw);
678 		}
679 	}
680 
681 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
682 	return 0;
683 }
684 
685 static int cat_rck_flush(void *be_dev, const struct cat_func_s *cat, int index, int cnt)
686 {
687 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
688 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
689 
690 	if (cat->ver == 18 || cat->ver == 21) {
691 		cat_nthw_rck_cnt(be->p_cat_nthw, 1);
692 
693 		for (int i = 0; i < cnt; i++) {
694 			cat_nthw_rck_select(be->p_cat_nthw, index + i);
695 			cat_nthw_rck_data(be->p_cat_nthw, cat->v18.rck[index + i].rck_data);
696 			cat_nthw_rck_flush(be->p_cat_nthw);
697 		}
698 	}
699 
700 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
701 	return 0;
702 }
703 
704 static int cat_len_flush(void *be_dev, const struct cat_func_s *cat, int len_index, int cnt)
705 {
706 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
707 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
708 
709 	if (cat->ver == 18 || cat->ver == 21) {
710 		cat_nthw_len_cnt(be->p_cat_nthw, 1);
711 
712 		for (int i = 0; i < cnt; i++) {
713 			cat_nthw_len_select(be->p_cat_nthw, len_index + i);
714 			cat_nthw_len_lower(be->p_cat_nthw, cat->v18.len[len_index + i].lower);
715 			cat_nthw_len_upper(be->p_cat_nthw, cat->v18.len[len_index + i].upper);
716 			cat_nthw_len_dyn1(be->p_cat_nthw, cat->v18.len[len_index + i].dyn1);
717 			cat_nthw_len_dyn2(be->p_cat_nthw, cat->v18.len[len_index + i].dyn2);
718 			cat_nthw_len_inv(be->p_cat_nthw, cat->v18.len[len_index + i].inv);
719 			cat_nthw_len_flush(be->p_cat_nthw);
720 		}
721 	}
722 
723 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
724 	return 0;
725 }
726 
727 static int cat_kcc_flush(void *be_dev, const struct cat_func_s *cat, int len_index, int cnt)
728 {
729 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
730 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
731 
732 	if (cat->ver == 18 || cat->ver == 21) {
733 		cat_nthw_kcc_cnt(be->p_cat_nthw, 1);
734 
735 		for (int i = 0; i < cnt; i++) {
736 			cat_nthw_kcc_select(be->p_cat_nthw, len_index + i);
737 			cat_nthw_kcc_key(be->p_cat_nthw, cat->v18.kcc_cam[len_index + i].key);
738 			cat_nthw_kcc_category(be->p_cat_nthw,
739 				cat->v18.kcc_cam[len_index + i].category);
740 			cat_nthw_kcc_id(be->p_cat_nthw, cat->v18.kcc_cam[len_index + i].id);
741 			cat_nthw_kcc_flush(be->p_cat_nthw);
742 		}
743 	}
744 
745 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
746 	return 0;
747 }
748 
749 /*
750  * KM
751  */
752 
753 static bool km_get_present(void *be_dev)
754 {
755 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
756 	return be->p_km_nthw != NULL;
757 }
758 
759 static uint32_t km_get_version(void *be_dev)
760 {
761 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
762 	return (uint32_t)((nthw_module_get_major_version(be->p_km_nthw->m_km) << 16) |
763 			(nthw_module_get_minor_version(be->p_km_nthw->m_km) & 0xffff));
764 }
765 
766 static int km_rcp_flush(void *be_dev, const struct km_func_s *km, int category, int cnt)
767 {
768 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
769 
770 	CHECK_DEBUG_ON(be, km, be->p_km_nthw);
771 
772 	if (km->ver == 7) {
773 		km_nthw_rcp_cnt(be->p_km_nthw, 1);
774 
775 		for (int i = 0; i < cnt; i++) {
776 			km_nthw_rcp_select(be->p_km_nthw, category + i);
777 			km_nthw_rcp_qw0_dyn(be->p_km_nthw, km->v7.rcp[category + i].qw0_dyn);
778 			km_nthw_rcp_qw0_ofs(be->p_km_nthw, km->v7.rcp[category + i].qw0_ofs);
779 			km_nthw_rcp_qw0_sel_a(be->p_km_nthw, km->v7.rcp[category + i].qw0_sel_a);
780 			km_nthw_rcp_qw0_sel_b(be->p_km_nthw, km->v7.rcp[category + i].qw0_sel_b);
781 			km_nthw_rcp_qw4_dyn(be->p_km_nthw, km->v7.rcp[category + i].qw4_dyn);
782 			km_nthw_rcp_qw4_ofs(be->p_km_nthw, km->v7.rcp[category + i].qw4_ofs);
783 			km_nthw_rcp_qw4_sel_a(be->p_km_nthw, km->v7.rcp[category + i].qw4_sel_a);
784 			km_nthw_rcp_qw4_sel_b(be->p_km_nthw, km->v7.rcp[category + i].qw4_sel_b);
785 			km_nthw_rcp_dw8_dyn(be->p_km_nthw, km->v7.rcp[category + i].dw8_dyn);
786 			km_nthw_rcp_dw8_ofs(be->p_km_nthw, km->v7.rcp[category + i].dw8_ofs);
787 			km_nthw_rcp_dw8_sel_a(be->p_km_nthw, km->v7.rcp[category + i].dw8_sel_a);
788 			km_nthw_rcp_dw8_sel_b(be->p_km_nthw, km->v7.rcp[category + i].dw8_sel_b);
789 			km_nthw_rcp_dw10_dyn(be->p_km_nthw, km->v7.rcp[category + i].dw10_dyn);
790 			km_nthw_rcp_dw10_ofs(be->p_km_nthw, km->v7.rcp[category + i].dw10_ofs);
791 			km_nthw_rcp_dw10_sel_a(be->p_km_nthw, km->v7.rcp[category + i].dw10_sel_a);
792 			km_nthw_rcp_dw10_sel_b(be->p_km_nthw, km->v7.rcp[category + i].dw10_sel_b);
793 			km_nthw_rcp_swx_cch(be->p_km_nthw, km->v7.rcp[category + i].swx_cch);
794 			km_nthw_rcp_swx_sel_a(be->p_km_nthw, km->v7.rcp[category + i].swx_sel_a);
795 			km_nthw_rcp_swx_sel_b(be->p_km_nthw, km->v7.rcp[category + i].swx_sel_b);
796 			km_nthw_rcp_mask_da(be->p_km_nthw, km->v7.rcp[category + i].mask_d_a);
797 			km_nthw_rcp_mask_b(be->p_km_nthw, km->v7.rcp[category + i].mask_b);
798 			km_nthw_rcp_dual(be->p_km_nthw, km->v7.rcp[category + i].dual);
799 			km_nthw_rcp_paired(be->p_km_nthw, km->v7.rcp[category + i].paired);
800 			km_nthw_rcp_el_a(be->p_km_nthw, km->v7.rcp[category + i].el_a);
801 			km_nthw_rcp_el_b(be->p_km_nthw, km->v7.rcp[category + i].el_b);
802 			km_nthw_rcp_info_a(be->p_km_nthw, km->v7.rcp[category + i].info_a);
803 			km_nthw_rcp_info_b(be->p_km_nthw, km->v7.rcp[category + i].info_b);
804 			km_nthw_rcp_ftm_a(be->p_km_nthw, km->v7.rcp[category + i].ftm_a);
805 			km_nthw_rcp_ftm_b(be->p_km_nthw, km->v7.rcp[category + i].ftm_b);
806 			km_nthw_rcp_bank_a(be->p_km_nthw, km->v7.rcp[category + i].bank_a);
807 			km_nthw_rcp_bank_b(be->p_km_nthw, km->v7.rcp[category + i].bank_b);
808 			km_nthw_rcp_kl_a(be->p_km_nthw, km->v7.rcp[category + i].kl_a);
809 			km_nthw_rcp_kl_b(be->p_km_nthw, km->v7.rcp[category + i].kl_b);
810 			km_nthw_rcp_keyway_a(be->p_km_nthw, km->v7.rcp[category + i].keyway_a);
811 			km_nthw_rcp_keyway_b(be->p_km_nthw, km->v7.rcp[category + i].keyway_b);
812 			km_nthw_rcp_synergy_mode(be->p_km_nthw,
813 				km->v7.rcp[category + i].synergy_mode);
814 			km_nthw_rcp_dw0_b_dyn(be->p_km_nthw, km->v7.rcp[category + i].dw0_b_dyn);
815 			km_nthw_rcp_dw0_b_ofs(be->p_km_nthw, km->v7.rcp[category + i].dw0_b_ofs);
816 			km_nthw_rcp_dw2_b_dyn(be->p_km_nthw, km->v7.rcp[category + i].dw2_b_dyn);
817 			km_nthw_rcp_dw2_b_ofs(be->p_km_nthw, km->v7.rcp[category + i].dw2_b_ofs);
818 			km_nthw_rcp_sw4_b_dyn(be->p_km_nthw, km->v7.rcp[category + i].sw4_b_dyn);
819 			km_nthw_rcp_sw4_b_ofs(be->p_km_nthw, km->v7.rcp[category + i].sw4_b_ofs);
820 			km_nthw_rcp_sw5_b_dyn(be->p_km_nthw, km->v7.rcp[category + i].sw5_b_dyn);
821 			km_nthw_rcp_sw5_b_ofs(be->p_km_nthw, km->v7.rcp[category + i].sw5_b_ofs);
822 			km_nthw_rcp_flush(be->p_km_nthw);
823 		}
824 	}
825 
826 	CHECK_DEBUG_OFF(km, be->p_km_nthw);
827 	return 0;
828 }
829 
830 static int km_cam_flush(void *be_dev, const struct km_func_s *km, int bank, int record, int cnt)
831 {
832 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
833 	CHECK_DEBUG_ON(be, km, be->p_km_nthw);
834 
835 	if (km->ver == 7) {
836 		km_nthw_cam_cnt(be->p_km_nthw, 1);
837 
838 		for (int i = 0; i < cnt; i++) {
839 			km_nthw_cam_select(be->p_km_nthw, (bank << 11) + record + i);
840 			km_nthw_cam_w0(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w0);
841 			km_nthw_cam_w1(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w1);
842 			km_nthw_cam_w2(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w2);
843 			km_nthw_cam_w3(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w3);
844 			km_nthw_cam_w4(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w4);
845 			km_nthw_cam_w5(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w5);
846 			km_nthw_cam_ft0(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft0);
847 			km_nthw_cam_ft1(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft1);
848 			km_nthw_cam_ft2(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft2);
849 			km_nthw_cam_ft3(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft3);
850 			km_nthw_cam_ft4(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft4);
851 			km_nthw_cam_ft5(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft5);
852 			km_nthw_cam_flush(be->p_km_nthw);
853 		}
854 	}
855 
856 	CHECK_DEBUG_OFF(km, be->p_km_nthw);
857 	return 0;
858 }
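
/*
 * The CAM shadow is addressed as (bank << 11) + record, i.e. 2048 record slots are
 * reserved per bank in the flat cam[] array. For example, bank 2, record 5 selects
 * entry (2 << 11) + 5 = 4101.
 */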
859 
860 static int km_tcam_flush(void *be_dev, const struct km_func_s *km, int bank, int byte, int value,
861 	int cnt)
862 {
863 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
864 	CHECK_DEBUG_ON(be, km, be->p_km_nthw);
865 
866 	if (km->ver == 7) {
867 		int start_idx = bank * 4 * 256 + byte * 256 + value;
868 		km_nthw_tcam_cnt(be->p_km_nthw, 1);
869 
870 		for (int i = 0; i < cnt; i++) {
871 			if (km->v7.tcam[start_idx + i].dirty) {
872 				km_nthw_tcam_select(be->p_km_nthw, start_idx + i);
873 				km_nthw_tcam_t(be->p_km_nthw, km->v7.tcam[start_idx + i].t);
874 				km_nthw_tcam_flush(be->p_km_nthw);
875 				km->v7.tcam[start_idx + i].dirty = 0;
876 			}
877 		}
878 	}
879 
880 	CHECK_DEBUG_OFF(km, be->p_km_nthw);
881 	return 0;
882 }
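
/*
 * TCAM entries are laid out as bank * 4 * 256 + byte * 256 + value, i.e. 1024 entries
 * per bank (4 byte positions of 256 values each). For example, bank 1, byte 2, value 10
 * gives start_idx = 1024 + 512 + 10 = 1546. Only entries whose dirty flag is set are
 * written, and the flag is cleared once the entry has been flushed.
 */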
883 
884 /*
885  * bank is the TCAM bank, index is the index within the bank (0..71)
886  */
887 static int km_tci_flush(void *be_dev, const struct km_func_s *km, int bank, int index, int cnt)
888 {
889 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
890 	CHECK_DEBUG_ON(be, km, be->p_km_nthw);
891 
892 	if (km->ver == 7) {
893 		/* TCAM bank width in version 3 = 72 */
894 		km_nthw_tci_cnt(be->p_km_nthw, 1);
895 
896 		for (int i = 0; i < cnt; i++) {
897 			km_nthw_tci_select(be->p_km_nthw, bank * 72 + index + i);
898 			km_nthw_tci_color(be->p_km_nthw, km->v7.tci[bank * 72 + index + i].color);
899 			km_nthw_tci_ft(be->p_km_nthw, km->v7.tci[bank * 72 + index + i].ft);
900 			km_nthw_tci_flush(be->p_km_nthw);
901 		}
902 	}
903 
904 	CHECK_DEBUG_OFF(km, be->p_km_nthw);
905 	return 0;
906 }
907 
908 /*
909  * bank is the TCAM bank, index is the index within the bank (0..71)
910  */
911 static int km_tcq_flush(void *be_dev, const struct km_func_s *km, int bank, int index, int cnt)
912 {
913 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
914 	CHECK_DEBUG_ON(be, km, be->p_km_nthw);
915 
916 	if (km->ver == 7) {
917 		/* TCAM bank width in version 3 = 72 */
918 		km_nthw_tcq_cnt(be->p_km_nthw, 1);
919 
920 		for (int i = 0; i < cnt; i++) {
921 			/* adr = lower 4 bits = bank, upper 7 bits = index */
922 			km_nthw_tcq_select(be->p_km_nthw, bank + (index << 4) + i);
923 			km_nthw_tcq_bank_mask(be->p_km_nthw,
924 				km->v7.tcq[bank + (index << 4) + i].bank_mask);
925 			km_nthw_tcq_qual(be->p_km_nthw, km->v7.tcq[bank + (index << 4) + i].qual);
926 			km_nthw_tcq_flush(be->p_km_nthw);
927 		}
928 	}
929 
930 	CHECK_DEBUG_OFF(km, be->p_km_nthw);
931 	return 0;
932 }
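
/*
 * Note the different address encodings used above: TCI entries are addressed linearly
 * as bank * 72 + index, while the TCQ address packs the bank into the lower 4 bits and
 * the index into the upper bits, bank + (index << 4). For example, bank 3, index 5
 * selects TCI entry 3 * 72 + 5 = 221 but TCQ address 3 + (5 << 4) = 83.
 */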
933 
934 /*
935  * FLM
936  */
937 
938 static bool flm_get_present(void *be_dev)
939 {
940 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
941 	return be->p_flm_nthw != NULL;
942 }
943 
944 static uint32_t flm_get_version(void *be_dev)
945 {
946 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
947 	return (uint32_t)((nthw_module_get_major_version(be->p_flm_nthw->m_flm) << 16) |
948 			(nthw_module_get_minor_version(be->p_flm_nthw->m_flm) & 0xffff));
949 }
950 
951 static int flm_control_flush(void *be_dev, const struct flm_func_s *flm)
952 {
953 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
954 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
955 
956 	if (flm->ver >= 25) {
957 		flm_nthw_control_enable(be->p_flm_nthw, flm->v25.control->enable);
958 		flm_nthw_control_init(be->p_flm_nthw, flm->v25.control->init);
959 		flm_nthw_control_lds(be->p_flm_nthw, flm->v25.control->lds);
960 		flm_nthw_control_lfs(be->p_flm_nthw, flm->v25.control->lfs);
961 		flm_nthw_control_lis(be->p_flm_nthw, flm->v25.control->lis);
962 		flm_nthw_control_uds(be->p_flm_nthw, flm->v25.control->uds);
963 		flm_nthw_control_uis(be->p_flm_nthw, flm->v25.control->uis);
964 		flm_nthw_control_rds(be->p_flm_nthw, flm->v25.control->rds);
965 		flm_nthw_control_ris(be->p_flm_nthw, flm->v25.control->ris);
966 		flm_nthw_control_pds(be->p_flm_nthw, flm->v25.control->pds);
967 		flm_nthw_control_pis(be->p_flm_nthw, flm->v25.control->pis);
968 		flm_nthw_control_crcwr(be->p_flm_nthw, flm->v25.control->crcwr);
969 		flm_nthw_control_crcrd(be->p_flm_nthw, flm->v25.control->crcrd);
970 		flm_nthw_control_rbl(be->p_flm_nthw, flm->v25.control->rbl);
971 		flm_nthw_control_eab(be->p_flm_nthw, flm->v25.control->eab);
972 		flm_nthw_control_split_sdram_usage(be->p_flm_nthw,
973 			flm->v25.control->split_sdram_usage);
974 		flm_nthw_control_flush(be->p_flm_nthw);
975 	}
976 
977 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
978 	return 0;
979 }
980 
981 static int flm_status_flush(void *be_dev, const struct flm_func_s *flm)
982 {
983 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
984 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
985 
986 	if (flm->ver >= 25) {
987 		/* CALIBDONE, INITDONE, IDLE, and EFT_BP are read-only */
988 		flm_nthw_status_critical(be->p_flm_nthw, &flm->v25.status->critical, 0);
989 		flm_nthw_status_panic(be->p_flm_nthw, &flm->v25.status->panic, 0);
990 		flm_nthw_status_crcerr(be->p_flm_nthw, &flm->v25.status->crcerr, 0);
991 		flm_nthw_status_cache_buf_crit(be->p_flm_nthw,
992 			&flm->v25.status->cache_buf_critical, 0);
993 		flm_nthw_status_flush(be->p_flm_nthw);
994 	}
995 
996 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
997 	return 0;
998 }
999 
1000 static int flm_status_update(void *be_dev, const struct flm_func_s *flm)
1001 {
1002 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1003 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1004 
1005 	if (flm->ver >= 25) {
1006 		flm_nthw_status_update(be->p_flm_nthw);
1007 		flm_nthw_status_calib_success(be->p_flm_nthw, &flm->v25.status->calib_success, 1);
1008 		flm_nthw_status_calib_fail(be->p_flm_nthw, &flm->v25.status->calib_fail, 1);
1009 		flm_nthw_status_initdone(be->p_flm_nthw, &flm->v25.status->initdone, 1);
1010 		flm_nthw_status_idle(be->p_flm_nthw, &flm->v25.status->idle, 1);
1011 		flm_nthw_status_critical(be->p_flm_nthw, &flm->v25.status->critical, 1);
1012 		flm_nthw_status_panic(be->p_flm_nthw, &flm->v25.status->panic, 1);
1013 		flm_nthw_status_crcerr(be->p_flm_nthw, &flm->v25.status->crcerr, 1);
1014 		flm_nthw_status_eft_bp(be->p_flm_nthw, &flm->v25.status->eft_bp, 1);
1015 		flm_nthw_status_cache_buf_crit(be->p_flm_nthw,
1016 			&flm->v25.status->cache_buf_critical, 1);
1017 	}
1018 
1019 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1020 	return 0;
1021 }
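
/*
 * flm_status_flush() above only writes the writable status bits, while
 * flm_status_update() triggers a register update and reads every status field back into
 * the shadow; the final argument of the field helpers appears to select the direction
 * (0 = set, 1 = get).
 */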
1022 
1023 static int flm_scan_flush(void *be_dev, const struct flm_func_s *flm)
1024 {
1025 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1026 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1027 
1028 	if (flm->ver >= 25) {
1029 		flm_nthw_scan_i(be->p_flm_nthw, flm->v25.scan->i);
1030 		flm_nthw_scan_flush(be->p_flm_nthw);
1031 	}
1032 
1033 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1034 	return 0;
1035 }
1036 
1037 static int flm_load_bin_flush(void *be_dev, const struct flm_func_s *flm)
1038 {
1039 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1040 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1041 
1042 	if (flm->ver >= 25) {
1043 		flm_nthw_load_bin(be->p_flm_nthw, flm->v25.load_bin->bin);
1044 		flm_nthw_load_bin_flush(be->p_flm_nthw);
1045 	}
1046 
1047 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1048 	return 0;
1049 }
1050 
1051 static int flm_prio_flush(void *be_dev, const struct flm_func_s *flm)
1052 {
1053 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1054 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1055 
1056 	if (flm->ver >= 25) {
1057 		flm_nthw_prio_limit0(be->p_flm_nthw, flm->v25.prio->limit0);
1058 		flm_nthw_prio_ft0(be->p_flm_nthw, flm->v25.prio->ft0);
1059 		flm_nthw_prio_limit1(be->p_flm_nthw, flm->v25.prio->limit1);
1060 		flm_nthw_prio_ft1(be->p_flm_nthw, flm->v25.prio->ft1);
1061 		flm_nthw_prio_limit2(be->p_flm_nthw, flm->v25.prio->limit2);
1062 		flm_nthw_prio_ft2(be->p_flm_nthw, flm->v25.prio->ft2);
1063 		flm_nthw_prio_limit3(be->p_flm_nthw, flm->v25.prio->limit3);
1064 		flm_nthw_prio_ft3(be->p_flm_nthw, flm->v25.prio->ft3);
1065 		flm_nthw_prio_flush(be->p_flm_nthw);
1066 	}
1067 
1068 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1069 	return 0;
1070 }
1071 
1072 static int flm_pst_flush(void *be_dev, const struct flm_func_s *flm, int index, int cnt)
1073 {
1074 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1075 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1076 
1077 	if (flm->ver >= 25) {
1078 		flm_nthw_pst_cnt(be->p_flm_nthw, 1);
1079 
1080 		for (int i = 0; i < cnt; i++) {
1081 			flm_nthw_pst_select(be->p_flm_nthw, index + i);
1082 			flm_nthw_pst_bp(be->p_flm_nthw, flm->v25.pst[index + i].bp);
1083 			flm_nthw_pst_pp(be->p_flm_nthw, flm->v25.pst[index + i].pp);
1084 			flm_nthw_pst_tp(be->p_flm_nthw, flm->v25.pst[index + i].tp);
1085 			flm_nthw_pst_flush(be->p_flm_nthw);
1086 		}
1087 	}
1088 
1089 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1090 	return 0;
1091 }
1092 
1093 static int flm_rcp_flush(void *be_dev, const struct flm_func_s *flm, int index, int cnt)
1094 {
1095 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1096 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1097 
1098 	if (flm->ver >= 25) {
1099 		flm_nthw_rcp_cnt(be->p_flm_nthw, 1);
1100 
1101 		for (int i = 0; i < cnt; i++) {
1102 			flm_nthw_rcp_select(be->p_flm_nthw, index + i);
1103 			flm_nthw_rcp_lookup(be->p_flm_nthw, flm->v25.rcp[index + i].lookup);
1104 			flm_nthw_rcp_qw0_dyn(be->p_flm_nthw, flm->v25.rcp[index + i].qw0_dyn);
1105 			flm_nthw_rcp_qw0_ofs(be->p_flm_nthw, flm->v25.rcp[index + i].qw0_ofs);
1106 			flm_nthw_rcp_qw0_sel(be->p_flm_nthw, flm->v25.rcp[index + i].qw0_sel);
1107 			flm_nthw_rcp_qw4_dyn(be->p_flm_nthw, flm->v25.rcp[index + i].qw4_dyn);
1108 			flm_nthw_rcp_qw4_ofs(be->p_flm_nthw, flm->v25.rcp[index + i].qw4_ofs);
1109 			flm_nthw_rcp_sw8_dyn(be->p_flm_nthw, flm->v25.rcp[index + i].sw8_dyn);
1110 			flm_nthw_rcp_sw8_ofs(be->p_flm_nthw, flm->v25.rcp[index + i].sw8_ofs);
1111 			flm_nthw_rcp_sw8_sel(be->p_flm_nthw, flm->v25.rcp[index + i].sw8_sel);
1112 			flm_nthw_rcp_sw9_dyn(be->p_flm_nthw, flm->v25.rcp[index + i].sw9_dyn);
1113 			flm_nthw_rcp_sw9_ofs(be->p_flm_nthw, flm->v25.rcp[index + i].sw9_ofs);
1114 			flm_nthw_rcp_mask(be->p_flm_nthw, flm->v25.rcp[index + i].mask);
1115 			flm_nthw_rcp_kid(be->p_flm_nthw, flm->v25.rcp[index + i].kid);
1116 			flm_nthw_rcp_opn(be->p_flm_nthw, flm->v25.rcp[index + i].opn);
1117 			flm_nthw_rcp_ipn(be->p_flm_nthw, flm->v25.rcp[index + i].ipn);
1118 			flm_nthw_rcp_byt_dyn(be->p_flm_nthw, flm->v25.rcp[index + i].byt_dyn);
1119 			flm_nthw_rcp_byt_ofs(be->p_flm_nthw, flm->v25.rcp[index + i].byt_ofs);
1120 			flm_nthw_rcp_txplm(be->p_flm_nthw, flm->v25.rcp[index + i].txplm);
1121 			flm_nthw_rcp_auto_ipv4_mask(be->p_flm_nthw,
1122 				flm->v25.rcp[index + i].auto_ipv4_mask);
1123 			flm_nthw_rcp_flush(be->p_flm_nthw);
1124 		}
1125 	}
1126 
1127 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1128 	return 0;
1129 }
1130 
1131 static int flm_scrub_flush(void *be_dev, const struct flm_func_s *flm, int index, int cnt)
1132 {
1133 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1134 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1135 
1136 	if (flm->ver >= 25) {
1137 		flm_nthw_scrub_cnt(be->p_flm_nthw, 1);
1138 
1139 		for (int i = 0; i < cnt; i++) {
1140 			flm_nthw_scrub_select(be->p_flm_nthw, index + i);
1141 			flm_nthw_scrub_t(be->p_flm_nthw, flm->v25.scrub[index + i].t);
1142 			flm_nthw_scrub_r(be->p_flm_nthw, flm->v25.scrub[index + i].r);
1143 			flm_nthw_scrub_del(be->p_flm_nthw, flm->v25.scrub[index + i].del);
1144 			flm_nthw_scrub_inf(be->p_flm_nthw, flm->v25.scrub[index + i].inf);
1145 			flm_nthw_scrub_flush(be->p_flm_nthw);
1146 		}
1147 	}
1148 
1149 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1150 	return 0;
1151 }
1152 
1153 static int flm_buf_ctrl_update(void *be_dev, const struct flm_func_s *flm)
1154 {
1155 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1156 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1157 
1158 	if (flm->ver >= 25) {
1159 		flm_nthw_buf_ctrl_update(be->p_flm_nthw,
1160 			&flm->v25.buf_ctrl->lrn_free,
1161 			&flm->v25.buf_ctrl->inf_avail,
1162 			&flm->v25.buf_ctrl->sta_avail);
1163 	}
1164 
1165 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1166 	return 0;
1167 }
1168 
1169 static int flm_stat_update(void *be_dev, const struct flm_func_s *flm)
1170 {
1171 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1172 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1173 
1174 	if (flm->ver >= 25) {
1175 		flm_nthw_stat_lrn_done_update(be->p_flm_nthw);
1176 		flm_nthw_stat_lrn_ignore_update(be->p_flm_nthw);
1177 		flm_nthw_stat_lrn_fail_update(be->p_flm_nthw);
1178 		flm_nthw_stat_unl_done_update(be->p_flm_nthw);
1179 		flm_nthw_stat_unl_ignore_update(be->p_flm_nthw);
1180 		flm_nthw_stat_rel_done_update(be->p_flm_nthw);
1181 		flm_nthw_stat_rel_ignore_update(be->p_flm_nthw);
1182 		flm_nthw_stat_aul_done_update(be->p_flm_nthw);
1183 		flm_nthw_stat_aul_ignore_update(be->p_flm_nthw);
1184 		flm_nthw_stat_aul_fail_update(be->p_flm_nthw);
1185 		flm_nthw_stat_tul_done_update(be->p_flm_nthw);
1186 		flm_nthw_stat_flows_update(be->p_flm_nthw);
1187 		flm_nthw_load_lps_update(be->p_flm_nthw);
1188 		flm_nthw_load_aps_update(be->p_flm_nthw);
1189 
1190 		flm_nthw_stat_lrn_done_cnt(be->p_flm_nthw, &flm->v25.lrn_done->cnt, 1);
1191 		flm_nthw_stat_lrn_ignore_cnt(be->p_flm_nthw, &flm->v25.lrn_ignore->cnt, 1);
1192 		flm_nthw_stat_lrn_fail_cnt(be->p_flm_nthw, &flm->v25.lrn_fail->cnt, 1);
1193 		flm_nthw_stat_unl_done_cnt(be->p_flm_nthw, &flm->v25.unl_done->cnt, 1);
1194 		flm_nthw_stat_unl_ignore_cnt(be->p_flm_nthw, &flm->v25.unl_ignore->cnt, 1);
1195 		flm_nthw_stat_rel_done_cnt(be->p_flm_nthw, &flm->v25.rel_done->cnt, 1);
1196 		flm_nthw_stat_rel_ignore_cnt(be->p_flm_nthw, &flm->v25.rel_ignore->cnt, 1);
1197 		flm_nthw_stat_aul_done_cnt(be->p_flm_nthw, &flm->v25.aul_done->cnt, 1);
1198 		flm_nthw_stat_aul_ignore_cnt(be->p_flm_nthw, &flm->v25.aul_ignore->cnt, 1);
1199 		flm_nthw_stat_aul_fail_cnt(be->p_flm_nthw, &flm->v25.aul_fail->cnt, 1);
1200 		flm_nthw_stat_tul_done_cnt(be->p_flm_nthw, &flm->v25.tul_done->cnt, 1);
1201 		flm_nthw_stat_flows_cnt(be->p_flm_nthw, &flm->v25.flows->cnt, 1);
1202 
1203 		flm_nthw_stat_prb_done_update(be->p_flm_nthw);
1204 		flm_nthw_stat_prb_ignore_update(be->p_flm_nthw);
1205 		flm_nthw_stat_prb_done_cnt(be->p_flm_nthw, &flm->v25.prb_done->cnt, 1);
1206 		flm_nthw_stat_prb_ignore_cnt(be->p_flm_nthw, &flm->v25.prb_ignore->cnt, 1);
1207 
1208 		flm_nthw_load_lps_cnt(be->p_flm_nthw, &flm->v25.load_lps->lps, 1);
1209 		flm_nthw_load_aps_cnt(be->p_flm_nthw, &flm->v25.load_aps->aps, 1);
1210 	}
1211 
1212 	if (flm->ver >= 25) {
1213 		flm_nthw_stat_sta_done_update(be->p_flm_nthw);
1214 		flm_nthw_stat_inf_done_update(be->p_flm_nthw);
1215 		flm_nthw_stat_inf_skip_update(be->p_flm_nthw);
1216 		flm_nthw_stat_pck_hit_update(be->p_flm_nthw);
1217 		flm_nthw_stat_pck_miss_update(be->p_flm_nthw);
1218 		flm_nthw_stat_pck_unh_update(be->p_flm_nthw);
1219 		flm_nthw_stat_pck_dis_update(be->p_flm_nthw);
1220 		flm_nthw_stat_csh_hit_update(be->p_flm_nthw);
1221 		flm_nthw_stat_csh_miss_update(be->p_flm_nthw);
1222 		flm_nthw_stat_csh_unh_update(be->p_flm_nthw);
1223 		flm_nthw_stat_cuc_start_update(be->p_flm_nthw);
1224 		flm_nthw_stat_cuc_move_update(be->p_flm_nthw);
1225 
1226 		flm_nthw_stat_sta_done_cnt(be->p_flm_nthw, &flm->v25.sta_done->cnt, 1);
1227 		flm_nthw_stat_inf_done_cnt(be->p_flm_nthw, &flm->v25.inf_done->cnt, 1);
1228 		flm_nthw_stat_inf_skip_cnt(be->p_flm_nthw, &flm->v25.inf_skip->cnt, 1);
1229 		flm_nthw_stat_pck_hit_cnt(be->p_flm_nthw, &flm->v25.pck_hit->cnt, 1);
1230 		flm_nthw_stat_pck_miss_cnt(be->p_flm_nthw, &flm->v25.pck_miss->cnt, 1);
1231 		flm_nthw_stat_pck_unh_cnt(be->p_flm_nthw, &flm->v25.pck_unh->cnt, 1);
1232 		flm_nthw_stat_pck_dis_cnt(be->p_flm_nthw, &flm->v25.pck_dis->cnt, 1);
1233 		flm_nthw_stat_csh_hit_cnt(be->p_flm_nthw, &flm->v25.csh_hit->cnt, 1);
1234 		flm_nthw_stat_csh_miss_cnt(be->p_flm_nthw, &flm->v25.csh_miss->cnt, 1);
1235 		flm_nthw_stat_csh_unh_cnt(be->p_flm_nthw, &flm->v25.csh_unh->cnt, 1);
1236 		flm_nthw_stat_cuc_start_cnt(be->p_flm_nthw, &flm->v25.cuc_start->cnt, 1);
1237 		flm_nthw_stat_cuc_move_cnt(be->p_flm_nthw, &flm->v25.cuc_move->cnt, 1);
1238 	}
1239 
1240 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1241 	return 0;
1242 }
1243 
1244 static int flm_lrn_data_flush(void *be_dev, const struct flm_func_s *flm, const uint32_t *lrn_data,
1245 	uint32_t records, uint32_t *handled_records,
1246 	uint32_t words_per_record, uint32_t *inf_word_cnt,
1247 	uint32_t *sta_word_cnt)
1248 {
1249 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1250 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1251 
1252 	int ret = flm_nthw_lrn_data_flush(be->p_flm_nthw, lrn_data, records, words_per_record,
1253 			handled_records, &flm->v25.buf_ctrl->lrn_free,
1254 			&flm->v25.buf_ctrl->inf_avail,
1255 			&flm->v25.buf_ctrl->sta_avail);
1256 
1257 	*inf_word_cnt = flm->v25.buf_ctrl->inf_avail;
1258 	*sta_word_cnt = flm->v25.buf_ctrl->sta_avail;
1259 
1260 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1261 	return ret;
1262 }
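
/*
 * lrn_data holds `records` learn records of `words_per_record` 32-bit words each.
 * flm_nthw_lrn_data_flush() hands them to the hardware, reports how many were consumed
 * through handled_records and refreshes the buf_ctrl shadow counters, whose inf/sta
 * availability is then copied back to the caller.
 */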
1263 
1264 static int flm_inf_sta_data_update(void *be_dev, const struct flm_func_s *flm, uint32_t *inf_data,
1265 	uint32_t inf_size, uint32_t *inf_word_cnt, uint32_t *sta_data,
1266 	uint32_t sta_size, uint32_t *sta_word_cnt)
1267 {
1268 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1269 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1270 
1271 	int ret = flm_nthw_inf_sta_data_update(be->p_flm_nthw, inf_data, inf_size, sta_data,
1272 			sta_size, &flm->v25.buf_ctrl->lrn_free,
1273 			&flm->v25.buf_ctrl->inf_avail,
1274 			&flm->v25.buf_ctrl->sta_avail);
1275 
1276 	*inf_word_cnt = flm->v25.buf_ctrl->inf_avail;
1277 	*sta_word_cnt = flm->v25.buf_ctrl->sta_avail;
1278 
1279 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1280 	return ret;
1281 }
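
/*
 * The mirror operation of the learn flush above: flm_nthw_inf_sta_data_update() reads
 * up to inf_size/sta_size words into the caller-provided inf_data/sta_data buffers and
 * refreshes the same buf_ctrl counters, which are again returned through
 * *inf_word_cnt / *sta_word_cnt.
 */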
1282 
1283 /*
1284  * HSH
1285  */
1286 
1287 static bool hsh_get_present(void *be_dev)
1288 {
1289 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1290 	return be->p_hsh_nthw != NULL;
1291 }
1292 
1293 static uint32_t hsh_get_version(void *be_dev)
1294 {
1295 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1296 	return (uint32_t)((nthw_module_get_major_version(be->p_hsh_nthw->m_hsh) << 16) |
1297 			(nthw_module_get_minor_version(be->p_hsh_nthw->m_hsh) & 0xffff));
1298 }
1299 
1300 static int hsh_rcp_flush(void *be_dev, const struct hsh_func_s *hsh, int category, int cnt)
1301 {
1302 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1303 	CHECK_DEBUG_ON(be, hsh, be->p_hsh_nthw);
1304 
1305 	if (hsh->ver == 5) {
1306 		hsh_nthw_rcp_cnt(be->p_hsh_nthw, 1);
1307 
1308 		for (int i = 0; i < cnt; i++) {
1309 			hsh_nthw_rcp_select(be->p_hsh_nthw, category + i);
1310 			hsh_nthw_rcp_load_dist_type(be->p_hsh_nthw,
1311 				hsh->v5.rcp[category + i].load_dist_type);
1312 			hsh_nthw_rcp_mac_port_mask(be->p_hsh_nthw,
1313 				hsh->v5.rcp[category + i].mac_port_mask);
1314 			hsh_nthw_rcp_sort(be->p_hsh_nthw, hsh->v5.rcp[category + i].sort);
1315 			hsh_nthw_rcp_qw0_pe(be->p_hsh_nthw, hsh->v5.rcp[category + i].qw0_pe);
1316 			hsh_nthw_rcp_qw0_ofs(be->p_hsh_nthw, hsh->v5.rcp[category + i].qw0_ofs);
1317 			hsh_nthw_rcp_qw4_pe(be->p_hsh_nthw, hsh->v5.rcp[category + i].qw4_pe);
1318 			hsh_nthw_rcp_qw4_ofs(be->p_hsh_nthw, hsh->v5.rcp[category + i].qw4_ofs);
1319 			hsh_nthw_rcp_w8_pe(be->p_hsh_nthw, hsh->v5.rcp[category + i].w8_pe);
1320 			hsh_nthw_rcp_w8_ofs(be->p_hsh_nthw, hsh->v5.rcp[category + i].w8_ofs);
1321 			hsh_nthw_rcp_w8_sort(be->p_hsh_nthw, hsh->v5.rcp[category + i].w8_sort);
1322 			hsh_nthw_rcp_w9_pe(be->p_hsh_nthw, hsh->v5.rcp[category + i].w9_pe);
1323 			hsh_nthw_rcp_w9_ofs(be->p_hsh_nthw, hsh->v5.rcp[category + i].w9_ofs);
1324 			hsh_nthw_rcp_w9_sort(be->p_hsh_nthw, hsh->v5.rcp[category + i].w9_sort);
1325 			hsh_nthw_rcp_w9_p(be->p_hsh_nthw, hsh->v5.rcp[category + i].w9_p);
1326 			hsh_nthw_rcp_p_mask(be->p_hsh_nthw, hsh->v5.rcp[category + i].p_mask);
1327 			hsh_nthw_rcp_word_mask(be->p_hsh_nthw,
1328 				hsh->v5.rcp[category + i].word_mask);
1329 			hsh_nthw_rcp_seed(be->p_hsh_nthw, hsh->v5.rcp[category + i].seed);
1330 			hsh_nthw_rcp_tnl_p(be->p_hsh_nthw, hsh->v5.rcp[category + i].tnl_p);
1331 			hsh_nthw_rcp_hsh_valid(be->p_hsh_nthw,
1332 				hsh->v5.rcp[category + i].hsh_valid);
1333 			hsh_nthw_rcp_hsh_type(be->p_hsh_nthw, hsh->v5.rcp[category + i].hsh_type);
1334 			hsh_nthw_rcp_toeplitz(be->p_hsh_nthw, hsh->v5.rcp[category + i].toeplitz);
1335 			hsh_nthw_rcp_k(be->p_hsh_nthw, hsh->v5.rcp[category + i].k);
1336 			hsh_nthw_rcp_auto_ipv4_mask(be->p_hsh_nthw,
1337 				hsh->v5.rcp[category + i].auto_ipv4_mask);
1338 			hsh_nthw_rcp_flush(be->p_hsh_nthw);
1339 		}
1340 	}
1341 
1342 	CHECK_DEBUG_OFF(hsh, be->p_hsh_nthw);
1343 	return 0;
1344 }
1345 
1346 /*
1347  * QSL
1348  */
1349 
1350 static bool qsl_get_present(void *be_dev)
1351 {
1352 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1353 	return be->p_qsl_nthw != NULL;
1354 }
1355 
1356 static uint32_t qsl_get_version(void *be_dev)
1357 {
1358 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1359 	return (uint32_t)((nthw_module_get_major_version(be->p_qsl_nthw->m_qsl) << 16) |
1360 			(nthw_module_get_minor_version(be->p_qsl_nthw->m_qsl) & 0xffff));
1361 }
1362 
1363 static int qsl_rcp_flush(void *be_dev, const struct qsl_func_s *qsl, int category, int cnt)
1364 {
1365 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1366 	CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
1367 
1368 	if (qsl->ver == 7) {
1369 		qsl_nthw_rcp_cnt(be->p_qsl_nthw, 1);
1370 
1371 		for (int i = 0; i < cnt; i++) {
1372 			qsl_nthw_rcp_select(be->p_qsl_nthw, category + i);
1373 			qsl_nthw_rcp_discard(be->p_qsl_nthw, qsl->v7.rcp[category + i].discard);
1374 			qsl_nthw_rcp_drop(be->p_qsl_nthw, qsl->v7.rcp[category + i].drop);
1375 			qsl_nthw_rcp_tbl_lo(be->p_qsl_nthw, qsl->v7.rcp[category + i].tbl_lo);
1376 			qsl_nthw_rcp_tbl_hi(be->p_qsl_nthw, qsl->v7.rcp[category + i].tbl_hi);
1377 			qsl_nthw_rcp_tbl_idx(be->p_qsl_nthw, qsl->v7.rcp[category + i].tbl_idx);
1378 			qsl_nthw_rcp_tbl_msk(be->p_qsl_nthw, qsl->v7.rcp[category + i].tbl_msk);
1379 			qsl_nthw_rcp_lr(be->p_qsl_nthw, qsl->v7.rcp[category + i].lr);
1380 			qsl_nthw_rcp_tsa(be->p_qsl_nthw, qsl->v7.rcp[category + i].tsa);
1381 			qsl_nthw_rcp_vli(be->p_qsl_nthw, qsl->v7.rcp[category + i].vli);
1382 			qsl_nthw_rcp_flush(be->p_qsl_nthw);
1383 		}
1384 	}
1385 
1386 	CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
1387 	return 0;
1388 }
1389 
1390 static int qsl_qst_flush(void *be_dev, const struct qsl_func_s *qsl, int entry, int cnt)
1391 {
1392 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1393 	CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
1394 
1395 	if (qsl->ver == 7) {
1396 		qsl_nthw_qst_cnt(be->p_qsl_nthw, 1);
1397 
1398 		for (int i = 0; i < cnt; i++) {
1399 			qsl_nthw_qst_select(be->p_qsl_nthw, entry + i);
1400 			qsl_nthw_qst_queue(be->p_qsl_nthw, qsl->v7.qst[entry + i].queue);
1401 			qsl_nthw_qst_en(be->p_qsl_nthw, qsl->v7.qst[entry + i].en);
1402 
1403 			qsl_nthw_qst_tx_port(be->p_qsl_nthw, qsl->v7.qst[entry + i].tx_port);
1404 			qsl_nthw_qst_lre(be->p_qsl_nthw, qsl->v7.qst[entry + i].lre);
1405 			qsl_nthw_qst_tci(be->p_qsl_nthw, qsl->v7.qst[entry + i].tci);
1406 			qsl_nthw_qst_ven(be->p_qsl_nthw, qsl->v7.qst[entry + i].ven);
1407 			qsl_nthw_qst_flush(be->p_qsl_nthw);
1408 		}
1409 	}
1410 
1411 	CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
1412 	return 0;
1413 }
1414 
1415 static int qsl_qen_flush(void *be_dev, const struct qsl_func_s *qsl, int entry, int cnt)
1416 {
1417 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1418 	CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
1419 
1420 	if (qsl->ver == 7) {
1421 		qsl_nthw_qen_cnt(be->p_qsl_nthw, 1);
1422 
1423 		for (int i = 0; i < cnt; i++) {
1424 			qsl_nthw_qen_select(be->p_qsl_nthw, entry + i);
1425 			qsl_nthw_qen_en(be->p_qsl_nthw, qsl->v7.qen[entry + i].en);
1426 			qsl_nthw_qen_flush(be->p_qsl_nthw);
1427 		}
1428 	}
1429 
1430 	CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
1431 	return 0;
1432 }
1433 
1434 static int qsl_unmq_flush(void *be_dev, const struct qsl_func_s *qsl, int entry, int cnt)
1435 {
1436 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1437 	CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
1438 
1439 	if (qsl->ver == 7) {
1440 		qsl_nthw_unmq_cnt(be->p_qsl_nthw, 1);
1441 
1442 		for (int i = 0; i < cnt; i++) {
1443 			qsl_nthw_unmq_select(be->p_qsl_nthw, entry + i);
1444 			qsl_nthw_unmq_dest_queue(be->p_qsl_nthw,
1445 				qsl->v7.unmq[entry + i].dest_queue);
1446 			qsl_nthw_unmq_en(be->p_qsl_nthw, qsl->v7.unmq[entry + i].en);
1447 			qsl_nthw_unmq_flush(be->p_qsl_nthw);
1448 		}
1449 	}
1450 
1451 	CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
1452 	return 0;
1453 }
1454 
1455 /*
1456  * SLC LR
1457  */
1458 
1459 static bool slc_lr_get_present(void *be_dev)
1460 {
1461 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1462 	return be->p_slc_lr_nthw != NULL;
1463 }
1464 
1465 static uint32_t slc_lr_get_version(void *be_dev)
1466 {
1467 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1468 	return (uint32_t)((nthw_module_get_major_version(be->p_slc_lr_nthw->m_slc_lr) << 16) |
1469 			(nthw_module_get_minor_version(be->p_slc_lr_nthw->m_slc_lr) & 0xffff));
1470 }
1471 
1472 static int slc_lr_rcp_flush(void *be_dev, const struct slc_lr_func_s *slc_lr, int category,
1473 	int cnt)
1474 {
1475 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1476 	CHECK_DEBUG_ON(be, slc_lr, be->p_slc_lr_nthw);
1477 
1478 	if (slc_lr->ver == 2) {
1479 		slc_lr_nthw_rcp_cnt(be->p_slc_lr_nthw, 1);
1480 
1481 		for (int i = 0; i < cnt; i++) {
1482 			slc_lr_nthw_rcp_select(be->p_slc_lr_nthw, category + i);
1483 			slc_lr_nthw_rcp_head_slc_en(be->p_slc_lr_nthw,
1484 				slc_lr->v2.rcp[category + i].head_slc_en);
1485 			slc_lr_nthw_rcp_head_dyn(be->p_slc_lr_nthw,
1486 				slc_lr->v2.rcp[category + i].head_dyn);
1487 			slc_lr_nthw_rcp_head_ofs(be->p_slc_lr_nthw,
1488 				slc_lr->v2.rcp[category + i].head_ofs);
1489 			slc_lr_nthw_rcp_tail_slc_en(be->p_slc_lr_nthw,
1490 				slc_lr->v2.rcp[category + i].tail_slc_en);
1491 			slc_lr_nthw_rcp_tail_dyn(be->p_slc_lr_nthw,
1492 				slc_lr->v2.rcp[category + i].tail_dyn);
1493 			slc_lr_nthw_rcp_tail_ofs(be->p_slc_lr_nthw,
1494 				slc_lr->v2.rcp[category + i].tail_ofs);
1495 			slc_lr_nthw_rcp_pcap(be->p_slc_lr_nthw, slc_lr->v2.rcp[category + i].pcap);
1496 			slc_lr_nthw_rcp_flush(be->p_slc_lr_nthw);
1497 		}
1498 	}
1499 
1500 	CHECK_DEBUG_OFF(slc_lr, be->p_slc_lr_nthw);
1501 	return 0;
1502 }
1503 
1504 /*
1505  * PDB
1506  */
1507 
1508 static bool pdb_get_present(void *be_dev)
1509 {
1510 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1511 	return be->p_pdb_nthw != NULL;
1512 }
1513 
1514 static uint32_t pdb_get_version(void *be_dev)
1515 {
1516 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1517 	return (uint32_t)((nthw_module_get_major_version(be->p_pdb_nthw->m_pdb) << 16) |
1518 			(nthw_module_get_minor_version(be->p_pdb_nthw->m_pdb) & 0xffff));
1519 }
1520 
1521 static int pdb_rcp_flush(void *be_dev, const struct pdb_func_s *pdb, int category, int cnt)
1522 {
1523 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1524 	CHECK_DEBUG_ON(be, pdb, be->p_pdb_nthw);
1525 
1526 	if (pdb->ver == 9) {
1527 		pdb_nthw_rcp_cnt(be->p_pdb_nthw, 1);
1528 
1529 		for (int i = 0; i < cnt; i++) {
1530 			pdb_nthw_rcp_select(be->p_pdb_nthw, category + i);
1531 			pdb_nthw_rcp_descriptor(be->p_pdb_nthw,
1532 				pdb->v9.rcp[category + i].descriptor);
1533 			pdb_nthw_rcp_desc_len(be->p_pdb_nthw, pdb->v9.rcp[category + i].desc_len);
1534 			pdb_nthw_rcp_tx_port(be->p_pdb_nthw, pdb->v9.rcp[category + i].tx_port);
1535 			pdb_nthw_rcp_tx_ignore(be->p_pdb_nthw,
1536 				pdb->v9.rcp[category + i].tx_ignore);
1537 			pdb_nthw_rcp_tx_now(be->p_pdb_nthw, pdb->v9.rcp[category + i].tx_now);
1538 			pdb_nthw_rcp_crc_overwrite(be->p_pdb_nthw,
1539 				pdb->v9.rcp[category + i].crc_overwrite);
1540 			pdb_nthw_rcp_align(be->p_pdb_nthw, pdb->v9.rcp[category + i].align);
1541 			pdb_nthw_rcp_ofs0_dyn(be->p_pdb_nthw, pdb->v9.rcp[category + i].ofs0_dyn);
1542 			pdb_nthw_rcp_ofs0_rel(be->p_pdb_nthw, pdb->v9.rcp[category + i].ofs0_rel);
1543 			pdb_nthw_rcp_ofs1_dyn(be->p_pdb_nthw, pdb->v9.rcp[category + i].ofs1_dyn);
1544 			pdb_nthw_rcp_ofs1_rel(be->p_pdb_nthw, pdb->v9.rcp[category + i].ofs1_rel);
1545 			pdb_nthw_rcp_ofs2_dyn(be->p_pdb_nthw, pdb->v9.rcp[category + i].ofs2_dyn);
1546 			pdb_nthw_rcp_ofs2_rel(be->p_pdb_nthw, pdb->v9.rcp[category + i].ofs2_rel);
1547 			pdb_nthw_rcp_ip_prot_tnl(be->p_pdb_nthw,
1548 				pdb->v9.rcp[category + i].ip_prot_tnl);
1549 			pdb_nthw_rcp_ppc_hsh(be->p_pdb_nthw, pdb->v9.rcp[category + i].ppc_hsh);
1550 			pdb_nthw_rcp_duplicate_en(be->p_pdb_nthw,
1551 				pdb->v9.rcp[category + i].duplicate_en);
1552 			pdb_nthw_rcp_duplicate_bit(be->p_pdb_nthw,
1553 				pdb->v9.rcp[category + i].duplicate_bit);
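			/*
			 * Note: the next call reuses the DUPLICATE_BIT setter but passes the
			 * pcap_keep_fcs value; this looks like a copy-paste and should likely
			 * use a dedicated PCAP_KEEP_FCS setter, if flow_nthw_pdb provides one.
			 */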
1554 			pdb_nthw_rcp_duplicate_bit(be->p_pdb_nthw,
1555 				pdb->v9.rcp[category + i].pcap_keep_fcs);
1556 			pdb_nthw_rcp_flush(be->p_pdb_nthw);
1557 		}
1558 	}
1559 
1560 	CHECK_DEBUG_OFF(pdb, be->p_pdb_nthw);
1561 	return 0;
1562 }
1563 
1564 static int pdb_config_flush(void *be_dev, const struct pdb_func_s *pdb)
1565 {
1566 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1567 	CHECK_DEBUG_ON(be, pdb, be->p_pdb_nthw);
1568 
1569 	if (pdb->ver == 9) {
1570 		pdb_nthw_config_ts_format(be->p_pdb_nthw, pdb->v9.config->ts_format);
1571 		pdb_nthw_config_port_ofs(be->p_pdb_nthw, pdb->v9.config->port_ofs);
1572 		pdb_nthw_config_flush(be->p_pdb_nthw);
1573 	}
1574 
1575 	CHECK_DEBUG_OFF(pdb, be->p_pdb_nthw);
1576 	return 0;
1577 }
1578 
1579 /*
1580  * TPE
1581  */
1582 
1583 static bool tpe_get_present(void *be_dev)
1584 {
1585 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1586 	return be->p_csu_nthw != NULL && be->p_hfu_nthw != NULL && be->p_rpp_lr_nthw != NULL &&
1587 		be->p_tx_cpy_nthw != NULL && be->p_tx_ins_nthw != NULL &&
1588 		be->p_tx_rpl_nthw != NULL;
1589 }
1590 
1591 static uint32_t tpe_get_version(void *be_dev)
1592 {
1593 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1594 
1595 	const uint32_t csu_version =
1596 		(uint32_t)((nthw_module_get_major_version(be->p_csu_nthw->m_csu) << 16) |
1597 			(nthw_module_get_minor_version(be->p_csu_nthw->m_csu) & 0xffff));
1598 
1599 	const uint32_t hfu_version =
1600 		(uint32_t)((nthw_module_get_major_version(be->p_hfu_nthw->m_hfu) << 16) |
1601 			(nthw_module_get_minor_version(be->p_hfu_nthw->m_hfu) & 0xffff));
1602 
1603 	const uint32_t rpp_lr_version =
1604 		(uint32_t)((nthw_module_get_major_version(be->p_rpp_lr_nthw->m_rpp_lr) << 16) |
1605 			(nthw_module_get_minor_version(be->p_rpp_lr_nthw->m_rpp_lr) & 0xffff));
1606 
1607 	const uint32_t tx_cpy_version =
1608 		(uint32_t)((nthw_module_get_major_version(be->p_tx_cpy_nthw->m_tx_cpy) << 16) |
1609 			(nthw_module_get_minor_version(be->p_tx_cpy_nthw->m_tx_cpy) & 0xffff));
1610 
1611 	const uint32_t tx_ins_version =
1612 		(uint32_t)((nthw_module_get_major_version(be->p_tx_ins_nthw->m_tx_ins) << 16) |
1613 			(nthw_module_get_minor_version(be->p_tx_ins_nthw->m_tx_ins) & 0xffff));
1614 
1615 	const uint32_t tx_rpl_version =
1616 		(uint32_t)((nthw_module_get_major_version(be->p_tx_rpl_nthw->m_tx_rpl) << 16) |
1617 			(nthw_module_get_minor_version(be->p_tx_rpl_nthw->m_tx_rpl) & 0xffff));
1618 
1619 	/*
1620 	 * We have to support the 9563-55-28 and 9563-55-30 FPGA images, so accept
1621 	 * either TX_INS v0.1 with TX_RPL v0.2 or TX_INS v0.2 with TX_RPL v0.4.
1622 	 */
1623 	if (csu_version == 0 && hfu_version == 2 && rpp_lr_version >= 1 && tx_cpy_version == 2 &&
1624 		((tx_ins_version == 1 && tx_rpl_version == 2) ||
1625 			(tx_ins_version == 2 && tx_rpl_version == 4))) {
1626 		return 3;
1627 	}
1628 
1629 	if (csu_version == 0 && hfu_version == 2 && rpp_lr_version >= 1 && tx_cpy_version == 4 &&
1630 		((tx_ins_version == 1 && tx_rpl_version == 2) ||
1631 			(tx_ins_version == 2 && tx_rpl_version == 4))) {
1632 		return 3;
1633 	}
1634 
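	/* No known module combination matched: trap in debug builds, otherwise report version 0. */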
1635 	assert(false);
1636 	return 0;
1637 }
1638 
1639 static int tpe_rpp_rcp_flush(void *be_dev, const struct tpe_func_s *rpp_lr, int index, int cnt)
1640 {
1641 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1642 	CHECK_DEBUG_ON(be, rpp_lr, be->p_rpp_lr_nthw);
1643 
1644 	if (rpp_lr->ver >= 1) {
1645 		rpp_lr_nthw_rcp_cnt(be->p_rpp_lr_nthw, 1);
1646 
1647 		for (int i = 0; i < cnt; i++) {
1648 			rpp_lr_nthw_rcp_select(be->p_rpp_lr_nthw, index + i);
1649 			rpp_lr_nthw_rcp_exp(be->p_rpp_lr_nthw, rpp_lr->v3.rpp_rcp[index + i].exp);
1650 			rpp_lr_nthw_rcp_flush(be->p_rpp_lr_nthw);
1651 		}
1652 	}
1653 
1654 	CHECK_DEBUG_OFF(rpp_lr, be->p_rpp_lr_nthw);
1655 	return 0;
1656 }
1657 
1658 static int tpe_rpp_ifr_rcp_flush(void *be_dev, const struct tpe_func_s *rpp_lr, int index, int cnt)
1659 {
1660 	int res = 0;
1661 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1662 	CHECK_DEBUG_ON(be, rpp_lr, be->p_rpp_lr_nthw);
1663 
1664 	if (rpp_lr->ver >= 2) {
1665 		rpp_lr_nthw_ifr_rcp_cnt(be->p_rpp_lr_nthw, 1);
1666 
1667 		for (int i = 0; i < cnt; i++) {
1668 			rpp_lr_nthw_ifr_rcp_select(be->p_rpp_lr_nthw, index + i);
1669 			rpp_lr_nthw_ifr_rcp_ipv4_en(be->p_rpp_lr_nthw,
1670 				rpp_lr->v3.rpp_ifr_rcp[index + i].ipv4_en);
1671 			rpp_lr_nthw_ifr_rcp_ipv4_df_drop(be->p_rpp_lr_nthw,
1672 				rpp_lr->v3.rpp_ifr_rcp[index + i]
1673 				.ipv4_df_drop);
1674 			rpp_lr_nthw_ifr_rcp_ipv6_en(be->p_rpp_lr_nthw,
1675 				rpp_lr->v3.rpp_ifr_rcp[index + i].ipv6_en);
1676 			rpp_lr_nthw_ifr_rcp_ipv6_drop(be->p_rpp_lr_nthw,
1677 				rpp_lr->v3.rpp_ifr_rcp[index + i].ipv6_drop);
1678 			rpp_lr_nthw_ifr_rcp_mtu(be->p_rpp_lr_nthw,
1679 				rpp_lr->v3.rpp_ifr_rcp[index + i].mtu);
1680 			rpp_lr_nthw_ifr_rcp_flush(be->p_rpp_lr_nthw);
1681 		}
1682 
1683 	} else {
1684 		res = -1;
1685 	}
1686 
1687 	CHECK_DEBUG_OFF(rpp_lr, be->p_rpp_lr_nthw);
1688 	return res;
1689 }
1690 
1691 static int tpe_ifr_rcp_flush(void *be_dev, const struct tpe_func_s *ifr, int index, int cnt)
1692 {
1693 	int res = 0;
1694 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1695 	CHECK_DEBUG_ON(be, ifr, be->p_ifr_nthw);
1696 
1697 	if (ifr->ver >= 2) {
1698 		ifr_nthw_rcp_cnt(be->p_ifr_nthw, 1);
1699 
1700 		for (int i = 0; i < cnt; i++) {
1701 			ifr_nthw_rcp_select(be->p_ifr_nthw, index + i);
1702 			ifr_nthw_rcp_ipv4_en(be->p_ifr_nthw, ifr->v3.ifr_rcp[index + i].ipv4_en);
1703 			ifr_nthw_rcp_ipv4_df_drop(be->p_ifr_nthw,
1704 				ifr->v3.ifr_rcp[index + i].ipv4_df_drop);
1705 			ifr_nthw_rcp_ipv6_en(be->p_ifr_nthw, ifr->v3.ifr_rcp[index + i].ipv6_en);
1706 			ifr_nthw_rcp_ipv6_drop(be->p_ifr_nthw,
1707 				ifr->v3.ifr_rcp[index + i].ipv6_drop);
1708 			ifr_nthw_rcp_mtu(be->p_ifr_nthw, ifr->v3.ifr_rcp[index + i].mtu);
1709 			ifr_nthw_rcp_flush(be->p_ifr_nthw);
1710 		}
1711 
1712 	} else {
1713 		res = -1;
1714 	}
1715 
1716 	CHECK_DEBUG_OFF(ifr, be->p_ifr_nthw);
1717 	return res;
1718 }
1719 
1720 static int tpe_ins_rcp_flush(void *be_dev, const struct tpe_func_s *tx_ins, int index, int cnt)
1721 {
1722 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1723 	CHECK_DEBUG_ON(be, tx_ins, be->p_tx_ins_nthw);
1724 
1725 	if (tx_ins->ver >= 1) {
1726 		tx_ins_nthw_rcp_cnt(be->p_tx_ins_nthw, 1);
1727 
1728 		for (int i = 0; i < cnt; i++) {
1729 			tx_ins_nthw_rcp_select(be->p_tx_ins_nthw, index + i);
1730 			tx_ins_nthw_rcp_dyn(be->p_tx_ins_nthw, tx_ins->v3.ins_rcp[index + i].dyn);
1731 			tx_ins_nthw_rcp_ofs(be->p_tx_ins_nthw, tx_ins->v3.ins_rcp[index + i].ofs);
1732 			tx_ins_nthw_rcp_len(be->p_tx_ins_nthw, tx_ins->v3.ins_rcp[index + i].len);
1733 			tx_ins_nthw_rcp_flush(be->p_tx_ins_nthw);
1734 		}
1735 	}
1736 
1737 	CHECK_DEBUG_OFF(tx_ins, be->p_tx_ins_nthw);
1738 	return 0;
1739 }
1740 
1741 static int tpe_rpl_rcp_flush(void *be_dev, const struct tpe_func_s *tx_rpl, int index, int cnt)
1742 {
1743 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1744 	CHECK_DEBUG_ON(be, tx_rpl, be->p_tx_rpl_nthw);
1745 
1746 	if (tx_rpl->ver >= 1) {
1747 		tx_rpl_nthw_rcp_cnt(be->p_tx_rpl_nthw, 1);
1748 
1749 		for (int i = 0; i < cnt; i++) {
1750 			tx_rpl_nthw_rcp_select(be->p_tx_rpl_nthw, index + i);
1751 			tx_rpl_nthw_rcp_dyn(be->p_tx_rpl_nthw, tx_rpl->v3.rpl_rcp[index + i].dyn);
1752 			tx_rpl_nthw_rcp_ofs(be->p_tx_rpl_nthw, tx_rpl->v3.rpl_rcp[index + i].ofs);
1753 			tx_rpl_nthw_rcp_len(be->p_tx_rpl_nthw, tx_rpl->v3.rpl_rcp[index + i].len);
1754 			tx_rpl_nthw_rcp_rpl_ptr(be->p_tx_rpl_nthw,
1755 				tx_rpl->v3.rpl_rcp[index + i].rpl_ptr);
1756 			tx_rpl_nthw_rcp_ext_prio(be->p_tx_rpl_nthw,
1757 				tx_rpl->v3.rpl_rcp[index + i].ext_prio);
1758 
1759 			if (tx_rpl->ver >= 3) {
1760 				tx_rpl_nthw_rcp_eth_type_wr(be->p_tx_rpl_nthw,
1761 					tx_rpl->v3.rpl_rcp[index + i]
1762 					.eth_type_wr);
1763 			}
1764 
1765 			tx_rpl_nthw_rcp_flush(be->p_tx_rpl_nthw);
1766 		}
1767 	}
1768 
1769 	CHECK_DEBUG_OFF(tx_rpl, be->p_tx_rpl_nthw);
1770 	return 0;
1771 }
1772 
1773 static int tpe_rpl_ext_flush(void *be_dev, const struct tpe_func_s *tx_rpl, int index, int cnt)
1774 {
1775 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1776 	CHECK_DEBUG_ON(be, tx_rpl, be->p_tx_rpl_nthw);
1777 
1778 	if (tx_rpl->ver >= 1) {
1779 		tx_rpl_nthw_ext_cnt(be->p_tx_rpl_nthw, 1);
1780 
1781 		for (int i = 0; i < cnt; i++) {
1782 			tx_rpl_nthw_ext_select(be->p_tx_rpl_nthw, index + i);
1783 			tx_rpl_nthw_ext_rpl_ptr(be->p_tx_rpl_nthw,
1784 				tx_rpl->v3.rpl_ext[index + i].rpl_ptr);
1785 			tx_rpl_nthw_ext_flush(be->p_tx_rpl_nthw);
1786 		}
1787 	}
1788 
1789 	CHECK_DEBUG_OFF(tx_rpl, be->p_tx_rpl_nthw);
1790 	return 0;
1791 }
1792 
1793 static int tpe_rpl_rpl_flush(void *be_dev, const struct tpe_func_s *tx_rpl, int index, int cnt)
1794 {
1795 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1796 	CHECK_DEBUG_ON(be, tx_rpl, be->p_tx_rpl_nthw);
1797 
1798 	if (tx_rpl->ver >= 1) {
1799 		tx_rpl_nthw_rpl_cnt(be->p_tx_rpl_nthw, 1);
1800 
1801 		for (int i = 0; i < cnt; i++) {
1802 			tx_rpl_nthw_rpl_select(be->p_tx_rpl_nthw, index + i);
1803 			tx_rpl_nthw_rpl_value(be->p_tx_rpl_nthw,
1804 				tx_rpl->v3.rpl_rpl[index + i].value);
1805 			tx_rpl_nthw_rpl_flush(be->p_tx_rpl_nthw);
1806 		}
1807 	}
1808 
1809 	CHECK_DEBUG_OFF(tx_rpl, be->p_tx_rpl_nthw);
1810 	return 0;
1811 }
1812 
1813 static int tpe_cpy_rcp_flush(void *be_dev, const struct tpe_func_s *tx_cpy, int index, int cnt)
1814 {
1815 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1816 	unsigned int wr_index = (unsigned int)-1;
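	/*
	 * wr_index starts out as an out-of-range value (the unsigned wrap of -1),
	 * so the first loop iteration always programs the writer count before any
	 * record is written.
	 */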
1817 
1818 	CHECK_DEBUG_ON(be, tx_cpy, be->p_tx_cpy_nthw);
1819 
1820 	if (tx_cpy->ver >= 1) {
1821 		for (int i = 0; i < cnt; i++) {
1822 			if (wr_index != (index + i) / tx_cpy->nb_rcp_categories) {
1823 				wr_index = (index + i) / tx_cpy->nb_rcp_categories;
1824 				tx_cpy_nthw_writer_cnt(be->p_tx_cpy_nthw, wr_index, 1);
1825 			}
1826 
1827 			tx_cpy_nthw_writer_select(be->p_tx_cpy_nthw, wr_index,
1828 				(index + i) % tx_cpy->nb_rcp_categories);
1829 			tx_cpy_nthw_writer_reader_select(be->p_tx_cpy_nthw, wr_index,
1830 				tx_cpy->v3.cpy_rcp[index + i]
1831 				.reader_select);
1832 			tx_cpy_nthw_writer_dyn(be->p_tx_cpy_nthw, wr_index,
1833 				tx_cpy->v3.cpy_rcp[index + i].dyn);
1834 			tx_cpy_nthw_writer_ofs(be->p_tx_cpy_nthw, wr_index,
1835 				tx_cpy->v3.cpy_rcp[index + i].ofs);
1836 			tx_cpy_nthw_writer_len(be->p_tx_cpy_nthw, wr_index,
1837 				tx_cpy->v3.cpy_rcp[index + i].len);
1838 			tx_cpy_nthw_writer_flush(be->p_tx_cpy_nthw, wr_index);
1839 		}
1840 	}
1841 
1842 	CHECK_DEBUG_OFF(tx_cpy, be->p_tx_cpy_nthw);
1843 	return 0;
1844 }
1845 
1846 static int tpe_hfu_rcp_flush(void *be_dev, const struct tpe_func_s *hfu, int index, int cnt)
1847 {
1848 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1849 	CHECK_DEBUG_ON(be, hfu, be->p_hfu_nthw);
1850 
1851 	if (hfu->ver >= 1) {
1852 		hfu_nthw_rcp_cnt(be->p_hfu_nthw, 1);
1853 
1854 		for (int i = 0; i < cnt; i++) {
1855 			hfu_nthw_rcp_select(be->p_hfu_nthw, index + i);
1856 			hfu_nthw_rcp_len_a_wr(be->p_hfu_nthw, hfu->v3.hfu_rcp[index + i].len_a_wr);
1857 			hfu_nthw_rcp_len_a_ol4len(be->p_hfu_nthw,
1858 				hfu->v3.hfu_rcp[index + i].len_a_outer_l4_len);
1859 			hfu_nthw_rcp_len_a_pos_dyn(be->p_hfu_nthw,
1860 				hfu->v3.hfu_rcp[index + i].len_a_pos_dyn);
1861 			hfu_nthw_rcp_len_a_pos_ofs(be->p_hfu_nthw,
1862 				hfu->v3.hfu_rcp[index + i].len_a_pos_ofs);
1863 			hfu_nthw_rcp_len_a_add_dyn(be->p_hfu_nthw,
1864 				hfu->v3.hfu_rcp[index + i].len_a_add_dyn);
1865 			hfu_nthw_rcp_len_a_add_ofs(be->p_hfu_nthw,
1866 				hfu->v3.hfu_rcp[index + i].len_a_add_ofs);
1867 			hfu_nthw_rcp_len_a_sub_dyn(be->p_hfu_nthw,
1868 				hfu->v3.hfu_rcp[index + i].len_a_sub_dyn);
1869 			hfu_nthw_rcp_len_b_wr(be->p_hfu_nthw, hfu->v3.hfu_rcp[index + i].len_b_wr);
1870 			hfu_nthw_rcp_len_b_pos_dyn(be->p_hfu_nthw,
1871 				hfu->v3.hfu_rcp[index + i].len_b_pos_dyn);
1872 			hfu_nthw_rcp_len_b_pos_ofs(be->p_hfu_nthw,
1873 				hfu->v3.hfu_rcp[index + i].len_b_pos_ofs);
1874 			hfu_nthw_rcp_len_b_add_dyn(be->p_hfu_nthw,
1875 				hfu->v3.hfu_rcp[index + i].len_b_add_dyn);
1876 			hfu_nthw_rcp_len_b_add_ofs(be->p_hfu_nthw,
1877 				hfu->v3.hfu_rcp[index + i].len_b_add_ofs);
1878 			hfu_nthw_rcp_len_b_sub_dyn(be->p_hfu_nthw,
1879 				hfu->v3.hfu_rcp[index + i].len_b_sub_dyn);
1880 			hfu_nthw_rcp_len_c_wr(be->p_hfu_nthw, hfu->v3.hfu_rcp[index + i].len_c_wr);
1881 			hfu_nthw_rcp_len_c_pos_dyn(be->p_hfu_nthw,
1882 				hfu->v3.hfu_rcp[index + i].len_c_pos_dyn);
1883 			hfu_nthw_rcp_len_c_pos_ofs(be->p_hfu_nthw,
1884 				hfu->v3.hfu_rcp[index + i].len_c_pos_ofs);
1885 			hfu_nthw_rcp_len_c_add_dyn(be->p_hfu_nthw,
1886 				hfu->v3.hfu_rcp[index + i].len_c_add_dyn);
1887 			hfu_nthw_rcp_len_c_add_ofs(be->p_hfu_nthw,
1888 				hfu->v3.hfu_rcp[index + i].len_c_add_ofs);
1889 			hfu_nthw_rcp_len_c_sub_dyn(be->p_hfu_nthw,
1890 				hfu->v3.hfu_rcp[index + i].len_c_sub_dyn);
1891 			hfu_nthw_rcp_ttl_wr(be->p_hfu_nthw, hfu->v3.hfu_rcp[index + i].ttl_wr);
1892 			hfu_nthw_rcp_ttl_pos_dyn(be->p_hfu_nthw,
1893 				hfu->v3.hfu_rcp[index + i].ttl_pos_dyn);
1894 			hfu_nthw_rcp_ttl_pos_ofs(be->p_hfu_nthw,
1895 				hfu->v3.hfu_rcp[index + i].ttl_pos_ofs);
1896 			hfu_nthw_rcp_flush(be->p_hfu_nthw);
1897 		}
1898 	}
1899 
1900 	CHECK_DEBUG_OFF(hfu, be->p_hfu_nthw);
1901 	return 0;
1902 }
1903 
1904 static int tpe_csu_rcp_flush(void *be_dev, const struct tpe_func_s *csu, int index, int cnt)
1905 {
1906 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1907 	CHECK_DEBUG_ON(be, csu, be->p_csu_nthw);
1908 
1909 	if (csu->ver >= 1) {
1910 		csu_nthw_rcp_cnt(be->p_csu_nthw, 1);
1911 
1912 		for (int i = 0; i < cnt; i++) {
1913 			csu_nthw_rcp_select(be->p_csu_nthw, index + i);
1914 			csu_nthw_rcp_outer_l3_cmd(be->p_csu_nthw,
1915 				csu->v3.csu_rcp[index + i].ol3_cmd);
1916 			csu_nthw_rcp_outer_l4_cmd(be->p_csu_nthw,
1917 				csu->v3.csu_rcp[index + i].ol4_cmd);
1918 			csu_nthw_rcp_inner_l3_cmd(be->p_csu_nthw,
1919 				csu->v3.csu_rcp[index + i].il3_cmd);
1920 			csu_nthw_rcp_inner_l4_cmd(be->p_csu_nthw,
1921 				csu->v3.csu_rcp[index + i].il4_cmd);
1922 			csu_nthw_rcp_flush(be->p_csu_nthw);
1923 		}
1924 	}
1925 
1926 	CHECK_DEBUG_OFF(csu, be->p_csu_nthw);
1927 	return 0;
1928 }
1929 
1930 /*
1931  * DBS
1932  */
1933 
1934 static int alloc_rx_queue(void *be_dev, int queue_id)
1935 {
1936 	(void)be_dev;
1937 	(void)queue_id;
1938 	NT_LOG(ERR, FILTER, "ERROR alloc Rx queue");
1939 	return -1;
1940 }
1941 
1942 static int free_rx_queue(void *be_dev, int hw_queue)
1943 {
1944 	(void)be_dev;
1945 	(void)hw_queue;
1946 	NT_LOG(ERR, FILTER, "ERROR free Rx queue");
1947 	return 0;
1948 }
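
/*
 * The two DBS queue callbacks above are stubs in this backend: queue
 * allocation is not supported, so alloc_rx_queue() reports failure with -1,
 * while free_rx_queue() only logs the error and still returns 0, so callers
 * cannot detect the unsupported case from its return value.
 */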
1949 
1950 const struct flow_api_backend_ops flow_be_iface = {
1951 	1,
1952 
1953 	set_debug_mode,
1954 	get_nb_phy_ports,
1955 	get_nb_rx_ports,
1956 	get_ltx_avail,
1957 	get_nb_cat_funcs,
1958 	get_nb_categories,
1959 	get_nb_cat_km_if_cnt,
1960 	get_nb_cat_km_if_m0,
1961 	get_nb_cat_km_if_m1,
1962 	get_nb_queues,
1963 	get_nb_km_flow_types,
1964 	get_nb_pm_ext,
1965 	get_nb_len,
1966 	get_kcc_size,
1967 	get_kcc_banks,
1968 	get_nb_km_categories,
1969 	get_nb_km_cam_banks,
1970 	get_nb_km_cam_record_words,
1971 	get_nb_km_cam_records,
1972 	get_nb_km_tcam_banks,
1973 	get_nb_km_tcam_bank_width,
1974 	get_nb_flm_categories,
1975 	get_nb_flm_size_mb,
1976 	get_nb_flm_entry_size,
1977 	get_nb_flm_variant,
1978 	get_nb_flm_prios,
1979 	get_nb_flm_pst_profiles,
1980 	get_nb_flm_scrub_profiles,
1981 	get_nb_flm_load_aps_max,
1982 	get_nb_qsl_categories,
1983 	get_nb_qsl_qst_entries,
1984 	get_nb_pdb_categories,
1985 	get_nb_roa_categories,
1986 	get_nb_tpe_categories,
1987 	get_nb_tx_cpy_writers,
1988 	get_nb_tx_cpy_mask_mem,
1989 	get_nb_tx_rpl_depth,
1990 	get_nb_tx_rpl_ext_categories,
1991 	get_nb_tpe_ifr_categories,
1992 	get_nb_rpp_per_ps,
1993 	get_nb_hsh_categories,
1994 	get_nb_hsh_toeplitz,
1995 
1996 	alloc_rx_queue,
1997 	free_rx_queue,
1998 
1999 	cat_get_present,
2000 	cat_get_version,
2001 	cat_cfn_flush,
2002 
2003 	cat_kce_flush,
2004 	cat_kcs_flush,
2005 	cat_fte_flush,
2006 
2007 	cat_cte_flush,
2008 	cat_cts_flush,
2009 	cat_cot_flush,
2010 	cat_cct_flush,
2011 	cat_exo_flush,
2012 	cat_rck_flush,
2013 	cat_len_flush,
2014 	cat_kcc_flush,
2015 
2016 	km_get_present,
2017 	km_get_version,
2018 	km_rcp_flush,
2019 	km_cam_flush,
2020 	km_tcam_flush,
2021 	km_tci_flush,
2022 	km_tcq_flush,
2023 
2024 	flm_get_present,
2025 	flm_get_version,
2026 	flm_control_flush,
2027 	flm_status_flush,
2028 	flm_status_update,
2029 	flm_scan_flush,
2030 	flm_load_bin_flush,
2031 	flm_prio_flush,
2032 	flm_pst_flush,
2033 	flm_rcp_flush,
2034 	flm_scrub_flush,
2035 	flm_buf_ctrl_update,
2036 	flm_stat_update,
2037 	flm_lrn_data_flush,
2038 	flm_inf_sta_data_update,
2039 
2040 	hsh_get_present,
2041 	hsh_get_version,
2042 	hsh_rcp_flush,
2043 
2044 	qsl_get_present,
2045 	qsl_get_version,
2046 	qsl_rcp_flush,
2047 	qsl_qst_flush,
2048 	qsl_qen_flush,
2049 	qsl_unmq_flush,
2050 
2051 	slc_lr_get_present,
2052 	slc_lr_get_version,
2053 	slc_lr_rcp_flush,
2054 
2055 	pdb_get_present,
2056 	pdb_get_version,
2057 	pdb_rcp_flush,
2058 	pdb_config_flush,
2059 
2060 	tpe_get_present,
2061 	tpe_get_version,
2062 	tpe_rpp_rcp_flush,
2063 	tpe_rpp_ifr_rcp_flush,
2064 	tpe_ifr_rcp_flush,
2065 	tpe_ins_rcp_flush,
2066 	tpe_rpl_rcp_flush,
2067 	tpe_rpl_ext_flush,
2068 	tpe_rpl_rpl_flush,
2069 	tpe_cpy_rcp_flush,
2070 	tpe_hfu_rcp_flush,
2071 	tpe_csu_rcp_flush,
2072 };
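
/*
 * The table above relies on positional initialization, so its entries must
 * stay in the exact order of struct flow_api_backend_ops, with the leading
 * literal 1 presumably identifying the backend interface version. A minimal
 * sketch of the designated-initializer alternative, assuming the member names
 * mirror the callback names (not verified against hw_mod_backend.h):
 *
 *   const struct flow_api_backend_ops flow_be_iface = {
 *           .set_debug_mode = set_debug_mode,
 *           .alloc_rx_queue = alloc_rx_queue,
 *           ...
 *   };
 */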
2073 
2074 const struct flow_api_backend_ops *bin_flow_backend_init(nthw_fpga_t *p_fpga, void **dev)
2075 {
2076 	uint8_t physical_adapter_no = (uint8_t)p_fpga->p_fpga_info->adapter_no;
2077 
2078 	struct info_nthw *pinfonthw = info_nthw_new();
2079 	info_nthw_init(pinfonthw, p_fpga, physical_adapter_no);
2080 	be_devs[physical_adapter_no].p_info_nthw = pinfonthw;
2081 
2082 	/* Init nthw CAT */
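	/*
	 * For each module below, calling its *_nthw_init() with a NULL instance is
	 * used as a presence probe: only when the probe succeeds is a real instance
	 * allocated with *_nthw_new() and initialized; otherwise the corresponding
	 * pointer stays NULL so the *_get_present() callbacks report the module as
	 * absent.
	 */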
2083 	if (cat_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
2084 		struct cat_nthw *pcatnthw = cat_nthw_new();
2085 		cat_nthw_init(pcatnthw, p_fpga, physical_adapter_no);
2086 		be_devs[physical_adapter_no].p_cat_nthw = pcatnthw;
2087 
2088 	} else {
2089 		be_devs[physical_adapter_no].p_cat_nthw = NULL;
2090 	}
2091 
2092 	/* Init nthw KM */
2093 	if (km_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
2094 		struct km_nthw *pkmnthw = km_nthw_new();
2095 		km_nthw_init(pkmnthw, p_fpga, physical_adapter_no);
2096 		be_devs[physical_adapter_no].p_km_nthw = pkmnthw;
2097 
2098 	} else {
2099 		be_devs[physical_adapter_no].p_km_nthw = NULL;
2100 	}
2101 
2102 	/* Init nthw FLM */
2103 	if (flm_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
2104 		struct flm_nthw *pflmnthw = flm_nthw_new();
2105 		flm_nthw_init(pflmnthw, p_fpga, physical_adapter_no);
2106 		be_devs[physical_adapter_no].p_flm_nthw = pflmnthw;
2107 
2108 	} else {
2109 		be_devs[physical_adapter_no].p_flm_nthw = NULL;
2110 	}
2111 
2112 	/* Init nthw IFR */
2113 	if (ifr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
2114 		struct ifr_nthw *ifrnthw = ifr_nthw_new();
2115 		ifr_nthw_init(ifrnthw, p_fpga, physical_adapter_no);
2116 		be_devs[physical_adapter_no].p_ifr_nthw = ifrnthw;
2117 
2118 	} else {
2119 		be_devs[physical_adapter_no].p_ifr_nthw = NULL;
2120 	}
2121 
2122 	/* Init nthw HSH */
2123 	if (hsh_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
2124 		struct hsh_nthw *phshnthw = hsh_nthw_new();
2125 		hsh_nthw_init(phshnthw, p_fpga, physical_adapter_no);
2126 		be_devs[physical_adapter_no].p_hsh_nthw = phshnthw;
2127 
2128 	} else {
2129 		be_devs[physical_adapter_no].p_hsh_nthw = NULL;
2130 	}
2131 
2132 	/* Init nthw QSL */
2133 	if (qsl_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
2134 		struct qsl_nthw *pqslnthw = qsl_nthw_new();
2135 		qsl_nthw_init(pqslnthw, p_fpga, physical_adapter_no);
2136 		be_devs[physical_adapter_no].p_qsl_nthw = pqslnthw;
2137 
2138 	} else {
2139 		be_devs[physical_adapter_no].p_qsl_nthw = NULL;
2140 	}
2141 
2142 	/* Init nthw SLC LR */
2143 	if (slc_lr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
2144 		struct slc_lr_nthw *pslclrnthw = slc_lr_nthw_new();
2145 		slc_lr_nthw_init(pslclrnthw, p_fpga, physical_adapter_no);
2146 		be_devs[physical_adapter_no].p_slc_lr_nthw = pslclrnthw;
2147 
2148 	} else {
2149 		be_devs[physical_adapter_no].p_slc_lr_nthw = NULL;
2150 	}
2151 
2152 	/* Init nthw PDB */
2153 	if (pdb_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
2154 		struct pdb_nthw *ppdbnthw = pdb_nthw_new();
2155 		pdb_nthw_init(ppdbnthw, p_fpga, physical_adapter_no);
2156 		be_devs[physical_adapter_no].p_pdb_nthw = ppdbnthw;
2157 
2158 	} else {
2159 		be_devs[physical_adapter_no].p_pdb_nthw = NULL;
2160 	}
2161 
2162 	/* Init nthw HFU */
2163 	if (hfu_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
2164 		struct hfu_nthw *ptr = hfu_nthw_new();
2165 		hfu_nthw_init(ptr, p_fpga, physical_adapter_no);
2166 		be_devs[physical_adapter_no].p_hfu_nthw = ptr;
2167 
2168 	} else {
2169 		be_devs[physical_adapter_no].p_hfu_nthw = NULL;
2170 	}
2171 
2172 	/* Init nthw RPP_LR */
2173 	if (rpp_lr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
2174 		struct rpp_lr_nthw *ptr = rpp_lr_nthw_new();
2175 		rpp_lr_nthw_init(ptr, p_fpga, physical_adapter_no);
2176 		be_devs[physical_adapter_no].p_rpp_lr_nthw = ptr;
2177 
2178 	} else {
2179 		be_devs[physical_adapter_no].p_rpp_lr_nthw = NULL;
2180 	}
2181 
2182 	/* Init nthw TX_CPY */
2183 	if (tx_cpy_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
2184 		struct tx_cpy_nthw *ptr = tx_cpy_nthw_new();
2185 		tx_cpy_nthw_init(ptr, p_fpga, physical_adapter_no);
2186 		be_devs[physical_adapter_no].p_tx_cpy_nthw = ptr;
2187 
2188 	} else {
2189 		be_devs[physical_adapter_no].p_tx_cpy_nthw = NULL;
2190 	}
2191 
2192 	/* Init nthw CSU */
2193 	if (csu_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
2194 		struct csu_nthw *ptr = csu_nthw_new();
2195 		csu_nthw_init(ptr, p_fpga, physical_adapter_no);
2196 		be_devs[physical_adapter_no].p_csu_nthw = ptr;
2197 
2198 	} else {
2199 		be_devs[physical_adapter_no].p_csu_nthw = NULL;
2200 	}
2201 
2202 	/* Init nthw TX_INS */
2203 	if (tx_ins_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
2204 		struct tx_ins_nthw *ptr = tx_ins_nthw_new();
2205 		tx_ins_nthw_init(ptr, p_fpga, physical_adapter_no);
2206 		be_devs[physical_adapter_no].p_tx_ins_nthw = ptr;
2207 
2208 	} else {
2209 		be_devs[physical_adapter_no].p_tx_ins_nthw = NULL;
2210 	}
2211 
2212 	/* Init nthw TX_RPL */
2213 	if (tx_rpl_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
2214 		struct tx_rpl_nthw *ptr = tx_rpl_nthw_new();
2215 		tx_rpl_nthw_init(ptr, p_fpga, physical_adapter_no);
2216 		be_devs[physical_adapter_no].p_tx_rpl_nthw = ptr;
2217 
2218 	} else {
2219 		be_devs[physical_adapter_no].p_tx_rpl_nthw = NULL;
2220 	}
2221 
2222 	be_devs[physical_adapter_no].adapter_no = physical_adapter_no;
2223 	*dev = (void *)&be_devs[physical_adapter_no];
2224 
2225 	return &flow_be_iface;
2226 }
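
/*
 * Minimal usage sketch (illustrative only; the ops member names are assumed
 * to mirror the callback names and are not verified here). Externally the
 * same pair of entry points is reached through the struct flow_backend_ops
 * registered below.
 *
 *   void *be_dev = NULL;
 *   const struct flow_api_backend_ops *ops =
 *           bin_flow_backend_init(p_fpga, &be_dev);
 *   ops->set_debug_mode(be_dev, FLOW_BACKEND_DEBUG_MODE_WRITE);
 *   ...
 *   bin_flow_backend_done(be_dev);
 */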
2227 
2228 static void bin_flow_backend_done(void *dev)
2229 {
2230 	struct backend_dev_s *be_dev = (struct backend_dev_s *)dev;
2231 	info_nthw_delete(be_dev->p_info_nthw);
2232 	cat_nthw_delete(be_dev->p_cat_nthw);
2233 	km_nthw_delete(be_dev->p_km_nthw);
2234 	flm_nthw_delete(be_dev->p_flm_nthw);
2235 	hsh_nthw_delete(be_dev->p_hsh_nthw);
2236 	qsl_nthw_delete(be_dev->p_qsl_nthw);
2237 	slc_lr_nthw_delete(be_dev->p_slc_lr_nthw);
2238 	pdb_nthw_delete(be_dev->p_pdb_nthw);
2239 	csu_nthw_delete(be_dev->p_csu_nthw);
2240 	hfu_nthw_delete(be_dev->p_hfu_nthw);
2241 	rpp_lr_nthw_delete(be_dev->p_rpp_lr_nthw);
2242 	tx_cpy_nthw_delete(be_dev->p_tx_cpy_nthw);
2243 	tx_ins_nthw_delete(be_dev->p_tx_ins_nthw);
2244 	tx_rpl_nthw_delete(be_dev->p_tx_rpl_nthw);
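
	/*
	 * Note: p_ifr_nthw, allocated in bin_flow_backend_init(), is not released
	 * here; if flow_nthw_ifr provides a delete counterpart it should probably
	 * be called as well.
	 */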
2245 }
2246 
2247 static const struct flow_backend_ops ops = {
2248 	.bin_flow_backend_init = bin_flow_backend_init,
2249 	.bin_flow_backend_done = bin_flow_backend_done,
2250 };
2251 
2252 void flow_backend_init(void)
2253 {
2254 	register_flow_backend_ops(&ops);
2255 }
2256