xref: /dpdk/drivers/net/ntnic/nthw/flow_api/flow_backend/flow_backend.c (revision 0338fef450a4be0f2ccb7cc491e16d6a80caa504)
1 /*
2  * SPDX-License-Identifier: BSD-3-Clause
3  * Copyright(c) 2023 Napatech A/S
4  */
5 
6 #include <stdint.h>
7 
8 #include "flow_nthw_info.h"
9 #include "flow_nthw_ifr.h"
10 #include "flow_nthw_cat.h"
11 #include "flow_nthw_csu.h"
12 #include "flow_nthw_km.h"
13 #include "flow_nthw_flm.h"
14 #include "flow_nthw_hfu.h"
15 #include "flow_nthw_hsh.h"
16 #include "flow_nthw_qsl.h"
17 #include "flow_nthw_slc_lr.h"
18 #include "flow_nthw_pdb.h"
19 #include "flow_nthw_rpp_lr.h"
20 #include "flow_nthw_tx_cpy.h"
21 #include "flow_nthw_tx_ins.h"
22 #include "flow_nthw_tx_rpl.h"
23 #include "ntnic_mod_reg.h"
24 #include "nthw_fpga_model.h"
25 #include "hw_mod_backend.h"
26 
/*
 * Binary Flow API backend implementation for the ntservice driver.
 *
 * General note on this backend implementation:
 * A shadow copy could be used to combine multiple register writes, but this
 * backend is intended only for development and testing.
 */
33 
/*
 * Per-adapter backend device context.
 *
 * Holds one pointer per NTHW flow-module driver instance used by this
 * backend; a NULL module pointer means the module is not available on the
 * adapter (see the *_get_present() queries below).
 */
static struct backend_dev_s {
	uint8_t adapter_no;	/* adapter index — assumed; initialized outside this chunk */
	enum debug_mode_e dmode;	/* debug trace mode; see set_debug_mode() / CHECK_DEBUG_ON() */
	struct info_nthw *p_info_nthw;
	struct cat_nthw *p_cat_nthw;
	struct km_nthw *p_km_nthw;
	struct flm_nthw *p_flm_nthw;
	struct hsh_nthw *p_hsh_nthw;
	struct qsl_nthw *p_qsl_nthw;
	struct slc_lr_nthw *p_slc_lr_nthw;
	struct pdb_nthw *p_pdb_nthw;
	struct hfu_nthw *p_hfu_nthw;    /* TPE module */
	struct rpp_lr_nthw *p_rpp_lr_nthw;      /* TPE module */
	struct tx_cpy_nthw *p_tx_cpy_nthw;      /* TPE module */
	struct tx_ins_nthw *p_tx_ins_nthw;      /* TPE module */
	struct tx_rpl_nthw *p_tx_rpl_nthw;      /* TPE module */
	struct csu_nthw *p_csu_nthw;    /* TPE module */
	struct ifr_nthw *p_ifr_nthw;    /* TPE module */
} be_devs[MAX_PHYS_ADAPTERS];
53 
/*
 * Enable register-write debug tracing on flow module 'mod' when either the
 * backend's debug mode requests it or the module instance itself has its
 * debug flag set. Declares a local flag recording whether tracing was
 * enabled; CHECK_DEBUG_OFF() reads that flag, so the two macros must be
 * used as a pair within the same scope, ON before OFF.
 *
 * Restructured so the conditional lives inside the do/while(0) wrapper
 * (the original wrapped a do/while inside an if, which defeats the idiom).
 * NOTE(review): '__debug__' is a reserved identifier per the C standard;
 * kept unchanged because other chunks of this file may reference it.
 */
#define CHECK_DEBUG_ON(be, mod, inst)                                                             \
	int __debug__ = 0;                                                                        \
	do {                                                                                      \
		if (((be)->dmode & FLOW_BACKEND_DEBUG_MODE_WRITE) || (mod)->debug) {              \
			mod##_nthw_set_debug_mode((inst), 0xFF);                                  \
			__debug__ = 1;                                                            \
		}                                                                                 \
	} while (0)

/* Disable debug tracing again, but only if CHECK_DEBUG_ON() enabled it. */
#define CHECK_DEBUG_OFF(mod, inst)                                                                \
	do {                                                                                      \
		if (__debug__)                                                                    \
			mod##_nthw_set_debug_mode((inst), 0);                                     \
	} while (0)
67 
68 const struct flow_api_backend_ops *bin_flow_backend_init(nthw_fpga_t *p_fpga, void **be_dev);
69 static void bin_flow_backend_done(void *be_dev);
70 
71 static int set_debug_mode(void *be_dev, enum debug_mode_e mode)
72 {
73 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
74 	be->dmode = mode;
75 	return 0;
76 }
77 
78 /*
79  * INFO
80  */
81 
82 static int get_nb_phy_ports(void *be_dev)
83 {
84 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
85 	return info_nthw_get_nb_phy_ports(be->p_info_nthw);
86 }
87 
88 static int get_nb_rx_ports(void *be_dev)
89 {
90 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
91 	return info_nthw_get_nb_rx_ports(be->p_info_nthw);
92 }
93 
94 static int get_ltx_avail(void *be_dev)
95 {
96 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
97 	return info_nthw_get_ltx_avail(be->p_info_nthw);
98 }
99 
100 static int get_nb_cat_funcs(void *be_dev)
101 {
102 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
103 	return info_nthw_get_nb_cat_funcs(be->p_info_nthw);
104 }
105 
106 static int get_nb_categories(void *be_dev)
107 {
108 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
109 	return info_nthw_get_nb_categories(be->p_info_nthw);
110 }
111 
112 static int get_nb_cat_km_if_cnt(void *be_dev)
113 {
114 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
115 	return info_nthw_get_nb_cat_km_if_cnt(be->p_info_nthw);
116 }
117 
118 static int get_nb_cat_km_if_m0(void *be_dev)
119 {
120 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
121 	return info_nthw_get_nb_cat_km_if_m0(be->p_info_nthw);
122 }
123 
124 static int get_nb_cat_km_if_m1(void *be_dev)
125 {
126 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
127 	return info_nthw_get_nb_cat_km_if_m1(be->p_info_nthw);
128 }
129 
130 static int get_nb_queues(void *be_dev)
131 {
132 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
133 	return info_nthw_get_nb_queues(be->p_info_nthw);
134 }
135 
136 static int get_nb_km_flow_types(void *be_dev)
137 {
138 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
139 	return info_nthw_get_nb_km_flow_types(be->p_info_nthw);
140 }
141 
142 static int get_nb_pm_ext(void *be_dev)
143 {
144 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
145 	return info_nthw_get_nb_pm_ext(be->p_info_nthw);
146 }
147 
148 static int get_nb_len(void *be_dev)
149 {
150 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
151 	return info_nthw_get_nb_len(be->p_info_nthw);
152 }
153 
154 static int get_kcc_size(void *be_dev)
155 {
156 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
157 	return info_nthw_get_kcc_size(be->p_info_nthw);
158 }
159 
160 static int get_kcc_banks(void *be_dev)
161 {
162 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
163 	return info_nthw_get_kcc_banks(be->p_info_nthw);
164 }
165 
166 static int get_nb_km_categories(void *be_dev)
167 {
168 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
169 	return info_nthw_get_nb_km_categories(be->p_info_nthw);
170 }
171 
172 static int get_nb_km_cam_banks(void *be_dev)
173 {
174 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
175 	return info_nthw_get_nb_km_cam_banks(be->p_info_nthw);
176 }
177 
178 static int get_nb_km_cam_record_words(void *be_dev)
179 {
180 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
181 	return info_nthw_get_nb_km_cam_record_words(be->p_info_nthw);
182 }
183 
184 static int get_nb_km_cam_records(void *be_dev)
185 {
186 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
187 	return info_nthw_get_nb_km_cam_records(be->p_info_nthw);
188 }
189 
190 static int get_nb_km_tcam_banks(void *be_dev)
191 {
192 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
193 	return info_nthw_get_nb_km_tcam_banks(be->p_info_nthw);
194 }
195 
196 static int get_nb_km_tcam_bank_width(void *be_dev)
197 {
198 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
199 	return info_nthw_get_nb_km_tcam_bank_width(be->p_info_nthw);
200 }
201 
202 static int get_nb_flm_categories(void *be_dev)
203 {
204 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
205 	return info_nthw_get_nb_flm_categories(be->p_info_nthw);
206 }
207 
208 static int get_nb_flm_size_mb(void *be_dev)
209 {
210 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
211 	return info_nthw_get_nb_flm_size_mb(be->p_info_nthw);
212 }
213 
214 static int get_nb_flm_entry_size(void *be_dev)
215 {
216 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
217 	return info_nthw_get_nb_flm_entry_size(be->p_info_nthw);
218 }
219 
220 static int get_nb_flm_variant(void *be_dev)
221 {
222 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
223 	return info_nthw_get_nb_flm_variant(be->p_info_nthw);
224 }
225 
226 static int get_nb_flm_prios(void *be_dev)
227 {
228 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
229 	return info_nthw_get_nb_flm_prios(be->p_info_nthw);
230 }
231 
232 static int get_nb_flm_pst_profiles(void *be_dev)
233 {
234 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
235 	return info_nthw_get_nb_flm_pst_profiles(be->p_info_nthw);
236 }
237 
238 static int get_nb_flm_scrub_profiles(void *be_dev)
239 {
240 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
241 	return info_nthw_get_nb_flm_scrub_profiles(be->p_info_nthw);
242 }
243 
244 static int get_nb_flm_load_aps_max(void *be_dev)
245 {
246 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
247 	return info_nthw_get_nb_flm_load_aps_max(be->p_info_nthw);
248 }
249 
250 static int get_nb_qsl_categories(void *be_dev)
251 {
252 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
253 	return info_nthw_get_nb_qsl_categories(be->p_info_nthw);
254 }
255 
256 static int get_nb_qsl_qst_entries(void *be_dev)
257 {
258 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
259 	return info_nthw_get_nb_qsl_qst_entries(be->p_info_nthw);
260 }
261 
262 static int get_nb_pdb_categories(void *be_dev)
263 {
264 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
265 	return info_nthw_get_nb_pdb_categories(be->p_info_nthw);
266 }
267 
268 static int get_nb_roa_categories(void *be_dev)
269 {
270 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
271 	return info_nthw_get_nb_roa_categories(be->p_info_nthw);
272 }
273 
274 static int get_nb_tpe_categories(void *be_dev)
275 {
276 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
277 	return info_nthw_get_nb_tpe_categories(be->p_info_nthw);
278 }
279 
280 static int get_nb_tx_cpy_writers(void *be_dev)
281 {
282 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
283 	return info_nthw_get_nb_tx_cpy_writers(be->p_info_nthw);
284 }
285 
286 static int get_nb_tx_cpy_mask_mem(void *be_dev)
287 {
288 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
289 	return info_nthw_get_nb_tx_cpy_mask_mem(be->p_info_nthw);
290 }
291 
292 static int get_nb_tx_rpl_depth(void *be_dev)
293 {
294 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
295 	return info_nthw_get_nb_tx_rpl_depth(be->p_info_nthw);
296 }
297 
298 static int get_nb_tx_rpl_ext_categories(void *be_dev)
299 {
300 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
301 	return info_nthw_get_nb_tx_rpl_ext_categories(be->p_info_nthw);
302 }
303 
304 static int get_nb_tpe_ifr_categories(void *be_dev)
305 {
306 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
307 	return info_nthw_get_nb_tpe_ifr_categories(be->p_info_nthw);
308 }
309 
310 static int get_nb_rpp_per_ps(void *be_dev)
311 {
312 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
313 	return info_nthw_get_nb_rpp_per_ps(be->p_info_nthw);
314 }
315 
316 static int get_nb_hsh_categories(void *be_dev)
317 {
318 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
319 	return info_nthw_get_nb_hsh_categories(be->p_info_nthw);
320 }
321 
322 static int get_nb_hsh_toeplitz(void *be_dev)
323 {
324 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
325 	return info_nthw_get_nb_hsh_toeplitz(be->p_info_nthw);
326 }
327 
328 /*
329  * CAT
330  */
331 
332 static bool cat_get_present(void *be_dev)
333 {
334 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
335 	return be->p_cat_nthw != NULL;
336 }
337 
338 static uint32_t cat_get_version(void *be_dev)
339 {
340 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
341 	return (uint32_t)((nthw_module_get_major_version(be->p_cat_nthw->m_cat) << 16) |
342 			(nthw_module_get_minor_version(be->p_cat_nthw->m_cat) & 0xffff));
343 }
344 
/*
 * Flush 'cnt' categorizer function (CFN) records to the CAT module,
 * starting at record index 'cat_func', from the shadow configuration
 * held in 'cat'.
 *
 * The register field set depends on the CAT module version:
 *   - v18: single KM interface, base error-match fields only.
 *   - v21: adds tunnel checksum/TTL error-match fields and, when the
 *     hardware exposes more than one KM interface, the KM1 OR mask.
 * Any other version is silently ignored. Always returns 0.
 */
static int cat_cfn_flush(void *be_dev, const struct cat_func_s *cat, int cat_func, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;

	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);

	if (cat->ver == 18) {
		/* one record is written per flush in the loop below */
		cat_nthw_cfn_cnt(be->p_cat_nthw, 1U);

		for (int i = 0; i < cnt; i++) {
			cat_nthw_cfn_select(be->p_cat_nthw, cat_func);
			cat_nthw_cfn_enable(be->p_cat_nthw, cat->v18.cfn[cat_func].enable);
			cat_nthw_cfn_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].inv);
			/* ptc_* match fields */
			cat_nthw_cfn_ptc_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_inv);
			cat_nthw_cfn_ptc_isl(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_isl);
			cat_nthw_cfn_ptc_cfp(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_cfp);
			cat_nthw_cfn_ptc_mac(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_mac);
			cat_nthw_cfn_ptc_l2(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_l2);
			cat_nthw_cfn_ptc_vn_tag(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_vntag);
			cat_nthw_cfn_ptc_vlan(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_vlan);
			cat_nthw_cfn_ptc_mpls(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_mpls);
			cat_nthw_cfn_ptc_l3(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_l3);
			cat_nthw_cfn_ptc_frag(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_frag);
			cat_nthw_cfn_ptc_ip_prot(be->p_cat_nthw,
				cat->v18.cfn[cat_func].ptc_ip_prot);
			cat_nthw_cfn_ptc_l4(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_l4);
			cat_nthw_cfn_ptc_tunnel(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tunnel);
			cat_nthw_cfn_ptc_tnl_l2(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_l2);
			cat_nthw_cfn_ptc_tnl_vlan(be->p_cat_nthw,
				cat->v18.cfn[cat_func].ptc_tnl_vlan);
			cat_nthw_cfn_ptc_tnl_mpls(be->p_cat_nthw,
				cat->v18.cfn[cat_func].ptc_tnl_mpls);
			cat_nthw_cfn_ptc_tnl_l3(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_l3);
			cat_nthw_cfn_ptc_tnl_frag(be->p_cat_nthw,
				cat->v18.cfn[cat_func].ptc_tnl_frag);
			cat_nthw_cfn_ptc_tnl_ip_prot(be->p_cat_nthw,
				cat->v18.cfn[cat_func].ptc_tnl_ip_prot);
			cat_nthw_cfn_ptc_tnl_l4(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_l4);

			/* err_* (error-condition) match fields */
			cat_nthw_cfn_err_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].err_inv);
			cat_nthw_cfn_err_cv(be->p_cat_nthw, cat->v18.cfn[cat_func].err_cv);
			cat_nthw_cfn_err_fcs(be->p_cat_nthw, cat->v18.cfn[cat_func].err_fcs);
			cat_nthw_cfn_err_trunc(be->p_cat_nthw, cat->v18.cfn[cat_func].err_trunc);
			cat_nthw_cfn_err_l3_cs(be->p_cat_nthw, cat->v18.cfn[cat_func].err_l3_cs);
			cat_nthw_cfn_err_l4_cs(be->p_cat_nthw, cat->v18.cfn[cat_func].err_l4_cs);

			cat_nthw_cfn_mac_port(be->p_cat_nthw, cat->v18.cfn[cat_func].mac_port);

			/* pm_* (pattern matcher) fields */
			cat_nthw_cfn_pm_cmp(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_cmp);
			cat_nthw_cfn_pm_dct(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_dct);
			cat_nthw_cfn_pm_ext_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_ext_inv);
			cat_nthw_cfn_pm_cmb(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_cmb);
			cat_nthw_cfn_pm_and_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_and_inv);
			cat_nthw_cfn_pm_or_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_or_inv);
			cat_nthw_cfn_pm_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_inv);

			cat_nthw_cfn_lc(be->p_cat_nthw, cat->v18.cfn[cat_func].lc);
			cat_nthw_cfn_lc_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].lc_inv);
			/* v18 has a single KM interface: km_or feeds KM0 */
			cat_nthw_cfn_km0_or(be->p_cat_nthw, cat->v18.cfn[cat_func].km_or);
			cat_nthw_cfn_flush(be->p_cat_nthw);
			cat_func++;
		}

	} else if (cat->ver == 21) {
		cat_nthw_cfn_cnt(be->p_cat_nthw, 1U);

		for (int i = 0; i < cnt; i++) {
			cat_nthw_cfn_select(be->p_cat_nthw, cat_func);
			cat_nthw_cfn_enable(be->p_cat_nthw, cat->v21.cfn[cat_func].enable);
			cat_nthw_cfn_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].inv);
			/* ptc_* match fields (same set as v18) */
			cat_nthw_cfn_ptc_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_inv);
			cat_nthw_cfn_ptc_isl(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_isl);
			cat_nthw_cfn_ptc_cfp(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_cfp);
			cat_nthw_cfn_ptc_mac(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_mac);
			cat_nthw_cfn_ptc_l2(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_l2);
			cat_nthw_cfn_ptc_vn_tag(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_vntag);
			cat_nthw_cfn_ptc_vlan(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_vlan);
			cat_nthw_cfn_ptc_mpls(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_mpls);
			cat_nthw_cfn_ptc_l3(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_l3);
			cat_nthw_cfn_ptc_frag(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_frag);
			cat_nthw_cfn_ptc_ip_prot(be->p_cat_nthw,
				cat->v21.cfn[cat_func].ptc_ip_prot);
			cat_nthw_cfn_ptc_l4(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_l4);
			cat_nthw_cfn_ptc_tunnel(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tunnel);
			cat_nthw_cfn_ptc_tnl_l2(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_l2);
			cat_nthw_cfn_ptc_tnl_vlan(be->p_cat_nthw,
				cat->v21.cfn[cat_func].ptc_tnl_vlan);
			cat_nthw_cfn_ptc_tnl_mpls(be->p_cat_nthw,
				cat->v21.cfn[cat_func].ptc_tnl_mpls);
			cat_nthw_cfn_ptc_tnl_l3(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_l3);
			cat_nthw_cfn_ptc_tnl_frag(be->p_cat_nthw,
				cat->v21.cfn[cat_func].ptc_tnl_frag);
			cat_nthw_cfn_ptc_tnl_ip_prot(be->p_cat_nthw,
				cat->v21.cfn[cat_func].ptc_tnl_ip_prot);
			cat_nthw_cfn_ptc_tnl_l4(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_l4);

			/* err_* match fields; v21 adds tunnel checksum and TTL bits */
			cat_nthw_cfn_err_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].err_inv);
			cat_nthw_cfn_err_cv(be->p_cat_nthw, cat->v21.cfn[cat_func].err_cv);
			cat_nthw_cfn_err_fcs(be->p_cat_nthw, cat->v21.cfn[cat_func].err_fcs);
			cat_nthw_cfn_err_trunc(be->p_cat_nthw, cat->v21.cfn[cat_func].err_trunc);
			cat_nthw_cfn_err_l3_cs(be->p_cat_nthw, cat->v21.cfn[cat_func].err_l3_cs);
			cat_nthw_cfn_err_l4_cs(be->p_cat_nthw, cat->v21.cfn[cat_func].err_l4_cs);
			cat_nthw_cfn_err_tnl_l3_cs(be->p_cat_nthw,
				cat->v21.cfn[cat_func].err_tnl_l3_cs);
			cat_nthw_cfn_err_tnl_l4_cs(be->p_cat_nthw,
				cat->v21.cfn[cat_func].err_tnl_l4_cs);
			cat_nthw_cfn_err_ttl_exp(be->p_cat_nthw,
				cat->v21.cfn[cat_func].err_ttl_exp);
			cat_nthw_cfn_err_tnl_ttl_exp(be->p_cat_nthw,
				cat->v21.cfn[cat_func].err_tnl_ttl_exp);

			cat_nthw_cfn_mac_port(be->p_cat_nthw, cat->v21.cfn[cat_func].mac_port);

			/* pm_* (pattern matcher) fields */
			cat_nthw_cfn_pm_cmp(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_cmp);
			cat_nthw_cfn_pm_dct(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_dct);
			cat_nthw_cfn_pm_ext_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_ext_inv);
			cat_nthw_cfn_pm_cmb(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_cmb);
			cat_nthw_cfn_pm_and_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_and_inv);
			cat_nthw_cfn_pm_or_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_or_inv);
			cat_nthw_cfn_pm_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_inv);

			cat_nthw_cfn_lc(be->p_cat_nthw, cat->v21.cfn[cat_func].lc);
			cat_nthw_cfn_lc_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].lc_inv);
			cat_nthw_cfn_km0_or(be->p_cat_nthw, cat->v21.cfn[cat_func].km0_or);

			/* only write the KM1 OR mask when a second KM interface exists */
			if (be->p_cat_nthw->m_km_if_cnt > 1)
				cat_nthw_cfn_km1_or(be->p_cat_nthw, cat->v21.cfn[cat_func].km1_or);

			cat_nthw_cfn_flush(be->p_cat_nthw);
			cat_func++;
		}
	}

	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
	return 0;
}
481 
482 static int cat_kce_flush(void *be_dev, const struct cat_func_s *cat, int km_if_idx, int index,
483 	int cnt)
484 {
485 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
486 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
487 
488 	if (cat->ver == 18) {
489 		cat_nthw_kce_cnt(be->p_cat_nthw, 0, 1U);
490 
491 		for (int i = 0; i < cnt; i++) {
492 			cat_nthw_kce_select(be->p_cat_nthw, 0, index + i);
493 			cat_nthw_kce_enable(be->p_cat_nthw, 0, cat->v18.kce[index + i].enable_bm);
494 			cat_nthw_kce_flush(be->p_cat_nthw, 0);
495 		}
496 
497 	} else if (cat->ver == 21) {
498 		cat_nthw_kce_cnt(be->p_cat_nthw, km_if_idx, 1U);
499 
500 		for (int i = 0; i < cnt; i++) {
501 			cat_nthw_kce_select(be->p_cat_nthw, km_if_idx, index + i);
502 			cat_nthw_kce_enable(be->p_cat_nthw, km_if_idx,
503 				cat->v21.kce[index + i].enable_bm[km_if_idx]);
504 			cat_nthw_kce_flush(be->p_cat_nthw, km_if_idx);
505 		}
506 	}
507 
508 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
509 	return 0;
510 }
511 
512 static int cat_kcs_flush(void *be_dev, const struct cat_func_s *cat, int km_if_idx, int cat_func,
513 	int cnt)
514 {
515 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
516 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
517 
518 	if (cat->ver == 18) {
519 		cat_nthw_kcs_cnt(be->p_cat_nthw, 0, 1U);
520 
521 		for (int i = 0; i < cnt; i++) {
522 			cat_nthw_kcs_select(be->p_cat_nthw, 0, cat_func);
523 			cat_nthw_kcs_category(be->p_cat_nthw, 0, cat->v18.kcs[cat_func].category);
524 			cat_nthw_kcs_flush(be->p_cat_nthw, 0);
525 			cat_func++;
526 		}
527 
528 	} else if (cat->ver == 21) {
529 		cat_nthw_kcs_cnt(be->p_cat_nthw, km_if_idx, 1U);
530 
531 		for (int i = 0; i < cnt; i++) {
532 			cat_nthw_kcs_select(be->p_cat_nthw, km_if_idx, cat_func);
533 			cat_nthw_kcs_category(be->p_cat_nthw, km_if_idx,
534 				cat->v21.kcs[cat_func].category[km_if_idx]);
535 			cat_nthw_kcs_flush(be->p_cat_nthw, km_if_idx);
536 			cat_func++;
537 		}
538 	}
539 
540 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
541 	return 0;
542 }
543 
544 static int cat_fte_flush(void *be_dev, const struct cat_func_s *cat, int km_if_idx, int index,
545 	int cnt)
546 {
547 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
548 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
549 
550 	if (cat->ver == 18) {
551 		cat_nthw_fte_cnt(be->p_cat_nthw, 0, 1);
552 
553 		for (int i = 0; i < cnt; i++) {
554 			cat_nthw_fte_select(be->p_cat_nthw, 0, index + i);
555 			cat_nthw_fte_enable(be->p_cat_nthw, 0, cat->v18.fte[index + i].enable_bm);
556 			cat_nthw_fte_flush(be->p_cat_nthw, 0);
557 		}
558 
559 	} else if (cat->ver == 21) {
560 		cat_nthw_fte_cnt(be->p_cat_nthw, km_if_idx, 1);
561 
562 		for (int i = 0; i < cnt; i++) {
563 			cat_nthw_fte_select(be->p_cat_nthw, km_if_idx, index + i);
564 			cat_nthw_fte_enable(be->p_cat_nthw, km_if_idx,
565 				cat->v21.fte[index + i].enable_bm[km_if_idx]);
566 			cat_nthw_fte_flush(be->p_cat_nthw, km_if_idx);
567 		}
568 	}
569 
570 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
571 	return 0;
572 }
573 
574 static int cat_cte_flush(void *be_dev, const struct cat_func_s *cat, int cat_func, int cnt)
575 {
576 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
577 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
578 
579 	if (cat->ver == 18 || cat->ver == 21) {
580 		cat_nthw_cte_cnt(be->p_cat_nthw, 1);
581 
582 		for (int i = 0; i < cnt; i++) {
583 			cat_nthw_cte_select(be->p_cat_nthw, cat_func);
584 			cat_nthw_cte_enable_col(be->p_cat_nthw, cat->v18.cte[cat_func].b.col);
585 			cat_nthw_cte_enable_cor(be->p_cat_nthw, cat->v18.cte[cat_func].b.cor);
586 			cat_nthw_cte_enable_hsh(be->p_cat_nthw, cat->v18.cte[cat_func].b.hsh);
587 			cat_nthw_cte_enable_qsl(be->p_cat_nthw, cat->v18.cte[cat_func].b.qsl);
588 			cat_nthw_cte_enable_ipf(be->p_cat_nthw, cat->v18.cte[cat_func].b.ipf);
589 			cat_nthw_cte_enable_slc(be->p_cat_nthw, cat->v18.cte[cat_func].b.slc);
590 			cat_nthw_cte_enable_pdb(be->p_cat_nthw, cat->v18.cte[cat_func].b.pdb);
591 			cat_nthw_cte_enable_msk(be->p_cat_nthw, cat->v18.cte[cat_func].b.msk);
592 			cat_nthw_cte_enable_hst(be->p_cat_nthw, cat->v18.cte[cat_func].b.hst);
593 			cat_nthw_cte_enable_epp(be->p_cat_nthw, cat->v18.cte[cat_func].b.epp);
594 			cat_nthw_cte_enable_tpe(be->p_cat_nthw, cat->v18.cte[cat_func].b.tpe);
595 
596 			cat_nthw_cte_flush(be->p_cat_nthw);
597 			cat_func++;
598 		}
599 	}
600 
601 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
602 	return 0;
603 }
604 
605 static int cat_cts_flush(void *be_dev, const struct cat_func_s *cat, int index, int cnt)
606 {
607 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
608 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
609 
610 	if (cat->ver == 18 || cat->ver == 21) {
611 		cat_nthw_cts_cnt(be->p_cat_nthw, 1);
612 
613 		for (int i = 0; i < cnt; i++) {
614 			cat_nthw_cts_select(be->p_cat_nthw, index + i);
615 			cat_nthw_cts_cat_a(be->p_cat_nthw, cat->v18.cts[index + i].cat_a);
616 			cat_nthw_cts_cat_b(be->p_cat_nthw, cat->v18.cts[index + i].cat_b);
617 			cat_nthw_cts_flush(be->p_cat_nthw);
618 		}
619 	}
620 
621 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
622 	return 0;
623 }
624 
625 static int cat_cot_flush(void *be_dev, const struct cat_func_s *cat, int cat_func, int cnt)
626 {
627 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
628 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
629 
630 	if (cat->ver == 18 || cat->ver == 21) {
631 		cat_nthw_cot_cnt(be->p_cat_nthw, 1);
632 
633 		for (int i = 0; i < cnt; i++) {
634 			cat_nthw_cot_select(be->p_cat_nthw, cat_func + i);
635 			cat_nthw_cot_color(be->p_cat_nthw, cat->v18.cot[cat_func + i].color);
636 			cat_nthw_cot_km(be->p_cat_nthw, cat->v18.cot[cat_func + i].km);
637 			cat_nthw_cot_flush(be->p_cat_nthw);
638 		}
639 	}
640 
641 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
642 	return 0;
643 }
644 
645 static int cat_cct_flush(void *be_dev, const struct cat_func_s *cat, int index, int cnt)
646 {
647 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
648 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
649 
650 	if (cat->ver == 18 || cat->ver == 21) {
651 		cat_nthw_cct_cnt(be->p_cat_nthw, 1);
652 
653 		for (int i = 0; i < cnt; i++) {
654 			cat_nthw_cct_select(be->p_cat_nthw, index + i);
655 			cat_nthw_cct_color(be->p_cat_nthw, cat->v18.cct[index + i].color);
656 			cat_nthw_cct_km(be->p_cat_nthw, cat->v18.cct[index + i].km);
657 			cat_nthw_cct_flush(be->p_cat_nthw);
658 		}
659 	}
660 
661 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
662 	return 0;
663 }
664 
665 static int cat_exo_flush(void *be_dev, const struct cat_func_s *cat, int ext_index, int cnt)
666 {
667 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
668 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
669 
670 	if (cat->ver == 18 || cat->ver == 21) {
671 		cat_nthw_exo_cnt(be->p_cat_nthw, 1);
672 
673 		for (int i = 0; i < cnt; i++) {
674 			cat_nthw_exo_select(be->p_cat_nthw, ext_index + i);
675 			cat_nthw_exo_dyn(be->p_cat_nthw, cat->v18.exo[ext_index + i].dyn);
676 			cat_nthw_exo_ofs(be->p_cat_nthw, cat->v18.exo[ext_index + i].ofs);
677 			cat_nthw_exo_flush(be->p_cat_nthw);
678 		}
679 	}
680 
681 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
682 	return 0;
683 }
684 
685 static int cat_rck_flush(void *be_dev, const struct cat_func_s *cat, int index, int cnt)
686 {
687 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
688 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
689 
690 	if (cat->ver == 18 || cat->ver == 21) {
691 		cat_nthw_rck_cnt(be->p_cat_nthw, 1);
692 
693 		for (int i = 0; i < cnt; i++) {
694 			cat_nthw_rck_select(be->p_cat_nthw, index + i);
695 			cat_nthw_rck_data(be->p_cat_nthw, cat->v18.rck[index + i].rck_data);
696 			cat_nthw_rck_flush(be->p_cat_nthw);
697 		}
698 	}
699 
700 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
701 	return 0;
702 }
703 
704 static int cat_len_flush(void *be_dev, const struct cat_func_s *cat, int len_index, int cnt)
705 {
706 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
707 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
708 
709 	if (cat->ver == 18 || cat->ver == 21) {
710 		cat_nthw_len_cnt(be->p_cat_nthw, 1);
711 
712 		for (int i = 0; i < cnt; i++) {
713 			cat_nthw_len_select(be->p_cat_nthw, len_index + i);
714 			cat_nthw_len_lower(be->p_cat_nthw, cat->v18.len[len_index + i].lower);
715 			cat_nthw_len_upper(be->p_cat_nthw, cat->v18.len[len_index + i].upper);
716 			cat_nthw_len_dyn1(be->p_cat_nthw, cat->v18.len[len_index + i].dyn1);
717 			cat_nthw_len_dyn2(be->p_cat_nthw, cat->v18.len[len_index + i].dyn2);
718 			cat_nthw_len_inv(be->p_cat_nthw, cat->v18.len[len_index + i].inv);
719 			cat_nthw_len_flush(be->p_cat_nthw);
720 		}
721 	}
722 
723 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
724 	return 0;
725 }
726 
727 static int cat_kcc_flush(void *be_dev, const struct cat_func_s *cat, int len_index, int cnt)
728 {
729 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
730 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
731 
732 	if (cat->ver == 18 || cat->ver == 21) {
733 		cat_nthw_kcc_cnt(be->p_cat_nthw, 1);
734 
735 		for (int i = 0; i < cnt; i++) {
736 			cat_nthw_kcc_select(be->p_cat_nthw, len_index + i);
737 			cat_nthw_kcc_key(be->p_cat_nthw, cat->v18.kcc_cam[len_index + i].key);
738 			cat_nthw_kcc_category(be->p_cat_nthw,
739 				cat->v18.kcc_cam[len_index + i].category);
740 			cat_nthw_kcc_id(be->p_cat_nthw, cat->v18.kcc_cam[len_index + i].id);
741 			cat_nthw_kcc_flush(be->p_cat_nthw);
742 		}
743 	}
744 
745 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
746 	return 0;
747 }
748 
749 /*
750  * KM
751  */
752 
753 static bool km_get_present(void *be_dev)
754 {
755 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
756 	return be->p_km_nthw != NULL;
757 }
758 
759 static uint32_t km_get_version(void *be_dev)
760 {
761 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
762 	return (uint32_t)((nthw_module_get_major_version(be->p_km_nthw->m_km) << 16) |
763 			(nthw_module_get_minor_version(be->p_km_nthw->m_km) & 0xffff));
764 }
765 
/*
 * Flush 'cnt' KM recipe (RCP) records, starting at index 'category', from
 * the shadow configuration in 'km' to the KM module. Only KM module
 * version 7 is handled; any other version is silently ignored.
 * Always returns 0.
 */
static int km_rcp_flush(void *be_dev, const struct km_func_s *km, int category, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;

	CHECK_DEBUG_ON(be, km, be->p_km_nthw);

	if (km->ver == 7) {
		/* one record is written per flush in the loop below */
		km_nthw_rcp_cnt(be->p_km_nthw, 1);

		for (int i = 0; i < cnt; i++) {
			km_nthw_rcp_select(be->p_km_nthw, category + i);
			/* qw*/dw* extractor word configuration (v7 layout) */
			km_nthw_rcp_qw0_dyn(be->p_km_nthw, km->v7.rcp[category + i].qw0_dyn);
			km_nthw_rcp_qw0_ofs(be->p_km_nthw, km->v7.rcp[category + i].qw0_ofs);
			km_nthw_rcp_qw0_sel_a(be->p_km_nthw, km->v7.rcp[category + i].qw0_sel_a);
			km_nthw_rcp_qw0_sel_b(be->p_km_nthw, km->v7.rcp[category + i].qw0_sel_b);
			km_nthw_rcp_qw4_dyn(be->p_km_nthw, km->v7.rcp[category + i].qw4_dyn);
			km_nthw_rcp_qw4_ofs(be->p_km_nthw, km->v7.rcp[category + i].qw4_ofs);
			km_nthw_rcp_qw4_sel_a(be->p_km_nthw, km->v7.rcp[category + i].qw4_sel_a);
			km_nthw_rcp_qw4_sel_b(be->p_km_nthw, km->v7.rcp[category + i].qw4_sel_b);
			km_nthw_rcp_dw8_dyn(be->p_km_nthw, km->v7.rcp[category + i].dw8_dyn);
			km_nthw_rcp_dw8_ofs(be->p_km_nthw, km->v7.rcp[category + i].dw8_ofs);
			km_nthw_rcp_dw8_sel_a(be->p_km_nthw, km->v7.rcp[category + i].dw8_sel_a);
			km_nthw_rcp_dw8_sel_b(be->p_km_nthw, km->v7.rcp[category + i].dw8_sel_b);
			km_nthw_rcp_dw10_dyn(be->p_km_nthw, km->v7.rcp[category + i].dw10_dyn);
			km_nthw_rcp_dw10_ofs(be->p_km_nthw, km->v7.rcp[category + i].dw10_ofs);
			km_nthw_rcp_dw10_sel_a(be->p_km_nthw, km->v7.rcp[category + i].dw10_sel_a);
			km_nthw_rcp_dw10_sel_b(be->p_km_nthw, km->v7.rcp[category + i].dw10_sel_b);
			km_nthw_rcp_swx_cch(be->p_km_nthw, km->v7.rcp[category + i].swx_cch);
			km_nthw_rcp_swx_sel_a(be->p_km_nthw, km->v7.rcp[category + i].swx_sel_a);
			km_nthw_rcp_swx_sel_b(be->p_km_nthw, km->v7.rcp[category + i].swx_sel_b);
			km_nthw_rcp_mask_da(be->p_km_nthw, km->v7.rcp[category + i].mask_d_a);
			km_nthw_rcp_mask_b(be->p_km_nthw, km->v7.rcp[category + i].mask_b);
			km_nthw_rcp_dual(be->p_km_nthw, km->v7.rcp[category + i].dual);
			km_nthw_rcp_paired(be->p_km_nthw, km->v7.rcp[category + i].paired);
			km_nthw_rcp_el_a(be->p_km_nthw, km->v7.rcp[category + i].el_a);
			km_nthw_rcp_el_b(be->p_km_nthw, km->v7.rcp[category + i].el_b);
			km_nthw_rcp_info_a(be->p_km_nthw, km->v7.rcp[category + i].info_a);
			km_nthw_rcp_info_b(be->p_km_nthw, km->v7.rcp[category + i].info_b);
			km_nthw_rcp_ftm_a(be->p_km_nthw, km->v7.rcp[category + i].ftm_a);
			km_nthw_rcp_ftm_b(be->p_km_nthw, km->v7.rcp[category + i].ftm_b);
			km_nthw_rcp_bank_a(be->p_km_nthw, km->v7.rcp[category + i].bank_a);
			km_nthw_rcp_bank_b(be->p_km_nthw, km->v7.rcp[category + i].bank_b);
			km_nthw_rcp_kl_a(be->p_km_nthw, km->v7.rcp[category + i].kl_a);
			km_nthw_rcp_kl_b(be->p_km_nthw, km->v7.rcp[category + i].kl_b);
			km_nthw_rcp_keyway_a(be->p_km_nthw, km->v7.rcp[category + i].keyway_a);
			km_nthw_rcp_keyway_b(be->p_km_nthw, km->v7.rcp[category + i].keyway_b);
			km_nthw_rcp_synergy_mode(be->p_km_nthw,
				km->v7.rcp[category + i].synergy_mode);
			km_nthw_rcp_dw0_b_dyn(be->p_km_nthw, km->v7.rcp[category + i].dw0_b_dyn);
			km_nthw_rcp_dw0_b_ofs(be->p_km_nthw, km->v7.rcp[category + i].dw0_b_ofs);
			km_nthw_rcp_dw2_b_dyn(be->p_km_nthw, km->v7.rcp[category + i].dw2_b_dyn);
			km_nthw_rcp_dw2_b_ofs(be->p_km_nthw, km->v7.rcp[category + i].dw2_b_ofs);
			km_nthw_rcp_sw4_b_dyn(be->p_km_nthw, km->v7.rcp[category + i].sw4_b_dyn);
			km_nthw_rcp_sw4_b_ofs(be->p_km_nthw, km->v7.rcp[category + i].sw4_b_ofs);
			km_nthw_rcp_sw5_b_dyn(be->p_km_nthw, km->v7.rcp[category + i].sw5_b_dyn);
			km_nthw_rcp_sw5_b_ofs(be->p_km_nthw, km->v7.rcp[category + i].sw5_b_ofs);
			km_nthw_rcp_flush(be->p_km_nthw);
		}
	}

	CHECK_DEBUG_OFF(km, be->p_km_nthw);
	return 0;
}
829 
830 static int km_cam_flush(void *be_dev, const struct km_func_s *km, int bank, int record, int cnt)
831 {
832 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
833 	CHECK_DEBUG_ON(be, km, be->p_km_nthw);
834 
835 	if (km->ver == 7) {
836 		km_nthw_cam_cnt(be->p_km_nthw, 1);
837 
838 		for (int i = 0; i < cnt; i++) {
839 			km_nthw_cam_select(be->p_km_nthw, (bank << 11) + record + i);
840 			km_nthw_cam_w0(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w0);
841 			km_nthw_cam_w1(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w1);
842 			km_nthw_cam_w2(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w2);
843 			km_nthw_cam_w3(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w3);
844 			km_nthw_cam_w4(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w4);
845 			km_nthw_cam_w5(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w5);
846 			km_nthw_cam_ft0(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft0);
847 			km_nthw_cam_ft1(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft1);
848 			km_nthw_cam_ft2(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft2);
849 			km_nthw_cam_ft3(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft3);
850 			km_nthw_cam_ft4(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft4);
851 			km_nthw_cam_ft5(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft5);
852 			km_nthw_cam_flush(be->p_km_nthw);
853 		}
854 	}
855 
856 	CHECK_DEBUG_OFF(km, be->p_km_nthw);
857 	return 0;
858 }
859 
860 static int km_tcam_flush(void *be_dev, const struct km_func_s *km, int bank, int byte, int value,
861 	int cnt)
862 {
863 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
864 	CHECK_DEBUG_ON(be, km, be->p_km_nthw);
865 
866 	if (km->ver == 7) {
867 		int start_idx = bank * 4 * 256 + byte * 256 + value;
868 		km_nthw_tcam_cnt(be->p_km_nthw, 1);
869 
870 		for (int i = 0; i < cnt; i++) {
871 			if (km->v7.tcam[start_idx + i].dirty) {
872 				km_nthw_tcam_select(be->p_km_nthw, start_idx + i);
873 				km_nthw_tcam_t(be->p_km_nthw, km->v7.tcam[start_idx + i].t);
874 				km_nthw_tcam_flush(be->p_km_nthw);
875 				km->v7.tcam[start_idx + i].dirty = 0;
876 			}
877 		}
878 	}
879 
880 	CHECK_DEBUG_OFF(km, be->p_km_nthw);
881 	return 0;
882 }
883 
884 /*
885  * bank is the TCAM bank, index is the index within the bank (0..71)
886  */
887 static int km_tci_flush(void *be_dev, const struct km_func_s *km, int bank, int index, int cnt)
888 {
889 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
890 	CHECK_DEBUG_ON(be, km, be->p_km_nthw);
891 
892 	if (km->ver == 7) {
893 		/* TCAM bank width in version 3 = 72 */
894 		km_nthw_tci_cnt(be->p_km_nthw, 1);
895 
896 		for (int i = 0; i < cnt; i++) {
897 			km_nthw_tci_select(be->p_km_nthw, bank * 72 + index + i);
898 			km_nthw_tci_color(be->p_km_nthw, km->v7.tci[bank * 72 + index + i].color);
899 			km_nthw_tci_ft(be->p_km_nthw, km->v7.tci[bank * 72 + index + i].ft);
900 			km_nthw_tci_flush(be->p_km_nthw);
901 		}
902 	}
903 
904 	CHECK_DEBUG_OFF(km, be->p_km_nthw);
905 	return 0;
906 }
907 
908 /*
909  * bank is the TCAM bank, index is the index within the bank (0..71)
910  */
911 static int km_tcq_flush(void *be_dev, const struct km_func_s *km, int bank, int index, int cnt)
912 {
913 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
914 	CHECK_DEBUG_ON(be, km, be->p_km_nthw);
915 
916 	if (km->ver == 7) {
917 		/* TCAM bank width in version 3 = 72 */
918 		km_nthw_tcq_cnt(be->p_km_nthw, 1);
919 
920 		for (int i = 0; i < cnt; i++) {
921 			/* adr = lover 4 bits = bank, upper 7 bits = index */
922 			km_nthw_tcq_select(be->p_km_nthw, bank + (index << 4) + i);
923 			km_nthw_tcq_bank_mask(be->p_km_nthw,
924 				km->v7.tcq[bank + (index << 4) + i].bank_mask);
925 			km_nthw_tcq_qual(be->p_km_nthw, km->v7.tcq[bank + (index << 4) + i].qual);
926 			km_nthw_tcq_flush(be->p_km_nthw);
927 		}
928 	}
929 
930 	CHECK_DEBUG_OFF(km, be->p_km_nthw);
931 	return 0;
932 }
933 
934 /*
935  * FLM
936  */
937 
938 static bool flm_get_present(void *be_dev)
939 {
940 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
941 	return be->p_flm_nthw != NULL;
942 }
943 
944 static uint32_t flm_get_version(void *be_dev)
945 {
946 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
947 	return (uint32_t)((nthw_module_get_major_version(be->p_flm_nthw->m_flm) << 16) |
948 			(nthw_module_get_minor_version(be->p_flm_nthw->m_flm) & 0xffff));
949 }
950 
951 static int flm_control_flush(void *be_dev, const struct flm_func_s *flm)
952 {
953 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
954 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
955 
956 	if (flm->ver >= 25) {
957 		flm_nthw_control_enable(be->p_flm_nthw, flm->v25.control->enable);
958 		flm_nthw_control_init(be->p_flm_nthw, flm->v25.control->init);
959 		flm_nthw_control_lds(be->p_flm_nthw, flm->v25.control->lds);
960 		flm_nthw_control_lfs(be->p_flm_nthw, flm->v25.control->lfs);
961 		flm_nthw_control_lis(be->p_flm_nthw, flm->v25.control->lis);
962 		flm_nthw_control_uds(be->p_flm_nthw, flm->v25.control->uds);
963 		flm_nthw_control_uis(be->p_flm_nthw, flm->v25.control->uis);
964 		flm_nthw_control_rds(be->p_flm_nthw, flm->v25.control->rds);
965 		flm_nthw_control_ris(be->p_flm_nthw, flm->v25.control->ris);
966 		flm_nthw_control_pds(be->p_flm_nthw, flm->v25.control->pds);
967 		flm_nthw_control_pis(be->p_flm_nthw, flm->v25.control->pis);
968 		flm_nthw_control_crcwr(be->p_flm_nthw, flm->v25.control->crcwr);
969 		flm_nthw_control_crcrd(be->p_flm_nthw, flm->v25.control->crcrd);
970 		flm_nthw_control_rbl(be->p_flm_nthw, flm->v25.control->rbl);
971 		flm_nthw_control_eab(be->p_flm_nthw, flm->v25.control->eab);
972 		flm_nthw_control_split_sdram_usage(be->p_flm_nthw,
973 			flm->v25.control->split_sdram_usage);
974 		flm_nthw_control_flush(be->p_flm_nthw);
975 	}
976 
977 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
978 	return 0;
979 }
980 
981 static int flm_status_flush(void *be_dev, const struct flm_func_s *flm)
982 {
983 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
984 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
985 
986 	if (flm->ver >= 25) {
987 		/* CALIBDONE, INITDONE, IDLE, and EFT_BP is read only */
988 		flm_nthw_status_critical(be->p_flm_nthw, &flm->v25.status->critical, 0);
989 		flm_nthw_status_panic(be->p_flm_nthw, &flm->v25.status->panic, 0);
990 		flm_nthw_status_crcerr(be->p_flm_nthw, &flm->v25.status->crcerr, 0);
991 		flm_nthw_status_cache_buf_crit(be->p_flm_nthw,
992 			&flm->v25.status->cache_buf_critical, 0);
993 		flm_nthw_status_flush(be->p_flm_nthw);
994 	}
995 
996 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
997 	return 0;
998 }
999 
1000 static int flm_status_update(void *be_dev, const struct flm_func_s *flm)
1001 {
1002 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1003 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1004 
1005 	if (flm->ver >= 25) {
1006 		flm_nthw_status_update(be->p_flm_nthw);
1007 		flm_nthw_status_calib_success(be->p_flm_nthw, &flm->v25.status->calib_success, 1);
1008 		flm_nthw_status_calib_fail(be->p_flm_nthw, &flm->v25.status->calib_fail, 1);
1009 		flm_nthw_status_initdone(be->p_flm_nthw, &flm->v25.status->initdone, 1);
1010 		flm_nthw_status_idle(be->p_flm_nthw, &flm->v25.status->idle, 1);
1011 		flm_nthw_status_critical(be->p_flm_nthw, &flm->v25.status->critical, 1);
1012 		flm_nthw_status_panic(be->p_flm_nthw, &flm->v25.status->panic, 1);
1013 		flm_nthw_status_crcerr(be->p_flm_nthw, &flm->v25.status->crcerr, 1);
1014 		flm_nthw_status_eft_bp(be->p_flm_nthw, &flm->v25.status->eft_bp, 1);
1015 		flm_nthw_status_cache_buf_crit(be->p_flm_nthw,
1016 			&flm->v25.status->cache_buf_critical, 1);
1017 	}
1018 
1019 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1020 	return 0;
1021 }
1022 
1023 static int flm_scan_flush(void *be_dev, const struct flm_func_s *flm)
1024 {
1025 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1026 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1027 
1028 	if (flm->ver >= 25) {
1029 		flm_nthw_scan_i(be->p_flm_nthw, flm->v25.scan->i);
1030 		flm_nthw_scan_flush(be->p_flm_nthw);
1031 	}
1032 
1033 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1034 	return 0;
1035 }
1036 
1037 static int flm_load_bin_flush(void *be_dev, const struct flm_func_s *flm)
1038 {
1039 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1040 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1041 
1042 	if (flm->ver >= 25) {
1043 		flm_nthw_load_bin(be->p_flm_nthw, flm->v25.load_bin->bin);
1044 		flm_nthw_load_bin_flush(be->p_flm_nthw);
1045 	}
1046 
1047 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1048 	return 0;
1049 }
1050 
1051 static int flm_prio_flush(void *be_dev, const struct flm_func_s *flm)
1052 {
1053 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1054 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1055 
1056 	if (flm->ver >= 25) {
1057 		flm_nthw_prio_limit0(be->p_flm_nthw, flm->v25.prio->limit0);
1058 		flm_nthw_prio_ft0(be->p_flm_nthw, flm->v25.prio->ft0);
1059 		flm_nthw_prio_limit1(be->p_flm_nthw, flm->v25.prio->limit1);
1060 		flm_nthw_prio_ft1(be->p_flm_nthw, flm->v25.prio->ft1);
1061 		flm_nthw_prio_limit2(be->p_flm_nthw, flm->v25.prio->limit2);
1062 		flm_nthw_prio_ft2(be->p_flm_nthw, flm->v25.prio->ft2);
1063 		flm_nthw_prio_limit3(be->p_flm_nthw, flm->v25.prio->limit3);
1064 		flm_nthw_prio_ft3(be->p_flm_nthw, flm->v25.prio->ft3);
1065 		flm_nthw_prio_flush(be->p_flm_nthw);
1066 	}
1067 
1068 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1069 	return 0;
1070 }
1071 
1072 static int flm_pst_flush(void *be_dev, const struct flm_func_s *flm, int index, int cnt)
1073 {
1074 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1075 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1076 
1077 	if (flm->ver >= 25) {
1078 		flm_nthw_pst_cnt(be->p_flm_nthw, 1);
1079 
1080 		for (int i = 0; i < cnt; i++) {
1081 			flm_nthw_pst_select(be->p_flm_nthw, index + i);
1082 			flm_nthw_pst_bp(be->p_flm_nthw, flm->v25.pst[index + i].bp);
1083 			flm_nthw_pst_pp(be->p_flm_nthw, flm->v25.pst[index + i].pp);
1084 			flm_nthw_pst_tp(be->p_flm_nthw, flm->v25.pst[index + i].tp);
1085 			flm_nthw_pst_flush(be->p_flm_nthw);
1086 		}
1087 	}
1088 
1089 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1090 	return 0;
1091 }
1092 
1093 static int flm_rcp_flush(void *be_dev, const struct flm_func_s *flm, int index, int cnt)
1094 {
1095 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1096 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1097 
1098 	if (flm->ver >= 25) {
1099 		flm_nthw_rcp_cnt(be->p_flm_nthw, 1);
1100 
1101 		for (int i = 0; i < cnt; i++) {
1102 			flm_nthw_rcp_select(be->p_flm_nthw, index + i);
1103 			flm_nthw_rcp_lookup(be->p_flm_nthw, flm->v25.rcp[index + i].lookup);
1104 			flm_nthw_rcp_qw0_dyn(be->p_flm_nthw, flm->v25.rcp[index + i].qw0_dyn);
1105 			flm_nthw_rcp_qw0_ofs(be->p_flm_nthw, flm->v25.rcp[index + i].qw0_ofs);
1106 			flm_nthw_rcp_qw0_sel(be->p_flm_nthw, flm->v25.rcp[index + i].qw0_sel);
1107 			flm_nthw_rcp_qw4_dyn(be->p_flm_nthw, flm->v25.rcp[index + i].qw4_dyn);
1108 			flm_nthw_rcp_qw4_ofs(be->p_flm_nthw, flm->v25.rcp[index + i].qw4_ofs);
1109 			flm_nthw_rcp_sw8_dyn(be->p_flm_nthw, flm->v25.rcp[index + i].sw8_dyn);
1110 			flm_nthw_rcp_sw8_ofs(be->p_flm_nthw, flm->v25.rcp[index + i].sw8_ofs);
1111 			flm_nthw_rcp_sw8_sel(be->p_flm_nthw, flm->v25.rcp[index + i].sw8_sel);
1112 			flm_nthw_rcp_sw9_dyn(be->p_flm_nthw, flm->v25.rcp[index + i].sw9_dyn);
1113 			flm_nthw_rcp_sw9_ofs(be->p_flm_nthw, flm->v25.rcp[index + i].sw9_ofs);
1114 			flm_nthw_rcp_mask(be->p_flm_nthw, flm->v25.rcp[index + i].mask);
1115 			flm_nthw_rcp_kid(be->p_flm_nthw, flm->v25.rcp[index + i].kid);
1116 			flm_nthw_rcp_opn(be->p_flm_nthw, flm->v25.rcp[index + i].opn);
1117 			flm_nthw_rcp_ipn(be->p_flm_nthw, flm->v25.rcp[index + i].ipn);
1118 			flm_nthw_rcp_byt_dyn(be->p_flm_nthw, flm->v25.rcp[index + i].byt_dyn);
1119 			flm_nthw_rcp_byt_ofs(be->p_flm_nthw, flm->v25.rcp[index + i].byt_ofs);
1120 			flm_nthw_rcp_txplm(be->p_flm_nthw, flm->v25.rcp[index + i].txplm);
1121 			flm_nthw_rcp_auto_ipv4_mask(be->p_flm_nthw,
1122 				flm->v25.rcp[index + i].auto_ipv4_mask);
1123 			flm_nthw_rcp_flush(be->p_flm_nthw);
1124 		}
1125 	}
1126 
1127 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1128 	return 0;
1129 }
1130 
1131 static int flm_scrub_flush(void *be_dev, const struct flm_func_s *flm, int index, int cnt)
1132 {
1133 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1134 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1135 
1136 	if (flm->ver >= 25) {
1137 		flm_nthw_scrub_cnt(be->p_flm_nthw, 1);
1138 
1139 		for (int i = 0; i < cnt; i++) {
1140 			flm_nthw_scrub_select(be->p_flm_nthw, index + i);
1141 			flm_nthw_scrub_t(be->p_flm_nthw, flm->v25.scrub[index + i].t);
1142 			flm_nthw_scrub_r(be->p_flm_nthw, flm->v25.scrub[index + i].r);
1143 			flm_nthw_scrub_del(be->p_flm_nthw, flm->v25.scrub[index + i].del);
1144 			flm_nthw_scrub_inf(be->p_flm_nthw, flm->v25.scrub[index + i].inf);
1145 			flm_nthw_scrub_flush(be->p_flm_nthw);
1146 		}
1147 	}
1148 
1149 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1150 	return 0;
1151 }
1152 
1153 static int flm_buf_ctrl_update(void *be_dev, const struct flm_func_s *flm)
1154 {
1155 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1156 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1157 
1158 	if (flm->ver >= 25) {
1159 		flm_nthw_buf_ctrl_update(be->p_flm_nthw,
1160 			&flm->v25.buf_ctrl->lrn_free,
1161 			&flm->v25.buf_ctrl->inf_avail,
1162 			&flm->v25.buf_ctrl->sta_avail);
1163 	}
1164 
1165 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1166 	return 0;
1167 }
1168 
1169 static int flm_stat_update(void *be_dev, const struct flm_func_s *flm)
1170 {
1171 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1172 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1173 
1174 	if (flm->ver >= 25) {
1175 		flm_nthw_stat_lrn_done_update(be->p_flm_nthw);
1176 		flm_nthw_stat_lrn_ignore_update(be->p_flm_nthw);
1177 		flm_nthw_stat_lrn_fail_update(be->p_flm_nthw);
1178 		flm_nthw_stat_unl_done_update(be->p_flm_nthw);
1179 		flm_nthw_stat_unl_ignore_update(be->p_flm_nthw);
1180 		flm_nthw_stat_rel_done_update(be->p_flm_nthw);
1181 		flm_nthw_stat_rel_ignore_update(be->p_flm_nthw);
1182 		flm_nthw_stat_aul_done_update(be->p_flm_nthw);
1183 		flm_nthw_stat_aul_ignore_update(be->p_flm_nthw);
1184 		flm_nthw_stat_aul_fail_update(be->p_flm_nthw);
1185 		flm_nthw_stat_tul_done_update(be->p_flm_nthw);
1186 		flm_nthw_stat_flows_update(be->p_flm_nthw);
1187 		flm_nthw_load_lps_update(be->p_flm_nthw);
1188 		flm_nthw_load_aps_update(be->p_flm_nthw);
1189 
1190 		flm_nthw_stat_lrn_done_cnt(be->p_flm_nthw, &flm->v25.lrn_done->cnt, 1);
1191 		flm_nthw_stat_lrn_ignore_cnt(be->p_flm_nthw, &flm->v25.lrn_ignore->cnt, 1);
1192 		flm_nthw_stat_lrn_fail_cnt(be->p_flm_nthw, &flm->v25.lrn_fail->cnt, 1);
1193 		flm_nthw_stat_unl_done_cnt(be->p_flm_nthw, &flm->v25.unl_done->cnt, 1);
1194 		flm_nthw_stat_unl_ignore_cnt(be->p_flm_nthw, &flm->v25.unl_ignore->cnt, 1);
1195 		flm_nthw_stat_rel_done_cnt(be->p_flm_nthw, &flm->v25.rel_done->cnt, 1);
1196 		flm_nthw_stat_rel_ignore_cnt(be->p_flm_nthw, &flm->v25.rel_ignore->cnt, 1);
1197 		flm_nthw_stat_aul_done_cnt(be->p_flm_nthw, &flm->v25.aul_done->cnt, 1);
1198 		flm_nthw_stat_aul_ignore_cnt(be->p_flm_nthw, &flm->v25.aul_ignore->cnt, 1);
1199 		flm_nthw_stat_aul_fail_cnt(be->p_flm_nthw, &flm->v25.aul_fail->cnt, 1);
1200 		flm_nthw_stat_tul_done_cnt(be->p_flm_nthw, &flm->v25.tul_done->cnt, 1);
1201 		flm_nthw_stat_flows_cnt(be->p_flm_nthw, &flm->v25.flows->cnt, 1);
1202 
1203 		flm_nthw_stat_prb_done_update(be->p_flm_nthw);
1204 		flm_nthw_stat_prb_ignore_update(be->p_flm_nthw);
1205 		flm_nthw_stat_prb_done_cnt(be->p_flm_nthw, &flm->v25.prb_done->cnt, 1);
1206 		flm_nthw_stat_prb_ignore_cnt(be->p_flm_nthw, &flm->v25.prb_ignore->cnt, 1);
1207 
1208 		flm_nthw_load_lps_cnt(be->p_flm_nthw, &flm->v25.load_lps->lps, 1);
1209 		flm_nthw_load_aps_cnt(be->p_flm_nthw, &flm->v25.load_aps->aps, 1);
1210 	}
1211 
1212 	if (flm->ver >= 25) {
1213 		flm_nthw_stat_sta_done_update(be->p_flm_nthw);
1214 		flm_nthw_stat_inf_done_update(be->p_flm_nthw);
1215 		flm_nthw_stat_inf_skip_update(be->p_flm_nthw);
1216 		flm_nthw_stat_pck_hit_update(be->p_flm_nthw);
1217 		flm_nthw_stat_pck_miss_update(be->p_flm_nthw);
1218 		flm_nthw_stat_pck_unh_update(be->p_flm_nthw);
1219 		flm_nthw_stat_pck_dis_update(be->p_flm_nthw);
1220 		flm_nthw_stat_csh_hit_update(be->p_flm_nthw);
1221 		flm_nthw_stat_csh_miss_update(be->p_flm_nthw);
1222 		flm_nthw_stat_csh_unh_update(be->p_flm_nthw);
1223 		flm_nthw_stat_cuc_start_update(be->p_flm_nthw);
1224 		flm_nthw_stat_cuc_move_update(be->p_flm_nthw);
1225 
1226 		flm_nthw_stat_sta_done_cnt(be->p_flm_nthw, &flm->v25.sta_done->cnt, 1);
1227 		flm_nthw_stat_inf_done_cnt(be->p_flm_nthw, &flm->v25.inf_done->cnt, 1);
1228 		flm_nthw_stat_inf_skip_cnt(be->p_flm_nthw, &flm->v25.inf_skip->cnt, 1);
1229 		flm_nthw_stat_pck_hit_cnt(be->p_flm_nthw, &flm->v25.pck_hit->cnt, 1);
1230 		flm_nthw_stat_pck_miss_cnt(be->p_flm_nthw, &flm->v25.pck_miss->cnt, 1);
1231 		flm_nthw_stat_pck_unh_cnt(be->p_flm_nthw, &flm->v25.pck_unh->cnt, 1);
1232 		flm_nthw_stat_pck_dis_cnt(be->p_flm_nthw, &flm->v25.pck_dis->cnt, 1);
1233 		flm_nthw_stat_csh_hit_cnt(be->p_flm_nthw, &flm->v25.csh_hit->cnt, 1);
1234 		flm_nthw_stat_csh_miss_cnt(be->p_flm_nthw, &flm->v25.csh_miss->cnt, 1);
1235 		flm_nthw_stat_csh_unh_cnt(be->p_flm_nthw, &flm->v25.csh_unh->cnt, 1);
1236 		flm_nthw_stat_cuc_start_cnt(be->p_flm_nthw, &flm->v25.cuc_start->cnt, 1);
1237 		flm_nthw_stat_cuc_move_cnt(be->p_flm_nthw, &flm->v25.cuc_move->cnt, 1);
1238 	}
1239 
1240 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1241 	return 0;
1242 }
1243 
1244 static int flm_lrn_data_flush(void *be_dev, const struct flm_func_s *flm, const uint32_t *lrn_data,
1245 	uint32_t records, uint32_t *handled_records,
1246 	uint32_t words_per_record, uint32_t *inf_word_cnt,
1247 	uint32_t *sta_word_cnt)
1248 {
1249 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1250 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1251 
1252 	int ret = flm_nthw_lrn_data_flush(be->p_flm_nthw, lrn_data, records, words_per_record,
1253 			handled_records, &flm->v25.buf_ctrl->lrn_free,
1254 			&flm->v25.buf_ctrl->inf_avail,
1255 			&flm->v25.buf_ctrl->sta_avail);
1256 
1257 	*inf_word_cnt = flm->v25.buf_ctrl->inf_avail;
1258 	*sta_word_cnt = flm->v25.buf_ctrl->sta_avail;
1259 
1260 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1261 	return ret;
1262 }
1263 
1264 static int flm_inf_sta_data_update(void *be_dev, const struct flm_func_s *flm, uint32_t *inf_data,
1265 	uint32_t inf_size, uint32_t *inf_word_cnt, uint32_t *sta_data,
1266 	uint32_t sta_size, uint32_t *sta_word_cnt)
1267 {
1268 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1269 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1270 
1271 	int ret = flm_nthw_inf_sta_data_update(be->p_flm_nthw, inf_data, inf_size, sta_data,
1272 			sta_size, &flm->v25.buf_ctrl->lrn_free,
1273 			&flm->v25.buf_ctrl->inf_avail,
1274 			&flm->v25.buf_ctrl->sta_avail);
1275 
1276 	*inf_word_cnt = flm->v25.buf_ctrl->inf_avail;
1277 	*sta_word_cnt = flm->v25.buf_ctrl->sta_avail;
1278 
1279 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1280 	return ret;
1281 }
1282 
1283 /*
1284  * HSH
1285  */
1286 
1287 static bool hsh_get_present(void *be_dev)
1288 {
1289 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1290 	return be->p_hsh_nthw != NULL;
1291 }
1292 
1293 static uint32_t hsh_get_version(void *be_dev)
1294 {
1295 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1296 	return (uint32_t)((nthw_module_get_major_version(be->p_hsh_nthw->m_hsh) << 16) |
1297 			(nthw_module_get_minor_version(be->p_hsh_nthw->m_hsh) & 0xffff));
1298 }
1299 
1300 static int hsh_rcp_flush(void *be_dev, const struct hsh_func_s *hsh, int category, int cnt)
1301 {
1302 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1303 	CHECK_DEBUG_ON(be, hsh, be->p_hsh_nthw);
1304 
1305 	if (hsh->ver == 5) {
1306 		hsh_nthw_rcp_cnt(be->p_hsh_nthw, 1);
1307 
1308 		for (int i = 0; i < cnt; i++) {
1309 			hsh_nthw_rcp_select(be->p_hsh_nthw, category + i);
1310 			hsh_nthw_rcp_load_dist_type(be->p_hsh_nthw,
1311 				hsh->v5.rcp[category + i].load_dist_type);
1312 			hsh_nthw_rcp_mac_port_mask(be->p_hsh_nthw,
1313 				hsh->v5.rcp[category + i].mac_port_mask);
1314 			hsh_nthw_rcp_sort(be->p_hsh_nthw, hsh->v5.rcp[category + i].sort);
1315 			hsh_nthw_rcp_qw0_pe(be->p_hsh_nthw, hsh->v5.rcp[category + i].qw0_pe);
1316 			hsh_nthw_rcp_qw0_ofs(be->p_hsh_nthw, hsh->v5.rcp[category + i].qw0_ofs);
1317 			hsh_nthw_rcp_qw4_pe(be->p_hsh_nthw, hsh->v5.rcp[category + i].qw4_pe);
1318 			hsh_nthw_rcp_qw4_ofs(be->p_hsh_nthw, hsh->v5.rcp[category + i].qw4_ofs);
1319 			hsh_nthw_rcp_w8_pe(be->p_hsh_nthw, hsh->v5.rcp[category + i].w8_pe);
1320 			hsh_nthw_rcp_w8_ofs(be->p_hsh_nthw, hsh->v5.rcp[category + i].w8_ofs);
1321 			hsh_nthw_rcp_w8_sort(be->p_hsh_nthw, hsh->v5.rcp[category + i].w8_sort);
1322 			hsh_nthw_rcp_w9_pe(be->p_hsh_nthw, hsh->v5.rcp[category + i].w9_pe);
1323 			hsh_nthw_rcp_w9_ofs(be->p_hsh_nthw, hsh->v5.rcp[category + i].w9_ofs);
1324 			hsh_nthw_rcp_w9_sort(be->p_hsh_nthw, hsh->v5.rcp[category + i].w9_sort);
1325 			hsh_nthw_rcp_w9_p(be->p_hsh_nthw, hsh->v5.rcp[category + i].w9_p);
1326 			hsh_nthw_rcp_p_mask(be->p_hsh_nthw, hsh->v5.rcp[category + i].p_mask);
1327 			hsh_nthw_rcp_word_mask(be->p_hsh_nthw,
1328 				hsh->v5.rcp[category + i].word_mask);
1329 			hsh_nthw_rcp_seed(be->p_hsh_nthw, hsh->v5.rcp[category + i].seed);
1330 			hsh_nthw_rcp_tnl_p(be->p_hsh_nthw, hsh->v5.rcp[category + i].tnl_p);
1331 			hsh_nthw_rcp_hsh_valid(be->p_hsh_nthw,
1332 				hsh->v5.rcp[category + i].hsh_valid);
1333 			hsh_nthw_rcp_hsh_type(be->p_hsh_nthw, hsh->v5.rcp[category + i].hsh_type);
1334 			hsh_nthw_rcp_toeplitz(be->p_hsh_nthw, hsh->v5.rcp[category + i].toeplitz);
1335 			hsh_nthw_rcp_k(be->p_hsh_nthw, hsh->v5.rcp[category + i].k);
1336 			hsh_nthw_rcp_auto_ipv4_mask(be->p_hsh_nthw,
1337 				hsh->v5.rcp[category + i].auto_ipv4_mask);
1338 			hsh_nthw_rcp_flush(be->p_hsh_nthw);
1339 		}
1340 	}
1341 
1342 	CHECK_DEBUG_OFF(hsh, be->p_hsh_nthw);
1343 	return 0;
1344 }
1345 
1346 /*
1347  * QSL
1348  */
1349 
1350 static bool qsl_get_present(void *be_dev)
1351 {
1352 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1353 	return be->p_qsl_nthw != NULL;
1354 }
1355 
1356 static uint32_t qsl_get_version(void *be_dev)
1357 {
1358 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1359 	return (uint32_t)((nthw_module_get_major_version(be->p_qsl_nthw->m_qsl) << 16) |
1360 			(nthw_module_get_minor_version(be->p_qsl_nthw->m_qsl) & 0xffff));
1361 }
1362 
1363 static int qsl_rcp_flush(void *be_dev, const struct qsl_func_s *qsl, int category, int cnt)
1364 {
1365 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1366 	CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
1367 
1368 	if (qsl->ver == 7) {
1369 		qsl_nthw_rcp_cnt(be->p_qsl_nthw, 1);
1370 
1371 		for (int i = 0; i < cnt; i++) {
1372 			qsl_nthw_rcp_select(be->p_qsl_nthw, category + i);
1373 			qsl_nthw_rcp_discard(be->p_qsl_nthw, qsl->v7.rcp[category + i].discard);
1374 			qsl_nthw_rcp_drop(be->p_qsl_nthw, qsl->v7.rcp[category + i].drop);
1375 			qsl_nthw_rcp_tbl_lo(be->p_qsl_nthw, qsl->v7.rcp[category + i].tbl_lo);
1376 			qsl_nthw_rcp_tbl_hi(be->p_qsl_nthw, qsl->v7.rcp[category + i].tbl_hi);
1377 			qsl_nthw_rcp_tbl_idx(be->p_qsl_nthw, qsl->v7.rcp[category + i].tbl_idx);
1378 			qsl_nthw_rcp_tbl_msk(be->p_qsl_nthw, qsl->v7.rcp[category + i].tbl_msk);
1379 			qsl_nthw_rcp_lr(be->p_qsl_nthw, qsl->v7.rcp[category + i].lr);
1380 			qsl_nthw_rcp_tsa(be->p_qsl_nthw, qsl->v7.rcp[category + i].tsa);
1381 			qsl_nthw_rcp_vli(be->p_qsl_nthw, qsl->v7.rcp[category + i].vli);
1382 			qsl_nthw_rcp_flush(be->p_qsl_nthw);
1383 		}
1384 	}
1385 
1386 	CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
1387 	return 0;
1388 }
1389 
1390 static int qsl_qst_flush(void *be_dev, const struct qsl_func_s *qsl, int entry, int cnt)
1391 {
1392 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1393 	CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
1394 
1395 	if (qsl->ver == 7) {
1396 		qsl_nthw_qst_cnt(be->p_qsl_nthw, 1);
1397 
1398 		for (int i = 0; i < cnt; i++) {
1399 			qsl_nthw_qst_select(be->p_qsl_nthw, entry + i);
1400 			qsl_nthw_qst_queue(be->p_qsl_nthw, qsl->v7.qst[entry + i].queue);
1401 			qsl_nthw_qst_en(be->p_qsl_nthw, qsl->v7.qst[entry + i].en);
1402 
1403 			qsl_nthw_qst_tx_port(be->p_qsl_nthw, qsl->v7.qst[entry + i].tx_port);
1404 			qsl_nthw_qst_lre(be->p_qsl_nthw, qsl->v7.qst[entry + i].lre);
1405 			qsl_nthw_qst_tci(be->p_qsl_nthw, qsl->v7.qst[entry + i].tci);
1406 			qsl_nthw_qst_ven(be->p_qsl_nthw, qsl->v7.qst[entry + i].ven);
1407 			qsl_nthw_qst_flush(be->p_qsl_nthw);
1408 		}
1409 	}
1410 
1411 	CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
1412 	return 0;
1413 }
1414 
1415 static int qsl_qen_flush(void *be_dev, const struct qsl_func_s *qsl, int entry, int cnt)
1416 {
1417 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1418 	CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
1419 
1420 	if (qsl->ver == 7) {
1421 		qsl_nthw_qen_cnt(be->p_qsl_nthw, 1);
1422 
1423 		for (int i = 0; i < cnt; i++) {
1424 			qsl_nthw_qen_select(be->p_qsl_nthw, entry + i);
1425 			qsl_nthw_qen_en(be->p_qsl_nthw, qsl->v7.qen[entry + i].en);
1426 			qsl_nthw_qen_flush(be->p_qsl_nthw);
1427 		}
1428 	}
1429 
1430 	CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
1431 	return 0;
1432 }
1433 
1434 static int qsl_unmq_flush(void *be_dev, const struct qsl_func_s *qsl, int entry, int cnt)
1435 {
1436 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1437 	CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
1438 
1439 	if (qsl->ver == 7) {
1440 		qsl_nthw_unmq_cnt(be->p_qsl_nthw, 1);
1441 
1442 		for (int i = 0; i < cnt; i++) {
1443 			qsl_nthw_unmq_select(be->p_qsl_nthw, entry + i);
1444 			qsl_nthw_unmq_dest_queue(be->p_qsl_nthw,
1445 				qsl->v7.unmq[entry + i].dest_queue);
1446 			qsl_nthw_unmq_en(be->p_qsl_nthw, qsl->v7.unmq[entry + i].en);
1447 			qsl_nthw_unmq_flush(be->p_qsl_nthw);
1448 		}
1449 	}
1450 
1451 	CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
1452 	return 0;
1453 }
1454 
1455 /*
1456  * SLC LR
1457  */
1458 
1459 static bool slc_lr_get_present(void *be_dev)
1460 {
1461 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1462 	return be->p_slc_lr_nthw != NULL;
1463 }
1464 
1465 static uint32_t slc_lr_get_version(void *be_dev)
1466 {
1467 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1468 	return (uint32_t)((nthw_module_get_major_version(be->p_slc_lr_nthw->m_slc_lr) << 16) |
1469 			(nthw_module_get_minor_version(be->p_slc_lr_nthw->m_slc_lr) & 0xffff));
1470 }
1471 
1472 static int slc_lr_rcp_flush(void *be_dev, const struct slc_lr_func_s *slc_lr, int category,
1473 	int cnt)
1474 {
1475 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1476 	CHECK_DEBUG_ON(be, slc_lr, be->p_slc_lr_nthw);
1477 
1478 	if (slc_lr->ver == 2) {
1479 		slc_lr_nthw_rcp_cnt(be->p_slc_lr_nthw, 1);
1480 
1481 		for (int i = 0; i < cnt; i++) {
1482 			slc_lr_nthw_rcp_select(be->p_slc_lr_nthw, category + i);
1483 			slc_lr_nthw_rcp_head_slc_en(be->p_slc_lr_nthw,
1484 				slc_lr->v2.rcp[category + i].head_slc_en);
1485 			slc_lr_nthw_rcp_head_dyn(be->p_slc_lr_nthw,
1486 				slc_lr->v2.rcp[category + i].head_dyn);
1487 			slc_lr_nthw_rcp_head_ofs(be->p_slc_lr_nthw,
1488 				slc_lr->v2.rcp[category + i].head_ofs);
1489 			slc_lr_nthw_rcp_tail_slc_en(be->p_slc_lr_nthw,
1490 				slc_lr->v2.rcp[category + i].tail_slc_en);
1491 			slc_lr_nthw_rcp_tail_dyn(be->p_slc_lr_nthw,
1492 				slc_lr->v2.rcp[category + i].tail_dyn);
1493 			slc_lr_nthw_rcp_tail_ofs(be->p_slc_lr_nthw,
1494 				slc_lr->v2.rcp[category + i].tail_ofs);
1495 			slc_lr_nthw_rcp_pcap(be->p_slc_lr_nthw, slc_lr->v2.rcp[category + i].pcap);
1496 			slc_lr_nthw_rcp_flush(be->p_slc_lr_nthw);
1497 		}
1498 	}
1499 
1500 	CHECK_DEBUG_OFF(slc_lr, be->p_slc_lr_nthw);
1501 	return 0;
1502 }
1503 
1504 /*
1505  * PDB
1506  */
1507 
1508 static bool pdb_get_present(void *be_dev)
1509 {
1510 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1511 	return be->p_pdb_nthw != NULL;
1512 }
1513 
1514 static uint32_t pdb_get_version(void *be_dev)
1515 {
1516 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1517 	return (uint32_t)((nthw_module_get_major_version(be->p_pdb_nthw->m_pdb) << 16) |
1518 			(nthw_module_get_minor_version(be->p_pdb_nthw->m_pdb) & 0xffff));
1519 }
1520 
1521 static int pdb_rcp_flush(void *be_dev, const struct pdb_func_s *pdb, int category, int cnt)
1522 {
1523 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1524 	CHECK_DEBUG_ON(be, pdb, be->p_pdb_nthw);
1525 
1526 	if (pdb->ver == 9) {
1527 		pdb_nthw_rcp_cnt(be->p_pdb_nthw, 1);
1528 
1529 		for (int i = 0; i < cnt; i++) {
1530 			pdb_nthw_rcp_select(be->p_pdb_nthw, category + i);
1531 			pdb_nthw_rcp_descriptor(be->p_pdb_nthw,
1532 				pdb->v9.rcp[category + i].descriptor);
1533 			pdb_nthw_rcp_desc_len(be->p_pdb_nthw, pdb->v9.rcp[category + i].desc_len);
1534 			pdb_nthw_rcp_tx_port(be->p_pdb_nthw, pdb->v9.rcp[category + i].tx_port);
1535 			pdb_nthw_rcp_tx_ignore(be->p_pdb_nthw,
1536 				pdb->v9.rcp[category + i].tx_ignore);
1537 			pdb_nthw_rcp_tx_now(be->p_pdb_nthw, pdb->v9.rcp[category + i].tx_now);
1538 			pdb_nthw_rcp_crc_overwrite(be->p_pdb_nthw,
1539 				pdb->v9.rcp[category + i].crc_overwrite);
1540 			pdb_nthw_rcp_align(be->p_pdb_nthw, pdb->v9.rcp[category + i].align);
1541 			pdb_nthw_rcp_ofs0_dyn(be->p_pdb_nthw, pdb->v9.rcp[category + i].ofs0_dyn);
1542 			pdb_nthw_rcp_ofs0_rel(be->p_pdb_nthw, pdb->v9.rcp[category + i].ofs0_rel);
1543 			pdb_nthw_rcp_ofs1_dyn(be->p_pdb_nthw, pdb->v9.rcp[category + i].ofs1_dyn);
1544 			pdb_nthw_rcp_ofs1_rel(be->p_pdb_nthw, pdb->v9.rcp[category + i].ofs1_rel);
1545 			pdb_nthw_rcp_ofs2_dyn(be->p_pdb_nthw, pdb->v9.rcp[category + i].ofs2_dyn);
1546 			pdb_nthw_rcp_ofs2_rel(be->p_pdb_nthw, pdb->v9.rcp[category + i].ofs2_rel);
1547 			pdb_nthw_rcp_ip_prot_tnl(be->p_pdb_nthw,
1548 				pdb->v9.rcp[category + i].ip_prot_tnl);
1549 			pdb_nthw_rcp_ppc_hsh(be->p_pdb_nthw, pdb->v9.rcp[category + i].ppc_hsh);
1550 			pdb_nthw_rcp_duplicate_en(be->p_pdb_nthw,
1551 				pdb->v9.rcp[category + i].duplicate_en);
1552 			pdb_nthw_rcp_duplicate_bit(be->p_pdb_nthw,
1553 				pdb->v9.rcp[category + i].duplicate_bit);
1554 			pdb_nthw_rcp_duplicate_bit(be->p_pdb_nthw,
1555 				pdb->v9.rcp[category + i].pcap_keep_fcs);
1556 			pdb_nthw_rcp_flush(be->p_pdb_nthw);
1557 		}
1558 	}
1559 
1560 	CHECK_DEBUG_OFF(pdb, be->p_pdb_nthw);
1561 	return 0;
1562 }
1563 
/*
 * Flush the PDB CONFIG register group (timestamp format and Tx port offset)
 * to hardware for the given backend device.
 * Only PDB module version 9 is handled; any other version is silently a no-op.
 * Always returns 0.
 */
static int pdb_config_flush(void *be_dev, const struct pdb_func_s *pdb)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
	CHECK_DEBUG_ON(be, pdb, be->p_pdb_nthw);	/* enter module debug tracing if enabled */

	if (pdb->ver == 9) {
		/* NOTE(review): pdb->v9.config is dereferenced without a NULL check --
		 * assumed to be allocated by the flow API layer before any flush; confirm.
		 */
		pdb_nthw_config_ts_format(be->p_pdb_nthw, pdb->v9.config->ts_format);
		pdb_nthw_config_port_ofs(be->p_pdb_nthw, pdb->v9.config->port_ofs);
		pdb_nthw_config_flush(be->p_pdb_nthw);	/* commit shadow values to HW */
	}

	CHECK_DEBUG_OFF(pdb, be->p_pdb_nthw);
	return 0;
}
1578 
1579 /*
1580  * DBS
1581  */
1582 
1583 static int alloc_rx_queue(void *be_dev, int queue_id)
1584 {
1585 	(void)be_dev;
1586 	(void)queue_id;
1587 	NT_LOG(ERR, FILTER, "ERROR alloc Rx queue");
1588 	return -1;
1589 }
1590 
1591 static int free_rx_queue(void *be_dev, int hw_queue)
1592 {
1593 	(void)be_dev;
1594 	(void)hw_queue;
1595 	NT_LOG(ERR, FILTER, "ERROR free Rx queue");
1596 	return 0;
1597 }
1598 
/*
 * Backend operations table handed to the flow API layer.
 * This is a POSITIONAL initializer: entry order must match the member order
 * of struct flow_api_backend_ops exactly -- do not reorder entries.
 */
const struct flow_api_backend_ops flow_be_iface = {
	1,	/* interface version -- presumably backend API version 1; confirm against struct decl */

	set_debug_mode,
	/* Capability getters (resource counts/sizes exposed by the INFO module) */
	get_nb_phy_ports,
	get_nb_rx_ports,
	get_ltx_avail,
	get_nb_cat_funcs,
	get_nb_categories,
	get_nb_cat_km_if_cnt,
	get_nb_cat_km_if_m0,
	get_nb_cat_km_if_m1,
	get_nb_queues,
	get_nb_km_flow_types,
	get_nb_pm_ext,
	get_nb_len,
	get_kcc_size,
	get_kcc_banks,
	get_nb_km_categories,
	get_nb_km_cam_banks,
	get_nb_km_cam_record_words,
	get_nb_km_cam_records,
	get_nb_km_tcam_banks,
	get_nb_km_tcam_bank_width,
	get_nb_flm_categories,
	get_nb_flm_size_mb,
	get_nb_flm_entry_size,
	get_nb_flm_variant,
	get_nb_flm_prios,
	get_nb_flm_pst_profiles,
	get_nb_flm_scrub_profiles,
	get_nb_flm_load_aps_max,
	get_nb_qsl_categories,
	get_nb_qsl_qst_entries,
	get_nb_pdb_categories,
	get_nb_roa_categories,
	get_nb_tpe_categories,
	get_nb_tx_cpy_writers,
	get_nb_tx_cpy_mask_mem,
	get_nb_tx_rpl_depth,
	get_nb_tx_rpl_ext_categories,
	get_nb_tpe_ifr_categories,
	get_nb_rpp_per_ps,
	get_nb_hsh_categories,
	get_nb_hsh_toeplitz,

	/* DBS queue handling (stubbed in this backend) */
	alloc_rx_queue,
	free_rx_queue,

	/* CAT module */
	cat_get_present,
	cat_get_version,
	cat_cfn_flush,

	cat_kce_flush,
	cat_kcs_flush,
	cat_fte_flush,

	cat_cte_flush,
	cat_cts_flush,
	cat_cot_flush,
	cat_cct_flush,
	cat_exo_flush,
	cat_rck_flush,
	cat_len_flush,
	cat_kcc_flush,

	/* KM module */
	km_get_present,
	km_get_version,
	km_rcp_flush,
	km_cam_flush,
	km_tcam_flush,
	km_tci_flush,
	km_tcq_flush,

	/* FLM module */
	flm_get_present,
	flm_get_version,
	flm_control_flush,
	flm_status_flush,
	flm_status_update,
	flm_scan_flush,
	flm_load_bin_flush,
	flm_prio_flush,
	flm_pst_flush,
	flm_rcp_flush,
	flm_scrub_flush,
	flm_buf_ctrl_update,
	flm_stat_update,
	flm_lrn_data_flush,
	flm_inf_sta_data_update,

	/* HSH module */
	hsh_get_present,
	hsh_get_version,
	hsh_rcp_flush,

	/* QSL module */
	qsl_get_present,
	qsl_get_version,
	qsl_rcp_flush,
	qsl_qst_flush,
	qsl_qen_flush,
	qsl_unmq_flush,

	/* SLC LR module */
	slc_lr_get_present,
	slc_lr_get_version,
	slc_lr_rcp_flush,

	/* PDB module */
	pdb_get_present,
	pdb_get_version,
	pdb_rcp_flush,
	pdb_config_flush,
};
1709 
1710 const struct flow_api_backend_ops *bin_flow_backend_init(nthw_fpga_t *p_fpga, void **dev)
1711 {
1712 	uint8_t physical_adapter_no = (uint8_t)p_fpga->p_fpga_info->adapter_no;
1713 
1714 	struct info_nthw *pinfonthw = info_nthw_new();
1715 	info_nthw_init(pinfonthw, p_fpga, physical_adapter_no);
1716 	be_devs[physical_adapter_no].p_info_nthw = pinfonthw;
1717 
1718 	/* Init nthw CAT */
1719 	if (cat_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1720 		struct cat_nthw *pcatnthw = cat_nthw_new();
1721 		cat_nthw_init(pcatnthw, p_fpga, physical_adapter_no);
1722 		be_devs[physical_adapter_no].p_cat_nthw = pcatnthw;
1723 
1724 	} else {
1725 		be_devs[physical_adapter_no].p_cat_nthw = NULL;
1726 	}
1727 
1728 	/* Init nthw KM */
1729 	if (km_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1730 		struct km_nthw *pkmnthw = km_nthw_new();
1731 		km_nthw_init(pkmnthw, p_fpga, physical_adapter_no);
1732 		be_devs[physical_adapter_no].p_km_nthw = pkmnthw;
1733 
1734 	} else {
1735 		be_devs[physical_adapter_no].p_km_nthw = NULL;
1736 	}
1737 
1738 	/* Init nthw FLM */
1739 	if (flm_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1740 		struct flm_nthw *pflmnthw = flm_nthw_new();
1741 		flm_nthw_init(pflmnthw, p_fpga, physical_adapter_no);
1742 		be_devs[physical_adapter_no].p_flm_nthw = pflmnthw;
1743 
1744 	} else {
1745 		be_devs[physical_adapter_no].p_flm_nthw = NULL;
1746 	}
1747 
1748 	/* Init nthw IFR */
1749 	if (ifr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1750 		struct ifr_nthw *ifrnthw = ifr_nthw_new();
1751 		ifr_nthw_init(ifrnthw, p_fpga, physical_adapter_no);
1752 		be_devs[physical_adapter_no].p_ifr_nthw = ifrnthw;
1753 
1754 	} else {
1755 		be_devs[physical_adapter_no].p_ifr_nthw = NULL;
1756 	}
1757 
1758 	/* Init nthw HSH */
1759 	if (hsh_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1760 		struct hsh_nthw *phshnthw = hsh_nthw_new();
1761 		hsh_nthw_init(phshnthw, p_fpga, physical_adapter_no);
1762 		be_devs[physical_adapter_no].p_hsh_nthw = phshnthw;
1763 
1764 	} else {
1765 		be_devs[physical_adapter_no].p_hsh_nthw = NULL;
1766 	}
1767 
1768 	/* Init nthw QSL */
1769 	if (qsl_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1770 		struct qsl_nthw *pqslnthw = qsl_nthw_new();
1771 		qsl_nthw_init(pqslnthw, p_fpga, physical_adapter_no);
1772 		be_devs[physical_adapter_no].p_qsl_nthw = pqslnthw;
1773 
1774 	} else {
1775 		be_devs[physical_adapter_no].p_qsl_nthw = NULL;
1776 	}
1777 
1778 	/* Init nthw SLC LR */
1779 	if (slc_lr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1780 		struct slc_lr_nthw *pslclrnthw = slc_lr_nthw_new();
1781 		slc_lr_nthw_init(pslclrnthw, p_fpga, physical_adapter_no);
1782 		be_devs[physical_adapter_no].p_slc_lr_nthw = pslclrnthw;
1783 
1784 	} else {
1785 		be_devs[physical_adapter_no].p_slc_lr_nthw = NULL;
1786 	}
1787 
1788 	/* Init nthw PDB */
1789 	if (pdb_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1790 		struct pdb_nthw *ppdbnthw = pdb_nthw_new();
1791 		pdb_nthw_init(ppdbnthw, p_fpga, physical_adapter_no);
1792 		be_devs[physical_adapter_no].p_pdb_nthw = ppdbnthw;
1793 
1794 	} else {
1795 		be_devs[physical_adapter_no].p_pdb_nthw = NULL;
1796 	}
1797 
1798 	/* Init nthw HFU */
1799 	if (hfu_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1800 		struct hfu_nthw *ptr = hfu_nthw_new();
1801 		hfu_nthw_init(ptr, p_fpga, physical_adapter_no);
1802 		be_devs[physical_adapter_no].p_hfu_nthw = ptr;
1803 
1804 	} else {
1805 		be_devs[physical_adapter_no].p_hfu_nthw = NULL;
1806 	}
1807 
1808 	/* Init nthw RPP_LR */
1809 	if (rpp_lr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1810 		struct rpp_lr_nthw *ptr = rpp_lr_nthw_new();
1811 		rpp_lr_nthw_init(ptr, p_fpga, physical_adapter_no);
1812 		be_devs[physical_adapter_no].p_rpp_lr_nthw = ptr;
1813 
1814 	} else {
1815 		be_devs[physical_adapter_no].p_rpp_lr_nthw = NULL;
1816 	}
1817 
1818 	/* Init nthw TX_CPY */
1819 	if (tx_cpy_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1820 		struct tx_cpy_nthw *ptr = tx_cpy_nthw_new();
1821 		tx_cpy_nthw_init(ptr, p_fpga, physical_adapter_no);
1822 		be_devs[physical_adapter_no].p_tx_cpy_nthw = ptr;
1823 
1824 	} else {
1825 		be_devs[physical_adapter_no].p_tx_cpy_nthw = NULL;
1826 	}
1827 
1828 	/* Init nthw CSU */
1829 	if (csu_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1830 		struct csu_nthw *ptr = csu_nthw_new();
1831 		csu_nthw_init(ptr, p_fpga, physical_adapter_no);
1832 		be_devs[physical_adapter_no].p_csu_nthw = ptr;
1833 
1834 	} else {
1835 		be_devs[physical_adapter_no].p_csu_nthw = NULL;
1836 	}
1837 
1838 	/* Init nthw TX_INS */
1839 	if (tx_ins_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1840 		struct tx_ins_nthw *ptr = tx_ins_nthw_new();
1841 		tx_ins_nthw_init(ptr, p_fpga, physical_adapter_no);
1842 		be_devs[physical_adapter_no].p_tx_ins_nthw = ptr;
1843 
1844 	} else {
1845 		be_devs[physical_adapter_no].p_tx_ins_nthw = NULL;
1846 	}
1847 
1848 	/* Init nthw TX_RPL */
1849 	if (tx_rpl_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1850 		struct tx_rpl_nthw *ptr = tx_rpl_nthw_new();
1851 		tx_rpl_nthw_init(ptr, p_fpga, physical_adapter_no);
1852 		be_devs[physical_adapter_no].p_tx_rpl_nthw = ptr;
1853 
1854 	} else {
1855 		be_devs[physical_adapter_no].p_tx_rpl_nthw = NULL;
1856 	}
1857 
1858 	be_devs[physical_adapter_no].adapter_no = physical_adapter_no;
1859 	*dev = (void *)&be_devs[physical_adapter_no];
1860 
1861 	return &flow_be_iface;
1862 }
1863 
1864 static void bin_flow_backend_done(void *dev)
1865 {
1866 	struct backend_dev_s *be_dev = (struct backend_dev_s *)dev;
1867 	info_nthw_delete(be_dev->p_info_nthw);
1868 	cat_nthw_delete(be_dev->p_cat_nthw);
1869 	km_nthw_delete(be_dev->p_km_nthw);
1870 	flm_nthw_delete(be_dev->p_flm_nthw);
1871 	hsh_nthw_delete(be_dev->p_hsh_nthw);
1872 	qsl_nthw_delete(be_dev->p_qsl_nthw);
1873 	slc_lr_nthw_delete(be_dev->p_slc_lr_nthw);
1874 	pdb_nthw_delete(be_dev->p_pdb_nthw);
1875 	csu_nthw_delete(be_dev->p_csu_nthw);
1876 	hfu_nthw_delete(be_dev->p_hfu_nthw);
1877 	rpp_lr_nthw_delete(be_dev->p_rpp_lr_nthw);
1878 	tx_cpy_nthw_delete(be_dev->p_tx_cpy_nthw);
1879 	tx_ins_nthw_delete(be_dev->p_tx_ins_nthw);
1880 	tx_rpl_nthw_delete(be_dev->p_tx_rpl_nthw);
1881 }
1882 
/* Entry points exposed to the ntnic module registry (see flow_backend_init). */
static const struct flow_backend_ops ops = {
	.bin_flow_backend_init = bin_flow_backend_init,
	.bin_flow_backend_done = bin_flow_backend_done,
};
1887 
/* Register this binary flow backend implementation with the module registry. */
void flow_backend_init(void)
{
	register_flow_backend_ops(&ops);
}
1892