xref: /dpdk/drivers/net/ntnic/nthw/flow_api/flow_backend/flow_backend.c (revision 55f767cb7f66a255159dd5ffb22103fc7ab38072)
1 /*
2  * SPDX-License-Identifier: BSD-3-Clause
3  * Copyright(c) 2023 Napatech A/S
4  */
5 
6 #include <stdint.h>
7 
8 #include "flow_nthw_info.h"
9 #include "flow_nthw_ifr.h"
10 #include "flow_nthw_cat.h"
11 #include "flow_nthw_csu.h"
12 #include "flow_nthw_km.h"
13 #include "flow_nthw_flm.h"
14 #include "flow_nthw_hfu.h"
15 #include "flow_nthw_hsh.h"
16 #include "flow_nthw_qsl.h"
17 #include "flow_nthw_slc_lr.h"
18 #include "flow_nthw_pdb.h"
19 #include "flow_nthw_rpp_lr.h"
20 #include "flow_nthw_tx_cpy.h"
21 #include "ntnic_mod_reg.h"
22 #include "nthw_fpga_model.h"
23 #include "hw_mod_backend.h"
24 
25 /*
26  * Binary Flow API backend implementation for the ntservice driver
27  *
28  * General note on this backend implementation:
29  * A shadow class could be used to combine multiple register writes; however, this backend is only for development and testing.
30  */
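/*
 * Illustrative sketch only (not part of this backend): a shadow structure could
 * collect field updates and emit them as a single combined write, as hinted at
 * above. All names below are hypothetical.
 */
#if 0
struct shadow_regs_s {
	uint32_t field[32];	/* pending field values */
	uint32_t dirty;		/* bitmask of fields changed since the last flush */
};

static void shadow_set(struct shadow_regs_s *sh, unsigned int field, uint32_t val)
{
	sh->field[field] = val;
	sh->dirty |= 1U << field;
}

static void shadow_flush(struct shadow_regs_s *sh)
{
	if (sh->dirty == 0)
		return;
	/* write only the dirty fields, then issue one hardware flush */
	sh->dirty = 0;
}
#endif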
31 
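/*
 * Per-adapter backend state; be_devs holds one entry per physical adapter
 * with pointers to the instantiated nthw module drivers.
 */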
32 static struct backend_dev_s {
33 	uint8_t adapter_no;
34 	enum debug_mode_e dmode;
35 	struct info_nthw *p_info_nthw;
36 	struct cat_nthw *p_cat_nthw;
37 	struct km_nthw *p_km_nthw;
38 	struct flm_nthw *p_flm_nthw;
39 	struct hsh_nthw *p_hsh_nthw;
40 	struct qsl_nthw *p_qsl_nthw;
41 	struct slc_lr_nthw *p_slc_lr_nthw;
42 	struct pdb_nthw *p_pdb_nthw;
43 	struct hfu_nthw *p_hfu_nthw;    /* TPE module */
44 	struct rpp_lr_nthw *p_rpp_lr_nthw;      /* TPE module */
45 	struct tx_cpy_nthw *p_tx_cpy_nthw;      /* TPE module */
46 	struct csu_nthw *p_csu_nthw;    /* TPE module */
47 	struct ifr_nthw *p_ifr_nthw;    /* TPE module */
48 } be_devs[MAX_PHYS_ADAPTERS];
49 
50 #define CHECK_DEBUG_ON(be, mod, inst)                                                             \
51 	int __debug__ = 0;                                                                        \
52 	if (((be)->dmode & FLOW_BACKEND_DEBUG_MODE_WRITE) || (mod)->debug)                        \
53 		do {                                                                              \
54 			mod##_nthw_set_debug_mode((inst), 0xFF);                                  \
55 			__debug__ = 1;                                                            \
56 	} while (0)
57 
58 #define CHECK_DEBUG_OFF(mod, inst)                                                                \
59 	do {                                                                                      \
60 		if (__debug__)                                                                    \
61 			mod##_nthw_set_debug_mode((inst), 0);                                     \
62 	} while (0)
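/*
 * Typical usage, as in the flush handlers below: CHECK_DEBUG_ON enables module
 * register tracing for the duration of a flush when write debugging is
 * requested, and CHECK_DEBUG_OFF restores it. The function below is an
 * illustrative sketch only; xyz_flush() is hypothetical.
 */
#if 0
static int xyz_flush(void *be_dev, const struct cat_func_s *cat)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;

	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);	/* declares and may set __debug__ */
	/* ... module register writes and flushes ... */
	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);	/* clears debug mode if it was enabled above */
	return 0;
}
#endif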
63 
64 const struct flow_api_backend_ops *bin_flow_backend_init(nthw_fpga_t *p_fpga, void **be_dev);
65 static void bin_flow_backend_done(void *be_dev);
66 
67 static int set_debug_mode(void *be_dev, enum debug_mode_e mode)
68 {
69 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
70 	be->dmode = mode;
71 	return 0;
72 }
73 
74 /*
75  * INFO
76  */
77 
78 static int get_nb_phy_ports(void *be_dev)
79 {
80 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
81 	return info_nthw_get_nb_phy_ports(be->p_info_nthw);
82 }
83 
84 static int get_nb_rx_ports(void *be_dev)
85 {
86 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
87 	return info_nthw_get_nb_rx_ports(be->p_info_nthw);
88 }
89 
90 static int get_ltx_avail(void *be_dev)
91 {
92 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
93 	return info_nthw_get_ltx_avail(be->p_info_nthw);
94 }
95 
96 static int get_nb_cat_funcs(void *be_dev)
97 {
98 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
99 	return info_nthw_get_nb_cat_funcs(be->p_info_nthw);
100 }
101 
102 static int get_nb_categories(void *be_dev)
103 {
104 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
105 	return info_nthw_get_nb_categories(be->p_info_nthw);
106 }
107 
108 static int get_nb_cat_km_if_cnt(void *be_dev)
109 {
110 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
111 	return info_nthw_get_nb_cat_km_if_cnt(be->p_info_nthw);
112 }
113 
114 static int get_nb_cat_km_if_m0(void *be_dev)
115 {
116 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
117 	return info_nthw_get_nb_cat_km_if_m0(be->p_info_nthw);
118 }
119 
120 static int get_nb_cat_km_if_m1(void *be_dev)
121 {
122 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
123 	return info_nthw_get_nb_cat_km_if_m1(be->p_info_nthw);
124 }
125 
126 static int get_nb_queues(void *be_dev)
127 {
128 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
129 	return info_nthw_get_nb_queues(be->p_info_nthw);
130 }
131 
132 static int get_nb_km_flow_types(void *be_dev)
133 {
134 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
135 	return info_nthw_get_nb_km_flow_types(be->p_info_nthw);
136 }
137 
138 static int get_nb_pm_ext(void *be_dev)
139 {
140 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
141 	return info_nthw_get_nb_pm_ext(be->p_info_nthw);
142 }
143 
144 static int get_nb_len(void *be_dev)
145 {
146 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
147 	return info_nthw_get_nb_len(be->p_info_nthw);
148 }
149 
150 static int get_kcc_size(void *be_dev)
151 {
152 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
153 	return info_nthw_get_kcc_size(be->p_info_nthw);
154 }
155 
156 static int get_kcc_banks(void *be_dev)
157 {
158 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
159 	return info_nthw_get_kcc_banks(be->p_info_nthw);
160 }
161 
162 static int get_nb_km_categories(void *be_dev)
163 {
164 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
165 	return info_nthw_get_nb_km_categories(be->p_info_nthw);
166 }
167 
168 static int get_nb_km_cam_banks(void *be_dev)
169 {
170 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
171 	return info_nthw_get_nb_km_cam_banks(be->p_info_nthw);
172 }
173 
174 static int get_nb_km_cam_record_words(void *be_dev)
175 {
176 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
177 	return info_nthw_get_nb_km_cam_record_words(be->p_info_nthw);
178 }
179 
180 static int get_nb_km_cam_records(void *be_dev)
181 {
182 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
183 	return info_nthw_get_nb_km_cam_records(be->p_info_nthw);
184 }
185 
186 static int get_nb_km_tcam_banks(void *be_dev)
187 {
188 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
189 	return info_nthw_get_nb_km_tcam_banks(be->p_info_nthw);
190 }
191 
192 static int get_nb_km_tcam_bank_width(void *be_dev)
193 {
194 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
195 	return info_nthw_get_nb_km_tcam_bank_width(be->p_info_nthw);
196 }
197 
198 static int get_nb_flm_categories(void *be_dev)
199 {
200 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
201 	return info_nthw_get_nb_flm_categories(be->p_info_nthw);
202 }
203 
204 static int get_nb_flm_size_mb(void *be_dev)
205 {
206 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
207 	return info_nthw_get_nb_flm_size_mb(be->p_info_nthw);
208 }
209 
210 static int get_nb_flm_entry_size(void *be_dev)
211 {
212 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
213 	return info_nthw_get_nb_flm_entry_size(be->p_info_nthw);
214 }
215 
216 static int get_nb_flm_variant(void *be_dev)
217 {
218 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
219 	return info_nthw_get_nb_flm_variant(be->p_info_nthw);
220 }
221 
222 static int get_nb_flm_prios(void *be_dev)
223 {
224 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
225 	return info_nthw_get_nb_flm_prios(be->p_info_nthw);
226 }
227 
228 static int get_nb_flm_pst_profiles(void *be_dev)
229 {
230 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
231 	return info_nthw_get_nb_flm_pst_profiles(be->p_info_nthw);
232 }
233 
234 static int get_nb_flm_scrub_profiles(void *be_dev)
235 {
236 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
237 	return info_nthw_get_nb_flm_scrub_profiles(be->p_info_nthw);
238 }
239 
240 static int get_nb_flm_load_aps_max(void *be_dev)
241 {
242 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
243 	return info_nthw_get_nb_flm_load_aps_max(be->p_info_nthw);
244 }
245 
246 static int get_nb_qsl_categories(void *be_dev)
247 {
248 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
249 	return info_nthw_get_nb_qsl_categories(be->p_info_nthw);
250 }
251 
252 static int get_nb_qsl_qst_entries(void *be_dev)
253 {
254 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
255 	return info_nthw_get_nb_qsl_qst_entries(be->p_info_nthw);
256 }
257 
258 static int get_nb_pdb_categories(void *be_dev)
259 {
260 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
261 	return info_nthw_get_nb_pdb_categories(be->p_info_nthw);
262 }
263 
264 static int get_nb_roa_categories(void *be_dev)
265 {
266 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
267 	return info_nthw_get_nb_roa_categories(be->p_info_nthw);
268 }
269 
270 static int get_nb_tpe_categories(void *be_dev)
271 {
272 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
273 	return info_nthw_get_nb_tpe_categories(be->p_info_nthw);
274 }
275 
276 static int get_nb_tx_cpy_writers(void *be_dev)
277 {
278 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
279 	return info_nthw_get_nb_tx_cpy_writers(be->p_info_nthw);
280 }
281 
282 static int get_nb_tx_cpy_mask_mem(void *be_dev)
283 {
284 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
285 	return info_nthw_get_nb_tx_cpy_mask_mem(be->p_info_nthw);
286 }
287 
288 static int get_nb_tx_rpl_depth(void *be_dev)
289 {
290 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
291 	return info_nthw_get_nb_tx_rpl_depth(be->p_info_nthw);
292 }
293 
294 static int get_nb_tx_rpl_ext_categories(void *be_dev)
295 {
296 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
297 	return info_nthw_get_nb_tx_rpl_ext_categories(be->p_info_nthw);
298 }
299 
300 static int get_nb_tpe_ifr_categories(void *be_dev)
301 {
302 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
303 	return info_nthw_get_nb_tpe_ifr_categories(be->p_info_nthw);
304 }
305 
306 static int get_nb_rpp_per_ps(void *be_dev)
307 {
308 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
309 	return info_nthw_get_nb_rpp_per_ps(be->p_info_nthw);
310 }
311 
312 static int get_nb_hsh_categories(void *be_dev)
313 {
314 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
315 	return info_nthw_get_nb_hsh_categories(be->p_info_nthw);
316 }
317 
318 static int get_nb_hsh_toeplitz(void *be_dev)
319 {
320 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
321 	return info_nthw_get_nb_hsh_toeplitz(be->p_info_nthw);
322 }
323 
324 /*
325  * CAT
326  */
327 
328 static bool cat_get_present(void *be_dev)
329 {
330 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
331 	return be->p_cat_nthw != NULL;
332 }
333 
334 static uint32_t cat_get_version(void *be_dev)
335 {
336 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
337 	return (uint32_t)((nthw_module_get_major_version(be->p_cat_nthw->m_cat) << 16) |
338 			(nthw_module_get_minor_version(be->p_cat_nthw->m_cat) & 0xffff));
339 }
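/*
 * The version word packs the FPGA module version as (major << 16) | minor,
 * e.g. a v21.0 module reads back as 0x00150000; the other *_get_version()
 * handlers below use the same encoding.
 */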
340 
341 static int cat_cfn_flush(void *be_dev, const struct cat_func_s *cat, int cat_func, int cnt)
342 {
343 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
344 
345 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
346 
347 	if (cat->ver == 18) {
348 		cat_nthw_cfn_cnt(be->p_cat_nthw, 1U);
349 
350 		for (int i = 0; i < cnt; i++) {
351 			cat_nthw_cfn_select(be->p_cat_nthw, cat_func);
352 			cat_nthw_cfn_enable(be->p_cat_nthw, cat->v18.cfn[cat_func].enable);
353 			cat_nthw_cfn_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].inv);
354 			cat_nthw_cfn_ptc_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_inv);
355 			cat_nthw_cfn_ptc_isl(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_isl);
356 			cat_nthw_cfn_ptc_cfp(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_cfp);
357 			cat_nthw_cfn_ptc_mac(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_mac);
358 			cat_nthw_cfn_ptc_l2(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_l2);
359 			cat_nthw_cfn_ptc_vn_tag(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_vntag);
360 			cat_nthw_cfn_ptc_vlan(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_vlan);
361 			cat_nthw_cfn_ptc_mpls(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_mpls);
362 			cat_nthw_cfn_ptc_l3(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_l3);
363 			cat_nthw_cfn_ptc_frag(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_frag);
364 			cat_nthw_cfn_ptc_ip_prot(be->p_cat_nthw,
365 				cat->v18.cfn[cat_func].ptc_ip_prot);
366 			cat_nthw_cfn_ptc_l4(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_l4);
367 			cat_nthw_cfn_ptc_tunnel(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tunnel);
368 			cat_nthw_cfn_ptc_tnl_l2(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_l2);
369 			cat_nthw_cfn_ptc_tnl_vlan(be->p_cat_nthw,
370 				cat->v18.cfn[cat_func].ptc_tnl_vlan);
371 			cat_nthw_cfn_ptc_tnl_mpls(be->p_cat_nthw,
372 				cat->v18.cfn[cat_func].ptc_tnl_mpls);
373 			cat_nthw_cfn_ptc_tnl_l3(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_l3);
374 			cat_nthw_cfn_ptc_tnl_frag(be->p_cat_nthw,
375 				cat->v18.cfn[cat_func].ptc_tnl_frag);
376 			cat_nthw_cfn_ptc_tnl_ip_prot(be->p_cat_nthw,
377 				cat->v18.cfn[cat_func].ptc_tnl_ip_prot);
378 			cat_nthw_cfn_ptc_tnl_l4(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_l4);
379 
380 			cat_nthw_cfn_err_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].err_inv);
381 			cat_nthw_cfn_err_cv(be->p_cat_nthw, cat->v18.cfn[cat_func].err_cv);
382 			cat_nthw_cfn_err_fcs(be->p_cat_nthw, cat->v18.cfn[cat_func].err_fcs);
383 			cat_nthw_cfn_err_trunc(be->p_cat_nthw, cat->v18.cfn[cat_func].err_trunc);
384 			cat_nthw_cfn_err_l3_cs(be->p_cat_nthw, cat->v18.cfn[cat_func].err_l3_cs);
385 			cat_nthw_cfn_err_l4_cs(be->p_cat_nthw, cat->v18.cfn[cat_func].err_l4_cs);
386 
387 			cat_nthw_cfn_mac_port(be->p_cat_nthw, cat->v18.cfn[cat_func].mac_port);
388 
389 			cat_nthw_cfn_pm_cmp(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_cmp);
390 			cat_nthw_cfn_pm_dct(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_dct);
391 			cat_nthw_cfn_pm_ext_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_ext_inv);
392 			cat_nthw_cfn_pm_cmb(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_cmb);
393 			cat_nthw_cfn_pm_and_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_and_inv);
394 			cat_nthw_cfn_pm_or_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_or_inv);
395 			cat_nthw_cfn_pm_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_inv);
396 
397 			cat_nthw_cfn_lc(be->p_cat_nthw, cat->v18.cfn[cat_func].lc);
398 			cat_nthw_cfn_lc_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].lc_inv);
399 			cat_nthw_cfn_km0_or(be->p_cat_nthw, cat->v18.cfn[cat_func].km_or);
400 			cat_nthw_cfn_flush(be->p_cat_nthw);
401 			cat_func++;
402 		}
403 
404 	} else if (cat->ver == 21) {
405 		cat_nthw_cfn_cnt(be->p_cat_nthw, 1U);
406 
407 		for (int i = 0; i < cnt; i++) {
408 			cat_nthw_cfn_select(be->p_cat_nthw, cat_func);
409 			cat_nthw_cfn_enable(be->p_cat_nthw, cat->v21.cfn[cat_func].enable);
410 			cat_nthw_cfn_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].inv);
411 			cat_nthw_cfn_ptc_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_inv);
412 			cat_nthw_cfn_ptc_isl(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_isl);
413 			cat_nthw_cfn_ptc_cfp(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_cfp);
414 			cat_nthw_cfn_ptc_mac(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_mac);
415 			cat_nthw_cfn_ptc_l2(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_l2);
416 			cat_nthw_cfn_ptc_vn_tag(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_vntag);
417 			cat_nthw_cfn_ptc_vlan(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_vlan);
418 			cat_nthw_cfn_ptc_mpls(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_mpls);
419 			cat_nthw_cfn_ptc_l3(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_l3);
420 			cat_nthw_cfn_ptc_frag(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_frag);
421 			cat_nthw_cfn_ptc_ip_prot(be->p_cat_nthw,
422 				cat->v21.cfn[cat_func].ptc_ip_prot);
423 			cat_nthw_cfn_ptc_l4(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_l4);
424 			cat_nthw_cfn_ptc_tunnel(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tunnel);
425 			cat_nthw_cfn_ptc_tnl_l2(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_l2);
426 			cat_nthw_cfn_ptc_tnl_vlan(be->p_cat_nthw,
427 				cat->v21.cfn[cat_func].ptc_tnl_vlan);
428 			cat_nthw_cfn_ptc_tnl_mpls(be->p_cat_nthw,
429 				cat->v21.cfn[cat_func].ptc_tnl_mpls);
430 			cat_nthw_cfn_ptc_tnl_l3(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_l3);
431 			cat_nthw_cfn_ptc_tnl_frag(be->p_cat_nthw,
432 				cat->v21.cfn[cat_func].ptc_tnl_frag);
433 			cat_nthw_cfn_ptc_tnl_ip_prot(be->p_cat_nthw,
434 				cat->v21.cfn[cat_func].ptc_tnl_ip_prot);
435 			cat_nthw_cfn_ptc_tnl_l4(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_l4);
436 
437 			cat_nthw_cfn_err_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].err_inv);
438 			cat_nthw_cfn_err_cv(be->p_cat_nthw, cat->v21.cfn[cat_func].err_cv);
439 			cat_nthw_cfn_err_fcs(be->p_cat_nthw, cat->v21.cfn[cat_func].err_fcs);
440 			cat_nthw_cfn_err_trunc(be->p_cat_nthw, cat->v21.cfn[cat_func].err_trunc);
441 			cat_nthw_cfn_err_l3_cs(be->p_cat_nthw, cat->v21.cfn[cat_func].err_l3_cs);
442 			cat_nthw_cfn_err_l4_cs(be->p_cat_nthw, cat->v21.cfn[cat_func].err_l4_cs);
443 			cat_nthw_cfn_err_tnl_l3_cs(be->p_cat_nthw,
444 				cat->v21.cfn[cat_func].err_tnl_l3_cs);
445 			cat_nthw_cfn_err_tnl_l4_cs(be->p_cat_nthw,
446 				cat->v21.cfn[cat_func].err_tnl_l4_cs);
447 			cat_nthw_cfn_err_ttl_exp(be->p_cat_nthw,
448 				cat->v21.cfn[cat_func].err_ttl_exp);
449 			cat_nthw_cfn_err_tnl_ttl_exp(be->p_cat_nthw,
450 				cat->v21.cfn[cat_func].err_tnl_ttl_exp);
451 
452 			cat_nthw_cfn_mac_port(be->p_cat_nthw, cat->v21.cfn[cat_func].mac_port);
453 
454 			cat_nthw_cfn_pm_cmp(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_cmp);
455 			cat_nthw_cfn_pm_dct(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_dct);
456 			cat_nthw_cfn_pm_ext_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_ext_inv);
457 			cat_nthw_cfn_pm_cmb(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_cmb);
458 			cat_nthw_cfn_pm_and_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_and_inv);
459 			cat_nthw_cfn_pm_or_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_or_inv);
460 			cat_nthw_cfn_pm_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_inv);
461 
462 			cat_nthw_cfn_lc(be->p_cat_nthw, cat->v21.cfn[cat_func].lc);
463 			cat_nthw_cfn_lc_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].lc_inv);
464 			cat_nthw_cfn_km0_or(be->p_cat_nthw, cat->v21.cfn[cat_func].km0_or);
465 
466 			if (be->p_cat_nthw->m_km_if_cnt > 1)
467 				cat_nthw_cfn_km1_or(be->p_cat_nthw, cat->v21.cfn[cat_func].km1_or);
468 
469 			cat_nthw_cfn_flush(be->p_cat_nthw);
470 			cat_func++;
471 		}
472 	}
473 
474 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
475 	return 0;
476 }
477 
478 static int cat_kce_flush(void *be_dev, const struct cat_func_s *cat, int km_if_idx, int index,
479 	int cnt)
480 {
481 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
482 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
483 
484 	if (cat->ver == 18) {
485 		cat_nthw_kce_cnt(be->p_cat_nthw, 0, 1U);
486 
487 		for (int i = 0; i < cnt; i++) {
488 			cat_nthw_kce_select(be->p_cat_nthw, 0, index + i);
489 			cat_nthw_kce_enable(be->p_cat_nthw, 0, cat->v18.kce[index + i].enable_bm);
490 			cat_nthw_kce_flush(be->p_cat_nthw, 0);
491 		}
492 
493 	} else if (cat->ver == 21) {
494 		cat_nthw_kce_cnt(be->p_cat_nthw, km_if_idx, 1U);
495 
496 		for (int i = 0; i < cnt; i++) {
497 			cat_nthw_kce_select(be->p_cat_nthw, km_if_idx, index + i);
498 			cat_nthw_kce_enable(be->p_cat_nthw, km_if_idx,
499 				cat->v21.kce[index + i].enable_bm[km_if_idx]);
500 			cat_nthw_kce_flush(be->p_cat_nthw, km_if_idx);
501 		}
502 	}
503 
504 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
505 	return 0;
506 }
507 
508 static int cat_kcs_flush(void *be_dev, const struct cat_func_s *cat, int km_if_idx, int cat_func,
509 	int cnt)
510 {
511 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
512 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
513 
514 	if (cat->ver == 18) {
515 		cat_nthw_kcs_cnt(be->p_cat_nthw, 0, 1U);
516 
517 		for (int i = 0; i < cnt; i++) {
518 			cat_nthw_kcs_select(be->p_cat_nthw, 0, cat_func);
519 			cat_nthw_kcs_category(be->p_cat_nthw, 0, cat->v18.kcs[cat_func].category);
520 			cat_nthw_kcs_flush(be->p_cat_nthw, 0);
521 			cat_func++;
522 		}
523 
524 	} else if (cat->ver == 21) {
525 		cat_nthw_kcs_cnt(be->p_cat_nthw, km_if_idx, 1U);
526 
527 		for (int i = 0; i < cnt; i++) {
528 			cat_nthw_kcs_select(be->p_cat_nthw, km_if_idx, cat_func);
529 			cat_nthw_kcs_category(be->p_cat_nthw, km_if_idx,
530 				cat->v21.kcs[cat_func].category[km_if_idx]);
531 			cat_nthw_kcs_flush(be->p_cat_nthw, km_if_idx);
532 			cat_func++;
533 		}
534 	}
535 
536 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
537 	return 0;
538 }
539 
540 static int cat_fte_flush(void *be_dev, const struct cat_func_s *cat, int km_if_idx, int index,
541 	int cnt)
542 {
543 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
544 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
545 
546 	if (cat->ver == 18) {
547 		cat_nthw_fte_cnt(be->p_cat_nthw, 0, 1);
548 
549 		for (int i = 0; i < cnt; i++) {
550 			cat_nthw_fte_select(be->p_cat_nthw, 0, index + i);
551 			cat_nthw_fte_enable(be->p_cat_nthw, 0, cat->v18.fte[index + i].enable_bm);
552 			cat_nthw_fte_flush(be->p_cat_nthw, 0);
553 		}
554 
555 	} else if (cat->ver == 21) {
556 		cat_nthw_fte_cnt(be->p_cat_nthw, km_if_idx, 1);
557 
558 		for (int i = 0; i < cnt; i++) {
559 			cat_nthw_fte_select(be->p_cat_nthw, km_if_idx, index + i);
560 			cat_nthw_fte_enable(be->p_cat_nthw, km_if_idx,
561 				cat->v21.fte[index + i].enable_bm[km_if_idx]);
562 			cat_nthw_fte_flush(be->p_cat_nthw, km_if_idx);
563 		}
564 	}
565 
566 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
567 	return 0;
568 }
569 
570 static int cat_cte_flush(void *be_dev, const struct cat_func_s *cat, int cat_func, int cnt)
571 {
572 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
573 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
574 
575 	if (cat->ver == 18 || cat->ver == 21) {
576 		cat_nthw_cte_cnt(be->p_cat_nthw, 1);
577 
578 		for (int i = 0; i < cnt; i++) {
579 			cat_nthw_cte_select(be->p_cat_nthw, cat_func);
580 			cat_nthw_cte_enable_col(be->p_cat_nthw, cat->v18.cte[cat_func].b.col);
581 			cat_nthw_cte_enable_cor(be->p_cat_nthw, cat->v18.cte[cat_func].b.cor);
582 			cat_nthw_cte_enable_hsh(be->p_cat_nthw, cat->v18.cte[cat_func].b.hsh);
583 			cat_nthw_cte_enable_qsl(be->p_cat_nthw, cat->v18.cte[cat_func].b.qsl);
584 			cat_nthw_cte_enable_ipf(be->p_cat_nthw, cat->v18.cte[cat_func].b.ipf);
585 			cat_nthw_cte_enable_slc(be->p_cat_nthw, cat->v18.cte[cat_func].b.slc);
586 			cat_nthw_cte_enable_pdb(be->p_cat_nthw, cat->v18.cte[cat_func].b.pdb);
587 			cat_nthw_cte_enable_msk(be->p_cat_nthw, cat->v18.cte[cat_func].b.msk);
588 			cat_nthw_cte_enable_hst(be->p_cat_nthw, cat->v18.cte[cat_func].b.hst);
589 			cat_nthw_cte_enable_epp(be->p_cat_nthw, cat->v18.cte[cat_func].b.epp);
590 			cat_nthw_cte_enable_tpe(be->p_cat_nthw, cat->v18.cte[cat_func].b.tpe);
591 
592 			cat_nthw_cte_flush(be->p_cat_nthw);
593 			cat_func++;
594 		}
595 	}
596 
597 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
598 	return 0;
599 }
600 
601 static int cat_cts_flush(void *be_dev, const struct cat_func_s *cat, int index, int cnt)
602 {
603 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
604 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
605 
606 	if (cat->ver == 18 || cat->ver == 21) {
607 		cat_nthw_cts_cnt(be->p_cat_nthw, 1);
608 
609 		for (int i = 0; i < cnt; i++) {
610 			cat_nthw_cts_select(be->p_cat_nthw, index + i);
611 			cat_nthw_cts_cat_a(be->p_cat_nthw, cat->v18.cts[index + i].cat_a);
612 			cat_nthw_cts_cat_b(be->p_cat_nthw, cat->v18.cts[index + i].cat_b);
613 			cat_nthw_cts_flush(be->p_cat_nthw);
614 		}
615 	}
616 
617 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
618 	return 0;
619 }
620 
621 static int cat_cot_flush(void *be_dev, const struct cat_func_s *cat, int cat_func, int cnt)
622 {
623 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
624 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
625 
626 	if (cat->ver == 18 || cat->ver == 21) {
627 		cat_nthw_cot_cnt(be->p_cat_nthw, 1);
628 
629 		for (int i = 0; i < cnt; i++) {
630 			cat_nthw_cot_select(be->p_cat_nthw, cat_func + i);
631 			cat_nthw_cot_color(be->p_cat_nthw, cat->v18.cot[cat_func + i].color);
632 			cat_nthw_cot_km(be->p_cat_nthw, cat->v18.cot[cat_func + i].km);
633 			cat_nthw_cot_flush(be->p_cat_nthw);
634 		}
635 	}
636 
637 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
638 	return 0;
639 }
640 
641 static int cat_cct_flush(void *be_dev, const struct cat_func_s *cat, int index, int cnt)
642 {
643 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
644 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
645 
646 	if (cat->ver == 18 || cat->ver == 21) {
647 		cat_nthw_cct_cnt(be->p_cat_nthw, 1);
648 
649 		for (int i = 0; i < cnt; i++) {
650 			cat_nthw_cct_select(be->p_cat_nthw, index + i);
651 			cat_nthw_cct_color(be->p_cat_nthw, cat->v18.cct[index + i].color);
652 			cat_nthw_cct_km(be->p_cat_nthw, cat->v18.cct[index + i].km);
653 			cat_nthw_cct_flush(be->p_cat_nthw);
654 		}
655 	}
656 
657 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
658 	return 0;
659 }
660 
661 static int cat_exo_flush(void *be_dev, const struct cat_func_s *cat, int ext_index, int cnt)
662 {
663 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
664 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
665 
666 	if (cat->ver == 18 || cat->ver == 21) {
667 		cat_nthw_exo_cnt(be->p_cat_nthw, 1);
668 
669 		for (int i = 0; i < cnt; i++) {
670 			cat_nthw_exo_select(be->p_cat_nthw, ext_index + i);
671 			cat_nthw_exo_dyn(be->p_cat_nthw, cat->v18.exo[ext_index + i].dyn);
672 			cat_nthw_exo_ofs(be->p_cat_nthw, cat->v18.exo[ext_index + i].ofs);
673 			cat_nthw_exo_flush(be->p_cat_nthw);
674 		}
675 	}
676 
677 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
678 	return 0;
679 }
680 
681 static int cat_rck_flush(void *be_dev, const struct cat_func_s *cat, int index, int cnt)
682 {
683 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
684 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
685 
686 	if (cat->ver == 18 || cat->ver == 21) {
687 		cat_nthw_rck_cnt(be->p_cat_nthw, 1);
688 
689 		for (int i = 0; i < cnt; i++) {
690 			cat_nthw_rck_select(be->p_cat_nthw, index + i);
691 			cat_nthw_rck_data(be->p_cat_nthw, cat->v18.rck[index + i].rck_data);
692 			cat_nthw_rck_flush(be->p_cat_nthw);
693 		}
694 	}
695 
696 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
697 	return 0;
698 }
699 
700 static int cat_len_flush(void *be_dev, const struct cat_func_s *cat, int len_index, int cnt)
701 {
702 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
703 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
704 
705 	if (cat->ver == 18 || cat->ver == 21) {
706 		cat_nthw_len_cnt(be->p_cat_nthw, 1);
707 
708 		for (int i = 0; i < cnt; i++) {
709 			cat_nthw_len_select(be->p_cat_nthw, len_index + i);
710 			cat_nthw_len_lower(be->p_cat_nthw, cat->v18.len[len_index + i].lower);
711 			cat_nthw_len_upper(be->p_cat_nthw, cat->v18.len[len_index + i].upper);
712 			cat_nthw_len_dyn1(be->p_cat_nthw, cat->v18.len[len_index + i].dyn1);
713 			cat_nthw_len_dyn2(be->p_cat_nthw, cat->v18.len[len_index + i].dyn2);
714 			cat_nthw_len_inv(be->p_cat_nthw, cat->v18.len[len_index + i].inv);
715 			cat_nthw_len_flush(be->p_cat_nthw);
716 		}
717 	}
718 
719 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
720 	return 0;
721 }
722 
723 static int cat_kcc_flush(void *be_dev, const struct cat_func_s *cat, int len_index, int cnt)
724 {
725 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
726 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
727 
728 	if (cat->ver == 18 || cat->ver == 21) {
729 		cat_nthw_kcc_cnt(be->p_cat_nthw, 1);
730 
731 		for (int i = 0; i < cnt; i++) {
732 			cat_nthw_kcc_select(be->p_cat_nthw, len_index + i);
733 			cat_nthw_kcc_key(be->p_cat_nthw, cat->v18.kcc_cam[len_index + i].key);
734 			cat_nthw_kcc_category(be->p_cat_nthw,
735 				cat->v18.kcc_cam[len_index + i].category);
736 			cat_nthw_kcc_id(be->p_cat_nthw, cat->v18.kcc_cam[len_index + i].id);
737 			cat_nthw_kcc_flush(be->p_cat_nthw);
738 		}
739 	}
740 
741 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
742 	return 0;
743 }
744 
745 /*
746  * KM
747  */
748 
749 static bool km_get_present(void *be_dev)
750 {
751 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
752 	return be->p_km_nthw != NULL;
753 }
754 
755 static uint32_t km_get_version(void *be_dev)
756 {
757 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
758 	return (uint32_t)((nthw_module_get_major_version(be->p_km_nthw->m_km) << 16) |
759 			(nthw_module_get_minor_version(be->p_km_nthw->m_km) & 0xffff));
760 }
761 
762 static int km_rcp_flush(void *be_dev, const struct km_func_s *km, int category, int cnt)
763 {
764 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
765 
766 	CHECK_DEBUG_ON(be, km, be->p_km_nthw);
767 
768 	if (km->ver == 7) {
769 		km_nthw_rcp_cnt(be->p_km_nthw, 1);
770 
771 		for (int i = 0; i < cnt; i++) {
772 			km_nthw_rcp_select(be->p_km_nthw, category + i);
773 			km_nthw_rcp_qw0_dyn(be->p_km_nthw, km->v7.rcp[category + i].qw0_dyn);
774 			km_nthw_rcp_qw0_ofs(be->p_km_nthw, km->v7.rcp[category + i].qw0_ofs);
775 			km_nthw_rcp_qw0_sel_a(be->p_km_nthw, km->v7.rcp[category + i].qw0_sel_a);
776 			km_nthw_rcp_qw0_sel_b(be->p_km_nthw, km->v7.rcp[category + i].qw0_sel_b);
777 			km_nthw_rcp_qw4_dyn(be->p_km_nthw, km->v7.rcp[category + i].qw4_dyn);
778 			km_nthw_rcp_qw4_ofs(be->p_km_nthw, km->v7.rcp[category + i].qw4_ofs);
779 			km_nthw_rcp_qw4_sel_a(be->p_km_nthw, km->v7.rcp[category + i].qw4_sel_a);
780 			km_nthw_rcp_qw4_sel_b(be->p_km_nthw, km->v7.rcp[category + i].qw4_sel_b);
781 			km_nthw_rcp_dw8_dyn(be->p_km_nthw, km->v7.rcp[category + i].dw8_dyn);
782 			km_nthw_rcp_dw8_ofs(be->p_km_nthw, km->v7.rcp[category + i].dw8_ofs);
783 			km_nthw_rcp_dw8_sel_a(be->p_km_nthw, km->v7.rcp[category + i].dw8_sel_a);
784 			km_nthw_rcp_dw8_sel_b(be->p_km_nthw, km->v7.rcp[category + i].dw8_sel_b);
785 			km_nthw_rcp_dw10_dyn(be->p_km_nthw, km->v7.rcp[category + i].dw10_dyn);
786 			km_nthw_rcp_dw10_ofs(be->p_km_nthw, km->v7.rcp[category + i].dw10_ofs);
787 			km_nthw_rcp_dw10_sel_a(be->p_km_nthw, km->v7.rcp[category + i].dw10_sel_a);
788 			km_nthw_rcp_dw10_sel_b(be->p_km_nthw, km->v7.rcp[category + i].dw10_sel_b);
789 			km_nthw_rcp_swx_cch(be->p_km_nthw, km->v7.rcp[category + i].swx_cch);
790 			km_nthw_rcp_swx_sel_a(be->p_km_nthw, km->v7.rcp[category + i].swx_sel_a);
791 			km_nthw_rcp_swx_sel_b(be->p_km_nthw, km->v7.rcp[category + i].swx_sel_b);
792 			km_nthw_rcp_mask_da(be->p_km_nthw, km->v7.rcp[category + i].mask_d_a);
793 			km_nthw_rcp_mask_b(be->p_km_nthw, km->v7.rcp[category + i].mask_b);
794 			km_nthw_rcp_dual(be->p_km_nthw, km->v7.rcp[category + i].dual);
795 			km_nthw_rcp_paired(be->p_km_nthw, km->v7.rcp[category + i].paired);
796 			km_nthw_rcp_el_a(be->p_km_nthw, km->v7.rcp[category + i].el_a);
797 			km_nthw_rcp_el_b(be->p_km_nthw, km->v7.rcp[category + i].el_b);
798 			km_nthw_rcp_info_a(be->p_km_nthw, km->v7.rcp[category + i].info_a);
799 			km_nthw_rcp_info_b(be->p_km_nthw, km->v7.rcp[category + i].info_b);
800 			km_nthw_rcp_ftm_a(be->p_km_nthw, km->v7.rcp[category + i].ftm_a);
801 			km_nthw_rcp_ftm_b(be->p_km_nthw, km->v7.rcp[category + i].ftm_b);
802 			km_nthw_rcp_bank_a(be->p_km_nthw, km->v7.rcp[category + i].bank_a);
803 			km_nthw_rcp_bank_b(be->p_km_nthw, km->v7.rcp[category + i].bank_b);
804 			km_nthw_rcp_kl_a(be->p_km_nthw, km->v7.rcp[category + i].kl_a);
805 			km_nthw_rcp_kl_b(be->p_km_nthw, km->v7.rcp[category + i].kl_b);
806 			km_nthw_rcp_keyway_a(be->p_km_nthw, km->v7.rcp[category + i].keyway_a);
807 			km_nthw_rcp_keyway_b(be->p_km_nthw, km->v7.rcp[category + i].keyway_b);
808 			km_nthw_rcp_synergy_mode(be->p_km_nthw,
809 				km->v7.rcp[category + i].synergy_mode);
810 			km_nthw_rcp_dw0_b_dyn(be->p_km_nthw, km->v7.rcp[category + i].dw0_b_dyn);
811 			km_nthw_rcp_dw0_b_ofs(be->p_km_nthw, km->v7.rcp[category + i].dw0_b_ofs);
812 			km_nthw_rcp_dw2_b_dyn(be->p_km_nthw, km->v7.rcp[category + i].dw2_b_dyn);
813 			km_nthw_rcp_dw2_b_ofs(be->p_km_nthw, km->v7.rcp[category + i].dw2_b_ofs);
814 			km_nthw_rcp_sw4_b_dyn(be->p_km_nthw, km->v7.rcp[category + i].sw4_b_dyn);
815 			km_nthw_rcp_sw4_b_ofs(be->p_km_nthw, km->v7.rcp[category + i].sw4_b_ofs);
816 			km_nthw_rcp_sw5_b_dyn(be->p_km_nthw, km->v7.rcp[category + i].sw5_b_dyn);
817 			km_nthw_rcp_sw5_b_ofs(be->p_km_nthw, km->v7.rcp[category + i].sw5_b_ofs);
818 			km_nthw_rcp_flush(be->p_km_nthw);
819 		}
820 	}
821 
822 	CHECK_DEBUG_OFF(km, be->p_km_nthw);
823 	return 0;
824 }
825 
826 static int km_cam_flush(void *be_dev, const struct km_func_s *km, int bank, int record, int cnt)
827 {
828 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
829 	CHECK_DEBUG_ON(be, km, be->p_km_nthw);
830 
831 	if (km->ver == 7) {
832 		km_nthw_cam_cnt(be->p_km_nthw, 1);
833 
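		/*
		 * CAM records are addressed as (bank << 11) + record,
		 * i.e. a stride of 2048 records per bank.
		 */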
834 		for (int i = 0; i < cnt; i++) {
835 			km_nthw_cam_select(be->p_km_nthw, (bank << 11) + record + i);
836 			km_nthw_cam_w0(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w0);
837 			km_nthw_cam_w1(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w1);
838 			km_nthw_cam_w2(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w2);
839 			km_nthw_cam_w3(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w3);
840 			km_nthw_cam_w4(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w4);
841 			km_nthw_cam_w5(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w5);
842 			km_nthw_cam_ft0(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft0);
843 			km_nthw_cam_ft1(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft1);
844 			km_nthw_cam_ft2(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft2);
845 			km_nthw_cam_ft3(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft3);
846 			km_nthw_cam_ft4(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft4);
847 			km_nthw_cam_ft5(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft5);
848 			km_nthw_cam_flush(be->p_km_nthw);
849 		}
850 	}
851 
852 	CHECK_DEBUG_OFF(km, be->p_km_nthw);
853 	return 0;
854 }
855 
856 static int km_tcam_flush(void *be_dev, const struct km_func_s *km, int bank, int byte, int value,
857 	int cnt)
858 {
859 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
860 	CHECK_DEBUG_ON(be, km, be->p_km_nthw);
861 
862 	if (km->ver == 7) {
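		/*
		 * TCAM records are laid out as bank * 1024 + byte * 256 + value;
		 * e.g. bank 2, byte 1, value 0x10 gives start_idx 2320.
		 */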
863 		int start_idx = bank * 4 * 256 + byte * 256 + value;
864 		km_nthw_tcam_cnt(be->p_km_nthw, 1);
865 
866 		for (int i = 0; i < cnt; i++) {
867 			if (km->v7.tcam[start_idx + i].dirty) {
868 				km_nthw_tcam_select(be->p_km_nthw, start_idx + i);
869 				km_nthw_tcam_t(be->p_km_nthw, km->v7.tcam[start_idx + i].t);
870 				km_nthw_tcam_flush(be->p_km_nthw);
871 				km->v7.tcam[start_idx + i].dirty = 0;
872 			}
873 		}
874 	}
875 
876 	CHECK_DEBUG_OFF(km, be->p_km_nthw);
877 	return 0;
878 }
879 
880 /*
881  * bank is the TCAM bank, index is the index within the bank (0..71)
882  */
883 static int km_tci_flush(void *be_dev, const struct km_func_s *km, int bank, int index, int cnt)
884 {
885 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
886 	CHECK_DEBUG_ON(be, km, be->p_km_nthw);
887 
888 	if (km->ver == 7) {
889 		/* TCAM bank width in version 3 = 72 */
890 		km_nthw_tci_cnt(be->p_km_nthw, 1);
891 
892 		for (int i = 0; i < cnt; i++) {
893 			km_nthw_tci_select(be->p_km_nthw, bank * 72 + index + i);
894 			km_nthw_tci_color(be->p_km_nthw, km->v7.tci[bank * 72 + index + i].color);
895 			km_nthw_tci_ft(be->p_km_nthw, km->v7.tci[bank * 72 + index + i].ft);
896 			km_nthw_tci_flush(be->p_km_nthw);
897 		}
898 	}
899 
900 	CHECK_DEBUG_OFF(km, be->p_km_nthw);
901 	return 0;
902 }
903 
904 /*
905  * bank is the TCAM bank, index is the index within the bank (0..71)
906  */
907 static int km_tcq_flush(void *be_dev, const struct km_func_s *km, int bank, int index, int cnt)
908 {
909 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
910 	CHECK_DEBUG_ON(be, km, be->p_km_nthw);
911 
912 	if (km->ver == 7) {
913 		/* TCAM bank width in version 3 = 72 */
914 		km_nthw_tcq_cnt(be->p_km_nthw, 1);
915 
916 		for (int i = 0; i < cnt; i++) {
917 			/* adr = lower 4 bits = bank, upper 7 bits = index */
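			/* e.g. bank 3, index 5 gives adr = 3 + (5 << 4) = 83 */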
918 			km_nthw_tcq_select(be->p_km_nthw, bank + (index << 4) + i);
919 			km_nthw_tcq_bank_mask(be->p_km_nthw,
920 				km->v7.tcq[bank + (index << 4) + i].bank_mask);
921 			km_nthw_tcq_qual(be->p_km_nthw, km->v7.tcq[bank + (index << 4) + i].qual);
922 			km_nthw_tcq_flush(be->p_km_nthw);
923 		}
924 	}
925 
926 	CHECK_DEBUG_OFF(km, be->p_km_nthw);
927 	return 0;
928 }
929 
930 /*
931  * FLM
932  */
933 
934 static bool flm_get_present(void *be_dev)
935 {
936 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
937 	return be->p_flm_nthw != NULL;
938 }
939 
940 static uint32_t flm_get_version(void *be_dev)
941 {
942 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
943 	return (uint32_t)((nthw_module_get_major_version(be->p_flm_nthw->m_flm) << 16) |
944 			(nthw_module_get_minor_version(be->p_flm_nthw->m_flm) & 0xffff));
945 }
946 
947 static int flm_control_flush(void *be_dev, const struct flm_func_s *flm)
948 {
949 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
950 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
951 
952 	if (flm->ver >= 25) {
953 		flm_nthw_control_enable(be->p_flm_nthw, flm->v25.control->enable);
954 		flm_nthw_control_init(be->p_flm_nthw, flm->v25.control->init);
955 		flm_nthw_control_lds(be->p_flm_nthw, flm->v25.control->lds);
956 		flm_nthw_control_lfs(be->p_flm_nthw, flm->v25.control->lfs);
957 		flm_nthw_control_lis(be->p_flm_nthw, flm->v25.control->lis);
958 		flm_nthw_control_uds(be->p_flm_nthw, flm->v25.control->uds);
959 		flm_nthw_control_uis(be->p_flm_nthw, flm->v25.control->uis);
960 		flm_nthw_control_rds(be->p_flm_nthw, flm->v25.control->rds);
961 		flm_nthw_control_ris(be->p_flm_nthw, flm->v25.control->ris);
962 		flm_nthw_control_pds(be->p_flm_nthw, flm->v25.control->pds);
963 		flm_nthw_control_pis(be->p_flm_nthw, flm->v25.control->pis);
964 		flm_nthw_control_crcwr(be->p_flm_nthw, flm->v25.control->crcwr);
965 		flm_nthw_control_crcrd(be->p_flm_nthw, flm->v25.control->crcrd);
966 		flm_nthw_control_rbl(be->p_flm_nthw, flm->v25.control->rbl);
967 		flm_nthw_control_eab(be->p_flm_nthw, flm->v25.control->eab);
968 		flm_nthw_control_split_sdram_usage(be->p_flm_nthw,
969 			flm->v25.control->split_sdram_usage);
970 		flm_nthw_control_flush(be->p_flm_nthw);
971 	}
972 
973 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
974 	return 0;
975 }
976 
977 static int flm_status_flush(void *be_dev, const struct flm_func_s *flm)
978 {
979 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
980 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
981 
982 	if (flm->ver >= 25) {
983 		/* CALIBDONE, INITDONE, IDLE, and EFT_BP are read-only */
984 		flm_nthw_status_critical(be->p_flm_nthw, &flm->v25.status->critical, 0);
985 		flm_nthw_status_panic(be->p_flm_nthw, &flm->v25.status->panic, 0);
986 		flm_nthw_status_crcerr(be->p_flm_nthw, &flm->v25.status->crcerr, 0);
987 		flm_nthw_status_cache_buf_crit(be->p_flm_nthw,
988 			&flm->v25.status->cache_buf_critical, 0);
989 		flm_nthw_status_flush(be->p_flm_nthw);
990 	}
991 
992 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
993 	return 0;
994 }
995 
996 static int flm_status_update(void *be_dev, const struct flm_func_s *flm)
997 {
998 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
999 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1000 
1001 	if (flm->ver >= 25) {
1002 		flm_nthw_status_update(be->p_flm_nthw);
1003 		flm_nthw_status_calib_success(be->p_flm_nthw, &flm->v25.status->calib_success, 1);
1004 		flm_nthw_status_calib_fail(be->p_flm_nthw, &flm->v25.status->calib_fail, 1);
1005 		flm_nthw_status_initdone(be->p_flm_nthw, &flm->v25.status->initdone, 1);
1006 		flm_nthw_status_idle(be->p_flm_nthw, &flm->v25.status->idle, 1);
1007 		flm_nthw_status_critical(be->p_flm_nthw, &flm->v25.status->critical, 1);
1008 		flm_nthw_status_panic(be->p_flm_nthw, &flm->v25.status->panic, 1);
1009 		flm_nthw_status_crcerr(be->p_flm_nthw, &flm->v25.status->crcerr, 1);
1010 		flm_nthw_status_eft_bp(be->p_flm_nthw, &flm->v25.status->eft_bp, 1);
1011 		flm_nthw_status_cache_buf_crit(be->p_flm_nthw,
1012 			&flm->v25.status->cache_buf_critical, 1);
1013 	}
1014 
1015 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1016 	return 0;
1017 }
1018 
1019 static int flm_scan_flush(void *be_dev, const struct flm_func_s *flm)
1020 {
1021 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1022 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1023 
1024 	if (flm->ver >= 25) {
1025 		flm_nthw_scan_i(be->p_flm_nthw, flm->v25.scan->i);
1026 		flm_nthw_scan_flush(be->p_flm_nthw);
1027 	}
1028 
1029 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1030 	return 0;
1031 }
1032 
1033 static int flm_load_bin_flush(void *be_dev, const struct flm_func_s *flm)
1034 {
1035 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1036 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1037 
1038 	if (flm->ver >= 25) {
1039 		flm_nthw_load_bin(be->p_flm_nthw, flm->v25.load_bin->bin);
1040 		flm_nthw_load_bin_flush(be->p_flm_nthw);
1041 	}
1042 
1043 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1044 	return 0;
1045 }
1046 
1047 static int flm_prio_flush(void *be_dev, const struct flm_func_s *flm)
1048 {
1049 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1050 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1051 
1052 	if (flm->ver >= 25) {
1053 		flm_nthw_prio_limit0(be->p_flm_nthw, flm->v25.prio->limit0);
1054 		flm_nthw_prio_ft0(be->p_flm_nthw, flm->v25.prio->ft0);
1055 		flm_nthw_prio_limit1(be->p_flm_nthw, flm->v25.prio->limit1);
1056 		flm_nthw_prio_ft1(be->p_flm_nthw, flm->v25.prio->ft1);
1057 		flm_nthw_prio_limit2(be->p_flm_nthw, flm->v25.prio->limit2);
1058 		flm_nthw_prio_ft2(be->p_flm_nthw, flm->v25.prio->ft2);
1059 		flm_nthw_prio_limit3(be->p_flm_nthw, flm->v25.prio->limit3);
1060 		flm_nthw_prio_ft3(be->p_flm_nthw, flm->v25.prio->ft3);
1061 		flm_nthw_prio_flush(be->p_flm_nthw);
1062 	}
1063 
1064 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1065 	return 0;
1066 }
1067 
1068 static int flm_pst_flush(void *be_dev, const struct flm_func_s *flm, int index, int cnt)
1069 {
1070 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1071 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1072 
1073 	if (flm->ver >= 25) {
1074 		flm_nthw_pst_cnt(be->p_flm_nthw, 1);
1075 
1076 		for (int i = 0; i < cnt; i++) {
1077 			flm_nthw_pst_select(be->p_flm_nthw, index + i);
1078 			flm_nthw_pst_bp(be->p_flm_nthw, flm->v25.pst[index + i].bp);
1079 			flm_nthw_pst_pp(be->p_flm_nthw, flm->v25.pst[index + i].pp);
1080 			flm_nthw_pst_tp(be->p_flm_nthw, flm->v25.pst[index + i].tp);
1081 			flm_nthw_pst_flush(be->p_flm_nthw);
1082 		}
1083 	}
1084 
1085 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1086 	return 0;
1087 }
1088 
1089 static int flm_rcp_flush(void *be_dev, const struct flm_func_s *flm, int index, int cnt)
1090 {
1091 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1092 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1093 
1094 	if (flm->ver >= 25) {
1095 		flm_nthw_rcp_cnt(be->p_flm_nthw, 1);
1096 
1097 		for (int i = 0; i < cnt; i++) {
1098 			flm_nthw_rcp_select(be->p_flm_nthw, index + i);
1099 			flm_nthw_rcp_lookup(be->p_flm_nthw, flm->v25.rcp[index + i].lookup);
1100 			flm_nthw_rcp_qw0_dyn(be->p_flm_nthw, flm->v25.rcp[index + i].qw0_dyn);
1101 			flm_nthw_rcp_qw0_ofs(be->p_flm_nthw, flm->v25.rcp[index + i].qw0_ofs);
1102 			flm_nthw_rcp_qw0_sel(be->p_flm_nthw, flm->v25.rcp[index + i].qw0_sel);
1103 			flm_nthw_rcp_qw4_dyn(be->p_flm_nthw, flm->v25.rcp[index + i].qw4_dyn);
1104 			flm_nthw_rcp_qw4_ofs(be->p_flm_nthw, flm->v25.rcp[index + i].qw4_ofs);
1105 			flm_nthw_rcp_sw8_dyn(be->p_flm_nthw, flm->v25.rcp[index + i].sw8_dyn);
1106 			flm_nthw_rcp_sw8_ofs(be->p_flm_nthw, flm->v25.rcp[index + i].sw8_ofs);
1107 			flm_nthw_rcp_sw8_sel(be->p_flm_nthw, flm->v25.rcp[index + i].sw8_sel);
1108 			flm_nthw_rcp_sw9_dyn(be->p_flm_nthw, flm->v25.rcp[index + i].sw9_dyn);
1109 			flm_nthw_rcp_sw9_ofs(be->p_flm_nthw, flm->v25.rcp[index + i].sw9_ofs);
1110 			flm_nthw_rcp_mask(be->p_flm_nthw, flm->v25.rcp[index + i].mask);
1111 			flm_nthw_rcp_kid(be->p_flm_nthw, flm->v25.rcp[index + i].kid);
1112 			flm_nthw_rcp_opn(be->p_flm_nthw, flm->v25.rcp[index + i].opn);
1113 			flm_nthw_rcp_ipn(be->p_flm_nthw, flm->v25.rcp[index + i].ipn);
1114 			flm_nthw_rcp_byt_dyn(be->p_flm_nthw, flm->v25.rcp[index + i].byt_dyn);
1115 			flm_nthw_rcp_byt_ofs(be->p_flm_nthw, flm->v25.rcp[index + i].byt_ofs);
1116 			flm_nthw_rcp_txplm(be->p_flm_nthw, flm->v25.rcp[index + i].txplm);
1117 			flm_nthw_rcp_auto_ipv4_mask(be->p_flm_nthw,
1118 				flm->v25.rcp[index + i].auto_ipv4_mask);
1119 			flm_nthw_rcp_flush(be->p_flm_nthw);
1120 		}
1121 	}
1122 
1123 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1124 	return 0;
1125 }
1126 
1127 static int flm_scrub_flush(void *be_dev, const struct flm_func_s *flm, int index, int cnt)
1128 {
1129 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1130 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1131 
1132 	if (flm->ver >= 25) {
1133 		flm_nthw_scrub_cnt(be->p_flm_nthw, 1);
1134 
1135 		for (int i = 0; i < cnt; i++) {
1136 			flm_nthw_scrub_select(be->p_flm_nthw, index + i);
1137 			flm_nthw_scrub_t(be->p_flm_nthw, flm->v25.scrub[index + i].t);
1138 			flm_nthw_scrub_r(be->p_flm_nthw, flm->v25.scrub[index + i].r);
1139 			flm_nthw_scrub_del(be->p_flm_nthw, flm->v25.scrub[index + i].del);
1140 			flm_nthw_scrub_inf(be->p_flm_nthw, flm->v25.scrub[index + i].inf);
1141 			flm_nthw_scrub_flush(be->p_flm_nthw);
1142 		}
1143 	}
1144 
1145 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1146 	return 0;
1147 }
1148 
1149 static int flm_buf_ctrl_update(void *be_dev, const struct flm_func_s *flm)
1150 {
1151 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1152 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1153 
1154 	if (flm->ver >= 25) {
1155 		flm_nthw_buf_ctrl_update(be->p_flm_nthw,
1156 			&flm->v25.buf_ctrl->lrn_free,
1157 			&flm->v25.buf_ctrl->inf_avail,
1158 			&flm->v25.buf_ctrl->sta_avail);
1159 	}
1160 
1161 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1162 	return 0;
1163 }
1164 
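/*
 * Read all FLM statistics, probe and load counters from hardware into the
 * corresponding v25 shadow fields.
 */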
1165 static int flm_stat_update(void *be_dev, const struct flm_func_s *flm)
1166 {
1167 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1168 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1169 
1170 	if (flm->ver >= 25) {
1171 		flm_nthw_stat_lrn_done_update(be->p_flm_nthw);
1172 		flm_nthw_stat_lrn_ignore_update(be->p_flm_nthw);
1173 		flm_nthw_stat_lrn_fail_update(be->p_flm_nthw);
1174 		flm_nthw_stat_unl_done_update(be->p_flm_nthw);
1175 		flm_nthw_stat_unl_ignore_update(be->p_flm_nthw);
1176 		flm_nthw_stat_rel_done_update(be->p_flm_nthw);
1177 		flm_nthw_stat_rel_ignore_update(be->p_flm_nthw);
1178 		flm_nthw_stat_aul_done_update(be->p_flm_nthw);
1179 		flm_nthw_stat_aul_ignore_update(be->p_flm_nthw);
1180 		flm_nthw_stat_aul_fail_update(be->p_flm_nthw);
1181 		flm_nthw_stat_tul_done_update(be->p_flm_nthw);
1182 		flm_nthw_stat_flows_update(be->p_flm_nthw);
1183 		flm_nthw_load_lps_update(be->p_flm_nthw);
1184 		flm_nthw_load_aps_update(be->p_flm_nthw);
1185 
1186 		flm_nthw_stat_lrn_done_cnt(be->p_flm_nthw, &flm->v25.lrn_done->cnt, 1);
1187 		flm_nthw_stat_lrn_ignore_cnt(be->p_flm_nthw, &flm->v25.lrn_ignore->cnt, 1);
1188 		flm_nthw_stat_lrn_fail_cnt(be->p_flm_nthw, &flm->v25.lrn_fail->cnt, 1);
1189 		flm_nthw_stat_unl_done_cnt(be->p_flm_nthw, &flm->v25.unl_done->cnt, 1);
1190 		flm_nthw_stat_unl_ignore_cnt(be->p_flm_nthw, &flm->v25.unl_ignore->cnt, 1);
1191 		flm_nthw_stat_rel_done_cnt(be->p_flm_nthw, &flm->v25.rel_done->cnt, 1);
1192 		flm_nthw_stat_rel_ignore_cnt(be->p_flm_nthw, &flm->v25.rel_ignore->cnt, 1);
1193 		flm_nthw_stat_aul_done_cnt(be->p_flm_nthw, &flm->v25.aul_done->cnt, 1);
1194 		flm_nthw_stat_aul_ignore_cnt(be->p_flm_nthw, &flm->v25.aul_ignore->cnt, 1);
1195 		flm_nthw_stat_aul_fail_cnt(be->p_flm_nthw, &flm->v25.aul_fail->cnt, 1);
1196 		flm_nthw_stat_tul_done_cnt(be->p_flm_nthw, &flm->v25.tul_done->cnt, 1);
1197 		flm_nthw_stat_flows_cnt(be->p_flm_nthw, &flm->v25.flows->cnt, 1);
1198 
1199 		flm_nthw_stat_prb_done_update(be->p_flm_nthw);
1200 		flm_nthw_stat_prb_ignore_update(be->p_flm_nthw);
1201 		flm_nthw_stat_prb_done_cnt(be->p_flm_nthw, &flm->v25.prb_done->cnt, 1);
1202 		flm_nthw_stat_prb_ignore_cnt(be->p_flm_nthw, &flm->v25.prb_ignore->cnt, 1);
1203 
1204 		flm_nthw_load_lps_cnt(be->p_flm_nthw, &flm->v25.load_lps->lps, 1);
1205 		flm_nthw_load_aps_cnt(be->p_flm_nthw, &flm->v25.load_aps->aps, 1);
1206 	}
1207 
1208 	if (flm->ver >= 25) {
1209 		flm_nthw_stat_sta_done_update(be->p_flm_nthw);
1210 		flm_nthw_stat_inf_done_update(be->p_flm_nthw);
1211 		flm_nthw_stat_inf_skip_update(be->p_flm_nthw);
1212 		flm_nthw_stat_pck_hit_update(be->p_flm_nthw);
1213 		flm_nthw_stat_pck_miss_update(be->p_flm_nthw);
1214 		flm_nthw_stat_pck_unh_update(be->p_flm_nthw);
1215 		flm_nthw_stat_pck_dis_update(be->p_flm_nthw);
1216 		flm_nthw_stat_csh_hit_update(be->p_flm_nthw);
1217 		flm_nthw_stat_csh_miss_update(be->p_flm_nthw);
1218 		flm_nthw_stat_csh_unh_update(be->p_flm_nthw);
1219 		flm_nthw_stat_cuc_start_update(be->p_flm_nthw);
1220 		flm_nthw_stat_cuc_move_update(be->p_flm_nthw);
1221 
1222 		flm_nthw_stat_sta_done_cnt(be->p_flm_nthw, &flm->v25.sta_done->cnt, 1);
1223 		flm_nthw_stat_inf_done_cnt(be->p_flm_nthw, &flm->v25.inf_done->cnt, 1);
1224 		flm_nthw_stat_inf_skip_cnt(be->p_flm_nthw, &flm->v25.inf_skip->cnt, 1);
1225 		flm_nthw_stat_pck_hit_cnt(be->p_flm_nthw, &flm->v25.pck_hit->cnt, 1);
1226 		flm_nthw_stat_pck_miss_cnt(be->p_flm_nthw, &flm->v25.pck_miss->cnt, 1);
1227 		flm_nthw_stat_pck_unh_cnt(be->p_flm_nthw, &flm->v25.pck_unh->cnt, 1);
1228 		flm_nthw_stat_pck_dis_cnt(be->p_flm_nthw, &flm->v25.pck_dis->cnt, 1);
1229 		flm_nthw_stat_csh_hit_cnt(be->p_flm_nthw, &flm->v25.csh_hit->cnt, 1);
1230 		flm_nthw_stat_csh_miss_cnt(be->p_flm_nthw, &flm->v25.csh_miss->cnt, 1);
1231 		flm_nthw_stat_csh_unh_cnt(be->p_flm_nthw, &flm->v25.csh_unh->cnt, 1);
1232 		flm_nthw_stat_cuc_start_cnt(be->p_flm_nthw, &flm->v25.cuc_start->cnt, 1);
1233 		flm_nthw_stat_cuc_move_cnt(be->p_flm_nthw, &flm->v25.cuc_move->cnt, 1);
1234 	}
1235 
1236 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1237 	return 0;
1238 }
1239 
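/*
 * Push learn records to the FLM learn interface and report how many were
 * handled; the buffer control shadow is refreshed so the caller sees the
 * current INF/STA word counts.
 */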
1240 static int flm_lrn_data_flush(void *be_dev, const struct flm_func_s *flm, const uint32_t *lrn_data,
1241 	uint32_t records, uint32_t *handled_records,
1242 	uint32_t words_per_record, uint32_t *inf_word_cnt,
1243 	uint32_t *sta_word_cnt)
1244 {
1245 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1246 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1247 
1248 	int ret = flm_nthw_lrn_data_flush(be->p_flm_nthw, lrn_data, records, words_per_record,
1249 			handled_records, &flm->v25.buf_ctrl->lrn_free,
1250 			&flm->v25.buf_ctrl->inf_avail,
1251 			&flm->v25.buf_ctrl->sta_avail);
1252 
1253 	*inf_word_cnt = flm->v25.buf_ctrl->inf_avail;
1254 	*sta_word_cnt = flm->v25.buf_ctrl->sta_avail;
1255 
1256 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1257 	return ret;
1258 }
1259 
1260 static int flm_inf_sta_data_update(void *be_dev, const struct flm_func_s *flm, uint32_t *inf_data,
1261 	uint32_t inf_size, uint32_t *inf_word_cnt, uint32_t *sta_data,
1262 	uint32_t sta_size, uint32_t *sta_word_cnt)
1263 {
1264 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1265 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1266 
1267 	int ret = flm_nthw_inf_sta_data_update(be->p_flm_nthw, inf_data, inf_size, sta_data,
1268 			sta_size, &flm->v25.buf_ctrl->lrn_free,
1269 			&flm->v25.buf_ctrl->inf_avail,
1270 			&flm->v25.buf_ctrl->sta_avail);
1271 
1272 	*inf_word_cnt = flm->v25.buf_ctrl->inf_avail;
1273 	*sta_word_cnt = flm->v25.buf_ctrl->sta_avail;
1274 
1275 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1276 	return ret;
1277 }
1278 
1279 /*
1280  * HSH
1281  */
1282 
1283 static bool hsh_get_present(void *be_dev)
1284 {
1285 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1286 	return be->p_hsh_nthw != NULL;
1287 }
1288 
1289 static uint32_t hsh_get_version(void *be_dev)
1290 {
1291 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1292 	return (uint32_t)((nthw_module_get_major_version(be->p_hsh_nthw->m_hsh) << 16) |
1293 			(nthw_module_get_minor_version(be->p_hsh_nthw->m_hsh) & 0xffff));
1294 }
1295 
1296 static int hsh_rcp_flush(void *be_dev, const struct hsh_func_s *hsh, int category, int cnt)
1297 {
1298 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1299 	CHECK_DEBUG_ON(be, hsh, be->p_hsh_nthw);
1300 
1301 	if (hsh->ver == 5) {
1302 		hsh_nthw_rcp_cnt(be->p_hsh_nthw, 1);
1303 
1304 		for (int i = 0; i < cnt; i++) {
1305 			hsh_nthw_rcp_select(be->p_hsh_nthw, category + i);
1306 			hsh_nthw_rcp_load_dist_type(be->p_hsh_nthw,
1307 				hsh->v5.rcp[category + i].load_dist_type);
1308 			hsh_nthw_rcp_mac_port_mask(be->p_hsh_nthw,
1309 				hsh->v5.rcp[category + i].mac_port_mask);
1310 			hsh_nthw_rcp_sort(be->p_hsh_nthw, hsh->v5.rcp[category + i].sort);
1311 			hsh_nthw_rcp_qw0_pe(be->p_hsh_nthw, hsh->v5.rcp[category + i].qw0_pe);
1312 			hsh_nthw_rcp_qw0_ofs(be->p_hsh_nthw, hsh->v5.rcp[category + i].qw0_ofs);
1313 			hsh_nthw_rcp_qw4_pe(be->p_hsh_nthw, hsh->v5.rcp[category + i].qw4_pe);
1314 			hsh_nthw_rcp_qw4_ofs(be->p_hsh_nthw, hsh->v5.rcp[category + i].qw4_ofs);
1315 			hsh_nthw_rcp_w8_pe(be->p_hsh_nthw, hsh->v5.rcp[category + i].w8_pe);
1316 			hsh_nthw_rcp_w8_ofs(be->p_hsh_nthw, hsh->v5.rcp[category + i].w8_ofs);
1317 			hsh_nthw_rcp_w8_sort(be->p_hsh_nthw, hsh->v5.rcp[category + i].w8_sort);
1318 			hsh_nthw_rcp_w9_pe(be->p_hsh_nthw, hsh->v5.rcp[category + i].w9_pe);
1319 			hsh_nthw_rcp_w9_ofs(be->p_hsh_nthw, hsh->v5.rcp[category + i].w9_ofs);
1320 			hsh_nthw_rcp_w9_sort(be->p_hsh_nthw, hsh->v5.rcp[category + i].w9_sort);
1321 			hsh_nthw_rcp_w9_p(be->p_hsh_nthw, hsh->v5.rcp[category + i].w9_p);
1322 			hsh_nthw_rcp_p_mask(be->p_hsh_nthw, hsh->v5.rcp[category + i].p_mask);
1323 			hsh_nthw_rcp_word_mask(be->p_hsh_nthw,
1324 				hsh->v5.rcp[category + i].word_mask);
1325 			hsh_nthw_rcp_seed(be->p_hsh_nthw, hsh->v5.rcp[category + i].seed);
1326 			hsh_nthw_rcp_tnl_p(be->p_hsh_nthw, hsh->v5.rcp[category + i].tnl_p);
1327 			hsh_nthw_rcp_hsh_valid(be->p_hsh_nthw,
1328 				hsh->v5.rcp[category + i].hsh_valid);
1329 			hsh_nthw_rcp_hsh_type(be->p_hsh_nthw, hsh->v5.rcp[category + i].hsh_type);
1330 			hsh_nthw_rcp_toeplitz(be->p_hsh_nthw, hsh->v5.rcp[category + i].toeplitz);
1331 			hsh_nthw_rcp_k(be->p_hsh_nthw, hsh->v5.rcp[category + i].k);
1332 			hsh_nthw_rcp_auto_ipv4_mask(be->p_hsh_nthw,
1333 				hsh->v5.rcp[category + i].auto_ipv4_mask);
1334 			hsh_nthw_rcp_flush(be->p_hsh_nthw);
1335 		}
1336 	}
1337 
1338 	CHECK_DEBUG_OFF(hsh, be->p_hsh_nthw);
1339 	return 0;
1340 }
1341 
1342 /*
1343  * QSL
1344  */
1345 
1346 static bool qsl_get_present(void *be_dev)
1347 {
1348 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1349 	return be->p_qsl_nthw != NULL;
1350 }
1351 
1352 static uint32_t qsl_get_version(void *be_dev)
1353 {
1354 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1355 	return (uint32_t)((nthw_module_get_major_version(be->p_qsl_nthw->m_qsl) << 16) |
1356 			(nthw_module_get_minor_version(be->p_qsl_nthw->m_qsl) & 0xffff));
1357 }
1358 
1359 static int qsl_rcp_flush(void *be_dev, const struct qsl_func_s *qsl, int category, int cnt)
1360 {
1361 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1362 	CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
1363 
1364 	if (qsl->ver == 7) {
1365 		qsl_nthw_rcp_cnt(be->p_qsl_nthw, 1);
1366 
1367 		for (int i = 0; i < cnt; i++) {
1368 			qsl_nthw_rcp_select(be->p_qsl_nthw, category + i);
1369 			qsl_nthw_rcp_discard(be->p_qsl_nthw, qsl->v7.rcp[category + i].discard);
1370 			qsl_nthw_rcp_drop(be->p_qsl_nthw, qsl->v7.rcp[category + i].drop);
1371 			qsl_nthw_rcp_tbl_lo(be->p_qsl_nthw, qsl->v7.rcp[category + i].tbl_lo);
1372 			qsl_nthw_rcp_tbl_hi(be->p_qsl_nthw, qsl->v7.rcp[category + i].tbl_hi);
1373 			qsl_nthw_rcp_tbl_idx(be->p_qsl_nthw, qsl->v7.rcp[category + i].tbl_idx);
1374 			qsl_nthw_rcp_tbl_msk(be->p_qsl_nthw, qsl->v7.rcp[category + i].tbl_msk);
1375 			qsl_nthw_rcp_lr(be->p_qsl_nthw, qsl->v7.rcp[category + i].lr);
1376 			qsl_nthw_rcp_tsa(be->p_qsl_nthw, qsl->v7.rcp[category + i].tsa);
1377 			qsl_nthw_rcp_vli(be->p_qsl_nthw, qsl->v7.rcp[category + i].vli);
1378 			qsl_nthw_rcp_flush(be->p_qsl_nthw);
1379 		}
1380 	}
1381 
1382 	CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
1383 	return 0;
1384 }
1385 
1386 static int qsl_qst_flush(void *be_dev, const struct qsl_func_s *qsl, int entry, int cnt)
1387 {
1388 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1389 	CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
1390 
1391 	if (qsl->ver == 7) {
1392 		qsl_nthw_qst_cnt(be->p_qsl_nthw, 1);
1393 
1394 		for (int i = 0; i < cnt; i++) {
1395 			qsl_nthw_qst_select(be->p_qsl_nthw, entry + i);
1396 			qsl_nthw_qst_queue(be->p_qsl_nthw, qsl->v7.qst[entry + i].queue);
1397 			qsl_nthw_qst_en(be->p_qsl_nthw, qsl->v7.qst[entry + i].en);
1398 
1399 			qsl_nthw_qst_tx_port(be->p_qsl_nthw, qsl->v7.qst[entry + i].tx_port);
1400 			qsl_nthw_qst_lre(be->p_qsl_nthw, qsl->v7.qst[entry + i].lre);
1401 			qsl_nthw_qst_tci(be->p_qsl_nthw, qsl->v7.qst[entry + i].tci);
1402 			qsl_nthw_qst_ven(be->p_qsl_nthw, qsl->v7.qst[entry + i].ven);
1403 			qsl_nthw_qst_flush(be->p_qsl_nthw);
1404 		}
1405 	}
1406 
1407 	CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
1408 	return 0;
1409 }
1410 
1411 static int qsl_qen_flush(void *be_dev, const struct qsl_func_s *qsl, int entry, int cnt)
1412 {
1413 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1414 	CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
1415 
1416 	if (qsl->ver == 7) {
1417 		qsl_nthw_qen_cnt(be->p_qsl_nthw, 1);
1418 
1419 		for (int i = 0; i < cnt; i++) {
1420 			qsl_nthw_qen_select(be->p_qsl_nthw, entry + i);
1421 			qsl_nthw_qen_en(be->p_qsl_nthw, qsl->v7.qen[entry + i].en);
1422 			qsl_nthw_qen_flush(be->p_qsl_nthw);
1423 		}
1424 	}
1425 
1426 	CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
1427 	return 0;
1428 }
1429 
1430 static int qsl_unmq_flush(void *be_dev, const struct qsl_func_s *qsl, int entry, int cnt)
1431 {
1432 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1433 	CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
1434 
1435 	if (qsl->ver == 7) {
1436 		qsl_nthw_unmq_cnt(be->p_qsl_nthw, 1);
1437 
1438 		for (int i = 0; i < cnt; i++) {
1439 			qsl_nthw_unmq_select(be->p_qsl_nthw, entry + i);
1440 			qsl_nthw_unmq_dest_queue(be->p_qsl_nthw,
1441 				qsl->v7.unmq[entry + i].dest_queue);
1442 			qsl_nthw_unmq_en(be->p_qsl_nthw, qsl->v7.unmq[entry + i].en);
1443 			qsl_nthw_unmq_flush(be->p_qsl_nthw);
1444 		}
1445 	}
1446 
1447 	CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
1448 	return 0;
1449 }
1450 
1451 /*
1452  * SLC LR
1453  */
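/* SLC LR callbacks: presence/version queries plus the RCP flush helper
 * (version 2 register layout) programming head/tail slice enables and offsets.
 */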
1454 
1455 static bool slc_lr_get_present(void *be_dev)
1456 {
1457 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1458 	return be->p_slc_lr_nthw != NULL;
1459 }
1460 
1461 static uint32_t slc_lr_get_version(void *be_dev)
1462 {
1463 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1464 	return (uint32_t)((nthw_module_get_major_version(be->p_slc_lr_nthw->m_slc_lr) << 16) |
1465 			(nthw_module_get_minor_version(be->p_slc_lr_nthw->m_slc_lr) & 0xffff));
1466 }
1467 
1468 static int slc_lr_rcp_flush(void *be_dev, const struct slc_lr_func_s *slc_lr, int category,
1469 	int cnt)
1470 {
1471 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1472 	CHECK_DEBUG_ON(be, slc_lr, be->p_slc_lr_nthw);
1473 
1474 	if (slc_lr->ver == 2) {
1475 		slc_lr_nthw_rcp_cnt(be->p_slc_lr_nthw, 1);
1476 
1477 		for (int i = 0; i < cnt; i++) {
1478 			slc_lr_nthw_rcp_select(be->p_slc_lr_nthw, category + i);
1479 			slc_lr_nthw_rcp_head_slc_en(be->p_slc_lr_nthw,
1480 				slc_lr->v2.rcp[category + i].head_slc_en);
1481 			slc_lr_nthw_rcp_head_dyn(be->p_slc_lr_nthw,
1482 				slc_lr->v2.rcp[category + i].head_dyn);
1483 			slc_lr_nthw_rcp_head_ofs(be->p_slc_lr_nthw,
1484 				slc_lr->v2.rcp[category + i].head_ofs);
1485 			slc_lr_nthw_rcp_tail_slc_en(be->p_slc_lr_nthw,
1486 				slc_lr->v2.rcp[category + i].tail_slc_en);
1487 			slc_lr_nthw_rcp_tail_dyn(be->p_slc_lr_nthw,
1488 				slc_lr->v2.rcp[category + i].tail_dyn);
1489 			slc_lr_nthw_rcp_tail_ofs(be->p_slc_lr_nthw,
1490 				slc_lr->v2.rcp[category + i].tail_ofs);
1491 			slc_lr_nthw_rcp_pcap(be->p_slc_lr_nthw, slc_lr->v2.rcp[category + i].pcap);
1492 			slc_lr_nthw_rcp_flush(be->p_slc_lr_nthw);
1493 		}
1494 	}
1495 
1496 	CHECK_DEBUG_OFF(slc_lr, be->p_slc_lr_nthw);
1497 	return 0;
1498 }
1499 
1500 /*
1501  * PDB
1502  */
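/* PDB callbacks: presence/version queries plus the per-category RCP flush and
 * the global CONFIG flush (version 9 register layout).
 */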
1503 
1504 static bool pdb_get_present(void *be_dev)
1505 {
1506 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1507 	return be->p_pdb_nthw != NULL;
1508 }
1509 
1510 static uint32_t pdb_get_version(void *be_dev)
1511 {
1512 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1513 	return (uint32_t)((nthw_module_get_major_version(be->p_pdb_nthw->m_pdb) << 16) |
1514 			(nthw_module_get_minor_version(be->p_pdb_nthw->m_pdb) & 0xffff));
1515 }
1516 
1517 static int pdb_rcp_flush(void *be_dev, const struct pdb_func_s *pdb, int category, int cnt)
1518 {
1519 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1520 	CHECK_DEBUG_ON(be, pdb, be->p_pdb_nthw);
1521 
1522 	if (pdb->ver == 9) {
1523 		pdb_nthw_rcp_cnt(be->p_pdb_nthw, 1);
1524 
1525 		for (int i = 0; i < cnt; i++) {
1526 			pdb_nthw_rcp_select(be->p_pdb_nthw, category + i);
1527 			pdb_nthw_rcp_descriptor(be->p_pdb_nthw,
1528 				pdb->v9.rcp[category + i].descriptor);
1529 			pdb_nthw_rcp_desc_len(be->p_pdb_nthw, pdb->v9.rcp[category + i].desc_len);
1530 			pdb_nthw_rcp_tx_port(be->p_pdb_nthw, pdb->v9.rcp[category + i].tx_port);
1531 			pdb_nthw_rcp_tx_ignore(be->p_pdb_nthw,
1532 				pdb->v9.rcp[category + i].tx_ignore);
1533 			pdb_nthw_rcp_tx_now(be->p_pdb_nthw, pdb->v9.rcp[category + i].tx_now);
1534 			pdb_nthw_rcp_crc_overwrite(be->p_pdb_nthw,
1535 				pdb->v9.rcp[category + i].crc_overwrite);
1536 			pdb_nthw_rcp_align(be->p_pdb_nthw, pdb->v9.rcp[category + i].align);
1537 			pdb_nthw_rcp_ofs0_dyn(be->p_pdb_nthw, pdb->v9.rcp[category + i].ofs0_dyn);
1538 			pdb_nthw_rcp_ofs0_rel(be->p_pdb_nthw, pdb->v9.rcp[category + i].ofs0_rel);
1539 			pdb_nthw_rcp_ofs1_dyn(be->p_pdb_nthw, pdb->v9.rcp[category + i].ofs1_dyn);
1540 			pdb_nthw_rcp_ofs1_rel(be->p_pdb_nthw, pdb->v9.rcp[category + i].ofs1_rel);
1541 			pdb_nthw_rcp_ofs2_dyn(be->p_pdb_nthw, pdb->v9.rcp[category + i].ofs2_dyn);
1542 			pdb_nthw_rcp_ofs2_rel(be->p_pdb_nthw, pdb->v9.rcp[category + i].ofs2_rel);
1543 			pdb_nthw_rcp_ip_prot_tnl(be->p_pdb_nthw,
1544 				pdb->v9.rcp[category + i].ip_prot_tnl);
1545 			pdb_nthw_rcp_ppc_hsh(be->p_pdb_nthw, pdb->v9.rcp[category + i].ppc_hsh);
1546 			pdb_nthw_rcp_duplicate_en(be->p_pdb_nthw,
1547 				pdb->v9.rcp[category + i].duplicate_en);
1548 			pdb_nthw_rcp_duplicate_bit(be->p_pdb_nthw,
1549 				pdb->v9.rcp[category + i].duplicate_bit);
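			/*
			 * FIXME: pcap_keep_fcs is written below through the
			 * duplicate_bit setter; this looks like a copy/paste slip and
			 * probably wants a dedicated pcap_keep_fcs setter if the nthw
			 * layer provides one.
			 */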
1550 			pdb_nthw_rcp_duplicate_bit(be->p_pdb_nthw,
1551 				pdb->v9.rcp[category + i].pcap_keep_fcs);
1552 			pdb_nthw_rcp_flush(be->p_pdb_nthw);
1553 		}
1554 	}
1555 
1556 	CHECK_DEBUG_OFF(pdb, be->p_pdb_nthw);
1557 	return 0;
1558 }
1559 
1560 static int pdb_config_flush(void *be_dev, const struct pdb_func_s *pdb)
1561 {
1562 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1563 	CHECK_DEBUG_ON(be, pdb, be->p_pdb_nthw);
1564 
1565 	if (pdb->ver == 9) {
1566 		pdb_nthw_config_ts_format(be->p_pdb_nthw, pdb->v9.config->ts_format);
1567 		pdb_nthw_config_port_ofs(be->p_pdb_nthw, pdb->v9.config->port_ofs);
1568 		pdb_nthw_config_flush(be->p_pdb_nthw);
1569 	}
1570 
1571 	CHECK_DEBUG_OFF(pdb, be->p_pdb_nthw);
1572 	return 0;
1573 }
1574 
1575 /*
1576  * DBS
1577  */
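/*
 * Rx queue management is not handled by this backend: both helpers below only
 * log an error. Note that free_rx_queue() still returns 0 (success) while
 * alloc_rx_queue() returns -1.
 */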
1578 
1579 static int alloc_rx_queue(void *be_dev, int queue_id)
1580 {
1581 	(void)be_dev;
1582 	(void)queue_id;
1583 	NT_LOG(ERR, FILTER, "ERROR alloc Rx queue: not supported");
1584 	return -1;
1585 }
1586 
1587 static int free_rx_queue(void *be_dev, int hw_queue)
1588 {
1589 	(void)be_dev;
1590 	(void)hw_queue;
1591 	NT_LOG(ERR, FILTER, "ERROR free Rx queue: not supported");
1592 	return 0;
1593 }
1594 
1595 const struct flow_api_backend_ops flow_be_iface = {
1596 	1,	/* version */
1597 
1598 	set_debug_mode,
1599 	get_nb_phy_ports,
1600 	get_nb_rx_ports,
1601 	get_ltx_avail,
1602 	get_nb_cat_funcs,
1603 	get_nb_categories,
1604 	get_nb_cat_km_if_cnt,
1605 	get_nb_cat_km_if_m0,
1606 	get_nb_cat_km_if_m1,
1607 	get_nb_queues,
1608 	get_nb_km_flow_types,
1609 	get_nb_pm_ext,
1610 	get_nb_len,
1611 	get_kcc_size,
1612 	get_kcc_banks,
1613 	get_nb_km_categories,
1614 	get_nb_km_cam_banks,
1615 	get_nb_km_cam_record_words,
1616 	get_nb_km_cam_records,
1617 	get_nb_km_tcam_banks,
1618 	get_nb_km_tcam_bank_width,
1619 	get_nb_flm_categories,
1620 	get_nb_flm_size_mb,
1621 	get_nb_flm_entry_size,
1622 	get_nb_flm_variant,
1623 	get_nb_flm_prios,
1624 	get_nb_flm_pst_profiles,
1625 	get_nb_flm_scrub_profiles,
1626 	get_nb_flm_load_aps_max,
1627 	get_nb_qsl_categories,
1628 	get_nb_qsl_qst_entries,
1629 	get_nb_pdb_categories,
1630 	get_nb_roa_categories,
1631 	get_nb_tpe_categories,
1632 	get_nb_tx_cpy_writers,
1633 	get_nb_tx_cpy_mask_mem,
1634 	get_nb_tx_rpl_depth,
1635 	get_nb_tx_rpl_ext_categories,
1636 	get_nb_tpe_ifr_categories,
1637 	get_nb_rpp_per_ps,
1638 	get_nb_hsh_categories,
1639 	get_nb_hsh_toeplitz,
1640 
1641 	alloc_rx_queue,
1642 	free_rx_queue,
1643 
1644 	cat_get_present,
1645 	cat_get_version,
1646 	cat_cfn_flush,
1647 
1648 	cat_kce_flush,
1649 	cat_kcs_flush,
1650 	cat_fte_flush,
1651 
1652 	cat_cte_flush,
1653 	cat_cts_flush,
1654 	cat_cot_flush,
1655 	cat_cct_flush,
1656 	cat_exo_flush,
1657 	cat_rck_flush,
1658 	cat_len_flush,
1659 	cat_kcc_flush,
1660 
1661 	km_get_present,
1662 	km_get_version,
1663 	km_rcp_flush,
1664 	km_cam_flush,
1665 	km_tcam_flush,
1666 	km_tci_flush,
1667 	km_tcq_flush,
1668 
1669 	flm_get_present,
1670 	flm_get_version,
1671 	flm_control_flush,
1672 	flm_status_flush,
1673 	flm_status_update,
1674 	flm_scan_flush,
1675 	flm_load_bin_flush,
1676 	flm_prio_flush,
1677 	flm_pst_flush,
1678 	flm_rcp_flush,
1679 	flm_scrub_flush,
1680 	flm_buf_ctrl_update,
1681 	flm_stat_update,
1682 	flm_lrn_data_flush,
1683 	flm_inf_sta_data_update,
1684 
1685 	hsh_get_present,
1686 	hsh_get_version,
1687 	hsh_rcp_flush,
1688 
1689 	qsl_get_present,
1690 	qsl_get_version,
1691 	qsl_rcp_flush,
1692 	qsl_qst_flush,
1693 	qsl_qen_flush,
1694 	qsl_unmq_flush,
1695 
1696 	slc_lr_get_present,
1697 	slc_lr_get_version,
1698 	slc_lr_rcp_flush,
1699 
1700 	pdb_get_present,
1701 	pdb_get_version,
1702 	pdb_rcp_flush,
1703 	pdb_config_flush,
1704 };
1705 
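/*
 * Rough usage sketch of the ops table above (illustrative only): a caller that
 * has looked up the registered flow backend initializes this binary backend,
 * queries capabilities through the callbacks and programs the flow modules.
 * The local variable names below are hypothetical, and the callback field
 * names are assumed to mirror the function names used in this file.
 *
 *	void *be_dev = NULL;
 *	const struct flow_api_backend_ops *ops = bin_flow_backend_init(p_fpga, &be_dev);
 *
 *	if (ops && ops->get_nb_phy_ports(be_dev) > 0) {
 *		// program CAT/KM/FLM/HSH/QSL/SLC LR/PDB through the remaining callbacks
 *	}
 */
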
1706 const struct flow_api_backend_ops *bin_flow_backend_init(nthw_fpga_t *p_fpga, void **dev)
1707 {
1708 	uint8_t physical_adapter_no = (uint8_t)p_fpga->p_fpga_info->adapter_no;
1709 
1710 	struct info_nthw *pinfonthw = info_nthw_new();
1711 	info_nthw_init(pinfonthw, p_fpga, physical_adapter_no);
1712 	be_devs[physical_adapter_no].p_info_nthw = pinfonthw;
1713 
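	/*
	 * Each flow module below is probed by calling its *_nthw_init() with a
	 * NULL instance; a return value of 0 is taken as "module present in the
	 * FPGA", after which a real instance is allocated and initialized.
	 * Absent modules leave their backend pointer NULL, which the
	 * *_get_present() callbacks report back to the flow API.
	 */
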
1714 	/* Init nthw CAT */
1715 	if (cat_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1716 		struct cat_nthw *pcatnthw = cat_nthw_new();
1717 		cat_nthw_init(pcatnthw, p_fpga, physical_adapter_no);
1718 		be_devs[physical_adapter_no].p_cat_nthw = pcatnthw;
1719 
1720 	} else {
1721 		be_devs[physical_adapter_no].p_cat_nthw = NULL;
1722 	}
1723 
1724 	/* Init nthw KM */
1725 	if (km_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1726 		struct km_nthw *pkmnthw = km_nthw_new();
1727 		km_nthw_init(pkmnthw, p_fpga, physical_adapter_no);
1728 		be_devs[physical_adapter_no].p_km_nthw = pkmnthw;
1729 
1730 	} else {
1731 		be_devs[physical_adapter_no].p_km_nthw = NULL;
1732 	}
1733 
1734 	/* Init nthw FLM */
1735 	if (flm_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1736 		struct flm_nthw *pflmnthw = flm_nthw_new();
1737 		flm_nthw_init(pflmnthw, p_fpga, physical_adapter_no);
1738 		be_devs[physical_adapter_no].p_flm_nthw = pflmnthw;
1739 
1740 	} else {
1741 		be_devs[physical_adapter_no].p_flm_nthw = NULL;
1742 	}
1743 
1744 	/* Init nthw IFR */
1745 	if (ifr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1746 		struct ifr_nthw *ifrnthw = ifr_nthw_new();
1747 		ifr_nthw_init(ifrnthw, p_fpga, physical_adapter_no);
1748 		be_devs[physical_adapter_no].p_ifr_nthw = ifrnthw;
1749 
1750 	} else {
1751 		be_devs[physical_adapter_no].p_ifr_nthw = NULL;
1752 	}
1753 
1754 	/* Init nthw HSH */
1755 	if (hsh_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1756 		struct hsh_nthw *phshnthw = hsh_nthw_new();
1757 		hsh_nthw_init(phshnthw, p_fpga, physical_adapter_no);
1758 		be_devs[physical_adapter_no].p_hsh_nthw = phshnthw;
1759 
1760 	} else {
1761 		be_devs[physical_adapter_no].p_hsh_nthw = NULL;
1762 	}
1763 
1764 	/* Init nthw QSL */
1765 	if (qsl_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1766 		struct qsl_nthw *pqslnthw = qsl_nthw_new();
1767 		qsl_nthw_init(pqslnthw, p_fpga, physical_adapter_no);
1768 		be_devs[physical_adapter_no].p_qsl_nthw = pqslnthw;
1769 
1770 	} else {
1771 		be_devs[physical_adapter_no].p_qsl_nthw = NULL;
1772 	}
1773 
1774 	/* Init nthw SLC LR */
1775 	if (slc_lr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1776 		struct slc_lr_nthw *pslclrnthw = slc_lr_nthw_new();
1777 		slc_lr_nthw_init(pslclrnthw, p_fpga, physical_adapter_no);
1778 		be_devs[physical_adapter_no].p_slc_lr_nthw = pslclrnthw;
1779 
1780 	} else {
1781 		be_devs[physical_adapter_no].p_slc_lr_nthw = NULL;
1782 	}
1783 
1784 	/* Init nthw PDB */
1785 	if (pdb_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1786 		struct pdb_nthw *ppdbnthw = pdb_nthw_new();
1787 		pdb_nthw_init(ppdbnthw, p_fpga, physical_adapter_no);
1788 		be_devs[physical_adapter_no].p_pdb_nthw = ppdbnthw;
1789 
1790 	} else {
1791 		be_devs[physical_adapter_no].p_pdb_nthw = NULL;
1792 	}
1793 
1794 	/* Init nthw HFU */
1795 	if (hfu_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1796 		struct hfu_nthw *ptr = hfu_nthw_new();
1797 		hfu_nthw_init(ptr, p_fpga, physical_adapter_no);
1798 		be_devs[physical_adapter_no].p_hfu_nthw = ptr;
1799 
1800 	} else {
1801 		be_devs[physical_adapter_no].p_hfu_nthw = NULL;
1802 	}
1803 
1804 	/* Init nthw RPP_LR */
1805 	if (rpp_lr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1806 		struct rpp_lr_nthw *ptr = rpp_lr_nthw_new();
1807 		rpp_lr_nthw_init(ptr, p_fpga, physical_adapter_no);
1808 		be_devs[physical_adapter_no].p_rpp_lr_nthw = ptr;
1809 
1810 	} else {
1811 		be_devs[physical_adapter_no].p_rpp_lr_nthw = NULL;
1812 	}
1813 
1814 	/* Init nthw TX_CPY */
1815 	if (tx_cpy_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1816 		struct tx_cpy_nthw *ptr = tx_cpy_nthw_new();
1817 		tx_cpy_nthw_init(ptr, p_fpga, physical_adapter_no);
1818 		be_devs[physical_adapter_no].p_tx_cpy_nthw = ptr;
1819 
1820 	} else {
1821 		be_devs[physical_adapter_no].p_tx_cpy_nthw = NULL;
1822 	}
1823 
1824 	/* Init nthw CSU */
1825 	if (csu_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1826 		struct csu_nthw *ptr = csu_nthw_new();
1827 		csu_nthw_init(ptr, p_fpga, physical_adapter_no);
1828 		be_devs[physical_adapter_no].p_csu_nthw = ptr;
1829 
1830 	} else {
1831 		be_devs[physical_adapter_no].p_csu_nthw = NULL;
1832 	}
1833 
1834 	be_devs[physical_adapter_no].adapter_no = physical_adapter_no;
1835 	*dev = (void *)&be_devs[physical_adapter_no];
1836 
1837 	return &flow_be_iface;
1838 }
1839 
1840 static void bin_flow_backend_done(void *dev)
1841 {
1842 	struct backend_dev_s *be_dev = (struct backend_dev_s *)dev;
1843 	info_nthw_delete(be_dev->p_info_nthw);
1844 	cat_nthw_delete(be_dev->p_cat_nthw);
1845 	km_nthw_delete(be_dev->p_km_nthw);
1846 	flm_nthw_delete(be_dev->p_flm_nthw);
1847 	hsh_nthw_delete(be_dev->p_hsh_nthw);
1848 	qsl_nthw_delete(be_dev->p_qsl_nthw);
1849 	slc_lr_nthw_delete(be_dev->p_slc_lr_nthw);
1850 	pdb_nthw_delete(be_dev->p_pdb_nthw);
1851 	csu_nthw_delete(be_dev->p_csu_nthw);
1852 	hfu_nthw_delete(be_dev->p_hfu_nthw);
1853 	rpp_lr_nthw_delete(be_dev->p_rpp_lr_nthw);
1854 	tx_cpy_nthw_delete(be_dev->p_tx_cpy_nthw);
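	/* Note: the IFR instance allocated in bin_flow_backend_init() is not
	 * released here.
	 */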
1855 }
1856 
1857 static const struct flow_backend_ops ops = {
1858 	.bin_flow_backend_init = bin_flow_backend_init,
1859 	.bin_flow_backend_done = bin_flow_backend_done,
1860 };
1861 
1862 void flow_backend_init(void)
1863 {
1864 	register_flow_backend_ops(&ops);
1865 }
1866