xref: /dpdk/drivers/net/ntnic/nthw/flow_api/flow_backend/flow_backend.c (revision 689a97b08f1075dc78a3d9ec2cf5802a72f71db2)
1 /*
2  * SPDX-License-Identifier: BSD-3-Clause
3  * Copyright(c) 2023 Napatech A/S
4  */
5 
6 #include <stdint.h>
7 
8 #include "flow_nthw_info.h"
9 #include "flow_nthw_ifr.h"
10 #include "flow_nthw_cat.h"
11 #include "flow_nthw_csu.h"
12 #include "flow_nthw_km.h"
13 #include "flow_nthw_flm.h"
14 #include "flow_nthw_hfu.h"
15 #include "flow_nthw_hsh.h"
16 #include "flow_nthw_qsl.h"
17 #include "flow_nthw_slc_lr.h"
18 #include "flow_nthw_pdb.h"
19 #include "flow_nthw_rpp_lr.h"
20 #include "flow_nthw_tx_cpy.h"
21 #include "flow_nthw_tx_ins.h"
22 #include "ntnic_mod_reg.h"
23 #include "nthw_fpga_model.h"
24 #include "hw_mod_backend.h"
25 
26 /*
27  * Binary Flow API backend implementation into ntservice driver
28  *
29  * General note on this backend implementation:
 * A shadow class could be used to combine multiple register writes; however,
 * this backend is intended only for development and testing.
31  */
32 
/*
 * Per-adapter backend context.
 *
 * One entry per physical adapter (see be_devs below). Each member points to
 * the driver object for one NTHW FPGA module; a pointer may be NULL when the
 * module is absent (checked by the *_get_present() callbacks).
 */
static struct backend_dev_s {
	uint8_t adapter_no;	/* index of the owning adapter */
	enum debug_mode_e dmode;	/* debug flags consulted by CHECK_DEBUG_ON() */
	struct info_nthw *p_info_nthw;	/* INFO module: capability queries */
	struct cat_nthw *p_cat_nthw;	/* CAT: categorizer */
	struct km_nthw *p_km_nthw;	/* KM: key matcher */
	struct flm_nthw *p_flm_nthw;	/* FLM: flow matcher */
	struct hsh_nthw *p_hsh_nthw;	/* HSH: hashing */
	struct qsl_nthw *p_qsl_nthw;	/* QSL: queue selection */
	struct slc_lr_nthw *p_slc_lr_nthw;	/* SLC LR: slicer */
	struct pdb_nthw *p_pdb_nthw;	/* PDB: packet descriptor builder */
	struct hfu_nthw *p_hfu_nthw;    /* TPE module */
	struct rpp_lr_nthw *p_rpp_lr_nthw;      /* TPE module */
	struct tx_cpy_nthw *p_tx_cpy_nthw;      /* TPE module */
	struct tx_ins_nthw *p_tx_ins_nthw;      /* TPE module */
	struct csu_nthw *p_csu_nthw;    /* TPE module */
	struct ifr_nthw *p_ifr_nthw;    /* TPE module */
} be_devs[MAX_PHYS_ADAPTERS];
51 
/*
 * Enable module register-access debug tracing for the duration of one
 * backend call when either the backend-wide write-debug flag or the
 * per-module debug flag is set.
 *
 * NOTE: deliberately NOT wrapped in do { } while (0) — it declares the
 * local __debug__, which the matching CHECK_DEBUG_OFF() in the same scope
 * reads to decide whether tracing must be switched off again. The two
 * macros must therefore always be used as a pair within one block.
 * 'mod' is token-pasted into the <mod>_nthw_set_debug_mode() call.
 */
#define CHECK_DEBUG_ON(be, mod, inst)                                                             \
	int __debug__ = 0;                                                                        \
	if (((be)->dmode & FLOW_BACKEND_DEBUG_MODE_WRITE) || (mod)->debug)                        \
		do {                                                                              \
			mod##_nthw_set_debug_mode((inst), 0xFF);                                  \
			__debug__ = 1;                                                            \
	} while (0)

/* Counterpart of CHECK_DEBUG_ON(): restore debug mode if it was enabled. */
#define CHECK_DEBUG_OFF(mod, inst)                                                                \
	do {                                                                                      \
		if (__debug__)                                                                    \
			mod##_nthw_set_debug_mode((inst), 0);                                     \
	} while (0)
65 
66 const struct flow_api_backend_ops *bin_flow_backend_init(nthw_fpga_t *p_fpga, void **be_dev);
67 static void bin_flow_backend_done(void *be_dev);
68 
69 static int set_debug_mode(void *be_dev, enum debug_mode_e mode)
70 {
71 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
72 	be->dmode = mode;
73 	return 0;
74 }
75 
76 /*
77  * INFO
78  */
79 
80 static int get_nb_phy_ports(void *be_dev)
81 {
82 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
83 	return info_nthw_get_nb_phy_ports(be->p_info_nthw);
84 }
85 
86 static int get_nb_rx_ports(void *be_dev)
87 {
88 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
89 	return info_nthw_get_nb_rx_ports(be->p_info_nthw);
90 }
91 
92 static int get_ltx_avail(void *be_dev)
93 {
94 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
95 	return info_nthw_get_ltx_avail(be->p_info_nthw);
96 }
97 
98 static int get_nb_cat_funcs(void *be_dev)
99 {
100 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
101 	return info_nthw_get_nb_cat_funcs(be->p_info_nthw);
102 }
103 
104 static int get_nb_categories(void *be_dev)
105 {
106 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
107 	return info_nthw_get_nb_categories(be->p_info_nthw);
108 }
109 
110 static int get_nb_cat_km_if_cnt(void *be_dev)
111 {
112 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
113 	return info_nthw_get_nb_cat_km_if_cnt(be->p_info_nthw);
114 }
115 
116 static int get_nb_cat_km_if_m0(void *be_dev)
117 {
118 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
119 	return info_nthw_get_nb_cat_km_if_m0(be->p_info_nthw);
120 }
121 
122 static int get_nb_cat_km_if_m1(void *be_dev)
123 {
124 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
125 	return info_nthw_get_nb_cat_km_if_m1(be->p_info_nthw);
126 }
127 
128 static int get_nb_queues(void *be_dev)
129 {
130 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
131 	return info_nthw_get_nb_queues(be->p_info_nthw);
132 }
133 
134 static int get_nb_km_flow_types(void *be_dev)
135 {
136 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
137 	return info_nthw_get_nb_km_flow_types(be->p_info_nthw);
138 }
139 
140 static int get_nb_pm_ext(void *be_dev)
141 {
142 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
143 	return info_nthw_get_nb_pm_ext(be->p_info_nthw);
144 }
145 
146 static int get_nb_len(void *be_dev)
147 {
148 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
149 	return info_nthw_get_nb_len(be->p_info_nthw);
150 }
151 
152 static int get_kcc_size(void *be_dev)
153 {
154 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
155 	return info_nthw_get_kcc_size(be->p_info_nthw);
156 }
157 
158 static int get_kcc_banks(void *be_dev)
159 {
160 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
161 	return info_nthw_get_kcc_banks(be->p_info_nthw);
162 }
163 
164 static int get_nb_km_categories(void *be_dev)
165 {
166 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
167 	return info_nthw_get_nb_km_categories(be->p_info_nthw);
168 }
169 
170 static int get_nb_km_cam_banks(void *be_dev)
171 {
172 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
173 	return info_nthw_get_nb_km_cam_banks(be->p_info_nthw);
174 }
175 
176 static int get_nb_km_cam_record_words(void *be_dev)
177 {
178 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
179 	return info_nthw_get_nb_km_cam_record_words(be->p_info_nthw);
180 }
181 
182 static int get_nb_km_cam_records(void *be_dev)
183 {
184 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
185 	return info_nthw_get_nb_km_cam_records(be->p_info_nthw);
186 }
187 
188 static int get_nb_km_tcam_banks(void *be_dev)
189 {
190 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
191 	return info_nthw_get_nb_km_tcam_banks(be->p_info_nthw);
192 }
193 
194 static int get_nb_km_tcam_bank_width(void *be_dev)
195 {
196 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
197 	return info_nthw_get_nb_km_tcam_bank_width(be->p_info_nthw);
198 }
199 
200 static int get_nb_flm_categories(void *be_dev)
201 {
202 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
203 	return info_nthw_get_nb_flm_categories(be->p_info_nthw);
204 }
205 
206 static int get_nb_flm_size_mb(void *be_dev)
207 {
208 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
209 	return info_nthw_get_nb_flm_size_mb(be->p_info_nthw);
210 }
211 
212 static int get_nb_flm_entry_size(void *be_dev)
213 {
214 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
215 	return info_nthw_get_nb_flm_entry_size(be->p_info_nthw);
216 }
217 
218 static int get_nb_flm_variant(void *be_dev)
219 {
220 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
221 	return info_nthw_get_nb_flm_variant(be->p_info_nthw);
222 }
223 
224 static int get_nb_flm_prios(void *be_dev)
225 {
226 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
227 	return info_nthw_get_nb_flm_prios(be->p_info_nthw);
228 }
229 
230 static int get_nb_flm_pst_profiles(void *be_dev)
231 {
232 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
233 	return info_nthw_get_nb_flm_pst_profiles(be->p_info_nthw);
234 }
235 
236 static int get_nb_flm_scrub_profiles(void *be_dev)
237 {
238 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
239 	return info_nthw_get_nb_flm_scrub_profiles(be->p_info_nthw);
240 }
241 
242 static int get_nb_flm_load_aps_max(void *be_dev)
243 {
244 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
245 	return info_nthw_get_nb_flm_load_aps_max(be->p_info_nthw);
246 }
247 
248 static int get_nb_qsl_categories(void *be_dev)
249 {
250 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
251 	return info_nthw_get_nb_qsl_categories(be->p_info_nthw);
252 }
253 
254 static int get_nb_qsl_qst_entries(void *be_dev)
255 {
256 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
257 	return info_nthw_get_nb_qsl_qst_entries(be->p_info_nthw);
258 }
259 
260 static int get_nb_pdb_categories(void *be_dev)
261 {
262 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
263 	return info_nthw_get_nb_pdb_categories(be->p_info_nthw);
264 }
265 
266 static int get_nb_roa_categories(void *be_dev)
267 {
268 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
269 	return info_nthw_get_nb_roa_categories(be->p_info_nthw);
270 }
271 
272 static int get_nb_tpe_categories(void *be_dev)
273 {
274 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
275 	return info_nthw_get_nb_tpe_categories(be->p_info_nthw);
276 }
277 
278 static int get_nb_tx_cpy_writers(void *be_dev)
279 {
280 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
281 	return info_nthw_get_nb_tx_cpy_writers(be->p_info_nthw);
282 }
283 
284 static int get_nb_tx_cpy_mask_mem(void *be_dev)
285 {
286 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
287 	return info_nthw_get_nb_tx_cpy_mask_mem(be->p_info_nthw);
288 }
289 
290 static int get_nb_tx_rpl_depth(void *be_dev)
291 {
292 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
293 	return info_nthw_get_nb_tx_rpl_depth(be->p_info_nthw);
294 }
295 
296 static int get_nb_tx_rpl_ext_categories(void *be_dev)
297 {
298 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
299 	return info_nthw_get_nb_tx_rpl_ext_categories(be->p_info_nthw);
300 }
301 
302 static int get_nb_tpe_ifr_categories(void *be_dev)
303 {
304 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
305 	return info_nthw_get_nb_tpe_ifr_categories(be->p_info_nthw);
306 }
307 
308 static int get_nb_rpp_per_ps(void *be_dev)
309 {
310 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
311 	return info_nthw_get_nb_rpp_per_ps(be->p_info_nthw);
312 }
313 
314 static int get_nb_hsh_categories(void *be_dev)
315 {
316 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
317 	return info_nthw_get_nb_hsh_categories(be->p_info_nthw);
318 }
319 
320 static int get_nb_hsh_toeplitz(void *be_dev)
321 {
322 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
323 	return info_nthw_get_nb_hsh_toeplitz(be->p_info_nthw);
324 }
325 
326 /*
327  * CAT
328  */
329 
330 static bool cat_get_present(void *be_dev)
331 {
332 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
333 	return be->p_cat_nthw != NULL;
334 }
335 
336 static uint32_t cat_get_version(void *be_dev)
337 {
338 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
339 	return (uint32_t)((nthw_module_get_major_version(be->p_cat_nthw->m_cat) << 16) |
340 			(nthw_module_get_minor_version(be->p_cat_nthw->m_cat) & 0xffff));
341 }
342 
/*
 * Write a range of CFN (categorizer function) records to hardware.
 *
 * @param be_dev   Backend context (struct backend_dev_s *).
 * @param cat      Shadow copy of the CAT module configuration.
 * @param cat_func First CFN record index to write.
 * @param cnt      Number of consecutive records to write.
 * @return Always 0.
 *
 * For each record: select the index, program every field, then flush.
 * Only CAT versions 18 and 21 are handled; other versions are a no-op.
 */
static int cat_cfn_flush(void *be_dev, const struct cat_func_s *cat, int cat_func, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;

	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);

	if (cat->ver == 18) {
		/* One record is written per flush. */
		cat_nthw_cfn_cnt(be->p_cat_nthw, 1U);

		for (int i = 0; i < cnt; i++) {
			cat_nthw_cfn_select(be->p_cat_nthw, cat_func);
			cat_nthw_cfn_enable(be->p_cat_nthw, cat->v18.cfn[cat_func].enable);
			cat_nthw_cfn_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].inv);
			/* ptc_* fields: per-protocol test conditions */
			cat_nthw_cfn_ptc_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_inv);
			cat_nthw_cfn_ptc_isl(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_isl);
			cat_nthw_cfn_ptc_cfp(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_cfp);
			cat_nthw_cfn_ptc_mac(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_mac);
			cat_nthw_cfn_ptc_l2(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_l2);
			cat_nthw_cfn_ptc_vn_tag(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_vntag);
			cat_nthw_cfn_ptc_vlan(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_vlan);
			cat_nthw_cfn_ptc_mpls(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_mpls);
			cat_nthw_cfn_ptc_l3(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_l3);
			cat_nthw_cfn_ptc_frag(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_frag);
			cat_nthw_cfn_ptc_ip_prot(be->p_cat_nthw,
				cat->v18.cfn[cat_func].ptc_ip_prot);
			cat_nthw_cfn_ptc_l4(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_l4);
			cat_nthw_cfn_ptc_tunnel(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tunnel);
			cat_nthw_cfn_ptc_tnl_l2(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_l2);
			cat_nthw_cfn_ptc_tnl_vlan(be->p_cat_nthw,
				cat->v18.cfn[cat_func].ptc_tnl_vlan);
			cat_nthw_cfn_ptc_tnl_mpls(be->p_cat_nthw,
				cat->v18.cfn[cat_func].ptc_tnl_mpls);
			cat_nthw_cfn_ptc_tnl_l3(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_l3);
			cat_nthw_cfn_ptc_tnl_frag(be->p_cat_nthw,
				cat->v18.cfn[cat_func].ptc_tnl_frag);
			cat_nthw_cfn_ptc_tnl_ip_prot(be->p_cat_nthw,
				cat->v18.cfn[cat_func].ptc_tnl_ip_prot);
			cat_nthw_cfn_ptc_tnl_l4(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_l4);

			/* err_* fields: error condition matching */
			cat_nthw_cfn_err_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].err_inv);
			cat_nthw_cfn_err_cv(be->p_cat_nthw, cat->v18.cfn[cat_func].err_cv);
			cat_nthw_cfn_err_fcs(be->p_cat_nthw, cat->v18.cfn[cat_func].err_fcs);
			cat_nthw_cfn_err_trunc(be->p_cat_nthw, cat->v18.cfn[cat_func].err_trunc);
			cat_nthw_cfn_err_l3_cs(be->p_cat_nthw, cat->v18.cfn[cat_func].err_l3_cs);
			cat_nthw_cfn_err_l4_cs(be->p_cat_nthw, cat->v18.cfn[cat_func].err_l4_cs);

			cat_nthw_cfn_mac_port(be->p_cat_nthw, cat->v18.cfn[cat_func].mac_port);

			/* pm_* fields: pattern matcher configuration */
			cat_nthw_cfn_pm_cmp(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_cmp);
			cat_nthw_cfn_pm_dct(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_dct);
			cat_nthw_cfn_pm_ext_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_ext_inv);
			cat_nthw_cfn_pm_cmb(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_cmb);
			cat_nthw_cfn_pm_and_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_and_inv);
			cat_nthw_cfn_pm_or_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_or_inv);
			cat_nthw_cfn_pm_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_inv);

			cat_nthw_cfn_lc(be->p_cat_nthw, cat->v18.cfn[cat_func].lc);
			cat_nthw_cfn_lc_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].lc_inv);
			cat_nthw_cfn_km0_or(be->p_cat_nthw, cat->v18.cfn[cat_func].km_or);
			cat_nthw_cfn_flush(be->p_cat_nthw);
			cat_func++;
		}

	} else if (cat->ver == 21) {
		/* One record is written per flush. */
		cat_nthw_cfn_cnt(be->p_cat_nthw, 1U);

		for (int i = 0; i < cnt; i++) {
			cat_nthw_cfn_select(be->p_cat_nthw, cat_func);
			cat_nthw_cfn_enable(be->p_cat_nthw, cat->v21.cfn[cat_func].enable);
			cat_nthw_cfn_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].inv);
			/* ptc_* fields: per-protocol test conditions */
			cat_nthw_cfn_ptc_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_inv);
			cat_nthw_cfn_ptc_isl(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_isl);
			cat_nthw_cfn_ptc_cfp(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_cfp);
			cat_nthw_cfn_ptc_mac(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_mac);
			cat_nthw_cfn_ptc_l2(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_l2);
			cat_nthw_cfn_ptc_vn_tag(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_vntag);
			cat_nthw_cfn_ptc_vlan(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_vlan);
			cat_nthw_cfn_ptc_mpls(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_mpls);
			cat_nthw_cfn_ptc_l3(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_l3);
			cat_nthw_cfn_ptc_frag(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_frag);
			cat_nthw_cfn_ptc_ip_prot(be->p_cat_nthw,
				cat->v21.cfn[cat_func].ptc_ip_prot);
			cat_nthw_cfn_ptc_l4(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_l4);
			cat_nthw_cfn_ptc_tunnel(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tunnel);
			cat_nthw_cfn_ptc_tnl_l2(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_l2);
			cat_nthw_cfn_ptc_tnl_vlan(be->p_cat_nthw,
				cat->v21.cfn[cat_func].ptc_tnl_vlan);
			cat_nthw_cfn_ptc_tnl_mpls(be->p_cat_nthw,
				cat->v21.cfn[cat_func].ptc_tnl_mpls);
			cat_nthw_cfn_ptc_tnl_l3(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_l3);
			cat_nthw_cfn_ptc_tnl_frag(be->p_cat_nthw,
				cat->v21.cfn[cat_func].ptc_tnl_frag);
			cat_nthw_cfn_ptc_tnl_ip_prot(be->p_cat_nthw,
				cat->v21.cfn[cat_func].ptc_tnl_ip_prot);
			cat_nthw_cfn_ptc_tnl_l4(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_l4);

			/* err_* fields: v21 adds tunnel checksum and TTL checks */
			cat_nthw_cfn_err_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].err_inv);
			cat_nthw_cfn_err_cv(be->p_cat_nthw, cat->v21.cfn[cat_func].err_cv);
			cat_nthw_cfn_err_fcs(be->p_cat_nthw, cat->v21.cfn[cat_func].err_fcs);
			cat_nthw_cfn_err_trunc(be->p_cat_nthw, cat->v21.cfn[cat_func].err_trunc);
			cat_nthw_cfn_err_l3_cs(be->p_cat_nthw, cat->v21.cfn[cat_func].err_l3_cs);
			cat_nthw_cfn_err_l4_cs(be->p_cat_nthw, cat->v21.cfn[cat_func].err_l4_cs);
			cat_nthw_cfn_err_tnl_l3_cs(be->p_cat_nthw,
				cat->v21.cfn[cat_func].err_tnl_l3_cs);
			cat_nthw_cfn_err_tnl_l4_cs(be->p_cat_nthw,
				cat->v21.cfn[cat_func].err_tnl_l4_cs);
			cat_nthw_cfn_err_ttl_exp(be->p_cat_nthw,
				cat->v21.cfn[cat_func].err_ttl_exp);
			cat_nthw_cfn_err_tnl_ttl_exp(be->p_cat_nthw,
				cat->v21.cfn[cat_func].err_tnl_ttl_exp);

			cat_nthw_cfn_mac_port(be->p_cat_nthw, cat->v21.cfn[cat_func].mac_port);

			/* pm_* fields: pattern matcher configuration */
			cat_nthw_cfn_pm_cmp(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_cmp);
			cat_nthw_cfn_pm_dct(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_dct);
			cat_nthw_cfn_pm_ext_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_ext_inv);
			cat_nthw_cfn_pm_cmb(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_cmb);
			cat_nthw_cfn_pm_and_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_and_inv);
			cat_nthw_cfn_pm_or_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_or_inv);
			cat_nthw_cfn_pm_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_inv);

			cat_nthw_cfn_lc(be->p_cat_nthw, cat->v21.cfn[cat_func].lc);
			cat_nthw_cfn_lc_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].lc_inv);
			cat_nthw_cfn_km0_or(be->p_cat_nthw, cat->v21.cfn[cat_func].km0_or);

			/* Program the second KM interface only when the FPGA has one. */
			if (be->p_cat_nthw->m_km_if_cnt > 1)
				cat_nthw_cfn_km1_or(be->p_cat_nthw, cat->v21.cfn[cat_func].km1_or);

			cat_nthw_cfn_flush(be->p_cat_nthw);
			cat_func++;
		}
	}

	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
	return 0;
}
479 
480 static int cat_kce_flush(void *be_dev, const struct cat_func_s *cat, int km_if_idx, int index,
481 	int cnt)
482 {
483 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
484 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
485 
486 	if (cat->ver == 18) {
487 		cat_nthw_kce_cnt(be->p_cat_nthw, 0, 1U);
488 
489 		for (int i = 0; i < cnt; i++) {
490 			cat_nthw_kce_select(be->p_cat_nthw, 0, index + i);
491 			cat_nthw_kce_enable(be->p_cat_nthw, 0, cat->v18.kce[index + i].enable_bm);
492 			cat_nthw_kce_flush(be->p_cat_nthw, 0);
493 		}
494 
495 	} else if (cat->ver == 21) {
496 		cat_nthw_kce_cnt(be->p_cat_nthw, km_if_idx, 1U);
497 
498 		for (int i = 0; i < cnt; i++) {
499 			cat_nthw_kce_select(be->p_cat_nthw, km_if_idx, index + i);
500 			cat_nthw_kce_enable(be->p_cat_nthw, km_if_idx,
501 				cat->v21.kce[index + i].enable_bm[km_if_idx]);
502 			cat_nthw_kce_flush(be->p_cat_nthw, km_if_idx);
503 		}
504 	}
505 
506 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
507 	return 0;
508 }
509 
510 static int cat_kcs_flush(void *be_dev, const struct cat_func_s *cat, int km_if_idx, int cat_func,
511 	int cnt)
512 {
513 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
514 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
515 
516 	if (cat->ver == 18) {
517 		cat_nthw_kcs_cnt(be->p_cat_nthw, 0, 1U);
518 
519 		for (int i = 0; i < cnt; i++) {
520 			cat_nthw_kcs_select(be->p_cat_nthw, 0, cat_func);
521 			cat_nthw_kcs_category(be->p_cat_nthw, 0, cat->v18.kcs[cat_func].category);
522 			cat_nthw_kcs_flush(be->p_cat_nthw, 0);
523 			cat_func++;
524 		}
525 
526 	} else if (cat->ver == 21) {
527 		cat_nthw_kcs_cnt(be->p_cat_nthw, km_if_idx, 1U);
528 
529 		for (int i = 0; i < cnt; i++) {
530 			cat_nthw_kcs_select(be->p_cat_nthw, km_if_idx, cat_func);
531 			cat_nthw_kcs_category(be->p_cat_nthw, km_if_idx,
532 				cat->v21.kcs[cat_func].category[km_if_idx]);
533 			cat_nthw_kcs_flush(be->p_cat_nthw, km_if_idx);
534 			cat_func++;
535 		}
536 	}
537 
538 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
539 	return 0;
540 }
541 
542 static int cat_fte_flush(void *be_dev, const struct cat_func_s *cat, int km_if_idx, int index,
543 	int cnt)
544 {
545 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
546 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
547 
548 	if (cat->ver == 18) {
549 		cat_nthw_fte_cnt(be->p_cat_nthw, 0, 1);
550 
551 		for (int i = 0; i < cnt; i++) {
552 			cat_nthw_fte_select(be->p_cat_nthw, 0, index + i);
553 			cat_nthw_fte_enable(be->p_cat_nthw, 0, cat->v18.fte[index + i].enable_bm);
554 			cat_nthw_fte_flush(be->p_cat_nthw, 0);
555 		}
556 
557 	} else if (cat->ver == 21) {
558 		cat_nthw_fte_cnt(be->p_cat_nthw, km_if_idx, 1);
559 
560 		for (int i = 0; i < cnt; i++) {
561 			cat_nthw_fte_select(be->p_cat_nthw, km_if_idx, index + i);
562 			cat_nthw_fte_enable(be->p_cat_nthw, km_if_idx,
563 				cat->v21.fte[index + i].enable_bm[km_if_idx]);
564 			cat_nthw_fte_flush(be->p_cat_nthw, km_if_idx);
565 		}
566 	}
567 
568 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
569 	return 0;
570 }
571 
572 static int cat_cte_flush(void *be_dev, const struct cat_func_s *cat, int cat_func, int cnt)
573 {
574 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
575 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
576 
577 	if (cat->ver == 18 || cat->ver == 21) {
578 		cat_nthw_cte_cnt(be->p_cat_nthw, 1);
579 
580 		for (int i = 0; i < cnt; i++) {
581 			cat_nthw_cte_select(be->p_cat_nthw, cat_func);
582 			cat_nthw_cte_enable_col(be->p_cat_nthw, cat->v18.cte[cat_func].b.col);
583 			cat_nthw_cte_enable_cor(be->p_cat_nthw, cat->v18.cte[cat_func].b.cor);
584 			cat_nthw_cte_enable_hsh(be->p_cat_nthw, cat->v18.cte[cat_func].b.hsh);
585 			cat_nthw_cte_enable_qsl(be->p_cat_nthw, cat->v18.cte[cat_func].b.qsl);
586 			cat_nthw_cte_enable_ipf(be->p_cat_nthw, cat->v18.cte[cat_func].b.ipf);
587 			cat_nthw_cte_enable_slc(be->p_cat_nthw, cat->v18.cte[cat_func].b.slc);
588 			cat_nthw_cte_enable_pdb(be->p_cat_nthw, cat->v18.cte[cat_func].b.pdb);
589 			cat_nthw_cte_enable_msk(be->p_cat_nthw, cat->v18.cte[cat_func].b.msk);
590 			cat_nthw_cte_enable_hst(be->p_cat_nthw, cat->v18.cte[cat_func].b.hst);
591 			cat_nthw_cte_enable_epp(be->p_cat_nthw, cat->v18.cte[cat_func].b.epp);
592 			cat_nthw_cte_enable_tpe(be->p_cat_nthw, cat->v18.cte[cat_func].b.tpe);
593 
594 			cat_nthw_cte_flush(be->p_cat_nthw);
595 			cat_func++;
596 		}
597 	}
598 
599 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
600 	return 0;
601 }
602 
603 static int cat_cts_flush(void *be_dev, const struct cat_func_s *cat, int index, int cnt)
604 {
605 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
606 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
607 
608 	if (cat->ver == 18 || cat->ver == 21) {
609 		cat_nthw_cts_cnt(be->p_cat_nthw, 1);
610 
611 		for (int i = 0; i < cnt; i++) {
612 			cat_nthw_cts_select(be->p_cat_nthw, index + i);
613 			cat_nthw_cts_cat_a(be->p_cat_nthw, cat->v18.cts[index + i].cat_a);
614 			cat_nthw_cts_cat_b(be->p_cat_nthw, cat->v18.cts[index + i].cat_b);
615 			cat_nthw_cts_flush(be->p_cat_nthw);
616 		}
617 	}
618 
619 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
620 	return 0;
621 }
622 
623 static int cat_cot_flush(void *be_dev, const struct cat_func_s *cat, int cat_func, int cnt)
624 {
625 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
626 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
627 
628 	if (cat->ver == 18 || cat->ver == 21) {
629 		cat_nthw_cot_cnt(be->p_cat_nthw, 1);
630 
631 		for (int i = 0; i < cnt; i++) {
632 			cat_nthw_cot_select(be->p_cat_nthw, cat_func + i);
633 			cat_nthw_cot_color(be->p_cat_nthw, cat->v18.cot[cat_func + i].color);
634 			cat_nthw_cot_km(be->p_cat_nthw, cat->v18.cot[cat_func + i].km);
635 			cat_nthw_cot_flush(be->p_cat_nthw);
636 		}
637 	}
638 
639 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
640 	return 0;
641 }
642 
643 static int cat_cct_flush(void *be_dev, const struct cat_func_s *cat, int index, int cnt)
644 {
645 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
646 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
647 
648 	if (cat->ver == 18 || cat->ver == 21) {
649 		cat_nthw_cct_cnt(be->p_cat_nthw, 1);
650 
651 		for (int i = 0; i < cnt; i++) {
652 			cat_nthw_cct_select(be->p_cat_nthw, index + i);
653 			cat_nthw_cct_color(be->p_cat_nthw, cat->v18.cct[index + i].color);
654 			cat_nthw_cct_km(be->p_cat_nthw, cat->v18.cct[index + i].km);
655 			cat_nthw_cct_flush(be->p_cat_nthw);
656 		}
657 	}
658 
659 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
660 	return 0;
661 }
662 
663 static int cat_exo_flush(void *be_dev, const struct cat_func_s *cat, int ext_index, int cnt)
664 {
665 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
666 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
667 
668 	if (cat->ver == 18 || cat->ver == 21) {
669 		cat_nthw_exo_cnt(be->p_cat_nthw, 1);
670 
671 		for (int i = 0; i < cnt; i++) {
672 			cat_nthw_exo_select(be->p_cat_nthw, ext_index + i);
673 			cat_nthw_exo_dyn(be->p_cat_nthw, cat->v18.exo[ext_index + i].dyn);
674 			cat_nthw_exo_ofs(be->p_cat_nthw, cat->v18.exo[ext_index + i].ofs);
675 			cat_nthw_exo_flush(be->p_cat_nthw);
676 		}
677 	}
678 
679 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
680 	return 0;
681 }
682 
683 static int cat_rck_flush(void *be_dev, const struct cat_func_s *cat, int index, int cnt)
684 {
685 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
686 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
687 
688 	if (cat->ver == 18 || cat->ver == 21) {
689 		cat_nthw_rck_cnt(be->p_cat_nthw, 1);
690 
691 		for (int i = 0; i < cnt; i++) {
692 			cat_nthw_rck_select(be->p_cat_nthw, index + i);
693 			cat_nthw_rck_data(be->p_cat_nthw, cat->v18.rck[index + i].rck_data);
694 			cat_nthw_rck_flush(be->p_cat_nthw);
695 		}
696 	}
697 
698 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
699 	return 0;
700 }
701 
702 static int cat_len_flush(void *be_dev, const struct cat_func_s *cat, int len_index, int cnt)
703 {
704 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
705 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
706 
707 	if (cat->ver == 18 || cat->ver == 21) {
708 		cat_nthw_len_cnt(be->p_cat_nthw, 1);
709 
710 		for (int i = 0; i < cnt; i++) {
711 			cat_nthw_len_select(be->p_cat_nthw, len_index + i);
712 			cat_nthw_len_lower(be->p_cat_nthw, cat->v18.len[len_index + i].lower);
713 			cat_nthw_len_upper(be->p_cat_nthw, cat->v18.len[len_index + i].upper);
714 			cat_nthw_len_dyn1(be->p_cat_nthw, cat->v18.len[len_index + i].dyn1);
715 			cat_nthw_len_dyn2(be->p_cat_nthw, cat->v18.len[len_index + i].dyn2);
716 			cat_nthw_len_inv(be->p_cat_nthw, cat->v18.len[len_index + i].inv);
717 			cat_nthw_len_flush(be->p_cat_nthw);
718 		}
719 	}
720 
721 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
722 	return 0;
723 }
724 
725 static int cat_kcc_flush(void *be_dev, const struct cat_func_s *cat, int len_index, int cnt)
726 {
727 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
728 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
729 
730 	if (cat->ver == 18 || cat->ver == 21) {
731 		cat_nthw_kcc_cnt(be->p_cat_nthw, 1);
732 
733 		for (int i = 0; i < cnt; i++) {
734 			cat_nthw_kcc_select(be->p_cat_nthw, len_index + i);
735 			cat_nthw_kcc_key(be->p_cat_nthw, cat->v18.kcc_cam[len_index + i].key);
736 			cat_nthw_kcc_category(be->p_cat_nthw,
737 				cat->v18.kcc_cam[len_index + i].category);
738 			cat_nthw_kcc_id(be->p_cat_nthw, cat->v18.kcc_cam[len_index + i].id);
739 			cat_nthw_kcc_flush(be->p_cat_nthw);
740 		}
741 	}
742 
743 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
744 	return 0;
745 }
746 
747 /*
748  * KM
749  */
750 
751 static bool km_get_present(void *be_dev)
752 {
753 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
754 	return be->p_km_nthw != NULL;
755 }
756 
757 static uint32_t km_get_version(void *be_dev)
758 {
759 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
760 	return (uint32_t)((nthw_module_get_major_version(be->p_km_nthw->m_km) << 16) |
761 			(nthw_module_get_minor_version(be->p_km_nthw->m_km) & 0xffff));
762 }
763 
/*
 * Write a range of KM RCP (recipe) records to hardware.
 *
 * @param be_dev   Backend context (struct backend_dev_s *).
 * @param km       Shadow copy of the KM module configuration.
 * @param category First RCP record index to write.
 * @param cnt      Number of consecutive records to write.
 * @return Always 0.
 *
 * For each record: select the index, program every field, then flush.
 * Only KM version 7 is handled; other versions are a no-op.
 */
static int km_rcp_flush(void *be_dev, const struct km_func_s *km, int category, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;

	CHECK_DEBUG_ON(be, km, be->p_km_nthw);

	if (km->ver == 7) {
		/* One record is written per flush. */
		km_nthw_rcp_cnt(be->p_km_nthw, 1);

		for (int i = 0; i < cnt; i++) {
			km_nthw_rcp_select(be->p_km_nthw, category + i);
			/* qw0/qw4: quad-word extractor configuration */
			km_nthw_rcp_qw0_dyn(be->p_km_nthw, km->v7.rcp[category + i].qw0_dyn);
			km_nthw_rcp_qw0_ofs(be->p_km_nthw, km->v7.rcp[category + i].qw0_ofs);
			km_nthw_rcp_qw0_sel_a(be->p_km_nthw, km->v7.rcp[category + i].qw0_sel_a);
			km_nthw_rcp_qw0_sel_b(be->p_km_nthw, km->v7.rcp[category + i].qw0_sel_b);
			km_nthw_rcp_qw4_dyn(be->p_km_nthw, km->v7.rcp[category + i].qw4_dyn);
			km_nthw_rcp_qw4_ofs(be->p_km_nthw, km->v7.rcp[category + i].qw4_ofs);
			km_nthw_rcp_qw4_sel_a(be->p_km_nthw, km->v7.rcp[category + i].qw4_sel_a);
			km_nthw_rcp_qw4_sel_b(be->p_km_nthw, km->v7.rcp[category + i].qw4_sel_b);
			/* dw8/dw10: double-word extractor configuration */
			km_nthw_rcp_dw8_dyn(be->p_km_nthw, km->v7.rcp[category + i].dw8_dyn);
			km_nthw_rcp_dw8_ofs(be->p_km_nthw, km->v7.rcp[category + i].dw8_ofs);
			km_nthw_rcp_dw8_sel_a(be->p_km_nthw, km->v7.rcp[category + i].dw8_sel_a);
			km_nthw_rcp_dw8_sel_b(be->p_km_nthw, km->v7.rcp[category + i].dw8_sel_b);
			km_nthw_rcp_dw10_dyn(be->p_km_nthw, km->v7.rcp[category + i].dw10_dyn);
			km_nthw_rcp_dw10_ofs(be->p_km_nthw, km->v7.rcp[category + i].dw10_ofs);
			km_nthw_rcp_dw10_sel_a(be->p_km_nthw, km->v7.rcp[category + i].dw10_sel_a);
			km_nthw_rcp_dw10_sel_b(be->p_km_nthw, km->v7.rcp[category + i].dw10_sel_b);
			km_nthw_rcp_swx_cch(be->p_km_nthw, km->v7.rcp[category + i].swx_cch);
			km_nthw_rcp_swx_sel_a(be->p_km_nthw, km->v7.rcp[category + i].swx_sel_a);
			km_nthw_rcp_swx_sel_b(be->p_km_nthw, km->v7.rcp[category + i].swx_sel_b);
			km_nthw_rcp_mask_da(be->p_km_nthw, km->v7.rcp[category + i].mask_d_a);
			km_nthw_rcp_mask_b(be->p_km_nthw, km->v7.rcp[category + i].mask_b);
			km_nthw_rcp_dual(be->p_km_nthw, km->v7.rcp[category + i].dual);
			km_nthw_rcp_paired(be->p_km_nthw, km->v7.rcp[category + i].paired);
			/* a/b suffixed fields configure the two lookup paths */
			km_nthw_rcp_el_a(be->p_km_nthw, km->v7.rcp[category + i].el_a);
			km_nthw_rcp_el_b(be->p_km_nthw, km->v7.rcp[category + i].el_b);
			km_nthw_rcp_info_a(be->p_km_nthw, km->v7.rcp[category + i].info_a);
			km_nthw_rcp_info_b(be->p_km_nthw, km->v7.rcp[category + i].info_b);
			km_nthw_rcp_ftm_a(be->p_km_nthw, km->v7.rcp[category + i].ftm_a);
			km_nthw_rcp_ftm_b(be->p_km_nthw, km->v7.rcp[category + i].ftm_b);
			km_nthw_rcp_bank_a(be->p_km_nthw, km->v7.rcp[category + i].bank_a);
			km_nthw_rcp_bank_b(be->p_km_nthw, km->v7.rcp[category + i].bank_b);
			km_nthw_rcp_kl_a(be->p_km_nthw, km->v7.rcp[category + i].kl_a);
			km_nthw_rcp_kl_b(be->p_km_nthw, km->v7.rcp[category + i].kl_b);
			km_nthw_rcp_keyway_a(be->p_km_nthw, km->v7.rcp[category + i].keyway_a);
			km_nthw_rcp_keyway_b(be->p_km_nthw, km->v7.rcp[category + i].keyway_b);
			km_nthw_rcp_synergy_mode(be->p_km_nthw,
				km->v7.rcp[category + i].synergy_mode);
			km_nthw_rcp_dw0_b_dyn(be->p_km_nthw, km->v7.rcp[category + i].dw0_b_dyn);
			km_nthw_rcp_dw0_b_ofs(be->p_km_nthw, km->v7.rcp[category + i].dw0_b_ofs);
			km_nthw_rcp_dw2_b_dyn(be->p_km_nthw, km->v7.rcp[category + i].dw2_b_dyn);
			km_nthw_rcp_dw2_b_ofs(be->p_km_nthw, km->v7.rcp[category + i].dw2_b_ofs);
			km_nthw_rcp_sw4_b_dyn(be->p_km_nthw, km->v7.rcp[category + i].sw4_b_dyn);
			km_nthw_rcp_sw4_b_ofs(be->p_km_nthw, km->v7.rcp[category + i].sw4_b_ofs);
			km_nthw_rcp_sw5_b_dyn(be->p_km_nthw, km->v7.rcp[category + i].sw5_b_dyn);
			km_nthw_rcp_sw5_b_ofs(be->p_km_nthw, km->v7.rcp[category + i].sw5_b_ofs);
			km_nthw_rcp_flush(be->p_km_nthw);
		}
	}

	CHECK_DEBUG_OFF(km, be->p_km_nthw);
	return 0;
}
827 
828 static int km_cam_flush(void *be_dev, const struct km_func_s *km, int bank, int record, int cnt)
829 {
830 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
831 	CHECK_DEBUG_ON(be, km, be->p_km_nthw);
832 
833 	if (km->ver == 7) {
834 		km_nthw_cam_cnt(be->p_km_nthw, 1);
835 
836 		for (int i = 0; i < cnt; i++) {
837 			km_nthw_cam_select(be->p_km_nthw, (bank << 11) + record + i);
838 			km_nthw_cam_w0(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w0);
839 			km_nthw_cam_w1(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w1);
840 			km_nthw_cam_w2(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w2);
841 			km_nthw_cam_w3(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w3);
842 			km_nthw_cam_w4(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w4);
843 			km_nthw_cam_w5(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w5);
844 			km_nthw_cam_ft0(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft0);
845 			km_nthw_cam_ft1(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft1);
846 			km_nthw_cam_ft2(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft2);
847 			km_nthw_cam_ft3(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft3);
848 			km_nthw_cam_ft4(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft4);
849 			km_nthw_cam_ft5(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft5);
850 			km_nthw_cam_flush(be->p_km_nthw);
851 		}
852 	}
853 
854 	CHECK_DEBUG_OFF(km, be->p_km_nthw);
855 	return 0;
856 }
857 
858 static int km_tcam_flush(void *be_dev, const struct km_func_s *km, int bank, int byte, int value,
859 	int cnt)
860 {
861 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
862 	CHECK_DEBUG_ON(be, km, be->p_km_nthw);
863 
864 	if (km->ver == 7) {
865 		int start_idx = bank * 4 * 256 + byte * 256 + value;
866 		km_nthw_tcam_cnt(be->p_km_nthw, 1);
867 
868 		for (int i = 0; i < cnt; i++) {
869 			if (km->v7.tcam[start_idx + i].dirty) {
870 				km_nthw_tcam_select(be->p_km_nthw, start_idx + i);
871 				km_nthw_tcam_t(be->p_km_nthw, km->v7.tcam[start_idx + i].t);
872 				km_nthw_tcam_flush(be->p_km_nthw);
873 				km->v7.tcam[start_idx + i].dirty = 0;
874 			}
875 		}
876 	}
877 
878 	CHECK_DEBUG_OFF(km, be->p_km_nthw);
879 	return 0;
880 }
881 
882 /*
883  * bank is the TCAM bank, index is the index within the bank (0..71)
884  */
885 static int km_tci_flush(void *be_dev, const struct km_func_s *km, int bank, int index, int cnt)
886 {
887 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
888 	CHECK_DEBUG_ON(be, km, be->p_km_nthw);
889 
890 	if (km->ver == 7) {
891 		/* TCAM bank width in version 3 = 72 */
892 		km_nthw_tci_cnt(be->p_km_nthw, 1);
893 
894 		for (int i = 0; i < cnt; i++) {
895 			km_nthw_tci_select(be->p_km_nthw, bank * 72 + index + i);
896 			km_nthw_tci_color(be->p_km_nthw, km->v7.tci[bank * 72 + index + i].color);
897 			km_nthw_tci_ft(be->p_km_nthw, km->v7.tci[bank * 72 + index + i].ft);
898 			km_nthw_tci_flush(be->p_km_nthw);
899 		}
900 	}
901 
902 	CHECK_DEBUG_OFF(km, be->p_km_nthw);
903 	return 0;
904 }
905 
906 /*
907  * bank is the TCAM bank, index is the index within the bank (0..71)
908  */
909 static int km_tcq_flush(void *be_dev, const struct km_func_s *km, int bank, int index, int cnt)
910 {
911 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
912 	CHECK_DEBUG_ON(be, km, be->p_km_nthw);
913 
914 	if (km->ver == 7) {
915 		/* TCAM bank width in version 3 = 72 */
916 		km_nthw_tcq_cnt(be->p_km_nthw, 1);
917 
918 		for (int i = 0; i < cnt; i++) {
919 			/* adr = lover 4 bits = bank, upper 7 bits = index */
920 			km_nthw_tcq_select(be->p_km_nthw, bank + (index << 4) + i);
921 			km_nthw_tcq_bank_mask(be->p_km_nthw,
922 				km->v7.tcq[bank + (index << 4) + i].bank_mask);
923 			km_nthw_tcq_qual(be->p_km_nthw, km->v7.tcq[bank + (index << 4) + i].qual);
924 			km_nthw_tcq_flush(be->p_km_nthw);
925 		}
926 	}
927 
928 	CHECK_DEBUG_OFF(km, be->p_km_nthw);
929 	return 0;
930 }
931 
932 /*
933  * FLM
934  */
935 
936 static bool flm_get_present(void *be_dev)
937 {
938 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
939 	return be->p_flm_nthw != NULL;
940 }
941 
942 static uint32_t flm_get_version(void *be_dev)
943 {
944 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
945 	return (uint32_t)((nthw_module_get_major_version(be->p_flm_nthw->m_flm) << 16) |
946 			(nthw_module_get_minor_version(be->p_flm_nthw->m_flm) & 0xffff));
947 }
948 
949 static int flm_control_flush(void *be_dev, const struct flm_func_s *flm)
950 {
951 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
952 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
953 
954 	if (flm->ver >= 25) {
955 		flm_nthw_control_enable(be->p_flm_nthw, flm->v25.control->enable);
956 		flm_nthw_control_init(be->p_flm_nthw, flm->v25.control->init);
957 		flm_nthw_control_lds(be->p_flm_nthw, flm->v25.control->lds);
958 		flm_nthw_control_lfs(be->p_flm_nthw, flm->v25.control->lfs);
959 		flm_nthw_control_lis(be->p_flm_nthw, flm->v25.control->lis);
960 		flm_nthw_control_uds(be->p_flm_nthw, flm->v25.control->uds);
961 		flm_nthw_control_uis(be->p_flm_nthw, flm->v25.control->uis);
962 		flm_nthw_control_rds(be->p_flm_nthw, flm->v25.control->rds);
963 		flm_nthw_control_ris(be->p_flm_nthw, flm->v25.control->ris);
964 		flm_nthw_control_pds(be->p_flm_nthw, flm->v25.control->pds);
965 		flm_nthw_control_pis(be->p_flm_nthw, flm->v25.control->pis);
966 		flm_nthw_control_crcwr(be->p_flm_nthw, flm->v25.control->crcwr);
967 		flm_nthw_control_crcrd(be->p_flm_nthw, flm->v25.control->crcrd);
968 		flm_nthw_control_rbl(be->p_flm_nthw, flm->v25.control->rbl);
969 		flm_nthw_control_eab(be->p_flm_nthw, flm->v25.control->eab);
970 		flm_nthw_control_split_sdram_usage(be->p_flm_nthw,
971 			flm->v25.control->split_sdram_usage);
972 		flm_nthw_control_flush(be->p_flm_nthw);
973 	}
974 
975 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
976 	return 0;
977 }
978 
979 static int flm_status_flush(void *be_dev, const struct flm_func_s *flm)
980 {
981 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
982 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
983 
984 	if (flm->ver >= 25) {
985 		/* CALIBDONE, INITDONE, IDLE, and EFT_BP is read only */
986 		flm_nthw_status_critical(be->p_flm_nthw, &flm->v25.status->critical, 0);
987 		flm_nthw_status_panic(be->p_flm_nthw, &flm->v25.status->panic, 0);
988 		flm_nthw_status_crcerr(be->p_flm_nthw, &flm->v25.status->crcerr, 0);
989 		flm_nthw_status_cache_buf_crit(be->p_flm_nthw,
990 			&flm->v25.status->cache_buf_critical, 0);
991 		flm_nthw_status_flush(be->p_flm_nthw);
992 	}
993 
994 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
995 	return 0;
996 }
997 
998 static int flm_status_update(void *be_dev, const struct flm_func_s *flm)
999 {
1000 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1001 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1002 
1003 	if (flm->ver >= 25) {
1004 		flm_nthw_status_update(be->p_flm_nthw);
1005 		flm_nthw_status_calib_success(be->p_flm_nthw, &flm->v25.status->calib_success, 1);
1006 		flm_nthw_status_calib_fail(be->p_flm_nthw, &flm->v25.status->calib_fail, 1);
1007 		flm_nthw_status_initdone(be->p_flm_nthw, &flm->v25.status->initdone, 1);
1008 		flm_nthw_status_idle(be->p_flm_nthw, &flm->v25.status->idle, 1);
1009 		flm_nthw_status_critical(be->p_flm_nthw, &flm->v25.status->critical, 1);
1010 		flm_nthw_status_panic(be->p_flm_nthw, &flm->v25.status->panic, 1);
1011 		flm_nthw_status_crcerr(be->p_flm_nthw, &flm->v25.status->crcerr, 1);
1012 		flm_nthw_status_eft_bp(be->p_flm_nthw, &flm->v25.status->eft_bp, 1);
1013 		flm_nthw_status_cache_buf_crit(be->p_flm_nthw,
1014 			&flm->v25.status->cache_buf_critical, 1);
1015 	}
1016 
1017 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1018 	return 0;
1019 }
1020 
1021 static int flm_scan_flush(void *be_dev, const struct flm_func_s *flm)
1022 {
1023 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1024 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1025 
1026 	if (flm->ver >= 25) {
1027 		flm_nthw_scan_i(be->p_flm_nthw, flm->v25.scan->i);
1028 		flm_nthw_scan_flush(be->p_flm_nthw);
1029 	}
1030 
1031 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1032 	return 0;
1033 }
1034 
1035 static int flm_load_bin_flush(void *be_dev, const struct flm_func_s *flm)
1036 {
1037 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1038 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1039 
1040 	if (flm->ver >= 25) {
1041 		flm_nthw_load_bin(be->p_flm_nthw, flm->v25.load_bin->bin);
1042 		flm_nthw_load_bin_flush(be->p_flm_nthw);
1043 	}
1044 
1045 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1046 	return 0;
1047 }
1048 
1049 static int flm_prio_flush(void *be_dev, const struct flm_func_s *flm)
1050 {
1051 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1052 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1053 
1054 	if (flm->ver >= 25) {
1055 		flm_nthw_prio_limit0(be->p_flm_nthw, flm->v25.prio->limit0);
1056 		flm_nthw_prio_ft0(be->p_flm_nthw, flm->v25.prio->ft0);
1057 		flm_nthw_prio_limit1(be->p_flm_nthw, flm->v25.prio->limit1);
1058 		flm_nthw_prio_ft1(be->p_flm_nthw, flm->v25.prio->ft1);
1059 		flm_nthw_prio_limit2(be->p_flm_nthw, flm->v25.prio->limit2);
1060 		flm_nthw_prio_ft2(be->p_flm_nthw, flm->v25.prio->ft2);
1061 		flm_nthw_prio_limit3(be->p_flm_nthw, flm->v25.prio->limit3);
1062 		flm_nthw_prio_ft3(be->p_flm_nthw, flm->v25.prio->ft3);
1063 		flm_nthw_prio_flush(be->p_flm_nthw);
1064 	}
1065 
1066 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1067 	return 0;
1068 }
1069 
1070 static int flm_pst_flush(void *be_dev, const struct flm_func_s *flm, int index, int cnt)
1071 {
1072 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1073 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1074 
1075 	if (flm->ver >= 25) {
1076 		flm_nthw_pst_cnt(be->p_flm_nthw, 1);
1077 
1078 		for (int i = 0; i < cnt; i++) {
1079 			flm_nthw_pst_select(be->p_flm_nthw, index + i);
1080 			flm_nthw_pst_bp(be->p_flm_nthw, flm->v25.pst[index + i].bp);
1081 			flm_nthw_pst_pp(be->p_flm_nthw, flm->v25.pst[index + i].pp);
1082 			flm_nthw_pst_tp(be->p_flm_nthw, flm->v25.pst[index + i].tp);
1083 			flm_nthw_pst_flush(be->p_flm_nthw);
1084 		}
1085 	}
1086 
1087 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1088 	return 0;
1089 }
1090 
1091 static int flm_rcp_flush(void *be_dev, const struct flm_func_s *flm, int index, int cnt)
1092 {
1093 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1094 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1095 
1096 	if (flm->ver >= 25) {
1097 		flm_nthw_rcp_cnt(be->p_flm_nthw, 1);
1098 
1099 		for (int i = 0; i < cnt; i++) {
1100 			flm_nthw_rcp_select(be->p_flm_nthw, index + i);
1101 			flm_nthw_rcp_lookup(be->p_flm_nthw, flm->v25.rcp[index + i].lookup);
1102 			flm_nthw_rcp_qw0_dyn(be->p_flm_nthw, flm->v25.rcp[index + i].qw0_dyn);
1103 			flm_nthw_rcp_qw0_ofs(be->p_flm_nthw, flm->v25.rcp[index + i].qw0_ofs);
1104 			flm_nthw_rcp_qw0_sel(be->p_flm_nthw, flm->v25.rcp[index + i].qw0_sel);
1105 			flm_nthw_rcp_qw4_dyn(be->p_flm_nthw, flm->v25.rcp[index + i].qw4_dyn);
1106 			flm_nthw_rcp_qw4_ofs(be->p_flm_nthw, flm->v25.rcp[index + i].qw4_ofs);
1107 			flm_nthw_rcp_sw8_dyn(be->p_flm_nthw, flm->v25.rcp[index + i].sw8_dyn);
1108 			flm_nthw_rcp_sw8_ofs(be->p_flm_nthw, flm->v25.rcp[index + i].sw8_ofs);
1109 			flm_nthw_rcp_sw8_sel(be->p_flm_nthw, flm->v25.rcp[index + i].sw8_sel);
1110 			flm_nthw_rcp_sw9_dyn(be->p_flm_nthw, flm->v25.rcp[index + i].sw9_dyn);
1111 			flm_nthw_rcp_sw9_ofs(be->p_flm_nthw, flm->v25.rcp[index + i].sw9_ofs);
1112 			flm_nthw_rcp_mask(be->p_flm_nthw, flm->v25.rcp[index + i].mask);
1113 			flm_nthw_rcp_kid(be->p_flm_nthw, flm->v25.rcp[index + i].kid);
1114 			flm_nthw_rcp_opn(be->p_flm_nthw, flm->v25.rcp[index + i].opn);
1115 			flm_nthw_rcp_ipn(be->p_flm_nthw, flm->v25.rcp[index + i].ipn);
1116 			flm_nthw_rcp_byt_dyn(be->p_flm_nthw, flm->v25.rcp[index + i].byt_dyn);
1117 			flm_nthw_rcp_byt_ofs(be->p_flm_nthw, flm->v25.rcp[index + i].byt_ofs);
1118 			flm_nthw_rcp_txplm(be->p_flm_nthw, flm->v25.rcp[index + i].txplm);
1119 			flm_nthw_rcp_auto_ipv4_mask(be->p_flm_nthw,
1120 				flm->v25.rcp[index + i].auto_ipv4_mask);
1121 			flm_nthw_rcp_flush(be->p_flm_nthw);
1122 		}
1123 	}
1124 
1125 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1126 	return 0;
1127 }
1128 
1129 static int flm_scrub_flush(void *be_dev, const struct flm_func_s *flm, int index, int cnt)
1130 {
1131 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1132 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1133 
1134 	if (flm->ver >= 25) {
1135 		flm_nthw_scrub_cnt(be->p_flm_nthw, 1);
1136 
1137 		for (int i = 0; i < cnt; i++) {
1138 			flm_nthw_scrub_select(be->p_flm_nthw, index + i);
1139 			flm_nthw_scrub_t(be->p_flm_nthw, flm->v25.scrub[index + i].t);
1140 			flm_nthw_scrub_r(be->p_flm_nthw, flm->v25.scrub[index + i].r);
1141 			flm_nthw_scrub_del(be->p_flm_nthw, flm->v25.scrub[index + i].del);
1142 			flm_nthw_scrub_inf(be->p_flm_nthw, flm->v25.scrub[index + i].inf);
1143 			flm_nthw_scrub_flush(be->p_flm_nthw);
1144 		}
1145 	}
1146 
1147 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1148 	return 0;
1149 }
1150 
1151 static int flm_buf_ctrl_update(void *be_dev, const struct flm_func_s *flm)
1152 {
1153 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1154 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1155 
1156 	if (flm->ver >= 25) {
1157 		flm_nthw_buf_ctrl_update(be->p_flm_nthw,
1158 			&flm->v25.buf_ctrl->lrn_free,
1159 			&flm->v25.buf_ctrl->inf_avail,
1160 			&flm->v25.buf_ctrl->sta_avail);
1161 	}
1162 
1163 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1164 	return 0;
1165 }
1166 
1167 static int flm_stat_update(void *be_dev, const struct flm_func_s *flm)
1168 {
1169 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1170 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1171 
1172 	if (flm->ver >= 25) {
1173 		flm_nthw_stat_lrn_done_update(be->p_flm_nthw);
1174 		flm_nthw_stat_lrn_ignore_update(be->p_flm_nthw);
1175 		flm_nthw_stat_lrn_fail_update(be->p_flm_nthw);
1176 		flm_nthw_stat_unl_done_update(be->p_flm_nthw);
1177 		flm_nthw_stat_unl_ignore_update(be->p_flm_nthw);
1178 		flm_nthw_stat_rel_done_update(be->p_flm_nthw);
1179 		flm_nthw_stat_rel_ignore_update(be->p_flm_nthw);
1180 		flm_nthw_stat_aul_done_update(be->p_flm_nthw);
1181 		flm_nthw_stat_aul_ignore_update(be->p_flm_nthw);
1182 		flm_nthw_stat_aul_fail_update(be->p_flm_nthw);
1183 		flm_nthw_stat_tul_done_update(be->p_flm_nthw);
1184 		flm_nthw_stat_flows_update(be->p_flm_nthw);
1185 		flm_nthw_load_lps_update(be->p_flm_nthw);
1186 		flm_nthw_load_aps_update(be->p_flm_nthw);
1187 
1188 		flm_nthw_stat_lrn_done_cnt(be->p_flm_nthw, &flm->v25.lrn_done->cnt, 1);
1189 		flm_nthw_stat_lrn_ignore_cnt(be->p_flm_nthw, &flm->v25.lrn_ignore->cnt, 1);
1190 		flm_nthw_stat_lrn_fail_cnt(be->p_flm_nthw, &flm->v25.lrn_fail->cnt, 1);
1191 		flm_nthw_stat_unl_done_cnt(be->p_flm_nthw, &flm->v25.unl_done->cnt, 1);
1192 		flm_nthw_stat_unl_ignore_cnt(be->p_flm_nthw, &flm->v25.unl_ignore->cnt, 1);
1193 		flm_nthw_stat_rel_done_cnt(be->p_flm_nthw, &flm->v25.rel_done->cnt, 1);
1194 		flm_nthw_stat_rel_ignore_cnt(be->p_flm_nthw, &flm->v25.rel_ignore->cnt, 1);
1195 		flm_nthw_stat_aul_done_cnt(be->p_flm_nthw, &flm->v25.aul_done->cnt, 1);
1196 		flm_nthw_stat_aul_ignore_cnt(be->p_flm_nthw, &flm->v25.aul_ignore->cnt, 1);
1197 		flm_nthw_stat_aul_fail_cnt(be->p_flm_nthw, &flm->v25.aul_fail->cnt, 1);
1198 		flm_nthw_stat_tul_done_cnt(be->p_flm_nthw, &flm->v25.tul_done->cnt, 1);
1199 		flm_nthw_stat_flows_cnt(be->p_flm_nthw, &flm->v25.flows->cnt, 1);
1200 
1201 		flm_nthw_stat_prb_done_update(be->p_flm_nthw);
1202 		flm_nthw_stat_prb_ignore_update(be->p_flm_nthw);
1203 		flm_nthw_stat_prb_done_cnt(be->p_flm_nthw, &flm->v25.prb_done->cnt, 1);
1204 		flm_nthw_stat_prb_ignore_cnt(be->p_flm_nthw, &flm->v25.prb_ignore->cnt, 1);
1205 
1206 		flm_nthw_load_lps_cnt(be->p_flm_nthw, &flm->v25.load_lps->lps, 1);
1207 		flm_nthw_load_aps_cnt(be->p_flm_nthw, &flm->v25.load_aps->aps, 1);
1208 	}
1209 
1210 	if (flm->ver >= 25) {
1211 		flm_nthw_stat_sta_done_update(be->p_flm_nthw);
1212 		flm_nthw_stat_inf_done_update(be->p_flm_nthw);
1213 		flm_nthw_stat_inf_skip_update(be->p_flm_nthw);
1214 		flm_nthw_stat_pck_hit_update(be->p_flm_nthw);
1215 		flm_nthw_stat_pck_miss_update(be->p_flm_nthw);
1216 		flm_nthw_stat_pck_unh_update(be->p_flm_nthw);
1217 		flm_nthw_stat_pck_dis_update(be->p_flm_nthw);
1218 		flm_nthw_stat_csh_hit_update(be->p_flm_nthw);
1219 		flm_nthw_stat_csh_miss_update(be->p_flm_nthw);
1220 		flm_nthw_stat_csh_unh_update(be->p_flm_nthw);
1221 		flm_nthw_stat_cuc_start_update(be->p_flm_nthw);
1222 		flm_nthw_stat_cuc_move_update(be->p_flm_nthw);
1223 
1224 		flm_nthw_stat_sta_done_cnt(be->p_flm_nthw, &flm->v25.sta_done->cnt, 1);
1225 		flm_nthw_stat_inf_done_cnt(be->p_flm_nthw, &flm->v25.inf_done->cnt, 1);
1226 		flm_nthw_stat_inf_skip_cnt(be->p_flm_nthw, &flm->v25.inf_skip->cnt, 1);
1227 		flm_nthw_stat_pck_hit_cnt(be->p_flm_nthw, &flm->v25.pck_hit->cnt, 1);
1228 		flm_nthw_stat_pck_miss_cnt(be->p_flm_nthw, &flm->v25.pck_miss->cnt, 1);
1229 		flm_nthw_stat_pck_unh_cnt(be->p_flm_nthw, &flm->v25.pck_unh->cnt, 1);
1230 		flm_nthw_stat_pck_dis_cnt(be->p_flm_nthw, &flm->v25.pck_dis->cnt, 1);
1231 		flm_nthw_stat_csh_hit_cnt(be->p_flm_nthw, &flm->v25.csh_hit->cnt, 1);
1232 		flm_nthw_stat_csh_miss_cnt(be->p_flm_nthw, &flm->v25.csh_miss->cnt, 1);
1233 		flm_nthw_stat_csh_unh_cnt(be->p_flm_nthw, &flm->v25.csh_unh->cnt, 1);
1234 		flm_nthw_stat_cuc_start_cnt(be->p_flm_nthw, &flm->v25.cuc_start->cnt, 1);
1235 		flm_nthw_stat_cuc_move_cnt(be->p_flm_nthw, &flm->v25.cuc_move->cnt, 1);
1236 	}
1237 
1238 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1239 	return 0;
1240 }
1241 
1242 static int flm_lrn_data_flush(void *be_dev, const struct flm_func_s *flm, const uint32_t *lrn_data,
1243 	uint32_t records, uint32_t *handled_records,
1244 	uint32_t words_per_record, uint32_t *inf_word_cnt,
1245 	uint32_t *sta_word_cnt)
1246 {
1247 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1248 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1249 
1250 	int ret = flm_nthw_lrn_data_flush(be->p_flm_nthw, lrn_data, records, words_per_record,
1251 			handled_records, &flm->v25.buf_ctrl->lrn_free,
1252 			&flm->v25.buf_ctrl->inf_avail,
1253 			&flm->v25.buf_ctrl->sta_avail);
1254 
1255 	*inf_word_cnt = flm->v25.buf_ctrl->inf_avail;
1256 	*sta_word_cnt = flm->v25.buf_ctrl->sta_avail;
1257 
1258 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1259 	return ret;
1260 }
1261 
1262 static int flm_inf_sta_data_update(void *be_dev, const struct flm_func_s *flm, uint32_t *inf_data,
1263 	uint32_t inf_size, uint32_t *inf_word_cnt, uint32_t *sta_data,
1264 	uint32_t sta_size, uint32_t *sta_word_cnt)
1265 {
1266 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1267 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1268 
1269 	int ret = flm_nthw_inf_sta_data_update(be->p_flm_nthw, inf_data, inf_size, sta_data,
1270 			sta_size, &flm->v25.buf_ctrl->lrn_free,
1271 			&flm->v25.buf_ctrl->inf_avail,
1272 			&flm->v25.buf_ctrl->sta_avail);
1273 
1274 	*inf_word_cnt = flm->v25.buf_ctrl->inf_avail;
1275 	*sta_word_cnt = flm->v25.buf_ctrl->sta_avail;
1276 
1277 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1278 	return ret;
1279 }
1280 
1281 /*
1282  * HSH
1283  */
1284 
1285 static bool hsh_get_present(void *be_dev)
1286 {
1287 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1288 	return be->p_hsh_nthw != NULL;
1289 }
1290 
1291 static uint32_t hsh_get_version(void *be_dev)
1292 {
1293 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1294 	return (uint32_t)((nthw_module_get_major_version(be->p_hsh_nthw->m_hsh) << 16) |
1295 			(nthw_module_get_minor_version(be->p_hsh_nthw->m_hsh) & 0xffff));
1296 }
1297 
1298 static int hsh_rcp_flush(void *be_dev, const struct hsh_func_s *hsh, int category, int cnt)
1299 {
1300 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1301 	CHECK_DEBUG_ON(be, hsh, be->p_hsh_nthw);
1302 
1303 	if (hsh->ver == 5) {
1304 		hsh_nthw_rcp_cnt(be->p_hsh_nthw, 1);
1305 
1306 		for (int i = 0; i < cnt; i++) {
1307 			hsh_nthw_rcp_select(be->p_hsh_nthw, category + i);
1308 			hsh_nthw_rcp_load_dist_type(be->p_hsh_nthw,
1309 				hsh->v5.rcp[category + i].load_dist_type);
1310 			hsh_nthw_rcp_mac_port_mask(be->p_hsh_nthw,
1311 				hsh->v5.rcp[category + i].mac_port_mask);
1312 			hsh_nthw_rcp_sort(be->p_hsh_nthw, hsh->v5.rcp[category + i].sort);
1313 			hsh_nthw_rcp_qw0_pe(be->p_hsh_nthw, hsh->v5.rcp[category + i].qw0_pe);
1314 			hsh_nthw_rcp_qw0_ofs(be->p_hsh_nthw, hsh->v5.rcp[category + i].qw0_ofs);
1315 			hsh_nthw_rcp_qw4_pe(be->p_hsh_nthw, hsh->v5.rcp[category + i].qw4_pe);
1316 			hsh_nthw_rcp_qw4_ofs(be->p_hsh_nthw, hsh->v5.rcp[category + i].qw4_ofs);
1317 			hsh_nthw_rcp_w8_pe(be->p_hsh_nthw, hsh->v5.rcp[category + i].w8_pe);
1318 			hsh_nthw_rcp_w8_ofs(be->p_hsh_nthw, hsh->v5.rcp[category + i].w8_ofs);
1319 			hsh_nthw_rcp_w8_sort(be->p_hsh_nthw, hsh->v5.rcp[category + i].w8_sort);
1320 			hsh_nthw_rcp_w9_pe(be->p_hsh_nthw, hsh->v5.rcp[category + i].w9_pe);
1321 			hsh_nthw_rcp_w9_ofs(be->p_hsh_nthw, hsh->v5.rcp[category + i].w9_ofs);
1322 			hsh_nthw_rcp_w9_sort(be->p_hsh_nthw, hsh->v5.rcp[category + i].w9_sort);
1323 			hsh_nthw_rcp_w9_p(be->p_hsh_nthw, hsh->v5.rcp[category + i].w9_p);
1324 			hsh_nthw_rcp_p_mask(be->p_hsh_nthw, hsh->v5.rcp[category + i].p_mask);
1325 			hsh_nthw_rcp_word_mask(be->p_hsh_nthw,
1326 				hsh->v5.rcp[category + i].word_mask);
1327 			hsh_nthw_rcp_seed(be->p_hsh_nthw, hsh->v5.rcp[category + i].seed);
1328 			hsh_nthw_rcp_tnl_p(be->p_hsh_nthw, hsh->v5.rcp[category + i].tnl_p);
1329 			hsh_nthw_rcp_hsh_valid(be->p_hsh_nthw,
1330 				hsh->v5.rcp[category + i].hsh_valid);
1331 			hsh_nthw_rcp_hsh_type(be->p_hsh_nthw, hsh->v5.rcp[category + i].hsh_type);
1332 			hsh_nthw_rcp_toeplitz(be->p_hsh_nthw, hsh->v5.rcp[category + i].toeplitz);
1333 			hsh_nthw_rcp_k(be->p_hsh_nthw, hsh->v5.rcp[category + i].k);
1334 			hsh_nthw_rcp_auto_ipv4_mask(be->p_hsh_nthw,
1335 				hsh->v5.rcp[category + i].auto_ipv4_mask);
1336 			hsh_nthw_rcp_flush(be->p_hsh_nthw);
1337 		}
1338 	}
1339 
1340 	CHECK_DEBUG_OFF(hsh, be->p_hsh_nthw);
1341 	return 0;
1342 }
1343 
1344 /*
1345  * QSL
1346  */
1347 
1348 static bool qsl_get_present(void *be_dev)
1349 {
1350 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1351 	return be->p_qsl_nthw != NULL;
1352 }
1353 
1354 static uint32_t qsl_get_version(void *be_dev)
1355 {
1356 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1357 	return (uint32_t)((nthw_module_get_major_version(be->p_qsl_nthw->m_qsl) << 16) |
1358 			(nthw_module_get_minor_version(be->p_qsl_nthw->m_qsl) & 0xffff));
1359 }
1360 
1361 static int qsl_rcp_flush(void *be_dev, const struct qsl_func_s *qsl, int category, int cnt)
1362 {
1363 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1364 	CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
1365 
1366 	if (qsl->ver == 7) {
1367 		qsl_nthw_rcp_cnt(be->p_qsl_nthw, 1);
1368 
1369 		for (int i = 0; i < cnt; i++) {
1370 			qsl_nthw_rcp_select(be->p_qsl_nthw, category + i);
1371 			qsl_nthw_rcp_discard(be->p_qsl_nthw, qsl->v7.rcp[category + i].discard);
1372 			qsl_nthw_rcp_drop(be->p_qsl_nthw, qsl->v7.rcp[category + i].drop);
1373 			qsl_nthw_rcp_tbl_lo(be->p_qsl_nthw, qsl->v7.rcp[category + i].tbl_lo);
1374 			qsl_nthw_rcp_tbl_hi(be->p_qsl_nthw, qsl->v7.rcp[category + i].tbl_hi);
1375 			qsl_nthw_rcp_tbl_idx(be->p_qsl_nthw, qsl->v7.rcp[category + i].tbl_idx);
1376 			qsl_nthw_rcp_tbl_msk(be->p_qsl_nthw, qsl->v7.rcp[category + i].tbl_msk);
1377 			qsl_nthw_rcp_lr(be->p_qsl_nthw, qsl->v7.rcp[category + i].lr);
1378 			qsl_nthw_rcp_tsa(be->p_qsl_nthw, qsl->v7.rcp[category + i].tsa);
1379 			qsl_nthw_rcp_vli(be->p_qsl_nthw, qsl->v7.rcp[category + i].vli);
1380 			qsl_nthw_rcp_flush(be->p_qsl_nthw);
1381 		}
1382 	}
1383 
1384 	CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
1385 	return 0;
1386 }
1387 
1388 static int qsl_qst_flush(void *be_dev, const struct qsl_func_s *qsl, int entry, int cnt)
1389 {
1390 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1391 	CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
1392 
1393 	if (qsl->ver == 7) {
1394 		qsl_nthw_qst_cnt(be->p_qsl_nthw, 1);
1395 
1396 		for (int i = 0; i < cnt; i++) {
1397 			qsl_nthw_qst_select(be->p_qsl_nthw, entry + i);
1398 			qsl_nthw_qst_queue(be->p_qsl_nthw, qsl->v7.qst[entry + i].queue);
1399 			qsl_nthw_qst_en(be->p_qsl_nthw, qsl->v7.qst[entry + i].en);
1400 
1401 			qsl_nthw_qst_tx_port(be->p_qsl_nthw, qsl->v7.qst[entry + i].tx_port);
1402 			qsl_nthw_qst_lre(be->p_qsl_nthw, qsl->v7.qst[entry + i].lre);
1403 			qsl_nthw_qst_tci(be->p_qsl_nthw, qsl->v7.qst[entry + i].tci);
1404 			qsl_nthw_qst_ven(be->p_qsl_nthw, qsl->v7.qst[entry + i].ven);
1405 			qsl_nthw_qst_flush(be->p_qsl_nthw);
1406 		}
1407 	}
1408 
1409 	CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
1410 	return 0;
1411 }
1412 
1413 static int qsl_qen_flush(void *be_dev, const struct qsl_func_s *qsl, int entry, int cnt)
1414 {
1415 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1416 	CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
1417 
1418 	if (qsl->ver == 7) {
1419 		qsl_nthw_qen_cnt(be->p_qsl_nthw, 1);
1420 
1421 		for (int i = 0; i < cnt; i++) {
1422 			qsl_nthw_qen_select(be->p_qsl_nthw, entry + i);
1423 			qsl_nthw_qen_en(be->p_qsl_nthw, qsl->v7.qen[entry + i].en);
1424 			qsl_nthw_qen_flush(be->p_qsl_nthw);
1425 		}
1426 	}
1427 
1428 	CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
1429 	return 0;
1430 }
1431 
1432 static int qsl_unmq_flush(void *be_dev, const struct qsl_func_s *qsl, int entry, int cnt)
1433 {
1434 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1435 	CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
1436 
1437 	if (qsl->ver == 7) {
1438 		qsl_nthw_unmq_cnt(be->p_qsl_nthw, 1);
1439 
1440 		for (int i = 0; i < cnt; i++) {
1441 			qsl_nthw_unmq_select(be->p_qsl_nthw, entry + i);
1442 			qsl_nthw_unmq_dest_queue(be->p_qsl_nthw,
1443 				qsl->v7.unmq[entry + i].dest_queue);
1444 			qsl_nthw_unmq_en(be->p_qsl_nthw, qsl->v7.unmq[entry + i].en);
1445 			qsl_nthw_unmq_flush(be->p_qsl_nthw);
1446 		}
1447 	}
1448 
1449 	CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
1450 	return 0;
1451 }
1452 
1453 /*
1454  * SLC LR
1455  */
1456 
1457 static bool slc_lr_get_present(void *be_dev)
1458 {
1459 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1460 	return be->p_slc_lr_nthw != NULL;
1461 }
1462 
1463 static uint32_t slc_lr_get_version(void *be_dev)
1464 {
1465 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1466 	return (uint32_t)((nthw_module_get_major_version(be->p_slc_lr_nthw->m_slc_lr) << 16) |
1467 			(nthw_module_get_minor_version(be->p_slc_lr_nthw->m_slc_lr) & 0xffff));
1468 }
1469 
1470 static int slc_lr_rcp_flush(void *be_dev, const struct slc_lr_func_s *slc_lr, int category,
1471 	int cnt)
1472 {
1473 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1474 	CHECK_DEBUG_ON(be, slc_lr, be->p_slc_lr_nthw);
1475 
1476 	if (slc_lr->ver == 2) {
1477 		slc_lr_nthw_rcp_cnt(be->p_slc_lr_nthw, 1);
1478 
1479 		for (int i = 0; i < cnt; i++) {
1480 			slc_lr_nthw_rcp_select(be->p_slc_lr_nthw, category + i);
1481 			slc_lr_nthw_rcp_head_slc_en(be->p_slc_lr_nthw,
1482 				slc_lr->v2.rcp[category + i].head_slc_en);
1483 			slc_lr_nthw_rcp_head_dyn(be->p_slc_lr_nthw,
1484 				slc_lr->v2.rcp[category + i].head_dyn);
1485 			slc_lr_nthw_rcp_head_ofs(be->p_slc_lr_nthw,
1486 				slc_lr->v2.rcp[category + i].head_ofs);
1487 			slc_lr_nthw_rcp_tail_slc_en(be->p_slc_lr_nthw,
1488 				slc_lr->v2.rcp[category + i].tail_slc_en);
1489 			slc_lr_nthw_rcp_tail_dyn(be->p_slc_lr_nthw,
1490 				slc_lr->v2.rcp[category + i].tail_dyn);
1491 			slc_lr_nthw_rcp_tail_ofs(be->p_slc_lr_nthw,
1492 				slc_lr->v2.rcp[category + i].tail_ofs);
1493 			slc_lr_nthw_rcp_pcap(be->p_slc_lr_nthw, slc_lr->v2.rcp[category + i].pcap);
1494 			slc_lr_nthw_rcp_flush(be->p_slc_lr_nthw);
1495 		}
1496 	}
1497 
1498 	CHECK_DEBUG_OFF(slc_lr, be->p_slc_lr_nthw);
1499 	return 0;
1500 }
1501 
1502 /*
1503  * PDB
1504  */
1505 
1506 static bool pdb_get_present(void *be_dev)
1507 {
1508 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1509 	return be->p_pdb_nthw != NULL;
1510 }
1511 
1512 static uint32_t pdb_get_version(void *be_dev)
1513 {
1514 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1515 	return (uint32_t)((nthw_module_get_major_version(be->p_pdb_nthw->m_pdb) << 16) |
1516 			(nthw_module_get_minor_version(be->p_pdb_nthw->m_pdb) & 0xffff));
1517 }
1518 
1519 static int pdb_rcp_flush(void *be_dev, const struct pdb_func_s *pdb, int category, int cnt)
1520 {
1521 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1522 	CHECK_DEBUG_ON(be, pdb, be->p_pdb_nthw);
1523 
1524 	if (pdb->ver == 9) {
1525 		pdb_nthw_rcp_cnt(be->p_pdb_nthw, 1);
1526 
1527 		for (int i = 0; i < cnt; i++) {
1528 			pdb_nthw_rcp_select(be->p_pdb_nthw, category + i);
1529 			pdb_nthw_rcp_descriptor(be->p_pdb_nthw,
1530 				pdb->v9.rcp[category + i].descriptor);
1531 			pdb_nthw_rcp_desc_len(be->p_pdb_nthw, pdb->v9.rcp[category + i].desc_len);
1532 			pdb_nthw_rcp_tx_port(be->p_pdb_nthw, pdb->v9.rcp[category + i].tx_port);
1533 			pdb_nthw_rcp_tx_ignore(be->p_pdb_nthw,
1534 				pdb->v9.rcp[category + i].tx_ignore);
1535 			pdb_nthw_rcp_tx_now(be->p_pdb_nthw, pdb->v9.rcp[category + i].tx_now);
1536 			pdb_nthw_rcp_crc_overwrite(be->p_pdb_nthw,
1537 				pdb->v9.rcp[category + i].crc_overwrite);
1538 			pdb_nthw_rcp_align(be->p_pdb_nthw, pdb->v9.rcp[category + i].align);
1539 			pdb_nthw_rcp_ofs0_dyn(be->p_pdb_nthw, pdb->v9.rcp[category + i].ofs0_dyn);
1540 			pdb_nthw_rcp_ofs0_rel(be->p_pdb_nthw, pdb->v9.rcp[category + i].ofs0_rel);
1541 			pdb_nthw_rcp_ofs1_dyn(be->p_pdb_nthw, pdb->v9.rcp[category + i].ofs1_dyn);
1542 			pdb_nthw_rcp_ofs1_rel(be->p_pdb_nthw, pdb->v9.rcp[category + i].ofs1_rel);
1543 			pdb_nthw_rcp_ofs2_dyn(be->p_pdb_nthw, pdb->v9.rcp[category + i].ofs2_dyn);
1544 			pdb_nthw_rcp_ofs2_rel(be->p_pdb_nthw, pdb->v9.rcp[category + i].ofs2_rel);
1545 			pdb_nthw_rcp_ip_prot_tnl(be->p_pdb_nthw,
1546 				pdb->v9.rcp[category + i].ip_prot_tnl);
1547 			pdb_nthw_rcp_ppc_hsh(be->p_pdb_nthw, pdb->v9.rcp[category + i].ppc_hsh);
1548 			pdb_nthw_rcp_duplicate_en(be->p_pdb_nthw,
1549 				pdb->v9.rcp[category + i].duplicate_en);
1550 			pdb_nthw_rcp_duplicate_bit(be->p_pdb_nthw,
1551 				pdb->v9.rcp[category + i].duplicate_bit);
1552 			pdb_nthw_rcp_duplicate_bit(be->p_pdb_nthw,
1553 				pdb->v9.rcp[category + i].pcap_keep_fcs);
1554 			pdb_nthw_rcp_flush(be->p_pdb_nthw);
1555 		}
1556 	}
1557 
1558 	CHECK_DEBUG_OFF(pdb, be->p_pdb_nthw);
1559 	return 0;
1560 }
1561 
1562 static int pdb_config_flush(void *be_dev, const struct pdb_func_s *pdb)
1563 {
1564 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1565 	CHECK_DEBUG_ON(be, pdb, be->p_pdb_nthw);
1566 
1567 	if (pdb->ver == 9) {
1568 		pdb_nthw_config_ts_format(be->p_pdb_nthw, pdb->v9.config->ts_format);
1569 		pdb_nthw_config_port_ofs(be->p_pdb_nthw, pdb->v9.config->port_ofs);
1570 		pdb_nthw_config_flush(be->p_pdb_nthw);
1571 	}
1572 
1573 	CHECK_DEBUG_OFF(pdb, be->p_pdb_nthw);
1574 	return 0;
1575 }
1576 
1577 /*
1578  * DBS
1579  */
1580 
/* Rx queue allocation is not supported by this backend; always logs and fails. */
static int alloc_rx_queue(void *be_dev, int queue_id)
{
	(void)be_dev;
	(void)queue_id;
	NT_LOG(ERR, FILTER, "ERROR alloc Rx queue");
	return -1;
}
1588 
1589 static int free_rx_queue(void *be_dev, int hw_queue)
1590 {
1591 	(void)be_dev;
1592 	(void)hw_queue;
1593 	NT_LOG(ERR, FILTER, "ERROR free Rx queue");
1594 	return 0;
1595 }
1596 
/*
 * Backend operations table handed to the flow API core.
 * NOTE(review): positional initializers - the entry order must exactly match
 * the member order of struct flow_api_backend_ops (hw_mod_backend.h); verify
 * when adding or removing entries.
 */
const struct flow_api_backend_ops flow_be_iface = {
	1,	/* presumably the backend interface version - confirm against struct def */

	/* debug control and FPGA resource-dimension getters */
	set_debug_mode,
	get_nb_phy_ports,
	get_nb_rx_ports,
	get_ltx_avail,
	get_nb_cat_funcs,
	get_nb_categories,
	get_nb_cat_km_if_cnt,
	get_nb_cat_km_if_m0,
	get_nb_cat_km_if_m1,
	get_nb_queues,
	get_nb_km_flow_types,
	get_nb_pm_ext,
	get_nb_len,
	get_kcc_size,
	get_kcc_banks,
	get_nb_km_categories,
	get_nb_km_cam_banks,
	get_nb_km_cam_record_words,
	get_nb_km_cam_records,
	get_nb_km_tcam_banks,
	get_nb_km_tcam_bank_width,
	get_nb_flm_categories,
	get_nb_flm_size_mb,
	get_nb_flm_entry_size,
	get_nb_flm_variant,
	get_nb_flm_prios,
	get_nb_flm_pst_profiles,
	get_nb_flm_scrub_profiles,
	get_nb_flm_load_aps_max,
	get_nb_qsl_categories,
	get_nb_qsl_qst_entries,
	get_nb_pdb_categories,
	get_nb_roa_categories,
	get_nb_tpe_categories,
	get_nb_tx_cpy_writers,
	get_nb_tx_cpy_mask_mem,
	get_nb_tx_rpl_depth,
	get_nb_tx_rpl_ext_categories,
	get_nb_tpe_ifr_categories,
	get_nb_rpp_per_ps,
	get_nb_hsh_categories,
	get_nb_hsh_toeplitz,

	/* DBS queue management (stubs in this backend) */
	alloc_rx_queue,
	free_rx_queue,

	/* CAT module */
	cat_get_present,
	cat_get_version,
	cat_cfn_flush,

	cat_kce_flush,
	cat_kcs_flush,
	cat_fte_flush,

	cat_cte_flush,
	cat_cts_flush,
	cat_cot_flush,
	cat_cct_flush,
	cat_exo_flush,
	cat_rck_flush,
	cat_len_flush,
	cat_kcc_flush,

	/* KM module */
	km_get_present,
	km_get_version,
	km_rcp_flush,
	km_cam_flush,
	km_tcam_flush,
	km_tci_flush,
	km_tcq_flush,

	/* FLM module */
	flm_get_present,
	flm_get_version,
	flm_control_flush,
	flm_status_flush,
	flm_status_update,
	flm_scan_flush,
	flm_load_bin_flush,
	flm_prio_flush,
	flm_pst_flush,
	flm_rcp_flush,
	flm_scrub_flush,
	flm_buf_ctrl_update,
	flm_stat_update,
	flm_lrn_data_flush,
	flm_inf_sta_data_update,

	/* HSH module */
	hsh_get_present,
	hsh_get_version,
	hsh_rcp_flush,

	/* QSL module */
	qsl_get_present,
	qsl_get_version,
	qsl_rcp_flush,
	qsl_qst_flush,
	qsl_qen_flush,
	qsl_unmq_flush,

	/* SLC LR module */
	slc_lr_get_present,
	slc_lr_get_version,
	slc_lr_rcp_flush,

	/* PDB module */
	pdb_get_present,
	pdb_get_version,
	pdb_rcp_flush,
	pdb_config_flush,
};
1707 
1708 const struct flow_api_backend_ops *bin_flow_backend_init(nthw_fpga_t *p_fpga, void **dev)
1709 {
1710 	uint8_t physical_adapter_no = (uint8_t)p_fpga->p_fpga_info->adapter_no;
1711 
1712 	struct info_nthw *pinfonthw = info_nthw_new();
1713 	info_nthw_init(pinfonthw, p_fpga, physical_adapter_no);
1714 	be_devs[physical_adapter_no].p_info_nthw = pinfonthw;
1715 
1716 	/* Init nthw CAT */
1717 	if (cat_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1718 		struct cat_nthw *pcatnthw = cat_nthw_new();
1719 		cat_nthw_init(pcatnthw, p_fpga, physical_adapter_no);
1720 		be_devs[physical_adapter_no].p_cat_nthw = pcatnthw;
1721 
1722 	} else {
1723 		be_devs[physical_adapter_no].p_cat_nthw = NULL;
1724 	}
1725 
1726 	/* Init nthw KM */
1727 	if (km_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1728 		struct km_nthw *pkmnthw = km_nthw_new();
1729 		km_nthw_init(pkmnthw, p_fpga, physical_adapter_no);
1730 		be_devs[physical_adapter_no].p_km_nthw = pkmnthw;
1731 
1732 	} else {
1733 		be_devs[physical_adapter_no].p_km_nthw = NULL;
1734 	}
1735 
1736 	/* Init nthw FLM */
1737 	if (flm_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1738 		struct flm_nthw *pflmnthw = flm_nthw_new();
1739 		flm_nthw_init(pflmnthw, p_fpga, physical_adapter_no);
1740 		be_devs[physical_adapter_no].p_flm_nthw = pflmnthw;
1741 
1742 	} else {
1743 		be_devs[physical_adapter_no].p_flm_nthw = NULL;
1744 	}
1745 
1746 	/* Init nthw IFR */
1747 	if (ifr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1748 		struct ifr_nthw *ifrnthw = ifr_nthw_new();
1749 		ifr_nthw_init(ifrnthw, p_fpga, physical_adapter_no);
1750 		be_devs[physical_adapter_no].p_ifr_nthw = ifrnthw;
1751 
1752 	} else {
1753 		be_devs[physical_adapter_no].p_ifr_nthw = NULL;
1754 	}
1755 
1756 	/* Init nthw HSH */
1757 	if (hsh_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1758 		struct hsh_nthw *phshnthw = hsh_nthw_new();
1759 		hsh_nthw_init(phshnthw, p_fpga, physical_adapter_no);
1760 		be_devs[physical_adapter_no].p_hsh_nthw = phshnthw;
1761 
1762 	} else {
1763 		be_devs[physical_adapter_no].p_hsh_nthw = NULL;
1764 	}
1765 
1766 	/* Init nthw QSL */
1767 	if (qsl_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1768 		struct qsl_nthw *pqslnthw = qsl_nthw_new();
1769 		qsl_nthw_init(pqslnthw, p_fpga, physical_adapter_no);
1770 		be_devs[physical_adapter_no].p_qsl_nthw = pqslnthw;
1771 
1772 	} else {
1773 		be_devs[physical_adapter_no].p_qsl_nthw = NULL;
1774 	}
1775 
1776 	/* Init nthw SLC LR */
1777 	if (slc_lr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1778 		struct slc_lr_nthw *pslclrnthw = slc_lr_nthw_new();
1779 		slc_lr_nthw_init(pslclrnthw, p_fpga, physical_adapter_no);
1780 		be_devs[physical_adapter_no].p_slc_lr_nthw = pslclrnthw;
1781 
1782 	} else {
1783 		be_devs[physical_adapter_no].p_slc_lr_nthw = NULL;
1784 	}
1785 
1786 	/* Init nthw PDB */
1787 	if (pdb_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1788 		struct pdb_nthw *ppdbnthw = pdb_nthw_new();
1789 		pdb_nthw_init(ppdbnthw, p_fpga, physical_adapter_no);
1790 		be_devs[physical_adapter_no].p_pdb_nthw = ppdbnthw;
1791 
1792 	} else {
1793 		be_devs[physical_adapter_no].p_pdb_nthw = NULL;
1794 	}
1795 
1796 	/* Init nthw HFU */
1797 	if (hfu_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1798 		struct hfu_nthw *ptr = hfu_nthw_new();
1799 		hfu_nthw_init(ptr, p_fpga, physical_adapter_no);
1800 		be_devs[physical_adapter_no].p_hfu_nthw = ptr;
1801 
1802 	} else {
1803 		be_devs[physical_adapter_no].p_hfu_nthw = NULL;
1804 	}
1805 
1806 	/* Init nthw RPP_LR */
1807 	if (rpp_lr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1808 		struct rpp_lr_nthw *ptr = rpp_lr_nthw_new();
1809 		rpp_lr_nthw_init(ptr, p_fpga, physical_adapter_no);
1810 		be_devs[physical_adapter_no].p_rpp_lr_nthw = ptr;
1811 
1812 	} else {
1813 		be_devs[physical_adapter_no].p_rpp_lr_nthw = NULL;
1814 	}
1815 
1816 	/* Init nthw TX_CPY */
1817 	if (tx_cpy_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1818 		struct tx_cpy_nthw *ptr = tx_cpy_nthw_new();
1819 		tx_cpy_nthw_init(ptr, p_fpga, physical_adapter_no);
1820 		be_devs[physical_adapter_no].p_tx_cpy_nthw = ptr;
1821 
1822 	} else {
1823 		be_devs[physical_adapter_no].p_tx_cpy_nthw = NULL;
1824 	}
1825 
1826 	/* Init nthw CSU */
1827 	if (csu_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1828 		struct csu_nthw *ptr = csu_nthw_new();
1829 		csu_nthw_init(ptr, p_fpga, physical_adapter_no);
1830 		be_devs[physical_adapter_no].p_csu_nthw = ptr;
1831 
1832 	} else {
1833 		be_devs[physical_adapter_no].p_csu_nthw = NULL;
1834 	}
1835 
1836 	/* Init nthw TX_INS */
1837 	if (tx_ins_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1838 		struct tx_ins_nthw *ptr = tx_ins_nthw_new();
1839 		tx_ins_nthw_init(ptr, p_fpga, physical_adapter_no);
1840 		be_devs[physical_adapter_no].p_tx_ins_nthw = ptr;
1841 
1842 	} else {
1843 		be_devs[physical_adapter_no].p_tx_ins_nthw = NULL;
1844 	}
1845 
1846 	be_devs[physical_adapter_no].adapter_no = physical_adapter_no;
1847 	*dev = (void *)&be_devs[physical_adapter_no];
1848 
1849 	return &flow_be_iface;
1850 }
1851 
1852 static void bin_flow_backend_done(void *dev)
1853 {
1854 	struct backend_dev_s *be_dev = (struct backend_dev_s *)dev;
1855 	info_nthw_delete(be_dev->p_info_nthw);
1856 	cat_nthw_delete(be_dev->p_cat_nthw);
1857 	km_nthw_delete(be_dev->p_km_nthw);
1858 	flm_nthw_delete(be_dev->p_flm_nthw);
1859 	hsh_nthw_delete(be_dev->p_hsh_nthw);
1860 	qsl_nthw_delete(be_dev->p_qsl_nthw);
1861 	slc_lr_nthw_delete(be_dev->p_slc_lr_nthw);
1862 	pdb_nthw_delete(be_dev->p_pdb_nthw);
1863 	csu_nthw_delete(be_dev->p_csu_nthw);
1864 	hfu_nthw_delete(be_dev->p_hfu_nthw);
1865 	rpp_lr_nthw_delete(be_dev->p_rpp_lr_nthw);
1866 	tx_cpy_nthw_delete(be_dev->p_tx_cpy_nthw);
1867 	tx_ins_nthw_delete(be_dev->p_tx_ins_nthw);
1868 }
1869 
/* Entry points exposed to the driver's module registry (ntnic_mod_reg). */
static const struct flow_backend_ops ops = {
	.bin_flow_backend_init = bin_flow_backend_init,
	.bin_flow_backend_done = bin_flow_backend_done,
};
1874 
/* Register this binary flow backend with the driver's module registry. */
void flow_backend_init(void)
{
	register_flow_backend_ops(&ops);
}
1879