xref: /dpdk/drivers/net/ntnic/nthw/flow_api/flow_backend/flow_backend.c (revision f543ca6b9ab2b09750b2e2d44737b0af1ff4df2e)
1 /*
2  * SPDX-License-Identifier: BSD-3-Clause
3  * Copyright(c) 2023 Napatech A/S
4  */
5 
6 #include <stdint.h>
7 
8 #include "flow_nthw_info.h"
9 #include "flow_nthw_ifr.h"
10 #include "flow_nthw_cat.h"
11 #include "flow_nthw_km.h"
12 #include "flow_nthw_flm.h"
13 #include "flow_nthw_hfu.h"
14 #include "flow_nthw_hsh.h"
15 #include "flow_nthw_qsl.h"
16 #include "flow_nthw_slc_lr.h"
17 #include "flow_nthw_pdb.h"
18 #include "flow_nthw_rpp_lr.h"
19 #include "ntnic_mod_reg.h"
20 #include "nthw_fpga_model.h"
21 #include "hw_mod_backend.h"
22 
/*
 * Binary Flow API backend implementation for the ntservice driver.
 *
 * General note on this backend implementation:
 * A shadow copy of the register state could be used to combine multiple
 * writes into a single flush. That optimization is deliberately omitted,
 * since this backend is intended for development and testing only.
 */
29 
/*
 * Per-adapter backend state: the selected debug mode plus one NTHW driver
 * instance pointer per flow-engine module (NULL when the module is absent,
 * see e.g. cat_get_present()).
 */
static struct backend_dev_s {
	uint8_t adapter_no;
	enum debug_mode_e dmode;	/* consumed by CHECK_DEBUG_ON() */
	struct info_nthw *p_info_nthw;
	struct cat_nthw *p_cat_nthw;
	struct km_nthw *p_km_nthw;
	struct flm_nthw *p_flm_nthw;
	struct hsh_nthw *p_hsh_nthw;
	struct qsl_nthw *p_qsl_nthw;
	struct slc_lr_nthw *p_slc_lr_nthw;
	struct pdb_nthw *p_pdb_nthw;
	struct hfu_nthw *p_hfu_nthw;    /* TPE module */
	struct rpp_lr_nthw *p_rpp_lr_nthw;      /* TPE module */
	struct ifr_nthw *p_ifr_nthw;    /* TPE module */
} be_devs[MAX_PHYS_ADAPTERS];
45 
/*
 * Turn on full register debug tracing for module instance 'inst' when either
 * the backend-wide write-debug mode or the module's own debug flag is set.
 * Declares the local flag consumed by the matching CHECK_DEBUG_OFF(), so it
 * must be used at most once per scope and before CHECK_DEBUG_OFF().
 *
 * Fix: the flag was previously named '__debug__'; identifiers beginning with
 * a double underscore are reserved for the implementation (C11 7.1.3), so it
 * is renamed to 'cdbg_enabled'.  Both macros are updated together.
 */
#define CHECK_DEBUG_ON(be, mod, inst)                                                             \
	int cdbg_enabled = 0;                                                                     \
	if (((be)->dmode & FLOW_BACKEND_DEBUG_MODE_WRITE) || (mod)->debug)                        \
		do {                                                                              \
			mod##_nthw_set_debug_mode((inst), 0xFF);                                  \
			cdbg_enabled = 1;                                                         \
	} while (0)

/* Turn debug tracing back off, but only if CHECK_DEBUG_ON() enabled it. */
#define CHECK_DEBUG_OFF(mod, inst)                                                                \
	do {                                                                                      \
		if (cdbg_enabled)                                                                 \
			mod##_nthw_set_debug_mode((inst), 0);                                     \
	} while (0)
59 
60 const struct flow_api_backend_ops *bin_flow_backend_init(nthw_fpga_t *p_fpga, void **be_dev);
61 static void bin_flow_backend_done(void *be_dev);
62 
63 static int set_debug_mode(void *be_dev, enum debug_mode_e mode)
64 {
65 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
66 	be->dmode = mode;
67 	return 0;
68 }
69 
70 /*
71  * INFO
72  */
73 
74 static int get_nb_phy_ports(void *be_dev)
75 {
76 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
77 	return info_nthw_get_nb_phy_ports(be->p_info_nthw);
78 }
79 
80 static int get_nb_rx_ports(void *be_dev)
81 {
82 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
83 	return info_nthw_get_nb_rx_ports(be->p_info_nthw);
84 }
85 
86 static int get_ltx_avail(void *be_dev)
87 {
88 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
89 	return info_nthw_get_ltx_avail(be->p_info_nthw);
90 }
91 
92 static int get_nb_cat_funcs(void *be_dev)
93 {
94 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
95 	return info_nthw_get_nb_cat_funcs(be->p_info_nthw);
96 }
97 
98 static int get_nb_categories(void *be_dev)
99 {
100 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
101 	return info_nthw_get_nb_categories(be->p_info_nthw);
102 }
103 
104 static int get_nb_cat_km_if_cnt(void *be_dev)
105 {
106 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
107 	return info_nthw_get_nb_cat_km_if_cnt(be->p_info_nthw);
108 }
109 
110 static int get_nb_cat_km_if_m0(void *be_dev)
111 {
112 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
113 	return info_nthw_get_nb_cat_km_if_m0(be->p_info_nthw);
114 }
115 
116 static int get_nb_cat_km_if_m1(void *be_dev)
117 {
118 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
119 	return info_nthw_get_nb_cat_km_if_m1(be->p_info_nthw);
120 }
121 
122 static int get_nb_queues(void *be_dev)
123 {
124 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
125 	return info_nthw_get_nb_queues(be->p_info_nthw);
126 }
127 
128 static int get_nb_km_flow_types(void *be_dev)
129 {
130 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
131 	return info_nthw_get_nb_km_flow_types(be->p_info_nthw);
132 }
133 
134 static int get_nb_pm_ext(void *be_dev)
135 {
136 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
137 	return info_nthw_get_nb_pm_ext(be->p_info_nthw);
138 }
139 
140 static int get_nb_len(void *be_dev)
141 {
142 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
143 	return info_nthw_get_nb_len(be->p_info_nthw);
144 }
145 
146 static int get_kcc_size(void *be_dev)
147 {
148 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
149 	return info_nthw_get_kcc_size(be->p_info_nthw);
150 }
151 
152 static int get_kcc_banks(void *be_dev)
153 {
154 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
155 	return info_nthw_get_kcc_banks(be->p_info_nthw);
156 }
157 
158 static int get_nb_km_categories(void *be_dev)
159 {
160 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
161 	return info_nthw_get_nb_km_categories(be->p_info_nthw);
162 }
163 
164 static int get_nb_km_cam_banks(void *be_dev)
165 {
166 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
167 	return info_nthw_get_nb_km_cam_banks(be->p_info_nthw);
168 }
169 
170 static int get_nb_km_cam_record_words(void *be_dev)
171 {
172 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
173 	return info_nthw_get_nb_km_cam_record_words(be->p_info_nthw);
174 }
175 
176 static int get_nb_km_cam_records(void *be_dev)
177 {
178 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
179 	return info_nthw_get_nb_km_cam_records(be->p_info_nthw);
180 }
181 
182 static int get_nb_km_tcam_banks(void *be_dev)
183 {
184 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
185 	return info_nthw_get_nb_km_tcam_banks(be->p_info_nthw);
186 }
187 
188 static int get_nb_km_tcam_bank_width(void *be_dev)
189 {
190 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
191 	return info_nthw_get_nb_km_tcam_bank_width(be->p_info_nthw);
192 }
193 
194 static int get_nb_flm_categories(void *be_dev)
195 {
196 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
197 	return info_nthw_get_nb_flm_categories(be->p_info_nthw);
198 }
199 
200 static int get_nb_flm_size_mb(void *be_dev)
201 {
202 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
203 	return info_nthw_get_nb_flm_size_mb(be->p_info_nthw);
204 }
205 
206 static int get_nb_flm_entry_size(void *be_dev)
207 {
208 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
209 	return info_nthw_get_nb_flm_entry_size(be->p_info_nthw);
210 }
211 
212 static int get_nb_flm_variant(void *be_dev)
213 {
214 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
215 	return info_nthw_get_nb_flm_variant(be->p_info_nthw);
216 }
217 
218 static int get_nb_flm_prios(void *be_dev)
219 {
220 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
221 	return info_nthw_get_nb_flm_prios(be->p_info_nthw);
222 }
223 
224 static int get_nb_flm_pst_profiles(void *be_dev)
225 {
226 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
227 	return info_nthw_get_nb_flm_pst_profiles(be->p_info_nthw);
228 }
229 
230 static int get_nb_flm_scrub_profiles(void *be_dev)
231 {
232 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
233 	return info_nthw_get_nb_flm_scrub_profiles(be->p_info_nthw);
234 }
235 
236 static int get_nb_flm_load_aps_max(void *be_dev)
237 {
238 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
239 	return info_nthw_get_nb_flm_load_aps_max(be->p_info_nthw);
240 }
241 
242 static int get_nb_qsl_categories(void *be_dev)
243 {
244 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
245 	return info_nthw_get_nb_qsl_categories(be->p_info_nthw);
246 }
247 
248 static int get_nb_qsl_qst_entries(void *be_dev)
249 {
250 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
251 	return info_nthw_get_nb_qsl_qst_entries(be->p_info_nthw);
252 }
253 
254 static int get_nb_pdb_categories(void *be_dev)
255 {
256 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
257 	return info_nthw_get_nb_pdb_categories(be->p_info_nthw);
258 }
259 
260 static int get_nb_roa_categories(void *be_dev)
261 {
262 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
263 	return info_nthw_get_nb_roa_categories(be->p_info_nthw);
264 }
265 
266 static int get_nb_tpe_categories(void *be_dev)
267 {
268 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
269 	return info_nthw_get_nb_tpe_categories(be->p_info_nthw);
270 }
271 
272 static int get_nb_tx_cpy_writers(void *be_dev)
273 {
274 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
275 	return info_nthw_get_nb_tx_cpy_writers(be->p_info_nthw);
276 }
277 
278 static int get_nb_tx_cpy_mask_mem(void *be_dev)
279 {
280 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
281 	return info_nthw_get_nb_tx_cpy_mask_mem(be->p_info_nthw);
282 }
283 
284 static int get_nb_tx_rpl_depth(void *be_dev)
285 {
286 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
287 	return info_nthw_get_nb_tx_rpl_depth(be->p_info_nthw);
288 }
289 
290 static int get_nb_tx_rpl_ext_categories(void *be_dev)
291 {
292 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
293 	return info_nthw_get_nb_tx_rpl_ext_categories(be->p_info_nthw);
294 }
295 
296 static int get_nb_tpe_ifr_categories(void *be_dev)
297 {
298 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
299 	return info_nthw_get_nb_tpe_ifr_categories(be->p_info_nthw);
300 }
301 
302 static int get_nb_rpp_per_ps(void *be_dev)
303 {
304 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
305 	return info_nthw_get_nb_rpp_per_ps(be->p_info_nthw);
306 }
307 
308 static int get_nb_hsh_categories(void *be_dev)
309 {
310 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
311 	return info_nthw_get_nb_hsh_categories(be->p_info_nthw);
312 }
313 
314 static int get_nb_hsh_toeplitz(void *be_dev)
315 {
316 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
317 	return info_nthw_get_nb_hsh_toeplitz(be->p_info_nthw);
318 }
319 
320 /*
321  * CAT
322  */
323 
324 static bool cat_get_present(void *be_dev)
325 {
326 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
327 	return be->p_cat_nthw != NULL;
328 }
329 
330 static uint32_t cat_get_version(void *be_dev)
331 {
332 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
333 	return (uint32_t)((nthw_module_get_major_version(be->p_cat_nthw->m_cat) << 16) |
334 			(nthw_module_get_minor_version(be->p_cat_nthw->m_cat) & 0xffff));
335 }
336 
/*
 * Write 'cnt' consecutive CAT CFN (categorizer function) records, starting
 * at record 'cat_func', from the shadow state to hardware.  Each record is
 * written field by field and then committed with cat_nthw_cfn_flush().
 * The field set depends on the CAT module version: v18 and v21 are handled,
 * any other version is silently ignored.  Always returns 0.
 */
static int cat_cfn_flush(void *be_dev, const struct cat_func_s *cat, int cat_func, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;

	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);

	if (cat->ver == 18) {
		cat_nthw_cfn_cnt(be->p_cat_nthw, 1U);

		for (int i = 0; i < cnt; i++) {
			cat_nthw_cfn_select(be->p_cat_nthw, cat_func);
			cat_nthw_cfn_enable(be->p_cat_nthw, cat->v18.cfn[cat_func].enable);
			cat_nthw_cfn_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].inv);
			cat_nthw_cfn_ptc_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_inv);
			cat_nthw_cfn_ptc_isl(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_isl);
			cat_nthw_cfn_ptc_cfp(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_cfp);
			cat_nthw_cfn_ptc_mac(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_mac);
			cat_nthw_cfn_ptc_l2(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_l2);
			cat_nthw_cfn_ptc_vn_tag(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_vntag);
			cat_nthw_cfn_ptc_vlan(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_vlan);
			cat_nthw_cfn_ptc_mpls(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_mpls);
			cat_nthw_cfn_ptc_l3(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_l3);
			cat_nthw_cfn_ptc_frag(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_frag);
			cat_nthw_cfn_ptc_ip_prot(be->p_cat_nthw,
				cat->v18.cfn[cat_func].ptc_ip_prot);
			cat_nthw_cfn_ptc_l4(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_l4);
			cat_nthw_cfn_ptc_tunnel(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tunnel);
			cat_nthw_cfn_ptc_tnl_l2(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_l2);
			cat_nthw_cfn_ptc_tnl_vlan(be->p_cat_nthw,
				cat->v18.cfn[cat_func].ptc_tnl_vlan);
			cat_nthw_cfn_ptc_tnl_mpls(be->p_cat_nthw,
				cat->v18.cfn[cat_func].ptc_tnl_mpls);
			cat_nthw_cfn_ptc_tnl_l3(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_l3);
			cat_nthw_cfn_ptc_tnl_frag(be->p_cat_nthw,
				cat->v18.cfn[cat_func].ptc_tnl_frag);
			cat_nthw_cfn_ptc_tnl_ip_prot(be->p_cat_nthw,
				cat->v18.cfn[cat_func].ptc_tnl_ip_prot);
			cat_nthw_cfn_ptc_tnl_l4(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_l4);

			/* error-condition match bits */
			cat_nthw_cfn_err_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].err_inv);
			cat_nthw_cfn_err_cv(be->p_cat_nthw, cat->v18.cfn[cat_func].err_cv);
			cat_nthw_cfn_err_fcs(be->p_cat_nthw, cat->v18.cfn[cat_func].err_fcs);
			cat_nthw_cfn_err_trunc(be->p_cat_nthw, cat->v18.cfn[cat_func].err_trunc);
			cat_nthw_cfn_err_l3_cs(be->p_cat_nthw, cat->v18.cfn[cat_func].err_l3_cs);
			cat_nthw_cfn_err_l4_cs(be->p_cat_nthw, cat->v18.cfn[cat_func].err_l4_cs);

			cat_nthw_cfn_mac_port(be->p_cat_nthw, cat->v18.cfn[cat_func].mac_port);

			/* pattern-matcher configuration */
			cat_nthw_cfn_pm_cmp(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_cmp);
			cat_nthw_cfn_pm_dct(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_dct);
			cat_nthw_cfn_pm_ext_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_ext_inv);
			cat_nthw_cfn_pm_cmb(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_cmb);
			cat_nthw_cfn_pm_and_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_and_inv);
			cat_nthw_cfn_pm_or_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_or_inv);
			cat_nthw_cfn_pm_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_inv);

			cat_nthw_cfn_lc(be->p_cat_nthw, cat->v18.cfn[cat_func].lc);
			cat_nthw_cfn_lc_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].lc_inv);
			cat_nthw_cfn_km0_or(be->p_cat_nthw, cat->v18.cfn[cat_func].km_or);
			cat_nthw_cfn_flush(be->p_cat_nthw);
			cat_func++;
		}

	} else if (cat->ver == 21) {
		/* v21 adds tunnel/TTL error bits and a second KM interface. */
		cat_nthw_cfn_cnt(be->p_cat_nthw, 1U);

		for (int i = 0; i < cnt; i++) {
			cat_nthw_cfn_select(be->p_cat_nthw, cat_func);
			cat_nthw_cfn_enable(be->p_cat_nthw, cat->v21.cfn[cat_func].enable);
			cat_nthw_cfn_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].inv);
			cat_nthw_cfn_ptc_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_inv);
			cat_nthw_cfn_ptc_isl(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_isl);
			cat_nthw_cfn_ptc_cfp(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_cfp);
			cat_nthw_cfn_ptc_mac(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_mac);
			cat_nthw_cfn_ptc_l2(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_l2);
			cat_nthw_cfn_ptc_vn_tag(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_vntag);
			cat_nthw_cfn_ptc_vlan(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_vlan);
			cat_nthw_cfn_ptc_mpls(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_mpls);
			cat_nthw_cfn_ptc_l3(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_l3);
			cat_nthw_cfn_ptc_frag(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_frag);
			cat_nthw_cfn_ptc_ip_prot(be->p_cat_nthw,
				cat->v21.cfn[cat_func].ptc_ip_prot);
			cat_nthw_cfn_ptc_l4(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_l4);
			cat_nthw_cfn_ptc_tunnel(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tunnel);
			cat_nthw_cfn_ptc_tnl_l2(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_l2);
			cat_nthw_cfn_ptc_tnl_vlan(be->p_cat_nthw,
				cat->v21.cfn[cat_func].ptc_tnl_vlan);
			cat_nthw_cfn_ptc_tnl_mpls(be->p_cat_nthw,
				cat->v21.cfn[cat_func].ptc_tnl_mpls);
			cat_nthw_cfn_ptc_tnl_l3(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_l3);
			cat_nthw_cfn_ptc_tnl_frag(be->p_cat_nthw,
				cat->v21.cfn[cat_func].ptc_tnl_frag);
			cat_nthw_cfn_ptc_tnl_ip_prot(be->p_cat_nthw,
				cat->v21.cfn[cat_func].ptc_tnl_ip_prot);
			cat_nthw_cfn_ptc_tnl_l4(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_l4);

			/* error-condition match bits (v21 includes tunnel/TTL errors) */
			cat_nthw_cfn_err_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].err_inv);
			cat_nthw_cfn_err_cv(be->p_cat_nthw, cat->v21.cfn[cat_func].err_cv);
			cat_nthw_cfn_err_fcs(be->p_cat_nthw, cat->v21.cfn[cat_func].err_fcs);
			cat_nthw_cfn_err_trunc(be->p_cat_nthw, cat->v21.cfn[cat_func].err_trunc);
			cat_nthw_cfn_err_l3_cs(be->p_cat_nthw, cat->v21.cfn[cat_func].err_l3_cs);
			cat_nthw_cfn_err_l4_cs(be->p_cat_nthw, cat->v21.cfn[cat_func].err_l4_cs);
			cat_nthw_cfn_err_tnl_l3_cs(be->p_cat_nthw,
				cat->v21.cfn[cat_func].err_tnl_l3_cs);
			cat_nthw_cfn_err_tnl_l4_cs(be->p_cat_nthw,
				cat->v21.cfn[cat_func].err_tnl_l4_cs);
			cat_nthw_cfn_err_ttl_exp(be->p_cat_nthw,
				cat->v21.cfn[cat_func].err_ttl_exp);
			cat_nthw_cfn_err_tnl_ttl_exp(be->p_cat_nthw,
				cat->v21.cfn[cat_func].err_tnl_ttl_exp);

			cat_nthw_cfn_mac_port(be->p_cat_nthw, cat->v21.cfn[cat_func].mac_port);

			/* pattern-matcher configuration */
			cat_nthw_cfn_pm_cmp(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_cmp);
			cat_nthw_cfn_pm_dct(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_dct);
			cat_nthw_cfn_pm_ext_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_ext_inv);
			cat_nthw_cfn_pm_cmb(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_cmb);
			cat_nthw_cfn_pm_and_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_and_inv);
			cat_nthw_cfn_pm_or_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_or_inv);
			cat_nthw_cfn_pm_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_inv);

			cat_nthw_cfn_lc(be->p_cat_nthw, cat->v21.cfn[cat_func].lc);
			cat_nthw_cfn_lc_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].lc_inv);
			cat_nthw_cfn_km0_or(be->p_cat_nthw, cat->v21.cfn[cat_func].km0_or);

			/* the second KM interface exists only on some FPGAs */
			if (be->p_cat_nthw->m_km_if_cnt > 1)
				cat_nthw_cfn_km1_or(be->p_cat_nthw, cat->v21.cfn[cat_func].km1_or);

			cat_nthw_cfn_flush(be->p_cat_nthw);
			cat_func++;
		}
	}

	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
	return 0;
}
473 
/*
 * Flush 'cnt' CAT KCE (KM category enable) records starting at 'index'.
 * CAT v18 has a single KM interface (hard-coded index 0); v21 addresses
 * the interface selected by 'km_if_idx'.  Other versions are ignored.
 * Always returns 0.
 */
static int cat_kce_flush(void *be_dev, const struct cat_func_s *cat, int km_if_idx, int index,
	int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);

	if (cat->ver == 18) {
		cat_nthw_kce_cnt(be->p_cat_nthw, 0, 1U);

		for (int i = 0; i < cnt; i++) {
			cat_nthw_kce_select(be->p_cat_nthw, 0, index + i);
			cat_nthw_kce_enable(be->p_cat_nthw, 0, cat->v18.kce[index + i].enable_bm);
			cat_nthw_kce_flush(be->p_cat_nthw, 0);
		}

	} else if (cat->ver == 21) {
		cat_nthw_kce_cnt(be->p_cat_nthw, km_if_idx, 1U);

		for (int i = 0; i < cnt; i++) {
			cat_nthw_kce_select(be->p_cat_nthw, km_if_idx, index + i);
			cat_nthw_kce_enable(be->p_cat_nthw, km_if_idx,
				cat->v21.kce[index + i].enable_bm[km_if_idx]);
			cat_nthw_kce_flush(be->p_cat_nthw, km_if_idx);
		}
	}

	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
	return 0;
}
503 
/*
 * Flush 'cnt' CAT KCS (KM category select) records starting at 'cat_func'.
 * CAT v18 uses KM interface 0 only; v21 uses 'km_if_idx'.  Other versions
 * are ignored.  Always returns 0.
 */
static int cat_kcs_flush(void *be_dev, const struct cat_func_s *cat, int km_if_idx, int cat_func,
	int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);

	if (cat->ver == 18) {
		cat_nthw_kcs_cnt(be->p_cat_nthw, 0, 1U);

		for (int i = 0; i < cnt; i++) {
			cat_nthw_kcs_select(be->p_cat_nthw, 0, cat_func);
			cat_nthw_kcs_category(be->p_cat_nthw, 0, cat->v18.kcs[cat_func].category);
			cat_nthw_kcs_flush(be->p_cat_nthw, 0);
			cat_func++;
		}

	} else if (cat->ver == 21) {
		cat_nthw_kcs_cnt(be->p_cat_nthw, km_if_idx, 1U);

		for (int i = 0; i < cnt; i++) {
			cat_nthw_kcs_select(be->p_cat_nthw, km_if_idx, cat_func);
			cat_nthw_kcs_category(be->p_cat_nthw, km_if_idx,
				cat->v21.kcs[cat_func].category[km_if_idx]);
			cat_nthw_kcs_flush(be->p_cat_nthw, km_if_idx);
			cat_func++;
		}
	}

	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
	return 0;
}
535 
/*
 * Flush 'cnt' CAT FTE (flow type enable) records starting at 'index'.
 * CAT v18 uses KM interface 0 only; v21 uses 'km_if_idx'.  Other versions
 * are ignored.  Always returns 0.
 */
static int cat_fte_flush(void *be_dev, const struct cat_func_s *cat, int km_if_idx, int index,
	int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);

	if (cat->ver == 18) {
		cat_nthw_fte_cnt(be->p_cat_nthw, 0, 1);

		for (int i = 0; i < cnt; i++) {
			cat_nthw_fte_select(be->p_cat_nthw, 0, index + i);
			cat_nthw_fte_enable(be->p_cat_nthw, 0, cat->v18.fte[index + i].enable_bm);
			cat_nthw_fte_flush(be->p_cat_nthw, 0);
		}

	} else if (cat->ver == 21) {
		cat_nthw_fte_cnt(be->p_cat_nthw, km_if_idx, 1);

		for (int i = 0; i < cnt; i++) {
			cat_nthw_fte_select(be->p_cat_nthw, km_if_idx, index + i);
			cat_nthw_fte_enable(be->p_cat_nthw, km_if_idx,
				cat->v21.fte[index + i].enable_bm[km_if_idx]);
			cat_nthw_fte_flush(be->p_cat_nthw, km_if_idx);
		}
	}

	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
	return 0;
}
565 
/*
 * Flush 'cnt' CAT CTE (categorizer table enable) records starting at
 * 'cat_func'.  The v18 layout is shared by v18 and v21 (note the v18 union
 * member is used for both).  Other versions are ignored.  Always returns 0.
 */
static int cat_cte_flush(void *be_dev, const struct cat_func_s *cat, int cat_func, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);

	if (cat->ver == 18 || cat->ver == 21) {
		cat_nthw_cte_cnt(be->p_cat_nthw, 1);

		for (int i = 0; i < cnt; i++) {
			cat_nthw_cte_select(be->p_cat_nthw, cat_func);
			/* per-module enable bits for this category */
			cat_nthw_cte_enable_col(be->p_cat_nthw, cat->v18.cte[cat_func].b.col);
			cat_nthw_cte_enable_cor(be->p_cat_nthw, cat->v18.cte[cat_func].b.cor);
			cat_nthw_cte_enable_hsh(be->p_cat_nthw, cat->v18.cte[cat_func].b.hsh);
			cat_nthw_cte_enable_qsl(be->p_cat_nthw, cat->v18.cte[cat_func].b.qsl);
			cat_nthw_cte_enable_ipf(be->p_cat_nthw, cat->v18.cte[cat_func].b.ipf);
			cat_nthw_cte_enable_slc(be->p_cat_nthw, cat->v18.cte[cat_func].b.slc);
			cat_nthw_cte_enable_pdb(be->p_cat_nthw, cat->v18.cte[cat_func].b.pdb);
			cat_nthw_cte_enable_msk(be->p_cat_nthw, cat->v18.cte[cat_func].b.msk);
			cat_nthw_cte_enable_hst(be->p_cat_nthw, cat->v18.cte[cat_func].b.hst);
			cat_nthw_cte_enable_epp(be->p_cat_nthw, cat->v18.cte[cat_func].b.epp);
			cat_nthw_cte_enable_tpe(be->p_cat_nthw, cat->v18.cte[cat_func].b.tpe);

			cat_nthw_cte_flush(be->p_cat_nthw);
			cat_func++;
		}
	}

	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
	return 0;
}
596 
/*
 * Flush 'cnt' CAT CTS (categorizer table select) records starting at
 * 'index'.  The v18 layout is shared by v18 and v21.  Other versions are
 * ignored.  Always returns 0.
 */
static int cat_cts_flush(void *be_dev, const struct cat_func_s *cat, int index, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);

	if (cat->ver == 18 || cat->ver == 21) {
		cat_nthw_cts_cnt(be->p_cat_nthw, 1);

		for (int i = 0; i < cnt; i++) {
			cat_nthw_cts_select(be->p_cat_nthw, index + i);
			cat_nthw_cts_cat_a(be->p_cat_nthw, cat->v18.cts[index + i].cat_a);
			cat_nthw_cts_cat_b(be->p_cat_nthw, cat->v18.cts[index + i].cat_b);
			cat_nthw_cts_flush(be->p_cat_nthw);
		}
	}

	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
	return 0;
}
616 
/*
 * Flush 'cnt' CAT COT (color table) records starting at 'cat_func'.
 * The v18 layout is shared by v18 and v21.  Other versions are ignored.
 * Always returns 0.
 */
static int cat_cot_flush(void *be_dev, const struct cat_func_s *cat, int cat_func, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);

	if (cat->ver == 18 || cat->ver == 21) {
		cat_nthw_cot_cnt(be->p_cat_nthw, 1);

		for (int i = 0; i < cnt; i++) {
			cat_nthw_cot_select(be->p_cat_nthw, cat_func + i);
			cat_nthw_cot_color(be->p_cat_nthw, cat->v18.cot[cat_func + i].color);
			cat_nthw_cot_km(be->p_cat_nthw, cat->v18.cot[cat_func + i].km);
			cat_nthw_cot_flush(be->p_cat_nthw);
		}
	}

	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
	return 0;
}
636 
/*
 * Flush 'cnt' CAT CCT (color conversion table) records starting at 'index'.
 * The v18 layout is shared by v18 and v21.  Other versions are ignored.
 * Always returns 0.
 */
static int cat_cct_flush(void *be_dev, const struct cat_func_s *cat, int index, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);

	if (cat->ver == 18 || cat->ver == 21) {
		cat_nthw_cct_cnt(be->p_cat_nthw, 1);

		for (int i = 0; i < cnt; i++) {
			cat_nthw_cct_select(be->p_cat_nthw, index + i);
			cat_nthw_cct_color(be->p_cat_nthw, cat->v18.cct[index + i].color);
			cat_nthw_cct_km(be->p_cat_nthw, cat->v18.cct[index + i].km);
			cat_nthw_cct_flush(be->p_cat_nthw);
		}
	}

	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
	return 0;
}
656 
/*
 * Flush 'cnt' CAT EXO (extractor offset) records starting at 'ext_index'.
 * The v18 layout is shared by v18 and v21.  Other versions are ignored.
 * Always returns 0.
 */
static int cat_exo_flush(void *be_dev, const struct cat_func_s *cat, int ext_index, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);

	if (cat->ver == 18 || cat->ver == 21) {
		cat_nthw_exo_cnt(be->p_cat_nthw, 1);

		for (int i = 0; i < cnt; i++) {
			cat_nthw_exo_select(be->p_cat_nthw, ext_index + i);
			cat_nthw_exo_dyn(be->p_cat_nthw, cat->v18.exo[ext_index + i].dyn);
			cat_nthw_exo_ofs(be->p_cat_nthw, cat->v18.exo[ext_index + i].ofs);
			cat_nthw_exo_flush(be->p_cat_nthw);
		}
	}

	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
	return 0;
}
676 
/*
 * Flush 'cnt' CAT RCK records starting at 'index'.  The v18 layout is
 * shared by v18 and v21.  Other versions are ignored.  Always returns 0.
 */
static int cat_rck_flush(void *be_dev, const struct cat_func_s *cat, int index, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);

	if (cat->ver == 18 || cat->ver == 21) {
		cat_nthw_rck_cnt(be->p_cat_nthw, 1);

		for (int i = 0; i < cnt; i++) {
			cat_nthw_rck_select(be->p_cat_nthw, index + i);
			cat_nthw_rck_data(be->p_cat_nthw, cat->v18.rck[index + i].rck_data);
			cat_nthw_rck_flush(be->p_cat_nthw);
		}
	}

	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
	return 0;
}
695 
/*
 * Flush 'cnt' CAT LEN (length check) records starting at 'len_index'.
 * The v18 layout is shared by v18 and v21.  Other versions are ignored.
 * Always returns 0.
 */
static int cat_len_flush(void *be_dev, const struct cat_func_s *cat, int len_index, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);

	if (cat->ver == 18 || cat->ver == 21) {
		cat_nthw_len_cnt(be->p_cat_nthw, 1);

		for (int i = 0; i < cnt; i++) {
			cat_nthw_len_select(be->p_cat_nthw, len_index + i);
			cat_nthw_len_lower(be->p_cat_nthw, cat->v18.len[len_index + i].lower);
			cat_nthw_len_upper(be->p_cat_nthw, cat->v18.len[len_index + i].upper);
			cat_nthw_len_dyn1(be->p_cat_nthw, cat->v18.len[len_index + i].dyn1);
			cat_nthw_len_dyn2(be->p_cat_nthw, cat->v18.len[len_index + i].dyn2);
			cat_nthw_len_inv(be->p_cat_nthw, cat->v18.len[len_index + i].inv);
			cat_nthw_len_flush(be->p_cat_nthw);
		}
	}

	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
	return 0;
}
718 
/*
 * Flush 'cnt' CAT KCC CAM records starting at 'len_index'.  The v18 layout
 * is shared by v18 and v21.  Other versions are ignored.  Always returns 0.
 */
static int cat_kcc_flush(void *be_dev, const struct cat_func_s *cat, int len_index, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);

	if (cat->ver == 18 || cat->ver == 21) {
		cat_nthw_kcc_cnt(be->p_cat_nthw, 1);

		for (int i = 0; i < cnt; i++) {
			cat_nthw_kcc_select(be->p_cat_nthw, len_index + i);
			cat_nthw_kcc_key(be->p_cat_nthw, cat->v18.kcc_cam[len_index + i].key);
			cat_nthw_kcc_category(be->p_cat_nthw,
				cat->v18.kcc_cam[len_index + i].category);
			cat_nthw_kcc_id(be->p_cat_nthw, cat->v18.kcc_cam[len_index + i].id);
			cat_nthw_kcc_flush(be->p_cat_nthw);
		}
	}

	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
	return 0;
}
740 
741 /*
742  * KM
743  */
744 
745 static bool km_get_present(void *be_dev)
746 {
747 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
748 	return be->p_km_nthw != NULL;
749 }
750 
751 static uint32_t km_get_version(void *be_dev)
752 {
753 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
754 	return (uint32_t)((nthw_module_get_major_version(be->p_km_nthw->m_km) << 16) |
755 			(nthw_module_get_minor_version(be->p_km_nthw->m_km) & 0xffff));
756 }
757 
/*
 * Flush 'cnt' KM RCP (recipe) records, starting at recipe 'category', from
 * the shadow state to hardware.  Each recipe is written field by field and
 * committed with km_nthw_rcp_flush().  Only KM version 7 is handled; other
 * versions are silently ignored.  Always returns 0.
 */
static int km_rcp_flush(void *be_dev, const struct km_func_s *km, int category, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;

	CHECK_DEBUG_ON(be, km, be->p_km_nthw);

	if (km->ver == 7) {
		km_nthw_rcp_cnt(be->p_km_nthw, 1);

		for (int i = 0; i < cnt; i++) {
			km_nthw_rcp_select(be->p_km_nthw, category + i);
			/* key word/double-word extractor configuration */
			km_nthw_rcp_qw0_dyn(be->p_km_nthw, km->v7.rcp[category + i].qw0_dyn);
			km_nthw_rcp_qw0_ofs(be->p_km_nthw, km->v7.rcp[category + i].qw0_ofs);
			km_nthw_rcp_qw0_sel_a(be->p_km_nthw, km->v7.rcp[category + i].qw0_sel_a);
			km_nthw_rcp_qw0_sel_b(be->p_km_nthw, km->v7.rcp[category + i].qw0_sel_b);
			km_nthw_rcp_qw4_dyn(be->p_km_nthw, km->v7.rcp[category + i].qw4_dyn);
			km_nthw_rcp_qw4_ofs(be->p_km_nthw, km->v7.rcp[category + i].qw4_ofs);
			km_nthw_rcp_qw4_sel_a(be->p_km_nthw, km->v7.rcp[category + i].qw4_sel_a);
			km_nthw_rcp_qw4_sel_b(be->p_km_nthw, km->v7.rcp[category + i].qw4_sel_b);
			km_nthw_rcp_dw8_dyn(be->p_km_nthw, km->v7.rcp[category + i].dw8_dyn);
			km_nthw_rcp_dw8_ofs(be->p_km_nthw, km->v7.rcp[category + i].dw8_ofs);
			km_nthw_rcp_dw8_sel_a(be->p_km_nthw, km->v7.rcp[category + i].dw8_sel_a);
			km_nthw_rcp_dw8_sel_b(be->p_km_nthw, km->v7.rcp[category + i].dw8_sel_b);
			km_nthw_rcp_dw10_dyn(be->p_km_nthw, km->v7.rcp[category + i].dw10_dyn);
			km_nthw_rcp_dw10_ofs(be->p_km_nthw, km->v7.rcp[category + i].dw10_ofs);
			km_nthw_rcp_dw10_sel_a(be->p_km_nthw, km->v7.rcp[category + i].dw10_sel_a);
			km_nthw_rcp_dw10_sel_b(be->p_km_nthw, km->v7.rcp[category + i].dw10_sel_b);
			km_nthw_rcp_swx_cch(be->p_km_nthw, km->v7.rcp[category + i].swx_cch);
			km_nthw_rcp_swx_sel_a(be->p_km_nthw, km->v7.rcp[category + i].swx_sel_a);
			km_nthw_rcp_swx_sel_b(be->p_km_nthw, km->v7.rcp[category + i].swx_sel_b);
			km_nthw_rcp_mask_da(be->p_km_nthw, km->v7.rcp[category + i].mask_d_a);
			km_nthw_rcp_mask_b(be->p_km_nthw, km->v7.rcp[category + i].mask_b);
			km_nthw_rcp_dual(be->p_km_nthw, km->v7.rcp[category + i].dual);
			km_nthw_rcp_paired(be->p_km_nthw, km->v7.rcp[category + i].paired);
			km_nthw_rcp_el_a(be->p_km_nthw, km->v7.rcp[category + i].el_a);
			km_nthw_rcp_el_b(be->p_km_nthw, km->v7.rcp[category + i].el_b);
			km_nthw_rcp_info_a(be->p_km_nthw, km->v7.rcp[category + i].info_a);
			km_nthw_rcp_info_b(be->p_km_nthw, km->v7.rcp[category + i].info_b);
			km_nthw_rcp_ftm_a(be->p_km_nthw, km->v7.rcp[category + i].ftm_a);
			km_nthw_rcp_ftm_b(be->p_km_nthw, km->v7.rcp[category + i].ftm_b);
			km_nthw_rcp_bank_a(be->p_km_nthw, km->v7.rcp[category + i].bank_a);
			km_nthw_rcp_bank_b(be->p_km_nthw, km->v7.rcp[category + i].bank_b);
			km_nthw_rcp_kl_a(be->p_km_nthw, km->v7.rcp[category + i].kl_a);
			km_nthw_rcp_kl_b(be->p_km_nthw, km->v7.rcp[category + i].kl_b);
			km_nthw_rcp_keyway_a(be->p_km_nthw, km->v7.rcp[category + i].keyway_a);
			km_nthw_rcp_keyway_b(be->p_km_nthw, km->v7.rcp[category + i].keyway_b);
			km_nthw_rcp_synergy_mode(be->p_km_nthw,
				km->v7.rcp[category + i].synergy_mode);
			km_nthw_rcp_dw0_b_dyn(be->p_km_nthw, km->v7.rcp[category + i].dw0_b_dyn);
			km_nthw_rcp_dw0_b_ofs(be->p_km_nthw, km->v7.rcp[category + i].dw0_b_ofs);
			km_nthw_rcp_dw2_b_dyn(be->p_km_nthw, km->v7.rcp[category + i].dw2_b_dyn);
			km_nthw_rcp_dw2_b_ofs(be->p_km_nthw, km->v7.rcp[category + i].dw2_b_ofs);
			km_nthw_rcp_sw4_b_dyn(be->p_km_nthw, km->v7.rcp[category + i].sw4_b_dyn);
			km_nthw_rcp_sw4_b_ofs(be->p_km_nthw, km->v7.rcp[category + i].sw4_b_ofs);
			km_nthw_rcp_sw5_b_dyn(be->p_km_nthw, km->v7.rcp[category + i].sw5_b_dyn);
			km_nthw_rcp_sw5_b_ofs(be->p_km_nthw, km->v7.rcp[category + i].sw5_b_ofs);
			km_nthw_rcp_flush(be->p_km_nthw);
		}
	}

	CHECK_DEBUG_OFF(km, be->p_km_nthw);
	return 0;
}
821 
822 static int km_cam_flush(void *be_dev, const struct km_func_s *km, int bank, int record, int cnt)
823 {
824 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
825 	CHECK_DEBUG_ON(be, km, be->p_km_nthw);
826 
827 	if (km->ver == 7) {
828 		km_nthw_cam_cnt(be->p_km_nthw, 1);
829 
830 		for (int i = 0; i < cnt; i++) {
831 			km_nthw_cam_select(be->p_km_nthw, (bank << 11) + record + i);
832 			km_nthw_cam_w0(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w0);
833 			km_nthw_cam_w1(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w1);
834 			km_nthw_cam_w2(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w2);
835 			km_nthw_cam_w3(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w3);
836 			km_nthw_cam_w4(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w4);
837 			km_nthw_cam_w5(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w5);
838 			km_nthw_cam_ft0(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft0);
839 			km_nthw_cam_ft1(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft1);
840 			km_nthw_cam_ft2(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft2);
841 			km_nthw_cam_ft3(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft3);
842 			km_nthw_cam_ft4(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft4);
843 			km_nthw_cam_ft5(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft5);
844 			km_nthw_cam_flush(be->p_km_nthw);
845 		}
846 	}
847 
848 	CHECK_DEBUG_OFF(km, be->p_km_nthw);
849 	return 0;
850 }
851 
852 static int km_tcam_flush(void *be_dev, const struct km_func_s *km, int bank, int byte, int value,
853 	int cnt)
854 {
855 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
856 	CHECK_DEBUG_ON(be, km, be->p_km_nthw);
857 
858 	if (km->ver == 7) {
859 		int start_idx = bank * 4 * 256 + byte * 256 + value;
860 		km_nthw_tcam_cnt(be->p_km_nthw, 1);
861 
862 		for (int i = 0; i < cnt; i++) {
863 			if (km->v7.tcam[start_idx + i].dirty) {
864 				km_nthw_tcam_select(be->p_km_nthw, start_idx + i);
865 				km_nthw_tcam_t(be->p_km_nthw, km->v7.tcam[start_idx + i].t);
866 				km_nthw_tcam_flush(be->p_km_nthw);
867 				km->v7.tcam[start_idx + i].dirty = 0;
868 			}
869 		}
870 	}
871 
872 	CHECK_DEBUG_OFF(km, be->p_km_nthw);
873 	return 0;
874 }
875 
876 /*
877  * bank is the TCAM bank, index is the index within the bank (0..71)
878  */
879 static int km_tci_flush(void *be_dev, const struct km_func_s *km, int bank, int index, int cnt)
880 {
881 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
882 	CHECK_DEBUG_ON(be, km, be->p_km_nthw);
883 
884 	if (km->ver == 7) {
885 		/* TCAM bank width in version 3 = 72 */
886 		km_nthw_tci_cnt(be->p_km_nthw, 1);
887 
888 		for (int i = 0; i < cnt; i++) {
889 			km_nthw_tci_select(be->p_km_nthw, bank * 72 + index + i);
890 			km_nthw_tci_color(be->p_km_nthw, km->v7.tci[bank * 72 + index + i].color);
891 			km_nthw_tci_ft(be->p_km_nthw, km->v7.tci[bank * 72 + index + i].ft);
892 			km_nthw_tci_flush(be->p_km_nthw);
893 		}
894 	}
895 
896 	CHECK_DEBUG_OFF(km, be->p_km_nthw);
897 	return 0;
898 }
899 
900 /*
901  * bank is the TCAM bank, index is the index within the bank (0..71)
902  */
903 static int km_tcq_flush(void *be_dev, const struct km_func_s *km, int bank, int index, int cnt)
904 {
905 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
906 	CHECK_DEBUG_ON(be, km, be->p_km_nthw);
907 
908 	if (km->ver == 7) {
909 		/* TCAM bank width in version 3 = 72 */
910 		km_nthw_tcq_cnt(be->p_km_nthw, 1);
911 
912 		for (int i = 0; i < cnt; i++) {
913 			/* adr = lover 4 bits = bank, upper 7 bits = index */
914 			km_nthw_tcq_select(be->p_km_nthw, bank + (index << 4) + i);
915 			km_nthw_tcq_bank_mask(be->p_km_nthw,
916 				km->v7.tcq[bank + (index << 4) + i].bank_mask);
917 			km_nthw_tcq_qual(be->p_km_nthw, km->v7.tcq[bank + (index << 4) + i].qual);
918 			km_nthw_tcq_flush(be->p_km_nthw);
919 		}
920 	}
921 
922 	CHECK_DEBUG_OFF(km, be->p_km_nthw);
923 	return 0;
924 }
925 
926 /*
927  * FLM
928  */
929 
930 static bool flm_get_present(void *be_dev)
931 {
932 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
933 	return be->p_flm_nthw != NULL;
934 }
935 
936 static uint32_t flm_get_version(void *be_dev)
937 {
938 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
939 	return (uint32_t)((nthw_module_get_major_version(be->p_flm_nthw->m_flm) << 16) |
940 			(nthw_module_get_minor_version(be->p_flm_nthw->m_flm) & 0xffff));
941 }
942 
943 static int flm_control_flush(void *be_dev, const struct flm_func_s *flm)
944 {
945 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
946 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
947 
948 	if (flm->ver >= 25) {
949 		flm_nthw_control_enable(be->p_flm_nthw, flm->v25.control->enable);
950 		flm_nthw_control_init(be->p_flm_nthw, flm->v25.control->init);
951 		flm_nthw_control_lds(be->p_flm_nthw, flm->v25.control->lds);
952 		flm_nthw_control_lfs(be->p_flm_nthw, flm->v25.control->lfs);
953 		flm_nthw_control_lis(be->p_flm_nthw, flm->v25.control->lis);
954 		flm_nthw_control_uds(be->p_flm_nthw, flm->v25.control->uds);
955 		flm_nthw_control_uis(be->p_flm_nthw, flm->v25.control->uis);
956 		flm_nthw_control_rds(be->p_flm_nthw, flm->v25.control->rds);
957 		flm_nthw_control_ris(be->p_flm_nthw, flm->v25.control->ris);
958 		flm_nthw_control_pds(be->p_flm_nthw, flm->v25.control->pds);
959 		flm_nthw_control_pis(be->p_flm_nthw, flm->v25.control->pis);
960 		flm_nthw_control_crcwr(be->p_flm_nthw, flm->v25.control->crcwr);
961 		flm_nthw_control_crcrd(be->p_flm_nthw, flm->v25.control->crcrd);
962 		flm_nthw_control_rbl(be->p_flm_nthw, flm->v25.control->rbl);
963 		flm_nthw_control_eab(be->p_flm_nthw, flm->v25.control->eab);
964 		flm_nthw_control_split_sdram_usage(be->p_flm_nthw,
965 			flm->v25.control->split_sdram_usage);
966 		flm_nthw_control_flush(be->p_flm_nthw);
967 	}
968 
969 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
970 	return 0;
971 }
972 
973 static int flm_status_flush(void *be_dev, const struct flm_func_s *flm)
974 {
975 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
976 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
977 
978 	if (flm->ver >= 25) {
979 		/* CALIBDONE, INITDONE, IDLE, and EFT_BP is read only */
980 		flm_nthw_status_critical(be->p_flm_nthw, &flm->v25.status->critical, 0);
981 		flm_nthw_status_panic(be->p_flm_nthw, &flm->v25.status->panic, 0);
982 		flm_nthw_status_crcerr(be->p_flm_nthw, &flm->v25.status->crcerr, 0);
983 		flm_nthw_status_cache_buf_crit(be->p_flm_nthw,
984 			&flm->v25.status->cache_buf_critical, 0);
985 		flm_nthw_status_flush(be->p_flm_nthw);
986 	}
987 
988 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
989 	return 0;
990 }
991 
992 static int flm_status_update(void *be_dev, const struct flm_func_s *flm)
993 {
994 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
995 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
996 
997 	if (flm->ver >= 25) {
998 		flm_nthw_status_update(be->p_flm_nthw);
999 		flm_nthw_status_calib_success(be->p_flm_nthw, &flm->v25.status->calib_success, 1);
1000 		flm_nthw_status_calib_fail(be->p_flm_nthw, &flm->v25.status->calib_fail, 1);
1001 		flm_nthw_status_initdone(be->p_flm_nthw, &flm->v25.status->initdone, 1);
1002 		flm_nthw_status_idle(be->p_flm_nthw, &flm->v25.status->idle, 1);
1003 		flm_nthw_status_critical(be->p_flm_nthw, &flm->v25.status->critical, 1);
1004 		flm_nthw_status_panic(be->p_flm_nthw, &flm->v25.status->panic, 1);
1005 		flm_nthw_status_crcerr(be->p_flm_nthw, &flm->v25.status->crcerr, 1);
1006 		flm_nthw_status_eft_bp(be->p_flm_nthw, &flm->v25.status->eft_bp, 1);
1007 		flm_nthw_status_cache_buf_crit(be->p_flm_nthw,
1008 			&flm->v25.status->cache_buf_critical, 1);
1009 	}
1010 
1011 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1012 	return 0;
1013 }
1014 
1015 static int flm_scan_flush(void *be_dev, const struct flm_func_s *flm)
1016 {
1017 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1018 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1019 
1020 	if (flm->ver >= 25) {
1021 		flm_nthw_scan_i(be->p_flm_nthw, flm->v25.scan->i);
1022 		flm_nthw_scan_flush(be->p_flm_nthw);
1023 	}
1024 
1025 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1026 	return 0;
1027 }
1028 
1029 static int flm_load_bin_flush(void *be_dev, const struct flm_func_s *flm)
1030 {
1031 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1032 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1033 
1034 	if (flm->ver >= 25) {
1035 		flm_nthw_load_bin(be->p_flm_nthw, flm->v25.load_bin->bin);
1036 		flm_nthw_load_bin_flush(be->p_flm_nthw);
1037 	}
1038 
1039 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1040 	return 0;
1041 }
1042 
1043 static int flm_prio_flush(void *be_dev, const struct flm_func_s *flm)
1044 {
1045 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1046 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1047 
1048 	if (flm->ver >= 25) {
1049 		flm_nthw_prio_limit0(be->p_flm_nthw, flm->v25.prio->limit0);
1050 		flm_nthw_prio_ft0(be->p_flm_nthw, flm->v25.prio->ft0);
1051 		flm_nthw_prio_limit1(be->p_flm_nthw, flm->v25.prio->limit1);
1052 		flm_nthw_prio_ft1(be->p_flm_nthw, flm->v25.prio->ft1);
1053 		flm_nthw_prio_limit2(be->p_flm_nthw, flm->v25.prio->limit2);
1054 		flm_nthw_prio_ft2(be->p_flm_nthw, flm->v25.prio->ft2);
1055 		flm_nthw_prio_limit3(be->p_flm_nthw, flm->v25.prio->limit3);
1056 		flm_nthw_prio_ft3(be->p_flm_nthw, flm->v25.prio->ft3);
1057 		flm_nthw_prio_flush(be->p_flm_nthw);
1058 	}
1059 
1060 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1061 	return 0;
1062 }
1063 
1064 static int flm_pst_flush(void *be_dev, const struct flm_func_s *flm, int index, int cnt)
1065 {
1066 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1067 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1068 
1069 	if (flm->ver >= 25) {
1070 		flm_nthw_pst_cnt(be->p_flm_nthw, 1);
1071 
1072 		for (int i = 0; i < cnt; i++) {
1073 			flm_nthw_pst_select(be->p_flm_nthw, index + i);
1074 			flm_nthw_pst_bp(be->p_flm_nthw, flm->v25.pst[index + i].bp);
1075 			flm_nthw_pst_pp(be->p_flm_nthw, flm->v25.pst[index + i].pp);
1076 			flm_nthw_pst_tp(be->p_flm_nthw, flm->v25.pst[index + i].tp);
1077 			flm_nthw_pst_flush(be->p_flm_nthw);
1078 		}
1079 	}
1080 
1081 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1082 	return 0;
1083 }
1084 
1085 static int flm_rcp_flush(void *be_dev, const struct flm_func_s *flm, int index, int cnt)
1086 {
1087 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1088 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1089 
1090 	if (flm->ver >= 25) {
1091 		flm_nthw_rcp_cnt(be->p_flm_nthw, 1);
1092 
1093 		for (int i = 0; i < cnt; i++) {
1094 			flm_nthw_rcp_select(be->p_flm_nthw, index + i);
1095 			flm_nthw_rcp_lookup(be->p_flm_nthw, flm->v25.rcp[index + i].lookup);
1096 			flm_nthw_rcp_qw0_dyn(be->p_flm_nthw, flm->v25.rcp[index + i].qw0_dyn);
1097 			flm_nthw_rcp_qw0_ofs(be->p_flm_nthw, flm->v25.rcp[index + i].qw0_ofs);
1098 			flm_nthw_rcp_qw0_sel(be->p_flm_nthw, flm->v25.rcp[index + i].qw0_sel);
1099 			flm_nthw_rcp_qw4_dyn(be->p_flm_nthw, flm->v25.rcp[index + i].qw4_dyn);
1100 			flm_nthw_rcp_qw4_ofs(be->p_flm_nthw, flm->v25.rcp[index + i].qw4_ofs);
1101 			flm_nthw_rcp_sw8_dyn(be->p_flm_nthw, flm->v25.rcp[index + i].sw8_dyn);
1102 			flm_nthw_rcp_sw8_ofs(be->p_flm_nthw, flm->v25.rcp[index + i].sw8_ofs);
1103 			flm_nthw_rcp_sw8_sel(be->p_flm_nthw, flm->v25.rcp[index + i].sw8_sel);
1104 			flm_nthw_rcp_sw9_dyn(be->p_flm_nthw, flm->v25.rcp[index + i].sw9_dyn);
1105 			flm_nthw_rcp_sw9_ofs(be->p_flm_nthw, flm->v25.rcp[index + i].sw9_ofs);
1106 			flm_nthw_rcp_mask(be->p_flm_nthw, flm->v25.rcp[index + i].mask);
1107 			flm_nthw_rcp_kid(be->p_flm_nthw, flm->v25.rcp[index + i].kid);
1108 			flm_nthw_rcp_opn(be->p_flm_nthw, flm->v25.rcp[index + i].opn);
1109 			flm_nthw_rcp_ipn(be->p_flm_nthw, flm->v25.rcp[index + i].ipn);
1110 			flm_nthw_rcp_byt_dyn(be->p_flm_nthw, flm->v25.rcp[index + i].byt_dyn);
1111 			flm_nthw_rcp_byt_ofs(be->p_flm_nthw, flm->v25.rcp[index + i].byt_ofs);
1112 			flm_nthw_rcp_txplm(be->p_flm_nthw, flm->v25.rcp[index + i].txplm);
1113 			flm_nthw_rcp_auto_ipv4_mask(be->p_flm_nthw,
1114 				flm->v25.rcp[index + i].auto_ipv4_mask);
1115 			flm_nthw_rcp_flush(be->p_flm_nthw);
1116 		}
1117 	}
1118 
1119 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1120 	return 0;
1121 }
1122 
1123 static int flm_scrub_flush(void *be_dev, const struct flm_func_s *flm, int index, int cnt)
1124 {
1125 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1126 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1127 
1128 	if (flm->ver >= 25) {
1129 		flm_nthw_scrub_cnt(be->p_flm_nthw, 1);
1130 
1131 		for (int i = 0; i < cnt; i++) {
1132 			flm_nthw_scrub_select(be->p_flm_nthw, index + i);
1133 			flm_nthw_scrub_t(be->p_flm_nthw, flm->v25.scrub[index + i].t);
1134 			flm_nthw_scrub_r(be->p_flm_nthw, flm->v25.scrub[index + i].r);
1135 			flm_nthw_scrub_del(be->p_flm_nthw, flm->v25.scrub[index + i].del);
1136 			flm_nthw_scrub_inf(be->p_flm_nthw, flm->v25.scrub[index + i].inf);
1137 			flm_nthw_scrub_flush(be->p_flm_nthw);
1138 		}
1139 	}
1140 
1141 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1142 	return 0;
1143 }
1144 
1145 static int flm_buf_ctrl_update(void *be_dev, const struct flm_func_s *flm)
1146 {
1147 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1148 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1149 
1150 	if (flm->ver >= 25) {
1151 		flm_nthw_buf_ctrl_update(be->p_flm_nthw,
1152 			&flm->v25.buf_ctrl->lrn_free,
1153 			&flm->v25.buf_ctrl->inf_avail,
1154 			&flm->v25.buf_ctrl->sta_avail);
1155 	}
1156 
1157 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1158 	return 0;
1159 }
1160 
1161 static int flm_stat_update(void *be_dev, const struct flm_func_s *flm)
1162 {
1163 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1164 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1165 
1166 	if (flm->ver >= 25) {
1167 		flm_nthw_stat_lrn_done_update(be->p_flm_nthw);
1168 		flm_nthw_stat_lrn_ignore_update(be->p_flm_nthw);
1169 		flm_nthw_stat_lrn_fail_update(be->p_flm_nthw);
1170 		flm_nthw_stat_unl_done_update(be->p_flm_nthw);
1171 		flm_nthw_stat_unl_ignore_update(be->p_flm_nthw);
1172 		flm_nthw_stat_rel_done_update(be->p_flm_nthw);
1173 		flm_nthw_stat_rel_ignore_update(be->p_flm_nthw);
1174 		flm_nthw_stat_aul_done_update(be->p_flm_nthw);
1175 		flm_nthw_stat_aul_ignore_update(be->p_flm_nthw);
1176 		flm_nthw_stat_aul_fail_update(be->p_flm_nthw);
1177 		flm_nthw_stat_tul_done_update(be->p_flm_nthw);
1178 		flm_nthw_stat_flows_update(be->p_flm_nthw);
1179 		flm_nthw_load_lps_update(be->p_flm_nthw);
1180 		flm_nthw_load_aps_update(be->p_flm_nthw);
1181 
1182 		flm_nthw_stat_lrn_done_cnt(be->p_flm_nthw, &flm->v25.lrn_done->cnt, 1);
1183 		flm_nthw_stat_lrn_ignore_cnt(be->p_flm_nthw, &flm->v25.lrn_ignore->cnt, 1);
1184 		flm_nthw_stat_lrn_fail_cnt(be->p_flm_nthw, &flm->v25.lrn_fail->cnt, 1);
1185 		flm_nthw_stat_unl_done_cnt(be->p_flm_nthw, &flm->v25.unl_done->cnt, 1);
1186 		flm_nthw_stat_unl_ignore_cnt(be->p_flm_nthw, &flm->v25.unl_ignore->cnt, 1);
1187 		flm_nthw_stat_rel_done_cnt(be->p_flm_nthw, &flm->v25.rel_done->cnt, 1);
1188 		flm_nthw_stat_rel_ignore_cnt(be->p_flm_nthw, &flm->v25.rel_ignore->cnt, 1);
1189 		flm_nthw_stat_aul_done_cnt(be->p_flm_nthw, &flm->v25.aul_done->cnt, 1);
1190 		flm_nthw_stat_aul_ignore_cnt(be->p_flm_nthw, &flm->v25.aul_ignore->cnt, 1);
1191 		flm_nthw_stat_aul_fail_cnt(be->p_flm_nthw, &flm->v25.aul_fail->cnt, 1);
1192 		flm_nthw_stat_tul_done_cnt(be->p_flm_nthw, &flm->v25.tul_done->cnt, 1);
1193 		flm_nthw_stat_flows_cnt(be->p_flm_nthw, &flm->v25.flows->cnt, 1);
1194 
1195 		flm_nthw_stat_prb_done_update(be->p_flm_nthw);
1196 		flm_nthw_stat_prb_ignore_update(be->p_flm_nthw);
1197 		flm_nthw_stat_prb_done_cnt(be->p_flm_nthw, &flm->v25.prb_done->cnt, 1);
1198 		flm_nthw_stat_prb_ignore_cnt(be->p_flm_nthw, &flm->v25.prb_ignore->cnt, 1);
1199 
1200 		flm_nthw_load_lps_cnt(be->p_flm_nthw, &flm->v25.load_lps->lps, 1);
1201 		flm_nthw_load_aps_cnt(be->p_flm_nthw, &flm->v25.load_aps->aps, 1);
1202 	}
1203 
1204 	if (flm->ver >= 25) {
1205 		flm_nthw_stat_sta_done_update(be->p_flm_nthw);
1206 		flm_nthw_stat_inf_done_update(be->p_flm_nthw);
1207 		flm_nthw_stat_inf_skip_update(be->p_flm_nthw);
1208 		flm_nthw_stat_pck_hit_update(be->p_flm_nthw);
1209 		flm_nthw_stat_pck_miss_update(be->p_flm_nthw);
1210 		flm_nthw_stat_pck_unh_update(be->p_flm_nthw);
1211 		flm_nthw_stat_pck_dis_update(be->p_flm_nthw);
1212 		flm_nthw_stat_csh_hit_update(be->p_flm_nthw);
1213 		flm_nthw_stat_csh_miss_update(be->p_flm_nthw);
1214 		flm_nthw_stat_csh_unh_update(be->p_flm_nthw);
1215 		flm_nthw_stat_cuc_start_update(be->p_flm_nthw);
1216 		flm_nthw_stat_cuc_move_update(be->p_flm_nthw);
1217 
1218 		flm_nthw_stat_sta_done_cnt(be->p_flm_nthw, &flm->v25.sta_done->cnt, 1);
1219 		flm_nthw_stat_inf_done_cnt(be->p_flm_nthw, &flm->v25.inf_done->cnt, 1);
1220 		flm_nthw_stat_inf_skip_cnt(be->p_flm_nthw, &flm->v25.inf_skip->cnt, 1);
1221 		flm_nthw_stat_pck_hit_cnt(be->p_flm_nthw, &flm->v25.pck_hit->cnt, 1);
1222 		flm_nthw_stat_pck_miss_cnt(be->p_flm_nthw, &flm->v25.pck_miss->cnt, 1);
1223 		flm_nthw_stat_pck_unh_cnt(be->p_flm_nthw, &flm->v25.pck_unh->cnt, 1);
1224 		flm_nthw_stat_pck_dis_cnt(be->p_flm_nthw, &flm->v25.pck_dis->cnt, 1);
1225 		flm_nthw_stat_csh_hit_cnt(be->p_flm_nthw, &flm->v25.csh_hit->cnt, 1);
1226 		flm_nthw_stat_csh_miss_cnt(be->p_flm_nthw, &flm->v25.csh_miss->cnt, 1);
1227 		flm_nthw_stat_csh_unh_cnt(be->p_flm_nthw, &flm->v25.csh_unh->cnt, 1);
1228 		flm_nthw_stat_cuc_start_cnt(be->p_flm_nthw, &flm->v25.cuc_start->cnt, 1);
1229 		flm_nthw_stat_cuc_move_cnt(be->p_flm_nthw, &flm->v25.cuc_move->cnt, 1);
1230 	}
1231 
1232 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1233 	return 0;
1234 }
1235 
1236 static int flm_lrn_data_flush(void *be_dev, const struct flm_func_s *flm, const uint32_t *lrn_data,
1237 	uint32_t records, uint32_t *handled_records,
1238 	uint32_t words_per_record, uint32_t *inf_word_cnt,
1239 	uint32_t *sta_word_cnt)
1240 {
1241 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1242 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1243 
1244 	int ret = flm_nthw_lrn_data_flush(be->p_flm_nthw, lrn_data, records, words_per_record,
1245 			handled_records, &flm->v25.buf_ctrl->lrn_free,
1246 			&flm->v25.buf_ctrl->inf_avail,
1247 			&flm->v25.buf_ctrl->sta_avail);
1248 
1249 	*inf_word_cnt = flm->v25.buf_ctrl->inf_avail;
1250 	*sta_word_cnt = flm->v25.buf_ctrl->sta_avail;
1251 
1252 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1253 	return ret;
1254 }
1255 
1256 static int flm_inf_sta_data_update(void *be_dev, const struct flm_func_s *flm, uint32_t *inf_data,
1257 	uint32_t inf_size, uint32_t *inf_word_cnt, uint32_t *sta_data,
1258 	uint32_t sta_size, uint32_t *sta_word_cnt)
1259 {
1260 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1261 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1262 
1263 	int ret = flm_nthw_inf_sta_data_update(be->p_flm_nthw, inf_data, inf_size, sta_data,
1264 			sta_size, &flm->v25.buf_ctrl->lrn_free,
1265 			&flm->v25.buf_ctrl->inf_avail,
1266 			&flm->v25.buf_ctrl->sta_avail);
1267 
1268 	*inf_word_cnt = flm->v25.buf_ctrl->inf_avail;
1269 	*sta_word_cnt = flm->v25.buf_ctrl->sta_avail;
1270 
1271 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1272 	return ret;
1273 }
1274 
1275 /*
1276  * HSH
1277  */
1278 
1279 static bool hsh_get_present(void *be_dev)
1280 {
1281 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1282 	return be->p_hsh_nthw != NULL;
1283 }
1284 
1285 static uint32_t hsh_get_version(void *be_dev)
1286 {
1287 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1288 	return (uint32_t)((nthw_module_get_major_version(be->p_hsh_nthw->m_hsh) << 16) |
1289 			(nthw_module_get_minor_version(be->p_hsh_nthw->m_hsh) & 0xffff));
1290 }
1291 
1292 static int hsh_rcp_flush(void *be_dev, const struct hsh_func_s *hsh, int category, int cnt)
1293 {
1294 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1295 	CHECK_DEBUG_ON(be, hsh, be->p_hsh_nthw);
1296 
1297 	if (hsh->ver == 5) {
1298 		hsh_nthw_rcp_cnt(be->p_hsh_nthw, 1);
1299 
1300 		for (int i = 0; i < cnt; i++) {
1301 			hsh_nthw_rcp_select(be->p_hsh_nthw, category + i);
1302 			hsh_nthw_rcp_load_dist_type(be->p_hsh_nthw,
1303 				hsh->v5.rcp[category + i].load_dist_type);
1304 			hsh_nthw_rcp_mac_port_mask(be->p_hsh_nthw,
1305 				hsh->v5.rcp[category + i].mac_port_mask);
1306 			hsh_nthw_rcp_sort(be->p_hsh_nthw, hsh->v5.rcp[category + i].sort);
1307 			hsh_nthw_rcp_qw0_pe(be->p_hsh_nthw, hsh->v5.rcp[category + i].qw0_pe);
1308 			hsh_nthw_rcp_qw0_ofs(be->p_hsh_nthw, hsh->v5.rcp[category + i].qw0_ofs);
1309 			hsh_nthw_rcp_qw4_pe(be->p_hsh_nthw, hsh->v5.rcp[category + i].qw4_pe);
1310 			hsh_nthw_rcp_qw4_ofs(be->p_hsh_nthw, hsh->v5.rcp[category + i].qw4_ofs);
1311 			hsh_nthw_rcp_w8_pe(be->p_hsh_nthw, hsh->v5.rcp[category + i].w8_pe);
1312 			hsh_nthw_rcp_w8_ofs(be->p_hsh_nthw, hsh->v5.rcp[category + i].w8_ofs);
1313 			hsh_nthw_rcp_w8_sort(be->p_hsh_nthw, hsh->v5.rcp[category + i].w8_sort);
1314 			hsh_nthw_rcp_w9_pe(be->p_hsh_nthw, hsh->v5.rcp[category + i].w9_pe);
1315 			hsh_nthw_rcp_w9_ofs(be->p_hsh_nthw, hsh->v5.rcp[category + i].w9_ofs);
1316 			hsh_nthw_rcp_w9_sort(be->p_hsh_nthw, hsh->v5.rcp[category + i].w9_sort);
1317 			hsh_nthw_rcp_w9_p(be->p_hsh_nthw, hsh->v5.rcp[category + i].w9_p);
1318 			hsh_nthw_rcp_p_mask(be->p_hsh_nthw, hsh->v5.rcp[category + i].p_mask);
1319 			hsh_nthw_rcp_word_mask(be->p_hsh_nthw,
1320 				hsh->v5.rcp[category + i].word_mask);
1321 			hsh_nthw_rcp_seed(be->p_hsh_nthw, hsh->v5.rcp[category + i].seed);
1322 			hsh_nthw_rcp_tnl_p(be->p_hsh_nthw, hsh->v5.rcp[category + i].tnl_p);
1323 			hsh_nthw_rcp_hsh_valid(be->p_hsh_nthw,
1324 				hsh->v5.rcp[category + i].hsh_valid);
1325 			hsh_nthw_rcp_hsh_type(be->p_hsh_nthw, hsh->v5.rcp[category + i].hsh_type);
1326 			hsh_nthw_rcp_toeplitz(be->p_hsh_nthw, hsh->v5.rcp[category + i].toeplitz);
1327 			hsh_nthw_rcp_k(be->p_hsh_nthw, hsh->v5.rcp[category + i].k);
1328 			hsh_nthw_rcp_auto_ipv4_mask(be->p_hsh_nthw,
1329 				hsh->v5.rcp[category + i].auto_ipv4_mask);
1330 			hsh_nthw_rcp_flush(be->p_hsh_nthw);
1331 		}
1332 	}
1333 
1334 	CHECK_DEBUG_OFF(hsh, be->p_hsh_nthw);
1335 	return 0;
1336 }
1337 
1338 /*
1339  * QSL
1340  */
1341 
1342 static bool qsl_get_present(void *be_dev)
1343 {
1344 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1345 	return be->p_qsl_nthw != NULL;
1346 }
1347 
1348 static uint32_t qsl_get_version(void *be_dev)
1349 {
1350 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1351 	return (uint32_t)((nthw_module_get_major_version(be->p_qsl_nthw->m_qsl) << 16) |
1352 			(nthw_module_get_minor_version(be->p_qsl_nthw->m_qsl) & 0xffff));
1353 }
1354 
1355 static int qsl_rcp_flush(void *be_dev, const struct qsl_func_s *qsl, int category, int cnt)
1356 {
1357 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1358 	CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
1359 
1360 	if (qsl->ver == 7) {
1361 		qsl_nthw_rcp_cnt(be->p_qsl_nthw, 1);
1362 
1363 		for (int i = 0; i < cnt; i++) {
1364 			qsl_nthw_rcp_select(be->p_qsl_nthw, category + i);
1365 			qsl_nthw_rcp_discard(be->p_qsl_nthw, qsl->v7.rcp[category + i].discard);
1366 			qsl_nthw_rcp_drop(be->p_qsl_nthw, qsl->v7.rcp[category + i].drop);
1367 			qsl_nthw_rcp_tbl_lo(be->p_qsl_nthw, qsl->v7.rcp[category + i].tbl_lo);
1368 			qsl_nthw_rcp_tbl_hi(be->p_qsl_nthw, qsl->v7.rcp[category + i].tbl_hi);
1369 			qsl_nthw_rcp_tbl_idx(be->p_qsl_nthw, qsl->v7.rcp[category + i].tbl_idx);
1370 			qsl_nthw_rcp_tbl_msk(be->p_qsl_nthw, qsl->v7.rcp[category + i].tbl_msk);
1371 			qsl_nthw_rcp_lr(be->p_qsl_nthw, qsl->v7.rcp[category + i].lr);
1372 			qsl_nthw_rcp_tsa(be->p_qsl_nthw, qsl->v7.rcp[category + i].tsa);
1373 			qsl_nthw_rcp_vli(be->p_qsl_nthw, qsl->v7.rcp[category + i].vli);
1374 			qsl_nthw_rcp_flush(be->p_qsl_nthw);
1375 		}
1376 	}
1377 
1378 	CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
1379 	return 0;
1380 }
1381 
1382 static int qsl_qst_flush(void *be_dev, const struct qsl_func_s *qsl, int entry, int cnt)
1383 {
1384 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1385 	CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
1386 
1387 	if (qsl->ver == 7) {
1388 		qsl_nthw_qst_cnt(be->p_qsl_nthw, 1);
1389 
1390 		for (int i = 0; i < cnt; i++) {
1391 			qsl_nthw_qst_select(be->p_qsl_nthw, entry + i);
1392 			qsl_nthw_qst_queue(be->p_qsl_nthw, qsl->v7.qst[entry + i].queue);
1393 			qsl_nthw_qst_en(be->p_qsl_nthw, qsl->v7.qst[entry + i].en);
1394 
1395 			qsl_nthw_qst_tx_port(be->p_qsl_nthw, qsl->v7.qst[entry + i].tx_port);
1396 			qsl_nthw_qst_lre(be->p_qsl_nthw, qsl->v7.qst[entry + i].lre);
1397 			qsl_nthw_qst_tci(be->p_qsl_nthw, qsl->v7.qst[entry + i].tci);
1398 			qsl_nthw_qst_ven(be->p_qsl_nthw, qsl->v7.qst[entry + i].ven);
1399 			qsl_nthw_qst_flush(be->p_qsl_nthw);
1400 		}
1401 	}
1402 
1403 	CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
1404 	return 0;
1405 }
1406 
1407 static int qsl_qen_flush(void *be_dev, const struct qsl_func_s *qsl, int entry, int cnt)
1408 {
1409 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1410 	CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
1411 
1412 	if (qsl->ver == 7) {
1413 		qsl_nthw_qen_cnt(be->p_qsl_nthw, 1);
1414 
1415 		for (int i = 0; i < cnt; i++) {
1416 			qsl_nthw_qen_select(be->p_qsl_nthw, entry + i);
1417 			qsl_nthw_qen_en(be->p_qsl_nthw, qsl->v7.qen[entry + i].en);
1418 			qsl_nthw_qen_flush(be->p_qsl_nthw);
1419 		}
1420 	}
1421 
1422 	CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
1423 	return 0;
1424 }
1425 
1426 static int qsl_unmq_flush(void *be_dev, const struct qsl_func_s *qsl, int entry, int cnt)
1427 {
1428 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1429 	CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
1430 
1431 	if (qsl->ver == 7) {
1432 		qsl_nthw_unmq_cnt(be->p_qsl_nthw, 1);
1433 
1434 		for (int i = 0; i < cnt; i++) {
1435 			qsl_nthw_unmq_select(be->p_qsl_nthw, entry + i);
1436 			qsl_nthw_unmq_dest_queue(be->p_qsl_nthw,
1437 				qsl->v7.unmq[entry + i].dest_queue);
1438 			qsl_nthw_unmq_en(be->p_qsl_nthw, qsl->v7.unmq[entry + i].en);
1439 			qsl_nthw_unmq_flush(be->p_qsl_nthw);
1440 		}
1441 	}
1442 
1443 	CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
1444 	return 0;
1445 }
1446 
1447 /*
1448  * SLC LR
1449  */
1450 
1451 static bool slc_lr_get_present(void *be_dev)
1452 {
1453 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1454 	return be->p_slc_lr_nthw != NULL;
1455 }
1456 
1457 static uint32_t slc_lr_get_version(void *be_dev)
1458 {
1459 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1460 	return (uint32_t)((nthw_module_get_major_version(be->p_slc_lr_nthw->m_slc_lr) << 16) |
1461 			(nthw_module_get_minor_version(be->p_slc_lr_nthw->m_slc_lr) & 0xffff));
1462 }
1463 
1464 static int slc_lr_rcp_flush(void *be_dev, const struct slc_lr_func_s *slc_lr, int category,
1465 	int cnt)
1466 {
1467 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1468 	CHECK_DEBUG_ON(be, slc_lr, be->p_slc_lr_nthw);
1469 
1470 	if (slc_lr->ver == 2) {
1471 		slc_lr_nthw_rcp_cnt(be->p_slc_lr_nthw, 1);
1472 
1473 		for (int i = 0; i < cnt; i++) {
1474 			slc_lr_nthw_rcp_select(be->p_slc_lr_nthw, category + i);
1475 			slc_lr_nthw_rcp_head_slc_en(be->p_slc_lr_nthw,
1476 				slc_lr->v2.rcp[category + i].head_slc_en);
1477 			slc_lr_nthw_rcp_head_dyn(be->p_slc_lr_nthw,
1478 				slc_lr->v2.rcp[category + i].head_dyn);
1479 			slc_lr_nthw_rcp_head_ofs(be->p_slc_lr_nthw,
1480 				slc_lr->v2.rcp[category + i].head_ofs);
1481 			slc_lr_nthw_rcp_tail_slc_en(be->p_slc_lr_nthw,
1482 				slc_lr->v2.rcp[category + i].tail_slc_en);
1483 			slc_lr_nthw_rcp_tail_dyn(be->p_slc_lr_nthw,
1484 				slc_lr->v2.rcp[category + i].tail_dyn);
1485 			slc_lr_nthw_rcp_tail_ofs(be->p_slc_lr_nthw,
1486 				slc_lr->v2.rcp[category + i].tail_ofs);
1487 			slc_lr_nthw_rcp_pcap(be->p_slc_lr_nthw, slc_lr->v2.rcp[category + i].pcap);
1488 			slc_lr_nthw_rcp_flush(be->p_slc_lr_nthw);
1489 		}
1490 	}
1491 
1492 	CHECK_DEBUG_OFF(slc_lr, be->p_slc_lr_nthw);
1493 	return 0;
1494 }
1495 
1496 /*
1497  * PDB
1498  */
1499 
1500 static bool pdb_get_present(void *be_dev)
1501 {
1502 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1503 	return be->p_pdb_nthw != NULL;
1504 }
1505 
1506 static uint32_t pdb_get_version(void *be_dev)
1507 {
1508 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1509 	return (uint32_t)((nthw_module_get_major_version(be->p_pdb_nthw->m_pdb) << 16) |
1510 			(nthw_module_get_minor_version(be->p_pdb_nthw->m_pdb) & 0xffff));
1511 }
1512 
1513 static int pdb_rcp_flush(void *be_dev, const struct pdb_func_s *pdb, int category, int cnt)
1514 {
1515 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1516 	CHECK_DEBUG_ON(be, pdb, be->p_pdb_nthw);
1517 
1518 	if (pdb->ver == 9) {
1519 		pdb_nthw_rcp_cnt(be->p_pdb_nthw, 1);
1520 
1521 		for (int i = 0; i < cnt; i++) {
1522 			pdb_nthw_rcp_select(be->p_pdb_nthw, category + i);
1523 			pdb_nthw_rcp_descriptor(be->p_pdb_nthw,
1524 				pdb->v9.rcp[category + i].descriptor);
1525 			pdb_nthw_rcp_desc_len(be->p_pdb_nthw, pdb->v9.rcp[category + i].desc_len);
1526 			pdb_nthw_rcp_tx_port(be->p_pdb_nthw, pdb->v9.rcp[category + i].tx_port);
1527 			pdb_nthw_rcp_tx_ignore(be->p_pdb_nthw,
1528 				pdb->v9.rcp[category + i].tx_ignore);
1529 			pdb_nthw_rcp_tx_now(be->p_pdb_nthw, pdb->v9.rcp[category + i].tx_now);
1530 			pdb_nthw_rcp_crc_overwrite(be->p_pdb_nthw,
1531 				pdb->v9.rcp[category + i].crc_overwrite);
1532 			pdb_nthw_rcp_align(be->p_pdb_nthw, pdb->v9.rcp[category + i].align);
1533 			pdb_nthw_rcp_ofs0_dyn(be->p_pdb_nthw, pdb->v9.rcp[category + i].ofs0_dyn);
1534 			pdb_nthw_rcp_ofs0_rel(be->p_pdb_nthw, pdb->v9.rcp[category + i].ofs0_rel);
1535 			pdb_nthw_rcp_ofs1_dyn(be->p_pdb_nthw, pdb->v9.rcp[category + i].ofs1_dyn);
1536 			pdb_nthw_rcp_ofs1_rel(be->p_pdb_nthw, pdb->v9.rcp[category + i].ofs1_rel);
1537 			pdb_nthw_rcp_ofs2_dyn(be->p_pdb_nthw, pdb->v9.rcp[category + i].ofs2_dyn);
1538 			pdb_nthw_rcp_ofs2_rel(be->p_pdb_nthw, pdb->v9.rcp[category + i].ofs2_rel);
1539 			pdb_nthw_rcp_ip_prot_tnl(be->p_pdb_nthw,
1540 				pdb->v9.rcp[category + i].ip_prot_tnl);
1541 			pdb_nthw_rcp_ppc_hsh(be->p_pdb_nthw, pdb->v9.rcp[category + i].ppc_hsh);
1542 			pdb_nthw_rcp_duplicate_en(be->p_pdb_nthw,
1543 				pdb->v9.rcp[category + i].duplicate_en);
1544 			pdb_nthw_rcp_duplicate_bit(be->p_pdb_nthw,
1545 				pdb->v9.rcp[category + i].duplicate_bit);
1546 			pdb_nthw_rcp_duplicate_bit(be->p_pdb_nthw,
1547 				pdb->v9.rcp[category + i].pcap_keep_fcs);
1548 			pdb_nthw_rcp_flush(be->p_pdb_nthw);
1549 		}
1550 	}
1551 
1552 	CHECK_DEBUG_OFF(pdb, be->p_pdb_nthw);
1553 	return 0;
1554 }
1555 
1556 static int pdb_config_flush(void *be_dev, const struct pdb_func_s *pdb)
1557 {
1558 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1559 	CHECK_DEBUG_ON(be, pdb, be->p_pdb_nthw);
1560 
1561 	if (pdb->ver == 9) {
1562 		pdb_nthw_config_ts_format(be->p_pdb_nthw, pdb->v9.config->ts_format);
1563 		pdb_nthw_config_port_ofs(be->p_pdb_nthw, pdb->v9.config->port_ofs);
1564 		pdb_nthw_config_flush(be->p_pdb_nthw);
1565 	}
1566 
1567 	CHECK_DEBUG_OFF(pdb, be->p_pdb_nthw);
1568 	return 0;
1569 }
1570 
1571 /*
1572  * DBS
1573  */
1574 
1575 static int alloc_rx_queue(void *be_dev, int queue_id)
1576 {
1577 	(void)be_dev;
1578 	(void)queue_id;
1579 	NT_LOG(ERR, FILTER, "ERROR alloc Rx queue");
1580 	return -1;
1581 }
1582 
1583 static int free_rx_queue(void *be_dev, int hw_queue)
1584 {
1585 	(void)be_dev;
1586 	(void)hw_queue;
1587 	NT_LOG(ERR, FILTER, "ERROR free Rx queue");
1588 	return 0;
1589 }
1590 
/*
 * Backend operations table handed to the generic flow API layer.
 * The initializer is POSITIONAL — entries must match the field order of
 * struct flow_api_backend_ops exactly; do not reorder.
 */
const struct flow_api_backend_ops flow_be_iface = {
	/* presumably the backend interface version — TODO confirm against
	 * struct flow_api_backend_ops
	 */
	1,

	/* Generic capability / resource-count queries (INFO module) */
	set_debug_mode,
	get_nb_phy_ports,
	get_nb_rx_ports,
	get_ltx_avail,
	get_nb_cat_funcs,
	get_nb_categories,
	get_nb_cat_km_if_cnt,
	get_nb_cat_km_if_m0,
	get_nb_cat_km_if_m1,
	get_nb_queues,
	get_nb_km_flow_types,
	get_nb_pm_ext,
	get_nb_len,
	get_kcc_size,
	get_kcc_banks,
	get_nb_km_categories,
	get_nb_km_cam_banks,
	get_nb_km_cam_record_words,
	get_nb_km_cam_records,
	get_nb_km_tcam_banks,
	get_nb_km_tcam_bank_width,
	get_nb_flm_categories,
	get_nb_flm_size_mb,
	get_nb_flm_entry_size,
	get_nb_flm_variant,
	get_nb_flm_prios,
	get_nb_flm_pst_profiles,
	get_nb_flm_scrub_profiles,
	get_nb_flm_load_aps_max,
	get_nb_qsl_categories,
	get_nb_qsl_qst_entries,
	get_nb_pdb_categories,
	get_nb_roa_categories,
	get_nb_tpe_categories,
	get_nb_tx_cpy_writers,
	get_nb_tx_cpy_mask_mem,
	get_nb_tx_rpl_depth,
	get_nb_tx_rpl_ext_categories,
	get_nb_tpe_ifr_categories,
	get_nb_rpp_per_ps,
	get_nb_hsh_categories,
	get_nb_hsh_toeplitz,

	/* DBS queue management (stubs in this backend) */
	alloc_rx_queue,
	free_rx_queue,

	/* CAT (categorizer) module */
	cat_get_present,
	cat_get_version,
	cat_cfn_flush,

	cat_kce_flush,
	cat_kcs_flush,
	cat_fte_flush,

	cat_cte_flush,
	cat_cts_flush,
	cat_cot_flush,
	cat_cct_flush,
	cat_exo_flush,
	cat_rck_flush,
	cat_len_flush,
	cat_kcc_flush,

	/* KM (key matcher) module */
	km_get_present,
	km_get_version,
	km_rcp_flush,
	km_cam_flush,
	km_tcam_flush,
	km_tci_flush,
	km_tcq_flush,

	/* FLM (flow matcher) module */
	flm_get_present,
	flm_get_version,
	flm_control_flush,
	flm_status_flush,
	flm_status_update,
	flm_scan_flush,
	flm_load_bin_flush,
	flm_prio_flush,
	flm_pst_flush,
	flm_rcp_flush,
	flm_scrub_flush,
	flm_buf_ctrl_update,
	flm_stat_update,
	flm_lrn_data_flush,
	flm_inf_sta_data_update,

	/* HSH (hasher) module */
	hsh_get_present,
	hsh_get_version,
	hsh_rcp_flush,

	/* QSL (queue selector) module */
	qsl_get_present,
	qsl_get_version,
	qsl_rcp_flush,
	qsl_qst_flush,
	qsl_qen_flush,
	qsl_unmq_flush,

	/* SLC LR (slicer) module */
	slc_lr_get_present,
	slc_lr_get_version,
	slc_lr_rcp_flush,

	/* PDB (packet descriptor builder) module */
	pdb_get_present,
	pdb_get_version,
	pdb_rcp_flush,
	pdb_config_flush,
};
1701 
1702 const struct flow_api_backend_ops *bin_flow_backend_init(nthw_fpga_t *p_fpga, void **dev)
1703 {
1704 	uint8_t physical_adapter_no = (uint8_t)p_fpga->p_fpga_info->adapter_no;
1705 
1706 	struct info_nthw *pinfonthw = info_nthw_new();
1707 	info_nthw_init(pinfonthw, p_fpga, physical_adapter_no);
1708 	be_devs[physical_adapter_no].p_info_nthw = pinfonthw;
1709 
1710 	/* Init nthw CAT */
1711 	if (cat_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1712 		struct cat_nthw *pcatnthw = cat_nthw_new();
1713 		cat_nthw_init(pcatnthw, p_fpga, physical_adapter_no);
1714 		be_devs[physical_adapter_no].p_cat_nthw = pcatnthw;
1715 
1716 	} else {
1717 		be_devs[physical_adapter_no].p_cat_nthw = NULL;
1718 	}
1719 
1720 	/* Init nthw KM */
1721 	if (km_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1722 		struct km_nthw *pkmnthw = km_nthw_new();
1723 		km_nthw_init(pkmnthw, p_fpga, physical_adapter_no);
1724 		be_devs[physical_adapter_no].p_km_nthw = pkmnthw;
1725 
1726 	} else {
1727 		be_devs[physical_adapter_no].p_km_nthw = NULL;
1728 	}
1729 
1730 	/* Init nthw FLM */
1731 	if (flm_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1732 		struct flm_nthw *pflmnthw = flm_nthw_new();
1733 		flm_nthw_init(pflmnthw, p_fpga, physical_adapter_no);
1734 		be_devs[physical_adapter_no].p_flm_nthw = pflmnthw;
1735 
1736 	} else {
1737 		be_devs[physical_adapter_no].p_flm_nthw = NULL;
1738 	}
1739 
1740 	/* Init nthw IFR */
1741 	if (ifr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1742 		struct ifr_nthw *ifrnthw = ifr_nthw_new();
1743 		ifr_nthw_init(ifrnthw, p_fpga, physical_adapter_no);
1744 		be_devs[physical_adapter_no].p_ifr_nthw = ifrnthw;
1745 
1746 	} else {
1747 		be_devs[physical_adapter_no].p_ifr_nthw = NULL;
1748 	}
1749 
1750 	/* Init nthw HSH */
1751 	if (hsh_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1752 		struct hsh_nthw *phshnthw = hsh_nthw_new();
1753 		hsh_nthw_init(phshnthw, p_fpga, physical_adapter_no);
1754 		be_devs[physical_adapter_no].p_hsh_nthw = phshnthw;
1755 
1756 	} else {
1757 		be_devs[physical_adapter_no].p_hsh_nthw = NULL;
1758 	}
1759 
1760 	/* Init nthw QSL */
1761 	if (qsl_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1762 		struct qsl_nthw *pqslnthw = qsl_nthw_new();
1763 		qsl_nthw_init(pqslnthw, p_fpga, physical_adapter_no);
1764 		be_devs[physical_adapter_no].p_qsl_nthw = pqslnthw;
1765 
1766 	} else {
1767 		be_devs[physical_adapter_no].p_qsl_nthw = NULL;
1768 	}
1769 
1770 	/* Init nthw SLC LR */
1771 	if (slc_lr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1772 		struct slc_lr_nthw *pslclrnthw = slc_lr_nthw_new();
1773 		slc_lr_nthw_init(pslclrnthw, p_fpga, physical_adapter_no);
1774 		be_devs[physical_adapter_no].p_slc_lr_nthw = pslclrnthw;
1775 
1776 	} else {
1777 		be_devs[physical_adapter_no].p_slc_lr_nthw = NULL;
1778 	}
1779 
1780 	/* Init nthw PDB */
1781 	if (pdb_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1782 		struct pdb_nthw *ppdbnthw = pdb_nthw_new();
1783 		pdb_nthw_init(ppdbnthw, p_fpga, physical_adapter_no);
1784 		be_devs[physical_adapter_no].p_pdb_nthw = ppdbnthw;
1785 
1786 	} else {
1787 		be_devs[physical_adapter_no].p_pdb_nthw = NULL;
1788 	}
1789 
1790 	/* Init nthw HFU */
1791 	if (hfu_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1792 		struct hfu_nthw *ptr = hfu_nthw_new();
1793 		hfu_nthw_init(ptr, p_fpga, physical_adapter_no);
1794 		be_devs[physical_adapter_no].p_hfu_nthw = ptr;
1795 
1796 	} else {
1797 		be_devs[physical_adapter_no].p_hfu_nthw = NULL;
1798 	}
1799 
1800 	/* Init nthw RPP_LR */
1801 	if (rpp_lr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1802 		struct rpp_lr_nthw *ptr = rpp_lr_nthw_new();
1803 		rpp_lr_nthw_init(ptr, p_fpga, physical_adapter_no);
1804 		be_devs[physical_adapter_no].p_rpp_lr_nthw = ptr;
1805 
1806 	} else {
1807 		be_devs[physical_adapter_no].p_rpp_lr_nthw = NULL;
1808 	}
1809 
1810 	be_devs[physical_adapter_no].adapter_no = physical_adapter_no;
1811 	*dev = (void *)&be_devs[physical_adapter_no];
1812 
1813 	return &flow_be_iface;
1814 }
1815 
1816 static void bin_flow_backend_done(void *dev)
1817 {
1818 	struct backend_dev_s *be_dev = (struct backend_dev_s *)dev;
1819 	info_nthw_delete(be_dev->p_info_nthw);
1820 	cat_nthw_delete(be_dev->p_cat_nthw);
1821 	km_nthw_delete(be_dev->p_km_nthw);
1822 	flm_nthw_delete(be_dev->p_flm_nthw);
1823 	hsh_nthw_delete(be_dev->p_hsh_nthw);
1824 	qsl_nthw_delete(be_dev->p_qsl_nthw);
1825 	slc_lr_nthw_delete(be_dev->p_slc_lr_nthw);
1826 	pdb_nthw_delete(be_dev->p_pdb_nthw);
1827 	hfu_nthw_delete(be_dev->p_hfu_nthw);
1828 	rpp_lr_nthw_delete(be_dev->p_rpp_lr_nthw);
1829 }
1830 
1831 static const struct flow_backend_ops ops = {
1832 	.bin_flow_backend_init = bin_flow_backend_init,
1833 	.bin_flow_backend_done = bin_flow_backend_done,
1834 };
1835 
/* Register this binary flow backend's init/done hooks with the ntnic module
 * registry (ntnic_mod_reg).
 */
void flow_backend_init(void)
{
	register_flow_backend_ops(&ops);
}
1840