xref: /dpdk/drivers/net/ntnic/nthw/flow_api/flow_backend/flow_backend.c (revision d42029a54dc9ace2bb1ed1769f9877949730ae67)
1 /*
2  * SPDX-License-Identifier: BSD-3-Clause
3  * Copyright(c) 2023 Napatech A/S
4  */
5 
6 #include <stdint.h>
7 
8 #include "flow_nthw_info.h"
9 #include "flow_nthw_ifr.h"
10 #include "flow_nthw_cat.h"
11 #include "flow_nthw_km.h"
12 #include "flow_nthw_flm.h"
13 #include "flow_nthw_hfu.h"
14 #include "flow_nthw_hsh.h"
15 #include "flow_nthw_qsl.h"
16 #include "flow_nthw_slc_lr.h"
17 #include "flow_nthw_pdb.h"
18 #include "flow_nthw_rpp_lr.h"
19 #include "flow_nthw_tx_cpy.h"
20 #include "ntnic_mod_reg.h"
21 #include "nthw_fpga_model.h"
22 #include "hw_mod_backend.h"
23 
24 /*
25  * Binary Flow API backend implementation for the ntservice driver
26  *
27  * General note on this backend implementation:
28  * A shadow class could be used to combine multiple writes; however, this backend is only for development/testing.
29  */
30 
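/*
 * Per-adapter backend state: each entry holds the nthw module handles used to
 * program the FPGA flow modules (CAT, KM, FLM, HSH, QSL, SLC LR, PDB and the
 * TPE helpers) for one physical adapter.
 */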
31 static struct backend_dev_s {
32 	uint8_t adapter_no;
33 	enum debug_mode_e dmode;
34 	struct info_nthw *p_info_nthw;
35 	struct cat_nthw *p_cat_nthw;
36 	struct km_nthw *p_km_nthw;
37 	struct flm_nthw *p_flm_nthw;
38 	struct hsh_nthw *p_hsh_nthw;
39 	struct qsl_nthw *p_qsl_nthw;
40 	struct slc_lr_nthw *p_slc_lr_nthw;
41 	struct pdb_nthw *p_pdb_nthw;
42 	struct hfu_nthw *p_hfu_nthw;    /* TPE module */
43 	struct rpp_lr_nthw *p_rpp_lr_nthw;      /* TPE module */
44 	struct tx_cpy_nthw *p_tx_cpy_nthw;      /* TPE module */
45 	struct ifr_nthw *p_ifr_nthw;    /* TPE module */
46 } be_devs[MAX_PHYS_ADAPTERS];
47 
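/*
 * CHECK_DEBUG_ON() declares a local __debug__ flag and, when the backend is in
 * FLOW_BACKEND_DEBUG_MODE_WRITE or the module itself has debugging enabled,
 * turns on register debug output via <mod>_nthw_set_debug_mode(). Every flush
 * and update function below pairs it with CHECK_DEBUG_OFF(), which restores the
 * debug mode only if this invocation enabled it. The macros use token pasting,
 * so the 'mod' argument must match the nthw module prefix. Typical use:
 *
 *     CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
 *     ... register writes ...
 *     CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
 */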
48 #define CHECK_DEBUG_ON(be, mod, inst)                                                             \
49 	int __debug__ = 0;                                                                        \
50 	if (((be)->dmode & FLOW_BACKEND_DEBUG_MODE_WRITE) || (mod)->debug)                        \
51 		do {                                                                              \
52 			mod##_nthw_set_debug_mode((inst), 0xFF);                                  \
53 			__debug__ = 1;                                                            \
54 	} while (0)
55 
56 #define CHECK_DEBUG_OFF(mod, inst)                                                                \
57 	do {                                                                                      \
58 		if (__debug__)                                                                    \
59 			mod##_nthw_set_debug_mode((inst), 0);                                     \
60 	} while (0)
61 
62 const struct flow_api_backend_ops *bin_flow_backend_init(nthw_fpga_t *p_fpga, void **be_dev);
63 static void bin_flow_backend_done(void *be_dev);
64 
65 static int set_debug_mode(void *be_dev, enum debug_mode_e mode)
66 {
67 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
68 	be->dmode = mode;
69 	return 0;
70 }
71 
72 /*
73  * INFO
74  */
75 
76 static int get_nb_phy_ports(void *be_dev)
77 {
78 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
79 	return info_nthw_get_nb_phy_ports(be->p_info_nthw);
80 }
81 
82 static int get_nb_rx_ports(void *be_dev)
83 {
84 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
85 	return info_nthw_get_nb_rx_ports(be->p_info_nthw);
86 }
87 
88 static int get_ltx_avail(void *be_dev)
89 {
90 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
91 	return info_nthw_get_ltx_avail(be->p_info_nthw);
92 }
93 
94 static int get_nb_cat_funcs(void *be_dev)
95 {
96 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
97 	return info_nthw_get_nb_cat_funcs(be->p_info_nthw);
98 }
99 
100 static int get_nb_categories(void *be_dev)
101 {
102 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
103 	return info_nthw_get_nb_categories(be->p_info_nthw);
104 }
105 
106 static int get_nb_cat_km_if_cnt(void *be_dev)
107 {
108 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
109 	return info_nthw_get_nb_cat_km_if_cnt(be->p_info_nthw);
110 }
111 
112 static int get_nb_cat_km_if_m0(void *be_dev)
113 {
114 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
115 	return info_nthw_get_nb_cat_km_if_m0(be->p_info_nthw);
116 }
117 
118 static int get_nb_cat_km_if_m1(void *be_dev)
119 {
120 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
121 	return info_nthw_get_nb_cat_km_if_m1(be->p_info_nthw);
122 }
123 
124 static int get_nb_queues(void *be_dev)
125 {
126 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
127 	return info_nthw_get_nb_queues(be->p_info_nthw);
128 }
129 
130 static int get_nb_km_flow_types(void *be_dev)
131 {
132 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
133 	return info_nthw_get_nb_km_flow_types(be->p_info_nthw);
134 }
135 
136 static int get_nb_pm_ext(void *be_dev)
137 {
138 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
139 	return info_nthw_get_nb_pm_ext(be->p_info_nthw);
140 }
141 
142 static int get_nb_len(void *be_dev)
143 {
144 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
145 	return info_nthw_get_nb_len(be->p_info_nthw);
146 }
147 
148 static int get_kcc_size(void *be_dev)
149 {
150 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
151 	return info_nthw_get_kcc_size(be->p_info_nthw);
152 }
153 
154 static int get_kcc_banks(void *be_dev)
155 {
156 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
157 	return info_nthw_get_kcc_banks(be->p_info_nthw);
158 }
159 
160 static int get_nb_km_categories(void *be_dev)
161 {
162 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
163 	return info_nthw_get_nb_km_categories(be->p_info_nthw);
164 }
165 
166 static int get_nb_km_cam_banks(void *be_dev)
167 {
168 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
169 	return info_nthw_get_nb_km_cam_banks(be->p_info_nthw);
170 }
171 
172 static int get_nb_km_cam_record_words(void *be_dev)
173 {
174 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
175 	return info_nthw_get_nb_km_cam_record_words(be->p_info_nthw);
176 }
177 
178 static int get_nb_km_cam_records(void *be_dev)
179 {
180 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
181 	return info_nthw_get_nb_km_cam_records(be->p_info_nthw);
182 }
183 
184 static int get_nb_km_tcam_banks(void *be_dev)
185 {
186 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
187 	return info_nthw_get_nb_km_tcam_banks(be->p_info_nthw);
188 }
189 
190 static int get_nb_km_tcam_bank_width(void *be_dev)
191 {
192 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
193 	return info_nthw_get_nb_km_tcam_bank_width(be->p_info_nthw);
194 }
195 
196 static int get_nb_flm_categories(void *be_dev)
197 {
198 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
199 	return info_nthw_get_nb_flm_categories(be->p_info_nthw);
200 }
201 
202 static int get_nb_flm_size_mb(void *be_dev)
203 {
204 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
205 	return info_nthw_get_nb_flm_size_mb(be->p_info_nthw);
206 }
207 
208 static int get_nb_flm_entry_size(void *be_dev)
209 {
210 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
211 	return info_nthw_get_nb_flm_entry_size(be->p_info_nthw);
212 }
213 
214 static int get_nb_flm_variant(void *be_dev)
215 {
216 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
217 	return info_nthw_get_nb_flm_variant(be->p_info_nthw);
218 }
219 
220 static int get_nb_flm_prios(void *be_dev)
221 {
222 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
223 	return info_nthw_get_nb_flm_prios(be->p_info_nthw);
224 }
225 
226 static int get_nb_flm_pst_profiles(void *be_dev)
227 {
228 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
229 	return info_nthw_get_nb_flm_pst_profiles(be->p_info_nthw);
230 }
231 
232 static int get_nb_flm_scrub_profiles(void *be_dev)
233 {
234 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
235 	return info_nthw_get_nb_flm_scrub_profiles(be->p_info_nthw);
236 }
237 
238 static int get_nb_flm_load_aps_max(void *be_dev)
239 {
240 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
241 	return info_nthw_get_nb_flm_load_aps_max(be->p_info_nthw);
242 }
243 
244 static int get_nb_qsl_categories(void *be_dev)
245 {
246 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
247 	return info_nthw_get_nb_qsl_categories(be->p_info_nthw);
248 }
249 
250 static int get_nb_qsl_qst_entries(void *be_dev)
251 {
252 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
253 	return info_nthw_get_nb_qsl_qst_entries(be->p_info_nthw);
254 }
255 
256 static int get_nb_pdb_categories(void *be_dev)
257 {
258 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
259 	return info_nthw_get_nb_pdb_categories(be->p_info_nthw);
260 }
261 
262 static int get_nb_roa_categories(void *be_dev)
263 {
264 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
265 	return info_nthw_get_nb_roa_categories(be->p_info_nthw);
266 }
267 
268 static int get_nb_tpe_categories(void *be_dev)
269 {
270 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
271 	return info_nthw_get_nb_tpe_categories(be->p_info_nthw);
272 }
273 
274 static int get_nb_tx_cpy_writers(void *be_dev)
275 {
276 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
277 	return info_nthw_get_nb_tx_cpy_writers(be->p_info_nthw);
278 }
279 
280 static int get_nb_tx_cpy_mask_mem(void *be_dev)
281 {
282 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
283 	return info_nthw_get_nb_tx_cpy_mask_mem(be->p_info_nthw);
284 }
285 
286 static int get_nb_tx_rpl_depth(void *be_dev)
287 {
288 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
289 	return info_nthw_get_nb_tx_rpl_depth(be->p_info_nthw);
290 }
291 
292 static int get_nb_tx_rpl_ext_categories(void *be_dev)
293 {
294 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
295 	return info_nthw_get_nb_tx_rpl_ext_categories(be->p_info_nthw);
296 }
297 
298 static int get_nb_tpe_ifr_categories(void *be_dev)
299 {
300 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
301 	return info_nthw_get_nb_tpe_ifr_categories(be->p_info_nthw);
302 }
303 
304 static int get_nb_rpp_per_ps(void *be_dev)
305 {
306 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
307 	return info_nthw_get_nb_rpp_per_ps(be->p_info_nthw);
308 }
309 
310 static int get_nb_hsh_categories(void *be_dev)
311 {
312 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
313 	return info_nthw_get_nb_hsh_categories(be->p_info_nthw);
314 }
315 
316 static int get_nb_hsh_toeplitz(void *be_dev)
317 {
318 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
319 	return info_nthw_get_nb_hsh_toeplitz(be->p_info_nthw);
320 }
321 
322 /*
323  * CAT
324  */
325 
326 static bool cat_get_present(void *be_dev)
327 {
328 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
329 	return be->p_cat_nthw != NULL;
330 }
331 
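/* The *_get_version() helpers pack the module version as (major << 16) | (minor & 0xffff). */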
332 static uint32_t cat_get_version(void *be_dev)
333 {
334 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
335 	return (uint32_t)((nthw_module_get_major_version(be->p_cat_nthw->m_cat) << 16) |
336 			(nthw_module_get_minor_version(be->p_cat_nthw->m_cat) & 0xffff));
337 }
338 
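/*
 * The flush helpers below all follow the same register access pattern: set the
 * access count to 1, select the entry index, write the shadow fields from the
 * flow API structures for the detected module version, and issue a flush to
 * commit the entry to hardware, repeating for each entry in the range.
 */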
339 static int cat_cfn_flush(void *be_dev, const struct cat_func_s *cat, int cat_func, int cnt)
340 {
341 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
342 
343 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
344 
345 	if (cat->ver == 18) {
346 		cat_nthw_cfn_cnt(be->p_cat_nthw, 1U);
347 
348 		for (int i = 0; i < cnt; i++) {
349 			cat_nthw_cfn_select(be->p_cat_nthw, cat_func);
350 			cat_nthw_cfn_enable(be->p_cat_nthw, cat->v18.cfn[cat_func].enable);
351 			cat_nthw_cfn_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].inv);
352 			cat_nthw_cfn_ptc_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_inv);
353 			cat_nthw_cfn_ptc_isl(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_isl);
354 			cat_nthw_cfn_ptc_cfp(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_cfp);
355 			cat_nthw_cfn_ptc_mac(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_mac);
356 			cat_nthw_cfn_ptc_l2(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_l2);
357 			cat_nthw_cfn_ptc_vn_tag(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_vntag);
358 			cat_nthw_cfn_ptc_vlan(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_vlan);
359 			cat_nthw_cfn_ptc_mpls(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_mpls);
360 			cat_nthw_cfn_ptc_l3(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_l3);
361 			cat_nthw_cfn_ptc_frag(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_frag);
362 			cat_nthw_cfn_ptc_ip_prot(be->p_cat_nthw,
363 				cat->v18.cfn[cat_func].ptc_ip_prot);
364 			cat_nthw_cfn_ptc_l4(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_l4);
365 			cat_nthw_cfn_ptc_tunnel(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tunnel);
366 			cat_nthw_cfn_ptc_tnl_l2(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_l2);
367 			cat_nthw_cfn_ptc_tnl_vlan(be->p_cat_nthw,
368 				cat->v18.cfn[cat_func].ptc_tnl_vlan);
369 			cat_nthw_cfn_ptc_tnl_mpls(be->p_cat_nthw,
370 				cat->v18.cfn[cat_func].ptc_tnl_mpls);
371 			cat_nthw_cfn_ptc_tnl_l3(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_l3);
372 			cat_nthw_cfn_ptc_tnl_frag(be->p_cat_nthw,
373 				cat->v18.cfn[cat_func].ptc_tnl_frag);
374 			cat_nthw_cfn_ptc_tnl_ip_prot(be->p_cat_nthw,
375 				cat->v18.cfn[cat_func].ptc_tnl_ip_prot);
376 			cat_nthw_cfn_ptc_tnl_l4(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_l4);
377 
378 			cat_nthw_cfn_err_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].err_inv);
379 			cat_nthw_cfn_err_cv(be->p_cat_nthw, cat->v18.cfn[cat_func].err_cv);
380 			cat_nthw_cfn_err_fcs(be->p_cat_nthw, cat->v18.cfn[cat_func].err_fcs);
381 			cat_nthw_cfn_err_trunc(be->p_cat_nthw, cat->v18.cfn[cat_func].err_trunc);
382 			cat_nthw_cfn_err_l3_cs(be->p_cat_nthw, cat->v18.cfn[cat_func].err_l3_cs);
383 			cat_nthw_cfn_err_l4_cs(be->p_cat_nthw, cat->v18.cfn[cat_func].err_l4_cs);
384 
385 			cat_nthw_cfn_mac_port(be->p_cat_nthw, cat->v18.cfn[cat_func].mac_port);
386 
387 			cat_nthw_cfn_pm_cmp(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_cmp);
388 			cat_nthw_cfn_pm_dct(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_dct);
389 			cat_nthw_cfn_pm_ext_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_ext_inv);
390 			cat_nthw_cfn_pm_cmb(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_cmb);
391 			cat_nthw_cfn_pm_and_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_and_inv);
392 			cat_nthw_cfn_pm_or_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_or_inv);
393 			cat_nthw_cfn_pm_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_inv);
394 
395 			cat_nthw_cfn_lc(be->p_cat_nthw, cat->v18.cfn[cat_func].lc);
396 			cat_nthw_cfn_lc_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].lc_inv);
397 			cat_nthw_cfn_km0_or(be->p_cat_nthw, cat->v18.cfn[cat_func].km_or);
398 			cat_nthw_cfn_flush(be->p_cat_nthw);
399 			cat_func++;
400 		}
401 
402 	} else if (cat->ver == 21) {
403 		cat_nthw_cfn_cnt(be->p_cat_nthw, 1U);
404 
405 		for (int i = 0; i < cnt; i++) {
406 			cat_nthw_cfn_select(be->p_cat_nthw, cat_func);
407 			cat_nthw_cfn_enable(be->p_cat_nthw, cat->v21.cfn[cat_func].enable);
408 			cat_nthw_cfn_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].inv);
409 			cat_nthw_cfn_ptc_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_inv);
410 			cat_nthw_cfn_ptc_isl(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_isl);
411 			cat_nthw_cfn_ptc_cfp(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_cfp);
412 			cat_nthw_cfn_ptc_mac(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_mac);
413 			cat_nthw_cfn_ptc_l2(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_l2);
414 			cat_nthw_cfn_ptc_vn_tag(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_vntag);
415 			cat_nthw_cfn_ptc_vlan(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_vlan);
416 			cat_nthw_cfn_ptc_mpls(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_mpls);
417 			cat_nthw_cfn_ptc_l3(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_l3);
418 			cat_nthw_cfn_ptc_frag(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_frag);
419 			cat_nthw_cfn_ptc_ip_prot(be->p_cat_nthw,
420 				cat->v21.cfn[cat_func].ptc_ip_prot);
421 			cat_nthw_cfn_ptc_l4(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_l4);
422 			cat_nthw_cfn_ptc_tunnel(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tunnel);
423 			cat_nthw_cfn_ptc_tnl_l2(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_l2);
424 			cat_nthw_cfn_ptc_tnl_vlan(be->p_cat_nthw,
425 				cat->v21.cfn[cat_func].ptc_tnl_vlan);
426 			cat_nthw_cfn_ptc_tnl_mpls(be->p_cat_nthw,
427 				cat->v21.cfn[cat_func].ptc_tnl_mpls);
428 			cat_nthw_cfn_ptc_tnl_l3(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_l3);
429 			cat_nthw_cfn_ptc_tnl_frag(be->p_cat_nthw,
430 				cat->v21.cfn[cat_func].ptc_tnl_frag);
431 			cat_nthw_cfn_ptc_tnl_ip_prot(be->p_cat_nthw,
432 				cat->v21.cfn[cat_func].ptc_tnl_ip_prot);
433 			cat_nthw_cfn_ptc_tnl_l4(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_l4);
434 
435 			cat_nthw_cfn_err_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].err_inv);
436 			cat_nthw_cfn_err_cv(be->p_cat_nthw, cat->v21.cfn[cat_func].err_cv);
437 			cat_nthw_cfn_err_fcs(be->p_cat_nthw, cat->v21.cfn[cat_func].err_fcs);
438 			cat_nthw_cfn_err_trunc(be->p_cat_nthw, cat->v21.cfn[cat_func].err_trunc);
439 			cat_nthw_cfn_err_l3_cs(be->p_cat_nthw, cat->v21.cfn[cat_func].err_l3_cs);
440 			cat_nthw_cfn_err_l4_cs(be->p_cat_nthw, cat->v21.cfn[cat_func].err_l4_cs);
441 			cat_nthw_cfn_err_tnl_l3_cs(be->p_cat_nthw,
442 				cat->v21.cfn[cat_func].err_tnl_l3_cs);
443 			cat_nthw_cfn_err_tnl_l4_cs(be->p_cat_nthw,
444 				cat->v21.cfn[cat_func].err_tnl_l4_cs);
445 			cat_nthw_cfn_err_ttl_exp(be->p_cat_nthw,
446 				cat->v21.cfn[cat_func].err_ttl_exp);
447 			cat_nthw_cfn_err_tnl_ttl_exp(be->p_cat_nthw,
448 				cat->v21.cfn[cat_func].err_tnl_ttl_exp);
449 
450 			cat_nthw_cfn_mac_port(be->p_cat_nthw, cat->v21.cfn[cat_func].mac_port);
451 
452 			cat_nthw_cfn_pm_cmp(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_cmp);
453 			cat_nthw_cfn_pm_dct(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_dct);
454 			cat_nthw_cfn_pm_ext_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_ext_inv);
455 			cat_nthw_cfn_pm_cmb(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_cmb);
456 			cat_nthw_cfn_pm_and_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_and_inv);
457 			cat_nthw_cfn_pm_or_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_or_inv);
458 			cat_nthw_cfn_pm_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_inv);
459 
460 			cat_nthw_cfn_lc(be->p_cat_nthw, cat->v21.cfn[cat_func].lc);
461 			cat_nthw_cfn_lc_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].lc_inv);
462 			cat_nthw_cfn_km0_or(be->p_cat_nthw, cat->v21.cfn[cat_func].km0_or);
463 
464 			if (be->p_cat_nthw->m_km_if_cnt > 1)
465 				cat_nthw_cfn_km1_or(be->p_cat_nthw, cat->v21.cfn[cat_func].km1_or);
466 
467 			cat_nthw_cfn_flush(be->p_cat_nthw);
468 			cat_func++;
469 		}
470 	}
471 
472 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
473 	return 0;
474 }
475 
476 static int cat_kce_flush(void *be_dev, const struct cat_func_s *cat, int km_if_idx, int index,
477 	int cnt)
478 {
479 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
480 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
481 
482 	if (cat->ver == 18) {
483 		cat_nthw_kce_cnt(be->p_cat_nthw, 0, 1U);
484 
485 		for (int i = 0; i < cnt; i++) {
486 			cat_nthw_kce_select(be->p_cat_nthw, 0, index + i);
487 			cat_nthw_kce_enable(be->p_cat_nthw, 0, cat->v18.kce[index + i].enable_bm);
488 			cat_nthw_kce_flush(be->p_cat_nthw, 0);
489 		}
490 
491 	} else if (cat->ver == 21) {
492 		cat_nthw_kce_cnt(be->p_cat_nthw, km_if_idx, 1U);
493 
494 		for (int i = 0; i < cnt; i++) {
495 			cat_nthw_kce_select(be->p_cat_nthw, km_if_idx, index + i);
496 			cat_nthw_kce_enable(be->p_cat_nthw, km_if_idx,
497 				cat->v21.kce[index + i].enable_bm[km_if_idx]);
498 			cat_nthw_kce_flush(be->p_cat_nthw, km_if_idx);
499 		}
500 	}
501 
502 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
503 	return 0;
504 }
505 
506 static int cat_kcs_flush(void *be_dev, const struct cat_func_s *cat, int km_if_idx, int cat_func,
507 	int cnt)
508 {
509 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
510 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
511 
512 	if (cat->ver == 18) {
513 		cat_nthw_kcs_cnt(be->p_cat_nthw, 0, 1U);
514 
515 		for (int i = 0; i < cnt; i++) {
516 			cat_nthw_kcs_select(be->p_cat_nthw, 0, cat_func);
517 			cat_nthw_kcs_category(be->p_cat_nthw, 0, cat->v18.kcs[cat_func].category);
518 			cat_nthw_kcs_flush(be->p_cat_nthw, 0);
519 			cat_func++;
520 		}
521 
522 	} else if (cat->ver == 21) {
523 		cat_nthw_kcs_cnt(be->p_cat_nthw, km_if_idx, 1U);
524 
525 		for (int i = 0; i < cnt; i++) {
526 			cat_nthw_kcs_select(be->p_cat_nthw, km_if_idx, cat_func);
527 			cat_nthw_kcs_category(be->p_cat_nthw, km_if_idx,
528 				cat->v21.kcs[cat_func].category[km_if_idx]);
529 			cat_nthw_kcs_flush(be->p_cat_nthw, km_if_idx);
530 			cat_func++;
531 		}
532 	}
533 
534 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
535 	return 0;
536 }
537 
538 static int cat_fte_flush(void *be_dev, const struct cat_func_s *cat, int km_if_idx, int index,
539 	int cnt)
540 {
541 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
542 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
543 
544 	if (cat->ver == 18) {
545 		cat_nthw_fte_cnt(be->p_cat_nthw, 0, 1);
546 
547 		for (int i = 0; i < cnt; i++) {
548 			cat_nthw_fte_select(be->p_cat_nthw, 0, index + i);
549 			cat_nthw_fte_enable(be->p_cat_nthw, 0, cat->v18.fte[index + i].enable_bm);
550 			cat_nthw_fte_flush(be->p_cat_nthw, 0);
551 		}
552 
553 	} else if (cat->ver == 21) {
554 		cat_nthw_fte_cnt(be->p_cat_nthw, km_if_idx, 1);
555 
556 		for (int i = 0; i < cnt; i++) {
557 			cat_nthw_fte_select(be->p_cat_nthw, km_if_idx, index + i);
558 			cat_nthw_fte_enable(be->p_cat_nthw, km_if_idx,
559 				cat->v21.fte[index + i].enable_bm[km_if_idx]);
560 			cat_nthw_fte_flush(be->p_cat_nthw, km_if_idx);
561 		}
562 	}
563 
564 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
565 	return 0;
566 }
567 
568 static int cat_cte_flush(void *be_dev, const struct cat_func_s *cat, int cat_func, int cnt)
569 {
570 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
571 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
572 
573 	if (cat->ver == 18 || cat->ver == 21) {
574 		cat_nthw_cte_cnt(be->p_cat_nthw, 1);
575 
576 		for (int i = 0; i < cnt; i++) {
577 			cat_nthw_cte_select(be->p_cat_nthw, cat_func);
578 			cat_nthw_cte_enable_col(be->p_cat_nthw, cat->v18.cte[cat_func].b.col);
579 			cat_nthw_cte_enable_cor(be->p_cat_nthw, cat->v18.cte[cat_func].b.cor);
580 			cat_nthw_cte_enable_hsh(be->p_cat_nthw, cat->v18.cte[cat_func].b.hsh);
581 			cat_nthw_cte_enable_qsl(be->p_cat_nthw, cat->v18.cte[cat_func].b.qsl);
582 			cat_nthw_cte_enable_ipf(be->p_cat_nthw, cat->v18.cte[cat_func].b.ipf);
583 			cat_nthw_cte_enable_slc(be->p_cat_nthw, cat->v18.cte[cat_func].b.slc);
584 			cat_nthw_cte_enable_pdb(be->p_cat_nthw, cat->v18.cte[cat_func].b.pdb);
585 			cat_nthw_cte_enable_msk(be->p_cat_nthw, cat->v18.cte[cat_func].b.msk);
586 			cat_nthw_cte_enable_hst(be->p_cat_nthw, cat->v18.cte[cat_func].b.hst);
587 			cat_nthw_cte_enable_epp(be->p_cat_nthw, cat->v18.cte[cat_func].b.epp);
588 			cat_nthw_cte_enable_tpe(be->p_cat_nthw, cat->v18.cte[cat_func].b.tpe);
589 
590 			cat_nthw_cte_flush(be->p_cat_nthw);
591 			cat_func++;
592 		}
593 	}
594 
595 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
596 	return 0;
597 }
598 
599 static int cat_cts_flush(void *be_dev, const struct cat_func_s *cat, int index, int cnt)
600 {
601 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
602 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
603 
604 	if (cat->ver == 18 || cat->ver == 21) {
605 		cat_nthw_cts_cnt(be->p_cat_nthw, 1);
606 
607 		for (int i = 0; i < cnt; i++) {
608 			cat_nthw_cts_select(be->p_cat_nthw, index + i);
609 			cat_nthw_cts_cat_a(be->p_cat_nthw, cat->v18.cts[index + i].cat_a);
610 			cat_nthw_cts_cat_b(be->p_cat_nthw, cat->v18.cts[index + i].cat_b);
611 			cat_nthw_cts_flush(be->p_cat_nthw);
612 		}
613 	}
614 
615 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
616 	return 0;
617 }
618 
619 static int cat_cot_flush(void *be_dev, const struct cat_func_s *cat, int cat_func, int cnt)
620 {
621 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
622 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
623 
624 	if (cat->ver == 18 || cat->ver == 21) {
625 		cat_nthw_cot_cnt(be->p_cat_nthw, 1);
626 
627 		for (int i = 0; i < cnt; i++) {
628 			cat_nthw_cot_select(be->p_cat_nthw, cat_func + i);
629 			cat_nthw_cot_color(be->p_cat_nthw, cat->v18.cot[cat_func + i].color);
630 			cat_nthw_cot_km(be->p_cat_nthw, cat->v18.cot[cat_func + i].km);
631 			cat_nthw_cot_flush(be->p_cat_nthw);
632 		}
633 	}
634 
635 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
636 	return 0;
637 }
638 
639 static int cat_cct_flush(void *be_dev, const struct cat_func_s *cat, int index, int cnt)
640 {
641 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
642 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
643 
644 	if (cat->ver == 18 || cat->ver == 21) {
645 		cat_nthw_cct_cnt(be->p_cat_nthw, 1);
646 
647 		for (int i = 0; i < cnt; i++) {
648 			cat_nthw_cct_select(be->p_cat_nthw, index + i);
649 			cat_nthw_cct_color(be->p_cat_nthw, cat->v18.cct[index + i].color);
650 			cat_nthw_cct_km(be->p_cat_nthw, cat->v18.cct[index + i].km);
651 			cat_nthw_cct_flush(be->p_cat_nthw);
652 		}
653 	}
654 
655 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
656 	return 0;
657 }
658 
659 static int cat_exo_flush(void *be_dev, const struct cat_func_s *cat, int ext_index, int cnt)
660 {
661 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
662 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
663 
664 	if (cat->ver == 18 || cat->ver == 21) {
665 		cat_nthw_exo_cnt(be->p_cat_nthw, 1);
666 
667 		for (int i = 0; i < cnt; i++) {
668 			cat_nthw_exo_select(be->p_cat_nthw, ext_index + i);
669 			cat_nthw_exo_dyn(be->p_cat_nthw, cat->v18.exo[ext_index + i].dyn);
670 			cat_nthw_exo_ofs(be->p_cat_nthw, cat->v18.exo[ext_index + i].ofs);
671 			cat_nthw_exo_flush(be->p_cat_nthw);
672 		}
673 	}
674 
675 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
676 	return 0;
677 }
678 
679 static int cat_rck_flush(void *be_dev, const struct cat_func_s *cat, int index, int cnt)
680 {
681 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
682 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
683 
684 	if (cat->ver == 18 || cat->ver == 21) {
685 		cat_nthw_rck_cnt(be->p_cat_nthw, 1);
686 
687 		for (int i = 0; i < cnt; i++) {
688 			cat_nthw_rck_select(be->p_cat_nthw, index + i);
689 			cat_nthw_rck_data(be->p_cat_nthw, cat->v18.rck[index + i].rck_data);
690 			cat_nthw_rck_flush(be->p_cat_nthw);
691 		}
692 	}
693 
694 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
695 	return 0;
696 }
697 
698 static int cat_len_flush(void *be_dev, const struct cat_func_s *cat, int len_index, int cnt)
699 {
700 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
701 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
702 
703 	if (cat->ver == 18 || cat->ver == 21) {
704 		cat_nthw_len_cnt(be->p_cat_nthw, 1);
705 
706 		for (int i = 0; i < cnt; i++) {
707 			cat_nthw_len_select(be->p_cat_nthw, len_index + i);
708 			cat_nthw_len_lower(be->p_cat_nthw, cat->v18.len[len_index + i].lower);
709 			cat_nthw_len_upper(be->p_cat_nthw, cat->v18.len[len_index + i].upper);
710 			cat_nthw_len_dyn1(be->p_cat_nthw, cat->v18.len[len_index + i].dyn1);
711 			cat_nthw_len_dyn2(be->p_cat_nthw, cat->v18.len[len_index + i].dyn2);
712 			cat_nthw_len_inv(be->p_cat_nthw, cat->v18.len[len_index + i].inv);
713 			cat_nthw_len_flush(be->p_cat_nthw);
714 		}
715 	}
716 
717 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
718 	return 0;
719 }
720 
721 static int cat_kcc_flush(void *be_dev, const struct cat_func_s *cat, int len_index, int cnt)
722 {
723 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
724 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
725 
726 	if (cat->ver == 18 || cat->ver == 21) {
727 		cat_nthw_kcc_cnt(be->p_cat_nthw, 1);
728 
729 		for (int i = 0; i < cnt; i++) {
730 			cat_nthw_kcc_select(be->p_cat_nthw, len_index + i);
731 			cat_nthw_kcc_key(be->p_cat_nthw, cat->v18.kcc_cam[len_index + i].key);
732 			cat_nthw_kcc_category(be->p_cat_nthw,
733 				cat->v18.kcc_cam[len_index + i].category);
734 			cat_nthw_kcc_id(be->p_cat_nthw, cat->v18.kcc_cam[len_index + i].id);
735 			cat_nthw_kcc_flush(be->p_cat_nthw);
736 		}
737 	}
738 
739 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
740 	return 0;
741 }
742 
743 /*
744  * KM
745  */
746 
747 static bool km_get_present(void *be_dev)
748 {
749 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
750 	return be->p_km_nthw != NULL;
751 }
752 
753 static uint32_t km_get_version(void *be_dev)
754 {
755 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
756 	return (uint32_t)((nthw_module_get_major_version(be->p_km_nthw->m_km) << 16) |
757 			(nthw_module_get_minor_version(be->p_km_nthw->m_km) & 0xffff));
758 }
759 
760 static int km_rcp_flush(void *be_dev, const struct km_func_s *km, int category, int cnt)
761 {
762 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
763 
764 	CHECK_DEBUG_ON(be, km, be->p_km_nthw);
765 
766 	if (km->ver == 7) {
767 		km_nthw_rcp_cnt(be->p_km_nthw, 1);
768 
769 		for (int i = 0; i < cnt; i++) {
770 			km_nthw_rcp_select(be->p_km_nthw, category + i);
771 			km_nthw_rcp_qw0_dyn(be->p_km_nthw, km->v7.rcp[category + i].qw0_dyn);
772 			km_nthw_rcp_qw0_ofs(be->p_km_nthw, km->v7.rcp[category + i].qw0_ofs);
773 			km_nthw_rcp_qw0_sel_a(be->p_km_nthw, km->v7.rcp[category + i].qw0_sel_a);
774 			km_nthw_rcp_qw0_sel_b(be->p_km_nthw, km->v7.rcp[category + i].qw0_sel_b);
775 			km_nthw_rcp_qw4_dyn(be->p_km_nthw, km->v7.rcp[category + i].qw4_dyn);
776 			km_nthw_rcp_qw4_ofs(be->p_km_nthw, km->v7.rcp[category + i].qw4_ofs);
777 			km_nthw_rcp_qw4_sel_a(be->p_km_nthw, km->v7.rcp[category + i].qw4_sel_a);
778 			km_nthw_rcp_qw4_sel_b(be->p_km_nthw, km->v7.rcp[category + i].qw4_sel_b);
779 			km_nthw_rcp_dw8_dyn(be->p_km_nthw, km->v7.rcp[category + i].dw8_dyn);
780 			km_nthw_rcp_dw8_ofs(be->p_km_nthw, km->v7.rcp[category + i].dw8_ofs);
781 			km_nthw_rcp_dw8_sel_a(be->p_km_nthw, km->v7.rcp[category + i].dw8_sel_a);
782 			km_nthw_rcp_dw8_sel_b(be->p_km_nthw, km->v7.rcp[category + i].dw8_sel_b);
783 			km_nthw_rcp_dw10_dyn(be->p_km_nthw, km->v7.rcp[category + i].dw10_dyn);
784 			km_nthw_rcp_dw10_ofs(be->p_km_nthw, km->v7.rcp[category + i].dw10_ofs);
785 			km_nthw_rcp_dw10_sel_a(be->p_km_nthw, km->v7.rcp[category + i].dw10_sel_a);
786 			km_nthw_rcp_dw10_sel_b(be->p_km_nthw, km->v7.rcp[category + i].dw10_sel_b);
787 			km_nthw_rcp_swx_cch(be->p_km_nthw, km->v7.rcp[category + i].swx_cch);
788 			km_nthw_rcp_swx_sel_a(be->p_km_nthw, km->v7.rcp[category + i].swx_sel_a);
789 			km_nthw_rcp_swx_sel_b(be->p_km_nthw, km->v7.rcp[category + i].swx_sel_b);
790 			km_nthw_rcp_mask_da(be->p_km_nthw, km->v7.rcp[category + i].mask_d_a);
791 			km_nthw_rcp_mask_b(be->p_km_nthw, km->v7.rcp[category + i].mask_b);
792 			km_nthw_rcp_dual(be->p_km_nthw, km->v7.rcp[category + i].dual);
793 			km_nthw_rcp_paired(be->p_km_nthw, km->v7.rcp[category + i].paired);
794 			km_nthw_rcp_el_a(be->p_km_nthw, km->v7.rcp[category + i].el_a);
795 			km_nthw_rcp_el_b(be->p_km_nthw, km->v7.rcp[category + i].el_b);
796 			km_nthw_rcp_info_a(be->p_km_nthw, km->v7.rcp[category + i].info_a);
797 			km_nthw_rcp_info_b(be->p_km_nthw, km->v7.rcp[category + i].info_b);
798 			km_nthw_rcp_ftm_a(be->p_km_nthw, km->v7.rcp[category + i].ftm_a);
799 			km_nthw_rcp_ftm_b(be->p_km_nthw, km->v7.rcp[category + i].ftm_b);
800 			km_nthw_rcp_bank_a(be->p_km_nthw, km->v7.rcp[category + i].bank_a);
801 			km_nthw_rcp_bank_b(be->p_km_nthw, km->v7.rcp[category + i].bank_b);
802 			km_nthw_rcp_kl_a(be->p_km_nthw, km->v7.rcp[category + i].kl_a);
803 			km_nthw_rcp_kl_b(be->p_km_nthw, km->v7.rcp[category + i].kl_b);
804 			km_nthw_rcp_keyway_a(be->p_km_nthw, km->v7.rcp[category + i].keyway_a);
805 			km_nthw_rcp_keyway_b(be->p_km_nthw, km->v7.rcp[category + i].keyway_b);
806 			km_nthw_rcp_synergy_mode(be->p_km_nthw,
807 				km->v7.rcp[category + i].synergy_mode);
808 			km_nthw_rcp_dw0_b_dyn(be->p_km_nthw, km->v7.rcp[category + i].dw0_b_dyn);
809 			km_nthw_rcp_dw0_b_ofs(be->p_km_nthw, km->v7.rcp[category + i].dw0_b_ofs);
810 			km_nthw_rcp_dw2_b_dyn(be->p_km_nthw, km->v7.rcp[category + i].dw2_b_dyn);
811 			km_nthw_rcp_dw2_b_ofs(be->p_km_nthw, km->v7.rcp[category + i].dw2_b_ofs);
812 			km_nthw_rcp_sw4_b_dyn(be->p_km_nthw, km->v7.rcp[category + i].sw4_b_dyn);
813 			km_nthw_rcp_sw4_b_ofs(be->p_km_nthw, km->v7.rcp[category + i].sw4_b_ofs);
814 			km_nthw_rcp_sw5_b_dyn(be->p_km_nthw, km->v7.rcp[category + i].sw5_b_dyn);
815 			km_nthw_rcp_sw5_b_ofs(be->p_km_nthw, km->v7.rcp[category + i].sw5_b_ofs);
816 			km_nthw_rcp_flush(be->p_km_nthw);
817 		}
818 	}
819 
820 	CHECK_DEBUG_OFF(km, be->p_km_nthw);
821 	return 0;
822 }
823 
824 static int km_cam_flush(void *be_dev, const struct km_func_s *km, int bank, int record, int cnt)
825 {
826 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
827 	CHECK_DEBUG_ON(be, km, be->p_km_nthw);
828 
829 	if (km->ver == 7) {
830 		km_nthw_cam_cnt(be->p_km_nthw, 1);
831 
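		/* CAM entries are addressed as (bank << 11) + record, i.e. 2048 record slots per bank in the address encoding. */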
832 		for (int i = 0; i < cnt; i++) {
833 			km_nthw_cam_select(be->p_km_nthw, (bank << 11) + record + i);
834 			km_nthw_cam_w0(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w0);
835 			km_nthw_cam_w1(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w1);
836 			km_nthw_cam_w2(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w2);
837 			km_nthw_cam_w3(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w3);
838 			km_nthw_cam_w4(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w4);
839 			km_nthw_cam_w5(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w5);
840 			km_nthw_cam_ft0(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft0);
841 			km_nthw_cam_ft1(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft1);
842 			km_nthw_cam_ft2(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft2);
843 			km_nthw_cam_ft3(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft3);
844 			km_nthw_cam_ft4(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft4);
845 			km_nthw_cam_ft5(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft5);
846 			km_nthw_cam_flush(be->p_km_nthw);
847 		}
848 	}
849 
850 	CHECK_DEBUG_OFF(km, be->p_km_nthw);
851 	return 0;
852 }
853 
854 static int km_tcam_flush(void *be_dev, const struct km_func_s *km, int bank, int byte, int value,
855 	int cnt)
856 {
857 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
858 	CHECK_DEBUG_ON(be, km, be->p_km_nthw);
859 
860 	if (km->ver == 7) {
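		/* Flattened TCAM index: a stride of 4 * 256 entries per bank and 256 per byte position; only entries marked dirty are written back. */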
861 		int start_idx = bank * 4 * 256 + byte * 256 + value;
862 		km_nthw_tcam_cnt(be->p_km_nthw, 1);
863 
864 		for (int i = 0; i < cnt; i++) {
865 			if (km->v7.tcam[start_idx + i].dirty) {
866 				km_nthw_tcam_select(be->p_km_nthw, start_idx + i);
867 				km_nthw_tcam_t(be->p_km_nthw, km->v7.tcam[start_idx + i].t);
868 				km_nthw_tcam_flush(be->p_km_nthw);
869 				km->v7.tcam[start_idx + i].dirty = 0;
870 			}
871 		}
872 	}
873 
874 	CHECK_DEBUG_OFF(km, be->p_km_nthw);
875 	return 0;
876 }
877 
878 /*
879  * bank is the TCAM bank, index is the index within the bank (0..71)
880  */
881 static int km_tci_flush(void *be_dev, const struct km_func_s *km, int bank, int index, int cnt)
882 {
883 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
884 	CHECK_DEBUG_ON(be, km, be->p_km_nthw);
885 
886 	if (km->ver == 7) {
887 		/* TCAM bank width in version 3 = 72 */
888 		km_nthw_tci_cnt(be->p_km_nthw, 1);
889 
890 		for (int i = 0; i < cnt; i++) {
891 			km_nthw_tci_select(be->p_km_nthw, bank * 72 + index + i);
892 			km_nthw_tci_color(be->p_km_nthw, km->v7.tci[bank * 72 + index + i].color);
893 			km_nthw_tci_ft(be->p_km_nthw, km->v7.tci[bank * 72 + index + i].ft);
894 			km_nthw_tci_flush(be->p_km_nthw);
895 		}
896 	}
897 
898 	CHECK_DEBUG_OFF(km, be->p_km_nthw);
899 	return 0;
900 }
901 
902 /*
903  * bank is the TCAM bank, index is the index within the bank (0..71)
904  */
905 static int km_tcq_flush(void *be_dev, const struct km_func_s *km, int bank, int index, int cnt)
906 {
907 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
908 	CHECK_DEBUG_ON(be, km, be->p_km_nthw);
909 
910 	if (km->ver == 7) {
911 		/* TCAM bank width in version 3 = 72 */
912 		km_nthw_tcq_cnt(be->p_km_nthw, 1);
913 
914 		for (int i = 0; i < cnt; i++) {
915 			/* adr = lower 4 bits = bank, upper 7 bits = index */
916 			km_nthw_tcq_select(be->p_km_nthw, bank + (index << 4) + i);
917 			km_nthw_tcq_bank_mask(be->p_km_nthw,
918 				km->v7.tcq[bank + (index << 4) + i].bank_mask);
919 			km_nthw_tcq_qual(be->p_km_nthw, km->v7.tcq[bank + (index << 4) + i].qual);
920 			km_nthw_tcq_flush(be->p_km_nthw);
921 		}
922 	}
923 
924 	CHECK_DEBUG_OFF(km, be->p_km_nthw);
925 	return 0;
926 }
927 
928 /*
929  * FLM
930  */
931 
932 static bool flm_get_present(void *be_dev)
933 {
934 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
935 	return be->p_flm_nthw != NULL;
936 }
937 
938 static uint32_t flm_get_version(void *be_dev)
939 {
940 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
941 	return (uint32_t)((nthw_module_get_major_version(be->p_flm_nthw->m_flm) << 16) |
942 			(nthw_module_get_minor_version(be->p_flm_nthw->m_flm) & 0xffff));
943 }
944 
945 static int flm_control_flush(void *be_dev, const struct flm_func_s *flm)
946 {
947 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
948 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
949 
950 	if (flm->ver >= 25) {
951 		flm_nthw_control_enable(be->p_flm_nthw, flm->v25.control->enable);
952 		flm_nthw_control_init(be->p_flm_nthw, flm->v25.control->init);
953 		flm_nthw_control_lds(be->p_flm_nthw, flm->v25.control->lds);
954 		flm_nthw_control_lfs(be->p_flm_nthw, flm->v25.control->lfs);
955 		flm_nthw_control_lis(be->p_flm_nthw, flm->v25.control->lis);
956 		flm_nthw_control_uds(be->p_flm_nthw, flm->v25.control->uds);
957 		flm_nthw_control_uis(be->p_flm_nthw, flm->v25.control->uis);
958 		flm_nthw_control_rds(be->p_flm_nthw, flm->v25.control->rds);
959 		flm_nthw_control_ris(be->p_flm_nthw, flm->v25.control->ris);
960 		flm_nthw_control_pds(be->p_flm_nthw, flm->v25.control->pds);
961 		flm_nthw_control_pis(be->p_flm_nthw, flm->v25.control->pis);
962 		flm_nthw_control_crcwr(be->p_flm_nthw, flm->v25.control->crcwr);
963 		flm_nthw_control_crcrd(be->p_flm_nthw, flm->v25.control->crcrd);
964 		flm_nthw_control_rbl(be->p_flm_nthw, flm->v25.control->rbl);
965 		flm_nthw_control_eab(be->p_flm_nthw, flm->v25.control->eab);
966 		flm_nthw_control_split_sdram_usage(be->p_flm_nthw,
967 			flm->v25.control->split_sdram_usage);
968 		flm_nthw_control_flush(be->p_flm_nthw);
969 	}
970 
971 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
972 	return 0;
973 }
974 
975 static int flm_status_flush(void *be_dev, const struct flm_func_s *flm)
976 {
977 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
978 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
979 
980 	if (flm->ver >= 25) {
981 		/* CALIBDONE, INITDONE, IDLE, and EFT_BP are read only */
982 		flm_nthw_status_critical(be->p_flm_nthw, &flm->v25.status->critical, 0);
983 		flm_nthw_status_panic(be->p_flm_nthw, &flm->v25.status->panic, 0);
984 		flm_nthw_status_crcerr(be->p_flm_nthw, &flm->v25.status->crcerr, 0);
985 		flm_nthw_status_cache_buf_crit(be->p_flm_nthw,
986 			&flm->v25.status->cache_buf_critical, 0);
987 		flm_nthw_status_flush(be->p_flm_nthw);
988 	}
989 
990 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
991 	return 0;
992 }
993 
994 static int flm_status_update(void *be_dev, const struct flm_func_s *flm)
995 {
996 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
997 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
998 
999 	if (flm->ver >= 25) {
1000 		flm_nthw_status_update(be->p_flm_nthw);
1001 		flm_nthw_status_calib_success(be->p_flm_nthw, &flm->v25.status->calib_success, 1);
1002 		flm_nthw_status_calib_fail(be->p_flm_nthw, &flm->v25.status->calib_fail, 1);
1003 		flm_nthw_status_initdone(be->p_flm_nthw, &flm->v25.status->initdone, 1);
1004 		flm_nthw_status_idle(be->p_flm_nthw, &flm->v25.status->idle, 1);
1005 		flm_nthw_status_critical(be->p_flm_nthw, &flm->v25.status->critical, 1);
1006 		flm_nthw_status_panic(be->p_flm_nthw, &flm->v25.status->panic, 1);
1007 		flm_nthw_status_crcerr(be->p_flm_nthw, &flm->v25.status->crcerr, 1);
1008 		flm_nthw_status_eft_bp(be->p_flm_nthw, &flm->v25.status->eft_bp, 1);
1009 		flm_nthw_status_cache_buf_crit(be->p_flm_nthw,
1010 			&flm->v25.status->cache_buf_critical, 1);
1011 	}
1012 
1013 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1014 	return 0;
1015 }
1016 
1017 static int flm_scan_flush(void *be_dev, const struct flm_func_s *flm)
1018 {
1019 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1020 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1021 
1022 	if (flm->ver >= 25) {
1023 		flm_nthw_scan_i(be->p_flm_nthw, flm->v25.scan->i);
1024 		flm_nthw_scan_flush(be->p_flm_nthw);
1025 	}
1026 
1027 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1028 	return 0;
1029 }
1030 
1031 static int flm_load_bin_flush(void *be_dev, const struct flm_func_s *flm)
1032 {
1033 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1034 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1035 
1036 	if (flm->ver >= 25) {
1037 		flm_nthw_load_bin(be->p_flm_nthw, flm->v25.load_bin->bin);
1038 		flm_nthw_load_bin_flush(be->p_flm_nthw);
1039 	}
1040 
1041 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1042 	return 0;
1043 }
1044 
1045 static int flm_prio_flush(void *be_dev, const struct flm_func_s *flm)
1046 {
1047 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1048 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1049 
1050 	if (flm->ver >= 25) {
1051 		flm_nthw_prio_limit0(be->p_flm_nthw, flm->v25.prio->limit0);
1052 		flm_nthw_prio_ft0(be->p_flm_nthw, flm->v25.prio->ft0);
1053 		flm_nthw_prio_limit1(be->p_flm_nthw, flm->v25.prio->limit1);
1054 		flm_nthw_prio_ft1(be->p_flm_nthw, flm->v25.prio->ft1);
1055 		flm_nthw_prio_limit2(be->p_flm_nthw, flm->v25.prio->limit2);
1056 		flm_nthw_prio_ft2(be->p_flm_nthw, flm->v25.prio->ft2);
1057 		flm_nthw_prio_limit3(be->p_flm_nthw, flm->v25.prio->limit3);
1058 		flm_nthw_prio_ft3(be->p_flm_nthw, flm->v25.prio->ft3);
1059 		flm_nthw_prio_flush(be->p_flm_nthw);
1060 	}
1061 
1062 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1063 	return 0;
1064 }
1065 
1066 static int flm_pst_flush(void *be_dev, const struct flm_func_s *flm, int index, int cnt)
1067 {
1068 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1069 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1070 
1071 	if (flm->ver >= 25) {
1072 		flm_nthw_pst_cnt(be->p_flm_nthw, 1);
1073 
1074 		for (int i = 0; i < cnt; i++) {
1075 			flm_nthw_pst_select(be->p_flm_nthw, index + i);
1076 			flm_nthw_pst_bp(be->p_flm_nthw, flm->v25.pst[index + i].bp);
1077 			flm_nthw_pst_pp(be->p_flm_nthw, flm->v25.pst[index + i].pp);
1078 			flm_nthw_pst_tp(be->p_flm_nthw, flm->v25.pst[index + i].tp);
1079 			flm_nthw_pst_flush(be->p_flm_nthw);
1080 		}
1081 	}
1082 
1083 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1084 	return 0;
1085 }
1086 
1087 static int flm_rcp_flush(void *be_dev, const struct flm_func_s *flm, int index, int cnt)
1088 {
1089 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1090 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1091 
1092 	if (flm->ver >= 25) {
1093 		flm_nthw_rcp_cnt(be->p_flm_nthw, 1);
1094 
1095 		for (int i = 0; i < cnt; i++) {
1096 			flm_nthw_rcp_select(be->p_flm_nthw, index + i);
1097 			flm_nthw_rcp_lookup(be->p_flm_nthw, flm->v25.rcp[index + i].lookup);
1098 			flm_nthw_rcp_qw0_dyn(be->p_flm_nthw, flm->v25.rcp[index + i].qw0_dyn);
1099 			flm_nthw_rcp_qw0_ofs(be->p_flm_nthw, flm->v25.rcp[index + i].qw0_ofs);
1100 			flm_nthw_rcp_qw0_sel(be->p_flm_nthw, flm->v25.rcp[index + i].qw0_sel);
1101 			flm_nthw_rcp_qw4_dyn(be->p_flm_nthw, flm->v25.rcp[index + i].qw4_dyn);
1102 			flm_nthw_rcp_qw4_ofs(be->p_flm_nthw, flm->v25.rcp[index + i].qw4_ofs);
1103 			flm_nthw_rcp_sw8_dyn(be->p_flm_nthw, flm->v25.rcp[index + i].sw8_dyn);
1104 			flm_nthw_rcp_sw8_ofs(be->p_flm_nthw, flm->v25.rcp[index + i].sw8_ofs);
1105 			flm_nthw_rcp_sw8_sel(be->p_flm_nthw, flm->v25.rcp[index + i].sw8_sel);
1106 			flm_nthw_rcp_sw9_dyn(be->p_flm_nthw, flm->v25.rcp[index + i].sw9_dyn);
1107 			flm_nthw_rcp_sw9_ofs(be->p_flm_nthw, flm->v25.rcp[index + i].sw9_ofs);
1108 			flm_nthw_rcp_mask(be->p_flm_nthw, flm->v25.rcp[index + i].mask);
1109 			flm_nthw_rcp_kid(be->p_flm_nthw, flm->v25.rcp[index + i].kid);
1110 			flm_nthw_rcp_opn(be->p_flm_nthw, flm->v25.rcp[index + i].opn);
1111 			flm_nthw_rcp_ipn(be->p_flm_nthw, flm->v25.rcp[index + i].ipn);
1112 			flm_nthw_rcp_byt_dyn(be->p_flm_nthw, flm->v25.rcp[index + i].byt_dyn);
1113 			flm_nthw_rcp_byt_ofs(be->p_flm_nthw, flm->v25.rcp[index + i].byt_ofs);
1114 			flm_nthw_rcp_txplm(be->p_flm_nthw, flm->v25.rcp[index + i].txplm);
1115 			flm_nthw_rcp_auto_ipv4_mask(be->p_flm_nthw,
1116 				flm->v25.rcp[index + i].auto_ipv4_mask);
1117 			flm_nthw_rcp_flush(be->p_flm_nthw);
1118 		}
1119 	}
1120 
1121 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1122 	return 0;
1123 }
1124 
1125 static int flm_scrub_flush(void *be_dev, const struct flm_func_s *flm, int index, int cnt)
1126 {
1127 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1128 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1129 
1130 	if (flm->ver >= 25) {
1131 		flm_nthw_scrub_cnt(be->p_flm_nthw, 1);
1132 
1133 		for (int i = 0; i < cnt; i++) {
1134 			flm_nthw_scrub_select(be->p_flm_nthw, index + i);
1135 			flm_nthw_scrub_t(be->p_flm_nthw, flm->v25.scrub[index + i].t);
1136 			flm_nthw_scrub_r(be->p_flm_nthw, flm->v25.scrub[index + i].r);
1137 			flm_nthw_scrub_del(be->p_flm_nthw, flm->v25.scrub[index + i].del);
1138 			flm_nthw_scrub_inf(be->p_flm_nthw, flm->v25.scrub[index + i].inf);
1139 			flm_nthw_scrub_flush(be->p_flm_nthw);
1140 		}
1141 	}
1142 
1143 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1144 	return 0;
1145 }
1146 
1147 static int flm_buf_ctrl_update(void *be_dev, const struct flm_func_s *flm)
1148 {
1149 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1150 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1151 
1152 	if (flm->ver >= 25) {
1153 		flm_nthw_buf_ctrl_update(be->p_flm_nthw,
1154 			&flm->v25.buf_ctrl->lrn_free,
1155 			&flm->v25.buf_ctrl->inf_avail,
1156 			&flm->v25.buf_ctrl->sta_avail);
1157 	}
1158 
1159 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1160 	return 0;
1161 }
1162 
1163 static int flm_stat_update(void *be_dev, const struct flm_func_s *flm)
1164 {
1165 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1166 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1167 
1168 	if (flm->ver >= 25) {
1169 		flm_nthw_stat_lrn_done_update(be->p_flm_nthw);
1170 		flm_nthw_stat_lrn_ignore_update(be->p_flm_nthw);
1171 		flm_nthw_stat_lrn_fail_update(be->p_flm_nthw);
1172 		flm_nthw_stat_unl_done_update(be->p_flm_nthw);
1173 		flm_nthw_stat_unl_ignore_update(be->p_flm_nthw);
1174 		flm_nthw_stat_rel_done_update(be->p_flm_nthw);
1175 		flm_nthw_stat_rel_ignore_update(be->p_flm_nthw);
1176 		flm_nthw_stat_aul_done_update(be->p_flm_nthw);
1177 		flm_nthw_stat_aul_ignore_update(be->p_flm_nthw);
1178 		flm_nthw_stat_aul_fail_update(be->p_flm_nthw);
1179 		flm_nthw_stat_tul_done_update(be->p_flm_nthw);
1180 		flm_nthw_stat_flows_update(be->p_flm_nthw);
1181 		flm_nthw_load_lps_update(be->p_flm_nthw);
1182 		flm_nthw_load_aps_update(be->p_flm_nthw);
1183 
1184 		flm_nthw_stat_lrn_done_cnt(be->p_flm_nthw, &flm->v25.lrn_done->cnt, 1);
1185 		flm_nthw_stat_lrn_ignore_cnt(be->p_flm_nthw, &flm->v25.lrn_ignore->cnt, 1);
1186 		flm_nthw_stat_lrn_fail_cnt(be->p_flm_nthw, &flm->v25.lrn_fail->cnt, 1);
1187 		flm_nthw_stat_unl_done_cnt(be->p_flm_nthw, &flm->v25.unl_done->cnt, 1);
1188 		flm_nthw_stat_unl_ignore_cnt(be->p_flm_nthw, &flm->v25.unl_ignore->cnt, 1);
1189 		flm_nthw_stat_rel_done_cnt(be->p_flm_nthw, &flm->v25.rel_done->cnt, 1);
1190 		flm_nthw_stat_rel_ignore_cnt(be->p_flm_nthw, &flm->v25.rel_ignore->cnt, 1);
1191 		flm_nthw_stat_aul_done_cnt(be->p_flm_nthw, &flm->v25.aul_done->cnt, 1);
1192 		flm_nthw_stat_aul_ignore_cnt(be->p_flm_nthw, &flm->v25.aul_ignore->cnt, 1);
1193 		flm_nthw_stat_aul_fail_cnt(be->p_flm_nthw, &flm->v25.aul_fail->cnt, 1);
1194 		flm_nthw_stat_tul_done_cnt(be->p_flm_nthw, &flm->v25.tul_done->cnt, 1);
1195 		flm_nthw_stat_flows_cnt(be->p_flm_nthw, &flm->v25.flows->cnt, 1);
1196 
1197 		flm_nthw_stat_prb_done_update(be->p_flm_nthw);
1198 		flm_nthw_stat_prb_ignore_update(be->p_flm_nthw);
1199 		flm_nthw_stat_prb_done_cnt(be->p_flm_nthw, &flm->v25.prb_done->cnt, 1);
1200 		flm_nthw_stat_prb_ignore_cnt(be->p_flm_nthw, &flm->v25.prb_ignore->cnt, 1);
1201 
1202 		flm_nthw_load_lps_cnt(be->p_flm_nthw, &flm->v25.load_lps->lps, 1);
1203 		flm_nthw_load_aps_cnt(be->p_flm_nthw, &flm->v25.load_aps->aps, 1);
1204 	}
1205 
1206 	if (flm->ver >= 25) {
1207 		flm_nthw_stat_sta_done_update(be->p_flm_nthw);
1208 		flm_nthw_stat_inf_done_update(be->p_flm_nthw);
1209 		flm_nthw_stat_inf_skip_update(be->p_flm_nthw);
1210 		flm_nthw_stat_pck_hit_update(be->p_flm_nthw);
1211 		flm_nthw_stat_pck_miss_update(be->p_flm_nthw);
1212 		flm_nthw_stat_pck_unh_update(be->p_flm_nthw);
1213 		flm_nthw_stat_pck_dis_update(be->p_flm_nthw);
1214 		flm_nthw_stat_csh_hit_update(be->p_flm_nthw);
1215 		flm_nthw_stat_csh_miss_update(be->p_flm_nthw);
1216 		flm_nthw_stat_csh_unh_update(be->p_flm_nthw);
1217 		flm_nthw_stat_cuc_start_update(be->p_flm_nthw);
1218 		flm_nthw_stat_cuc_move_update(be->p_flm_nthw);
1219 
1220 		flm_nthw_stat_sta_done_cnt(be->p_flm_nthw, &flm->v25.sta_done->cnt, 1);
1221 		flm_nthw_stat_inf_done_cnt(be->p_flm_nthw, &flm->v25.inf_done->cnt, 1);
1222 		flm_nthw_stat_inf_skip_cnt(be->p_flm_nthw, &flm->v25.inf_skip->cnt, 1);
1223 		flm_nthw_stat_pck_hit_cnt(be->p_flm_nthw, &flm->v25.pck_hit->cnt, 1);
1224 		flm_nthw_stat_pck_miss_cnt(be->p_flm_nthw, &flm->v25.pck_miss->cnt, 1);
1225 		flm_nthw_stat_pck_unh_cnt(be->p_flm_nthw, &flm->v25.pck_unh->cnt, 1);
1226 		flm_nthw_stat_pck_dis_cnt(be->p_flm_nthw, &flm->v25.pck_dis->cnt, 1);
1227 		flm_nthw_stat_csh_hit_cnt(be->p_flm_nthw, &flm->v25.csh_hit->cnt, 1);
1228 		flm_nthw_stat_csh_miss_cnt(be->p_flm_nthw, &flm->v25.csh_miss->cnt, 1);
1229 		flm_nthw_stat_csh_unh_cnt(be->p_flm_nthw, &flm->v25.csh_unh->cnt, 1);
1230 		flm_nthw_stat_cuc_start_cnt(be->p_flm_nthw, &flm->v25.cuc_start->cnt, 1);
1231 		flm_nthw_stat_cuc_move_cnt(be->p_flm_nthw, &flm->v25.cuc_move->cnt, 1);
1232 	}
1233 
1234 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1235 	return 0;
1236 }
1237 
1238 static int flm_lrn_data_flush(void *be_dev, const struct flm_func_s *flm, const uint32_t *lrn_data,
1239 	uint32_t records, uint32_t *handled_records,
1240 	uint32_t words_per_record, uint32_t *inf_word_cnt,
1241 	uint32_t *sta_word_cnt)
1242 {
1243 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1244 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1245 
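	/* Push the learn records to the FLM learn buffer; the call also refreshes the buffer control mirror, whose INF/STA word counts are reported back to the caller below. */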
1246 	int ret = flm_nthw_lrn_data_flush(be->p_flm_nthw, lrn_data, records, words_per_record,
1247 			handled_records, &flm->v25.buf_ctrl->lrn_free,
1248 			&flm->v25.buf_ctrl->inf_avail,
1249 			&flm->v25.buf_ctrl->sta_avail);
1250 
1251 	*inf_word_cnt = flm->v25.buf_ctrl->inf_avail;
1252 	*sta_word_cnt = flm->v25.buf_ctrl->sta_avail;
1253 
1254 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1255 	return ret;
1256 }
1257 
1258 static int flm_inf_sta_data_update(void *be_dev, const struct flm_func_s *flm, uint32_t *inf_data,
1259 	uint32_t inf_size, uint32_t *inf_word_cnt, uint32_t *sta_data,
1260 	uint32_t sta_size, uint32_t *sta_word_cnt)
1261 {
1262 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1263 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1264 
1265 	int ret = flm_nthw_inf_sta_data_update(be->p_flm_nthw, inf_data, inf_size, sta_data,
1266 			sta_size, &flm->v25.buf_ctrl->lrn_free,
1267 			&flm->v25.buf_ctrl->inf_avail,
1268 			&flm->v25.buf_ctrl->sta_avail);
1269 
1270 	*inf_word_cnt = flm->v25.buf_ctrl->inf_avail;
1271 	*sta_word_cnt = flm->v25.buf_ctrl->sta_avail;
1272 
1273 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1274 	return ret;
1275 }
1276 
1277 /*
1278  * HSH
1279  */
1280 
1281 static bool hsh_get_present(void *be_dev)
1282 {
1283 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1284 	return be->p_hsh_nthw != NULL;
1285 }
1286 
1287 static uint32_t hsh_get_version(void *be_dev)
1288 {
1289 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1290 	return (uint32_t)((nthw_module_get_major_version(be->p_hsh_nthw->m_hsh) << 16) |
1291 			(nthw_module_get_minor_version(be->p_hsh_nthw->m_hsh) & 0xffff));
1292 }
1293 
1294 static int hsh_rcp_flush(void *be_dev, const struct hsh_func_s *hsh, int category, int cnt)
1295 {
1296 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1297 	CHECK_DEBUG_ON(be, hsh, be->p_hsh_nthw);
1298 
1299 	if (hsh->ver == 5) {
1300 		hsh_nthw_rcp_cnt(be->p_hsh_nthw, 1);
1301 
1302 		for (int i = 0; i < cnt; i++) {
1303 			hsh_nthw_rcp_select(be->p_hsh_nthw, category + i);
1304 			hsh_nthw_rcp_load_dist_type(be->p_hsh_nthw,
1305 				hsh->v5.rcp[category + i].load_dist_type);
1306 			hsh_nthw_rcp_mac_port_mask(be->p_hsh_nthw,
1307 				hsh->v5.rcp[category + i].mac_port_mask);
1308 			hsh_nthw_rcp_sort(be->p_hsh_nthw, hsh->v5.rcp[category + i].sort);
1309 			hsh_nthw_rcp_qw0_pe(be->p_hsh_nthw, hsh->v5.rcp[category + i].qw0_pe);
1310 			hsh_nthw_rcp_qw0_ofs(be->p_hsh_nthw, hsh->v5.rcp[category + i].qw0_ofs);
1311 			hsh_nthw_rcp_qw4_pe(be->p_hsh_nthw, hsh->v5.rcp[category + i].qw4_pe);
1312 			hsh_nthw_rcp_qw4_ofs(be->p_hsh_nthw, hsh->v5.rcp[category + i].qw4_ofs);
1313 			hsh_nthw_rcp_w8_pe(be->p_hsh_nthw, hsh->v5.rcp[category + i].w8_pe);
1314 			hsh_nthw_rcp_w8_ofs(be->p_hsh_nthw, hsh->v5.rcp[category + i].w8_ofs);
1315 			hsh_nthw_rcp_w8_sort(be->p_hsh_nthw, hsh->v5.rcp[category + i].w8_sort);
1316 			hsh_nthw_rcp_w9_pe(be->p_hsh_nthw, hsh->v5.rcp[category + i].w9_pe);
1317 			hsh_nthw_rcp_w9_ofs(be->p_hsh_nthw, hsh->v5.rcp[category + i].w9_ofs);
1318 			hsh_nthw_rcp_w9_sort(be->p_hsh_nthw, hsh->v5.rcp[category + i].w9_sort);
1319 			hsh_nthw_rcp_w9_p(be->p_hsh_nthw, hsh->v5.rcp[category + i].w9_p);
1320 			hsh_nthw_rcp_p_mask(be->p_hsh_nthw, hsh->v5.rcp[category + i].p_mask);
1321 			hsh_nthw_rcp_word_mask(be->p_hsh_nthw,
1322 				hsh->v5.rcp[category + i].word_mask);
1323 			hsh_nthw_rcp_seed(be->p_hsh_nthw, hsh->v5.rcp[category + i].seed);
1324 			hsh_nthw_rcp_tnl_p(be->p_hsh_nthw, hsh->v5.rcp[category + i].tnl_p);
1325 			hsh_nthw_rcp_hsh_valid(be->p_hsh_nthw,
1326 				hsh->v5.rcp[category + i].hsh_valid);
1327 			hsh_nthw_rcp_hsh_type(be->p_hsh_nthw, hsh->v5.rcp[category + i].hsh_type);
1328 			hsh_nthw_rcp_toeplitz(be->p_hsh_nthw, hsh->v5.rcp[category + i].toeplitz);
1329 			hsh_nthw_rcp_k(be->p_hsh_nthw, hsh->v5.rcp[category + i].k);
1330 			hsh_nthw_rcp_auto_ipv4_mask(be->p_hsh_nthw,
1331 				hsh->v5.rcp[category + i].auto_ipv4_mask);
1332 			hsh_nthw_rcp_flush(be->p_hsh_nthw);
1333 		}
1334 	}
1335 
1336 	CHECK_DEBUG_OFF(hsh, be->p_hsh_nthw);
1337 	return 0;
1338 }
1339 
1340 /*
1341  * QSL
1342  */
1343 
1344 static bool qsl_get_present(void *be_dev)
1345 {
1346 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1347 	return be->p_qsl_nthw != NULL;
1348 }
1349 
1350 static uint32_t qsl_get_version(void *be_dev)
1351 {
1352 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1353 	return (uint32_t)((nthw_module_get_major_version(be->p_qsl_nthw->m_qsl) << 16) |
1354 			(nthw_module_get_minor_version(be->p_qsl_nthw->m_qsl) & 0xffff));
1355 }
1356 
1357 static int qsl_rcp_flush(void *be_dev, const struct qsl_func_s *qsl, int category, int cnt)
1358 {
1359 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1360 	CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
1361 
1362 	if (qsl->ver == 7) {
1363 		qsl_nthw_rcp_cnt(be->p_qsl_nthw, 1);
1364 
1365 		for (int i = 0; i < cnt; i++) {
1366 			qsl_nthw_rcp_select(be->p_qsl_nthw, category + i);
1367 			qsl_nthw_rcp_discard(be->p_qsl_nthw, qsl->v7.rcp[category + i].discard);
1368 			qsl_nthw_rcp_drop(be->p_qsl_nthw, qsl->v7.rcp[category + i].drop);
1369 			qsl_nthw_rcp_tbl_lo(be->p_qsl_nthw, qsl->v7.rcp[category + i].tbl_lo);
1370 			qsl_nthw_rcp_tbl_hi(be->p_qsl_nthw, qsl->v7.rcp[category + i].tbl_hi);
1371 			qsl_nthw_rcp_tbl_idx(be->p_qsl_nthw, qsl->v7.rcp[category + i].tbl_idx);
1372 			qsl_nthw_rcp_tbl_msk(be->p_qsl_nthw, qsl->v7.rcp[category + i].tbl_msk);
1373 			qsl_nthw_rcp_lr(be->p_qsl_nthw, qsl->v7.rcp[category + i].lr);
1374 			qsl_nthw_rcp_tsa(be->p_qsl_nthw, qsl->v7.rcp[category + i].tsa);
1375 			qsl_nthw_rcp_vli(be->p_qsl_nthw, qsl->v7.rcp[category + i].vli);
1376 			qsl_nthw_rcp_flush(be->p_qsl_nthw);
1377 		}
1378 	}
1379 
1380 	CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
1381 	return 0;
1382 }
1383 
1384 static int qsl_qst_flush(void *be_dev, const struct qsl_func_s *qsl, int entry, int cnt)
1385 {
1386 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1387 	CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
1388 
1389 	if (qsl->ver == 7) {
1390 		qsl_nthw_qst_cnt(be->p_qsl_nthw, 1);
1391 
1392 		for (int i = 0; i < cnt; i++) {
1393 			qsl_nthw_qst_select(be->p_qsl_nthw, entry + i);
1394 			qsl_nthw_qst_queue(be->p_qsl_nthw, qsl->v7.qst[entry + i].queue);
1395 			qsl_nthw_qst_en(be->p_qsl_nthw, qsl->v7.qst[entry + i].en);
1396 
1397 			qsl_nthw_qst_tx_port(be->p_qsl_nthw, qsl->v7.qst[entry + i].tx_port);
1398 			qsl_nthw_qst_lre(be->p_qsl_nthw, qsl->v7.qst[entry + i].lre);
1399 			qsl_nthw_qst_tci(be->p_qsl_nthw, qsl->v7.qst[entry + i].tci);
1400 			qsl_nthw_qst_ven(be->p_qsl_nthw, qsl->v7.qst[entry + i].ven);
1401 			qsl_nthw_qst_flush(be->p_qsl_nthw);
1402 		}
1403 	}
1404 
1405 	CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
1406 	return 0;
1407 }
1408 
1409 static int qsl_qen_flush(void *be_dev, const struct qsl_func_s *qsl, int entry, int cnt)
1410 {
1411 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1412 	CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
1413 
1414 	if (qsl->ver == 7) {
1415 		qsl_nthw_qen_cnt(be->p_qsl_nthw, 1);
1416 
1417 		for (int i = 0; i < cnt; i++) {
1418 			qsl_nthw_qen_select(be->p_qsl_nthw, entry + i);
1419 			qsl_nthw_qen_en(be->p_qsl_nthw, qsl->v7.qen[entry + i].en);
1420 			qsl_nthw_qen_flush(be->p_qsl_nthw);
1421 		}
1422 	}
1423 
1424 	CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
1425 	return 0;
1426 }
1427 
1428 static int qsl_unmq_flush(void *be_dev, const struct qsl_func_s *qsl, int entry, int cnt)
1429 {
1430 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1431 	CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
1432 
1433 	if (qsl->ver == 7) {
1434 		qsl_nthw_unmq_cnt(be->p_qsl_nthw, 1);
1435 
1436 		for (int i = 0; i < cnt; i++) {
1437 			qsl_nthw_unmq_select(be->p_qsl_nthw, entry + i);
1438 			qsl_nthw_unmq_dest_queue(be->p_qsl_nthw,
1439 				qsl->v7.unmq[entry + i].dest_queue);
1440 			qsl_nthw_unmq_en(be->p_qsl_nthw, qsl->v7.unmq[entry + i].en);
1441 			qsl_nthw_unmq_flush(be->p_qsl_nthw);
1442 		}
1443 	}
1444 
1445 	CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
1446 	return 0;
1447 }
1448 
1449 /*
1450  * SLC LR
1451  */
1452 
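/*
 * The SLC LR recipe controls per-category header/tail slicing: enable bits,
 * DYN selectors and byte offsets for head and tail, plus a pcap flag
 * (register layout version 2 only).
 */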
1453 static bool slc_lr_get_present(void *be_dev)
1454 {
1455 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1456 	return be->p_slc_lr_nthw != NULL;
1457 }
1458 
1459 static uint32_t slc_lr_get_version(void *be_dev)
1460 {
1461 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1462 	return (uint32_t)((nthw_module_get_major_version(be->p_slc_lr_nthw->m_slc_lr) << 16) |
1463 			(nthw_module_get_minor_version(be->p_slc_lr_nthw->m_slc_lr) & 0xffff));
1464 }
1465 
1466 static int slc_lr_rcp_flush(void *be_dev, const struct slc_lr_func_s *slc_lr, int category,
1467 	int cnt)
1468 {
1469 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1470 	CHECK_DEBUG_ON(be, slc_lr, be->p_slc_lr_nthw);
1471 
1472 	if (slc_lr->ver == 2) {
1473 		slc_lr_nthw_rcp_cnt(be->p_slc_lr_nthw, 1);
1474 
1475 		for (int i = 0; i < cnt; i++) {
1476 			slc_lr_nthw_rcp_select(be->p_slc_lr_nthw, category + i);
1477 			slc_lr_nthw_rcp_head_slc_en(be->p_slc_lr_nthw,
1478 				slc_lr->v2.rcp[category + i].head_slc_en);
1479 			slc_lr_nthw_rcp_head_dyn(be->p_slc_lr_nthw,
1480 				slc_lr->v2.rcp[category + i].head_dyn);
1481 			slc_lr_nthw_rcp_head_ofs(be->p_slc_lr_nthw,
1482 				slc_lr->v2.rcp[category + i].head_ofs);
1483 			slc_lr_nthw_rcp_tail_slc_en(be->p_slc_lr_nthw,
1484 				slc_lr->v2.rcp[category + i].tail_slc_en);
1485 			slc_lr_nthw_rcp_tail_dyn(be->p_slc_lr_nthw,
1486 				slc_lr->v2.rcp[category + i].tail_dyn);
1487 			slc_lr_nthw_rcp_tail_ofs(be->p_slc_lr_nthw,
1488 				slc_lr->v2.rcp[category + i].tail_ofs);
1489 			slc_lr_nthw_rcp_pcap(be->p_slc_lr_nthw, slc_lr->v2.rcp[category + i].pcap);
1490 			slc_lr_nthw_rcp_flush(be->p_slc_lr_nthw);
1491 		}
1492 	}
1493 
1494 	CHECK_DEBUG_OFF(slc_lr, be->p_slc_lr_nthw);
1495 	return 0;
1496 }
1497 
1498 /*
1499  * PDB
1500  */
1501 
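/*
 * The PDB recipe describes per-category descriptor layout and TX handling:
 * descriptor type and length, up to three dynamic offsets, CRC/alignment
 * options and duplication control. A separate module-wide config holds the
 * timestamp format and port offset (register layout version 9 only).
 */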
1502 static bool pdb_get_present(void *be_dev)
1503 {
1504 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1505 	return be->p_pdb_nthw != NULL;
1506 }
1507 
1508 static uint32_t pdb_get_version(void *be_dev)
1509 {
1510 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1511 	return (uint32_t)((nthw_module_get_major_version(be->p_pdb_nthw->m_pdb) << 16) |
1512 			(nthw_module_get_minor_version(be->p_pdb_nthw->m_pdb) & 0xffff));
1513 }
1514 
1515 static int pdb_rcp_flush(void *be_dev, const struct pdb_func_s *pdb, int category, int cnt)
1516 {
1517 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1518 	CHECK_DEBUG_ON(be, pdb, be->p_pdb_nthw);
1519 
1520 	if (pdb->ver == 9) {
1521 		pdb_nthw_rcp_cnt(be->p_pdb_nthw, 1);
1522 
1523 		for (int i = 0; i < cnt; i++) {
1524 			pdb_nthw_rcp_select(be->p_pdb_nthw, category + i);
1525 			pdb_nthw_rcp_descriptor(be->p_pdb_nthw,
1526 				pdb->v9.rcp[category + i].descriptor);
1527 			pdb_nthw_rcp_desc_len(be->p_pdb_nthw, pdb->v9.rcp[category + i].desc_len);
1528 			pdb_nthw_rcp_tx_port(be->p_pdb_nthw, pdb->v9.rcp[category + i].tx_port);
1529 			pdb_nthw_rcp_tx_ignore(be->p_pdb_nthw,
1530 				pdb->v9.rcp[category + i].tx_ignore);
1531 			pdb_nthw_rcp_tx_now(be->p_pdb_nthw, pdb->v9.rcp[category + i].tx_now);
1532 			pdb_nthw_rcp_crc_overwrite(be->p_pdb_nthw,
1533 				pdb->v9.rcp[category + i].crc_overwrite);
1534 			pdb_nthw_rcp_align(be->p_pdb_nthw, pdb->v9.rcp[category + i].align);
1535 			pdb_nthw_rcp_ofs0_dyn(be->p_pdb_nthw, pdb->v9.rcp[category + i].ofs0_dyn);
1536 			pdb_nthw_rcp_ofs0_rel(be->p_pdb_nthw, pdb->v9.rcp[category + i].ofs0_rel);
1537 			pdb_nthw_rcp_ofs1_dyn(be->p_pdb_nthw, pdb->v9.rcp[category + i].ofs1_dyn);
1538 			pdb_nthw_rcp_ofs1_rel(be->p_pdb_nthw, pdb->v9.rcp[category + i].ofs1_rel);
1539 			pdb_nthw_rcp_ofs2_dyn(be->p_pdb_nthw, pdb->v9.rcp[category + i].ofs2_dyn);
1540 			pdb_nthw_rcp_ofs2_rel(be->p_pdb_nthw, pdb->v9.rcp[category + i].ofs2_rel);
1541 			pdb_nthw_rcp_ip_prot_tnl(be->p_pdb_nthw,
1542 				pdb->v9.rcp[category + i].ip_prot_tnl);
1543 			pdb_nthw_rcp_ppc_hsh(be->p_pdb_nthw, pdb->v9.rcp[category + i].ppc_hsh);
1544 			pdb_nthw_rcp_duplicate_en(be->p_pdb_nthw,
1545 				pdb->v9.rcp[category + i].duplicate_en);
1546 			pdb_nthw_rcp_duplicate_bit(be->p_pdb_nthw,
1547 				pdb->v9.rcp[category + i].duplicate_bit);
1548 			pdb_nthw_rcp_pcap_keep_fcs(be->p_pdb_nthw,
1549 				pdb->v9.rcp[category + i].pcap_keep_fcs);
1550 			pdb_nthw_rcp_flush(be->p_pdb_nthw);
1551 		}
1552 	}
1553 
1554 	CHECK_DEBUG_OFF(pdb, be->p_pdb_nthw);
1555 	return 0;
1556 }
1557 
1558 static int pdb_config_flush(void *be_dev, const struct pdb_func_s *pdb)
1559 {
1560 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1561 	CHECK_DEBUG_ON(be, pdb, be->p_pdb_nthw);
1562 
1563 	if (pdb->ver == 9) {
1564 		pdb_nthw_config_ts_format(be->p_pdb_nthw, pdb->v9.config->ts_format);
1565 		pdb_nthw_config_port_ofs(be->p_pdb_nthw, pdb->v9.config->port_ofs);
1566 		pdb_nthw_config_flush(be->p_pdb_nthw);
1567 	}
1568 
1569 	CHECK_DEBUG_OFF(pdb, be->p_pdb_nthw);
1570 	return 0;
1571 }
1572 
1573 /*
1574  * DBS
1575  */
1576 
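/*
 * DBS queue management is not handled by this backend; the two stubs below
 * only log an error (allocation reports failure, free is a no-op that still
 * returns success).
 */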
1577 static int alloc_rx_queue(void *be_dev, int queue_id)
1578 {
1579 	(void)be_dev;
1580 	(void)queue_id;
1581 	NT_LOG(ERR, FILTER, "ERROR alloc Rx queue");
1582 	return -1;
1583 }
1584 
1585 static int free_rx_queue(void *be_dev, int hw_queue)
1586 {
1587 	(void)be_dev;
1588 	(void)hw_queue;
1589 	NT_LOG(ERR, FILTER, "ERROR free Rx queue");
1590 	return 0;
1591 }
1592 
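/*
 * Operation table handed to the flow API core. It relies on positional
 * initialization, so the entries must stay in the member order of
 * struct flow_api_backend_ops; the leading 1 fills its first (version) member.
 */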
1593 const struct flow_api_backend_ops flow_be_iface = {
1594 	1,
1595 
1596 	set_debug_mode,
1597 	get_nb_phy_ports,
1598 	get_nb_rx_ports,
1599 	get_ltx_avail,
1600 	get_nb_cat_funcs,
1601 	get_nb_categories,
1602 	get_nb_cat_km_if_cnt,
1603 	get_nb_cat_km_if_m0,
1604 	get_nb_cat_km_if_m1,
1605 	get_nb_queues,
1606 	get_nb_km_flow_types,
1607 	get_nb_pm_ext,
1608 	get_nb_len,
1609 	get_kcc_size,
1610 	get_kcc_banks,
1611 	get_nb_km_categories,
1612 	get_nb_km_cam_banks,
1613 	get_nb_km_cam_record_words,
1614 	get_nb_km_cam_records,
1615 	get_nb_km_tcam_banks,
1616 	get_nb_km_tcam_bank_width,
1617 	get_nb_flm_categories,
1618 	get_nb_flm_size_mb,
1619 	get_nb_flm_entry_size,
1620 	get_nb_flm_variant,
1621 	get_nb_flm_prios,
1622 	get_nb_flm_pst_profiles,
1623 	get_nb_flm_scrub_profiles,
1624 	get_nb_flm_load_aps_max,
1625 	get_nb_qsl_categories,
1626 	get_nb_qsl_qst_entries,
1627 	get_nb_pdb_categories,
1628 	get_nb_roa_categories,
1629 	get_nb_tpe_categories,
1630 	get_nb_tx_cpy_writers,
1631 	get_nb_tx_cpy_mask_mem,
1632 	get_nb_tx_rpl_depth,
1633 	get_nb_tx_rpl_ext_categories,
1634 	get_nb_tpe_ifr_categories,
1635 	get_nb_rpp_per_ps,
1636 	get_nb_hsh_categories,
1637 	get_nb_hsh_toeplitz,
1638 
1639 	alloc_rx_queue,
1640 	free_rx_queue,
1641 
1642 	cat_get_present,
1643 	cat_get_version,
1644 	cat_cfn_flush,
1645 
1646 	cat_kce_flush,
1647 	cat_kcs_flush,
1648 	cat_fte_flush,
1649 
1650 	cat_cte_flush,
1651 	cat_cts_flush,
1652 	cat_cot_flush,
1653 	cat_cct_flush,
1654 	cat_exo_flush,
1655 	cat_rck_flush,
1656 	cat_len_flush,
1657 	cat_kcc_flush,
1658 
1659 	km_get_present,
1660 	km_get_version,
1661 	km_rcp_flush,
1662 	km_cam_flush,
1663 	km_tcam_flush,
1664 	km_tci_flush,
1665 	km_tcq_flush,
1666 
1667 	flm_get_present,
1668 	flm_get_version,
1669 	flm_control_flush,
1670 	flm_status_flush,
1671 	flm_status_update,
1672 	flm_scan_flush,
1673 	flm_load_bin_flush,
1674 	flm_prio_flush,
1675 	flm_pst_flush,
1676 	flm_rcp_flush,
1677 	flm_scrub_flush,
1678 	flm_buf_ctrl_update,
1679 	flm_stat_update,
1680 	flm_lrn_data_flush,
1681 	flm_inf_sta_data_update,
1682 
1683 	hsh_get_present,
1684 	hsh_get_version,
1685 	hsh_rcp_flush,
1686 
1687 	qsl_get_present,
1688 	qsl_get_version,
1689 	qsl_rcp_flush,
1690 	qsl_qst_flush,
1691 	qsl_qen_flush,
1692 	qsl_unmq_flush,
1693 
1694 	slc_lr_get_present,
1695 	slc_lr_get_version,
1696 	slc_lr_rcp_flush,
1697 
1698 	pdb_get_present,
1699 	pdb_get_version,
1700 	pdb_rcp_flush,
1701 	pdb_config_flush,
1702 };
1703 
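/*
 * Probe and instantiate the nthw module drivers for one adapter. Each module
 * is first probed with a NULL instance; only if that succeeds is a real
 * instance allocated and initialized, otherwise the module is recorded as
 * absent (NULL pointer) so the *_get_present() callbacks report it missing.
 */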
1704 const struct flow_api_backend_ops *bin_flow_backend_init(nthw_fpga_t *p_fpga, void **dev)
1705 {
1706 	uint8_t physical_adapter_no = (uint8_t)p_fpga->p_fpga_info->adapter_no;
1707 
1708 	struct info_nthw *pinfonthw = info_nthw_new();
1709 	info_nthw_init(pinfonthw, p_fpga, physical_adapter_no);
1710 	be_devs[physical_adapter_no].p_info_nthw = pinfonthw;
1711 
1712 	/* Init nthw CAT */
1713 	if (cat_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1714 		struct cat_nthw *pcatnthw = cat_nthw_new();
1715 		cat_nthw_init(pcatnthw, p_fpga, physical_adapter_no);
1716 		be_devs[physical_adapter_no].p_cat_nthw = pcatnthw;
1717 
1718 	} else {
1719 		be_devs[physical_adapter_no].p_cat_nthw = NULL;
1720 	}
1721 
1722 	/* Init nthw KM */
1723 	if (km_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1724 		struct km_nthw *pkmnthw = km_nthw_new();
1725 		km_nthw_init(pkmnthw, p_fpga, physical_adapter_no);
1726 		be_devs[physical_adapter_no].p_km_nthw = pkmnthw;
1727 
1728 	} else {
1729 		be_devs[physical_adapter_no].p_km_nthw = NULL;
1730 	}
1731 
1732 	/* Init nthw FLM */
1733 	if (flm_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1734 		struct flm_nthw *pflmnthw = flm_nthw_new();
1735 		flm_nthw_init(pflmnthw, p_fpga, physical_adapter_no);
1736 		be_devs[physical_adapter_no].p_flm_nthw = pflmnthw;
1737 
1738 	} else {
1739 		be_devs[physical_adapter_no].p_flm_nthw = NULL;
1740 	}
1741 
1742 	/* Init nthw IFR */
1743 	if (ifr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1744 		struct ifr_nthw *ifrnthw = ifr_nthw_new();
1745 		ifr_nthw_init(ifrnthw, p_fpga, physical_adapter_no);
1746 		be_devs[physical_adapter_no].p_ifr_nthw = ifrnthw;
1747 
1748 	} else {
1749 		be_devs[physical_adapter_no].p_ifr_nthw = NULL;
1750 	}
1751 
1752 	/* Init nthw HSH */
1753 	if (hsh_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1754 		struct hsh_nthw *phshnthw = hsh_nthw_new();
1755 		hsh_nthw_init(phshnthw, p_fpga, physical_adapter_no);
1756 		be_devs[physical_adapter_no].p_hsh_nthw = phshnthw;
1757 
1758 	} else {
1759 		be_devs[physical_adapter_no].p_hsh_nthw = NULL;
1760 	}
1761 
1762 	/* Init nthw QSL */
1763 	if (qsl_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1764 		struct qsl_nthw *pqslnthw = qsl_nthw_new();
1765 		qsl_nthw_init(pqslnthw, p_fpga, physical_adapter_no);
1766 		be_devs[physical_adapter_no].p_qsl_nthw = pqslnthw;
1767 
1768 	} else {
1769 		be_devs[physical_adapter_no].p_qsl_nthw = NULL;
1770 	}
1771 
1772 	/* Init nthw SLC LR */
1773 	if (slc_lr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1774 		struct slc_lr_nthw *pslclrnthw = slc_lr_nthw_new();
1775 		slc_lr_nthw_init(pslclrnthw, p_fpga, physical_adapter_no);
1776 		be_devs[physical_adapter_no].p_slc_lr_nthw = pslclrnthw;
1777 
1778 	} else {
1779 		be_devs[physical_adapter_no].p_slc_lr_nthw = NULL;
1780 	}
1781 
1782 	/* Init nthw PDB */
1783 	if (pdb_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1784 		struct pdb_nthw *ppdbnthw = pdb_nthw_new();
1785 		pdb_nthw_init(ppdbnthw, p_fpga, physical_adapter_no);
1786 		be_devs[physical_adapter_no].p_pdb_nthw = ppdbnthw;
1787 
1788 	} else {
1789 		be_devs[physical_adapter_no].p_pdb_nthw = NULL;
1790 	}
1791 
1792 	/* Init nthw HFU */
1793 	if (hfu_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1794 		struct hfu_nthw *ptr = hfu_nthw_new();
1795 		hfu_nthw_init(ptr, p_fpga, physical_adapter_no);
1796 		be_devs[physical_adapter_no].p_hfu_nthw = ptr;
1797 
1798 	} else {
1799 		be_devs[physical_adapter_no].p_hfu_nthw = NULL;
1800 	}
1801 
1802 	/* Init nthw RPP_LR */
1803 	if (rpp_lr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1804 		struct rpp_lr_nthw *ptr = rpp_lr_nthw_new();
1805 		rpp_lr_nthw_init(ptr, p_fpga, physical_adapter_no);
1806 		be_devs[physical_adapter_no].p_rpp_lr_nthw = ptr;
1807 
1808 	} else {
1809 		be_devs[physical_adapter_no].p_rpp_lr_nthw = NULL;
1810 	}
1811 
1812 	/* Init nthw TX_CPY */
1813 	if (tx_cpy_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1814 		struct tx_cpy_nthw *ptr = tx_cpy_nthw_new();
1815 		tx_cpy_nthw_init(ptr, p_fpga, physical_adapter_no);
1816 		be_devs[physical_adapter_no].p_tx_cpy_nthw = ptr;
1817 
1818 	} else {
1819 		be_devs[physical_adapter_no].p_tx_cpy_nthw = NULL;
1820 	}
1821 
1822 	be_devs[physical_adapter_no].adapter_no = physical_adapter_no;
1823 	*dev = (void *)&be_devs[physical_adapter_no];
1824 
1825 	return &flow_be_iface;
1826 }
1827 
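/*
 * Tear down the per-adapter module instances allocated in
 * bin_flow_backend_init() (the IFR instance has no matching delete here).
 */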
1828 static void bin_flow_backend_done(void *dev)
1829 {
1830 	struct backend_dev_s *be_dev = (struct backend_dev_s *)dev;
1831 	info_nthw_delete(be_dev->p_info_nthw);
1832 	cat_nthw_delete(be_dev->p_cat_nthw);
1833 	km_nthw_delete(be_dev->p_km_nthw);
1834 	flm_nthw_delete(be_dev->p_flm_nthw);
1835 	hsh_nthw_delete(be_dev->p_hsh_nthw);
1836 	qsl_nthw_delete(be_dev->p_qsl_nthw);
1837 	slc_lr_nthw_delete(be_dev->p_slc_lr_nthw);
1838 	pdb_nthw_delete(be_dev->p_pdb_nthw);
1839 	hfu_nthw_delete(be_dev->p_hfu_nthw);
1840 	rpp_lr_nthw_delete(be_dev->p_rpp_lr_nthw);
1841 	tx_cpy_nthw_delete(be_dev->p_tx_cpy_nthw);
1842 }
1843 
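/* The init/done entry points are exposed through the ntnic module registry by flow_backend_init(). */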
1844 static const struct flow_backend_ops ops = {
1845 	.bin_flow_backend_init = bin_flow_backend_init,
1846 	.bin_flow_backend_done = bin_flow_backend_done,
1847 };
1848 
1849 void flow_backend_init(void)
1850 {
1851 	register_flow_backend_ops(&ops);
1852 }
1853