xref: /dpdk/drivers/net/ntnic/nthw/flow_api/flow_backend/flow_backend.c (revision cec43fab911c9ff28cb2d00a72c306572f1d09e9)
1 /*
2  * SPDX-License-Identifier: BSD-3-Clause
3  * Copyright(c) 2023 Napatech A/S
4  */
5 
6 #include <stdint.h>
7 
8 #include "flow_nthw_info.h"
9 #include "flow_nthw_cat.h"
10 #include "flow_nthw_km.h"
11 #include "flow_nthw_flm.h"
12 #include "ntnic_mod_reg.h"
13 #include "nthw_fpga_model.h"
14 #include "hw_mod_backend.h"
15 
16 /*
17  * Binary Flow API backend implementation into ntservice driver
18  *
19  * General note on this backend implementation:
20  * Maybe use shadow class to combine multiple writes. However, this backend is only for dev/testing
21  */
22 
/* Per-adapter backend state: handles to the NTHW FPGA modules this backend drives. */
static struct backend_dev_s {
	uint8_t adapter_no;	/* physical adapter index this entry belongs to */
	enum debug_mode_e dmode;	/* debug flags set via set_debug_mode(); read by CHECK_DEBUG_ON() */
	struct info_nthw *p_info_nthw;	/* INFO module handle (capability/resource-count queries) */
	struct cat_nthw *p_cat_nthw;	/* CAT module handle */
	struct km_nthw *p_km_nthw;	/* KM module handle */
	struct flm_nthw *p_flm_nthw;	/* FLM module handle */
} be_devs[MAX_PHYS_ADAPTERS];
31 
/*
 * Turn on register-write debug tracing for a module instance when either the
 * backend debug mode has FLOW_BACKEND_DEBUG_MODE_WRITE set or the module's own
 * shadow-state debug flag is set.
 *
 * Deliberately NOT wrapped in do { } while (0): it must declare a local flag
 * in the caller's scope, which CHECK_DEBUG_OFF() later consults.  The two
 * macros must therefore be used as a pair within the same block, with
 * CHECK_DEBUG_ON() placed where a declaration is valid.
 *
 * The flag was previously named "__debug__"; identifiers starting with a
 * double underscore are reserved for the implementation (ISO C 7.1.3), so a
 * non-reserved name is used instead.
 */
#define CHECK_DEBUG_ON(be, mod, inst)                                                             \
	int ctx_debug_enabled = 0;                                                                \
	if (((be)->dmode & FLOW_BACKEND_DEBUG_MODE_WRITE) || (mod)->debug)                        \
		do {                                                                              \
			mod##_nthw_set_debug_mode((inst), 0xFF);                                  \
			ctx_debug_enabled = 1;                                                    \
		} while (0)

/* Counterpart of CHECK_DEBUG_ON(): disable tracing again if it was enabled. */
#define CHECK_DEBUG_OFF(mod, inst)                                                                \
	do {                                                                                      \
		if (ctx_debug_enabled)                                                            \
			mod##_nthw_set_debug_mode((inst), 0);                                     \
	} while (0)
45 
46 const struct flow_api_backend_ops *bin_flow_backend_init(nthw_fpga_t *p_fpga, void **be_dev);
47 static void bin_flow_backend_done(void *be_dev);
48 
49 static int set_debug_mode(void *be_dev, enum debug_mode_e mode)
50 {
51 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
52 	be->dmode = mode;
53 	return 0;
54 }
55 
56 /*
57  * INFO
58  */
59 
60 static int get_nb_phy_ports(void *be_dev)
61 {
62 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
63 	return info_nthw_get_nb_phy_ports(be->p_info_nthw);
64 }
65 
66 static int get_nb_rx_ports(void *be_dev)
67 {
68 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
69 	return info_nthw_get_nb_rx_ports(be->p_info_nthw);
70 }
71 
72 static int get_ltx_avail(void *be_dev)
73 {
74 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
75 	return info_nthw_get_ltx_avail(be->p_info_nthw);
76 }
77 
78 static int get_nb_cat_funcs(void *be_dev)
79 {
80 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
81 	return info_nthw_get_nb_cat_funcs(be->p_info_nthw);
82 }
83 
84 static int get_nb_categories(void *be_dev)
85 {
86 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
87 	return info_nthw_get_nb_categories(be->p_info_nthw);
88 }
89 
90 static int get_nb_cat_km_if_cnt(void *be_dev)
91 {
92 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
93 	return info_nthw_get_nb_cat_km_if_cnt(be->p_info_nthw);
94 }
95 
96 static int get_nb_cat_km_if_m0(void *be_dev)
97 {
98 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
99 	return info_nthw_get_nb_cat_km_if_m0(be->p_info_nthw);
100 }
101 
102 static int get_nb_cat_km_if_m1(void *be_dev)
103 {
104 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
105 	return info_nthw_get_nb_cat_km_if_m1(be->p_info_nthw);
106 }
107 
108 static int get_nb_queues(void *be_dev)
109 {
110 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
111 	return info_nthw_get_nb_queues(be->p_info_nthw);
112 }
113 
114 static int get_nb_km_flow_types(void *be_dev)
115 {
116 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
117 	return info_nthw_get_nb_km_flow_types(be->p_info_nthw);
118 }
119 
120 static int get_nb_pm_ext(void *be_dev)
121 {
122 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
123 	return info_nthw_get_nb_pm_ext(be->p_info_nthw);
124 }
125 
126 static int get_nb_len(void *be_dev)
127 {
128 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
129 	return info_nthw_get_nb_len(be->p_info_nthw);
130 }
131 
132 static int get_kcc_size(void *be_dev)
133 {
134 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
135 	return info_nthw_get_kcc_size(be->p_info_nthw);
136 }
137 
138 static int get_kcc_banks(void *be_dev)
139 {
140 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
141 	return info_nthw_get_kcc_banks(be->p_info_nthw);
142 }
143 
144 static int get_nb_km_categories(void *be_dev)
145 {
146 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
147 	return info_nthw_get_nb_km_categories(be->p_info_nthw);
148 }
149 
150 static int get_nb_km_cam_banks(void *be_dev)
151 {
152 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
153 	return info_nthw_get_nb_km_cam_banks(be->p_info_nthw);
154 }
155 
156 static int get_nb_km_cam_record_words(void *be_dev)
157 {
158 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
159 	return info_nthw_get_nb_km_cam_record_words(be->p_info_nthw);
160 }
161 
162 static int get_nb_km_cam_records(void *be_dev)
163 {
164 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
165 	return info_nthw_get_nb_km_cam_records(be->p_info_nthw);
166 }
167 
168 static int get_nb_km_tcam_banks(void *be_dev)
169 {
170 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
171 	return info_nthw_get_nb_km_tcam_banks(be->p_info_nthw);
172 }
173 
174 static int get_nb_km_tcam_bank_width(void *be_dev)
175 {
176 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
177 	return info_nthw_get_nb_km_tcam_bank_width(be->p_info_nthw);
178 }
179 
180 static int get_nb_flm_categories(void *be_dev)
181 {
182 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
183 	return info_nthw_get_nb_flm_categories(be->p_info_nthw);
184 }
185 
186 static int get_nb_flm_size_mb(void *be_dev)
187 {
188 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
189 	return info_nthw_get_nb_flm_size_mb(be->p_info_nthw);
190 }
191 
192 static int get_nb_flm_entry_size(void *be_dev)
193 {
194 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
195 	return info_nthw_get_nb_flm_entry_size(be->p_info_nthw);
196 }
197 
198 static int get_nb_flm_variant(void *be_dev)
199 {
200 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
201 	return info_nthw_get_nb_flm_variant(be->p_info_nthw);
202 }
203 
204 static int get_nb_flm_prios(void *be_dev)
205 {
206 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
207 	return info_nthw_get_nb_flm_prios(be->p_info_nthw);
208 }
209 
210 static int get_nb_flm_pst_profiles(void *be_dev)
211 {
212 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
213 	return info_nthw_get_nb_flm_pst_profiles(be->p_info_nthw);
214 }
215 
216 static int get_nb_flm_scrub_profiles(void *be_dev)
217 {
218 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
219 	return info_nthw_get_nb_flm_scrub_profiles(be->p_info_nthw);
220 }
221 
222 static int get_nb_flm_load_aps_max(void *be_dev)
223 {
224 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
225 	return info_nthw_get_nb_flm_load_aps_max(be->p_info_nthw);
226 }
227 
228 static int get_nb_qsl_categories(void *be_dev)
229 {
230 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
231 	return info_nthw_get_nb_qsl_categories(be->p_info_nthw);
232 }
233 
234 static int get_nb_qsl_qst_entries(void *be_dev)
235 {
236 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
237 	return info_nthw_get_nb_qsl_qst_entries(be->p_info_nthw);
238 }
239 
240 static int get_nb_pdb_categories(void *be_dev)
241 {
242 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
243 	return info_nthw_get_nb_pdb_categories(be->p_info_nthw);
244 }
245 
246 static int get_nb_roa_categories(void *be_dev)
247 {
248 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
249 	return info_nthw_get_nb_roa_categories(be->p_info_nthw);
250 }
251 
252 static int get_nb_tpe_categories(void *be_dev)
253 {
254 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
255 	return info_nthw_get_nb_tpe_categories(be->p_info_nthw);
256 }
257 
258 static int get_nb_tx_cpy_writers(void *be_dev)
259 {
260 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
261 	return info_nthw_get_nb_tx_cpy_writers(be->p_info_nthw);
262 }
263 
264 static int get_nb_tx_cpy_mask_mem(void *be_dev)
265 {
266 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
267 	return info_nthw_get_nb_tx_cpy_mask_mem(be->p_info_nthw);
268 }
269 
270 static int get_nb_tx_rpl_depth(void *be_dev)
271 {
272 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
273 	return info_nthw_get_nb_tx_rpl_depth(be->p_info_nthw);
274 }
275 
276 static int get_nb_tx_rpl_ext_categories(void *be_dev)
277 {
278 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
279 	return info_nthw_get_nb_tx_rpl_ext_categories(be->p_info_nthw);
280 }
281 
282 static int get_nb_tpe_ifr_categories(void *be_dev)
283 {
284 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
285 	return info_nthw_get_nb_tpe_ifr_categories(be->p_info_nthw);
286 }
287 
288 static int get_nb_rpp_per_ps(void *be_dev)
289 {
290 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
291 	return info_nthw_get_nb_rpp_per_ps(be->p_info_nthw);
292 }
293 
294 static int get_nb_hsh_categories(void *be_dev)
295 {
296 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
297 	return info_nthw_get_nb_hsh_categories(be->p_info_nthw);
298 }
299 
300 static int get_nb_hsh_toeplitz(void *be_dev)
301 {
302 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
303 	return info_nthw_get_nb_hsh_toeplitz(be->p_info_nthw);
304 }
305 
306 /*
307  * CAT
308  */
309 
310 static bool cat_get_present(void *be_dev)
311 {
312 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
313 	return be->p_cat_nthw != NULL;
314 }
315 
316 static uint32_t cat_get_version(void *be_dev)
317 {
318 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
319 	return (uint32_t)((nthw_module_get_major_version(be->p_cat_nthw->m_cat) << 16) |
320 			(nthw_module_get_minor_version(be->p_cat_nthw->m_cat) & 0xffff));
321 }
322 
/*
 * Flush 'cnt' consecutive CFN records, starting at 'cat_func', from the
 * shadow state in 'cat' to the CAT hardware module.  The register set differs
 * between the two supported versions (18 and 21); any other version is a
 * silent no-op and the function still returns 0.
 */
static int cat_cfn_flush(void *be_dev, const struct cat_func_s *cat, int cat_func, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;

	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);

	if (cat->ver == 18) {
		/* presumably the record count per flush; set once, then one record per loop pass */
		cat_nthw_cfn_cnt(be->p_cat_nthw, 1U);

		for (int i = 0; i < cnt; i++) {
			cat_nthw_cfn_select(be->p_cat_nthw, cat_func);
			cat_nthw_cfn_enable(be->p_cat_nthw, cat->v18.cfn[cat_func].enable);
			cat_nthw_cfn_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].inv);
			cat_nthw_cfn_ptc_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_inv);
			cat_nthw_cfn_ptc_isl(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_isl);
			cat_nthw_cfn_ptc_cfp(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_cfp);
			cat_nthw_cfn_ptc_mac(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_mac);
			cat_nthw_cfn_ptc_l2(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_l2);
			cat_nthw_cfn_ptc_vn_tag(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_vntag);
			cat_nthw_cfn_ptc_vlan(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_vlan);
			cat_nthw_cfn_ptc_mpls(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_mpls);
			cat_nthw_cfn_ptc_l3(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_l3);
			cat_nthw_cfn_ptc_frag(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_frag);
			cat_nthw_cfn_ptc_ip_prot(be->p_cat_nthw,
				cat->v18.cfn[cat_func].ptc_ip_prot);
			cat_nthw_cfn_ptc_l4(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_l4);
			cat_nthw_cfn_ptc_tunnel(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tunnel);
			cat_nthw_cfn_ptc_tnl_l2(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_l2);
			cat_nthw_cfn_ptc_tnl_vlan(be->p_cat_nthw,
				cat->v18.cfn[cat_func].ptc_tnl_vlan);
			cat_nthw_cfn_ptc_tnl_mpls(be->p_cat_nthw,
				cat->v18.cfn[cat_func].ptc_tnl_mpls);
			cat_nthw_cfn_ptc_tnl_l3(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_l3);
			cat_nthw_cfn_ptc_tnl_frag(be->p_cat_nthw,
				cat->v18.cfn[cat_func].ptc_tnl_frag);
			cat_nthw_cfn_ptc_tnl_ip_prot(be->p_cat_nthw,
				cat->v18.cfn[cat_func].ptc_tnl_ip_prot);
			cat_nthw_cfn_ptc_tnl_l4(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_l4);

			cat_nthw_cfn_err_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].err_inv);
			cat_nthw_cfn_err_cv(be->p_cat_nthw, cat->v18.cfn[cat_func].err_cv);
			cat_nthw_cfn_err_fcs(be->p_cat_nthw, cat->v18.cfn[cat_func].err_fcs);
			cat_nthw_cfn_err_trunc(be->p_cat_nthw, cat->v18.cfn[cat_func].err_trunc);
			cat_nthw_cfn_err_l3_cs(be->p_cat_nthw, cat->v18.cfn[cat_func].err_l3_cs);
			cat_nthw_cfn_err_l4_cs(be->p_cat_nthw, cat->v18.cfn[cat_func].err_l4_cs);

			cat_nthw_cfn_mac_port(be->p_cat_nthw, cat->v18.cfn[cat_func].mac_port);

			cat_nthw_cfn_pm_cmp(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_cmp);
			cat_nthw_cfn_pm_dct(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_dct);
			cat_nthw_cfn_pm_ext_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_ext_inv);
			cat_nthw_cfn_pm_cmb(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_cmb);
			cat_nthw_cfn_pm_and_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_and_inv);
			cat_nthw_cfn_pm_or_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_or_inv);
			cat_nthw_cfn_pm_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_inv);

			cat_nthw_cfn_lc(be->p_cat_nthw, cat->v18.cfn[cat_func].lc);
			cat_nthw_cfn_lc_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].lc_inv);
			/* v18 has a single KM interface, hence only km0 */
			cat_nthw_cfn_km0_or(be->p_cat_nthw, cat->v18.cfn[cat_func].km_or);
			cat_nthw_cfn_flush(be->p_cat_nthw);
			cat_func++;
		}

	} else if (cat->ver == 21) {
		cat_nthw_cfn_cnt(be->p_cat_nthw, 1U);

		for (int i = 0; i < cnt; i++) {
			cat_nthw_cfn_select(be->p_cat_nthw, cat_func);
			cat_nthw_cfn_enable(be->p_cat_nthw, cat->v21.cfn[cat_func].enable);
			cat_nthw_cfn_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].inv);
			cat_nthw_cfn_ptc_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_inv);
			cat_nthw_cfn_ptc_isl(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_isl);
			cat_nthw_cfn_ptc_cfp(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_cfp);
			cat_nthw_cfn_ptc_mac(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_mac);
			cat_nthw_cfn_ptc_l2(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_l2);
			cat_nthw_cfn_ptc_vn_tag(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_vntag);
			cat_nthw_cfn_ptc_vlan(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_vlan);
			cat_nthw_cfn_ptc_mpls(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_mpls);
			cat_nthw_cfn_ptc_l3(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_l3);
			cat_nthw_cfn_ptc_frag(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_frag);
			cat_nthw_cfn_ptc_ip_prot(be->p_cat_nthw,
				cat->v21.cfn[cat_func].ptc_ip_prot);
			cat_nthw_cfn_ptc_l4(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_l4);
			cat_nthw_cfn_ptc_tunnel(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tunnel);
			cat_nthw_cfn_ptc_tnl_l2(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_l2);
			cat_nthw_cfn_ptc_tnl_vlan(be->p_cat_nthw,
				cat->v21.cfn[cat_func].ptc_tnl_vlan);
			cat_nthw_cfn_ptc_tnl_mpls(be->p_cat_nthw,
				cat->v21.cfn[cat_func].ptc_tnl_mpls);
			cat_nthw_cfn_ptc_tnl_l3(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_l3);
			cat_nthw_cfn_ptc_tnl_frag(be->p_cat_nthw,
				cat->v21.cfn[cat_func].ptc_tnl_frag);
			cat_nthw_cfn_ptc_tnl_ip_prot(be->p_cat_nthw,
				cat->v21.cfn[cat_func].ptc_tnl_ip_prot);
			cat_nthw_cfn_ptc_tnl_l4(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_l4);

			cat_nthw_cfn_err_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].err_inv);
			cat_nthw_cfn_err_cv(be->p_cat_nthw, cat->v21.cfn[cat_func].err_cv);
			cat_nthw_cfn_err_fcs(be->p_cat_nthw, cat->v21.cfn[cat_func].err_fcs);
			cat_nthw_cfn_err_trunc(be->p_cat_nthw, cat->v21.cfn[cat_func].err_trunc);
			cat_nthw_cfn_err_l3_cs(be->p_cat_nthw, cat->v21.cfn[cat_func].err_l3_cs);
			cat_nthw_cfn_err_l4_cs(be->p_cat_nthw, cat->v21.cfn[cat_func].err_l4_cs);
			/* v21-only error fields (tunnel checksum and TTL expiry) */
			cat_nthw_cfn_err_tnl_l3_cs(be->p_cat_nthw,
				cat->v21.cfn[cat_func].err_tnl_l3_cs);
			cat_nthw_cfn_err_tnl_l4_cs(be->p_cat_nthw,
				cat->v21.cfn[cat_func].err_tnl_l4_cs);
			cat_nthw_cfn_err_ttl_exp(be->p_cat_nthw,
				cat->v21.cfn[cat_func].err_ttl_exp);
			cat_nthw_cfn_err_tnl_ttl_exp(be->p_cat_nthw,
				cat->v21.cfn[cat_func].err_tnl_ttl_exp);

			cat_nthw_cfn_mac_port(be->p_cat_nthw, cat->v21.cfn[cat_func].mac_port);

			cat_nthw_cfn_pm_cmp(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_cmp);
			cat_nthw_cfn_pm_dct(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_dct);
			cat_nthw_cfn_pm_ext_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_ext_inv);
			cat_nthw_cfn_pm_cmb(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_cmb);
			cat_nthw_cfn_pm_and_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_and_inv);
			cat_nthw_cfn_pm_or_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_or_inv);
			cat_nthw_cfn_pm_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_inv);

			cat_nthw_cfn_lc(be->p_cat_nthw, cat->v21.cfn[cat_func].lc);
			cat_nthw_cfn_lc_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].lc_inv);
			cat_nthw_cfn_km0_or(be->p_cat_nthw, cat->v21.cfn[cat_func].km0_or);

			/* second KM interface register exists only when the FPGA has >1 KM i/f */
			if (be->p_cat_nthw->m_km_if_cnt > 1)
				cat_nthw_cfn_km1_or(be->p_cat_nthw, cat->v21.cfn[cat_func].km1_or);

			cat_nthw_cfn_flush(be->p_cat_nthw);
			cat_func++;
		}
	}

	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
	return 0;
}
459 
/*
 * Flush 'cnt' KCE entries starting at 'index' to hardware.  Version 18 has a
 * single KM interface (hard-coded interface 0 and a scalar enable bitmap);
 * version 21 uses the interface selected by 'km_if_idx' and the per-interface
 * enable bitmap.  Other versions are a silent no-op; always returns 0.
 */
static int cat_kce_flush(void *be_dev, const struct cat_func_s *cat, int km_if_idx, int index,
	int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);

	if (cat->ver == 18) {
		cat_nthw_kce_cnt(be->p_cat_nthw, 0, 1U);

		for (int i = 0; i < cnt; i++) {
			cat_nthw_kce_select(be->p_cat_nthw, 0, index + i);
			cat_nthw_kce_enable(be->p_cat_nthw, 0, cat->v18.kce[index + i].enable_bm);
			cat_nthw_kce_flush(be->p_cat_nthw, 0);
		}

	} else if (cat->ver == 21) {
		cat_nthw_kce_cnt(be->p_cat_nthw, km_if_idx, 1U);

		for (int i = 0; i < cnt; i++) {
			cat_nthw_kce_select(be->p_cat_nthw, km_if_idx, index + i);
			cat_nthw_kce_enable(be->p_cat_nthw, km_if_idx,
				cat->v21.kce[index + i].enable_bm[km_if_idx]);
			cat_nthw_kce_flush(be->p_cat_nthw, km_if_idx);
		}
	}

	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
	return 0;
}
489 
/*
 * Flush 'cnt' KCS category records starting at 'cat_func'.  As with KCE,
 * version 18 always uses KM interface 0, while version 21 writes the category
 * slot belonging to 'km_if_idx'.  Unsupported versions are a no-op; returns 0.
 */
static int cat_kcs_flush(void *be_dev, const struct cat_func_s *cat, int km_if_idx, int cat_func,
	int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);

	if (cat->ver == 18) {
		cat_nthw_kcs_cnt(be->p_cat_nthw, 0, 1U);

		for (int i = 0; i < cnt; i++) {
			cat_nthw_kcs_select(be->p_cat_nthw, 0, cat_func);
			cat_nthw_kcs_category(be->p_cat_nthw, 0, cat->v18.kcs[cat_func].category);
			cat_nthw_kcs_flush(be->p_cat_nthw, 0);
			cat_func++;
		}

	} else if (cat->ver == 21) {
		cat_nthw_kcs_cnt(be->p_cat_nthw, km_if_idx, 1U);

		for (int i = 0; i < cnt; i++) {
			cat_nthw_kcs_select(be->p_cat_nthw, km_if_idx, cat_func);
			cat_nthw_kcs_category(be->p_cat_nthw, km_if_idx,
				cat->v21.kcs[cat_func].category[km_if_idx]);
			cat_nthw_kcs_flush(be->p_cat_nthw, km_if_idx);
			cat_func++;
		}
	}

	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
	return 0;
}
521 
/*
 * Flush 'cnt' FTE enable-bitmap entries starting at 'index'.  Version 18 uses
 * KM interface 0 only; version 21 uses the per-interface bitmap indexed by
 * 'km_if_idx'.  Unsupported versions are a no-op; returns 0.
 */
static int cat_fte_flush(void *be_dev, const struct cat_func_s *cat, int km_if_idx, int index,
	int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);

	if (cat->ver == 18) {
		cat_nthw_fte_cnt(be->p_cat_nthw, 0, 1);

		for (int i = 0; i < cnt; i++) {
			cat_nthw_fte_select(be->p_cat_nthw, 0, index + i);
			cat_nthw_fte_enable(be->p_cat_nthw, 0, cat->v18.fte[index + i].enable_bm);
			cat_nthw_fte_flush(be->p_cat_nthw, 0);
		}

	} else if (cat->ver == 21) {
		cat_nthw_fte_cnt(be->p_cat_nthw, km_if_idx, 1);

		for (int i = 0; i < cnt; i++) {
			cat_nthw_fte_select(be->p_cat_nthw, km_if_idx, index + i);
			cat_nthw_fte_enable(be->p_cat_nthw, km_if_idx,
				cat->v21.fte[index + i].enable_bm[km_if_idx]);
			cat_nthw_fte_flush(be->p_cat_nthw, km_if_idx);
		}
	}

	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
	return 0;
}
551 
/*
 * Flush 'cnt' CTE (enable-bit) records starting at 'cat_func'.
 * NOTE(review): the v18 shadow is read for both version 18 and 21 — this
 * relies on the CTE layout being identical across the two versions; confirm
 * against the hw_mod_backend definitions if the struct ever changes.
 */
static int cat_cte_flush(void *be_dev, const struct cat_func_s *cat, int cat_func, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);

	if (cat->ver == 18 || cat->ver == 21) {
		cat_nthw_cte_cnt(be->p_cat_nthw, 1);

		for (int i = 0; i < cnt; i++) {
			cat_nthw_cte_select(be->p_cat_nthw, cat_func);
			cat_nthw_cte_enable_col(be->p_cat_nthw, cat->v18.cte[cat_func].b.col);
			cat_nthw_cte_enable_cor(be->p_cat_nthw, cat->v18.cte[cat_func].b.cor);
			cat_nthw_cte_enable_hsh(be->p_cat_nthw, cat->v18.cte[cat_func].b.hsh);
			cat_nthw_cte_enable_qsl(be->p_cat_nthw, cat->v18.cte[cat_func].b.qsl);
			cat_nthw_cte_enable_ipf(be->p_cat_nthw, cat->v18.cte[cat_func].b.ipf);
			cat_nthw_cte_enable_slc(be->p_cat_nthw, cat->v18.cte[cat_func].b.slc);
			cat_nthw_cte_enable_pdb(be->p_cat_nthw, cat->v18.cte[cat_func].b.pdb);
			cat_nthw_cte_enable_msk(be->p_cat_nthw, cat->v18.cte[cat_func].b.msk);
			cat_nthw_cte_enable_hst(be->p_cat_nthw, cat->v18.cte[cat_func].b.hst);
			cat_nthw_cte_enable_epp(be->p_cat_nthw, cat->v18.cte[cat_func].b.epp);
			cat_nthw_cte_enable_tpe(be->p_cat_nthw, cat->v18.cte[cat_func].b.tpe);

			cat_nthw_cte_flush(be->p_cat_nthw);
			cat_func++;
		}
	}

	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
	return 0;
}
582 
/*
 * Flush 'cnt' CTS records (cat_a/cat_b pairs) starting at 'index'.
 * The v18 shadow layout is shared by versions 18 and 21.  Returns 0 always.
 */
static int cat_cts_flush(void *be_dev, const struct cat_func_s *cat, int index, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);

	if (cat->ver == 18 || cat->ver == 21) {
		cat_nthw_cts_cnt(be->p_cat_nthw, 1);

		for (int i = 0; i < cnt; i++) {
			cat_nthw_cts_select(be->p_cat_nthw, index + i);
			cat_nthw_cts_cat_a(be->p_cat_nthw, cat->v18.cts[index + i].cat_a);
			cat_nthw_cts_cat_b(be->p_cat_nthw, cat->v18.cts[index + i].cat_b);
			cat_nthw_cts_flush(be->p_cat_nthw);
		}
	}

	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
	return 0;
}
602 
/*
 * Flush 'cnt' COT records (color/km) starting at 'cat_func'.
 * The v18 shadow layout is shared by versions 18 and 21.  Returns 0 always.
 */
static int cat_cot_flush(void *be_dev, const struct cat_func_s *cat, int cat_func, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);

	if (cat->ver == 18 || cat->ver == 21) {
		cat_nthw_cot_cnt(be->p_cat_nthw, 1);

		for (int i = 0; i < cnt; i++) {
			cat_nthw_cot_select(be->p_cat_nthw, cat_func + i);
			cat_nthw_cot_color(be->p_cat_nthw, cat->v18.cot[cat_func + i].color);
			cat_nthw_cot_km(be->p_cat_nthw, cat->v18.cot[cat_func + i].km);
			cat_nthw_cot_flush(be->p_cat_nthw);
		}
	}

	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
	return 0;
}
622 
/*
 * Flush 'cnt' CCT records (color/km) starting at 'index'.
 * The v18 shadow layout is shared by versions 18 and 21.  Returns 0 always.
 */
static int cat_cct_flush(void *be_dev, const struct cat_func_s *cat, int index, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);

	if (cat->ver == 18 || cat->ver == 21) {
		cat_nthw_cct_cnt(be->p_cat_nthw, 1);

		for (int i = 0; i < cnt; i++) {
			cat_nthw_cct_select(be->p_cat_nthw, index + i);
			cat_nthw_cct_color(be->p_cat_nthw, cat->v18.cct[index + i].color);
			cat_nthw_cct_km(be->p_cat_nthw, cat->v18.cct[index + i].km);
			cat_nthw_cct_flush(be->p_cat_nthw);
		}
	}

	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
	return 0;
}
642 
/*
 * Flush 'cnt' EXO records (dyn/ofs extractor setup) starting at 'ext_index'.
 * The v18 shadow layout is shared by versions 18 and 21.  Returns 0 always.
 */
static int cat_exo_flush(void *be_dev, const struct cat_func_s *cat, int ext_index, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);

	if (cat->ver == 18 || cat->ver == 21) {
		cat_nthw_exo_cnt(be->p_cat_nthw, 1);

		for (int i = 0; i < cnt; i++) {
			cat_nthw_exo_select(be->p_cat_nthw, ext_index + i);
			cat_nthw_exo_dyn(be->p_cat_nthw, cat->v18.exo[ext_index + i].dyn);
			cat_nthw_exo_ofs(be->p_cat_nthw, cat->v18.exo[ext_index + i].ofs);
			cat_nthw_exo_flush(be->p_cat_nthw);
		}
	}

	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
	return 0;
}
662 
/*
 * Flush 'cnt' RCK data words starting at 'index'.
 * The v18 shadow layout is shared by versions 18 and 21.  Returns 0 always.
 */
static int cat_rck_flush(void *be_dev, const struct cat_func_s *cat, int index, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);

	if (cat->ver == 18 || cat->ver == 21) {
		cat_nthw_rck_cnt(be->p_cat_nthw, 1);

		for (int i = 0; i < cnt; i++) {
			cat_nthw_rck_select(be->p_cat_nthw, index + i);
			cat_nthw_rck_data(be->p_cat_nthw, cat->v18.rck[index + i].rck_data);
			cat_nthw_rck_flush(be->p_cat_nthw);
		}
	}

	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
	return 0;
}
681 
/*
 * Flush 'cnt' LEN records (length-check lower/upper bounds, dyn offsets and
 * invert flag) starting at 'len_index'.  The v18 shadow layout is shared by
 * versions 18 and 21.  Returns 0 always.
 */
static int cat_len_flush(void *be_dev, const struct cat_func_s *cat, int len_index, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);

	if (cat->ver == 18 || cat->ver == 21) {
		cat_nthw_len_cnt(be->p_cat_nthw, 1);

		for (int i = 0; i < cnt; i++) {
			cat_nthw_len_select(be->p_cat_nthw, len_index + i);
			cat_nthw_len_lower(be->p_cat_nthw, cat->v18.len[len_index + i].lower);
			cat_nthw_len_upper(be->p_cat_nthw, cat->v18.len[len_index + i].upper);
			cat_nthw_len_dyn1(be->p_cat_nthw, cat->v18.len[len_index + i].dyn1);
			cat_nthw_len_dyn2(be->p_cat_nthw, cat->v18.len[len_index + i].dyn2);
			cat_nthw_len_inv(be->p_cat_nthw, cat->v18.len[len_index + i].inv);
			cat_nthw_len_flush(be->p_cat_nthw);
		}
	}

	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
	return 0;
}
704 
/*
 * Flush 'cnt' KCC CAM entries (key/category/id) starting at 'len_index'.
 * The v18 shadow layout is shared by versions 18 and 21.  Returns 0 always.
 */
static int cat_kcc_flush(void *be_dev, const struct cat_func_s *cat, int len_index, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);

	if (cat->ver == 18 || cat->ver == 21) {
		cat_nthw_kcc_cnt(be->p_cat_nthw, 1);

		for (int i = 0; i < cnt; i++) {
			cat_nthw_kcc_select(be->p_cat_nthw, len_index + i);
			cat_nthw_kcc_key(be->p_cat_nthw, cat->v18.kcc_cam[len_index + i].key);
			cat_nthw_kcc_category(be->p_cat_nthw,
				cat->v18.kcc_cam[len_index + i].category);
			cat_nthw_kcc_id(be->p_cat_nthw, cat->v18.kcc_cam[len_index + i].id);
			cat_nthw_kcc_flush(be->p_cat_nthw);
		}
	}

	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
	return 0;
}
726 
727 /*
728  * KM
729  */
730 
731 static bool km_get_present(void *be_dev)
732 {
733 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
734 	return be->p_km_nthw != NULL;
735 }
736 
737 static uint32_t km_get_version(void *be_dev)
738 {
739 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
740 	return (uint32_t)((nthw_module_get_major_version(be->p_km_nthw->m_km) << 16) |
741 			(nthw_module_get_minor_version(be->p_km_nthw->m_km) & 0xffff));
742 }
743 
/*
 * Flush 'cnt' KM recipe (RCP) records starting at 'category' from the shadow
 * state in 'km' to the KM hardware module.  Only recipe version 7 is handled;
 * any other version is a silent no-op.  Returns 0 always.
 */
static int km_rcp_flush(void *be_dev, const struct km_func_s *km, int category, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;

	CHECK_DEBUG_ON(be, km, be->p_km_nthw);

	if (km->ver == 7) {
		km_nthw_rcp_cnt(be->p_km_nthw, 1);

		for (int i = 0; i < cnt; i++) {
			km_nthw_rcp_select(be->p_km_nthw, category + i);
			km_nthw_rcp_qw0_dyn(be->p_km_nthw, km->v7.rcp[category + i].qw0_dyn);
			km_nthw_rcp_qw0_ofs(be->p_km_nthw, km->v7.rcp[category + i].qw0_ofs);
			km_nthw_rcp_qw0_sel_a(be->p_km_nthw, km->v7.rcp[category + i].qw0_sel_a);
			km_nthw_rcp_qw0_sel_b(be->p_km_nthw, km->v7.rcp[category + i].qw0_sel_b);
			km_nthw_rcp_qw4_dyn(be->p_km_nthw, km->v7.rcp[category + i].qw4_dyn);
			km_nthw_rcp_qw4_ofs(be->p_km_nthw, km->v7.rcp[category + i].qw4_ofs);
			km_nthw_rcp_qw4_sel_a(be->p_km_nthw, km->v7.rcp[category + i].qw4_sel_a);
			km_nthw_rcp_qw4_sel_b(be->p_km_nthw, km->v7.rcp[category + i].qw4_sel_b);
			km_nthw_rcp_dw8_dyn(be->p_km_nthw, km->v7.rcp[category + i].dw8_dyn);
			km_nthw_rcp_dw8_ofs(be->p_km_nthw, km->v7.rcp[category + i].dw8_ofs);
			km_nthw_rcp_dw8_sel_a(be->p_km_nthw, km->v7.rcp[category + i].dw8_sel_a);
			km_nthw_rcp_dw8_sel_b(be->p_km_nthw, km->v7.rcp[category + i].dw8_sel_b);
			km_nthw_rcp_dw10_dyn(be->p_km_nthw, km->v7.rcp[category + i].dw10_dyn);
			km_nthw_rcp_dw10_ofs(be->p_km_nthw, km->v7.rcp[category + i].dw10_ofs);
			km_nthw_rcp_dw10_sel_a(be->p_km_nthw, km->v7.rcp[category + i].dw10_sel_a);
			km_nthw_rcp_dw10_sel_b(be->p_km_nthw, km->v7.rcp[category + i].dw10_sel_b);
			km_nthw_rcp_swx_cch(be->p_km_nthw, km->v7.rcp[category + i].swx_cch);
			km_nthw_rcp_swx_sel_a(be->p_km_nthw, km->v7.rcp[category + i].swx_sel_a);
			km_nthw_rcp_swx_sel_b(be->p_km_nthw, km->v7.rcp[category + i].swx_sel_b);
			km_nthw_rcp_mask_da(be->p_km_nthw, km->v7.rcp[category + i].mask_d_a);
			km_nthw_rcp_mask_b(be->p_km_nthw, km->v7.rcp[category + i].mask_b);
			km_nthw_rcp_dual(be->p_km_nthw, km->v7.rcp[category + i].dual);
			km_nthw_rcp_paired(be->p_km_nthw, km->v7.rcp[category + i].paired);
			km_nthw_rcp_el_a(be->p_km_nthw, km->v7.rcp[category + i].el_a);
			km_nthw_rcp_el_b(be->p_km_nthw, km->v7.rcp[category + i].el_b);
			km_nthw_rcp_info_a(be->p_km_nthw, km->v7.rcp[category + i].info_a);
			km_nthw_rcp_info_b(be->p_km_nthw, km->v7.rcp[category + i].info_b);
			km_nthw_rcp_ftm_a(be->p_km_nthw, km->v7.rcp[category + i].ftm_a);
			km_nthw_rcp_ftm_b(be->p_km_nthw, km->v7.rcp[category + i].ftm_b);
			km_nthw_rcp_bank_a(be->p_km_nthw, km->v7.rcp[category + i].bank_a);
			km_nthw_rcp_bank_b(be->p_km_nthw, km->v7.rcp[category + i].bank_b);
			km_nthw_rcp_kl_a(be->p_km_nthw, km->v7.rcp[category + i].kl_a);
			km_nthw_rcp_kl_b(be->p_km_nthw, km->v7.rcp[category + i].kl_b);
			km_nthw_rcp_keyway_a(be->p_km_nthw, km->v7.rcp[category + i].keyway_a);
			km_nthw_rcp_keyway_b(be->p_km_nthw, km->v7.rcp[category + i].keyway_b);
			km_nthw_rcp_synergy_mode(be->p_km_nthw,
				km->v7.rcp[category + i].synergy_mode);
			km_nthw_rcp_dw0_b_dyn(be->p_km_nthw, km->v7.rcp[category + i].dw0_b_dyn);
			km_nthw_rcp_dw0_b_ofs(be->p_km_nthw, km->v7.rcp[category + i].dw0_b_ofs);
			km_nthw_rcp_dw2_b_dyn(be->p_km_nthw, km->v7.rcp[category + i].dw2_b_dyn);
			km_nthw_rcp_dw2_b_ofs(be->p_km_nthw, km->v7.rcp[category + i].dw2_b_ofs);
			km_nthw_rcp_sw4_b_dyn(be->p_km_nthw, km->v7.rcp[category + i].sw4_b_dyn);
			km_nthw_rcp_sw4_b_ofs(be->p_km_nthw, km->v7.rcp[category + i].sw4_b_ofs);
			km_nthw_rcp_sw5_b_dyn(be->p_km_nthw, km->v7.rcp[category + i].sw5_b_dyn);
			km_nthw_rcp_sw5_b_ofs(be->p_km_nthw, km->v7.rcp[category + i].sw5_b_ofs);
			km_nthw_rcp_flush(be->p_km_nthw);
		}
	}

	CHECK_DEBUG_OFF(km, be->p_km_nthw);
	return 0;
}
807 
808 static int km_cam_flush(void *be_dev, const struct km_func_s *km, int bank, int record, int cnt)
809 {
810 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
811 	CHECK_DEBUG_ON(be, km, be->p_km_nthw);
812 
813 	if (km->ver == 7) {
814 		km_nthw_cam_cnt(be->p_km_nthw, 1);
815 
816 		for (int i = 0; i < cnt; i++) {
817 			km_nthw_cam_select(be->p_km_nthw, (bank << 11) + record + i);
818 			km_nthw_cam_w0(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w0);
819 			km_nthw_cam_w1(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w1);
820 			km_nthw_cam_w2(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w2);
821 			km_nthw_cam_w3(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w3);
822 			km_nthw_cam_w4(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w4);
823 			km_nthw_cam_w5(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w5);
824 			km_nthw_cam_ft0(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft0);
825 			km_nthw_cam_ft1(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft1);
826 			km_nthw_cam_ft2(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft2);
827 			km_nthw_cam_ft3(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft3);
828 			km_nthw_cam_ft4(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft4);
829 			km_nthw_cam_ft5(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft5);
830 			km_nthw_cam_flush(be->p_km_nthw);
831 		}
832 	}
833 
834 	CHECK_DEBUG_OFF(km, be->p_km_nthw);
835 	return 0;
836 }
837 
838 static int km_tcam_flush(void *be_dev, const struct km_func_s *km, int bank, int byte, int value,
839 	int cnt)
840 {
841 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
842 	CHECK_DEBUG_ON(be, km, be->p_km_nthw);
843 
844 	if (km->ver == 7) {
845 		int start_idx = bank * 4 * 256 + byte * 256 + value;
846 		km_nthw_tcam_cnt(be->p_km_nthw, 1);
847 
848 		for (int i = 0; i < cnt; i++) {
849 			if (km->v7.tcam[start_idx + i].dirty) {
850 				km_nthw_tcam_select(be->p_km_nthw, start_idx + i);
851 				km_nthw_tcam_t(be->p_km_nthw, km->v7.tcam[start_idx + i].t);
852 				km_nthw_tcam_flush(be->p_km_nthw);
853 				km->v7.tcam[start_idx + i].dirty = 0;
854 			}
855 		}
856 	}
857 
858 	CHECK_DEBUG_OFF(km, be->p_km_nthw);
859 	return 0;
860 }
861 
862 /*
863  * bank is the TCAM bank, index is the index within the bank (0..71)
864  */
865 static int km_tci_flush(void *be_dev, const struct km_func_s *km, int bank, int index, int cnt)
866 {
867 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
868 	CHECK_DEBUG_ON(be, km, be->p_km_nthw);
869 
870 	if (km->ver == 7) {
871 		/* TCAM bank width in version 3 = 72 */
872 		km_nthw_tci_cnt(be->p_km_nthw, 1);
873 
874 		for (int i = 0; i < cnt; i++) {
875 			km_nthw_tci_select(be->p_km_nthw, bank * 72 + index + i);
876 			km_nthw_tci_color(be->p_km_nthw, km->v7.tci[bank * 72 + index + i].color);
877 			km_nthw_tci_ft(be->p_km_nthw, km->v7.tci[bank * 72 + index + i].ft);
878 			km_nthw_tci_flush(be->p_km_nthw);
879 		}
880 	}
881 
882 	CHECK_DEBUG_OFF(km, be->p_km_nthw);
883 	return 0;
884 }
885 
886 /*
887  * bank is the TCAM bank, index is the index within the bank (0..71)
888  */
889 static int km_tcq_flush(void *be_dev, const struct km_func_s *km, int bank, int index, int cnt)
890 {
891 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
892 	CHECK_DEBUG_ON(be, km, be->p_km_nthw);
893 
894 	if (km->ver == 7) {
895 		/* TCAM bank width in version 3 = 72 */
896 		km_nthw_tcq_cnt(be->p_km_nthw, 1);
897 
898 		for (int i = 0; i < cnt; i++) {
899 			/* adr = lover 4 bits = bank, upper 7 bits = index */
900 			km_nthw_tcq_select(be->p_km_nthw, bank + (index << 4) + i);
901 			km_nthw_tcq_bank_mask(be->p_km_nthw,
902 				km->v7.tcq[bank + (index << 4) + i].bank_mask);
903 			km_nthw_tcq_qual(be->p_km_nthw, km->v7.tcq[bank + (index << 4) + i].qual);
904 			km_nthw_tcq_flush(be->p_km_nthw);
905 		}
906 	}
907 
908 	CHECK_DEBUG_OFF(km, be->p_km_nthw);
909 	return 0;
910 }
911 
912 /*
913  * FLM
914  */
915 
916 static bool flm_get_present(void *be_dev)
917 {
918 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
919 	return be->p_flm_nthw != NULL;
920 }
921 
922 static uint32_t flm_get_version(void *be_dev)
923 {
924 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
925 	return (uint32_t)((nthw_module_get_major_version(be->p_flm_nthw->m_flm) << 16) |
926 			(nthw_module_get_minor_version(be->p_flm_nthw->m_flm) & 0xffff));
927 }
928 
929 static int flm_control_flush(void *be_dev, const struct flm_func_s *flm)
930 {
931 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
932 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
933 
934 	if (flm->ver >= 25) {
935 		flm_nthw_control_enable(be->p_flm_nthw, flm->v25.control->enable);
936 		flm_nthw_control_init(be->p_flm_nthw, flm->v25.control->init);
937 		flm_nthw_control_lds(be->p_flm_nthw, flm->v25.control->lds);
938 		flm_nthw_control_lfs(be->p_flm_nthw, flm->v25.control->lfs);
939 		flm_nthw_control_lis(be->p_flm_nthw, flm->v25.control->lis);
940 		flm_nthw_control_uds(be->p_flm_nthw, flm->v25.control->uds);
941 		flm_nthw_control_uis(be->p_flm_nthw, flm->v25.control->uis);
942 		flm_nthw_control_rds(be->p_flm_nthw, flm->v25.control->rds);
943 		flm_nthw_control_ris(be->p_flm_nthw, flm->v25.control->ris);
944 		flm_nthw_control_pds(be->p_flm_nthw, flm->v25.control->pds);
945 		flm_nthw_control_pis(be->p_flm_nthw, flm->v25.control->pis);
946 		flm_nthw_control_crcwr(be->p_flm_nthw, flm->v25.control->crcwr);
947 		flm_nthw_control_crcrd(be->p_flm_nthw, flm->v25.control->crcrd);
948 		flm_nthw_control_rbl(be->p_flm_nthw, flm->v25.control->rbl);
949 		flm_nthw_control_eab(be->p_flm_nthw, flm->v25.control->eab);
950 		flm_nthw_control_split_sdram_usage(be->p_flm_nthw,
951 			flm->v25.control->split_sdram_usage);
952 		flm_nthw_control_flush(be->p_flm_nthw);
953 	}
954 
955 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
956 	return 0;
957 }
958 
959 static int flm_status_flush(void *be_dev, const struct flm_func_s *flm)
960 {
961 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
962 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
963 
964 	if (flm->ver >= 25) {
965 		/* CALIBDONE, INITDONE, IDLE, and EFT_BP is read only */
966 		flm_nthw_status_critical(be->p_flm_nthw, &flm->v25.status->critical, 0);
967 		flm_nthw_status_panic(be->p_flm_nthw, &flm->v25.status->panic, 0);
968 		flm_nthw_status_crcerr(be->p_flm_nthw, &flm->v25.status->crcerr, 0);
969 		flm_nthw_status_cache_buf_crit(be->p_flm_nthw,
970 			&flm->v25.status->cache_buf_critical, 0);
971 		flm_nthw_status_flush(be->p_flm_nthw);
972 	}
973 
974 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
975 	return 0;
976 }
977 
978 static int flm_status_update(void *be_dev, const struct flm_func_s *flm)
979 {
980 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
981 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
982 
983 	if (flm->ver >= 25) {
984 		flm_nthw_status_update(be->p_flm_nthw);
985 		flm_nthw_status_calib_success(be->p_flm_nthw, &flm->v25.status->calib_success, 1);
986 		flm_nthw_status_calib_fail(be->p_flm_nthw, &flm->v25.status->calib_fail, 1);
987 		flm_nthw_status_initdone(be->p_flm_nthw, &flm->v25.status->initdone, 1);
988 		flm_nthw_status_idle(be->p_flm_nthw, &flm->v25.status->idle, 1);
989 		flm_nthw_status_critical(be->p_flm_nthw, &flm->v25.status->critical, 1);
990 		flm_nthw_status_panic(be->p_flm_nthw, &flm->v25.status->panic, 1);
991 		flm_nthw_status_crcerr(be->p_flm_nthw, &flm->v25.status->crcerr, 1);
992 		flm_nthw_status_eft_bp(be->p_flm_nthw, &flm->v25.status->eft_bp, 1);
993 		flm_nthw_status_cache_buf_crit(be->p_flm_nthw,
994 			&flm->v25.status->cache_buf_critical, 1);
995 	}
996 
997 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
998 	return 0;
999 }
1000 
1001 static int flm_scan_flush(void *be_dev, const struct flm_func_s *flm)
1002 {
1003 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1004 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1005 
1006 	if (flm->ver >= 25) {
1007 		flm_nthw_scan_i(be->p_flm_nthw, flm->v25.scan->i);
1008 		flm_nthw_scan_flush(be->p_flm_nthw);
1009 	}
1010 
1011 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1012 	return 0;
1013 }
1014 
1015 static int flm_load_bin_flush(void *be_dev, const struct flm_func_s *flm)
1016 {
1017 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1018 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1019 
1020 	if (flm->ver >= 25) {
1021 		flm_nthw_load_bin(be->p_flm_nthw, flm->v25.load_bin->bin);
1022 		flm_nthw_load_bin_flush(be->p_flm_nthw);
1023 	}
1024 
1025 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1026 	return 0;
1027 }
1028 
1029 static int flm_prio_flush(void *be_dev, const struct flm_func_s *flm)
1030 {
1031 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1032 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1033 
1034 	if (flm->ver >= 25) {
1035 		flm_nthw_prio_limit0(be->p_flm_nthw, flm->v25.prio->limit0);
1036 		flm_nthw_prio_ft0(be->p_flm_nthw, flm->v25.prio->ft0);
1037 		flm_nthw_prio_limit1(be->p_flm_nthw, flm->v25.prio->limit1);
1038 		flm_nthw_prio_ft1(be->p_flm_nthw, flm->v25.prio->ft1);
1039 		flm_nthw_prio_limit2(be->p_flm_nthw, flm->v25.prio->limit2);
1040 		flm_nthw_prio_ft2(be->p_flm_nthw, flm->v25.prio->ft2);
1041 		flm_nthw_prio_limit3(be->p_flm_nthw, flm->v25.prio->limit3);
1042 		flm_nthw_prio_ft3(be->p_flm_nthw, flm->v25.prio->ft3);
1043 		flm_nthw_prio_flush(be->p_flm_nthw);
1044 	}
1045 
1046 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1047 	return 0;
1048 }
1049 
1050 static int flm_pst_flush(void *be_dev, const struct flm_func_s *flm, int index, int cnt)
1051 {
1052 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1053 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1054 
1055 	if (flm->ver >= 25) {
1056 		flm_nthw_pst_cnt(be->p_flm_nthw, 1);
1057 
1058 		for (int i = 0; i < cnt; i++) {
1059 			flm_nthw_pst_select(be->p_flm_nthw, index + i);
1060 			flm_nthw_pst_bp(be->p_flm_nthw, flm->v25.pst[index + i].bp);
1061 			flm_nthw_pst_pp(be->p_flm_nthw, flm->v25.pst[index + i].pp);
1062 			flm_nthw_pst_tp(be->p_flm_nthw, flm->v25.pst[index + i].tp);
1063 			flm_nthw_pst_flush(be->p_flm_nthw);
1064 		}
1065 	}
1066 
1067 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1068 	return 0;
1069 }
1070 
1071 static int flm_rcp_flush(void *be_dev, const struct flm_func_s *flm, int index, int cnt)
1072 {
1073 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1074 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1075 
1076 	if (flm->ver >= 25) {
1077 		flm_nthw_rcp_cnt(be->p_flm_nthw, 1);
1078 
1079 		for (int i = 0; i < cnt; i++) {
1080 			flm_nthw_rcp_select(be->p_flm_nthw, index + i);
1081 			flm_nthw_rcp_lookup(be->p_flm_nthw, flm->v25.rcp[index + i].lookup);
1082 			flm_nthw_rcp_qw0_dyn(be->p_flm_nthw, flm->v25.rcp[index + i].qw0_dyn);
1083 			flm_nthw_rcp_qw0_ofs(be->p_flm_nthw, flm->v25.rcp[index + i].qw0_ofs);
1084 			flm_nthw_rcp_qw0_sel(be->p_flm_nthw, flm->v25.rcp[index + i].qw0_sel);
1085 			flm_nthw_rcp_qw4_dyn(be->p_flm_nthw, flm->v25.rcp[index + i].qw4_dyn);
1086 			flm_nthw_rcp_qw4_ofs(be->p_flm_nthw, flm->v25.rcp[index + i].qw4_ofs);
1087 			flm_nthw_rcp_sw8_dyn(be->p_flm_nthw, flm->v25.rcp[index + i].sw8_dyn);
1088 			flm_nthw_rcp_sw8_ofs(be->p_flm_nthw, flm->v25.rcp[index + i].sw8_ofs);
1089 			flm_nthw_rcp_sw8_sel(be->p_flm_nthw, flm->v25.rcp[index + i].sw8_sel);
1090 			flm_nthw_rcp_sw9_dyn(be->p_flm_nthw, flm->v25.rcp[index + i].sw9_dyn);
1091 			flm_nthw_rcp_sw9_ofs(be->p_flm_nthw, flm->v25.rcp[index + i].sw9_ofs);
1092 			flm_nthw_rcp_mask(be->p_flm_nthw, flm->v25.rcp[index + i].mask);
1093 			flm_nthw_rcp_kid(be->p_flm_nthw, flm->v25.rcp[index + i].kid);
1094 			flm_nthw_rcp_opn(be->p_flm_nthw, flm->v25.rcp[index + i].opn);
1095 			flm_nthw_rcp_ipn(be->p_flm_nthw, flm->v25.rcp[index + i].ipn);
1096 			flm_nthw_rcp_byt_dyn(be->p_flm_nthw, flm->v25.rcp[index + i].byt_dyn);
1097 			flm_nthw_rcp_byt_ofs(be->p_flm_nthw, flm->v25.rcp[index + i].byt_ofs);
1098 			flm_nthw_rcp_txplm(be->p_flm_nthw, flm->v25.rcp[index + i].txplm);
1099 			flm_nthw_rcp_auto_ipv4_mask(be->p_flm_nthw,
1100 				flm->v25.rcp[index + i].auto_ipv4_mask);
1101 			flm_nthw_rcp_flush(be->p_flm_nthw);
1102 		}
1103 	}
1104 
1105 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1106 	return 0;
1107 }
1108 
1109 static int flm_scrub_flush(void *be_dev, const struct flm_func_s *flm, int index, int cnt)
1110 {
1111 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1112 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1113 
1114 	if (flm->ver >= 25) {
1115 		flm_nthw_scrub_cnt(be->p_flm_nthw, 1);
1116 
1117 		for (int i = 0; i < cnt; i++) {
1118 			flm_nthw_scrub_select(be->p_flm_nthw, index + i);
1119 			flm_nthw_scrub_t(be->p_flm_nthw, flm->v25.scrub[index + i].t);
1120 			flm_nthw_scrub_r(be->p_flm_nthw, flm->v25.scrub[index + i].r);
1121 			flm_nthw_scrub_del(be->p_flm_nthw, flm->v25.scrub[index + i].del);
1122 			flm_nthw_scrub_inf(be->p_flm_nthw, flm->v25.scrub[index + i].inf);
1123 			flm_nthw_scrub_flush(be->p_flm_nthw);
1124 		}
1125 	}
1126 
1127 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1128 	return 0;
1129 }
1130 
1131 static int flm_buf_ctrl_update(void *be_dev, const struct flm_func_s *flm)
1132 {
1133 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1134 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1135 
1136 	if (flm->ver >= 25) {
1137 		flm_nthw_buf_ctrl_update(be->p_flm_nthw,
1138 			&flm->v25.buf_ctrl->lrn_free,
1139 			&flm->v25.buf_ctrl->inf_avail,
1140 			&flm->v25.buf_ctrl->sta_avail);
1141 	}
1142 
1143 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1144 	return 0;
1145 }
1146 
1147 static int flm_stat_update(void *be_dev, const struct flm_func_s *flm)
1148 {
1149 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1150 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1151 
1152 	if (flm->ver >= 25) {
1153 		flm_nthw_stat_lrn_done_update(be->p_flm_nthw);
1154 		flm_nthw_stat_lrn_ignore_update(be->p_flm_nthw);
1155 		flm_nthw_stat_lrn_fail_update(be->p_flm_nthw);
1156 		flm_nthw_stat_unl_done_update(be->p_flm_nthw);
1157 		flm_nthw_stat_unl_ignore_update(be->p_flm_nthw);
1158 		flm_nthw_stat_rel_done_update(be->p_flm_nthw);
1159 		flm_nthw_stat_rel_ignore_update(be->p_flm_nthw);
1160 		flm_nthw_stat_aul_done_update(be->p_flm_nthw);
1161 		flm_nthw_stat_aul_ignore_update(be->p_flm_nthw);
1162 		flm_nthw_stat_aul_fail_update(be->p_flm_nthw);
1163 		flm_nthw_stat_tul_done_update(be->p_flm_nthw);
1164 		flm_nthw_stat_flows_update(be->p_flm_nthw);
1165 		flm_nthw_load_lps_update(be->p_flm_nthw);
1166 		flm_nthw_load_aps_update(be->p_flm_nthw);
1167 
1168 		flm_nthw_stat_lrn_done_cnt(be->p_flm_nthw, &flm->v25.lrn_done->cnt, 1);
1169 		flm_nthw_stat_lrn_ignore_cnt(be->p_flm_nthw, &flm->v25.lrn_ignore->cnt, 1);
1170 		flm_nthw_stat_lrn_fail_cnt(be->p_flm_nthw, &flm->v25.lrn_fail->cnt, 1);
1171 		flm_nthw_stat_unl_done_cnt(be->p_flm_nthw, &flm->v25.unl_done->cnt, 1);
1172 		flm_nthw_stat_unl_ignore_cnt(be->p_flm_nthw, &flm->v25.unl_ignore->cnt, 1);
1173 		flm_nthw_stat_rel_done_cnt(be->p_flm_nthw, &flm->v25.rel_done->cnt, 1);
1174 		flm_nthw_stat_rel_ignore_cnt(be->p_flm_nthw, &flm->v25.rel_ignore->cnt, 1);
1175 		flm_nthw_stat_aul_done_cnt(be->p_flm_nthw, &flm->v25.aul_done->cnt, 1);
1176 		flm_nthw_stat_aul_ignore_cnt(be->p_flm_nthw, &flm->v25.aul_ignore->cnt, 1);
1177 		flm_nthw_stat_aul_fail_cnt(be->p_flm_nthw, &flm->v25.aul_fail->cnt, 1);
1178 		flm_nthw_stat_tul_done_cnt(be->p_flm_nthw, &flm->v25.tul_done->cnt, 1);
1179 		flm_nthw_stat_flows_cnt(be->p_flm_nthw, &flm->v25.flows->cnt, 1);
1180 
1181 		flm_nthw_stat_prb_done_update(be->p_flm_nthw);
1182 		flm_nthw_stat_prb_ignore_update(be->p_flm_nthw);
1183 		flm_nthw_stat_prb_done_cnt(be->p_flm_nthw, &flm->v25.prb_done->cnt, 1);
1184 		flm_nthw_stat_prb_ignore_cnt(be->p_flm_nthw, &flm->v25.prb_ignore->cnt, 1);
1185 
1186 		flm_nthw_load_lps_cnt(be->p_flm_nthw, &flm->v25.load_lps->lps, 1);
1187 		flm_nthw_load_aps_cnt(be->p_flm_nthw, &flm->v25.load_aps->aps, 1);
1188 	}
1189 
1190 	if (flm->ver >= 25) {
1191 		flm_nthw_stat_sta_done_update(be->p_flm_nthw);
1192 		flm_nthw_stat_inf_done_update(be->p_flm_nthw);
1193 		flm_nthw_stat_inf_skip_update(be->p_flm_nthw);
1194 		flm_nthw_stat_pck_hit_update(be->p_flm_nthw);
1195 		flm_nthw_stat_pck_miss_update(be->p_flm_nthw);
1196 		flm_nthw_stat_pck_unh_update(be->p_flm_nthw);
1197 		flm_nthw_stat_pck_dis_update(be->p_flm_nthw);
1198 		flm_nthw_stat_csh_hit_update(be->p_flm_nthw);
1199 		flm_nthw_stat_csh_miss_update(be->p_flm_nthw);
1200 		flm_nthw_stat_csh_unh_update(be->p_flm_nthw);
1201 		flm_nthw_stat_cuc_start_update(be->p_flm_nthw);
1202 		flm_nthw_stat_cuc_move_update(be->p_flm_nthw);
1203 
1204 		flm_nthw_stat_sta_done_cnt(be->p_flm_nthw, &flm->v25.sta_done->cnt, 1);
1205 		flm_nthw_stat_inf_done_cnt(be->p_flm_nthw, &flm->v25.inf_done->cnt, 1);
1206 		flm_nthw_stat_inf_skip_cnt(be->p_flm_nthw, &flm->v25.inf_skip->cnt, 1);
1207 		flm_nthw_stat_pck_hit_cnt(be->p_flm_nthw, &flm->v25.pck_hit->cnt, 1);
1208 		flm_nthw_stat_pck_miss_cnt(be->p_flm_nthw, &flm->v25.pck_miss->cnt, 1);
1209 		flm_nthw_stat_pck_unh_cnt(be->p_flm_nthw, &flm->v25.pck_unh->cnt, 1);
1210 		flm_nthw_stat_pck_dis_cnt(be->p_flm_nthw, &flm->v25.pck_dis->cnt, 1);
1211 		flm_nthw_stat_csh_hit_cnt(be->p_flm_nthw, &flm->v25.csh_hit->cnt, 1);
1212 		flm_nthw_stat_csh_miss_cnt(be->p_flm_nthw, &flm->v25.csh_miss->cnt, 1);
1213 		flm_nthw_stat_csh_unh_cnt(be->p_flm_nthw, &flm->v25.csh_unh->cnt, 1);
1214 		flm_nthw_stat_cuc_start_cnt(be->p_flm_nthw, &flm->v25.cuc_start->cnt, 1);
1215 		flm_nthw_stat_cuc_move_cnt(be->p_flm_nthw, &flm->v25.cuc_move->cnt, 1);
1216 	}
1217 
1218 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1219 	return 0;
1220 }
1221 
1222 static int flm_lrn_data_flush(void *be_dev, const struct flm_func_s *flm, const uint32_t *lrn_data,
1223 	uint32_t records, uint32_t *handled_records,
1224 	uint32_t words_per_record, uint32_t *inf_word_cnt,
1225 	uint32_t *sta_word_cnt)
1226 {
1227 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1228 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1229 
1230 	int ret = flm_nthw_lrn_data_flush(be->p_flm_nthw, lrn_data, records, words_per_record,
1231 			handled_records, &flm->v25.buf_ctrl->lrn_free,
1232 			&flm->v25.buf_ctrl->inf_avail,
1233 			&flm->v25.buf_ctrl->sta_avail);
1234 
1235 	*inf_word_cnt = flm->v25.buf_ctrl->inf_avail;
1236 	*sta_word_cnt = flm->v25.buf_ctrl->sta_avail;
1237 
1238 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1239 	return ret;
1240 }
1241 
1242 static int flm_inf_sta_data_update(void *be_dev, const struct flm_func_s *flm, uint32_t *inf_data,
1243 	uint32_t inf_size, uint32_t *inf_word_cnt, uint32_t *sta_data,
1244 	uint32_t sta_size, uint32_t *sta_word_cnt)
1245 {
1246 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1247 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1248 
1249 	int ret = flm_nthw_inf_sta_data_update(be->p_flm_nthw, inf_data, inf_size, sta_data,
1250 			sta_size, &flm->v25.buf_ctrl->lrn_free,
1251 			&flm->v25.buf_ctrl->inf_avail,
1252 			&flm->v25.buf_ctrl->sta_avail);
1253 
1254 	*inf_word_cnt = flm->v25.buf_ctrl->inf_avail;
1255 	*sta_word_cnt = flm->v25.buf_ctrl->sta_avail;
1256 
1257 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1258 	return ret;
1259 }
1260 
1261 /*
1262  * DBS
1263  */
1264 
/* Rx queue allocation stub: this backend does not support it; always fails. */
static int alloc_rx_queue(void *be_dev, int queue_id)
{
	(void)be_dev;
	(void)queue_id;
	NT_LOG(ERR, FILTER, "ERROR alloc Rx queue");
	return -1;	/* failure: no queue was allocated */
}
1272 
/*
 * Rx queue free stub: this backend does not support it.
 * NOTE(review): logs an error yet returns 0, while alloc_rx_queue returns -1 —
 * confirm the asymmetry is intentional.
 */
static int free_rx_queue(void *be_dev, int hw_queue)
{
	(void)be_dev;
	(void)hw_queue;
	NT_LOG(ERR, FILTER, "ERROR free Rx queue");
	return 0;
}
1280 
/*
 * Binary flow API backend operations table.
 * Positional initialization: the entry order must match the member order of
 * struct flow_api_backend_ops exactly.
 */
const struct flow_api_backend_ops flow_be_iface = {
	1,	/* first member of flow_api_backend_ops — presumably an API/version id, confirm in hw_mod_backend.h */

	/* Debug mode and capability/resource-count queries */
	set_debug_mode,
	get_nb_phy_ports,
	get_nb_rx_ports,
	get_ltx_avail,
	get_nb_cat_funcs,
	get_nb_categories,
	get_nb_cat_km_if_cnt,
	get_nb_cat_km_if_m0,
	get_nb_cat_km_if_m1,
	get_nb_queues,
	get_nb_km_flow_types,
	get_nb_pm_ext,
	get_nb_len,
	get_kcc_size,
	get_kcc_banks,
	get_nb_km_categories,
	get_nb_km_cam_banks,
	get_nb_km_cam_record_words,
	get_nb_km_cam_records,
	get_nb_km_tcam_banks,
	get_nb_km_tcam_bank_width,
	get_nb_flm_categories,
	get_nb_flm_size_mb,
	get_nb_flm_entry_size,
	get_nb_flm_variant,
	get_nb_flm_prios,
	get_nb_flm_pst_profiles,
	get_nb_flm_scrub_profiles,
	get_nb_flm_load_aps_max,
	get_nb_qsl_categories,
	get_nb_qsl_qst_entries,
	get_nb_pdb_categories,
	get_nb_roa_categories,
	get_nb_tpe_categories,
	get_nb_tx_cpy_writers,
	get_nb_tx_cpy_mask_mem,
	get_nb_tx_rpl_depth,
	get_nb_tx_rpl_ext_categories,
	get_nb_tpe_ifr_categories,
	get_nb_rpp_per_ps,
	get_nb_hsh_categories,
	get_nb_hsh_toeplitz,

	/* Rx queue management (unsupported stubs in this backend) */
	alloc_rx_queue,
	free_rx_queue,

	/* CAT module operations */
	cat_get_present,
	cat_get_version,
	cat_cfn_flush,

	cat_kce_flush,
	cat_kcs_flush,
	cat_fte_flush,

	cat_cte_flush,
	cat_cts_flush,
	cat_cot_flush,
	cat_cct_flush,
	cat_exo_flush,
	cat_rck_flush,
	cat_len_flush,
	cat_kcc_flush,

	/* KM module operations */
	km_get_present,
	km_get_version,
	km_rcp_flush,
	km_cam_flush,
	km_tcam_flush,
	km_tci_flush,
	km_tcq_flush,

	/* FLM module operations */
	flm_get_present,
	flm_get_version,
	flm_control_flush,
	flm_status_flush,
	flm_status_update,
	flm_scan_flush,
	flm_load_bin_flush,
	flm_prio_flush,
	flm_pst_flush,
	flm_rcp_flush,
	flm_scrub_flush,
	flm_buf_ctrl_update,
	flm_stat_update,
	flm_lrn_data_flush,
	flm_inf_sta_data_update,
};
1371 
1372 const struct flow_api_backend_ops *bin_flow_backend_init(nthw_fpga_t *p_fpga, void **dev)
1373 {
1374 	uint8_t physical_adapter_no = (uint8_t)p_fpga->p_fpga_info->adapter_no;
1375 
1376 	struct info_nthw *pinfonthw = info_nthw_new();
1377 	info_nthw_init(pinfonthw, p_fpga, physical_adapter_no);
1378 	be_devs[physical_adapter_no].p_info_nthw = pinfonthw;
1379 
1380 	/* Init nthw CAT */
1381 	if (cat_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1382 		struct cat_nthw *pcatnthw = cat_nthw_new();
1383 		cat_nthw_init(pcatnthw, p_fpga, physical_adapter_no);
1384 		be_devs[physical_adapter_no].p_cat_nthw = pcatnthw;
1385 
1386 	} else {
1387 		be_devs[physical_adapter_no].p_cat_nthw = NULL;
1388 	}
1389 
1390 	/* Init nthw KM */
1391 	if (km_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1392 		struct km_nthw *pkmnthw = km_nthw_new();
1393 		km_nthw_init(pkmnthw, p_fpga, physical_adapter_no);
1394 		be_devs[physical_adapter_no].p_km_nthw = pkmnthw;
1395 
1396 	} else {
1397 		be_devs[physical_adapter_no].p_km_nthw = NULL;
1398 	}
1399 
1400 	/* Init nthw FLM */
1401 	if (flm_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
1402 		struct flm_nthw *pflmnthw = flm_nthw_new();
1403 		flm_nthw_init(pflmnthw, p_fpga, physical_adapter_no);
1404 		be_devs[physical_adapter_no].p_flm_nthw = pflmnthw;
1405 
1406 	} else {
1407 		be_devs[physical_adapter_no].p_flm_nthw = NULL;
1408 	}
1409 
1410 	be_devs[physical_adapter_no].adapter_no = physical_adapter_no;
1411 	*dev = (void *)&be_devs[physical_adapter_no];
1412 
1413 	return &flow_be_iface;
1414 }
1415 
/*
 * Tear down the backend state created by bin_flow_backend_init().
 * NOTE(review): module handles may be NULL for absent modules — assumes the
 * *_nthw_delete() functions accept NULL; confirm in the flow_nthw_* headers.
 */
static void bin_flow_backend_done(void *dev)
{
	struct backend_dev_s *be_dev = (struct backend_dev_s *)dev;
	info_nthw_delete(be_dev->p_info_nthw);
	cat_nthw_delete(be_dev->p_cat_nthw);
	km_nthw_delete(be_dev->p_km_nthw);
	flm_nthw_delete(be_dev->p_flm_nthw);
}
1424 
/* Backend entry points registered with the module registry (ntnic_mod_reg). */
static const struct flow_backend_ops ops = {
	.bin_flow_backend_init = bin_flow_backend_init,
	.bin_flow_backend_done = bin_flow_backend_done,
};
1429 
/* Register this backend's ops table with the driver's module registry. */
void flow_backend_init(void)
{
	register_flow_backend_ops(&ops);
}
1434