xref: /dpdk/drivers/net/ntnic/nthw/flow_api/flow_backend/flow_backend.c (revision b95f1cd053cee23862a0dfc613e95e86dfd5f3aa)
1 /*
2  * SPDX-License-Identifier: BSD-3-Clause
3  * Copyright(c) 2023 Napatech A/S
4  */
5 
6 #include <stdint.h>
7 
8 #include "flow_nthw_info.h"
9 #include "flow_nthw_ifr.h"
10 #include "flow_nthw_cat.h"
11 #include "flow_nthw_km.h"
12 #include "flow_nthw_flm.h"
13 #include "flow_nthw_hsh.h"
14 #include "flow_nthw_qsl.h"
15 #include "ntnic_mod_reg.h"
16 #include "nthw_fpga_model.h"
17 #include "hw_mod_backend.h"
18 
19 /*
20  * Binary Flow API backend implementation into ntservice driver
21  *
22  * General note on this backend implementation:
23  * Maybe use shadow class to combine multiple writes. However, this backend is only for dev/testing
24  */
25 
/*
 * Per-adapter backend state: the NTHW module handles this backend drives,
 * plus the currently selected debug mode. One entry per physical adapter.
 */
static struct backend_dev_s {
	uint8_t adapter_no;		/* index of the adapter owning this entry */
	enum debug_mode_e dmode;	/* debug flags, set via set_debug_mode() */
	struct info_nthw *p_info_nthw;	/* INFO module handle (capability queries) */
	struct cat_nthw *p_cat_nthw;	/* CAT module handle */
	struct km_nthw *p_km_nthw;	/* KM module handle */
	struct flm_nthw *p_flm_nthw;	/* FLM module handle */
	struct hsh_nthw *p_hsh_nthw;	/* HSH module handle */
	struct qsl_nthw *p_qsl_nthw;	/* QSL module handle */
	struct ifr_nthw *p_ifr_nthw;    /* TPE module */
} be_devs[MAX_PHYS_ADAPTERS];
37 
/*
 * Turn on register-write debug tracing for a module instance when either the
 * backend debug mode requests it or the module itself has its debug flag set.
 * Deliberately NOT wrapped in a single statement: it declares a local flag,
 * visible to a later CHECK_DEBUG_OFF() in the same function, that records
 * whether tracing was enabled here.
 *
 * Fixes vs. previous version: the flag no longer uses the reserved
 * identifier `__debug__` (C11 7.1.3 / CERT DCL37-C), and the conditional is
 * inside the do/while so the macro no longer expands to a bare
 * `if (...) do { ... } while (0)` dangling-statement hazard.
 */
#define CHECK_DEBUG_ON(be, mod, inst)                                                             \
	int debug_mode_on = 0;                                                                    \
	do {                                                                                      \
		if (((be)->dmode & FLOW_BACKEND_DEBUG_MODE_WRITE) || (mod)->debug) {              \
			mod##_nthw_set_debug_mode((inst), 0xFF);                                  \
			debug_mode_on = 1;                                                        \
		}                                                                                 \
	} while (0)

/* Turn debug tracing back off, but only if CHECK_DEBUG_ON() enabled it. */
#define CHECK_DEBUG_OFF(mod, inst)                                                                \
	do {                                                                                      \
		if (debug_mode_on)                                                                \
			mod##_nthw_set_debug_mode((inst), 0);                                     \
	} while (0)
51 
52 const struct flow_api_backend_ops *bin_flow_backend_init(nthw_fpga_t *p_fpga, void **be_dev);
53 static void bin_flow_backend_done(void *be_dev);
54 
55 static int set_debug_mode(void *be_dev, enum debug_mode_e mode)
56 {
57 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
58 	be->dmode = mode;
59 	return 0;
60 }
61 
62 /*
63  * INFO
64  */
65 
66 static int get_nb_phy_ports(void *be_dev)
67 {
68 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
69 	return info_nthw_get_nb_phy_ports(be->p_info_nthw);
70 }
71 
72 static int get_nb_rx_ports(void *be_dev)
73 {
74 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
75 	return info_nthw_get_nb_rx_ports(be->p_info_nthw);
76 }
77 
78 static int get_ltx_avail(void *be_dev)
79 {
80 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
81 	return info_nthw_get_ltx_avail(be->p_info_nthw);
82 }
83 
84 static int get_nb_cat_funcs(void *be_dev)
85 {
86 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
87 	return info_nthw_get_nb_cat_funcs(be->p_info_nthw);
88 }
89 
90 static int get_nb_categories(void *be_dev)
91 {
92 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
93 	return info_nthw_get_nb_categories(be->p_info_nthw);
94 }
95 
96 static int get_nb_cat_km_if_cnt(void *be_dev)
97 {
98 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
99 	return info_nthw_get_nb_cat_km_if_cnt(be->p_info_nthw);
100 }
101 
102 static int get_nb_cat_km_if_m0(void *be_dev)
103 {
104 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
105 	return info_nthw_get_nb_cat_km_if_m0(be->p_info_nthw);
106 }
107 
108 static int get_nb_cat_km_if_m1(void *be_dev)
109 {
110 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
111 	return info_nthw_get_nb_cat_km_if_m1(be->p_info_nthw);
112 }
113 
114 static int get_nb_queues(void *be_dev)
115 {
116 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
117 	return info_nthw_get_nb_queues(be->p_info_nthw);
118 }
119 
120 static int get_nb_km_flow_types(void *be_dev)
121 {
122 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
123 	return info_nthw_get_nb_km_flow_types(be->p_info_nthw);
124 }
125 
126 static int get_nb_pm_ext(void *be_dev)
127 {
128 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
129 	return info_nthw_get_nb_pm_ext(be->p_info_nthw);
130 }
131 
132 static int get_nb_len(void *be_dev)
133 {
134 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
135 	return info_nthw_get_nb_len(be->p_info_nthw);
136 }
137 
138 static int get_kcc_size(void *be_dev)
139 {
140 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
141 	return info_nthw_get_kcc_size(be->p_info_nthw);
142 }
143 
144 static int get_kcc_banks(void *be_dev)
145 {
146 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
147 	return info_nthw_get_kcc_banks(be->p_info_nthw);
148 }
149 
150 static int get_nb_km_categories(void *be_dev)
151 {
152 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
153 	return info_nthw_get_nb_km_categories(be->p_info_nthw);
154 }
155 
156 static int get_nb_km_cam_banks(void *be_dev)
157 {
158 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
159 	return info_nthw_get_nb_km_cam_banks(be->p_info_nthw);
160 }
161 
162 static int get_nb_km_cam_record_words(void *be_dev)
163 {
164 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
165 	return info_nthw_get_nb_km_cam_record_words(be->p_info_nthw);
166 }
167 
168 static int get_nb_km_cam_records(void *be_dev)
169 {
170 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
171 	return info_nthw_get_nb_km_cam_records(be->p_info_nthw);
172 }
173 
174 static int get_nb_km_tcam_banks(void *be_dev)
175 {
176 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
177 	return info_nthw_get_nb_km_tcam_banks(be->p_info_nthw);
178 }
179 
180 static int get_nb_km_tcam_bank_width(void *be_dev)
181 {
182 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
183 	return info_nthw_get_nb_km_tcam_bank_width(be->p_info_nthw);
184 }
185 
186 static int get_nb_flm_categories(void *be_dev)
187 {
188 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
189 	return info_nthw_get_nb_flm_categories(be->p_info_nthw);
190 }
191 
192 static int get_nb_flm_size_mb(void *be_dev)
193 {
194 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
195 	return info_nthw_get_nb_flm_size_mb(be->p_info_nthw);
196 }
197 
198 static int get_nb_flm_entry_size(void *be_dev)
199 {
200 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
201 	return info_nthw_get_nb_flm_entry_size(be->p_info_nthw);
202 }
203 
204 static int get_nb_flm_variant(void *be_dev)
205 {
206 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
207 	return info_nthw_get_nb_flm_variant(be->p_info_nthw);
208 }
209 
210 static int get_nb_flm_prios(void *be_dev)
211 {
212 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
213 	return info_nthw_get_nb_flm_prios(be->p_info_nthw);
214 }
215 
216 static int get_nb_flm_pst_profiles(void *be_dev)
217 {
218 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
219 	return info_nthw_get_nb_flm_pst_profiles(be->p_info_nthw);
220 }
221 
222 static int get_nb_flm_scrub_profiles(void *be_dev)
223 {
224 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
225 	return info_nthw_get_nb_flm_scrub_profiles(be->p_info_nthw);
226 }
227 
228 static int get_nb_flm_load_aps_max(void *be_dev)
229 {
230 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
231 	return info_nthw_get_nb_flm_load_aps_max(be->p_info_nthw);
232 }
233 
234 static int get_nb_qsl_categories(void *be_dev)
235 {
236 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
237 	return info_nthw_get_nb_qsl_categories(be->p_info_nthw);
238 }
239 
240 static int get_nb_qsl_qst_entries(void *be_dev)
241 {
242 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
243 	return info_nthw_get_nb_qsl_qst_entries(be->p_info_nthw);
244 }
245 
246 static int get_nb_pdb_categories(void *be_dev)
247 {
248 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
249 	return info_nthw_get_nb_pdb_categories(be->p_info_nthw);
250 }
251 
252 static int get_nb_roa_categories(void *be_dev)
253 {
254 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
255 	return info_nthw_get_nb_roa_categories(be->p_info_nthw);
256 }
257 
258 static int get_nb_tpe_categories(void *be_dev)
259 {
260 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
261 	return info_nthw_get_nb_tpe_categories(be->p_info_nthw);
262 }
263 
264 static int get_nb_tx_cpy_writers(void *be_dev)
265 {
266 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
267 	return info_nthw_get_nb_tx_cpy_writers(be->p_info_nthw);
268 }
269 
270 static int get_nb_tx_cpy_mask_mem(void *be_dev)
271 {
272 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
273 	return info_nthw_get_nb_tx_cpy_mask_mem(be->p_info_nthw);
274 }
275 
276 static int get_nb_tx_rpl_depth(void *be_dev)
277 {
278 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
279 	return info_nthw_get_nb_tx_rpl_depth(be->p_info_nthw);
280 }
281 
282 static int get_nb_tx_rpl_ext_categories(void *be_dev)
283 {
284 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
285 	return info_nthw_get_nb_tx_rpl_ext_categories(be->p_info_nthw);
286 }
287 
288 static int get_nb_tpe_ifr_categories(void *be_dev)
289 {
290 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
291 	return info_nthw_get_nb_tpe_ifr_categories(be->p_info_nthw);
292 }
293 
294 static int get_nb_rpp_per_ps(void *be_dev)
295 {
296 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
297 	return info_nthw_get_nb_rpp_per_ps(be->p_info_nthw);
298 }
299 
300 static int get_nb_hsh_categories(void *be_dev)
301 {
302 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
303 	return info_nthw_get_nb_hsh_categories(be->p_info_nthw);
304 }
305 
306 static int get_nb_hsh_toeplitz(void *be_dev)
307 {
308 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
309 	return info_nthw_get_nb_hsh_toeplitz(be->p_info_nthw);
310 }
311 
312 /*
313  * CAT
314  */
315 
316 static bool cat_get_present(void *be_dev)
317 {
318 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
319 	return be->p_cat_nthw != NULL;
320 }
321 
322 static uint32_t cat_get_version(void *be_dev)
323 {
324 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
325 	return (uint32_t)((nthw_module_get_major_version(be->p_cat_nthw->m_cat) << 16) |
326 			(nthw_module_get_minor_version(be->p_cat_nthw->m_cat) & 0xffff));
327 }
328 
/*
 * Flush 'cnt' CFN records, starting at index 'cat_func', from the shadow
 * state in 'cat' to hardware. Each record is selected, all of its fields are
 * programmed, and the record is flushed before advancing to the next one.
 * Only shadow layouts ver 18 and ver 21 are handled; any other version is
 * silently ignored. Always returns 0.
 */
static int cat_cfn_flush(void *be_dev, const struct cat_func_s *cat, int cat_func, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;

	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);

	if (cat->ver == 18) {
		cat_nthw_cfn_cnt(be->p_cat_nthw, 1U);

		for (int i = 0; i < cnt; i++) {
			cat_nthw_cfn_select(be->p_cat_nthw, cat_func);
			cat_nthw_cfn_enable(be->p_cat_nthw, cat->v18.cfn[cat_func].enable);
			cat_nthw_cfn_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].inv);
			/* Protocol-type classification fields */
			cat_nthw_cfn_ptc_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_inv);
			cat_nthw_cfn_ptc_isl(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_isl);
			cat_nthw_cfn_ptc_cfp(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_cfp);
			cat_nthw_cfn_ptc_mac(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_mac);
			cat_nthw_cfn_ptc_l2(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_l2);
			cat_nthw_cfn_ptc_vn_tag(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_vntag);
			cat_nthw_cfn_ptc_vlan(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_vlan);
			cat_nthw_cfn_ptc_mpls(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_mpls);
			cat_nthw_cfn_ptc_l3(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_l3);
			cat_nthw_cfn_ptc_frag(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_frag);
			cat_nthw_cfn_ptc_ip_prot(be->p_cat_nthw,
				cat->v18.cfn[cat_func].ptc_ip_prot);
			cat_nthw_cfn_ptc_l4(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_l4);
			cat_nthw_cfn_ptc_tunnel(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tunnel);
			cat_nthw_cfn_ptc_tnl_l2(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_l2);
			cat_nthw_cfn_ptc_tnl_vlan(be->p_cat_nthw,
				cat->v18.cfn[cat_func].ptc_tnl_vlan);
			cat_nthw_cfn_ptc_tnl_mpls(be->p_cat_nthw,
				cat->v18.cfn[cat_func].ptc_tnl_mpls);
			cat_nthw_cfn_ptc_tnl_l3(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_l3);
			cat_nthw_cfn_ptc_tnl_frag(be->p_cat_nthw,
				cat->v18.cfn[cat_func].ptc_tnl_frag);
			cat_nthw_cfn_ptc_tnl_ip_prot(be->p_cat_nthw,
				cat->v18.cfn[cat_func].ptc_tnl_ip_prot);
			cat_nthw_cfn_ptc_tnl_l4(be->p_cat_nthw, cat->v18.cfn[cat_func].ptc_tnl_l4);

			/* Error-condition match fields */
			cat_nthw_cfn_err_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].err_inv);
			cat_nthw_cfn_err_cv(be->p_cat_nthw, cat->v18.cfn[cat_func].err_cv);
			cat_nthw_cfn_err_fcs(be->p_cat_nthw, cat->v18.cfn[cat_func].err_fcs);
			cat_nthw_cfn_err_trunc(be->p_cat_nthw, cat->v18.cfn[cat_func].err_trunc);
			cat_nthw_cfn_err_l3_cs(be->p_cat_nthw, cat->v18.cfn[cat_func].err_l3_cs);
			cat_nthw_cfn_err_l4_cs(be->p_cat_nthw, cat->v18.cfn[cat_func].err_l4_cs);

			cat_nthw_cfn_mac_port(be->p_cat_nthw, cat->v18.cfn[cat_func].mac_port);

			/* Pattern-matcher fields */
			cat_nthw_cfn_pm_cmp(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_cmp);
			cat_nthw_cfn_pm_dct(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_dct);
			cat_nthw_cfn_pm_ext_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_ext_inv);
			cat_nthw_cfn_pm_cmb(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_cmb);
			cat_nthw_cfn_pm_and_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_and_inv);
			cat_nthw_cfn_pm_or_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_or_inv);
			cat_nthw_cfn_pm_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].pm_inv);

			cat_nthw_cfn_lc(be->p_cat_nthw, cat->v18.cfn[cat_func].lc);
			cat_nthw_cfn_lc_inv(be->p_cat_nthw, cat->v18.cfn[cat_func].lc_inv);
			/* v18 has a single KM interface; km_or maps to KM0 */
			cat_nthw_cfn_km0_or(be->p_cat_nthw, cat->v18.cfn[cat_func].km_or);
			cat_nthw_cfn_flush(be->p_cat_nthw);
			cat_func++;
		}

	} else if (cat->ver == 21) {
		cat_nthw_cfn_cnt(be->p_cat_nthw, 1U);

		for (int i = 0; i < cnt; i++) {
			cat_nthw_cfn_select(be->p_cat_nthw, cat_func);
			cat_nthw_cfn_enable(be->p_cat_nthw, cat->v21.cfn[cat_func].enable);
			cat_nthw_cfn_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].inv);
			/* Protocol-type classification fields */
			cat_nthw_cfn_ptc_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_inv);
			cat_nthw_cfn_ptc_isl(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_isl);
			cat_nthw_cfn_ptc_cfp(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_cfp);
			cat_nthw_cfn_ptc_mac(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_mac);
			cat_nthw_cfn_ptc_l2(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_l2);
			cat_nthw_cfn_ptc_vn_tag(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_vntag);
			cat_nthw_cfn_ptc_vlan(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_vlan);
			cat_nthw_cfn_ptc_mpls(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_mpls);
			cat_nthw_cfn_ptc_l3(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_l3);
			cat_nthw_cfn_ptc_frag(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_frag);
			cat_nthw_cfn_ptc_ip_prot(be->p_cat_nthw,
				cat->v21.cfn[cat_func].ptc_ip_prot);
			cat_nthw_cfn_ptc_l4(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_l4);
			cat_nthw_cfn_ptc_tunnel(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tunnel);
			cat_nthw_cfn_ptc_tnl_l2(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_l2);
			cat_nthw_cfn_ptc_tnl_vlan(be->p_cat_nthw,
				cat->v21.cfn[cat_func].ptc_tnl_vlan);
			cat_nthw_cfn_ptc_tnl_mpls(be->p_cat_nthw,
				cat->v21.cfn[cat_func].ptc_tnl_mpls);
			cat_nthw_cfn_ptc_tnl_l3(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_l3);
			cat_nthw_cfn_ptc_tnl_frag(be->p_cat_nthw,
				cat->v21.cfn[cat_func].ptc_tnl_frag);
			cat_nthw_cfn_ptc_tnl_ip_prot(be->p_cat_nthw,
				cat->v21.cfn[cat_func].ptc_tnl_ip_prot);
			cat_nthw_cfn_ptc_tnl_l4(be->p_cat_nthw, cat->v21.cfn[cat_func].ptc_tnl_l4);

			/* Error-condition match fields; v21 adds tunnel checksum/TTL */
			cat_nthw_cfn_err_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].err_inv);
			cat_nthw_cfn_err_cv(be->p_cat_nthw, cat->v21.cfn[cat_func].err_cv);
			cat_nthw_cfn_err_fcs(be->p_cat_nthw, cat->v21.cfn[cat_func].err_fcs);
			cat_nthw_cfn_err_trunc(be->p_cat_nthw, cat->v21.cfn[cat_func].err_trunc);
			cat_nthw_cfn_err_l3_cs(be->p_cat_nthw, cat->v21.cfn[cat_func].err_l3_cs);
			cat_nthw_cfn_err_l4_cs(be->p_cat_nthw, cat->v21.cfn[cat_func].err_l4_cs);
			cat_nthw_cfn_err_tnl_l3_cs(be->p_cat_nthw,
				cat->v21.cfn[cat_func].err_tnl_l3_cs);
			cat_nthw_cfn_err_tnl_l4_cs(be->p_cat_nthw,
				cat->v21.cfn[cat_func].err_tnl_l4_cs);
			cat_nthw_cfn_err_ttl_exp(be->p_cat_nthw,
				cat->v21.cfn[cat_func].err_ttl_exp);
			cat_nthw_cfn_err_tnl_ttl_exp(be->p_cat_nthw,
				cat->v21.cfn[cat_func].err_tnl_ttl_exp);

			cat_nthw_cfn_mac_port(be->p_cat_nthw, cat->v21.cfn[cat_func].mac_port);

			/* Pattern-matcher fields */
			cat_nthw_cfn_pm_cmp(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_cmp);
			cat_nthw_cfn_pm_dct(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_dct);
			cat_nthw_cfn_pm_ext_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_ext_inv);
			cat_nthw_cfn_pm_cmb(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_cmb);
			cat_nthw_cfn_pm_and_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_and_inv);
			cat_nthw_cfn_pm_or_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_or_inv);
			cat_nthw_cfn_pm_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].pm_inv);

			cat_nthw_cfn_lc(be->p_cat_nthw, cat->v21.cfn[cat_func].lc);
			cat_nthw_cfn_lc_inv(be->p_cat_nthw, cat->v21.cfn[cat_func].lc_inv);
			cat_nthw_cfn_km0_or(be->p_cat_nthw, cat->v21.cfn[cat_func].km0_or);

			/* KM1 register exists only when two KM interfaces are present */
			if (be->p_cat_nthw->m_km_if_cnt > 1)
				cat_nthw_cfn_km1_or(be->p_cat_nthw, cat->v21.cfn[cat_func].km1_or);

			cat_nthw_cfn_flush(be->p_cat_nthw);
			cat_func++;
		}
	}

	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
	return 0;
}
465 
466 static int cat_kce_flush(void *be_dev, const struct cat_func_s *cat, int km_if_idx, int index,
467 	int cnt)
468 {
469 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
470 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
471 
472 	if (cat->ver == 18) {
473 		cat_nthw_kce_cnt(be->p_cat_nthw, 0, 1U);
474 
475 		for (int i = 0; i < cnt; i++) {
476 			cat_nthw_kce_select(be->p_cat_nthw, 0, index + i);
477 			cat_nthw_kce_enable(be->p_cat_nthw, 0, cat->v18.kce[index + i].enable_bm);
478 			cat_nthw_kce_flush(be->p_cat_nthw, 0);
479 		}
480 
481 	} else if (cat->ver == 21) {
482 		cat_nthw_kce_cnt(be->p_cat_nthw, km_if_idx, 1U);
483 
484 		for (int i = 0; i < cnt; i++) {
485 			cat_nthw_kce_select(be->p_cat_nthw, km_if_idx, index + i);
486 			cat_nthw_kce_enable(be->p_cat_nthw, km_if_idx,
487 				cat->v21.kce[index + i].enable_bm[km_if_idx]);
488 			cat_nthw_kce_flush(be->p_cat_nthw, km_if_idx);
489 		}
490 	}
491 
492 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
493 	return 0;
494 }
495 
496 static int cat_kcs_flush(void *be_dev, const struct cat_func_s *cat, int km_if_idx, int cat_func,
497 	int cnt)
498 {
499 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
500 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
501 
502 	if (cat->ver == 18) {
503 		cat_nthw_kcs_cnt(be->p_cat_nthw, 0, 1U);
504 
505 		for (int i = 0; i < cnt; i++) {
506 			cat_nthw_kcs_select(be->p_cat_nthw, 0, cat_func);
507 			cat_nthw_kcs_category(be->p_cat_nthw, 0, cat->v18.kcs[cat_func].category);
508 			cat_nthw_kcs_flush(be->p_cat_nthw, 0);
509 			cat_func++;
510 		}
511 
512 	} else if (cat->ver == 21) {
513 		cat_nthw_kcs_cnt(be->p_cat_nthw, km_if_idx, 1U);
514 
515 		for (int i = 0; i < cnt; i++) {
516 			cat_nthw_kcs_select(be->p_cat_nthw, km_if_idx, cat_func);
517 			cat_nthw_kcs_category(be->p_cat_nthw, km_if_idx,
518 				cat->v21.kcs[cat_func].category[km_if_idx]);
519 			cat_nthw_kcs_flush(be->p_cat_nthw, km_if_idx);
520 			cat_func++;
521 		}
522 	}
523 
524 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
525 	return 0;
526 }
527 
528 static int cat_fte_flush(void *be_dev, const struct cat_func_s *cat, int km_if_idx, int index,
529 	int cnt)
530 {
531 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
532 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
533 
534 	if (cat->ver == 18) {
535 		cat_nthw_fte_cnt(be->p_cat_nthw, 0, 1);
536 
537 		for (int i = 0; i < cnt; i++) {
538 			cat_nthw_fte_select(be->p_cat_nthw, 0, index + i);
539 			cat_nthw_fte_enable(be->p_cat_nthw, 0, cat->v18.fte[index + i].enable_bm);
540 			cat_nthw_fte_flush(be->p_cat_nthw, 0);
541 		}
542 
543 	} else if (cat->ver == 21) {
544 		cat_nthw_fte_cnt(be->p_cat_nthw, km_if_idx, 1);
545 
546 		for (int i = 0; i < cnt; i++) {
547 			cat_nthw_fte_select(be->p_cat_nthw, km_if_idx, index + i);
548 			cat_nthw_fte_enable(be->p_cat_nthw, km_if_idx,
549 				cat->v21.fte[index + i].enable_bm[km_if_idx]);
550 			cat_nthw_fte_flush(be->p_cat_nthw, km_if_idx);
551 		}
552 	}
553 
554 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
555 	return 0;
556 }
557 
558 static int cat_cte_flush(void *be_dev, const struct cat_func_s *cat, int cat_func, int cnt)
559 {
560 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
561 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
562 
563 	if (cat->ver == 18 || cat->ver == 21) {
564 		cat_nthw_cte_cnt(be->p_cat_nthw, 1);
565 
566 		for (int i = 0; i < cnt; i++) {
567 			cat_nthw_cte_select(be->p_cat_nthw, cat_func);
568 			cat_nthw_cte_enable_col(be->p_cat_nthw, cat->v18.cte[cat_func].b.col);
569 			cat_nthw_cte_enable_cor(be->p_cat_nthw, cat->v18.cte[cat_func].b.cor);
570 			cat_nthw_cte_enable_hsh(be->p_cat_nthw, cat->v18.cte[cat_func].b.hsh);
571 			cat_nthw_cte_enable_qsl(be->p_cat_nthw, cat->v18.cte[cat_func].b.qsl);
572 			cat_nthw_cte_enable_ipf(be->p_cat_nthw, cat->v18.cte[cat_func].b.ipf);
573 			cat_nthw_cte_enable_slc(be->p_cat_nthw, cat->v18.cte[cat_func].b.slc);
574 			cat_nthw_cte_enable_pdb(be->p_cat_nthw, cat->v18.cte[cat_func].b.pdb);
575 			cat_nthw_cte_enable_msk(be->p_cat_nthw, cat->v18.cte[cat_func].b.msk);
576 			cat_nthw_cte_enable_hst(be->p_cat_nthw, cat->v18.cte[cat_func].b.hst);
577 			cat_nthw_cte_enable_epp(be->p_cat_nthw, cat->v18.cte[cat_func].b.epp);
578 			cat_nthw_cte_enable_tpe(be->p_cat_nthw, cat->v18.cte[cat_func].b.tpe);
579 
580 			cat_nthw_cte_flush(be->p_cat_nthw);
581 			cat_func++;
582 		}
583 	}
584 
585 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
586 	return 0;
587 }
588 
589 static int cat_cts_flush(void *be_dev, const struct cat_func_s *cat, int index, int cnt)
590 {
591 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
592 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
593 
594 	if (cat->ver == 18 || cat->ver == 21) {
595 		cat_nthw_cts_cnt(be->p_cat_nthw, 1);
596 
597 		for (int i = 0; i < cnt; i++) {
598 			cat_nthw_cts_select(be->p_cat_nthw, index + i);
599 			cat_nthw_cts_cat_a(be->p_cat_nthw, cat->v18.cts[index + i].cat_a);
600 			cat_nthw_cts_cat_b(be->p_cat_nthw, cat->v18.cts[index + i].cat_b);
601 			cat_nthw_cts_flush(be->p_cat_nthw);
602 		}
603 	}
604 
605 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
606 	return 0;
607 }
608 
609 static int cat_cot_flush(void *be_dev, const struct cat_func_s *cat, int cat_func, int cnt)
610 {
611 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
612 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
613 
614 	if (cat->ver == 18 || cat->ver == 21) {
615 		cat_nthw_cot_cnt(be->p_cat_nthw, 1);
616 
617 		for (int i = 0; i < cnt; i++) {
618 			cat_nthw_cot_select(be->p_cat_nthw, cat_func + i);
619 			cat_nthw_cot_color(be->p_cat_nthw, cat->v18.cot[cat_func + i].color);
620 			cat_nthw_cot_km(be->p_cat_nthw, cat->v18.cot[cat_func + i].km);
621 			cat_nthw_cot_flush(be->p_cat_nthw);
622 		}
623 	}
624 
625 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
626 	return 0;
627 }
628 
629 static int cat_cct_flush(void *be_dev, const struct cat_func_s *cat, int index, int cnt)
630 {
631 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
632 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
633 
634 	if (cat->ver == 18 || cat->ver == 21) {
635 		cat_nthw_cct_cnt(be->p_cat_nthw, 1);
636 
637 		for (int i = 0; i < cnt; i++) {
638 			cat_nthw_cct_select(be->p_cat_nthw, index + i);
639 			cat_nthw_cct_color(be->p_cat_nthw, cat->v18.cct[index + i].color);
640 			cat_nthw_cct_km(be->p_cat_nthw, cat->v18.cct[index + i].km);
641 			cat_nthw_cct_flush(be->p_cat_nthw);
642 		}
643 	}
644 
645 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
646 	return 0;
647 }
648 
649 static int cat_exo_flush(void *be_dev, const struct cat_func_s *cat, int ext_index, int cnt)
650 {
651 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
652 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
653 
654 	if (cat->ver == 18 || cat->ver == 21) {
655 		cat_nthw_exo_cnt(be->p_cat_nthw, 1);
656 
657 		for (int i = 0; i < cnt; i++) {
658 			cat_nthw_exo_select(be->p_cat_nthw, ext_index + i);
659 			cat_nthw_exo_dyn(be->p_cat_nthw, cat->v18.exo[ext_index + i].dyn);
660 			cat_nthw_exo_ofs(be->p_cat_nthw, cat->v18.exo[ext_index + i].ofs);
661 			cat_nthw_exo_flush(be->p_cat_nthw);
662 		}
663 	}
664 
665 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
666 	return 0;
667 }
668 
669 static int cat_rck_flush(void *be_dev, const struct cat_func_s *cat, int index, int cnt)
670 {
671 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
672 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
673 
674 	if (cat->ver == 18 || cat->ver == 21) {
675 		cat_nthw_rck_cnt(be->p_cat_nthw, 1);
676 
677 		for (int i = 0; i < cnt; i++) {
678 			cat_nthw_rck_select(be->p_cat_nthw, index + i);
679 			cat_nthw_rck_data(be->p_cat_nthw, cat->v18.rck[index + i].rck_data);
680 			cat_nthw_rck_flush(be->p_cat_nthw);
681 		}
682 	}
683 
684 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
685 	return 0;
686 }
687 
688 static int cat_len_flush(void *be_dev, const struct cat_func_s *cat, int len_index, int cnt)
689 {
690 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
691 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
692 
693 	if (cat->ver == 18 || cat->ver == 21) {
694 		cat_nthw_len_cnt(be->p_cat_nthw, 1);
695 
696 		for (int i = 0; i < cnt; i++) {
697 			cat_nthw_len_select(be->p_cat_nthw, len_index + i);
698 			cat_nthw_len_lower(be->p_cat_nthw, cat->v18.len[len_index + i].lower);
699 			cat_nthw_len_upper(be->p_cat_nthw, cat->v18.len[len_index + i].upper);
700 			cat_nthw_len_dyn1(be->p_cat_nthw, cat->v18.len[len_index + i].dyn1);
701 			cat_nthw_len_dyn2(be->p_cat_nthw, cat->v18.len[len_index + i].dyn2);
702 			cat_nthw_len_inv(be->p_cat_nthw, cat->v18.len[len_index + i].inv);
703 			cat_nthw_len_flush(be->p_cat_nthw);
704 		}
705 	}
706 
707 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
708 	return 0;
709 }
710 
711 static int cat_kcc_flush(void *be_dev, const struct cat_func_s *cat, int len_index, int cnt)
712 {
713 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
714 	CHECK_DEBUG_ON(be, cat, be->p_cat_nthw);
715 
716 	if (cat->ver == 18 || cat->ver == 21) {
717 		cat_nthw_kcc_cnt(be->p_cat_nthw, 1);
718 
719 		for (int i = 0; i < cnt; i++) {
720 			cat_nthw_kcc_select(be->p_cat_nthw, len_index + i);
721 			cat_nthw_kcc_key(be->p_cat_nthw, cat->v18.kcc_cam[len_index + i].key);
722 			cat_nthw_kcc_category(be->p_cat_nthw,
723 				cat->v18.kcc_cam[len_index + i].category);
724 			cat_nthw_kcc_id(be->p_cat_nthw, cat->v18.kcc_cam[len_index + i].id);
725 			cat_nthw_kcc_flush(be->p_cat_nthw);
726 		}
727 	}
728 
729 	CHECK_DEBUG_OFF(cat, be->p_cat_nthw);
730 	return 0;
731 }
732 
733 /*
734  * KM
735  */
736 
737 static bool km_get_present(void *be_dev)
738 {
739 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
740 	return be->p_km_nthw != NULL;
741 }
742 
743 static uint32_t km_get_version(void *be_dev)
744 {
745 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
746 	return (uint32_t)((nthw_module_get_major_version(be->p_km_nthw->m_km) << 16) |
747 			(nthw_module_get_minor_version(be->p_km_nthw->m_km) & 0xffff));
748 }
749 
/*
 * Flush 'cnt' KM RCP (recipe) records, starting at 'category', from the
 * shadow state in 'km' to hardware. Each record is selected, all of its
 * fields are programmed, and the record is flushed. Only the ver 7 shadow
 * layout is handled; other versions are silently ignored. Always returns 0.
 */
static int km_rcp_flush(void *be_dev, const struct km_func_s *km, int category, int cnt)
{
	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;

	CHECK_DEBUG_ON(be, km, be->p_km_nthw);

	if (km->ver == 7) {
		km_nthw_rcp_cnt(be->p_km_nthw, 1);

		for (int i = 0; i < cnt; i++) {
			km_nthw_rcp_select(be->p_km_nthw, category + i);
			/* Key word extractor configuration (QW/DW/SW selections) */
			km_nthw_rcp_qw0_dyn(be->p_km_nthw, km->v7.rcp[category + i].qw0_dyn);
			km_nthw_rcp_qw0_ofs(be->p_km_nthw, km->v7.rcp[category + i].qw0_ofs);
			km_nthw_rcp_qw0_sel_a(be->p_km_nthw, km->v7.rcp[category + i].qw0_sel_a);
			km_nthw_rcp_qw0_sel_b(be->p_km_nthw, km->v7.rcp[category + i].qw0_sel_b);
			km_nthw_rcp_qw4_dyn(be->p_km_nthw, km->v7.rcp[category + i].qw4_dyn);
			km_nthw_rcp_qw4_ofs(be->p_km_nthw, km->v7.rcp[category + i].qw4_ofs);
			km_nthw_rcp_qw4_sel_a(be->p_km_nthw, km->v7.rcp[category + i].qw4_sel_a);
			km_nthw_rcp_qw4_sel_b(be->p_km_nthw, km->v7.rcp[category + i].qw4_sel_b);
			km_nthw_rcp_dw8_dyn(be->p_km_nthw, km->v7.rcp[category + i].dw8_dyn);
			km_nthw_rcp_dw8_ofs(be->p_km_nthw, km->v7.rcp[category + i].dw8_ofs);
			km_nthw_rcp_dw8_sel_a(be->p_km_nthw, km->v7.rcp[category + i].dw8_sel_a);
			km_nthw_rcp_dw8_sel_b(be->p_km_nthw, km->v7.rcp[category + i].dw8_sel_b);
			km_nthw_rcp_dw10_dyn(be->p_km_nthw, km->v7.rcp[category + i].dw10_dyn);
			km_nthw_rcp_dw10_ofs(be->p_km_nthw, km->v7.rcp[category + i].dw10_ofs);
			km_nthw_rcp_dw10_sel_a(be->p_km_nthw, km->v7.rcp[category + i].dw10_sel_a);
			km_nthw_rcp_dw10_sel_b(be->p_km_nthw, km->v7.rcp[category + i].dw10_sel_b);
			km_nthw_rcp_swx_cch(be->p_km_nthw, km->v7.rcp[category + i].swx_cch);
			km_nthw_rcp_swx_sel_a(be->p_km_nthw, km->v7.rcp[category + i].swx_sel_a);
			km_nthw_rcp_swx_sel_b(be->p_km_nthw, km->v7.rcp[category + i].swx_sel_b);
			km_nthw_rcp_mask_da(be->p_km_nthw, km->v7.rcp[category + i].mask_d_a);
			km_nthw_rcp_mask_b(be->p_km_nthw, km->v7.rcp[category + i].mask_b);
			km_nthw_rcp_dual(be->p_km_nthw, km->v7.rcp[category + i].dual);
			km_nthw_rcp_paired(be->p_km_nthw, km->v7.rcp[category + i].paired);
			/* A/B lookup configuration */
			km_nthw_rcp_el_a(be->p_km_nthw, km->v7.rcp[category + i].el_a);
			km_nthw_rcp_el_b(be->p_km_nthw, km->v7.rcp[category + i].el_b);
			km_nthw_rcp_info_a(be->p_km_nthw, km->v7.rcp[category + i].info_a);
			km_nthw_rcp_info_b(be->p_km_nthw, km->v7.rcp[category + i].info_b);
			km_nthw_rcp_ftm_a(be->p_km_nthw, km->v7.rcp[category + i].ftm_a);
			km_nthw_rcp_ftm_b(be->p_km_nthw, km->v7.rcp[category + i].ftm_b);
			km_nthw_rcp_bank_a(be->p_km_nthw, km->v7.rcp[category + i].bank_a);
			km_nthw_rcp_bank_b(be->p_km_nthw, km->v7.rcp[category + i].bank_b);
			km_nthw_rcp_kl_a(be->p_km_nthw, km->v7.rcp[category + i].kl_a);
			km_nthw_rcp_kl_b(be->p_km_nthw, km->v7.rcp[category + i].kl_b);
			km_nthw_rcp_keyway_a(be->p_km_nthw, km->v7.rcp[category + i].keyway_a);
			km_nthw_rcp_keyway_b(be->p_km_nthw, km->v7.rcp[category + i].keyway_b);
			km_nthw_rcp_synergy_mode(be->p_km_nthw,
				km->v7.rcp[category + i].synergy_mode);
			km_nthw_rcp_dw0_b_dyn(be->p_km_nthw, km->v7.rcp[category + i].dw0_b_dyn);
			km_nthw_rcp_dw0_b_ofs(be->p_km_nthw, km->v7.rcp[category + i].dw0_b_ofs);
			km_nthw_rcp_dw2_b_dyn(be->p_km_nthw, km->v7.rcp[category + i].dw2_b_dyn);
			km_nthw_rcp_dw2_b_ofs(be->p_km_nthw, km->v7.rcp[category + i].dw2_b_ofs);
			km_nthw_rcp_sw4_b_dyn(be->p_km_nthw, km->v7.rcp[category + i].sw4_b_dyn);
			km_nthw_rcp_sw4_b_ofs(be->p_km_nthw, km->v7.rcp[category + i].sw4_b_ofs);
			km_nthw_rcp_sw5_b_dyn(be->p_km_nthw, km->v7.rcp[category + i].sw5_b_dyn);
			km_nthw_rcp_sw5_b_ofs(be->p_km_nthw, km->v7.rcp[category + i].sw5_b_ofs);
			km_nthw_rcp_flush(be->p_km_nthw);
		}
	}

	CHECK_DEBUG_OFF(km, be->p_km_nthw);
	return 0;
}
813 
814 static int km_cam_flush(void *be_dev, const struct km_func_s *km, int bank, int record, int cnt)
815 {
816 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
817 	CHECK_DEBUG_ON(be, km, be->p_km_nthw);
818 
819 	if (km->ver == 7) {
820 		km_nthw_cam_cnt(be->p_km_nthw, 1);
821 
822 		for (int i = 0; i < cnt; i++) {
823 			km_nthw_cam_select(be->p_km_nthw, (bank << 11) + record + i);
824 			km_nthw_cam_w0(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w0);
825 			km_nthw_cam_w1(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w1);
826 			km_nthw_cam_w2(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w2);
827 			km_nthw_cam_w3(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w3);
828 			km_nthw_cam_w4(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w4);
829 			km_nthw_cam_w5(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].w5);
830 			km_nthw_cam_ft0(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft0);
831 			km_nthw_cam_ft1(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft1);
832 			km_nthw_cam_ft2(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft2);
833 			km_nthw_cam_ft3(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft3);
834 			km_nthw_cam_ft4(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft4);
835 			km_nthw_cam_ft5(be->p_km_nthw, km->v7.cam[(bank << 11) + record + i].ft5);
836 			km_nthw_cam_flush(be->p_km_nthw);
837 		}
838 	}
839 
840 	CHECK_DEBUG_OFF(km, be->p_km_nthw);
841 	return 0;
842 }
843 
844 static int km_tcam_flush(void *be_dev, const struct km_func_s *km, int bank, int byte, int value,
845 	int cnt)
846 {
847 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
848 	CHECK_DEBUG_ON(be, km, be->p_km_nthw);
849 
850 	if (km->ver == 7) {
851 		int start_idx = bank * 4 * 256 + byte * 256 + value;
852 		km_nthw_tcam_cnt(be->p_km_nthw, 1);
853 
854 		for (int i = 0; i < cnt; i++) {
855 			if (km->v7.tcam[start_idx + i].dirty) {
856 				km_nthw_tcam_select(be->p_km_nthw, start_idx + i);
857 				km_nthw_tcam_t(be->p_km_nthw, km->v7.tcam[start_idx + i].t);
858 				km_nthw_tcam_flush(be->p_km_nthw);
859 				km->v7.tcam[start_idx + i].dirty = 0;
860 			}
861 		}
862 	}
863 
864 	CHECK_DEBUG_OFF(km, be->p_km_nthw);
865 	return 0;
866 }
867 
868 /*
869  * bank is the TCAM bank, index is the index within the bank (0..71)
870  */
871 static int km_tci_flush(void *be_dev, const struct km_func_s *km, int bank, int index, int cnt)
872 {
873 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
874 	CHECK_DEBUG_ON(be, km, be->p_km_nthw);
875 
876 	if (km->ver == 7) {
877 		/* TCAM bank width in version 3 = 72 */
878 		km_nthw_tci_cnt(be->p_km_nthw, 1);
879 
880 		for (int i = 0; i < cnt; i++) {
881 			km_nthw_tci_select(be->p_km_nthw, bank * 72 + index + i);
882 			km_nthw_tci_color(be->p_km_nthw, km->v7.tci[bank * 72 + index + i].color);
883 			km_nthw_tci_ft(be->p_km_nthw, km->v7.tci[bank * 72 + index + i].ft);
884 			km_nthw_tci_flush(be->p_km_nthw);
885 		}
886 	}
887 
888 	CHECK_DEBUG_OFF(km, be->p_km_nthw);
889 	return 0;
890 }
891 
892 /*
893  * bank is the TCAM bank, index is the index within the bank (0..71)
894  */
895 static int km_tcq_flush(void *be_dev, const struct km_func_s *km, int bank, int index, int cnt)
896 {
897 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
898 	CHECK_DEBUG_ON(be, km, be->p_km_nthw);
899 
900 	if (km->ver == 7) {
901 		/* TCAM bank width in version 3 = 72 */
902 		km_nthw_tcq_cnt(be->p_km_nthw, 1);
903 
904 		for (int i = 0; i < cnt; i++) {
905 			/* adr = lover 4 bits = bank, upper 7 bits = index */
906 			km_nthw_tcq_select(be->p_km_nthw, bank + (index << 4) + i);
907 			km_nthw_tcq_bank_mask(be->p_km_nthw,
908 				km->v7.tcq[bank + (index << 4) + i].bank_mask);
909 			km_nthw_tcq_qual(be->p_km_nthw, km->v7.tcq[bank + (index << 4) + i].qual);
910 			km_nthw_tcq_flush(be->p_km_nthw);
911 		}
912 	}
913 
914 	CHECK_DEBUG_OFF(km, be->p_km_nthw);
915 	return 0;
916 }
917 
918 /*
919  * FLM
920  */
921 
922 static bool flm_get_present(void *be_dev)
923 {
924 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
925 	return be->p_flm_nthw != NULL;
926 }
927 
928 static uint32_t flm_get_version(void *be_dev)
929 {
930 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
931 	return (uint32_t)((nthw_module_get_major_version(be->p_flm_nthw->m_flm) << 16) |
932 			(nthw_module_get_minor_version(be->p_flm_nthw->m_flm) & 0xffff));
933 }
934 
935 static int flm_control_flush(void *be_dev, const struct flm_func_s *flm)
936 {
937 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
938 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
939 
940 	if (flm->ver >= 25) {
941 		flm_nthw_control_enable(be->p_flm_nthw, flm->v25.control->enable);
942 		flm_nthw_control_init(be->p_flm_nthw, flm->v25.control->init);
943 		flm_nthw_control_lds(be->p_flm_nthw, flm->v25.control->lds);
944 		flm_nthw_control_lfs(be->p_flm_nthw, flm->v25.control->lfs);
945 		flm_nthw_control_lis(be->p_flm_nthw, flm->v25.control->lis);
946 		flm_nthw_control_uds(be->p_flm_nthw, flm->v25.control->uds);
947 		flm_nthw_control_uis(be->p_flm_nthw, flm->v25.control->uis);
948 		flm_nthw_control_rds(be->p_flm_nthw, flm->v25.control->rds);
949 		flm_nthw_control_ris(be->p_flm_nthw, flm->v25.control->ris);
950 		flm_nthw_control_pds(be->p_flm_nthw, flm->v25.control->pds);
951 		flm_nthw_control_pis(be->p_flm_nthw, flm->v25.control->pis);
952 		flm_nthw_control_crcwr(be->p_flm_nthw, flm->v25.control->crcwr);
953 		flm_nthw_control_crcrd(be->p_flm_nthw, flm->v25.control->crcrd);
954 		flm_nthw_control_rbl(be->p_flm_nthw, flm->v25.control->rbl);
955 		flm_nthw_control_eab(be->p_flm_nthw, flm->v25.control->eab);
956 		flm_nthw_control_split_sdram_usage(be->p_flm_nthw,
957 			flm->v25.control->split_sdram_usage);
958 		flm_nthw_control_flush(be->p_flm_nthw);
959 	}
960 
961 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
962 	return 0;
963 }
964 
965 static int flm_status_flush(void *be_dev, const struct flm_func_s *flm)
966 {
967 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
968 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
969 
970 	if (flm->ver >= 25) {
971 		/* CALIBDONE, INITDONE, IDLE, and EFT_BP is read only */
972 		flm_nthw_status_critical(be->p_flm_nthw, &flm->v25.status->critical, 0);
973 		flm_nthw_status_panic(be->p_flm_nthw, &flm->v25.status->panic, 0);
974 		flm_nthw_status_crcerr(be->p_flm_nthw, &flm->v25.status->crcerr, 0);
975 		flm_nthw_status_cache_buf_crit(be->p_flm_nthw,
976 			&flm->v25.status->cache_buf_critical, 0);
977 		flm_nthw_status_flush(be->p_flm_nthw);
978 	}
979 
980 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
981 	return 0;
982 }
983 
984 static int flm_status_update(void *be_dev, const struct flm_func_s *flm)
985 {
986 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
987 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
988 
989 	if (flm->ver >= 25) {
990 		flm_nthw_status_update(be->p_flm_nthw);
991 		flm_nthw_status_calib_success(be->p_flm_nthw, &flm->v25.status->calib_success, 1);
992 		flm_nthw_status_calib_fail(be->p_flm_nthw, &flm->v25.status->calib_fail, 1);
993 		flm_nthw_status_initdone(be->p_flm_nthw, &flm->v25.status->initdone, 1);
994 		flm_nthw_status_idle(be->p_flm_nthw, &flm->v25.status->idle, 1);
995 		flm_nthw_status_critical(be->p_flm_nthw, &flm->v25.status->critical, 1);
996 		flm_nthw_status_panic(be->p_flm_nthw, &flm->v25.status->panic, 1);
997 		flm_nthw_status_crcerr(be->p_flm_nthw, &flm->v25.status->crcerr, 1);
998 		flm_nthw_status_eft_bp(be->p_flm_nthw, &flm->v25.status->eft_bp, 1);
999 		flm_nthw_status_cache_buf_crit(be->p_flm_nthw,
1000 			&flm->v25.status->cache_buf_critical, 1);
1001 	}
1002 
1003 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1004 	return 0;
1005 }
1006 
1007 static int flm_scan_flush(void *be_dev, const struct flm_func_s *flm)
1008 {
1009 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1010 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1011 
1012 	if (flm->ver >= 25) {
1013 		flm_nthw_scan_i(be->p_flm_nthw, flm->v25.scan->i);
1014 		flm_nthw_scan_flush(be->p_flm_nthw);
1015 	}
1016 
1017 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1018 	return 0;
1019 }
1020 
1021 static int flm_load_bin_flush(void *be_dev, const struct flm_func_s *flm)
1022 {
1023 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1024 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1025 
1026 	if (flm->ver >= 25) {
1027 		flm_nthw_load_bin(be->p_flm_nthw, flm->v25.load_bin->bin);
1028 		flm_nthw_load_bin_flush(be->p_flm_nthw);
1029 	}
1030 
1031 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1032 	return 0;
1033 }
1034 
1035 static int flm_prio_flush(void *be_dev, const struct flm_func_s *flm)
1036 {
1037 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1038 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1039 
1040 	if (flm->ver >= 25) {
1041 		flm_nthw_prio_limit0(be->p_flm_nthw, flm->v25.prio->limit0);
1042 		flm_nthw_prio_ft0(be->p_flm_nthw, flm->v25.prio->ft0);
1043 		flm_nthw_prio_limit1(be->p_flm_nthw, flm->v25.prio->limit1);
1044 		flm_nthw_prio_ft1(be->p_flm_nthw, flm->v25.prio->ft1);
1045 		flm_nthw_prio_limit2(be->p_flm_nthw, flm->v25.prio->limit2);
1046 		flm_nthw_prio_ft2(be->p_flm_nthw, flm->v25.prio->ft2);
1047 		flm_nthw_prio_limit3(be->p_flm_nthw, flm->v25.prio->limit3);
1048 		flm_nthw_prio_ft3(be->p_flm_nthw, flm->v25.prio->ft3);
1049 		flm_nthw_prio_flush(be->p_flm_nthw);
1050 	}
1051 
1052 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1053 	return 0;
1054 }
1055 
1056 static int flm_pst_flush(void *be_dev, const struct flm_func_s *flm, int index, int cnt)
1057 {
1058 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1059 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1060 
1061 	if (flm->ver >= 25) {
1062 		flm_nthw_pst_cnt(be->p_flm_nthw, 1);
1063 
1064 		for (int i = 0; i < cnt; i++) {
1065 			flm_nthw_pst_select(be->p_flm_nthw, index + i);
1066 			flm_nthw_pst_bp(be->p_flm_nthw, flm->v25.pst[index + i].bp);
1067 			flm_nthw_pst_pp(be->p_flm_nthw, flm->v25.pst[index + i].pp);
1068 			flm_nthw_pst_tp(be->p_flm_nthw, flm->v25.pst[index + i].tp);
1069 			flm_nthw_pst_flush(be->p_flm_nthw);
1070 		}
1071 	}
1072 
1073 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1074 	return 0;
1075 }
1076 
1077 static int flm_rcp_flush(void *be_dev, const struct flm_func_s *flm, int index, int cnt)
1078 {
1079 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1080 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1081 
1082 	if (flm->ver >= 25) {
1083 		flm_nthw_rcp_cnt(be->p_flm_nthw, 1);
1084 
1085 		for (int i = 0; i < cnt; i++) {
1086 			flm_nthw_rcp_select(be->p_flm_nthw, index + i);
1087 			flm_nthw_rcp_lookup(be->p_flm_nthw, flm->v25.rcp[index + i].lookup);
1088 			flm_nthw_rcp_qw0_dyn(be->p_flm_nthw, flm->v25.rcp[index + i].qw0_dyn);
1089 			flm_nthw_rcp_qw0_ofs(be->p_flm_nthw, flm->v25.rcp[index + i].qw0_ofs);
1090 			flm_nthw_rcp_qw0_sel(be->p_flm_nthw, flm->v25.rcp[index + i].qw0_sel);
1091 			flm_nthw_rcp_qw4_dyn(be->p_flm_nthw, flm->v25.rcp[index + i].qw4_dyn);
1092 			flm_nthw_rcp_qw4_ofs(be->p_flm_nthw, flm->v25.rcp[index + i].qw4_ofs);
1093 			flm_nthw_rcp_sw8_dyn(be->p_flm_nthw, flm->v25.rcp[index + i].sw8_dyn);
1094 			flm_nthw_rcp_sw8_ofs(be->p_flm_nthw, flm->v25.rcp[index + i].sw8_ofs);
1095 			flm_nthw_rcp_sw8_sel(be->p_flm_nthw, flm->v25.rcp[index + i].sw8_sel);
1096 			flm_nthw_rcp_sw9_dyn(be->p_flm_nthw, flm->v25.rcp[index + i].sw9_dyn);
1097 			flm_nthw_rcp_sw9_ofs(be->p_flm_nthw, flm->v25.rcp[index + i].sw9_ofs);
1098 			flm_nthw_rcp_mask(be->p_flm_nthw, flm->v25.rcp[index + i].mask);
1099 			flm_nthw_rcp_kid(be->p_flm_nthw, flm->v25.rcp[index + i].kid);
1100 			flm_nthw_rcp_opn(be->p_flm_nthw, flm->v25.rcp[index + i].opn);
1101 			flm_nthw_rcp_ipn(be->p_flm_nthw, flm->v25.rcp[index + i].ipn);
1102 			flm_nthw_rcp_byt_dyn(be->p_flm_nthw, flm->v25.rcp[index + i].byt_dyn);
1103 			flm_nthw_rcp_byt_ofs(be->p_flm_nthw, flm->v25.rcp[index + i].byt_ofs);
1104 			flm_nthw_rcp_txplm(be->p_flm_nthw, flm->v25.rcp[index + i].txplm);
1105 			flm_nthw_rcp_auto_ipv4_mask(be->p_flm_nthw,
1106 				flm->v25.rcp[index + i].auto_ipv4_mask);
1107 			flm_nthw_rcp_flush(be->p_flm_nthw);
1108 		}
1109 	}
1110 
1111 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1112 	return 0;
1113 }
1114 
1115 static int flm_scrub_flush(void *be_dev, const struct flm_func_s *flm, int index, int cnt)
1116 {
1117 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1118 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1119 
1120 	if (flm->ver >= 25) {
1121 		flm_nthw_scrub_cnt(be->p_flm_nthw, 1);
1122 
1123 		for (int i = 0; i < cnt; i++) {
1124 			flm_nthw_scrub_select(be->p_flm_nthw, index + i);
1125 			flm_nthw_scrub_t(be->p_flm_nthw, flm->v25.scrub[index + i].t);
1126 			flm_nthw_scrub_r(be->p_flm_nthw, flm->v25.scrub[index + i].r);
1127 			flm_nthw_scrub_del(be->p_flm_nthw, flm->v25.scrub[index + i].del);
1128 			flm_nthw_scrub_inf(be->p_flm_nthw, flm->v25.scrub[index + i].inf);
1129 			flm_nthw_scrub_flush(be->p_flm_nthw);
1130 		}
1131 	}
1132 
1133 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1134 	return 0;
1135 }
1136 
1137 static int flm_buf_ctrl_update(void *be_dev, const struct flm_func_s *flm)
1138 {
1139 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1140 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1141 
1142 	if (flm->ver >= 25) {
1143 		flm_nthw_buf_ctrl_update(be->p_flm_nthw,
1144 			&flm->v25.buf_ctrl->lrn_free,
1145 			&flm->v25.buf_ctrl->inf_avail,
1146 			&flm->v25.buf_ctrl->sta_avail);
1147 	}
1148 
1149 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1150 	return 0;
1151 }
1152 
1153 static int flm_stat_update(void *be_dev, const struct flm_func_s *flm)
1154 {
1155 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1156 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1157 
1158 	if (flm->ver >= 25) {
1159 		flm_nthw_stat_lrn_done_update(be->p_flm_nthw);
1160 		flm_nthw_stat_lrn_ignore_update(be->p_flm_nthw);
1161 		flm_nthw_stat_lrn_fail_update(be->p_flm_nthw);
1162 		flm_nthw_stat_unl_done_update(be->p_flm_nthw);
1163 		flm_nthw_stat_unl_ignore_update(be->p_flm_nthw);
1164 		flm_nthw_stat_rel_done_update(be->p_flm_nthw);
1165 		flm_nthw_stat_rel_ignore_update(be->p_flm_nthw);
1166 		flm_nthw_stat_aul_done_update(be->p_flm_nthw);
1167 		flm_nthw_stat_aul_ignore_update(be->p_flm_nthw);
1168 		flm_nthw_stat_aul_fail_update(be->p_flm_nthw);
1169 		flm_nthw_stat_tul_done_update(be->p_flm_nthw);
1170 		flm_nthw_stat_flows_update(be->p_flm_nthw);
1171 		flm_nthw_load_lps_update(be->p_flm_nthw);
1172 		flm_nthw_load_aps_update(be->p_flm_nthw);
1173 
1174 		flm_nthw_stat_lrn_done_cnt(be->p_flm_nthw, &flm->v25.lrn_done->cnt, 1);
1175 		flm_nthw_stat_lrn_ignore_cnt(be->p_flm_nthw, &flm->v25.lrn_ignore->cnt, 1);
1176 		flm_nthw_stat_lrn_fail_cnt(be->p_flm_nthw, &flm->v25.lrn_fail->cnt, 1);
1177 		flm_nthw_stat_unl_done_cnt(be->p_flm_nthw, &flm->v25.unl_done->cnt, 1);
1178 		flm_nthw_stat_unl_ignore_cnt(be->p_flm_nthw, &flm->v25.unl_ignore->cnt, 1);
1179 		flm_nthw_stat_rel_done_cnt(be->p_flm_nthw, &flm->v25.rel_done->cnt, 1);
1180 		flm_nthw_stat_rel_ignore_cnt(be->p_flm_nthw, &flm->v25.rel_ignore->cnt, 1);
1181 		flm_nthw_stat_aul_done_cnt(be->p_flm_nthw, &flm->v25.aul_done->cnt, 1);
1182 		flm_nthw_stat_aul_ignore_cnt(be->p_flm_nthw, &flm->v25.aul_ignore->cnt, 1);
1183 		flm_nthw_stat_aul_fail_cnt(be->p_flm_nthw, &flm->v25.aul_fail->cnt, 1);
1184 		flm_nthw_stat_tul_done_cnt(be->p_flm_nthw, &flm->v25.tul_done->cnt, 1);
1185 		flm_nthw_stat_flows_cnt(be->p_flm_nthw, &flm->v25.flows->cnt, 1);
1186 
1187 		flm_nthw_stat_prb_done_update(be->p_flm_nthw);
1188 		flm_nthw_stat_prb_ignore_update(be->p_flm_nthw);
1189 		flm_nthw_stat_prb_done_cnt(be->p_flm_nthw, &flm->v25.prb_done->cnt, 1);
1190 		flm_nthw_stat_prb_ignore_cnt(be->p_flm_nthw, &flm->v25.prb_ignore->cnt, 1);
1191 
1192 		flm_nthw_load_lps_cnt(be->p_flm_nthw, &flm->v25.load_lps->lps, 1);
1193 		flm_nthw_load_aps_cnt(be->p_flm_nthw, &flm->v25.load_aps->aps, 1);
1194 	}
1195 
1196 	if (flm->ver >= 25) {
1197 		flm_nthw_stat_sta_done_update(be->p_flm_nthw);
1198 		flm_nthw_stat_inf_done_update(be->p_flm_nthw);
1199 		flm_nthw_stat_inf_skip_update(be->p_flm_nthw);
1200 		flm_nthw_stat_pck_hit_update(be->p_flm_nthw);
1201 		flm_nthw_stat_pck_miss_update(be->p_flm_nthw);
1202 		flm_nthw_stat_pck_unh_update(be->p_flm_nthw);
1203 		flm_nthw_stat_pck_dis_update(be->p_flm_nthw);
1204 		flm_nthw_stat_csh_hit_update(be->p_flm_nthw);
1205 		flm_nthw_stat_csh_miss_update(be->p_flm_nthw);
1206 		flm_nthw_stat_csh_unh_update(be->p_flm_nthw);
1207 		flm_nthw_stat_cuc_start_update(be->p_flm_nthw);
1208 		flm_nthw_stat_cuc_move_update(be->p_flm_nthw);
1209 
1210 		flm_nthw_stat_sta_done_cnt(be->p_flm_nthw, &flm->v25.sta_done->cnt, 1);
1211 		flm_nthw_stat_inf_done_cnt(be->p_flm_nthw, &flm->v25.inf_done->cnt, 1);
1212 		flm_nthw_stat_inf_skip_cnt(be->p_flm_nthw, &flm->v25.inf_skip->cnt, 1);
1213 		flm_nthw_stat_pck_hit_cnt(be->p_flm_nthw, &flm->v25.pck_hit->cnt, 1);
1214 		flm_nthw_stat_pck_miss_cnt(be->p_flm_nthw, &flm->v25.pck_miss->cnt, 1);
1215 		flm_nthw_stat_pck_unh_cnt(be->p_flm_nthw, &flm->v25.pck_unh->cnt, 1);
1216 		flm_nthw_stat_pck_dis_cnt(be->p_flm_nthw, &flm->v25.pck_dis->cnt, 1);
1217 		flm_nthw_stat_csh_hit_cnt(be->p_flm_nthw, &flm->v25.csh_hit->cnt, 1);
1218 		flm_nthw_stat_csh_miss_cnt(be->p_flm_nthw, &flm->v25.csh_miss->cnt, 1);
1219 		flm_nthw_stat_csh_unh_cnt(be->p_flm_nthw, &flm->v25.csh_unh->cnt, 1);
1220 		flm_nthw_stat_cuc_start_cnt(be->p_flm_nthw, &flm->v25.cuc_start->cnt, 1);
1221 		flm_nthw_stat_cuc_move_cnt(be->p_flm_nthw, &flm->v25.cuc_move->cnt, 1);
1222 	}
1223 
1224 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1225 	return 0;
1226 }
1227 
1228 static int flm_lrn_data_flush(void *be_dev, const struct flm_func_s *flm, const uint32_t *lrn_data,
1229 	uint32_t records, uint32_t *handled_records,
1230 	uint32_t words_per_record, uint32_t *inf_word_cnt,
1231 	uint32_t *sta_word_cnt)
1232 {
1233 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1234 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1235 
1236 	int ret = flm_nthw_lrn_data_flush(be->p_flm_nthw, lrn_data, records, words_per_record,
1237 			handled_records, &flm->v25.buf_ctrl->lrn_free,
1238 			&flm->v25.buf_ctrl->inf_avail,
1239 			&flm->v25.buf_ctrl->sta_avail);
1240 
1241 	*inf_word_cnt = flm->v25.buf_ctrl->inf_avail;
1242 	*sta_word_cnt = flm->v25.buf_ctrl->sta_avail;
1243 
1244 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1245 	return ret;
1246 }
1247 
1248 static int flm_inf_sta_data_update(void *be_dev, const struct flm_func_s *flm, uint32_t *inf_data,
1249 	uint32_t inf_size, uint32_t *inf_word_cnt, uint32_t *sta_data,
1250 	uint32_t sta_size, uint32_t *sta_word_cnt)
1251 {
1252 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1253 	CHECK_DEBUG_ON(be, flm, be->p_flm_nthw);
1254 
1255 	int ret = flm_nthw_inf_sta_data_update(be->p_flm_nthw, inf_data, inf_size, sta_data,
1256 			sta_size, &flm->v25.buf_ctrl->lrn_free,
1257 			&flm->v25.buf_ctrl->inf_avail,
1258 			&flm->v25.buf_ctrl->sta_avail);
1259 
1260 	*inf_word_cnt = flm->v25.buf_ctrl->inf_avail;
1261 	*sta_word_cnt = flm->v25.buf_ctrl->sta_avail;
1262 
1263 	CHECK_DEBUG_OFF(flm, be->p_flm_nthw);
1264 	return ret;
1265 }
1266 
1267 /*
1268  * HSH
1269  */
1270 
1271 static bool hsh_get_present(void *be_dev)
1272 {
1273 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1274 	return be->p_hsh_nthw != NULL;
1275 }
1276 
1277 static uint32_t hsh_get_version(void *be_dev)
1278 {
1279 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1280 	return (uint32_t)((nthw_module_get_major_version(be->p_hsh_nthw->m_hsh) << 16) |
1281 			(nthw_module_get_minor_version(be->p_hsh_nthw->m_hsh) & 0xffff));
1282 }
1283 
1284 static int hsh_rcp_flush(void *be_dev, const struct hsh_func_s *hsh, int category, int cnt)
1285 {
1286 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1287 	CHECK_DEBUG_ON(be, hsh, be->p_hsh_nthw);
1288 
1289 	if (hsh->ver == 5) {
1290 		hsh_nthw_rcp_cnt(be->p_hsh_nthw, 1);
1291 
1292 		for (int i = 0; i < cnt; i++) {
1293 			hsh_nthw_rcp_select(be->p_hsh_nthw, category + i);
1294 			hsh_nthw_rcp_load_dist_type(be->p_hsh_nthw,
1295 				hsh->v5.rcp[category + i].load_dist_type);
1296 			hsh_nthw_rcp_mac_port_mask(be->p_hsh_nthw,
1297 				hsh->v5.rcp[category + i].mac_port_mask);
1298 			hsh_nthw_rcp_sort(be->p_hsh_nthw, hsh->v5.rcp[category + i].sort);
1299 			hsh_nthw_rcp_qw0_pe(be->p_hsh_nthw, hsh->v5.rcp[category + i].qw0_pe);
1300 			hsh_nthw_rcp_qw0_ofs(be->p_hsh_nthw, hsh->v5.rcp[category + i].qw0_ofs);
1301 			hsh_nthw_rcp_qw4_pe(be->p_hsh_nthw, hsh->v5.rcp[category + i].qw4_pe);
1302 			hsh_nthw_rcp_qw4_ofs(be->p_hsh_nthw, hsh->v5.rcp[category + i].qw4_ofs);
1303 			hsh_nthw_rcp_w8_pe(be->p_hsh_nthw, hsh->v5.rcp[category + i].w8_pe);
1304 			hsh_nthw_rcp_w8_ofs(be->p_hsh_nthw, hsh->v5.rcp[category + i].w8_ofs);
1305 			hsh_nthw_rcp_w8_sort(be->p_hsh_nthw, hsh->v5.rcp[category + i].w8_sort);
1306 			hsh_nthw_rcp_w9_pe(be->p_hsh_nthw, hsh->v5.rcp[category + i].w9_pe);
1307 			hsh_nthw_rcp_w9_ofs(be->p_hsh_nthw, hsh->v5.rcp[category + i].w9_ofs);
1308 			hsh_nthw_rcp_w9_sort(be->p_hsh_nthw, hsh->v5.rcp[category + i].w9_sort);
1309 			hsh_nthw_rcp_w9_p(be->p_hsh_nthw, hsh->v5.rcp[category + i].w9_p);
1310 			hsh_nthw_rcp_p_mask(be->p_hsh_nthw, hsh->v5.rcp[category + i].p_mask);
1311 			hsh_nthw_rcp_word_mask(be->p_hsh_nthw,
1312 				hsh->v5.rcp[category + i].word_mask);
1313 			hsh_nthw_rcp_seed(be->p_hsh_nthw, hsh->v5.rcp[category + i].seed);
1314 			hsh_nthw_rcp_tnl_p(be->p_hsh_nthw, hsh->v5.rcp[category + i].tnl_p);
1315 			hsh_nthw_rcp_hsh_valid(be->p_hsh_nthw,
1316 				hsh->v5.rcp[category + i].hsh_valid);
1317 			hsh_nthw_rcp_hsh_type(be->p_hsh_nthw, hsh->v5.rcp[category + i].hsh_type);
1318 			hsh_nthw_rcp_toeplitz(be->p_hsh_nthw, hsh->v5.rcp[category + i].toeplitz);
1319 			hsh_nthw_rcp_k(be->p_hsh_nthw, hsh->v5.rcp[category + i].k);
1320 			hsh_nthw_rcp_auto_ipv4_mask(be->p_hsh_nthw,
1321 				hsh->v5.rcp[category + i].auto_ipv4_mask);
1322 			hsh_nthw_rcp_flush(be->p_hsh_nthw);
1323 		}
1324 	}
1325 
1326 	CHECK_DEBUG_OFF(hsh, be->p_hsh_nthw);
1327 	return 0;
1328 }
1329 
1330 /*
1331  * QSL
1332  */
1333 
1334 static bool qsl_get_present(void *be_dev)
1335 {
1336 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1337 	return be->p_qsl_nthw != NULL;
1338 }
1339 
1340 static uint32_t qsl_get_version(void *be_dev)
1341 {
1342 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1343 	return (uint32_t)((nthw_module_get_major_version(be->p_qsl_nthw->m_qsl) << 16) |
1344 			(nthw_module_get_minor_version(be->p_qsl_nthw->m_qsl) & 0xffff));
1345 }
1346 
1347 static int qsl_rcp_flush(void *be_dev, const struct qsl_func_s *qsl, int category, int cnt)
1348 {
1349 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1350 	CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
1351 
1352 	if (qsl->ver == 7) {
1353 		qsl_nthw_rcp_cnt(be->p_qsl_nthw, 1);
1354 
1355 		for (int i = 0; i < cnt; i++) {
1356 			qsl_nthw_rcp_select(be->p_qsl_nthw, category + i);
1357 			qsl_nthw_rcp_discard(be->p_qsl_nthw, qsl->v7.rcp[category + i].discard);
1358 			qsl_nthw_rcp_drop(be->p_qsl_nthw, qsl->v7.rcp[category + i].drop);
1359 			qsl_nthw_rcp_tbl_lo(be->p_qsl_nthw, qsl->v7.rcp[category + i].tbl_lo);
1360 			qsl_nthw_rcp_tbl_hi(be->p_qsl_nthw, qsl->v7.rcp[category + i].tbl_hi);
1361 			qsl_nthw_rcp_tbl_idx(be->p_qsl_nthw, qsl->v7.rcp[category + i].tbl_idx);
1362 			qsl_nthw_rcp_tbl_msk(be->p_qsl_nthw, qsl->v7.rcp[category + i].tbl_msk);
1363 			qsl_nthw_rcp_lr(be->p_qsl_nthw, qsl->v7.rcp[category + i].lr);
1364 			qsl_nthw_rcp_tsa(be->p_qsl_nthw, qsl->v7.rcp[category + i].tsa);
1365 			qsl_nthw_rcp_vli(be->p_qsl_nthw, qsl->v7.rcp[category + i].vli);
1366 			qsl_nthw_rcp_flush(be->p_qsl_nthw);
1367 		}
1368 	}
1369 
1370 	CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
1371 	return 0;
1372 }
1373 
1374 static int qsl_qst_flush(void *be_dev, const struct qsl_func_s *qsl, int entry, int cnt)
1375 {
1376 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1377 	CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
1378 
1379 	if (qsl->ver == 7) {
1380 		qsl_nthw_qst_cnt(be->p_qsl_nthw, 1);
1381 
1382 		for (int i = 0; i < cnt; i++) {
1383 			qsl_nthw_qst_select(be->p_qsl_nthw, entry + i);
1384 			qsl_nthw_qst_queue(be->p_qsl_nthw, qsl->v7.qst[entry + i].queue);
1385 			qsl_nthw_qst_en(be->p_qsl_nthw, qsl->v7.qst[entry + i].en);
1386 
1387 			qsl_nthw_qst_tx_port(be->p_qsl_nthw, qsl->v7.qst[entry + i].tx_port);
1388 			qsl_nthw_qst_lre(be->p_qsl_nthw, qsl->v7.qst[entry + i].lre);
1389 			qsl_nthw_qst_tci(be->p_qsl_nthw, qsl->v7.qst[entry + i].tci);
1390 			qsl_nthw_qst_ven(be->p_qsl_nthw, qsl->v7.qst[entry + i].ven);
1391 			qsl_nthw_qst_flush(be->p_qsl_nthw);
1392 		}
1393 	}
1394 
1395 	CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
1396 	return 0;
1397 }
1398 
1399 static int qsl_qen_flush(void *be_dev, const struct qsl_func_s *qsl, int entry, int cnt)
1400 {
1401 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1402 	CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
1403 
1404 	if (qsl->ver == 7) {
1405 		qsl_nthw_qen_cnt(be->p_qsl_nthw, 1);
1406 
1407 		for (int i = 0; i < cnt; i++) {
1408 			qsl_nthw_qen_select(be->p_qsl_nthw, entry + i);
1409 			qsl_nthw_qen_en(be->p_qsl_nthw, qsl->v7.qen[entry + i].en);
1410 			qsl_nthw_qen_flush(be->p_qsl_nthw);
1411 		}
1412 	}
1413 
1414 	CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
1415 	return 0;
1416 }
1417 
1418 static int qsl_unmq_flush(void *be_dev, const struct qsl_func_s *qsl, int entry, int cnt)
1419 {
1420 	struct backend_dev_s *be = (struct backend_dev_s *)be_dev;
1421 	CHECK_DEBUG_ON(be, qsl, be->p_qsl_nthw);
1422 
1423 	if (qsl->ver == 7) {
1424 		qsl_nthw_unmq_cnt(be->p_qsl_nthw, 1);
1425 
1426 		for (int i = 0; i < cnt; i++) {
1427 			qsl_nthw_unmq_select(be->p_qsl_nthw, entry + i);
1428 			qsl_nthw_unmq_dest_queue(be->p_qsl_nthw,
1429 				qsl->v7.unmq[entry + i].dest_queue);
1430 			qsl_nthw_unmq_en(be->p_qsl_nthw, qsl->v7.unmq[entry + i].en);
1431 			qsl_nthw_unmq_flush(be->p_qsl_nthw);
1432 		}
1433 	}
1434 
1435 	CHECK_DEBUG_OFF(qsl, be->p_qsl_nthw);
1436 	return 0;
1437 }
1438 
1439 /*
1440  * DBS
1441  */
1442 
1443 static int alloc_rx_queue(void *be_dev, int queue_id)
1444 {
1445 	(void)be_dev;
1446 	(void)queue_id;
1447 	NT_LOG(ERR, FILTER, "ERROR alloc Rx queue");
1448 	return -1;
1449 }
1450 
1451 static int free_rx_queue(void *be_dev, int hw_queue)
1452 {
1453 	(void)be_dev;
1454 	(void)hw_queue;
1455 	NT_LOG(ERR, FILTER, "ERROR free Rx queue");
1456 	return 0;
1457 }
1458 
/*
 * Backend operations table handed to the flow API core.
 * NOTE(review): positional initializer - the order must match struct
 * flow_api_backend_ops exactly; verify against its declaration when adding
 * or reordering entries.
 */
const struct flow_api_backend_ops flow_be_iface = {
	/* presumably the backend API/interface version - confirm */
	1,

	/* Debug control and capability/parameter getters */
	set_debug_mode,
	get_nb_phy_ports,
	get_nb_rx_ports,
	get_ltx_avail,
	get_nb_cat_funcs,
	get_nb_categories,
	get_nb_cat_km_if_cnt,
	get_nb_cat_km_if_m0,
	get_nb_cat_km_if_m1,
	get_nb_queues,
	get_nb_km_flow_types,
	get_nb_pm_ext,
	get_nb_len,
	get_kcc_size,
	get_kcc_banks,
	get_nb_km_categories,
	get_nb_km_cam_banks,
	get_nb_km_cam_record_words,
	get_nb_km_cam_records,
	get_nb_km_tcam_banks,
	get_nb_km_tcam_bank_width,
	get_nb_flm_categories,
	get_nb_flm_size_mb,
	get_nb_flm_entry_size,
	get_nb_flm_variant,
	get_nb_flm_prios,
	get_nb_flm_pst_profiles,
	get_nb_flm_scrub_profiles,
	get_nb_flm_load_aps_max,
	get_nb_qsl_categories,
	get_nb_qsl_qst_entries,
	get_nb_pdb_categories,
	get_nb_roa_categories,
	get_nb_tpe_categories,
	get_nb_tx_cpy_writers,
	get_nb_tx_cpy_mask_mem,
	get_nb_tx_rpl_depth,
	get_nb_tx_rpl_ext_categories,
	get_nb_tpe_ifr_categories,
	get_nb_rpp_per_ps,
	get_nb_hsh_categories,
	get_nb_hsh_toeplitz,

	/* Rx queue management (unsupported stubs in this backend) */
	alloc_rx_queue,
	free_rx_queue,

	/* CAT module operations */
	cat_get_present,
	cat_get_version,
	cat_cfn_flush,

	cat_kce_flush,
	cat_kcs_flush,
	cat_fte_flush,

	cat_cte_flush,
	cat_cts_flush,
	cat_cot_flush,
	cat_cct_flush,
	cat_exo_flush,
	cat_rck_flush,
	cat_len_flush,
	cat_kcc_flush,

	/* KM module operations */
	km_get_present,
	km_get_version,
	km_rcp_flush,
	km_cam_flush,
	km_tcam_flush,
	km_tci_flush,
	km_tcq_flush,

	/* FLM module operations */
	flm_get_present,
	flm_get_version,
	flm_control_flush,
	flm_status_flush,
	flm_status_update,
	flm_scan_flush,
	flm_load_bin_flush,
	flm_prio_flush,
	flm_pst_flush,
	flm_rcp_flush,
	flm_scrub_flush,
	flm_buf_ctrl_update,
	flm_stat_update,
	flm_lrn_data_flush,
	flm_inf_sta_data_update,

	/* HSH module operations */
	hsh_get_present,
	hsh_get_version,
	hsh_rcp_flush,

	/* QSL module operations */
	qsl_get_present,
	qsl_get_version,
	qsl_rcp_flush,
	qsl_qst_flush,
	qsl_qen_flush,
	qsl_unmq_flush,
};
1560 
/*
 * Probe the FPGA for each flow module and create its nthw instance.
 * Fills in the per-adapter backend_dev_s slot and returns the ops table;
 * *dev receives the backend device handle passed to every op.
 *
 * Pattern for each optional module: calling xxx_nthw_init(NULL, ...) appears
 * to act as a presence probe (returns 0 when the module exists in the FPGA
 * design); only then is the instance allocated and initialized - confirm
 * against the xxx_nthw_init() implementations.
 *
 * NOTE(review): the xxx_nthw_new() allocation results are used without NULL
 * checks; verify those constructors cannot fail, or add checks.
 */
const struct flow_api_backend_ops *bin_flow_backend_init(nthw_fpga_t *p_fpga, void **dev)
{
	uint8_t physical_adapter_no = (uint8_t)p_fpga->p_fpga_info->adapter_no;

	/* INFO module is created unconditionally (no presence probe) */
	struct info_nthw *pinfonthw = info_nthw_new();
	info_nthw_init(pinfonthw, p_fpga, physical_adapter_no);
	be_devs[physical_adapter_no].p_info_nthw = pinfonthw;

	/* Init nthw CAT */
	if (cat_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
		struct cat_nthw *pcatnthw = cat_nthw_new();
		cat_nthw_init(pcatnthw, p_fpga, physical_adapter_no);
		be_devs[physical_adapter_no].p_cat_nthw = pcatnthw;

	} else {
		be_devs[physical_adapter_no].p_cat_nthw = NULL;
	}

	/* Init nthw KM */
	if (km_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
		struct km_nthw *pkmnthw = km_nthw_new();
		km_nthw_init(pkmnthw, p_fpga, physical_adapter_no);
		be_devs[physical_adapter_no].p_km_nthw = pkmnthw;

	} else {
		be_devs[physical_adapter_no].p_km_nthw = NULL;
	}

	/* Init nthw FLM */
	if (flm_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
		struct flm_nthw *pflmnthw = flm_nthw_new();
		flm_nthw_init(pflmnthw, p_fpga, physical_adapter_no);
		be_devs[physical_adapter_no].p_flm_nthw = pflmnthw;

	} else {
		be_devs[physical_adapter_no].p_flm_nthw = NULL;
	}

	/* Init nthw IFR */
	if (ifr_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
		struct ifr_nthw *ifrnthw = ifr_nthw_new();
		ifr_nthw_init(ifrnthw, p_fpga, physical_adapter_no);
		be_devs[physical_adapter_no].p_ifr_nthw = ifrnthw;

	} else {
		be_devs[physical_adapter_no].p_ifr_nthw = NULL;
	}

	/* Init nthw HSH */
	if (hsh_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
		struct hsh_nthw *phshnthw = hsh_nthw_new();
		hsh_nthw_init(phshnthw, p_fpga, physical_adapter_no);
		be_devs[physical_adapter_no].p_hsh_nthw = phshnthw;

	} else {
		be_devs[physical_adapter_no].p_hsh_nthw = NULL;
	}

	/* Init nthw QSL */
	if (qsl_nthw_init(NULL, p_fpga, physical_adapter_no) == 0) {
		struct qsl_nthw *pqslnthw = qsl_nthw_new();
		qsl_nthw_init(pqslnthw, p_fpga, physical_adapter_no);
		be_devs[physical_adapter_no].p_qsl_nthw = pqslnthw;

	} else {
		be_devs[physical_adapter_no].p_qsl_nthw = NULL;
	}

	be_devs[physical_adapter_no].adapter_no = physical_adapter_no;
	*dev = (void *)&be_devs[physical_adapter_no];

	return &flow_be_iface;
}
1634 
/*
 * Tear down the nthw instances created by bin_flow_backend_init().
 * NOTE(review): p_ifr_nthw is allocated in bin_flow_backend_init() but is
 * not deleted here - possible resource leak; confirm whether an IFR delete
 * API exists and add it if so.
 */
static void bin_flow_backend_done(void *dev)
{
	struct backend_dev_s *be_dev = (struct backend_dev_s *)dev;
	info_nthw_delete(be_dev->p_info_nthw);
	cat_nthw_delete(be_dev->p_cat_nthw);
	km_nthw_delete(be_dev->p_km_nthw);
	flm_nthw_delete(be_dev->p_flm_nthw);
	hsh_nthw_delete(be_dev->p_hsh_nthw);
	qsl_nthw_delete(be_dev->p_qsl_nthw);
}
1645 
/* Backend entry points registered with the driver's module registry. */
static const struct flow_backend_ops ops = {
	.bin_flow_backend_init = bin_flow_backend_init,
	.bin_flow_backend_done = bin_flow_backend_done,
};
1650 
/* Module entry point: register this backend's ops with the driver registry. */
void flow_backend_init(void)
{
	register_flow_backend_ops(&ops);
}
1655