/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include <unistd.h>

#include <rte_common.h>
#include <rte_log.h>
#include <dev_driver.h>
#include <rte_malloc.h>
#include <rte_mempool.h>
#include <rte_errno.h>
#include <rte_pci.h>
#include <bus_pci_driver.h>
#include <rte_byteorder.h>
#include <rte_cycles.h>
#include <rte_random.h>

#include <rte_bbdev.h>
#include <rte_bbdev_pmd.h>

#include "rte_pmd_fpga_5gnr_fec.h"
#include "fpga_5gnr_fec.h"

#ifdef RTE_LIBRTE_BBDEV_DEBUG
RTE_LOG_REGISTER_DEFAULT(fpga_5gnr_fec_logtype, DEBUG);
#else
RTE_LOG_REGISTER_DEFAULT(fpga_5gnr_fec_logtype, NOTICE);
#endif

#ifdef RTE_LIBRTE_BBDEV_DEBUG

/* Read Ring Control Register of FPGA 5GNR FEC device */
static inline void
print_ring_reg_debug_info(void *mmio_base, uint32_t offset)
{
	rte_bbdev_log_debug(
		"FPGA 5GNR MMIO base address @ %p | Ring Control Register @ offset = 0x%08"
		PRIx32, mmio_base, offset);
	rte_bbdev_log_debug(
		"RING_BASE_ADDR = 0x%016"PRIx64,
		fpga_5gnr_reg_read_64(mmio_base, offset));
	rte_bbdev_log_debug(
		"RING_HEAD_ADDR = 0x%016"PRIx64,
		fpga_5gnr_reg_read_64(mmio_base, offset +
				FPGA_5GNR_FEC_RING_HEAD_ADDR));
	rte_bbdev_log_debug(
		"RING_SIZE = 0x%04"PRIx16,
		fpga_5gnr_reg_read_16(mmio_base, offset +
				FPGA_5GNR_FEC_RING_SIZE));
	rte_bbdev_log_debug(
		"RING_MISC = 0x%02"PRIx8,
		fpga_5gnr_reg_read_8(mmio_base, offset +
				FPGA_5GNR_FEC_RING_MISC));
	rte_bbdev_log_debug(
		"RING_ENABLE = 0x%02"PRIx8,
		fpga_5gnr_reg_read_8(mmio_base, offset +
				FPGA_5GNR_FEC_RING_ENABLE));
	rte_bbdev_log_debug(
		"RING_FLUSH_QUEUE_EN = 0x%02"PRIx8,
		fpga_5gnr_reg_read_8(mmio_base, offset +
				FPGA_5GNR_FEC_RING_FLUSH_QUEUE_EN));
	rte_bbdev_log_debug(
		"RING_SHADOW_TAIL = 0x%04"PRIx16,
		fpga_5gnr_reg_read_16(mmio_base, offset +
				FPGA_5GNR_FEC_RING_SHADOW_TAIL));
	rte_bbdev_log_debug(
		"RING_HEAD_POINT = 0x%04"PRIx16,
		fpga_5gnr_reg_read_16(mmio_base, offset +
				FPGA_5GNR_FEC_RING_HEAD_POINT));
}

/* Read Static Register of FPGA 5GNR FEC device. */
static inline void
print_static_reg_debug_info(void *mmio_base, uint8_t fpga_variant)
{
	uint16_t config;
	uint8_t qmap_done = fpga_5gnr_reg_read_8(mmio_base, FPGA_5GNR_FEC_QUEUE_PF_VF_MAP_DONE);
	uint16_t lb_factor = fpga_5gnr_reg_read_16(mmio_base, FPGA_5GNR_FEC_LOAD_BALANCE_FACTOR);
	uint16_t ring_desc_len = fpga_5gnr_reg_read_16(mmio_base, FPGA_5GNR_FEC_RING_DESC_LEN);

	if (fpga_variant == VC_5GNR_FPGA_VARIANT) {
		config = fpga_5gnr_reg_read_16(mmio_base, VC_5GNR_CONFIGURATION);
		rte_bbdev_log_debug("UL.DL Weights = %u.%u",
				((uint8_t)config), ((uint8_t)(config >> 8)));
	}
	rte_bbdev_log_debug("UL.DL Load Balance = %u.%u",
			((uint8_t)lb_factor), ((uint8_t)(lb_factor >> 8)));
	rte_bbdev_log_debug("Queue-PF/VF Mapping Table = %s",
			(qmap_done > 0) ? "READY" : "NOT-READY");
	if (fpga_variant == VC_5GNR_FPGA_VARIANT)
		rte_bbdev_log_debug("Ring Descriptor Size = %u bytes",
				ring_desc_len * VC_5GNR_RING_DESC_LEN_UNIT_BYTES);
	else
		rte_bbdev_log_debug("Ring Descriptor Size = %u bytes",
				ring_desc_len * AGX100_RING_DESC_LEN_UNIT_BYTES);
}

/* Print decode DMA Descriptor of Vista Creek Decoder device. */
static void
vc_5gnr_print_dma_dec_desc_debug_info(union vc_5gnr_dma_desc *desc)
{
	rte_bbdev_log_debug("DMA response desc %p",
			desc);
	rte_bbdev_log_debug("\t-- done(%"PRIu32") | iter(%"PRIu32") | et_pass(%"PRIu32")"
			" | crcb_pass (%"PRIu32") | error(%"PRIu32")",
			(uint32_t)desc->dec_req.done,
			(uint32_t)desc->dec_req.iter,
			(uint32_t)desc->dec_req.et_pass,
			(uint32_t)desc->dec_req.crcb_pass,
			(uint32_t)desc->dec_req.error);
	rte_bbdev_log_debug("\t-- qm_idx(%"PRIu32") | max_iter(%"PRIu32") | "
			"bg_idx (%"PRIu32") | harqin_en(%"PRIu32") | zc(%"PRIu32")",
			(uint32_t)desc->dec_req.qm_idx,
			(uint32_t)desc->dec_req.max_iter,
			(uint32_t)desc->dec_req.bg_idx,
			(uint32_t)desc->dec_req.harqin_en,
			(uint32_t)desc->dec_req.zc);
	rte_bbdev_log_debug("\t-- hbstroe_offset(%"PRIu32") | num_null (%"PRIu32") "
			"| irq_en(%"PRIu32")",
			(uint32_t)desc->dec_req.hbstroe_offset,
			(uint32_t)desc->dec_req.num_null,
			(uint32_t)desc->dec_req.irq_en);
	rte_bbdev_log_debug("\t-- ncb(%"PRIu32") | desc_idx (%"PRIu32") | "
			"drop_crc24b(%"PRIu32") | RV (%"PRIu32")",
			(uint32_t)desc->dec_req.ncb,
			(uint32_t)desc->dec_req.desc_idx,
			(uint32_t)desc->dec_req.drop_crc24b,
			(uint32_t)desc->dec_req.rv);
	rte_bbdev_log_debug("\t-- crc24b_ind(%"PRIu32") | et_dis (%"PRIu32")",
			(uint32_t)desc->dec_req.crc24b_ind,
			(uint32_t)desc->dec_req.et_dis);
	rte_bbdev_log_debug("\t-- harq_input_length(%"PRIu32") | rm_e(%"PRIu32")",
			(uint32_t)desc->dec_req.harq_input_length,
			(uint32_t)desc->dec_req.rm_e);
	rte_bbdev_log_debug("\t-- cbs_in_op(%"PRIu32") | in_add (0x%08"PRIx32"%08"PRIx32")"
			"| out_add (0x%08"PRIx32"%08"PRIx32")",
			(uint32_t)desc->dec_req.cbs_in_op,
			(uint32_t)desc->dec_req.in_addr_hi,
			(uint32_t)desc->dec_req.in_addr_lw,
			(uint32_t)desc->dec_req.out_addr_hi,
			(uint32_t)desc->dec_req.out_addr_lw);
	uint32_t *word = (uint32_t *) desc;
	rte_bbdev_log_debug("%08"PRIx32", %08"PRIx32", %08"PRIx32", %08"PRIx32", "
			"%08"PRIx32", %08"PRIx32", %08"PRIx32", %08"PRIx32,
			word[0], word[1], word[2], word[3],
			word[4], word[5], word[6], word[7]);
}

/* Print decode DMA Descriptor of AGX100 Decoder device. */
static void
agx100_print_dma_dec_desc_debug_info(union agx100_dma_desc *desc)
{
	rte_bbdev_log_debug("DMA response desc %p",
			desc);
	rte_bbdev_log_debug("\t-- done(%"PRIu32") | tb_crc_pass(%"PRIu32") | cb_crc_all_pass(%"PRIu32")"
			" | cb_all_et_pass(%"PRIu32") | max_iter_ret(%"PRIu32") |"
			"cgb_crc_bitmap(%"PRIu32") | error_msg(%"PRIu32") | error_code(%"PRIu32") |"
			"et_dis (%"PRIu32") | harq_in_en(%"PRIu32") | max_iter(%"PRIu32")",
			(uint32_t)desc->dec_req.done,
			(uint32_t)desc->dec_req.tb_crc_pass,
			(uint32_t)desc->dec_req.cb_crc_all_pass,
			(uint32_t)desc->dec_req.cb_all_et_pass,
			(uint32_t)desc->dec_req.max_iter_ret,
			(uint32_t)desc->dec_req.cgb_crc_bitmap,
			(uint32_t)desc->dec_req.error_msg,
			(uint32_t)desc->dec_req.error_code,
			(uint32_t)desc->dec_req.et_dis,
			(uint32_t)desc->dec_req.harq_in_en,
			(uint32_t)desc->dec_req.max_iter);
	rte_bbdev_log_debug("\t-- ncb(%"PRIu32") | bg_idx (%"PRIu32") | qm_idx (%"PRIu32")"
			"| zc(%"PRIu32") | rv(%"PRIu32") | int_en(%"PRIu32")",
			(uint32_t)desc->dec_req.ncb,
			(uint32_t)desc->dec_req.bg_idx,
			(uint32_t)desc->dec_req.qm_idx,
			(uint32_t)desc->dec_req.zc,
			(uint32_t)desc->dec_req.rv,
			(uint32_t)desc->dec_req.int_en);
	rte_bbdev_log_debug("\t-- max_cbg(%"PRIu32") | cbgti(%"PRIu32") | cbgfi(%"PRIu32") |"
			"cbgs(%"PRIu32") | desc_idx(%"PRIu32")",
			(uint32_t)desc->dec_req.max_cbg,
			(uint32_t)desc->dec_req.cbgti,
			(uint32_t)desc->dec_req.cbgfi,
			(uint32_t)desc->dec_req.cbgs,
			(uint32_t)desc->dec_req.desc_idx);
	rte_bbdev_log_debug("\t-- ca(%"PRIu32") | c(%"PRIu32") | llr_pckg(%"PRIu32") |"
			"syndrome_check_mode(%"PRIu32") | num_null(%"PRIu32")",
			(uint32_t)desc->dec_req.ca,
			(uint32_t)desc->dec_req.c,
			(uint32_t)desc->dec_req.llr_pckg,
			(uint32_t)desc->dec_req.syndrome_check_mode,
			(uint32_t)desc->dec_req.num_null);
	rte_bbdev_log_debug("\t-- ea(%"PRIu32") | eba(%"PRIu32")",
			(uint32_t)desc->dec_req.ea,
			(uint32_t)desc->dec_req.eba);
	rte_bbdev_log_debug("\t-- hbstore_offset_out(%"PRIu32")",
			(uint32_t)desc->dec_req.hbstore_offset_out);
	rte_bbdev_log_debug("\t-- hbstore_offset_in(%"PRIu32") | en_slice_ts(%"PRIu32") |"
			"en_host_ts(%"PRIu32") | en_cb_wr_status(%"PRIu32")"
			" | en_output_sg(%"PRIu32") | en_input_sg(%"PRIu32") | tb_cb(%"PRIu32")"
			" | crc24b_ind(%"PRIu32")| drop_crc24b(%"PRIu32")",
			(uint32_t)desc->dec_req.hbstore_offset_in,
			(uint32_t)desc->dec_req.en_slice_ts,
			(uint32_t)desc->dec_req.en_host_ts,
			(uint32_t)desc->dec_req.en_cb_wr_status,
			(uint32_t)desc->dec_req.en_output_sg,
			(uint32_t)desc->dec_req.en_input_sg,
			(uint32_t)desc->dec_req.tb_cb,
			(uint32_t)desc->dec_req.crc24b_ind,
			(uint32_t)desc->dec_req.drop_crc24b);
	rte_bbdev_log_debug("\t-- harq_input_length_a(%"PRIu32") | harq_input_length_b(%"PRIu32")",
			(uint32_t)desc->dec_req.harq_input_length_a,
			(uint32_t)desc->dec_req.harq_input_length_b);
	rte_bbdev_log_debug("\t-- input_slice_table_addr_lo(%"PRIu32")"
			" | input_start_addr_lo(%"PRIu32")",
			(uint32_t)desc->dec_req.input_slice_table_addr_lo,
			(uint32_t)desc->dec_req.input_start_addr_lo);
	rte_bbdev_log_debug("\t-- input_slice_table_addr_hi(%"PRIu32")"
			" | input_start_addr_hi(%"PRIu32")",
			(uint32_t)desc->dec_req.input_slice_table_addr_hi,
			(uint32_t)desc->dec_req.input_start_addr_hi);
	rte_bbdev_log_debug("\t-- input_slice_num(%"PRIu32") | input_length(%"PRIu32")",
			(uint32_t)desc->dec_req.input_slice_num,
			(uint32_t)desc->dec_req.input_length);
	rte_bbdev_log_debug("\t-- output_slice_table_addr_lo(%"PRIu32")"
			" | output_start_addr_lo(%"PRIu32")",
			(uint32_t)desc->dec_req.output_slice_table_addr_lo,
			(uint32_t)desc->dec_req.output_start_addr_lo);
	rte_bbdev_log_debug("\t-- output_slice_table_addr_hi(%"PRIu32")"
			" | output_start_addr_hi(%"PRIu32")",
			(uint32_t)desc->dec_req.output_slice_table_addr_hi,
			(uint32_t)desc->dec_req.output_start_addr_hi);
	rte_bbdev_log_debug("\t-- output_slice_num(%"PRIu32") | output_length(%"PRIu32")",
			(uint32_t)desc->dec_req.output_slice_num,
			(uint32_t)desc->dec_req.output_length);
	rte_bbdev_log_debug("\t-- enqueue_timestamp(%"PRIu32")",
			(uint32_t)desc->dec_req.enqueue_timestamp);
	rte_bbdev_log_debug("\t-- completion_timestamp(%"PRIu32")",
			(uint32_t)desc->dec_req.completion_timestamp);

	uint32_t *word = (uint32_t *) desc;
	rte_bbdev_log_debug("%08"PRIx32", %08"PRIx32", %08"PRIx32", %08"PRIx32", "
			"%08"PRIx32", %08"PRIx32", %08"PRIx32", %08"PRIx32", "
			"%08"PRIx32", %08"PRIx32", %08"PRIx32", %08"PRIx32", "
			"%08"PRIx32", %08"PRIx32", %08"PRIx32", %08"PRIx32,
			word[0], word[1], word[2], word[3],
			word[4], word[5], word[6], word[7],
			word[8], word[9], word[10], word[11],
			word[12], word[13], word[14], word[15]);
}

/* Print encode DMA Descriptor of Vista Creek encoder device. */
static void
vc_5gnr_print_dma_enc_desc_debug_info(union vc_5gnr_dma_desc *desc)
{
	rte_bbdev_log_debug("DMA response desc %p",
			desc);
	rte_bbdev_log_debug("%"PRIu32" %"PRIu32,
			(uint32_t)desc->enc_req.done,
			(uint32_t)desc->enc_req.error);
	rte_bbdev_log_debug("K' %"PRIu32" E %"PRIu32" desc %"PRIu32" Z %"PRIu32,
			(uint32_t)desc->enc_req.k_,
			(uint32_t)desc->enc_req.rm_e,
			(uint32_t)desc->enc_req.desc_idx,
			(uint32_t)desc->enc_req.zc);
	rte_bbdev_log_debug("BG %"PRIu32" Qm %"PRIu32" CRC %"PRIu32" IRQ %"PRIu32,
			(uint32_t)desc->enc_req.bg_idx,
			(uint32_t)desc->enc_req.qm_idx,
			(uint32_t)desc->enc_req.crc_en,
			(uint32_t)desc->enc_req.irq_en);
	rte_bbdev_log_debug("k0 %"PRIu32" Ncb %"PRIu32" F %"PRIu32,
			(uint32_t)desc->enc_req.k0,
			(uint32_t)desc->enc_req.ncb,
			(uint32_t)desc->enc_req.num_null);
	uint32_t *word = (uint32_t *) desc;
	rte_bbdev_log_debug("%08"PRIx32", %08"PRIx32", %08"PRIx32", %08"PRIx32", "
			"%08"PRIx32", %08"PRIx32", %08"PRIx32", %08"PRIx32,
			word[0], word[1], word[2], word[3],
			word[4], word[5], word[6], word[7]);
}

/* Print encode DMA Descriptor of AGX100 encoder device. */
static void
agx100_print_dma_enc_desc_debug_info(union agx100_dma_desc *desc)
{
	rte_bbdev_log_debug("DMA response desc %p",
			desc);
	rte_bbdev_log_debug("\t-- done(%"PRIu32") | error_msg(%"PRIu32") | error_code(%"PRIu32")",
			(uint32_t)desc->enc_req.done,
			(uint32_t)desc->enc_req.error_msg,
			(uint32_t)desc->enc_req.error_code);
	rte_bbdev_log_debug("\t-- ncb(%"PRIu32") | bg_idx (%"PRIu32") | qm_idx (%"PRIu32")"
			"| zc(%"PRIu32") | rv(%"PRIu32") | int_en(%"PRIu32")",
			(uint32_t)desc->enc_req.ncb,
			(uint32_t)desc->enc_req.bg_idx,
			(uint32_t)desc->enc_req.qm_idx,
			(uint32_t)desc->enc_req.zc,
			(uint32_t)desc->enc_req.rv,
			(uint32_t)desc->enc_req.int_en);
	rte_bbdev_log_debug("\t-- max_cbg(%"PRIu32") | cbgti(%"PRIu32") | cbgs(%"PRIu32") | "
			"desc_idx(%"PRIu32")",
			(uint32_t)desc->enc_req.max_cbg,
			(uint32_t)desc->enc_req.cbgti,
			(uint32_t)desc->enc_req.cbgs,
			(uint32_t)desc->enc_req.desc_idx);
	rte_bbdev_log_debug("\t-- ca(%"PRIu32") | c(%"PRIu32") | num_null(%"PRIu32")",
			(uint32_t)desc->enc_req.ca,
			(uint32_t)desc->enc_req.c,
			(uint32_t)desc->enc_req.num_null);
	rte_bbdev_log_debug("\t-- ea(%"PRIu32")",
			(uint32_t)desc->enc_req.ea);
	rte_bbdev_log_debug("\t-- eb(%"PRIu32")",
			(uint32_t)desc->enc_req.eb);
	rte_bbdev_log_debug("\t-- k_(%"PRIu32") | en_slice_ts(%"PRIu32") | en_host_ts(%"PRIu32") | "
			"en_cb_wr_status(%"PRIu32") | en_output_sg(%"PRIu32") | "
			"en_input_sg(%"PRIu32") | tb_cb(%"PRIu32") | crc_en(%"PRIu32")",
			(uint32_t)desc->enc_req.k_,
			(uint32_t)desc->enc_req.en_slice_ts,
			(uint32_t)desc->enc_req.en_host_ts,
			(uint32_t)desc->enc_req.en_cb_wr_status,
			(uint32_t)desc->enc_req.en_output_sg,
			(uint32_t)desc->enc_req.en_input_sg,
			(uint32_t)desc->enc_req.tb_cb,
			(uint32_t)desc->enc_req.crc_en);
	rte_bbdev_log_debug("\t-- input_slice_table_addr_lo(%"PRIu32")"
			" | input_start_addr_lo(%"PRIu32")",
			(uint32_t)desc->enc_req.input_slice_table_addr_lo,
			(uint32_t)desc->enc_req.input_start_addr_lo);
	rte_bbdev_log_debug("\t-- input_slice_table_addr_hi(%"PRIu32")"
			" | input_start_addr_hi(%"PRIu32")",
			(uint32_t)desc->enc_req.input_slice_table_addr_hi,
			(uint32_t)desc->enc_req.input_start_addr_hi);
	rte_bbdev_log_debug("\t-- input_slice_num(%"PRIu32") | input_length(%"PRIu32")",
			(uint32_t)desc->enc_req.input_slice_num,
			(uint32_t)desc->enc_req.input_length);
	rte_bbdev_log_debug("\t-- output_slice_table_addr_lo(%"PRIu32")"
			" | output_start_addr_lo(%"PRIu32")",
			(uint32_t)desc->enc_req.output_slice_table_addr_lo,
			(uint32_t)desc->enc_req.output_start_addr_lo);
	rte_bbdev_log_debug("\t-- output_slice_table_addr_hi(%"PRIu32")"
			" | output_start_addr_hi(%"PRIu32")",
			(uint32_t)desc->enc_req.output_slice_table_addr_hi,
			(uint32_t)desc->enc_req.output_start_addr_hi);
	rte_bbdev_log_debug("\t-- output_slice_num(%"PRIu32") | output_length(%"PRIu32")",
			(uint32_t)desc->enc_req.output_slice_num,
			(uint32_t)desc->enc_req.output_length);
	rte_bbdev_log_debug("\t-- enqueue_timestamp(%"PRIu32")",
			(uint32_t)desc->enc_req.enqueue_timestamp);
	rte_bbdev_log_debug("\t-- completion_timestamp(%"PRIu32")",
			(uint32_t)desc->enc_req.completion_timestamp);

	uint32_t *word = (uint32_t *) desc;
	rte_bbdev_log_debug("%08"PRIx32", %08"PRIx32", %08"PRIx32", %08"PRIx32", "
			"%08"PRIx32", %08"PRIx32", %08"PRIx32", %08"PRIx32", "
			"%08"PRIx32", %08"PRIx32", %08"PRIx32", %08"PRIx32", "
			"%08"PRIx32", %08"PRIx32", %08"PRIx32", %08"PRIx32,
			word[0], word[1], word[2], word[3],
			word[4], word[5], word[6], word[7],
			word[8], word[9], word[10], word[11],
			word[12], word[13], word[14], word[15]);
}

#endif

/**
 * Helper function that returns queue ID if queue is valid
 * or FPGA_5GNR_INVALID_HW_QUEUE_ID otherwise.
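 * Each map entry is a 32-bit register, hence the q_id << 2 byte offset
 * from the variant's queue-map base used below (editor's note).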
 */
static inline uint32_t
fpga_5gnr_get_queue_map(struct fpga_5gnr_fec_device *d, uint32_t q_id)
{
	if (d->fpga_variant == VC_5GNR_FPGA_VARIANT)
		return fpga_5gnr_reg_read_32(d->mmio_base, VC_5GNR_QUEUE_MAP + (q_id << 2));
	else
		return fpga_5gnr_reg_read_32(d->mmio_base, AGX100_QUEUE_MAP + (q_id << 2));
}

static int
fpga_5gnr_setup_queues(struct rte_bbdev *dev, uint16_t num_queues, int socket_id)
{
	/* Number of queues bound to a PF/VF */
	uint32_t hw_q_num = 0;
	uint32_t ring_size, payload, address, q_id, offset;
	rte_iova_t phys_addr;
	struct fpga_5gnr_ring_ctrl_reg ring_reg;
	struct fpga_5gnr_fec_device *d = dev->data->dev_private;

	address = FPGA_5GNR_FEC_QUEUE_PF_VF_MAP_DONE;
	if (!(fpga_5gnr_reg_read_32(d->mmio_base, address) & 0x1)) {
		rte_bbdev_log(ERR,
				"Queue-PF/VF mapping is not set! Was the PF configured for device (%s)?",
				dev->data->name);
		return -EPERM;
	}

	/* Clear queue registers structure */
	memset(&ring_reg, 0, sizeof(struct fpga_5gnr_ring_ctrl_reg));

	/* Scan the queue map.
	 * For each queue, the read value is the hardware queue ID if the queue
	 * is valid and mapped to the calling PF/VF, or
	 * FPGA_5GNR_INVALID_HW_QUEUE_ID otherwise.
	 */
	for (q_id = 0; q_id < d->total_num_queues; ++q_id) {
		uint32_t hw_q_id = fpga_5gnr_get_queue_map(d, q_id);

		rte_bbdev_log_debug("%s: queue ID: %u, registry queue ID: %u",
				dev->device->name, q_id, hw_q_id);

		if (hw_q_id != FPGA_5GNR_INVALID_HW_QUEUE_ID) {
			d->q_bound_bit_map |= (1ULL << q_id);
			/* Clear queue register of found queue */
			offset = FPGA_5GNR_FEC_RING_CTRL_REGS +
				(sizeof(struct fpga_5gnr_ring_ctrl_reg) * q_id);
			fpga_ring_reg_write(d->mmio_base, offset, ring_reg);
			++hw_q_num;
		}
	}
	if (hw_q_num == 0) {
		rte_bbdev_log(ERR,
			"No HW queues assigned to this device. Probably this is a VF configured for PF mode. Check device configuration!");
		return -ENODEV;
	}

	if (num_queues > hw_q_num) {
		rte_bbdev_log(ERR,
			"Not enough queues for device %s! Requested: %u, available: %u",
			dev->device->name, num_queues, hw_q_num);
		return -EINVAL;
	}
	if (d->fpga_variant == VC_5GNR_FPGA_VARIANT)
		ring_size = FPGA_5GNR_RING_MAX_SIZE * sizeof(struct vc_5gnr_dma_dec_desc);
	else
		ring_size = FPGA_5GNR_RING_MAX_SIZE * sizeof(struct agx100_dma_dec_desc);

	/* Enforce 32 byte alignment */
	RTE_BUILD_BUG_ON((RTE_CACHE_LINE_SIZE % 32) != 0);

	/* Allocate memory for SW descriptor rings */
	d->sw_rings = rte_zmalloc_socket(dev->device->driver->name,
			num_queues * ring_size, RTE_CACHE_LINE_SIZE,
			socket_id);
	if (d->sw_rings == NULL) {
		rte_bbdev_log(ERR,
				"Failed to allocate memory for %s:%u sw_rings",
				dev->device->driver->name, dev->data->dev_id);
		return -ENOMEM;
	}

	d->sw_rings_phys = rte_malloc_virt2iova(d->sw_rings);
	d->sw_ring_size = ring_size;
	d->sw_ring_max_depth = FPGA_5GNR_RING_MAX_SIZE;

	/* Allocate memory for ring flush status */
	d->flush_queue_status = rte_zmalloc_socket(NULL,
			sizeof(uint64_t), RTE_CACHE_LINE_SIZE, socket_id);
	if (d->flush_queue_status == NULL) {
		rte_bbdev_log(ERR,
				"Failed to allocate memory for %s:%u flush_queue_status",
				dev->device->driver->name, dev->data->dev_id);
		return -ENOMEM;
	}

	/* Set the flush status address registers */
	phys_addr = rte_malloc_virt2iova(d->flush_queue_status);

	address = FPGA_5GNR_FEC_VFQ_FLUSH_STATUS_LW;
	payload = (uint32_t)(phys_addr);
	fpga_5gnr_reg_write_32(d->mmio_base, address, payload);

	address = FPGA_5GNR_FEC_VFQ_FLUSH_STATUS_HI;
	payload = (uint32_t)(phys_addr >> 32);
	fpga_5gnr_reg_write_32(d->mmio_base, address, payload);

	return 0;
}

static int
fpga_5gnr_dev_close(struct rte_bbdev *dev)
{
	struct fpga_5gnr_fec_device *fpga_5gnr_dev = dev->data->dev_private;

	rte_free(fpga_5gnr_dev->sw_rings);
	rte_free(fpga_5gnr_dev->flush_queue_status);

	return 0;
}

static void
fpga_5gnr_dev_info_get(struct rte_bbdev *dev, struct rte_bbdev_driver_info *dev_info)
{
	struct fpga_5gnr_fec_device *d = dev->data->dev_private;
	uint32_t q_id = 0;

	static const struct rte_bbdev_op_cap vc_5gnr_bbdev_capabilities[] = {
		{
			.type   = RTE_BBDEV_OP_LDPC_ENC,
			.cap.ldpc_enc = {
				.capability_flags =
						RTE_BBDEV_LDPC_RATE_MATCH |
						RTE_BBDEV_LDPC_ENC_INTERRUPTS |
						RTE_BBDEV_LDPC_CRC_24B_ATTACH,
				.num_buffers_src =
						RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
				.num_buffers_dst =
						RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
			}
		},
		{
		.type   = RTE_BBDEV_OP_LDPC_DEC,
		.cap.ldpc_dec = {
			.capability_flags =
				RTE_BBDEV_LDPC_CRC_TYPE_24B_CHECK |
				RTE_BBDEV_LDPC_CRC_TYPE_24B_DROP |
				RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE |
				RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE |
				RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE |
				RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_IN_ENABLE |
				RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_OUT_ENABLE |
				RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK |
				RTE_BBDEV_LDPC_DEC_INTERRUPTS |
				RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_FILLERS,
			.llr_size = 6,
			.llr_decimals = 2,
			.num_buffers_src =
					RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
			.num_buffers_hard_out =
					RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
			.num_buffers_soft_out = 0,
		}
		},
		RTE_BBDEV_END_OF_CAPABILITIES_LIST()
	};

	static const struct rte_bbdev_op_cap agx100_bbdev_capabilities[] = {
		{
			.type   = RTE_BBDEV_OP_LDPC_ENC,
			.cap.ldpc_enc = {
				.capability_flags =
						RTE_BBDEV_LDPC_RATE_MATCH |
						RTE_BBDEV_LDPC_CRC_24B_ATTACH,
				.num_buffers_src =
						RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
				.num_buffers_dst =
						RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
			}
		},
		{
		.type   = RTE_BBDEV_OP_LDPC_DEC,
		.cap.ldpc_dec = {
			.capability_flags =
					RTE_BBDEV_LDPC_CRC_TYPE_24B_CHECK |
					RTE_BBDEV_LDPC_CRC_TYPE_24B_DROP |
					RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE |
					RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE |
					RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE |
					RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_IN_ENABLE |
					RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_OUT_ENABLE |
					RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK |
					RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_FILLERS,
			.llr_size = 6,
			.llr_decimals = 2,
			.num_buffers_src =
					RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
			.num_buffers_hard_out =
					RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
			.num_buffers_soft_out = 0,
		}
		},
		RTE_BBDEV_END_OF_CAPABILITIES_LIST()
	};

	/* Check the HARQ DDR size available */
	uint8_t timeout_counter = 0;
	uint32_t harq_buf_ready = fpga_5gnr_reg_read_32(d->mmio_base,
			FPGA_5GNR_FEC_HARQ_BUF_SIZE_RDY_REGS);
	while (harq_buf_ready != 1) {
		usleep(FPGA_5GNR_TIMEOUT_CHECK_INTERVAL);
		timeout_counter++;
		harq_buf_ready = fpga_5gnr_reg_read_32(d->mmio_base,
				FPGA_5GNR_FEC_HARQ_BUF_SIZE_RDY_REGS);
		if (timeout_counter > FPGA_5GNR_HARQ_RDY_TIMEOUT) {
			rte_bbdev_log(ERR, "HARQ Buffer not ready %d", harq_buf_ready);
			harq_buf_ready = 1;
		}
	}
	uint32_t harq_buf_size = fpga_5gnr_reg_read_32(d->mmio_base,
			FPGA_5GNR_FEC_HARQ_BUF_SIZE_REGS);

	static struct rte_bbdev_queue_conf default_queue_conf;
	default_queue_conf.socket = dev->data->socket_id;
	default_queue_conf.queue_size = FPGA_5GNR_RING_MAX_SIZE;

	dev_info->driver_name = dev->device->driver->name;
	dev_info->queue_size_lim = FPGA_5GNR_RING_MAX_SIZE;
	dev_info->hardware_accelerated = true;
	dev_info->min_alignment = 1;
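	/* Editor's note: the two variants expose the HARQ buffer size register
	 * at different granularities, hence the different scaling below; the
	 * exact register units are an assumption, not stated in this file.
	 */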
	if (d->fpga_variant == VC_5GNR_FPGA_VARIANT)
		dev_info->harq_buffer_size = (harq_buf_size >> 10) + 1;
	else
		dev_info->harq_buffer_size = harq_buf_size << 10;
	dev_info->default_queue_conf = default_queue_conf;
	if (d->fpga_variant == VC_5GNR_FPGA_VARIANT)
		dev_info->capabilities = vc_5gnr_bbdev_capabilities;
	else
		dev_info->capabilities = agx100_bbdev_capabilities;
	dev_info->cpu_flag_reqs = NULL;
	dev_info->data_endianness = RTE_LITTLE_ENDIAN;
	dev_info->device_status = RTE_BBDEV_DEV_NOT_SUPPORTED;

	/* Calculates number of queues assigned to device */
	dev_info->max_num_queues = 0;
	for (q_id = 0; q_id < d->total_num_queues; ++q_id) {
		uint32_t hw_q_id = fpga_5gnr_get_queue_map(d, q_id);

		if (hw_q_id != FPGA_5GNR_INVALID_HW_QUEUE_ID)
			dev_info->max_num_queues++;
	}
	/* Expose the number of queues per operation type */
	dev_info->num_queues[RTE_BBDEV_OP_NONE] = 0;
	dev_info->num_queues[RTE_BBDEV_OP_TURBO_DEC] = 0;
	dev_info->num_queues[RTE_BBDEV_OP_TURBO_ENC] = 0;
	dev_info->num_queues[RTE_BBDEV_OP_LDPC_DEC] = dev_info->max_num_queues / 2;
	dev_info->num_queues[RTE_BBDEV_OP_LDPC_ENC] = dev_info->max_num_queues / 2;
	dev_info->num_queues[RTE_BBDEV_OP_FFT] = 0;
	dev_info->num_queues[RTE_BBDEV_OP_MLDTS] = 0;
	dev_info->queue_priority[RTE_BBDEV_OP_LDPC_DEC] = 1;
	dev_info->queue_priority[RTE_BBDEV_OP_LDPC_ENC] = 1;
}

/**
 * Find the index of a queue bound to the current PF/VF that is not yet
 * assigned. Return -1 when no queue is available.
 */
static inline int
fpga_5gnr_find_free_queue_idx(struct rte_bbdev *dev,
		const struct rte_bbdev_queue_conf *conf)
{
	struct fpga_5gnr_fec_device *d = dev->data->dev_private;
	uint64_t q_idx;
	uint8_t i = 0;
	uint8_t range = d->total_num_queues >> 1;

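	/* Editor's note, inferred from the index arithmetic below: decoder
	 * queues are drawn from the lower half of the queue range and encoder
	 * queues from the upper half.
	 */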
	if (conf->op_type == RTE_BBDEV_OP_LDPC_ENC) {
		i = d->total_num_queues >> 1;
		range = d->total_num_queues;
	}

	for (; i < range; ++i) {
		q_idx = 1ULL << i;
		/* Check if index of queue is bound to current PF/VF */
		if (d->q_bound_bit_map & q_idx)
			/* Check if found queue was not already assigned */
			if (!(d->q_assigned_bit_map & q_idx)) {
				d->q_assigned_bit_map |= q_idx;
				return i;
			}
	}

	rte_bbdev_log(INFO, "Failed to find free queue on %s", dev->data->name);

	return -1;
}

static int
fpga_5gnr_queue_setup(struct rte_bbdev *dev, uint16_t queue_id,
		const struct rte_bbdev_queue_conf *conf)
{
	uint32_t address, ring_offset;
	struct fpga_5gnr_fec_device *d = dev->data->dev_private;
	struct fpga_5gnr_queue *q;
	int8_t q_idx;

	/* Check if there is a free queue to assign */
	q_idx = fpga_5gnr_find_free_queue_idx(dev, conf);
	if (q_idx == -1)
		return -1;

	/* Allocate the queue data structure. */
	q = rte_zmalloc_socket(dev->device->driver->name, sizeof(*q),
			RTE_CACHE_LINE_SIZE, conf->socket);
	if (q == NULL) {
		/* Mark queue as un-assigned */
		d->q_assigned_bit_map &= ~(1ULL << q_idx);
		rte_bbdev_log(ERR, "Failed to allocate queue memory");
		return -ENOMEM;
	}

	q->d = d;
	q->q_idx = q_idx;

	/* Set ring_base_addr */
	if (d->fpga_variant == VC_5GNR_FPGA_VARIANT)
		q->vc_5gnr_ring_addr = RTE_PTR_ADD(d->sw_rings, (d->sw_ring_size * queue_id));
	else
		q->agx100_ring_addr = RTE_PTR_ADD(d->sw_rings, (d->sw_ring_size * queue_id));

	q->ring_ctrl_reg.ring_base_addr = d->sw_rings_phys + (d->sw_ring_size * queue_id);

	/* Allocate memory for Completion Head variable */
	q->ring_head_addr = rte_zmalloc_socket(dev->device->driver->name,
			sizeof(uint64_t), RTE_CACHE_LINE_SIZE, conf->socket);
	if (q->ring_head_addr == NULL) {
		/* Mark queue as un-assigned */
		d->q_assigned_bit_map &= ~(1ULL << q_idx);
		rte_free(q);
		rte_bbdev_log(ERR,
				"Failed to allocate memory for %s:%u completion_head",
				dev->device->driver->name, dev->data->dev_id);
		return -ENOMEM;
	}
	/* Set ring_head_addr */
	q->ring_ctrl_reg.ring_head_addr = rte_malloc_virt2iova(q->ring_head_addr);

	/* Clear shadow_completion_head */
	q->shadow_completion_head = 0;

	/* Set ring_size */
	if (conf->queue_size > FPGA_5GNR_RING_MAX_SIZE) {
		/* Mark queue as un-assigned */
		d->q_assigned_bit_map &= ~(1ULL << q_idx);
		rte_free(q->ring_head_addr);
		rte_free(q);
		rte_bbdev_log(ERR,
				"Size of queue is too big %d (MAX: %d) for %s:%u",
				conf->queue_size, FPGA_5GNR_RING_MAX_SIZE,
				dev->device->driver->name, dev->data->dev_id);
		return -EINVAL;
	}
	q->ring_ctrl_reg.ring_size = conf->queue_size;

	/* Set Miscellaneous FPGA 5GNR register. */
	/* Max iteration number for TTI mitigation - todo */
	q->ring_ctrl_reg.max_ul_dec = 0;
	/* Enable max iteration number for TTI - todo */
	q->ring_ctrl_reg.max_ul_dec_en = 0;

	/* Enable the ring */
	q->ring_ctrl_reg.enable = 1;

	/* Set FPGA 5GNR head_point and tail registers */
	q->ring_ctrl_reg.head_point = q->tail = 0;

	/* Set FPGA 5GNR shadow_tail register */
	q->ring_ctrl_reg.shadow_tail = q->tail;

	/* Calculates the ring offset for found queue */
	ring_offset = FPGA_5GNR_FEC_RING_CTRL_REGS +
			(sizeof(struct fpga_5gnr_ring_ctrl_reg) * q_idx);

	/* Set FPGA 5GNR Ring Control Registers */
	fpga_ring_reg_write(d->mmio_base, ring_offset, q->ring_ctrl_reg);

	/* Store MMIO register of shadow_tail */
	address = ring_offset + FPGA_5GNR_FEC_RING_SHADOW_TAIL;
	q->shadow_tail_addr = RTE_PTR_ADD(d->mmio_base, address);

	q->head_free_desc = q->tail;

	/* Set wrap mask */
	q->sw_ring_wrap_mask = conf->queue_size - 1;
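	/* Editor's note: the wrap mask relies on queue_size being a power of
	 * two, so that (idx + offset) & mask implements the ring wrap-around.
	 */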

	rte_bbdev_log_debug("Setup dev%u q%u: queue_idx=%u",
			dev->data->dev_id, queue_id, q->q_idx);

	dev->data->queues[queue_id].queue_private = q;

	rte_bbdev_log_debug("BBDEV queue[%d] set up for FPGA 5GNR queue[%d]", queue_id, q_idx);

#ifdef RTE_LIBRTE_BBDEV_DEBUG
	/* Read FPGA Ring Control Registers after configuration */
	print_ring_reg_debug_info(d->mmio_base, ring_offset);
#endif
	return 0;
}

static int
fpga_5gnr_queue_release(struct rte_bbdev *dev, uint16_t queue_id)
{
	struct fpga_5gnr_fec_device *d = dev->data->dev_private;
	struct fpga_5gnr_queue *q = dev->data->queues[queue_id].queue_private;
	struct fpga_5gnr_ring_ctrl_reg ring_reg;
	uint32_t offset;

	rte_bbdev_log_debug("FPGA 5GNR Queue[%d] released", queue_id);

	if (q != NULL) {
		memset(&ring_reg, 0, sizeof(struct fpga_5gnr_ring_ctrl_reg));
		offset = FPGA_5GNR_FEC_RING_CTRL_REGS +
			(sizeof(struct fpga_5gnr_ring_ctrl_reg) * q->q_idx);
		/* Disable queue */
		fpga_5gnr_reg_write_8(d->mmio_base,
				offset + FPGA_5GNR_FEC_RING_ENABLE, 0x00);
		/* Clear queue registers */
		fpga_ring_reg_write(d->mmio_base, offset, ring_reg);

		/* Mark the Queue as un-assigned */
		d->q_assigned_bit_map &= ~(1ULL << q->q_idx);
		rte_free(q->ring_head_addr);
		rte_free(q);
		dev->data->queues[queue_id].queue_private = NULL;
	}

	return 0;
}

/* Function starts a device queue. */
static int
fpga_5gnr_queue_start(struct rte_bbdev *dev, uint16_t queue_id)
{
	struct fpga_5gnr_fec_device *d = dev->data->dev_private;
	struct fpga_5gnr_queue *q = dev->data->queues[queue_id].queue_private;
	uint32_t offset = FPGA_5GNR_FEC_RING_CTRL_REGS +
			(sizeof(struct fpga_5gnr_ring_ctrl_reg) * q->q_idx);
	uint8_t enable = 0x01;
	uint16_t zero = 0x0000;
#ifdef RTE_LIBRTE_BBDEV_DEBUG
	if (d == NULL) {
		rte_bbdev_log(ERR, "Invalid device pointer");
		return -1;
	}
#endif
	if (dev->data->queues[queue_id].queue_private == NULL) {
		rte_bbdev_log(ERR, "Cannot start invalid queue %d", queue_id);
		return -1;
	}

	/* Clear queue head and tail variables */
	q->tail = q->head_free_desc = 0;

	/* Clear FPGA 5GNR head_point and tail registers */
	fpga_5gnr_reg_write_16(d->mmio_base, offset + FPGA_5GNR_FEC_RING_HEAD_POINT, zero);
	fpga_5gnr_reg_write_16(d->mmio_base, offset + FPGA_5GNR_FEC_RING_SHADOW_TAIL, zero);

	/* Enable queue */
	fpga_5gnr_reg_write_8(d->mmio_base, offset + FPGA_5GNR_FEC_RING_ENABLE, enable);

	rte_bbdev_log_debug("FPGA 5GNR Queue[%d] started", queue_id);
	return 0;
}

/* Function stops a device queue. */
static int
fpga_5gnr_queue_stop(struct rte_bbdev *dev, uint16_t queue_id)
{
	struct fpga_5gnr_fec_device *d = dev->data->dev_private;
#ifdef RTE_LIBRTE_BBDEV_DEBUG
	if (d == NULL) {
		rte_bbdev_log(ERR, "Invalid device pointer");
		return -1;
	}
#endif
	struct fpga_5gnr_queue *q = dev->data->queues[queue_id].queue_private;
	uint32_t offset = FPGA_5GNR_FEC_RING_CTRL_REGS +
			(sizeof(struct fpga_5gnr_ring_ctrl_reg) * q->q_idx);
	uint8_t payload = 0x01;
	uint8_t counter = 0;
	uint8_t timeout = FPGA_5GNR_QUEUE_FLUSH_TIMEOUT_US / FPGA_5GNR_TIMEOUT_CHECK_INTERVAL;

	/* Set flush_queue_en bit to trigger queue flushing */
	fpga_5gnr_reg_write_8(d->mmio_base,
			offset + FPGA_5GNR_FEC_RING_FLUSH_QUEUE_EN, payload);

	/* Check if the queue flush completed.
	 * The FPGA 5GNR updates the completion flag once queue flushing is
	 * done. If the flag is not updated within the timeout, the flush is
	 * considered a failure.
	 */
	while (!(*((volatile uint8_t *)d->flush_queue_status + q->q_idx) & payload)) {
		if (counter > timeout) {
			rte_bbdev_log(ERR, "FPGA 5GNR Queue Flush failed for queue %d", queue_id);
			return -1;
		}
		usleep(FPGA_5GNR_TIMEOUT_CHECK_INTERVAL);
		counter++;
	}

	/* Disable queue */
	payload = 0x00;
	fpga_5gnr_reg_write_8(d->mmio_base, offset + FPGA_5GNR_FEC_RING_ENABLE, payload);

	rte_bbdev_log_debug("FPGA 5GNR Queue[%d] stopped", queue_id);
	return 0;
}

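/* Editor's note: map an FPGA queue index back to its bbdev queue ID, or
 * return (uint16_t)-1 when the index is not assigned to a configured queue.
 */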
static inline uint16_t
get_queue_id(struct rte_bbdev_data *data, uint8_t q_idx)
{
	uint16_t queue_id;

	for (queue_id = 0; queue_id < data->num_queues; ++queue_id) {
		struct fpga_5gnr_queue *q = data->queues[queue_id].queue_private;
		if (q != NULL && q->q_idx == q_idx)
			return queue_id;
	}

	return -1;
}

/* Interrupt handler triggered by FPGA 5GNR dev for handling specific interrupt. */
static void
fpga_5gnr_dev_interrupt_handler(void *cb_arg)
{
	struct rte_bbdev *dev = cb_arg;
	struct fpga_5gnr_fec_device *d = dev->data->dev_private;
	struct fpga_5gnr_queue *q;
	uint64_t ring_head;
	uint64_t q_idx;
	uint16_t queue_id;
	uint8_t i;

	/* Scan queue assigned to this device */
	for (i = 0; i < d->total_num_queues; ++i) {
		q_idx = 1ULL << i;
		if (d->q_bound_bit_map & q_idx) {
			queue_id = get_queue_id(dev->data, i);
			if (queue_id == (uint16_t) -1)
				continue;

			/* Check if completion head was changed */
			q = dev->data->queues[queue_id].queue_private;
			ring_head = *q->ring_head_addr;
			if (q->shadow_completion_head != ring_head &&
				q->irq_enable == 1) {
				q->shadow_completion_head = ring_head;
				rte_bbdev_pmd_callback_process(
						dev,
						RTE_BBDEV_EVENT_DEQUEUE,
						&queue_id);
			}
		}
	}
}

static int
fpga_5gnr_queue_intr_enable(struct rte_bbdev *dev, uint16_t queue_id)
{
	struct fpga_5gnr_queue *q = dev->data->queues[queue_id].queue_private;

	if (!rte_intr_cap_multiple(dev->intr_handle))
		return -ENOTSUP;

	q->irq_enable = 1;

	return 0;
}

static int
fpga_5gnr_queue_intr_disable(struct rte_bbdev *dev, uint16_t queue_id)
{
	struct fpga_5gnr_queue *q = dev->data->queues[queue_id].queue_private;
	q->irq_enable = 0;

	return 0;
}

static int
fpga_5gnr_intr_enable(struct rte_bbdev *dev)
{
	int ret;
	uint8_t i;
	struct fpga_5gnr_fec_device *d = dev->data->dev_private;
	uint8_t num_intr_vec;

	num_intr_vec = d->total_num_queues - RTE_INTR_VEC_RXTX_OFFSET;
	if (!rte_intr_cap_multiple(dev->intr_handle)) {
		rte_bbdev_log(ERR, "Multiple intr vector is not supported by FPGA (%s)",
				dev->data->name);
		return -ENOTSUP;
	}

	/* Create event file descriptors for each of the supported queues (Maximum 64).
	 * Event fds will be mapped to FPGA IRQs in rte_intr_enable().
	 * This is a 1:1 mapping where the IRQ number is a direct translation to the queue number.
	 *
	 * Only num_intr_vec event fds are created, because rte_intr_enable()
	 * maps the first IRQ to the already-created interrupt event file
	 * descriptor (intr_handle->fd).
	 */
	if (rte_intr_efd_enable(dev->intr_handle, num_intr_vec)) {
		rte_bbdev_log(ERR, "Failed to create fds for %u queues", dev->data->num_queues);
		return -1;
	}

	/* TODO: Each event file descriptor is overwritten by the interrupt
	 * event file descriptor, which is added to the epoll observed list.
	 * This ensures that the callback function assigned to that descriptor
	 * is invoked when any FPGA queue issues an interrupt.
	 */
	for (i = 0; i < num_intr_vec; ++i) {
		if (rte_intr_efds_index_set(dev->intr_handle, i,
				rte_intr_fd_get(dev->intr_handle)))
			return -rte_errno;
	}

	if (rte_intr_vec_list_alloc(dev->intr_handle, "intr_vec", dev->data->num_queues)) {
		rte_bbdev_log(ERR, "Failed to allocate %u vectors", dev->data->num_queues);
		return -ENOMEM;
	}

	ret = rte_intr_enable(dev->intr_handle);
	if (ret < 0) {
		rte_bbdev_log(ERR,
				"Couldn't enable interrupts for device: %s",
				dev->data->name);
		return ret;
	}

	ret = rte_intr_callback_register(dev->intr_handle, fpga_5gnr_dev_interrupt_handler, dev);
	if (ret < 0) {
		rte_bbdev_log(ERR,
				"Couldn't register interrupt callback for device: %s",
				dev->data->name);
		return ret;
	}

	return 0;
}

static const struct rte_bbdev_ops fpga_5gnr_ops = {
	.setup_queues = fpga_5gnr_setup_queues,
	.intr_enable = fpga_5gnr_intr_enable,
	.close = fpga_5gnr_dev_close,
	.info_get = fpga_5gnr_dev_info_get,
	.queue_setup = fpga_5gnr_queue_setup,
	.queue_stop = fpga_5gnr_queue_stop,
	.queue_start = fpga_5gnr_queue_start,
	.queue_release = fpga_5gnr_queue_release,
	.queue_intr_enable = fpga_5gnr_queue_intr_enable,
	.queue_intr_disable = fpga_5gnr_queue_intr_disable
};
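
/*
 * Editor's illustration (not part of the original driver): these ops are not
 * called directly; they back the public bbdev API. A minimal configuration
 * sequence from an application might look like the sketch below, where
 * dev_id, num_queues and queue_id are application-chosen values.
 *
 *	struct rte_bbdev_queue_conf conf = {
 *		.socket = rte_socket_id(),
 *		.queue_size = FPGA_5GNR_RING_MAX_SIZE,
 *		.op_type = RTE_BBDEV_OP_LDPC_ENC,
 *	};
 *	rte_bbdev_setup_queues(dev_id, num_queues, rte_socket_id());
 *	rte_bbdev_queue_configure(dev_id, queue_id, &conf);
 *	rte_bbdev_start(dev_id);
 */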

/* Provide the descriptor index on a given queue */
static inline uint16_t
fpga_5gnr_desc_idx(struct fpga_5gnr_queue *q, uint16_t offset)
{
	return (q->head_free_desc + offset) & q->sw_ring_wrap_mask;
}

/* Provide the VC 5GNR descriptor pointer on a given queue */
static inline union vc_5gnr_dma_desc *
vc_5gnr_get_desc(struct fpga_5gnr_queue *q, uint16_t offset)
{
	return q->vc_5gnr_ring_addr + fpga_5gnr_desc_idx(q, offset);
}

/* Provide the AGX100 descriptor pointer on a given queue */
static inline union agx100_dma_desc *
agx100_get_desc(struct fpga_5gnr_queue *q, uint16_t offset)
{
	return q->agx100_ring_addr + fpga_5gnr_desc_idx(q, offset);
}

/* Provide the descriptor index for the tail of a given queue */
static inline uint16_t
fpga_5gnr_desc_idx_tail(struct fpga_5gnr_queue *q, uint16_t offset)
{
	return (q->tail + offset) & q->sw_ring_wrap_mask;
}

/* Provide the VC 5GNR descriptor tail pointer on a given queue */
static inline union vc_5gnr_dma_desc *
vc_5gnr_get_desc_tail(struct fpga_5gnr_queue *q, uint16_t offset)
{
	return q->vc_5gnr_ring_addr + fpga_5gnr_desc_idx_tail(q, offset);
}

/* Provide the AGX100 descriptor tail pointer on a given queue */
static inline union agx100_dma_desc *
agx100_get_desc_tail(struct fpga_5gnr_queue *q, uint16_t offset)
{
	return q->agx100_ring_addr + fpga_5gnr_desc_idx_tail(q, offset);
}

static inline void
fpga_5gnr_dma_enqueue(struct fpga_5gnr_queue *q, uint16_t num_desc,
		struct rte_bbdev_stats *queue_stats)
{
	uint64_t start_time = 0;
	queue_stats->acc_offload_cycles = 0;

	/* Update tail and shadow_tail register */
	q->tail = fpga_5gnr_desc_idx_tail(q, num_desc);

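	/* Editor's note: the write barrier ensures all descriptor stores are
	 * globally visible before the doorbell (shadow_tail) MMIO write below.
	 */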
	rte_wmb();

	/* Start time measurement for enqueue function offload. */
	start_time = rte_rdtsc_precise();
	mmio_write_16(q->shadow_tail_addr, q->tail);

	rte_wmb();
	queue_stats->acc_offload_cycles += rte_rdtsc_precise() - start_time;
}

/* Read flag value 0/1 from bitmap. */
static inline bool
check_bit(uint32_t bitmap, uint32_t bitmask)
{
	return bitmap & bitmask;
}

/* Vista Creek 5GNR FPGA descriptor errors.
 * Print an error if a descriptor error has occurred.
 * Return 0 on success, 1 on failure.
 */
static inline int
vc_5gnr_check_desc_error(uint32_t error_code)
{
	switch (error_code) {
	case VC_5GNR_DESC_ERR_NO_ERR:
		return 0;
	case VC_5GNR_DESC_ERR_K_P_OUT_OF_RANGE:
		rte_bbdev_log(ERR, "Encode block size K' is out of range");
		break;
	case VC_5GNR_DESC_ERR_Z_C_NOT_LEGAL:
		rte_bbdev_log(ERR, "Zc is illegal");
		break;
	case VC_5GNR_DESC_ERR_DESC_OFFSET_ERR:
		rte_bbdev_log(ERR,
				"Queue offset does not meet the expectation in the FPGA");
		break;
	case VC_5GNR_DESC_ERR_DESC_READ_FAIL:
		rte_bbdev_log(ERR, "Unsuccessful completion for descriptor read");
		break;
	case VC_5GNR_DESC_ERR_DESC_READ_TIMEOUT:
		rte_bbdev_log(ERR, "Descriptor read time-out");
		break;
	case VC_5GNR_DESC_ERR_DESC_READ_TLP_POISONED:
		rte_bbdev_log(ERR, "Descriptor read TLP poisoned");
		break;
	case VC_5GNR_DESC_ERR_HARQ_INPUT_LEN:
		rte_bbdev_log(ERR, "HARQ input length is invalid");
		break;
	case VC_5GNR_DESC_ERR_CB_READ_FAIL:
		rte_bbdev_log(ERR, "Unsuccessful completion for code block");
		break;
	case VC_5GNR_DESC_ERR_CB_READ_TIMEOUT:
		rte_bbdev_log(ERR, "Code block read time-out");
		break;
	case VC_5GNR_DESC_ERR_CB_READ_TLP_POISONED:
		rte_bbdev_log(ERR, "Code block read TLP poisoned");
		break;
	case VC_5GNR_DESC_ERR_HBSTORE_ERR:
		rte_bbdev_log(ERR, "Hbstore exceeds HARQ buffer size.");
		break;
	default:
		rte_bbdev_log(ERR, "Descriptor error unknown error code %u", error_code);
		break;
	}
	return 1;
}

/* AGX100 FPGA descriptor errors
 * Print an error if a descriptor error has occurred.
 * Return 0 on success, 1 on failure
 */
static inline int
agx100_check_desc_error(uint32_t error_code, uint32_t error_msg)
{
	uint8_t error = error_code << 4 | error_msg;
	switch (error) {
	case AGX100_DESC_ERR_NO_ERR:
		return 0;
	case AGX100_DESC_ERR_E_NOT_LEGAL:
		rte_bbdev_log(ERR, "Invalid output length of rate matcher E");
		break;
	case AGX100_DESC_ERR_K_P_OUT_OF_RANGE:
		rte_bbdev_log(ERR, "Encode block size K' is out of range");
		break;
	case AGX100_DESC_ERR_NCB_OUT_OF_RANGE:
		rte_bbdev_log(ERR, "Ncb circular buffer size is out of range");
		break;
	case AGX100_DESC_ERR_Z_C_NOT_LEGAL:
		rte_bbdev_log(ERR, "Zc is illegal");
		break;
	case AGX100_DESC_ERR_DESC_INDEX_ERR:
		rte_bbdev_log(ERR,
				"Desc_index received does not meet the expectation in the AGX100");
		break;
	case AGX100_DESC_ERR_HARQ_INPUT_LEN_A:
		rte_bbdev_log(ERR, "HARQ input length A is invalid.");
		break;
	case AGX100_DESC_ERR_HARQ_INPUT_LEN_B:
		rte_bbdev_log(ERR, "HARQ input length B is invalid.");
		break;
	case AGX100_DESC_ERR_HBSTORE_OFFSET_ERR:
		rte_bbdev_log(ERR, "Hbstore exceeds HARQ buffer size.");
		break;
	case AGX100_DESC_ERR_TB_CBG_ERR:
		rte_bbdev_log(ERR, "Total CB number C=0 or CB number with Ea Ca=0 or Ca>C.");
		break;
	case AGX100_DESC_ERR_CBG_OUT_OF_RANGE:
		rte_bbdev_log(ERR, "Cbgti or max_cbg is out of range");
		break;
	case AGX100_DESC_ERR_CW_RM_NOT_LEGAL:
		rte_bbdev_log(ERR, "Cw_rm is illegal");
		break;
	case AGX100_DESC_ERR_UNSUPPORTED_REQ:
		rte_bbdev_log(ERR, "Unsupported request for descriptor");
		break;
	case AGX100_DESC_ERR_RESERVED:
		rte_bbdev_log(ERR, "Reserved");
		break;
	case AGX100_DESC_ERR_DESC_ABORT:
		rte_bbdev_log(ERR, "Completed abort for descriptor");
		break;
	case AGX100_DESC_ERR_DESC_READ_TLP_POISONED:
		rte_bbdev_log(ERR, "Descriptor read TLP poisoned");
		break;
	default:
		rte_bbdev_log(ERR,
				"Descriptor error unknown error code %u error msg %u",
				error_code, error_msg);
		break;
	}
	return 1;
}

/* Compute value of k0.
 * Based on 3GPP 38.212 Table 5.4.2.1-2
 * Starting position of different redundancy versions, k0
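 *
 * Editor's worked example (illustration only): for BG1 with Zc = 128 and a
 * full circular buffer (n_cb == N == 66 * 128 == 8448), rv_index = 2 gives
 * k0 = K0_2_1 * Zc; with the 3GPP value K0_2_1 = 33 this is 4224.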
 */
static inline uint16_t
get_k0(uint16_t n_cb, uint16_t z_c, uint8_t bg, uint8_t rv_index)
{
	uint16_t n = (bg == 1 ? N_ZC_1 : N_ZC_2) * z_c;
	if (rv_index == 0)
		return 0;
	if (z_c == 0)
		return 0;
	if (n_cb == n) {
		if (rv_index == 1)
			return (bg == 1 ? K0_1_1 : K0_1_2) * z_c;
		else if (rv_index == 2)
			return (bg == 1 ? K0_2_1 : K0_2_2) * z_c;
		else
			return (bg == 1 ? K0_3_1 : K0_3_2) * z_c;
	}
	/* LBRM case - includes a division by N */
	if (rv_index == 1)
		return (((bg == 1 ? K0_1_1 : K0_1_2) * n_cb)
				/ n) * z_c;
	else if (rv_index == 2)
		return (((bg == 1 ? K0_2_1 : K0_2_2) * n_cb)
				/ n) * z_c;
	else
		return (((bg == 1 ? K0_3_1 : K0_3_2) * n_cb)
				/ n) * z_c;
}

/**
 * Vista Creek 5GNR FPGA
 * Set DMA descriptor for encode operation (1 Code Block)
 *
 * @param op
 *   Pointer to a single encode operation.
 * @param desc
 *   Pointer to DMA descriptor.
 * @param input
 *   Pointer to the input data to be encoded.
 * @param output
 *   Pointer to the output buffer.
 * @param k_
 *   K' value (effective code block size in bits).
 * @param e
 *   E value (length of output in bits).
 * @param in_offset
 *   Input offset in rte_mbuf structure. It is used for calculating the point
 *   where data is starting.
 * @param out_offset
 *   Output offset in rte_mbuf structure. It is used for calculating the point
 *   where hard output data will be stored.
 * @param desc_offset
 *   Offset of the descriptor in the ring.
 * @param cbs_in_op
 *   Number of CBs contained in one operation.
 */
static inline int
vc_5gnr_dma_desc_te_fill(struct rte_bbdev_enc_op *op,
		struct vc_5gnr_dma_enc_desc *desc, struct rte_mbuf *input,
		struct rte_mbuf *output, uint16_t k_, uint16_t e,
		uint32_t in_offset, uint32_t out_offset, uint16_t desc_offset,
		uint8_t cbs_in_op)
{
	/* reset */
	desc->done = 0;
	desc->error = 0;
	desc->k_ = k_;
	desc->rm_e = e;
	desc->desc_idx = desc_offset;
	desc->zc = op->ldpc_enc.z_c;
	desc->bg_idx = op->ldpc_enc.basegraph - 1;
	desc->qm_idx = op->ldpc_enc.q_m / 2;
	desc->crc_en = check_bit(op->ldpc_enc.op_flags,
			RTE_BBDEV_LDPC_CRC_24B_ATTACH);
	desc->irq_en = 0;
	desc->k0 = get_k0(op->ldpc_enc.n_cb, op->ldpc_enc.z_c,
			op->ldpc_enc.basegraph, op->ldpc_enc.rv_index);
	desc->ncb = op->ldpc_enc.n_cb;
	desc->num_null = op->ldpc_enc.n_filler;
	/* Set inbound data buffer address */
	desc->in_addr_hi = (uint32_t)(
			rte_pktmbuf_iova_offset(input, in_offset) >> 32);
	desc->in_addr_lw = (uint32_t)(
			rte_pktmbuf_iova_offset(input, in_offset));

	desc->out_addr_hi = (uint32_t)(
			rte_pktmbuf_iova_offset(output, out_offset) >> 32);
	desc->out_addr_lw = (uint32_t)(
			rte_pktmbuf_iova_offset(output, out_offset));
	/* Save software context needed for dequeue */
	desc->op_addr = op;
	/* Set total number of CBs in an op */
	desc->cbs_in_op = cbs_in_op;
	return 0;
}

/**
 * AGX100 FPGA
 * Set DMA descriptor for encode operation (1 Code Block)
 *
 * @param op
 *   Pointer to a single encode operation.
 * @param desc
 *   Pointer to DMA descriptor.
 * @param input
 *   Pointer to the input data to be encoded.
 * @param output
 *   Pointer to the output buffer.
 * @param k_
 *   K' value (effective code block size in bits).
 * @param e
 *   E value (length of output in bits).
 * @param in_offset
 *   Input offset in rte_mbuf structure. It is used for calculating the point
 *   where data is starting.
 * @param out_offset
 *   Output offset in rte_mbuf structure. It is used for calculating the point
 *   where hard output data will be stored.
 * @param desc_offset
 *   Offset of the descriptor in the ring.
 * @param cbs_in_op
 *   Number of CBs contained in one operation.
 */
static inline int
agx100_dma_desc_le_fill(struct rte_bbdev_enc_op *op,
		struct agx100_dma_enc_desc *desc, struct rte_mbuf *input,
		struct rte_mbuf *output, uint16_t k_, uint32_t e,
		uint32_t in_offset, uint32_t out_offset, uint16_t desc_offset,
		uint8_t cbs_in_op)
{
	/* reset. */
	desc->done = 0;
	desc->error_msg = 0;
	desc->error_code = 0;
	desc->ncb = op->ldpc_enc.n_cb;
	desc->bg_idx = op->ldpc_enc.basegraph - 1;
	desc->qm_idx = op->ldpc_enc.q_m >> 1;
	desc->zc = op->ldpc_enc.z_c;
	desc->rv = op->ldpc_enc.rv_index;
	desc->int_en = 0;	/**< Set by device externally. */
	desc->max_cbg = 0;	/**< TODO: CBG specific. */
	desc->cbgti = 0;	/**< TODO: CBG specific. */
	desc->cbgs = 0;		/**< TODO: CBG specific. */
	desc->desc_idx = desc_offset;
	desc->ca = 0;	/**< TODO: CBG specific. */
	desc->c = 0;	/**< TODO: CBG specific. */
	desc->num_null = op->ldpc_enc.n_filler;
	desc->ea = e;
	desc->eb = e;	/**< TODO: TB/CBG specific. */
	desc->k_ = k_;
	desc->en_slice_ts = 0;	/**< TODO: Slice specific. */
	desc->en_host_ts = 0;	/**< TODO: Slice specific. */
	desc->en_cb_wr_status = 0;	/**< TODO: Event Queue specific. */
	desc->en_output_sg = 0;	/**< TODO: Slice specific. */
	desc->en_input_sg = 0;	/**< TODO: Slice specific. */
	desc->tb_cb = 0;	/**< Descriptor for CB. TODO: Add TB and CBG logic. */
	desc->crc_en = check_bit(op->ldpc_enc.op_flags,
			RTE_BBDEV_LDPC_CRC_24B_ATTACH);

	/* Set inbound/outbound data buffer address. */
	/* TODO: add logic for input_slice. */
	desc->output_start_addr_hi = (uint32_t)(
			rte_pktmbuf_iova_offset(output, out_offset) >> 32);
	desc->output_start_addr_lo = (uint32_t)(
			rte_pktmbuf_iova_offset(output, out_offset));
	desc->input_start_addr_hi = (uint32_t)(
			rte_pktmbuf_iova_offset(input, in_offset) >> 32);
	desc->input_start_addr_lo = (uint32_t)(
			rte_pktmbuf_iova_offset(input, in_offset));
	desc->output_length = (e + 7) >> 3; /* in bytes. */
	desc->input_length = input->data_len;
	desc->enqueue_timestamp = 0;
	desc->completion_timestamp = 0;
	/* Save software context needed for dequeue. */
	desc->op_addr = op;
	/* Set total number of CBs in an op. */
	desc->cbs_in_op = cbs_in_op;
	return 0;
}

/**
 * Vista Creek 5GNR FPGA
 * Set DMA descriptor for decode operation (1 Code Block)
 *
 * @param op
 *   Pointer to a single decode operation.
 * @param desc
 *   Pointer to DMA descriptor.
 * @param input
 *   Pointer to the input data to be decoded.
 * @param output
 *   Pointer to the hard output buffer.
 * @param harq_in_length
 *   Length of the HARQ combine input.
 * @param in_offset
 *   Input offset in rte_mbuf structure. It is used for calculating the point
 *   where data is starting.
 * @param out_offset
 *   Output offset in rte_mbuf structure. It is used for calculating the point
 *   where hard output data will be stored.
 * @param harq_offset
 *   Offset into the HARQ buffer store.
 * @param desc_offset
 *   Offset of the descriptor in the ring.
 * @param cbs_in_op
 *   Number of CBs contained in one operation.
 */
static inline int
vc_5gnr_dma_desc_ld_fill(struct rte_bbdev_dec_op *op,
		struct vc_5gnr_dma_dec_desc *desc,
		struct rte_mbuf *input, struct rte_mbuf *output,
		uint16_t harq_in_length,
		uint32_t in_offset, uint32_t out_offset,
		uint32_t harq_offset,
		uint16_t desc_offset,
		uint8_t cbs_in_op)
{
	/* reset */
	desc->done = 0;
	desc->error = 0;
	/* Set inbound data buffer address */
	desc->in_addr_hi = (uint32_t)(
			rte_pktmbuf_iova_offset(input, in_offset) >> 32);
	desc->in_addr_lw = (uint32_t)(
			rte_pktmbuf_iova_offset(input, in_offset));
	desc->rm_e = op->ldpc_dec.cb_params.e;
	desc->harq_input_length = harq_in_length;
	desc->et_dis = !check_bit(op->ldpc_dec.op_flags,
			RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE);
	desc->rv = op->ldpc_dec.rv_index;
	desc->crc24b_ind = check_bit(op->ldpc_dec.op_flags,
			RTE_BBDEV_LDPC_CRC_TYPE_24B_CHECK);
	desc->drop_crc24b = check_bit(op->ldpc_dec.op_flags,
			RTE_BBDEV_LDPC_CRC_TYPE_24B_DROP);
	desc->desc_idx = desc_offset;
	desc->ncb = op->ldpc_dec.n_cb;
	desc->num_null = op->ldpc_dec.n_filler;
	desc->hbstroe_offset = harq_offset >> 10;
	desc->zc = op->ldpc_dec.z_c;
	desc->harqin_en = check_bit(op->ldpc_dec.op_flags,
			RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE);
	desc->bg_idx = op->ldpc_dec.basegraph - 1;
	desc->max_iter = op->ldpc_dec.iter_max;
	desc->qm_idx = op->ldpc_dec.q_m / 2;
	desc->out_addr_hi = (uint32_t)(
			rte_pktmbuf_iova_offset(output, out_offset) >> 32);
	desc->out_addr_lw = (uint32_t)(
			rte_pktmbuf_iova_offset(output, out_offset));
	/* Save software context needed for dequeue */
	desc->op_addr = op;
	/* Set total number of CBs in an op */
	desc->cbs_in_op = cbs_in_op;

	return 0;
}

/**
 * AGX100 FPGA
 * Set DMA descriptor for decode operation (1 Code Block)
 *
 * @param op
 *   Pointer to a single decode operation.
 * @param desc
 *   Pointer to DMA descriptor.
 * @param input
 *   Pointer to the input data to be decoded.
 * @param output
 *   Pointer to the hard output buffer.
 * @param harq_in_length
 *   Length of the HARQ combine input.
 * @param in_offset
 *   Input offset in rte_mbuf structure. It is used for calculating the point
 *   where data is starting.
 * @param out_offset
 *   Output offset in rte_mbuf structure. It is used for calculating the point
 *   where hard output data will be stored.
 * @param harq_in_offset
 *   Offset into the HARQ buffer store for the combine input.
 * @param harq_out_offset
 *   Offset into the HARQ buffer store for the combine output.
 * @param desc_offset
 *   Offset of the descriptor in the ring.
 * @param cbs_in_op
 *   Number of CBs contained in one operation.
 */
1485 static inline int
1486 agx100_dma_desc_ld_fill(struct rte_bbdev_dec_op *op,
1487 		struct agx100_dma_dec_desc *desc,
1488 		struct rte_mbuf *input,	struct rte_mbuf *output,
1489 		uint16_t harq_in_length,
1490 		uint32_t in_offset, uint32_t out_offset,
1491 		uint32_t harq_in_offset,
1492 		uint32_t harq_out_offset,
1493 		uint16_t desc_offset,
1494 		uint8_t cbs_in_op)
1495 {
1496 	/* reset. */
1497 	desc->done = 0;
1498 	desc->tb_crc_pass = 0;
1499 	desc->cb_crc_all_pass = 0;
1500 	desc->cb_all_et_pass = 0;
1501 	desc->max_iter_ret = 0;
1502 	desc->cgb_crc_bitmap = 0;	/**< TODO: CBG specific. */
1503 	desc->error_msg = 0;
1504 	desc->error_code = 0;
1505 	desc->et_dis = !check_bit(op->ldpc_dec.op_flags,
1506 			RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE);
1507 	desc->harq_in_en = check_bit(op->ldpc_dec.op_flags,
1508 			RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE);
1509 	desc->max_iter = op->ldpc_dec.iter_max;
1510 	desc->ncb = op->ldpc_dec.n_cb;
1511 	desc->bg_idx = op->ldpc_dec.basegraph - 1;
1512 	desc->qm_idx = op->ldpc_dec.q_m >> 1;
1513 	desc->zc = op->ldpc_dec.z_c;
1514 	desc->rv = op->ldpc_dec.rv_index;
1515 	desc->int_en = 0;	/**< Set by device externally. */
1516 	desc->max_cbg = 0;	/**< TODO: CBG specific. */
1517 	desc->cbgti = 0;	/**< TODO: CBG specific. */
1518 	desc->cbgfi = 0;	/**< TODO: CBG specific. */
1519 	desc->cbgs = 0;		/**< TODO: CBG specific. */
1520 	desc->desc_idx = desc_offset;
1521 	desc->ca = 0;	/**< TODO: CBG specific. */
1522 	desc->c = 0;		/**< TODO: CBG specific. */
1523 	desc->llr_pckg = 0;		/**< TODO: Not implemented yet. */
1524 	desc->syndrome_check_mode = 1;	/**< TODO: Make it configurable. */
1525 	desc->num_null = op->ldpc_dec.n_filler;
1526 	desc->ea = op->ldpc_dec.cb_params.e;	/**< TODO: TB/CBG specific. */
1527 	desc->eba = 0;	/**< TODO: TB/CBG specific. */
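	/* HARQ buffer offsets are expressed in units of 1024 bytes. */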
1528 	desc->hbstore_offset_out = harq_out_offset >> 10;
1529 	desc->hbstore_offset_in = harq_in_offset >> 10;
1530 	desc->en_slice_ts = 0;	/**< TODO: Slice specific. */
1531 	desc->en_host_ts = 0;	/**< TODO: Slice specific. */
1532 	desc->en_cb_wr_status = 0;	/**< TODO: Event Queue specific. */
1533 	desc->en_output_sg = 0;	/**< TODO: Slice specific. */
1534 	desc->en_input_sg = 0;	/**< TODO: Slice specific. */
1535 	desc->tb_cb = 0; /**< Descriptor for CB. TODO: Add TB and CBG logic. */
1536 	desc->crc24b_ind = check_bit(op->ldpc_dec.op_flags,
1537 			RTE_BBDEV_LDPC_CRC_TYPE_24B_CHECK);
1538 	desc->drop_crc24b = check_bit(op->ldpc_dec.op_flags,
1539 			RTE_BBDEV_LDPC_CRC_TYPE_24B_DROP);
1540 	desc->harq_input_length_a =
1541 			harq_in_length; /**< Descriptor for CB. TODO: Add TB and CBG logic. */
1542 	desc->harq_input_length_b = 0; /**< Descriptor for CB. TODO: Add TB and CBG logic. */
1543 	/* Set inbound/outbound data buffer address. */
1544 	/* TODO: add logic for input_slice. */
1545 	desc->output_start_addr_hi = (uint32_t)(
1546 			rte_pktmbuf_iova_offset(output, out_offset) >> 32);
1547 	desc->output_start_addr_lo = (uint32_t)(
1548 			rte_pktmbuf_iova_offset(output, out_offset));
1549 	desc->input_start_addr_hi = (uint32_t)(
1550 			rte_pktmbuf_iova_offset(input, in_offset) >> 32);
1551 	desc->input_start_addr_lo = (uint32_t)(
1552 			rte_pktmbuf_iova_offset(input, in_offset));
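	/* Hard output length in bytes: K' = K - F bits, minus the 24-bit CRC
	 * when CRC dropping is enabled.
	 */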
1553 	desc->output_length = (((op->ldpc_dec.basegraph == 1) ? 22 : 10) * op->ldpc_dec.z_c
1554 			- op->ldpc_dec.n_filler - desc->drop_crc24b * 24) >> 3;
1555 	desc->input_length = op->ldpc_dec.cb_params.e;	/**< TODO: TB/CBG specific. */
1556 	desc->enqueue_timestamp = 0;
1557 	desc->completion_timestamp = 0;
1558 	/* Save software context needed for dequeue. */
1559 	desc->op_addr = op;
1560 	/* Set total number of CBs in an op. */
1561 	desc->cbs_in_op = cbs_in_op;
1562 	return 0;
1563 }
1564 
1565 /* Validates LDPC encoder parameters for VC 5GNR FPGA. */
1566 static inline int
1567 vc_5gnr_validate_ldpc_enc_op(struct rte_bbdev_enc_op *op)
1568 {
1569 	struct rte_bbdev_op_ldpc_enc *ldpc_enc = &op->ldpc_enc;
1570 	int z_c, n_filler, K, Kp, q_m, n_cb, N, k0, crc24;
1571 	int32_t L, Lcb, cw, cw_rm, e;
1572 
1573 	if (ldpc_enc->input.data == NULL) {
1574 		rte_bbdev_log(ERR, "Invalid input pointer");
1575 		return -1;
1576 	}
1577 	if (ldpc_enc->output.data == NULL) {
1578 		rte_bbdev_log(ERR, "Invalid output pointer");
1579 		return -1;
1580 	}
1581 	if (ldpc_enc->input.length == 0) {
		rte_bbdev_log(ERR, "CB size (%u) is zero",
				ldpc_enc->input.length);
1584 		return -1;
1585 	}
1586 	if ((ldpc_enc->basegraph > 2) || (ldpc_enc->basegraph == 0)) {
1587 		rte_bbdev_log(ERR,
1588 				"BG (%u) is out of range 1 <= value <= 2",
1589 				ldpc_enc->basegraph);
1590 		return -1;
1591 	}
1592 	if (ldpc_enc->rv_index > 3) {
1593 		rte_bbdev_log(ERR,
1594 				"rv_index (%u) is out of range 0 <= value <= 3",
1595 				ldpc_enc->rv_index);
1596 		return -1;
1597 	}
1598 	if (ldpc_enc->code_block_mode > RTE_BBDEV_CODE_BLOCK) {
1599 		rte_bbdev_log(ERR,
1600 				"code_block_mode (%u) is out of range 0 <= value <= 1",
1601 				ldpc_enc->code_block_mode);
1602 		return -1;
1603 	}
1604 
1605 	if (ldpc_enc->input.length >
1606 		RTE_BBDEV_LDPC_MAX_CB_SIZE >> 3) {
1607 		rte_bbdev_log(ERR, "CB size (%u) is too big, max: %d",
1608 				ldpc_enc->input.length,
1609 				RTE_BBDEV_LDPC_MAX_CB_SIZE);
1610 		return -1;
1611 	}
1612 
1613 	z_c = ldpc_enc->z_c;
1614 	/* Check Zc is valid value */
1615 	if ((z_c > 384) || (z_c < 4)) {
1616 		rte_bbdev_log(ERR, "Zc (%u) is out of range", z_c);
1617 		return -1;
1618 	}
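	/*
	 * Valid lifting sizes follow 3GPP TS 38.212 Table 5.3.2-1:
	 * Z = a * 2^j with a in {2, 3, 5, 7, 9, 11, 13, 15}. Within each
	 * power-of-two range this reduces to the divisibility checks below.
	 */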
1619 	if (z_c > 256) {
1620 		if ((z_c % 32) != 0) {
1621 			rte_bbdev_log(ERR, "Invalid Zc %d", z_c);
1622 			return -1;
1623 		}
1624 	} else if (z_c > 128) {
1625 		if ((z_c % 16) != 0) {
1626 			rte_bbdev_log(ERR, "Invalid Zc %d", z_c);
1627 			return -1;
1628 		}
1629 	} else if (z_c > 64) {
1630 		if ((z_c % 8) != 0) {
1631 			rte_bbdev_log(ERR, "Invalid Zc %d", z_c);
1632 			return -1;
1633 		}
1634 	} else if (z_c > 32) {
1635 		if ((z_c % 4) != 0) {
1636 			rte_bbdev_log(ERR, "Invalid Zc %d", z_c);
1637 			return -1;
1638 		}
1639 	} else if (z_c > 16) {
1640 		if ((z_c % 2) != 0) {
1641 			rte_bbdev_log(ERR, "Invalid Zc %d", z_c);
1642 			return -1;
1643 		}
1644 	}
1645 
1646 	n_filler = ldpc_enc->n_filler;
1647 	K = (ldpc_enc->basegraph == 1 ? 22 : 10) * ldpc_enc->z_c;
1648 	Kp = K - n_filler;
1649 	q_m = ldpc_enc->q_m;
1650 	n_cb = ldpc_enc->n_cb;
1651 	N = (ldpc_enc->basegraph == 1 ? N_ZC_1 : N_ZC_2) * z_c;
1652 	k0 = get_k0(n_cb, z_c, ldpc_enc->basegraph, ldpc_enc->rv_index);
1653 	crc24 = 0;
1654 	e = ldpc_enc->cb_params.e;
1655 
1656 	if (check_bit(op->ldpc_enc.op_flags, RTE_BBDEV_LDPC_CRC_24B_ATTACH))
1657 		crc24 = 24;
1658 
	if (K < (int) (ldpc_enc->input.length * 8 + n_filler) + crc24) {
		rte_bbdev_log(ERR, "K %u and F %u do not match input length %u",
				K, n_filler, ldpc_enc->input.length);
1662 		return -1;
1663 	}
1664 	if (ldpc_enc->code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK) {
1665 		rte_bbdev_log(ERR, "TB mode not supported");
1666 		return -1;
1667 
1668 	}
1669 
1670 	/* K' range check */
1671 	if (Kp % 8 > 0) {
1672 		rte_bbdev_log(ERR, "K' not byte aligned %u", Kp);
1673 		return -1;
1674 	}
1675 	if ((crc24 > 0) && (Kp < 292)) {
1676 		rte_bbdev_log(ERR, "Invalid CRC24 for small block %u", Kp);
1677 		return -1;
1678 	}
1679 	if (Kp < 24) {
1680 		rte_bbdev_log(ERR, "K' too small %u", Kp);
1681 		return -1;
1682 	}
1683 	if (n_filler >= (K - 2 * z_c)) {
1684 		rte_bbdev_log(ERR, "K - F invalid %u %u", K, n_filler);
1685 		return -1;
1686 	}
1687 	/* Ncb range check */
1688 	if ((n_cb > N) || (n_cb < 32) || (n_cb <= (Kp - crc24))) {
		rte_bbdev_log(ERR, "Ncb (%u) is out of range: K %d, N %d", n_cb, K, N);
1690 		return -1;
1691 	}
1692 	/* Qm range check */
1693 	if (!check_bit(op->ldpc_enc.op_flags, RTE_BBDEV_LDPC_INTERLEAVER_BYPASS) &&
1694 			((q_m == 0) || ((q_m > 2) && ((q_m % 2) == 1)) || (q_m > 8))) {
1695 		rte_bbdev_log(ERR, "Qm (%u) is out of range", q_m);
1696 		return -1;
1697 	}
1698 	/* K0 range check */
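	/* k0 must be a multiple of Zc, must lie within the circular buffer,
	 * and must not fall in the filler-bit region [K' - 2*Zc, K - 2*Zc).
	 */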
1699 	if (((k0 % z_c) > 0) || (k0 >= n_cb) || ((k0 >= (Kp - 2 * z_c)) && (k0 < (K - 2 * z_c)))) {
1700 		rte_bbdev_log(ERR, "K0 (%u) is out of range", k0);
1701 		return -1;
1702 	}
1703 	/* E range check */
1704 	if (e <= RTE_MAX(32, z_c)) {
1705 		rte_bbdev_log(ERR, "E is too small %"PRIu32"", e);
1706 		return -1;
1707 	}
	if (e > 0xFFFF) {
1709 		rte_bbdev_log(ERR, "E is too large for N3000 %"PRIu32" > 64k", e);
1710 		return -1;
1711 	}
1712 	if (q_m > 0) {
1713 		if (e % q_m > 0) {
1714 			rte_bbdev_log(ERR, "E %"PRIu32" not multiple of qm %d", e, q_m);
1715 			return -1;
1716 		}
1717 	}
1718 	/* Code word in RM range check */
1719 	if (k0 > (Kp - 2 * z_c))
1720 		L = k0 + e;
1721 	else
1722 		L = k0 + e + n_filler;
1723 	Lcb = RTE_MIN(L, n_cb);
1724 	if (ldpc_enc->basegraph == 1) {
1725 		if (Lcb <= 25 * z_c)
1726 			cw = 25 * z_c;
1727 		else if (Lcb <= 27 * z_c)
1728 			cw = 27 * z_c;
1729 		else if (Lcb <= 30 * z_c)
1730 			cw = 30 * z_c;
1731 		else if (Lcb <= 33 * z_c)
1732 			cw = 33 * z_c;
1733 		else if (Lcb <= 44 * z_c)
1734 			cw = 44 * z_c;
1735 		else if (Lcb <= 55 * z_c)
1736 			cw = 55 * z_c;
1737 		else
1738 			cw = 66 * z_c;
1739 	} else {
1740 		if (Lcb <= 15 * z_c)
1741 			cw = 15 * z_c;
1742 		else if (Lcb <= 20 * z_c)
1743 			cw = 20 * z_c;
1744 		else if (Lcb <= 25 * z_c)
1745 			cw = 25 * z_c;
1746 		else if (Lcb <= 30 * z_c)
1747 			cw = 30 * z_c;
1748 		else
1749 			cw = 50 * z_c;
1750 	}
1751 	if (n_cb < Kp - 2 * z_c)
1752 		cw_rm = n_cb;
1753 	else if ((Kp - 2 * z_c <= n_cb) && (n_cb < K - 2 * z_c))
1754 		cw_rm = Kp - 2 * z_c;
1755 	else if ((K - 2 * z_c <= n_cb) && (n_cb < cw))
1756 		cw_rm = n_cb - n_filler;
1757 	else
1758 		cw_rm = cw - n_filler;
1759 	if (cw_rm <= 32) {
		rte_bbdev_log(ERR, "Invalid rate matching");
1761 		return -1;
1762 	}
1763 	return 0;
1764 }
1765 
1766 /* Validates LDPC decoder parameters for VC 5GNR FPGA. */
1767 static inline int
1768 vc_5gnr_validate_ldpc_dec_op(struct rte_bbdev_dec_op *op)
1769 {
1770 	struct rte_bbdev_op_ldpc_dec *ldpc_dec = &op->ldpc_dec;
1771 	int z_c, n_filler, K, Kp, q_m, n_cb, N, k0, crc24;
1772 	int32_t L, Lcb, cw, cw_rm, e;
1773 
1774 	if (check_bit(ldpc_dec->op_flags, RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK))
1775 		return 0;
1776 	if (ldpc_dec->input.data == NULL) {
1777 		rte_bbdev_log(ERR, "Invalid input pointer");
1778 		return -1;
1779 	}
1780 	if (ldpc_dec->hard_output.data == NULL) {
1781 		rte_bbdev_log(ERR, "Invalid output pointer");
1782 		return -1;
1783 	}
1784 	if (ldpc_dec->input.length == 0) {
		rte_bbdev_log(ERR, "input length is zero");
1786 		return -1;
1787 	}
1788 	if ((ldpc_dec->basegraph > 2) || (ldpc_dec->basegraph == 0)) {
1789 		rte_bbdev_log(ERR,
1790 				"BG (%u) is out of range 1 <= value <= 2",
1791 				ldpc_dec->basegraph);
1792 		return -1;
1793 	}
1794 	if (ldpc_dec->iter_max == 0) {
1795 		rte_bbdev_log(ERR,
1796 				"iter_max (%u) is equal to 0",
1797 				ldpc_dec->iter_max);
1798 		return -1;
1799 	}
1800 	if (ldpc_dec->rv_index > 3) {
1801 		rte_bbdev_log(ERR,
1802 				"rv_index (%u) is out of range 0 <= value <= 3",
1803 				ldpc_dec->rv_index);
1804 		return -1;
1805 	}
1806 	if (ldpc_dec->code_block_mode > RTE_BBDEV_CODE_BLOCK) {
1807 		rte_bbdev_log(ERR,
1808 				"code_block_mode (%u) is out of range 0 <= value <= 1",
1809 				ldpc_dec->code_block_mode);
1810 		return -1;
1811 	}
1812 	if (check_bit(op->ldpc_dec.op_flags, RTE_BBDEV_LDPC_DECODE_BYPASS)) {
1813 		rte_bbdev_log(ERR, "Avoid LDPC Decode bypass");
1814 		return -1;
1815 	}
1816 
1817 	z_c = ldpc_dec->z_c;
1818 	/* Check Zc is valid value */
1819 	if ((z_c > 384) || (z_c < 4)) {
1820 		rte_bbdev_log(ERR, "Zc (%u) is out of range", z_c);
1821 		return -1;
1822 	}
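	/* Same lifting-size constraint as in vc_5gnr_validate_ldpc_enc_op():
	 * Z = a * 2^j per 3GPP TS 38.212 Table 5.3.2-1.
	 */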
1823 	if (z_c > 256) {
1824 		if ((z_c % 32) != 0) {
1825 			rte_bbdev_log(ERR, "Invalid Zc %d", z_c);
1826 			return -1;
1827 		}
1828 	} else if (z_c > 128) {
1829 		if ((z_c % 16) != 0) {
1830 			rte_bbdev_log(ERR, "Invalid Zc %d", z_c);
1831 			return -1;
1832 		}
1833 	} else if (z_c > 64) {
1834 		if ((z_c % 8) != 0) {
1835 			rte_bbdev_log(ERR, "Invalid Zc %d", z_c);
1836 			return -1;
1837 		}
1838 	} else if (z_c > 32) {
1839 		if ((z_c % 4) != 0) {
1840 			rte_bbdev_log(ERR, "Invalid Zc %d", z_c);
1841 			return -1;
1842 		}
1843 	} else if (z_c > 16) {
1844 		if ((z_c % 2) != 0) {
1845 			rte_bbdev_log(ERR, "Invalid Zc %d", z_c);
1846 			return -1;
1847 		}
1848 	}
1849 
1850 	n_filler = ldpc_dec->n_filler;
1851 	K = (ldpc_dec->basegraph == 1 ? 22 : 10) * ldpc_dec->z_c;
1852 	Kp = K - n_filler;
1853 	q_m = ldpc_dec->q_m;
1854 	n_cb = ldpc_dec->n_cb;
1855 	N = (ldpc_dec->basegraph == 1 ? N_ZC_1 : N_ZC_2) * z_c;
1856 	k0 = get_k0(n_cb, z_c, ldpc_dec->basegraph, ldpc_dec->rv_index);
1857 	crc24 = 0;
1858 	e = ldpc_dec->cb_params.e;
1859 
1860 	if (check_bit(op->ldpc_dec.op_flags, RTE_BBDEV_LDPC_CRC_TYPE_24B_CHECK))
1861 		crc24 = 24;
1862 
1863 	if (ldpc_dec->code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK) {
1864 		rte_bbdev_log(ERR, "TB mode not supported");
1865 		return -1;
1866 	}
1867 	/* Enforce HARQ input length */
1868 	ldpc_dec->harq_combined_input.length = RTE_MIN((uint32_t) n_cb,
1869 			ldpc_dec->harq_combined_input.length);
1870 	if ((ldpc_dec->harq_combined_input.length == 0) &&
1871 			check_bit(ldpc_dec->op_flags,
1872 			RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE)) {
1873 		rte_bbdev_log(ERR,
				"HARQ input length (%u) should not be zero",
1875 				ldpc_dec->harq_combined_input.length);
1876 		return -1;
1877 	}
1878 	if ((ldpc_dec->harq_combined_input.length > 0) &&
1879 			!check_bit(ldpc_dec->op_flags,
1880 			RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE)) {
1881 		ldpc_dec->harq_combined_input.length = 0;
1882 	}
1883 
1884 	/* K' range check */
1885 	if (Kp % 8 > 0) {
1886 		rte_bbdev_log(ERR, "K' not byte aligned %u", Kp);
1887 		return -1;
1888 	}
1889 	if ((crc24 > 0) && (Kp < 292)) {
1890 		rte_bbdev_log(ERR, "Invalid CRC24 for small block %u", Kp);
1891 		return -1;
1892 	}
1893 	if (Kp < 24) {
1894 		rte_bbdev_log(ERR, "K' too small %u", Kp);
1895 		return -1;
1896 	}
1897 	if (n_filler >= (K - 2 * z_c)) {
1898 		rte_bbdev_log(ERR, "K - F invalid %u %u", K, n_filler);
1899 		return -1;
1900 	}
1901 	/* Ncb range check */
1902 	if (n_cb != N) {
		rte_bbdev_log(ERR, "Ncb (%u) is out of range: K %d, N %d", n_cb, K, N);
1904 		return -1;
1905 	}
1906 	/* Qm range check */
1907 	if (!check_bit(op->ldpc_dec.op_flags,
1908 			RTE_BBDEV_LDPC_INTERLEAVER_BYPASS) &&
1909 			((q_m == 0) || ((q_m > 2) && ((q_m % 2) == 1))
1910 			|| (q_m > 8))) {
1911 		rte_bbdev_log(ERR, "Qm (%u) is out of range", q_m);
1912 		return -1;
1913 	}
1914 	/* K0 range check */
1915 	if (((k0 % z_c) > 0) || (k0 >= n_cb) || ((k0 >= (Kp - 2 * z_c)) && (k0 < (K - 2 * z_c)))) {
1916 		rte_bbdev_log(ERR, "K0 (%u) is out of range", k0);
1917 		return -1;
1918 	}
	/* E range check */
	if (e <= RTE_MAX(32, z_c)) {
		rte_bbdev_log(ERR, "E is too small %"PRIu32"", e);
		return -1;
	}
	if (e > 0xFFFF) {
		rte_bbdev_log(ERR, "E is too large %"PRIu32"", e);
		return -1;
	}
	if (q_m > 0) {
		if (e % q_m > 0) {
			rte_bbdev_log(ERR, "E %"PRIu32" not multiple of qm %d", e, q_m);
			return -1;
		}
	}
1934 	/* Code word in RM range check */
1935 	if (k0 > (Kp - 2 * z_c))
1936 		L = k0 + e;
1937 	else
1938 		L = k0 + e + n_filler;
1939 
1940 	Lcb = RTE_MIN(n_cb, RTE_MAX(L, (int32_t) ldpc_dec->harq_combined_input.length));
1941 	if (ldpc_dec->basegraph == 1) {
1942 		if (Lcb <= 25 * z_c)
1943 			cw = 25 * z_c;
1944 		else if (Lcb <= 27 * z_c)
1945 			cw = 27 * z_c;
1946 		else if (Lcb <= 30 * z_c)
1947 			cw = 30 * z_c;
1948 		else if (Lcb <= 33 * z_c)
1949 			cw = 33 * z_c;
1950 		else if (Lcb <= 44 * z_c)
1951 			cw = 44 * z_c;
1952 		else if (Lcb <= 55 * z_c)
1953 			cw = 55 * z_c;
1954 		else
1955 			cw = 66 * z_c;
1956 	} else {
1957 		if (Lcb <= 15 * z_c)
1958 			cw = 15 * z_c;
1959 		else if (Lcb <= 20 * z_c)
1960 			cw = 20 * z_c;
1961 		else if (Lcb <= 25 * z_c)
1962 			cw = 25 * z_c;
1963 		else if (Lcb <= 30 * z_c)
1964 			cw = 30 * z_c;
1965 		else
1966 			cw = 50 * z_c;
1967 	}
1968 	cw_rm = cw - n_filler;
1969 	if (cw_rm <= 32) {
		rte_bbdev_log(ERR, "Invalid rate matching");
1971 		return -1;
1972 	}
1973 	return 0;
1974 }
1975 
1976 static inline char *
1977 mbuf_append(struct rte_mbuf *m_head, struct rte_mbuf *m, uint16_t len)
1978 {
1979 	if (unlikely(len > rte_pktmbuf_tailroom(m)))
1980 		return NULL;
1981 
1982 	char *tail = (char *)m->buf_addr + m->data_off + m->data_len;
1983 	m->data_len = (uint16_t)(m->data_len + len);
1984 	m_head->pkt_len  = (m_head->pkt_len + len);
1985 	return tail;
1986 }
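
/*
 * Calling pattern used by the enqueue paths below (illustrative sketch;
 * the NULL check is hypothetical, some call sites currently ignore the
 * return value):
 *
 *	char *tail = mbuf_append(m_out_head, m_out, out_length);
 *	if (tail == NULL)
 *		return -1;
 *
 * A NULL return indicates insufficient tailroom in the last segment.
 */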
1987 
1988 static inline void
1989 fpga_5gnr_mutex_acquisition(struct fpga_5gnr_queue *q)
1990 {
1991 	uint32_t mutex_ctrl, mutex_read, cnt = 0;
1992 	/* Assign a unique id for the duration of the DDR access */
1993 	q->ddr_mutex_uuid = rte_rand();
1994 	/* Request and wait for acquisition of the mutex */
1995 	mutex_ctrl = (q->ddr_mutex_uuid << 16) + 1;
1996 	do {
1997 		if (cnt > 0)
1998 			usleep(FPGA_5GNR_TIMEOUT_CHECK_INTERVAL);
1999 		rte_bbdev_log_debug("Acquiring Mutex for %x", q->ddr_mutex_uuid);
2000 		fpga_5gnr_reg_write_32(q->d->mmio_base, FPGA_5GNR_FEC_MUTEX, mutex_ctrl);
2001 		mutex_read = fpga_5gnr_reg_read_32(q->d->mmio_base, FPGA_5GNR_FEC_MUTEX);
2002 		rte_bbdev_log_debug("Mutex %x cnt %d owner %x",
2003 				mutex_read, cnt, q->ddr_mutex_uuid);
2004 		cnt++;
2005 	} while ((mutex_read >> 16) != q->ddr_mutex_uuid);
2006 }
2007 
2008 static inline void
2009 fpga_5gnr_mutex_free(struct fpga_5gnr_queue *q)
2010 {
2011 	uint32_t mutex_ctrl = q->ddr_mutex_uuid << 16;
2012 	fpga_5gnr_reg_write_32(q->d->mmio_base, FPGA_5GNR_FEC_MUTEX, mutex_ctrl);
2013 }
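
/*
 * The two helpers above implement a simple ownership protocol around the
 * DDR access registers: write (uuid << 16) | 1 to request the mutex, then
 * spin until the FPGA echoes the requester's id in the upper 16 bits of
 * FPGA_5GNR_FEC_MUTEX. Expected usage, as in the HARQ loopback helpers
 * below:
 *
 *	fpga_5gnr_mutex_acquisition(q);
 *	... access the FPGA_5GNR_FEC_DDR4_* registers ...
 *	fpga_5gnr_mutex_free(q);
 */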
2014 
2015 static inline int
2016 fpga_5gnr_harq_write_loopback(struct fpga_5gnr_queue *q,
2017 		struct rte_mbuf *harq_input, uint16_t harq_in_length,
2018 		uint32_t harq_in_offset, uint32_t harq_out_offset)
2019 {
2020 	fpga_5gnr_mutex_acquisition(q);
2021 	uint32_t out_offset = harq_out_offset;
2022 	uint32_t in_offset = harq_in_offset;
2023 	uint32_t left_length = harq_in_length;
2024 	uint32_t reg_32, increment = 0;
2025 	uint64_t *input = NULL;
2026 	uint32_t last_transaction = left_length % FPGA_5GNR_DDR_WR_DATA_LEN_IN_BYTES;
2027 	uint64_t last_word;
2028 	struct fpga_5gnr_fec_device *d = q->d;
2029 
2030 	if (last_transaction > 0)
2031 		left_length -= last_transaction;
2032 	if (d->fpga_variant == VC_5GNR_FPGA_VARIANT) {
2033 		/*
2034 		 * Get HARQ buffer size for each VF/PF: When 0x00, there is no
2035 		 * available DDR space for the corresponding VF/PF.
2036 		 */
2037 		reg_32 = fpga_5gnr_reg_read_32(q->d->mmio_base, FPGA_5GNR_FEC_HARQ_BUF_SIZE_REGS);
2038 		if (reg_32 < harq_in_length) {
2039 			left_length = reg_32;
2040 			rte_bbdev_log(ERR, "HARQ in length > HARQ buffer size");
2041 		}
2042 	}
2043 
2044 	input = rte_pktmbuf_mtod_offset(harq_input, uint64_t *, in_offset);
2045 
2046 	while (left_length > 0) {
2047 		if (fpga_5gnr_reg_read_8(q->d->mmio_base, FPGA_5GNR_FEC_DDR4_ADDR_RDY_REGS) ==  1) {
2048 			if (d->fpga_variant == AGX100_FPGA_VARIANT) {
2049 				fpga_5gnr_reg_write_32(q->d->mmio_base,
2050 						FPGA_5GNR_FEC_DDR4_WR_ADDR_REGS,
2051 						out_offset >> 3);
2052 			} else {
2053 				fpga_5gnr_reg_write_32(q->d->mmio_base,
2054 						FPGA_5GNR_FEC_DDR4_WR_ADDR_REGS,
2055 						out_offset);
2056 			}
2057 			fpga_5gnr_reg_write_64(q->d->mmio_base,
2058 					FPGA_5GNR_FEC_DDR4_WR_DATA_REGS,
2059 					input[increment]);
2060 			left_length -= FPGA_5GNR_DDR_WR_DATA_LEN_IN_BYTES;
2061 			out_offset += FPGA_5GNR_DDR_WR_DATA_LEN_IN_BYTES;
2062 			increment++;
2063 			fpga_5gnr_reg_write_8(q->d->mmio_base, FPGA_5GNR_FEC_DDR4_WR_DONE_REGS, 1);
2064 		}
2065 	}
2066 	while (last_transaction > 0) {
2067 		if (fpga_5gnr_reg_read_8(q->d->mmio_base, FPGA_5GNR_FEC_DDR4_ADDR_RDY_REGS) ==  1) {
2068 			if (d->fpga_variant == AGX100_FPGA_VARIANT) {
2069 				fpga_5gnr_reg_write_32(q->d->mmio_base,
2070 						FPGA_5GNR_FEC_DDR4_WR_ADDR_REGS,
2071 						out_offset >> 3);
2072 			} else {
2073 				fpga_5gnr_reg_write_32(q->d->mmio_base,
2074 						FPGA_5GNR_FEC_DDR4_WR_ADDR_REGS,
2075 						out_offset);
2076 			}
2077 			last_word = input[increment];
2078 			last_word &= (uint64_t)(1ULL << (last_transaction * 4)) - 1;
2079 			fpga_5gnr_reg_write_64(q->d->mmio_base,
2080 					FPGA_5GNR_FEC_DDR4_WR_DATA_REGS,
2081 					last_word);
2082 			fpga_5gnr_reg_write_8(q->d->mmio_base, FPGA_5GNR_FEC_DDR4_WR_DONE_REGS, 1);
2083 			last_transaction = 0;
2084 		}
2085 	}
2086 	fpga_5gnr_mutex_free(q);
2087 	return 1;
2088 }
2089 
2090 static inline int
2091 fpga_5gnr_harq_read_loopback(struct fpga_5gnr_queue *q,
2092 		struct rte_mbuf *harq_output, uint16_t harq_in_length,
2093 		uint32_t harq_in_offset, uint32_t harq_out_offset)
2094 {
2095 	fpga_5gnr_mutex_acquisition(q);
2096 	uint32_t left_length, in_offset = harq_in_offset;
2097 	uint64_t reg;
2098 	uint32_t increment = 0;
2099 	uint64_t *input = NULL;
2100 	uint32_t last_transaction = harq_in_length % FPGA_5GNR_DDR_WR_DATA_LEN_IN_BYTES;
2101 	struct fpga_5gnr_fec_device *d = q->d;
2102 
2103 	if (last_transaction > 0)
2104 		harq_in_length += (8 - last_transaction);
2105 
2106 	if (d->fpga_variant == VC_5GNR_FPGA_VARIANT) {
2107 		reg = fpga_5gnr_reg_read_32(q->d->mmio_base, FPGA_5GNR_FEC_HARQ_BUF_SIZE_REGS);
2108 		if (reg < harq_in_length) {
2109 			harq_in_length = reg;
2110 			rte_bbdev_log(ERR, "HARQ in length > HARQ buffer size");
2111 		}
2112 	}
2113 
	if (!mbuf_append(harq_output, harq_output, harq_in_length)) {
		rte_bbdev_log(ERR, "HARQ output buffer smaller than HARQ input: available %d, requested %d",
				harq_output->buf_len - rte_pktmbuf_headroom(harq_output),
				harq_in_length);
		harq_in_length = harq_output->buf_len - rte_pktmbuf_headroom(harq_output);
		if (!mbuf_append(harq_output, harq_output, harq_in_length)) {
			rte_bbdev_log(ERR, "HARQ output buffer issue %d %d",
					harq_output->buf_len, harq_in_length);
			fpga_5gnr_mutex_free(q);
			return -1;
		}
	}
2125 	left_length = harq_in_length;
2126 
2127 	input = rte_pktmbuf_mtod_offset(harq_output, uint64_t *, harq_out_offset);
2128 
2129 	while (left_length > 0) {
2130 		if (d->fpga_variant == AGX100_FPGA_VARIANT) {
2131 			fpga_5gnr_reg_write_32(q->d->mmio_base,
2132 					FPGA_5GNR_FEC_DDR4_RD_ADDR_REGS,
2133 					in_offset >> 3);
2134 		} else {
2135 			fpga_5gnr_reg_write_32(q->d->mmio_base,
2136 					FPGA_5GNR_FEC_DDR4_RD_ADDR_REGS,
2137 					in_offset);
2138 		}
2139 		fpga_5gnr_reg_write_8(q->d->mmio_base, FPGA_5GNR_FEC_DDR4_RD_DONE_REGS, 1);
2140 		reg = fpga_5gnr_reg_read_8(q->d->mmio_base, FPGA_5GNR_FEC_DDR4_RD_RDY_REGS);
2141 		while (reg != 1) {
2142 			reg = fpga_5gnr_reg_read_8(q->d->mmio_base, FPGA_5GNR_FEC_DDR4_RD_RDY_REGS);
			if (reg == FPGA_5GNR_DDR_OVERFLOW) {
				rte_bbdev_log(ERR, "Read address overflow");
				fpga_5gnr_mutex_free(q);
				return -1;
			}
2147 		}
2148 		input[increment] = fpga_5gnr_reg_read_64(q->d->mmio_base,
2149 			FPGA_5GNR_FEC_DDR4_RD_DATA_REGS);
2150 		left_length -= FPGA_5GNR_DDR_RD_DATA_LEN_IN_BYTES;
2151 		in_offset += FPGA_5GNR_DDR_WR_DATA_LEN_IN_BYTES;
2152 		increment++;
2153 		if (d->fpga_variant == AGX100_FPGA_VARIANT)
2154 			fpga_5gnr_reg_write_8(q->d->mmio_base, FPGA_5GNR_FEC_DDR4_RD_RDY_REGS, 0);
2155 		else
2156 			fpga_5gnr_reg_write_8(q->d->mmio_base, FPGA_5GNR_FEC_DDR4_RD_DONE_REGS, 0);
2157 	}
2158 	fpga_5gnr_mutex_free(q);
2159 	return 1;
2160 }
2161 
2162 static inline int
2163 enqueue_ldpc_enc_one_op_cb(struct fpga_5gnr_queue *q, struct rte_bbdev_enc_op *op,
2164 		uint16_t desc_offset)
2165 {
2166 	union vc_5gnr_dma_desc *vc_5gnr_desc;
2167 	union agx100_dma_desc *agx100_desc;
2168 	int ret;
2169 	uint8_t c, crc24_bits = 0;
2170 	struct rte_bbdev_op_ldpc_enc *enc = &op->ldpc_enc;
2171 	uint16_t in_offset = enc->input.offset;
2172 	uint16_t out_offset = enc->output.offset;
2173 	struct rte_mbuf *m_in = enc->input.data;
2174 	struct rte_mbuf *m_out = enc->output.data;
2175 	struct rte_mbuf *m_out_head = enc->output.data;
2176 	uint32_t in_length, out_length, e;
2177 	uint16_t total_left = enc->input.length;
2178 	uint16_t ring_offset;
2179 	uint16_t K, k_;
2180 	struct fpga_5gnr_fec_device *d = q->d;
2181 
2182 	if (d->fpga_variant == VC_5GNR_FPGA_VARIANT) {
2183 		if (vc_5gnr_validate_ldpc_enc_op(op) == -1) {
2184 			rte_bbdev_log(ERR, "LDPC encoder validation rejected");
2185 			return -EINVAL;
2186 		}
2187 	}
2188 
2189 	/* Clear op status */
2190 	op->status = 0;
2191 
2192 	if (m_in == NULL || m_out == NULL) {
2193 		rte_bbdev_log(ERR, "Invalid mbuf pointer");
2194 		op->status = 1 << RTE_BBDEV_DATA_ERROR;
2195 		return -EINVAL;
2196 	}
2197 
2198 	if (enc->op_flags & RTE_BBDEV_LDPC_CRC_24B_ATTACH)
2199 		crc24_bits = 24;
2200 
2201 	if (enc->code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK) {
2202 		/* TODO: For Transport Block mode. */
2203 		rte_bbdev_log(ERR, "Transport Block not supported yet");
2204 		return -1;
2205 	}
2206 	/* For Code Block mode. */
2207 	c = 1;
2208 	e = enc->cb_params.e;
2209 
	/* Compute K', input length and output length. */
2211 	K = (enc->basegraph == 1 ? 22 : 10) * enc->z_c;
2212 	k_ = K - enc->n_filler;
2213 	in_length = (k_ - crc24_bits) >> 3;
2214 	out_length = (e + 7) >> 3;
2215 
2216 	total_left = rte_pktmbuf_data_len(m_in) - in_offset;
2217 
	/* Validate that the mbuf data matches the CB size. */
2219 	if (total_left != in_length) {
2220 		op->status |= 1 << RTE_BBDEV_DATA_ERROR;
2221 		rte_bbdev_log(ERR,
2222 				"Mismatch between mbuf length and included CBs sizes %d",
2223 				total_left);
2224 	}
2225 
2226 	mbuf_append(m_out_head, m_out, out_length);
2227 
2228 	/* Offset into the ring. */
2229 	ring_offset = fpga_5gnr_desc_idx_tail(q, desc_offset);
2230 
2231 	if (d->fpga_variant == VC_5GNR_FPGA_VARIANT) {
2232 		/* Setup DMA Descriptor. */
2233 		vc_5gnr_desc = vc_5gnr_get_desc_tail(q, desc_offset);
2234 		ret = vc_5gnr_dma_desc_te_fill(op, &vc_5gnr_desc->enc_req, m_in, m_out,
2235 				k_, e, in_offset, out_offset, ring_offset, c);
2236 	} else {
2237 		/* Setup DMA Descriptor. */
2238 		agx100_desc = agx100_get_desc_tail(q, desc_offset);
2239 		ret = agx100_dma_desc_le_fill(op, &agx100_desc->enc_req, m_in, m_out,
2240 				k_, e, in_offset, out_offset, ring_offset, c);
2241 	}
2242 
2243 	if (unlikely(ret < 0))
2244 		return ret;
2245 
2246 	/* Update lengths */
2247 	total_left -= in_length;
2248 	op->ldpc_enc.output.length += out_length;
2249 
2250 	if (total_left > 0) {
2251 		rte_bbdev_log(ERR,
2252 			"Mismatch between mbuf length and included CB sizes: mbuf len %u, cb len %u",
2253 				total_left, in_length);
2254 		return -1;
2255 	}
2256 
2257 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2258 	if (d->fpga_variant == VC_5GNR_FPGA_VARIANT)
2259 		vc_5gnr_print_dma_enc_desc_debug_info(vc_5gnr_desc);
2260 	else
2261 		agx100_print_dma_enc_desc_debug_info(agx100_desc);
2262 #endif
2263 	return 1;
2264 }
2265 
2266 static inline int
2267 vc_5gnr_enqueue_ldpc_dec_one_op_cb(struct fpga_5gnr_queue *q, struct rte_bbdev_dec_op *op,
2268 		uint16_t desc_offset)
2269 {
2270 	union vc_5gnr_dma_desc *desc;
2271 	int ret;
2272 	uint16_t ring_offset;
2273 	uint8_t c;
2274 	uint16_t e, in_length, out_length, k0, l, seg_total_left, sys_cols;
2275 	uint16_t K, parity_offset, harq_in_length = 0, harq_out_length = 0;
2276 	uint16_t crc24_overlap = 0;
2277 	struct rte_bbdev_op_ldpc_dec *dec = &op->ldpc_dec;
2278 	struct rte_mbuf *m_in = dec->input.data;
2279 	struct rte_mbuf *m_out = dec->hard_output.data;
2280 	struct rte_mbuf *m_out_head = dec->hard_output.data;
2281 	uint16_t in_offset = dec->input.offset;
2282 	uint16_t out_offset = dec->hard_output.offset;
2283 	uint32_t harq_offset = 0;
2284 
2285 	if (vc_5gnr_validate_ldpc_dec_op(op) == -1) {
2286 		rte_bbdev_log(ERR, "LDPC decoder validation rejected");
2287 		return -EINVAL;
2288 	}
2289 
2290 	/* Clear op status */
2291 	op->status = 0;
2292 
2293 	/* Setup DMA Descriptor */
2294 	ring_offset = fpga_5gnr_desc_idx_tail(q, desc_offset);
2295 	desc = vc_5gnr_get_desc_tail(q, desc_offset);
2296 
2297 	if (check_bit(dec->op_flags, RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK)) {
2298 		struct rte_mbuf *harq_in = dec->harq_combined_input.data;
2299 		struct rte_mbuf *harq_out = dec->harq_combined_output.data;
2300 		harq_in_length = dec->harq_combined_input.length;
2301 		uint32_t harq_in_offset = dec->harq_combined_input.offset;
2302 		uint32_t harq_out_offset = dec->harq_combined_output.offset;
2303 
2304 		if (check_bit(dec->op_flags, RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_OUT_ENABLE)) {
2305 			ret = fpga_5gnr_harq_write_loopback(q, harq_in,
2306 					harq_in_length, harq_in_offset,
2307 					harq_out_offset);
2308 		} else if (check_bit(dec->op_flags,
2309 				RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_IN_ENABLE
2310 				)) {
2311 			ret = fpga_5gnr_harq_read_loopback(q, harq_out,
2312 				harq_in_length, harq_in_offset,
2313 				harq_out_offset);
2314 			dec->harq_combined_output.length = harq_in_length;
2315 		} else {
			rte_bbdev_log(ERR, "Unexpected HARQ loopback op flags");
2317 			ret = -1;
2318 		}
2319 
2320 		/* Set descriptor for dequeue */
2321 		desc->dec_req.done = 1;
2322 		desc->dec_req.error = 0;
2323 		desc->dec_req.op_addr = op;
2324 		desc->dec_req.cbs_in_op = 1;
2325 
2326 		/* Mark this dummy descriptor to be dropped by HW */
2327 		desc->dec_req.desc_idx = (ring_offset + 1) & q->sw_ring_wrap_mask;
2328 
2329 		return ret; /* Error or number of CB */
2330 	}
2331 
2332 	if (m_in == NULL || m_out == NULL) {
2333 		rte_bbdev_log(ERR, "Invalid mbuf pointer");
2334 		op->status = 1 << RTE_BBDEV_DATA_ERROR;
2335 		return -1;
2336 	}
2337 
2338 	c = 1;
2339 	e = dec->cb_params.e;
2340 
2341 	if (check_bit(dec->op_flags, RTE_BBDEV_LDPC_CRC_TYPE_24B_DROP))
2342 		crc24_overlap = 24;
2343 
2344 	sys_cols = (dec->basegraph == 1) ? 22 : 10;
2345 	K = sys_cols * dec->z_c;
2346 	parity_offset = K - 2 * dec->z_c;
2347 
2348 	out_length = ((K - crc24_overlap - dec->n_filler) >> 3);
2349 	in_length = e;
2350 	seg_total_left = dec->input.length;
2351 
2352 	if (check_bit(dec->op_flags, RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE))
2353 		harq_in_length = RTE_MIN(dec->harq_combined_input.length, (uint32_t)dec->n_cb);
2354 
2355 	if (check_bit(dec->op_flags, RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE)) {
2356 		k0 = get_k0(dec->n_cb, dec->z_c, dec->basegraph, dec->rv_index);
2357 		if (k0 > parity_offset)
2358 			l = k0 + e;
2359 		else
2360 			l = k0 + e + dec->n_filler;
2361 		harq_out_length = RTE_MIN(RTE_MAX(harq_in_length, l), dec->n_cb);
2362 		dec->harq_combined_output.length = harq_out_length;
2363 	}
2364 
2365 	mbuf_append(m_out_head, m_out, out_length);
2366 
2367 	if (check_bit(dec->op_flags, RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE))
2368 		harq_offset = dec->harq_combined_input.offset;
2369 	else if (check_bit(dec->op_flags, RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE))
2370 		harq_offset = dec->harq_combined_output.offset;
2371 
2372 	if ((harq_offset & 0x3FF) > 0) {
2373 		rte_bbdev_log(ERR, "Invalid HARQ offset %d", harq_offset);
2374 		op->status = 1 << RTE_BBDEV_DATA_ERROR;
2375 		return -1;
2376 	}
2377 
2378 	ret = vc_5gnr_dma_desc_ld_fill(op, &desc->dec_req, m_in, m_out,
2379 		harq_in_length, in_offset, out_offset, harq_offset,
2380 		ring_offset, c);
2381 
2382 	if (unlikely(ret < 0))
2383 		return ret;
2384 	/* Update lengths */
2385 	seg_total_left -= in_length;
2386 	op->ldpc_dec.hard_output.length += out_length;
2387 	if (seg_total_left > 0) {
2388 		rte_bbdev_log(ERR,
2389 				"Mismatch between mbuf length and included CB sizes: mbuf len %u, cb len %u",
2390 				seg_total_left, in_length);
2391 		return -1;
2392 	}
2393 
2394 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2395 	vc_5gnr_print_dma_dec_desc_debug_info(desc);
2396 #endif
2397 
2398 	return 1;
2399 }
2400 
2401 static inline int
2402 agx100_enqueue_ldpc_dec_one_op_cb(struct fpga_5gnr_queue *q, struct rte_bbdev_dec_op *op,
2403 		uint16_t desc_offset)
2404 {
2405 	union agx100_dma_desc *desc;
2406 	int ret;
2407 	uint16_t ring_offset;
2408 	uint8_t c;
2409 	uint16_t e, in_length, out_length, k0, l, seg_total_left, sys_cols;
2410 	uint16_t K, parity_offset, harq_in_length = 0, harq_out_length = 0;
2411 	uint16_t crc24_overlap = 0;
2412 	struct rte_bbdev_op_ldpc_dec *dec = &op->ldpc_dec;
2413 	struct rte_mbuf *m_in = dec->input.data;
2414 	struct rte_mbuf *m_out = dec->hard_output.data;
2415 	struct rte_mbuf *m_out_head = dec->hard_output.data;
2416 	uint16_t in_offset = dec->input.offset;
2417 	uint16_t out_offset = dec->hard_output.offset;
2418 	uint32_t harq_in_offset = 0;
2419 	uint32_t harq_out_offset = 0;
2420 
2421 	/* Clear op status. */
2422 	op->status = 0;
2423 
2424 	/* Setup DMA Descriptor. */
2425 	ring_offset = fpga_5gnr_desc_idx_tail(q, desc_offset);
2426 	desc = agx100_get_desc_tail(q, desc_offset);
2427 
2428 	if (check_bit(dec->op_flags, RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK)) {
2429 		struct rte_mbuf *harq_in = dec->harq_combined_input.data;
2430 		struct rte_mbuf *harq_out = dec->harq_combined_output.data;
2431 		harq_in_length = dec->harq_combined_input.length;
		/* Reuse the outer offsets; re-declaring them here would shadow. */
		harq_in_offset = dec->harq_combined_input.offset;
		harq_out_offset = dec->harq_combined_output.offset;
2434 
2435 		if (check_bit(dec->op_flags, RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_OUT_ENABLE)) {
2436 			ret = fpga_5gnr_harq_write_loopback(q, harq_in,
2437 					harq_in_length, harq_in_offset,
2438 					harq_out_offset);
2439 		} else if (check_bit(dec->op_flags,
2440 				RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_IN_ENABLE)) {
2441 			ret = fpga_5gnr_harq_read_loopback(q, harq_out,
2442 					harq_in_length, harq_in_offset,
2443 					harq_out_offset);
2444 			dec->harq_combined_output.length = harq_in_length;
2445 		} else {
			rte_bbdev_log(ERR, "Unexpected HARQ loopback op flags");
2447 			ret = -1;
2448 		}
2449 
2450 		/* Set descriptor for dequeue. */
2451 		desc->dec_req.done = 1;
2452 		desc->dec_req.error_code = 0;
2453 		desc->dec_req.error_msg = 0;
2454 		desc->dec_req.op_addr = op;
2455 		desc->dec_req.cbs_in_op = 1;
2456 
2457 		/* Mark this dummy descriptor to be dropped by HW. */
2458 		desc->dec_req.desc_idx = (ring_offset + 1) & q->sw_ring_wrap_mask;
2459 
2460 		return ret; /* Error or number of CB. */
2461 	}
2462 
2463 	if (m_in == NULL || m_out == NULL) {
2464 		rte_bbdev_log(ERR, "Invalid mbuf pointer");
2465 		op->status = 1 << RTE_BBDEV_DATA_ERROR;
2466 		return -1;
2467 	}
2468 
2469 	c = 1;
2470 	e = dec->cb_params.e;
2471 
2472 	if (check_bit(dec->op_flags, RTE_BBDEV_LDPC_CRC_TYPE_24B_DROP))
2473 		crc24_overlap = 24;
2474 
2475 	sys_cols = (dec->basegraph == 1) ? 22 : 10;
2476 	K = sys_cols * dec->z_c;
2477 	parity_offset = K - 2 * dec->z_c;
2478 
2479 	out_length = ((K - crc24_overlap - dec->n_filler) >> 3);
2480 	in_length = e;
2481 	seg_total_left = dec->input.length;
2482 
2483 	if (check_bit(dec->op_flags, RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE))
2484 		harq_in_length = RTE_MIN(dec->harq_combined_input.length, (uint32_t)dec->n_cb);
2485 
2486 	if (check_bit(dec->op_flags, RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE)) {
2487 		k0 = get_k0(dec->n_cb, dec->z_c, dec->basegraph, dec->rv_index);
2488 		if (k0 > parity_offset)
2489 			l = k0 + e;
2490 		else
2491 			l = k0 + e + dec->n_filler;
2492 		harq_out_length = RTE_MIN(RTE_MAX(harq_in_length, l), dec->n_cb);
2493 		dec->harq_combined_output.length = harq_out_length;
2494 	}
2495 
2496 	mbuf_append(m_out_head, m_out, out_length);
2497 	harq_in_offset = dec->harq_combined_input.offset;
2498 	harq_out_offset = dec->harq_combined_output.offset;
2499 
2500 	ret = agx100_dma_desc_ld_fill(op, &desc->dec_req, m_in, m_out,
2501 		harq_in_length, in_offset, out_offset, harq_in_offset,
2502 		harq_out_offset, ring_offset, c);
2503 
2504 	if (unlikely(ret < 0))
2505 		return ret;
2506 	/* Update lengths. */
2507 	seg_total_left -= in_length;
2508 	op->ldpc_dec.hard_output.length += out_length;
2509 	if (seg_total_left > 0) {
2510 		rte_bbdev_log(ERR,
2511 				"Mismatch between mbuf length and included CB sizes: mbuf len %u, cb len %u",
2512 				seg_total_left, in_length);
2513 		return -1;
2514 	}
2515 
2516 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2517 	agx100_print_dma_dec_desc_debug_info(desc);
2518 #endif
2519 
2520 	return 1;
2521 }
2522 
2523 static uint16_t
2524 fpga_5gnr_enqueue_ldpc_enc(struct rte_bbdev_queue_data *q_data,
2525 		struct rte_bbdev_enc_op **ops, uint16_t num)
2526 {
2527 	uint16_t i, total_enqueued_cbs = 0;
2528 	int32_t avail;
2529 	int enqueued_cbs;
2530 	struct fpga_5gnr_queue *q = q_data->queue_private;
2531 	union vc_5gnr_dma_desc *vc_5gnr_desc;
2532 	union agx100_dma_desc *agx100_desc;
2533 	struct fpga_5gnr_fec_device *d = q->d;
2534 
2535 	/* Check if queue is not full */
2536 	if (unlikely((fpga_5gnr_desc_idx_tail(q, 1)) == q->head_free_desc))
2537 		return 0;
2538 
2539 	/* Calculates available space */
2540 	avail = (q->head_free_desc > q->tail) ?
2541 		q->head_free_desc - q->tail - 1 :
2542 		q->ring_ctrl_reg.ring_size + q->head_free_desc - q->tail - 1;
2543 
2544 	for (i = 0; i < num; ++i) {
2545 		/* Check if there is available space for further
2546 		 * processing
2547 		 */
2548 		if (unlikely(avail - 1 < 0))
2549 			break;
2550 		avail -= 1;
2551 		enqueued_cbs = enqueue_ldpc_enc_one_op_cb(q, ops[i], total_enqueued_cbs);
2552 
2553 		if (enqueued_cbs < 0)
2554 			break;
2555 
2556 		total_enqueued_cbs += enqueued_cbs;
2557 
2558 		rte_bbdev_log_debug("enqueuing enc ops [%d/%d] | head %d | tail %d",
2559 				total_enqueued_cbs, num,
2560 				q->head_free_desc, q->tail);
2561 	}
2562 
	/* Set interrupt bit for last CB in enqueued ops. FPGA issues interrupt
	 * only when all previous CBs were already processed.
	 * Skip when nothing was enqueued.
	 */
	if (total_enqueued_cbs > 0) {
		if (d->fpga_variant == VC_5GNR_FPGA_VARIANT) {
			vc_5gnr_desc = vc_5gnr_get_desc_tail(q, total_enqueued_cbs - 1);
			vc_5gnr_desc->enc_req.irq_en = q->irq_enable;
		} else {
			agx100_desc = agx100_get_desc_tail(q, total_enqueued_cbs - 1);
			agx100_desc->enc_req.int_en = q->irq_enable;
		}
		fpga_5gnr_dma_enqueue(q, total_enqueued_cbs, &q_data->queue_stats);
	}
2575 
2576 	/* Update stats */
2577 	q_data->queue_stats.enqueued_count += i;
2578 	q_data->queue_stats.enqueue_err_count += num - i;
2579 
2580 	return i;
2581 }
2582 
2583 static uint16_t
2584 fpga_5gnr_enqueue_ldpc_dec(struct rte_bbdev_queue_data *q_data,
2585 		struct rte_bbdev_dec_op **ops, uint16_t num)
2586 {
2587 	uint16_t i, total_enqueued_cbs = 0;
2588 	int32_t avail;
2589 	int enqueued_cbs;
2590 	struct fpga_5gnr_queue *q = q_data->queue_private;
2591 	union vc_5gnr_dma_desc *vc_5gnr_desc;
2592 	union agx100_dma_desc *agx100_desc;
2593 	struct fpga_5gnr_fec_device *d = q->d;
2594 
2595 	/* Check if queue is not full */
2596 	if (unlikely((fpga_5gnr_desc_idx_tail(q, 1)) == q->head_free_desc))
2597 		return 0;
2598 
2599 	/* Calculates available space */
2600 	avail = (q->head_free_desc > q->tail) ?
2601 		q->head_free_desc - q->tail - 1 :
2602 		q->ring_ctrl_reg.ring_size + q->head_free_desc - q->tail - 1;
2603 
2604 	for (i = 0; i < num; ++i) {
2605 
2606 		/* Check if there is available space for further
2607 		 * processing
2608 		 */
2609 		if (unlikely(avail - 1 < 0))
2610 			break;
2611 		avail -= 1;
2612 		if (q->d->fpga_variant == VC_5GNR_FPGA_VARIANT) {
2613 			enqueued_cbs = vc_5gnr_enqueue_ldpc_dec_one_op_cb(q, ops[i],
2614 					total_enqueued_cbs);
2615 		} else {
2616 			enqueued_cbs = agx100_enqueue_ldpc_dec_one_op_cb(q, ops[i],
2617 					total_enqueued_cbs);
2618 		}
2619 
2620 		if (enqueued_cbs < 0)
2621 			break;
2622 
2623 		total_enqueued_cbs += enqueued_cbs;
2624 
2625 		rte_bbdev_log_debug("enqueuing dec ops [%d/%d] | head %d | tail %d",
2626 				total_enqueued_cbs, num,
2627 				q->head_free_desc, q->tail);
2628 	}
2629 
2630 	/* Update stats */
2631 	q_data->queue_stats.enqueued_count += i;
2632 	q_data->queue_stats.enqueue_err_count += num - i;
2633 
	/* Set interrupt bit for last CB in enqueued ops. FPGA issues interrupt
	 * only when all previous CBs were already processed.
	 * Skip when nothing was enqueued.
	 */
	if (total_enqueued_cbs > 0) {
		if (d->fpga_variant == VC_5GNR_FPGA_VARIANT) {
			vc_5gnr_desc = vc_5gnr_get_desc_tail(q, total_enqueued_cbs - 1);
			vc_5gnr_desc->enc_req.irq_en = q->irq_enable;
		} else {
			agx100_desc = agx100_get_desc_tail(q, total_enqueued_cbs - 1);
			agx100_desc->enc_req.int_en = q->irq_enable;
		}
		fpga_5gnr_dma_enqueue(q, total_enqueued_cbs, &q_data->queue_stats);
	}
2646 	return i;
2647 }
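
/*
 * Application-side sketch of driving the enqueue callbacks above through
 * the public bbdev API (burst size and identifiers are hypothetical):
 *
 *	struct rte_bbdev_enc_op *enc_ops[32];
 *	uint16_t nb_enq = rte_bbdev_enqueue_ldpc_enc_ops(dev_id, queue_id,
 *			enc_ops, 32);
 *
 * nb_enq may be smaller than the burst when the SW ring has no free
 * descriptors; the remaining ops can be retried on a later call.
 */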
2648 
2649 
2650 static inline int
2651 vc_5gnr_dequeue_ldpc_enc_one_op_cb(struct fpga_5gnr_queue *q, struct rte_bbdev_enc_op **op,
2652 		uint16_t desc_offset)
2653 {
2654 	union vc_5gnr_dma_desc *desc;
2655 	int desc_error;
2656 	/* Set current desc */
2657 	desc = vc_5gnr_get_desc(q, desc_offset);
2658 
	/* Check if done. */
2660 	if (desc->enc_req.done == 0)
2661 		return -1;
2662 
2663 	/* make sure the response is read atomically */
2664 	rte_smp_rmb();
2665 
2666 	rte_bbdev_log_debug("DMA response desc %p", desc);
2667 
2668 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2669 	vc_5gnr_print_dma_enc_desc_debug_info(desc);
2670 #endif
2671 	*op = desc->enc_req.op_addr;
2672 	/* Check the descriptor error field, return 1 on error */
2673 	desc_error = vc_5gnr_check_desc_error(desc->enc_req.error);
2674 	(*op)->status = desc_error << RTE_BBDEV_DATA_ERROR;
2675 
2676 	return 1;
2677 }
2678 
2679 static inline int
2680 agx100_dequeue_ldpc_enc_one_op_cb(struct fpga_5gnr_queue *q, struct rte_bbdev_enc_op **op,
2681 		uint16_t desc_offset)
2682 {
2683 	union agx100_dma_desc *desc;
2684 	int desc_error;
2685 
2686 	/* Set current desc. */
2687 	desc = agx100_get_desc(q, desc_offset);
	/* Check if done. */
2689 	if (desc->enc_req.done == 0)
2690 		return -1;
2691 
2692 	/* make sure the response is read atomically. */
2693 	rte_smp_rmb();
2694 
2695 	rte_bbdev_log_debug("DMA response desc %p", desc);
2696 
2697 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2698 	agx100_print_dma_enc_desc_debug_info(desc);
2699 #endif
2700 	*op = desc->enc_req.op_addr;
2701 	/* Check the descriptor error field, return 1 on error. */
2702 	desc_error = agx100_check_desc_error(desc->enc_req.error_code,
2703 			desc->enc_req.error_msg);
2704 
2705 	(*op)->status = desc_error << RTE_BBDEV_DATA_ERROR;
2706 
2707 	return 1;
2708 }
2709 
2710 static inline int
2711 vc_5gnr_dequeue_ldpc_dec_one_op_cb(struct fpga_5gnr_queue *q, struct rte_bbdev_dec_op **op,
2712 		uint16_t desc_offset)
2713 {
2714 	union vc_5gnr_dma_desc *desc;
2715 	int desc_error;
2716 
2717 	/* Set descriptor */
2718 	desc = vc_5gnr_get_desc(q, desc_offset);
2719 
2720 	/* Verify done bit is set */
2721 	if (desc->dec_req.done == 0)
2722 		return -1;
2723 
2724 	/* make sure the response is read atomically */
2725 	rte_smp_rmb();
2726 
2727 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2728 	vc_5gnr_print_dma_dec_desc_debug_info(desc);
2729 #endif
2730 
2731 	*op = desc->dec_req.op_addr;
2732 
2733 	if (check_bit((*op)->ldpc_dec.op_flags,
2734 			RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK)) {
2735 		(*op)->status = 0;
2736 		return 1;
2737 	}
2738 
	/* FPGA reports the iteration count minus one; adjust it back */
2740 	(*op)->ldpc_dec.iter_count = desc->dec_req.iter + 1;
2741 
2742 	/* CRC Check criteria */
2743 	if (desc->dec_req.crc24b_ind && !(desc->dec_req.crcb_pass))
2744 		(*op)->status = 1 << RTE_BBDEV_CRC_ERROR;
2745 
2746 	/* et_pass = 0 when decoder fails */
2747 	(*op)->status |= !(desc->dec_req.et_pass) << RTE_BBDEV_SYNDROME_ERROR;
2748 
2749 	/* Check the descriptor error field, return 1 on error */
2750 	desc_error = vc_5gnr_check_desc_error(desc->dec_req.error);
2751 
2752 	(*op)->status |= desc_error << RTE_BBDEV_DATA_ERROR;
2753 
2754 	return 1;
2755 }
2756 
2757 static inline int
2758 agx100_dequeue_ldpc_dec_one_op_cb(struct fpga_5gnr_queue *q, struct rte_bbdev_dec_op **op,
2759 		uint16_t desc_offset)
2760 {
2761 	union agx100_dma_desc *desc;
2762 	int desc_error;
2763 
2764 	/* Set descriptor. */
2765 	desc = agx100_get_desc(q, desc_offset);
2766 	/* Verify done bit is set. */
2767 	if (desc->dec_req.done == 0)
2768 		return -1;
2769 
2770 	/* make sure the response is read atomically. */
2771 	rte_smp_rmb();
2772 
2773 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2774 	agx100_print_dma_dec_desc_debug_info(desc);
2775 #endif
2776 
2777 	*op = desc->dec_req.op_addr;
2778 
2779 	if (check_bit((*op)->ldpc_dec.op_flags, RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK)) {
2780 		(*op)->status = 0;
2781 		return 1;
2782 	}
2783 
	/* FPGA reports the iteration count minus one; adjust it back. */
2785 	(*op)->ldpc_dec.iter_count = desc->dec_req.max_iter_ret + 1;
2786 
2787 	/* CRC Check criteria. */
2788 	if (desc->dec_req.crc24b_ind && !(desc->dec_req.cb_crc_all_pass))
2789 		(*op)->status = 1 << RTE_BBDEV_CRC_ERROR;
2790 
2791 	/* et_pass = 0 when decoder fails. */
2792 	(*op)->status |= !(desc->dec_req.cb_all_et_pass) << RTE_BBDEV_SYNDROME_ERROR;
2793 
2794 	/* Check the descriptor error field, return 1 on error. */
2795 	desc_error = agx100_check_desc_error(desc->dec_req.error_code,
2796 			desc->dec_req.error_msg);
2797 
2798 	(*op)->status |= desc_error << RTE_BBDEV_DATA_ERROR;
2799 	return 1;
2800 }
2801 
2802 static uint16_t
2803 fpga_5gnr_dequeue_ldpc_enc(struct rte_bbdev_queue_data *q_data,
2804 		struct rte_bbdev_enc_op **ops, uint16_t num)
2805 {
2806 	struct fpga_5gnr_queue *q = q_data->queue_private;
2807 	uint32_t avail = (q->tail - q->head_free_desc) & q->sw_ring_wrap_mask;
2808 	uint16_t i;
2809 	uint16_t dequeued_cbs = 0;
2810 	int ret;
2811 
2812 	for (i = 0; (i < num) && (dequeued_cbs < avail); ++i) {
2813 		if (q->d->fpga_variant == VC_5GNR_FPGA_VARIANT)
2814 			ret = vc_5gnr_dequeue_ldpc_enc_one_op_cb(q, &ops[i], dequeued_cbs);
2815 		else
2816 			ret = agx100_dequeue_ldpc_enc_one_op_cb(q, &ops[i], dequeued_cbs);
2817 
2818 		if (ret < 0)
2819 			break;
2820 
2821 		dequeued_cbs += ret;
2822 
2823 		rte_bbdev_log_debug("dequeuing enc ops [%d/%d] | head %d | tail %d",
2824 				dequeued_cbs, num, q->head_free_desc, q->tail);
2825 	}
2826 
2827 	/* Update head */
2828 	q->head_free_desc = fpga_5gnr_desc_idx(q, dequeued_cbs);
2829 
2830 	/* Update stats */
2831 	q_data->queue_stats.dequeued_count += i;
2832 
2833 	return i;
2834 }
2835 
2836 static uint16_t
2837 fpga_5gnr_dequeue_ldpc_dec(struct rte_bbdev_queue_data *q_data,
2838 		struct rte_bbdev_dec_op **ops, uint16_t num)
2839 {
2840 	struct fpga_5gnr_queue *q = q_data->queue_private;
2841 	uint32_t avail = (q->tail - q->head_free_desc) & q->sw_ring_wrap_mask;
2842 	uint16_t i;
2843 	uint16_t dequeued_cbs = 0;
2844 	int ret;
2845 
2846 	for (i = 0; (i < num) && (dequeued_cbs < avail); ++i) {
2847 		if (q->d->fpga_variant == VC_5GNR_FPGA_VARIANT)
2848 			ret = vc_5gnr_dequeue_ldpc_dec_one_op_cb(q, &ops[i], dequeued_cbs);
2849 		else
2850 			ret = agx100_dequeue_ldpc_dec_one_op_cb(q, &ops[i], dequeued_cbs);
2851 
2852 		if (ret < 0)
2853 			break;
2854 
2855 		dequeued_cbs += ret;
2856 
2857 		rte_bbdev_log_debug("dequeuing dec ops [%d/%d] | head %d | tail %d",
2858 				dequeued_cbs, num, q->head_free_desc, q->tail);
2859 	}
2860 
2861 	/* Update head */
2862 	q->head_free_desc = fpga_5gnr_desc_idx(q, dequeued_cbs);
2863 
2864 	/* Update stats */
2865 	q_data->queue_stats.dequeued_count += i;
2866 
2867 	return i;
2868 }
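
/*
 * Matching dequeue-side sketch: poll until the previously enqueued burst
 * completes (identifiers are hypothetical):
 *
 *	struct rte_bbdev_dec_op *dec_ops[32];
 *	uint16_t nb_deq = 0;
 *	while (nb_deq < nb_enq)
 *		nb_deq += rte_bbdev_dequeue_ldpc_dec_ops(dev_id, queue_id,
 *				&dec_ops[nb_deq], nb_enq - nb_deq);
 */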
2869 
2870 
2871 /* Initialization Function */
2872 static void
2873 fpga_5gnr_fec_init(struct rte_bbdev *dev, struct rte_pci_driver *drv)
2874 {
2875 	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
2876 
2877 	dev->dev_ops = &fpga_5gnr_ops;
2878 	dev->enqueue_ldpc_enc_ops = fpga_5gnr_enqueue_ldpc_enc;
2879 	dev->enqueue_ldpc_dec_ops = fpga_5gnr_enqueue_ldpc_dec;
2880 	dev->dequeue_ldpc_enc_ops = fpga_5gnr_dequeue_ldpc_enc;
2881 	dev->dequeue_ldpc_dec_ops = fpga_5gnr_dequeue_ldpc_dec;
2882 
	/* Device variant specific handling. */
	struct fpga_5gnr_fec_device *d = dev->data->dev_private;

	d->pf_device = !strcmp(drv->driver.name, RTE_STR(FPGA_5GNR_FEC_PF_DRIVER_NAME));
	d->mmio_base = pci_dev->mem_resource[0].addr;
	if ((pci_dev->id.device_id == AGX100_PF_DEVICE_ID) ||
			(pci_dev->id.device_id == AGX100_VF_DEVICE_ID)) {
		d->fpga_variant = AGX100_FPGA_VARIANT;
		/* Maximum number of queues possible for this device. */
		d->total_num_queues = fpga_5gnr_reg_read_32(d->mmio_base,
				FPGA_5GNR_FEC_VERSION_ID) >> 24;
	} else {
		d->fpga_variant = VC_5GNR_FPGA_VARIANT;
		d->total_num_queues = VC_5GNR_TOTAL_NUM_QUEUES;
	}
2906 
2907 	rte_bbdev_log_debug(
2908 			"Init device %s [%s] @ virtaddr %p phyaddr %#"PRIx64,
2909 			drv->driver.name, dev->data->name,
2910 			(void *)pci_dev->mem_resource[0].addr,
2911 			pci_dev->mem_resource[0].phys_addr);
2912 }
2913 
2914 static int
2915 fpga_5gnr_fec_probe(struct rte_pci_driver *pci_drv,
2916 	struct rte_pci_device *pci_dev)
2917 {
2918 	struct rte_bbdev *bbdev = NULL;
2919 	char dev_name[RTE_BBDEV_NAME_MAX_LEN];
2920 	struct fpga_5gnr_fec_device *d;
2921 
2922 	if (pci_dev == NULL) {
2923 		rte_bbdev_log(ERR, "NULL PCI device");
2924 		return -EINVAL;
2925 	}
2926 
2927 	rte_pci_device_name(&pci_dev->addr, dev_name, sizeof(dev_name));
2928 
2929 	/* Allocate memory to be used privately by drivers */
2930 	bbdev = rte_bbdev_allocate(pci_dev->device.name);
2931 	if (bbdev == NULL)
2932 		return -ENODEV;
2933 
2934 	/* allocate device private memory */
2935 	bbdev->data->dev_private = rte_zmalloc_socket(dev_name,
2936 			sizeof(struct fpga_5gnr_fec_device),
2937 			RTE_CACHE_LINE_SIZE,
2938 			pci_dev->device.numa_node);
2939 
2940 	if (bbdev->data->dev_private == NULL) {
		rte_bbdev_log(CRIT,
				"Allocation of %zu bytes for device \"%s\" failed",
				sizeof(struct fpga_5gnr_fec_device), dev_name);
		rte_bbdev_release(bbdev);
		return -ENOMEM;
2946 	}
2947 
2948 	/* Fill HW specific part of device structure */
2949 	bbdev->device = &pci_dev->device;
2950 	bbdev->intr_handle = pci_dev->intr_handle;
2951 	bbdev->data->socket_id = pci_dev->device.numa_node;
2952 
2953 	/* Invoke FPGA 5GNR FEC device initialization function */
2954 	fpga_5gnr_fec_init(bbdev, pci_drv);
2955 
2956 	rte_bbdev_log_debug("bbdev id = %u [%s]",
2957 			bbdev->data->dev_id, dev_name);
2958 
2959 	d = bbdev->data->dev_private;
2960 	if (d->fpga_variant == VC_5GNR_FPGA_VARIANT) {
2961 		uint32_t version_id = fpga_5gnr_reg_read_32(d->mmio_base, FPGA_5GNR_FEC_VERSION_ID);
2962 		rte_bbdev_log(INFO, "Vista Creek FPGA RTL v%u.%u",
2963 				((uint16_t)(version_id >> 16)), ((uint16_t)version_id));
2964 	} else {
2965 		uint32_t version_num_queues = fpga_5gnr_reg_read_32(d->mmio_base,
2966 				FPGA_5GNR_FEC_VERSION_ID);
2967 		uint8_t major_version_id = version_num_queues >> 16;
2968 		uint8_t minor_version_id = version_num_queues >> 8;
2969 		uint8_t patch_id = version_num_queues;
2970 
2971 		rte_bbdev_log(INFO, "AGX100 RTL v%u.%u.%u",
2972 				major_version_id, minor_version_id, patch_id);
2973 	}
2974 
2975 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2976 	print_static_reg_debug_info(d->mmio_base, d->fpga_variant);
2977 #endif
2978 	return 0;
2979 }
2980 
2981 static int
2982 fpga_5gnr_fec_remove(struct rte_pci_device *pci_dev)
2983 {
2984 	struct rte_bbdev *bbdev;
2985 	int ret;
2986 	uint8_t dev_id;
2987 
2988 	if (pci_dev == NULL)
2989 		return -EINVAL;
2990 
2991 	/* Find device */
2992 	bbdev = rte_bbdev_get_named_dev(pci_dev->device.name);
2993 	if (bbdev == NULL) {
2994 		rte_bbdev_log(CRIT,
2995 				"Couldn't find HW dev \"%s\" to uninitialise it",
2996 				pci_dev->device.name);
2997 		return -ENODEV;
2998 	}
2999 	dev_id = bbdev->data->dev_id;
3000 
3001 	/* free device private memory before close */
3002 	rte_free(bbdev->data->dev_private);
3003 
3004 	/* Close device */
3005 	ret = rte_bbdev_close(dev_id);
3006 	if (ret < 0)
3007 		rte_bbdev_log(ERR,
3008 				"Device %i failed to close during uninit: %i",
3009 				dev_id, ret);
3010 
3011 	/* release bbdev from library */
3012 	ret = rte_bbdev_release(bbdev);
3013 	if (ret)
3014 		rte_bbdev_log(ERR, "Device %i failed to uninit: %i", dev_id, ret);
3015 
3016 	rte_bbdev_log_debug("Destroyed bbdev = %u", dev_id);
3017 
3018 	return 0;
3019 }
3020 
3021 static inline void
3022 fpga_5gnr_set_default_conf(struct rte_fpga_5gnr_fec_conf *def_conf)
3023 {
3024 	/* clear default configuration before initialization */
3025 	memset(def_conf, 0, sizeof(struct rte_fpga_5gnr_fec_conf));
3026 	/* Set pf mode to true */
3027 	def_conf->pf_mode_en = true;
3028 
3029 	/* Set ratio between UL and DL to 1:1 (unit of weight is 3 CBs) */
3030 	def_conf->ul_bandwidth = 3;
3031 	def_conf->dl_bandwidth = 3;
3032 
3033 	/* Set Load Balance Factor to 64 */
3034 	def_conf->dl_load_balance = 64;
3035 	def_conf->ul_load_balance = 64;
3036 }
3037 
3038 /* Initial configuration of Vista Creek device. */
3039 static int vc_5gnr_configure(const char *dev_name, const struct rte_fpga_5gnr_fec_conf *conf)
3040 {
3041 	uint32_t payload_32, address;
3042 	uint16_t payload_16;
3043 	uint8_t payload_8;
3044 	uint16_t q_id, vf_id, total_q_id, total_ul_q_id, total_dl_q_id;
3045 	struct rte_bbdev *bbdev = rte_bbdev_get_named_dev(dev_name);
3046 	struct rte_fpga_5gnr_fec_conf def_conf;
3047 
3048 	if (bbdev == NULL) {
3049 		rte_bbdev_log(ERR,
3050 				"Invalid dev_name (%s), or device is not yet initialised",
3051 				dev_name);
3052 		return -ENODEV;
3053 	}
3054 
3055 	struct fpga_5gnr_fec_device *d = bbdev->data->dev_private;
3056 
3057 	if (conf == NULL) {
3058 		rte_bbdev_log(ERR, "VC FPGA Configuration was not provided.");
3059 		rte_bbdev_log(ERR, "Default configuration will be loaded.");
3060 		fpga_5gnr_set_default_conf(&def_conf);
3061 		conf = &def_conf;
3062 	}
3063 
3064 	/*
3065 	 * Configure UL:DL ratio.
3066 	 * [7:0]: UL weight
3067 	 * [15:8]: DL weight
3068 	 */
3069 	payload_16 = (conf->dl_bandwidth << 8) | conf->ul_bandwidth;
3070 	address = VC_5GNR_CONFIGURATION;
3071 	fpga_5gnr_reg_write_16(d->mmio_base, address, payload_16);
3072 
3073 	/* Clear all queues registers */
3074 	payload_32 = FPGA_5GNR_INVALID_HW_QUEUE_ID;
3075 	for (q_id = 0; q_id < d->total_num_queues; ++q_id) {
3076 		address = (q_id << 2) + VC_5GNR_QUEUE_MAP;
3077 		fpga_5gnr_reg_write_32(d->mmio_base, address, payload_32);
3078 	}
3079 
3080 	/*
3081 	 * If PF mode is enabled allocate all queues for PF only.
3082 	 *
3083 	 * For VF mode each VF can have different number of UL and DL queues.
3084 	 * Total number of queues to configure cannot exceed VC FPGA
	 * capabilities: 64 queues in total (32 for UL and 32 for DL).
	 * Queue mapping is done according to the configuration:
	 *
	 * UL queues:
	 * |                Q_ID              | VF_ID |
	 * |                 0                |   0   |
	 * |                ...               |   0   |
	 * | conf->vf_ul_queues_number[0] - 1 |   0   |
	 * | conf->vf_ul_queues_number[0]     |   1   |
	 * |                ...               |   1   |
	 * | conf->vf_ul_queues_number[1] - 1 |   1   |
	 * |                ...               |  ...  |
	 * | conf->vf_ul_queues_number[7] - 1 |   7   |
	 *
	 * DL queues (Q_IDs start at 32):
	 * |                  Q_ID                 | VF_ID |
	 * |                   32                  |   0   |
	 * |                   ...                 |   0   |
	 * | 32 + conf->vf_dl_queues_number[0] - 1 |   0   |
	 * | 32 + conf->vf_dl_queues_number[0]     |   1   |
	 * |                   ...                 |   1   |
	 * | 32 + conf->vf_dl_queues_number[1] - 1 |   1   |
	 * |                   ...                 |  ...  |
	 * | 32 + conf->vf_dl_queues_number[7] - 1 |   7   |
	 *
	 * Example of configuration (see also the usage sketch after this
	 * function):
3111 	 * conf->vf_ul_queues_number[0] = 4;  -> 4 UL queues for VF0
3112 	 * conf->vf_dl_queues_number[0] = 4;  -> 4 DL queues for VF0
3113 	 * conf->vf_ul_queues_number[1] = 2;  -> 2 UL queues for VF1
3114 	 * conf->vf_dl_queues_number[1] = 2;  -> 2 DL queues for VF1
3115 	 *
3116 	 * UL:
3117 	 * | Q_ID | VF_ID |
3118 	 * |   0  |   0   |
3119 	 * |   1  |   0   |
3120 	 * |   2  |   0   |
3121 	 * |   3  |   0   |
3122 	 * |   4  |   1   |
3123 	 * |   5  |   1   |
3124 	 *
3125 	 * DL:
3126 	 * | Q_ID | VF_ID |
3127 	 * |  32  |   0   |
3128 	 * |  33  |   0   |
3129 	 * |  34  |   0   |
3130 	 * |  35  |   0   |
3131 	 * |  36  |   1   |
3132 	 * |  37  |   1   |
3133 	 */
3134 	if (conf->pf_mode_en) {
3135 		payload_32 = 0x1;
3136 		for (q_id = 0; q_id < d->total_num_queues; ++q_id) {
3137 			address = (q_id << 2) + VC_5GNR_QUEUE_MAP;
3138 			fpga_5gnr_reg_write_32(d->mmio_base, address, payload_32);
3139 		}
3140 	} else {
3141 		/* Calculate total number of UL and DL queues to configure */
3142 		total_ul_q_id = total_dl_q_id = 0;
3143 		for (vf_id = 0; vf_id < FPGA_5GNR_FEC_NUM_VFS; ++vf_id) {
3144 			total_ul_q_id += conf->vf_ul_queues_number[vf_id];
3145 			total_dl_q_id += conf->vf_dl_queues_number[vf_id];
3146 		}
3147 		total_q_id = total_dl_q_id + total_ul_q_id;
3148 		/*
3149 		 * Check that the total number of queues to configure does not
3150 		 * exceed the FPGA capabilities (64 queues: 32 UL and 32 DL).
3151 		 */
3152 		if ((total_ul_q_id > VC_5GNR_NUM_UL_QUEUES) ||
3153 			(total_dl_q_id > VC_5GNR_NUM_DL_QUEUES) ||
3154 			(total_q_id > d->total_num_queues)) {
3155 			rte_bbdev_log(ERR,
3156 					"VC 5GNR FPGA Configuration failed. Too many queues to configure: UL_Q %u, DL_Q %u, FPGA_Q %u",
3157 					total_ul_q_id, total_dl_q_id,
3158 					d->total_num_queues);
3159 			return -EINVAL;
3160 		}
3161 		total_ul_q_id = 0;
3162 		for (vf_id = 0; vf_id < FPGA_5GNR_FEC_NUM_VFS; ++vf_id) {
3163 			for (q_id = 0; q_id < conf->vf_ul_queues_number[vf_id];
3164 					++q_id, ++total_ul_q_id) {
3165 				address = (total_ul_q_id << 2) + VC_5GNR_QUEUE_MAP;
3166 				payload_32 = ((0x80 + vf_id) << 16) | 0x1;
3167 				fpga_5gnr_reg_write_32(d->mmio_base, address,
3168 						payload_32);
3169 			}
3170 		}
3171 		total_dl_q_id = 0;
3172 		for (vf_id = 0; vf_id < FPGA_5GNR_FEC_NUM_VFS; ++vf_id) {
3173 			for (q_id = 0; q_id < conf->vf_dl_queues_number[vf_id];
3174 					++q_id, ++total_dl_q_id) {
3175 				address = ((total_dl_q_id + VC_5GNR_NUM_UL_QUEUES)
3176 						<< 2) + VC_5GNR_QUEUE_MAP;
3177 				payload_32 = ((0x80 + vf_id) << 16) | 0x1;
3178 				fpga_5gnr_reg_write_32(d->mmio_base, address,
3179 						payload_32);
3180 			}
3181 		}
3182 	}
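
	/*
	 * Note on the payload written in the VF mapping loops above: bit 0
	 * enables the queue and bits [23:16] hold 0x80 + vf_id, presumably the
	 * VF selector (the PF-only path writes plain 0x1). E.g. for vf_id = 1:
	 *
	 *   payload_32 = ((0x80 + 1) << 16) | 0x1;   // 0x00810001
	 */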
3183 
3184 	/* Setting Load Balance Factor */
3185 	payload_16 = (conf->dl_load_balance << 8) | (conf->ul_load_balance);
3186 	address = FPGA_5GNR_FEC_LOAD_BALANCE_FACTOR;
3187 	fpga_5gnr_reg_write_16(d->mmio_base, address, payload_16);
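
	/*
	 * E.g. (illustrative values) ul_load_balance = 64 and dl_load_balance = 64
	 * pack to payload_16 = (64 << 8) | 64 = 0x4040, mirroring the UL:DL
	 * weight encoding used earlier.
	 */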
3188 
3189 	/* Setting length of ring descriptor entry */
3190 	payload_16 = FPGA_5GNR_RING_DESC_ENTRY_LENGTH;
3191 	address = FPGA_5GNR_FEC_RING_DESC_LEN;
3192 	fpga_5gnr_reg_write_16(d->mmio_base, address, payload_16);
3193 
3194 	/* Queue PF/VF mapping table is ready */
3195 	payload_8 = 0x1;
3196 	address = FPGA_5GNR_FEC_QUEUE_PF_VF_MAP_DONE;
3197 	fpga_5gnr_reg_write_8(d->mmio_base, address, payload_8);
3198 
3199 	rte_bbdev_log_debug("PF Vista Creek 5GNR FPGA configuration complete for %s", dev_name);
3200 
3201 #ifdef RTE_LIBRTE_BBDEV_DEBUG
3202 	print_static_reg_debug_info(d->mmio_base, d->fpga_variant);
3203 #endif
3204 	return 0;
3205 }
3206 
3207 /* Initial configuration of AGX100 device. */
3208 static int agx100_configure(const char *dev_name, const struct rte_fpga_5gnr_fec_conf *conf)
3209 {
3210 	uint32_t payload_32, address;
3211 	uint16_t payload_16;
3212 	uint8_t payload_8;
3213 	uint16_t q_id, vf_id, total_q_id, total_ul_q_id, total_dl_q_id;
3214 	struct rte_bbdev *bbdev = rte_bbdev_get_named_dev(dev_name);
3215 	struct rte_fpga_5gnr_fec_conf def_conf;
3216 
3217 	if (bbdev == NULL) {
3218 		rte_bbdev_log(ERR,
3219 				"Invalid dev_name (%s), or device is not yet initialised",
3220 				dev_name);
3221 		return -ENODEV;
3222 	}
3223 
3224 	struct fpga_5gnr_fec_device *d = bbdev->data->dev_private;
3225 
3226 	if (conf == NULL) {
3227 		rte_bbdev_log(ERR, "AGX100 Configuration was not provided.");
3228 		rte_bbdev_log(ERR, "Default configuration will be loaded.");
3229 		fpga_5gnr_set_default_conf(&def_conf);
3230 		conf = &def_conf;
3231 	}
3232 
3233 	uint8_t total_num_queues = d->total_num_queues;
3234 	uint8_t num_ul_queues = total_num_queues >> 1;
3235 	uint8_t num_dl_queues = total_num_queues >> 1;
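	/* E.g. with total_num_queues = 64, this yields 32 UL and 32 DL queues. */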
3236 
3237 	/* Clear all queue map registers (one 32-bit register per queue, hence q_id << 2). */
3238 	payload_32 = FPGA_5GNR_INVALID_HW_QUEUE_ID;
3239 	for (q_id = 0; q_id < total_num_queues; ++q_id) {
3240 		address = (q_id << 2) + AGX100_QUEUE_MAP;
3241 		fpga_5gnr_reg_write_32(d->mmio_base, address, payload_32);
3242 	}
3243 
3244 	/*
3245 	 * If PF mode is enabled, allocate all queues to the PF only.
3246 	 *
3247 	 * In VF mode, each VF can have a different number of UL and DL queues.
3248 	 * The total number of queues to configure cannot exceed the AGX100
3249 	 * capabilities: total_num_queues in total, split evenly between UL and DL.
3250 	 * Queue mapping is done according to the configuration:
3251 	 *
3252 	 * UL queues:
3253 	 * |                Q_ID              | VF_ID |
3254 	 * |                 0                |   0   |
3255 	 * |                ...               |   0   |
3256 	 * | conf->vf_ul_queues_number[0] - 1 |   0   |
3257 	 * | conf->vf_ul_queues_number[0]     |   1   |
3258 	 * |                ...               |   1   |
3259 	 * | conf->vf_ul_queues_number[1] - 1 |   1   |
3260 	 * |                ...               |  ...  |
3261 	 * | conf->vf_ul_queues_number[7] - 1 |   7   |
3262 	 *
3263 	 * DL queues (Q_IDs are offset by num_ul_queues, 32 in this example):
3264 	 * |                  Q_ID                 | VF_ID |
3265 	 * |                   32                  |   0   |
3266 	 * |                  ...                  |   0   |
3267 	 * | 32 + conf->vf_dl_queues_number[0] - 1 |   0   |
3268 	 * | 32 + conf->vf_dl_queues_number[0]     |   1   |
3269 	 * |                  ...                  |   1   |
3270 	 * | 32 + conf->vf_dl_queues_number[1] - 1 |   1   |
3271 	 * |                  ...                  |  ...  |
3272 	 * | 32 + conf->vf_dl_queues_number[7] - 1 |   7   |
3273 	 *
3274 	 * Example of configuration:
3275 	 * conf->vf_ul_queues_number[0] = 4;  -> 4 UL queues for VF0
3276 	 * conf->vf_dl_queues_number[0] = 4;  -> 4 DL queues for VF0
3277 	 * conf->vf_ul_queues_number[1] = 2;  -> 2 UL queues for VF1
3278 	 * conf->vf_dl_queues_number[1] = 2;  -> 2 DL queues for VF1
3279 	 *
3280 	 * UL:
3281 	 * | Q_ID | VF_ID |
3282 	 * |   0  |   0   |
3283 	 * |   1  |   0   |
3284 	 * |   2  |   0   |
3285 	 * |   3  |   0   |
3286 	 * |   4  |   1   |
3287 	 * |   5  |   1   |
3288 	 *
3289 	 * DL:
3290 	 * | Q_ID | VF_ID |
3291 	 * |  32  |   0   |
3292 	 * |  33  |   0   |
3293 	 * |  34  |   0   |
3294 	 * |  35  |   0   |
3295 	 * |  36  |   1   |
3296 	 * |  37  |   1   |
3297 	 */
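	/* (The conf sketch shown after the matching VC 5GNR table applies here too.) */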
3298 	if (conf->pf_mode_en) {
3299 		payload_32 = 0x1;
3300 		for (q_id = 0; q_id < total_num_queues; ++q_id) {
3301 			address = (q_id << 2) + AGX100_QUEUE_MAP;
3302 			fpga_5gnr_reg_write_32(d->mmio_base, address, payload_32);
3303 		}
3304 	} else {
3305 		/* Calculate total number of UL and DL queues to configure. */
3306 		total_ul_q_id = total_dl_q_id = 0;
3307 		for (vf_id = 0; vf_id < FPGA_5GNR_FEC_NUM_VFS; ++vf_id) {
3308 			total_ul_q_id += conf->vf_ul_queues_number[vf_id];
3309 			total_dl_q_id += conf->vf_dl_queues_number[vf_id];
3310 		}
3311 		total_q_id = total_dl_q_id + total_ul_q_id;
3312 		/*
3313 		 * Check that the total number of queues to configure does not
3314 		 * exceed the AGX100 capabilities (total_num_queues, half UL, half DL).
3315 		 */
3316 		if ((total_ul_q_id > num_ul_queues) ||
3317 				(total_dl_q_id > num_dl_queues) ||
3318 				(total_q_id > total_num_queues)) {
3319 			rte_bbdev_log(ERR,
3320 					"AGX100 Configuration failed. Too many queues to configure: UL_Q %u, DL_Q %u, AGX100_Q %u",
3321 					total_ul_q_id, total_dl_q_id,
3322 					total_num_queues);
3323 			return -EINVAL;
3324 		}
3325 		total_ul_q_id = 0;
3326 		for (vf_id = 0; vf_id < FPGA_5GNR_FEC_NUM_VFS; ++vf_id) {
3327 			for (q_id = 0; q_id < conf->vf_ul_queues_number[vf_id];
3328 					++q_id, ++total_ul_q_id) {
3329 				address = (total_ul_q_id << 2) + AGX100_QUEUE_MAP;
3330 				payload_32 = ((0x80 + vf_id) << 16) | 0x1;
3331 				fpga_5gnr_reg_write_32(d->mmio_base, address, payload_32);
3332 			}
3333 		}
3334 		total_dl_q_id = 0;
3335 		for (vf_id = 0; vf_id < FPGA_5GNR_FEC_NUM_VFS; ++vf_id) {
3336 			for (q_id = 0; q_id < conf->vf_dl_queues_number[vf_id];
3337 					++q_id, ++total_dl_q_id) {
3338 				address = ((total_dl_q_id + num_ul_queues)
3339 						<< 2) + AGX100_QUEUE_MAP;
3340 				payload_32 = ((0x80 + vf_id) << 16) | 0x1;
3341 				fpga_5gnr_reg_write_32(d->mmio_base, address, payload_32);
3342 			}
3343 		}
3344 	}
3345 
3346 	/* Setting Load Balance Factor. */
3347 	payload_16 = (conf->dl_load_balance << 8) | (conf->ul_load_balance);
3348 	address = FPGA_5GNR_FEC_LOAD_BALANCE_FACTOR;
3349 	fpga_5gnr_reg_write_16(d->mmio_base, address, payload_16);
3350 
3351 	/* Setting length of ring descriptor entry. */
3352 	payload_16 = FPGA_5GNR_RING_DESC_ENTRY_LENGTH;
3353 	address = FPGA_5GNR_FEC_RING_DESC_LEN;
3354 	fpga_5gnr_reg_write_16(d->mmio_base, address, payload_16);
3355 
3356 	/* Queue PF/VF mapping table is ready. */
3357 	payload_8 = 0x1;
3358 	address = FPGA_5GNR_FEC_QUEUE_PF_VF_MAP_DONE;
3359 	fpga_5gnr_reg_write_8(d->mmio_base, address, payload_8);
3360 
3361 	rte_bbdev_log_debug("PF AGX100 configuration complete for %s", dev_name);
3362 
3363 #ifdef RTE_LIBRTE_BBDEV_DEBUG
3364 	print_static_reg_debug_info(d->mmio_base, d->fpga_variant);
3365 #endif
3366 	return 0;
3367 }
3368 
3369 int rte_fpga_5gnr_fec_configure(const char *dev_name, const struct rte_fpga_5gnr_fec_conf *conf)
3370 {
3371 	struct rte_bbdev *bbdev = rte_bbdev_get_named_dev(dev_name);
3372 	if (bbdev == NULL) {
3373 		rte_bbdev_log(ERR, "Invalid dev_name (%s), or device is not yet initialised",
3374 				dev_name);
3375 		return -ENODEV;
3376 	}
3377 	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(bbdev->device);
3378 	rte_bbdev_log(INFO, "Configuring device id 0x%x", pci_dev->id.device_id);
3379 	if (pci_dev->id.device_id == VC_5GNR_PF_DEVICE_ID)
3380 		return vc_5gnr_configure(dev_name, conf);
3381 	else if (pci_dev->id.device_id == AGX100_PF_DEVICE_ID)
3382 		return agx100_configure(dev_name, conf);
3383 
3384 	rte_bbdev_log(ERR, "Invalid device_id (%d)", pci_dev->id.device_id);
3385 	return -ENODEV;
3386 }
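
/*
 * Usage sketch (illustrative; "0000:1c:00.0" is an assumed PCI address for
 * a bound PF, not a value taken from this driver):
 *
 *   struct rte_fpga_5gnr_fec_conf conf = {0};
 *   conf.pf_mode_en = 1;  // map all queues to the PF
 *   int ret = rte_fpga_5gnr_fec_configure("0000:1c:00.0", &conf);
 *   if (ret < 0)
 *       printf("FPGA 5GNR FEC configuration failed: %d\n", ret);
 */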
3387 
3388 /* FPGA 5GNR FEC PCI PF address map */
3389 static const struct rte_pci_id pci_id_fpga_5gnr_fec_pf_map[] = {
3390 	{
3391 		RTE_PCI_DEVICE(AGX100_VENDOR_ID, AGX100_PF_DEVICE_ID)
3392 	},
3393 	{
3394 		RTE_PCI_DEVICE(VC_5GNR_VENDOR_ID, VC_5GNR_PF_DEVICE_ID)
3395 	},
3396 	{.device_id = 0},
3397 };
3398 
3399 static struct rte_pci_driver fpga_5gnr_fec_pci_pf_driver = {
3400 	.probe = fpga_5gnr_fec_probe,
3401 	.remove = fpga_5gnr_fec_remove,
3402 	.id_table = pci_id_fpga_5gnr_fec_pf_map,
3403 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING
3404 };
3405 
3406 /* FPGA 5GNR FEC PCI VF address map */
3407 static const struct rte_pci_id pci_id_fpga_5gnr_fec_vf_map[] = {
3408 	{
3409 		RTE_PCI_DEVICE(AGX100_VENDOR_ID, AGX100_VF_DEVICE_ID)
3410 	},
3411 	{
3412 		RTE_PCI_DEVICE(VC_5GNR_VENDOR_ID, VC_5GNR_VF_DEVICE_ID)
3413 	},
3414 	{.device_id = 0},
3415 };
3416 
3417 static struct rte_pci_driver fpga_5gnr_fec_pci_vf_driver = {
3418 	.probe = fpga_5gnr_fec_probe,
3419 	.remove = fpga_5gnr_fec_remove,
3420 	.id_table = pci_id_fpga_5gnr_fec_vf_map,
3421 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING
3422 };
3423 
3425 RTE_PMD_REGISTER_PCI(FPGA_5GNR_FEC_PF_DRIVER_NAME, fpga_5gnr_fec_pci_pf_driver);
3426 RTE_PMD_REGISTER_PCI_TABLE(FPGA_5GNR_FEC_PF_DRIVER_NAME, pci_id_fpga_5gnr_fec_pf_map);
3427 RTE_PMD_REGISTER_PCI(FPGA_5GNR_FEC_VF_DRIVER_NAME, fpga_5gnr_fec_pci_vf_driver);
3428 RTE_PMD_REGISTER_PCI_TABLE(FPGA_5GNR_FEC_VF_DRIVER_NAME, pci_id_fpga_5gnr_fec_vf_map);
3429