xref: /dpdk/drivers/baseband/fpga_5gnr_fec/rte_fpga_5gnr_fec.c (revision daa02b5cddbb8e11b31d41e2bf7bb1ae64dcae2f)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2020 Intel Corporation
3  */
4 
5 #include <unistd.h>
6 
7 #include <rte_common.h>
8 #include <rte_log.h>
9 #include <rte_dev.h>
10 #include <rte_malloc.h>
11 #include <rte_mempool.h>
12 #include <rte_errno.h>
13 #include <rte_pci.h>
14 #include <rte_bus_pci.h>
15 #include <rte_byteorder.h>
16 #ifdef RTE_BBDEV_OFFLOAD_COST
17 #include <rte_cycles.h>
18 #endif
19 
20 #include <rte_bbdev.h>
21 #include <rte_bbdev_pmd.h>
22 
23 #include "fpga_5gnr_fec.h"
24 #include "rte_pmd_fpga_5gnr_fec.h"
25 
26 #ifdef RTE_LIBRTE_BBDEV_DEBUG
27 RTE_LOG_REGISTER_DEFAULT(fpga_5gnr_fec_logtype, DEBUG);
28 #else
29 RTE_LOG_REGISTER_DEFAULT(fpga_5gnr_fec_logtype, NOTICE);
30 #endif
31 
32 #ifdef RTE_LIBRTE_BBDEV_DEBUG
33 
34 /* Read Ring Control Register of FPGA 5GNR FEC device */
35 static inline void
36 print_ring_reg_debug_info(void *mmio_base, uint32_t offset)
37 {
38 	rte_bbdev_log_debug(
39 		"FPGA MMIO base address @ %p | Ring Control Register @ offset = 0x%08"
40 		PRIx32, mmio_base, offset);
41 	rte_bbdev_log_debug(
42 		"RING_BASE_ADDR = 0x%016"PRIx64,
43 		fpga_reg_read_64(mmio_base, offset));
44 	rte_bbdev_log_debug(
45 		"RING_HEAD_ADDR = 0x%016"PRIx64,
46 		fpga_reg_read_64(mmio_base, offset +
47 				FPGA_5GNR_FEC_RING_HEAD_ADDR));
48 	rte_bbdev_log_debug(
49 		"RING_SIZE = 0x%04"PRIx16,
50 		fpga_reg_read_16(mmio_base, offset +
51 				FPGA_5GNR_FEC_RING_SIZE));
52 	rte_bbdev_log_debug(
53 		"RING_MISC = 0x%02"PRIx8,
54 		fpga_reg_read_8(mmio_base, offset +
55 				FPGA_5GNR_FEC_RING_MISC));
56 	rte_bbdev_log_debug(
57 		"RING_ENABLE = 0x%02"PRIx8,
58 		fpga_reg_read_8(mmio_base, offset +
59 				FPGA_5GNR_FEC_RING_ENABLE));
60 	rte_bbdev_log_debug(
61 		"RING_FLUSH_QUEUE_EN = 0x%02"PRIx8,
62 		fpga_reg_read_8(mmio_base, offset +
63 				FPGA_5GNR_FEC_RING_FLUSH_QUEUE_EN));
64 	rte_bbdev_log_debug(
65 		"RING_SHADOW_TAIL = 0x%04"PRIx16,
66 		fpga_reg_read_16(mmio_base, offset +
67 				FPGA_5GNR_FEC_RING_SHADOW_TAIL));
68 	rte_bbdev_log_debug(
69 		"RING_HEAD_POINT = 0x%04"PRIx16,
70 		fpga_reg_read_16(mmio_base, offset +
71 				FPGA_5GNR_FEC_RING_HEAD_POINT));
72 }
73 
74 /* Read Static Register of FPGA 5GNR FEC device */
75 static inline void
76 print_static_reg_debug_info(void *mmio_base)
77 {
78 	uint16_t config = fpga_reg_read_16(mmio_base,
79 			FPGA_5GNR_FEC_CONFIGURATION);
80 	uint8_t qmap_done = fpga_reg_read_8(mmio_base,
81 			FPGA_5GNR_FEC_QUEUE_PF_VF_MAP_DONE);
82 	uint16_t lb_factor = fpga_reg_read_16(mmio_base,
83 			FPGA_5GNR_FEC_LOAD_BALANCE_FACTOR);
84 	uint16_t ring_desc_len = fpga_reg_read_16(mmio_base,
85 			FPGA_5GNR_FEC_RING_DESC_LEN);
86 	uint16_t flr_time_out = fpga_reg_read_16(mmio_base,
87 			FPGA_5GNR_FEC_FLR_TIME_OUT);
88 
89 	rte_bbdev_log_debug("UL.DL Weights = %u.%u",
90 			((uint8_t)config), ((uint8_t)(config >> 8)));
91 	rte_bbdev_log_debug("UL.DL Load Balance = %u.%u",
92 			((uint8_t)lb_factor), ((uint8_t)(lb_factor >> 8)));
93 	rte_bbdev_log_debug("Queue-PF/VF Mapping Table = %s",
94 			(qmap_done > 0) ? "READY" : "NOT-READY");
95 	rte_bbdev_log_debug("Ring Descriptor Size = %u bytes",
96 			ring_desc_len*FPGA_RING_DESC_LEN_UNIT_BYTES);
97 	rte_bbdev_log_debug("FLR Timeout = %f usec",
98 			(float)flr_time_out*FPGA_FLR_TIMEOUT_UNIT);
99 }
100 
101 /* Print decode DMA Descriptor of FPGA 5GNR Decoder device */
102 static void
103 print_dma_dec_desc_debug_info(union fpga_dma_desc *desc)
104 {
105 	rte_bbdev_log_debug("DMA response desc %p\n"
106 		"\t-- done(%"PRIu32") | iter(%"PRIu32") | et_pass(%"PRIu32")"
107 		" | crcb_pass (%"PRIu32") | error(%"PRIu32")\n"
108 		"\t-- qm_idx(%"PRIu32") | max_iter(%"PRIu32") | "
109 		"bg_idx (%"PRIu32") | harqin_en(%"PRIu32") | zc(%"PRIu32")\n"
110 		"\t-- hbstroe_offset(%"PRIu32") | num_null (%"PRIu32") "
111 		"| irq_en(%"PRIu32")\n"
112 		"\t-- ncb(%"PRIu32") | desc_idx (%"PRIu32") | "
113 		"drop_crc24b(%"PRIu32") | RV (%"PRIu32")\n"
114 		"\t-- crc24b_ind(%"PRIu32") | et_dis (%"PRIu32")\n"
115 		"\t-- harq_input_length(%"PRIu32") | rm_e(%"PRIu32")\n"
116 		"\t-- cbs_in_op(%"PRIu32") | in_add (0x%08"PRIx32"%08"PRIx32")"
117 		"| out_add (0x%08"PRIx32"%08"PRIx32")",
118 		desc,
119 		(uint32_t)desc->dec_req.done,
120 		(uint32_t)desc->dec_req.iter,
121 		(uint32_t)desc->dec_req.et_pass,
122 		(uint32_t)desc->dec_req.crcb_pass,
123 		(uint32_t)desc->dec_req.error,
124 		(uint32_t)desc->dec_req.qm_idx,
125 		(uint32_t)desc->dec_req.max_iter,
126 		(uint32_t)desc->dec_req.bg_idx,
127 		(uint32_t)desc->dec_req.harqin_en,
128 		(uint32_t)desc->dec_req.zc,
129 		(uint32_t)desc->dec_req.hbstroe_offset,
130 		(uint32_t)desc->dec_req.num_null,
131 		(uint32_t)desc->dec_req.irq_en,
132 		(uint32_t)desc->dec_req.ncb,
133 		(uint32_t)desc->dec_req.desc_idx,
134 		(uint32_t)desc->dec_req.drop_crc24b,
135 		(uint32_t)desc->dec_req.rv,
136 		(uint32_t)desc->dec_req.crc24b_ind,
137 		(uint32_t)desc->dec_req.et_dis,
138 		(uint32_t)desc->dec_req.harq_input_length,
139 		(uint32_t)desc->dec_req.rm_e,
140 		(uint32_t)desc->dec_req.cbs_in_op,
141 		(uint32_t)desc->dec_req.in_addr_hi,
142 		(uint32_t)desc->dec_req.in_addr_lw,
143 		(uint32_t)desc->dec_req.out_addr_hi,
144 		(uint32_t)desc->dec_req.out_addr_lw);
145 	uint32_t *word = (uint32_t *) desc;
146 	rte_bbdev_log_debug("%08"PRIx32"\n%08"PRIx32"\n%08"PRIx32"\n%08"PRIx32"\n"
147 			"%08"PRIx32"\n%08"PRIx32"\n%08"PRIx32"\n%08"PRIx32"\n",
148 			word[0], word[1], word[2], word[3],
149 			word[4], word[5], word[6], word[7]);
150 }
151 
152 /* Print encode DMA Descriptor of FPGA 5GNR encoder device */
153 static void
154 print_dma_enc_desc_debug_info(union fpga_dma_desc *desc)
155 {
156 	rte_bbdev_log_debug("DMA response desc %p\n"
157 			"%"PRIu32" %"PRIu32"\n"
158 			"K' %"PRIu32" E %"PRIu32" desc %"PRIu32" Z %"PRIu32"\n"
159 			"BG %"PRIu32" Qm %"PRIu32" CRC %"PRIu32" IRQ %"PRIu32"\n"
160 			"k0 %"PRIu32" Ncb %"PRIu32" F %"PRIu32"\n",
161 			desc,
162 			(uint32_t)desc->enc_req.done,
163 			(uint32_t)desc->enc_req.error,
164 
165 			(uint32_t)desc->enc_req.k_,
166 			(uint32_t)desc->enc_req.rm_e,
167 			(uint32_t)desc->enc_req.desc_idx,
168 			(uint32_t)desc->enc_req.zc,
169 
170 			(uint32_t)desc->enc_req.bg_idx,
171 			(uint32_t)desc->enc_req.qm_idx,
172 			(uint32_t)desc->enc_req.crc_en,
173 			(uint32_t)desc->enc_req.irq_en,
174 
175 			(uint32_t)desc->enc_req.k0,
176 			(uint32_t)desc->enc_req.ncb,
177 			(uint32_t)desc->enc_req.num_null);
178 	uint32_t *word = (uint32_t *) desc;
179 	rte_bbdev_log_debug("%08"PRIx32"\n%08"PRIx32"\n%08"PRIx32"\n%08"PRIx32"\n"
180 			"%08"PRIx32"\n%08"PRIx32"\n%08"PRIx32"\n%08"PRIx32"\n",
181 			word[0], word[1], word[2], word[3],
182 			word[4], word[5], word[6], word[7]);
183 }
184 
185 #endif
186 
187 static int
188 fpga_setup_queues(struct rte_bbdev *dev, uint16_t num_queues, int socket_id)
189 {
190 	/* Number of queues bound to a PF/VF */
191 	uint32_t hw_q_num = 0;
192 	uint32_t ring_size, payload, address, q_id, offset;
193 	rte_iova_t phys_addr;
194 	struct fpga_ring_ctrl_reg ring_reg;
195 	struct fpga_5gnr_fec_device *fpga_dev = dev->data->dev_private;
196 
197 	address = FPGA_5GNR_FEC_QUEUE_PF_VF_MAP_DONE;
198 	if (!(fpga_reg_read_32(fpga_dev->mmio_base, address) & 0x1)) {
199 		rte_bbdev_log(ERR,
200 				"Queue-PF/VF mapping is not set! Was PF configured for device (%s) ?",
201 				dev->data->name);
202 		return -EPERM;
203 	}
204 
205 	/* Clear queue registers structure */
206 	memset(&ring_reg, 0, sizeof(struct fpga_ring_ctrl_reg));
207 
208 	/* Scan the queue map.
209 	 * If a queue is valid and mapped to the calling PF/VF, the read
210 	 * returns its hardware queue ID; otherwise it returns
211 	 * FPGA_INVALID_HW_QUEUE_ID.
212 	 */
213 	for (q_id = 0; q_id < FPGA_TOTAL_NUM_QUEUES; ++q_id) {
214 		uint32_t hw_q_id = fpga_reg_read_32(fpga_dev->mmio_base,
215 				FPGA_5GNR_FEC_QUEUE_MAP + (q_id << 2));
216 
217 		rte_bbdev_log_debug("%s: queue ID: %u, HW queue ID: %u",
218 				dev->device->name, q_id, hw_q_id);
219 
220 		if (hw_q_id != FPGA_INVALID_HW_QUEUE_ID) {
221 			fpga_dev->q_bound_bit_map |= (1ULL << q_id);
222 			/* Clear queue register of found queue */
223 			offset = FPGA_5GNR_FEC_RING_CTRL_REGS +
224 				(sizeof(struct fpga_ring_ctrl_reg) * q_id);
225 			fpga_ring_reg_write(fpga_dev->mmio_base,
226 					offset, ring_reg);
227 			++hw_q_num;
228 		}
229 	}
230 	if (hw_q_num == 0) {
231 		rte_bbdev_log(ERR,
232 			"No HW queues assigned to this device. Probably this is a VF configured for PF mode. Check device configuration!");
233 		return -ENODEV;
234 	}
235 
236 	if (num_queues > hw_q_num) {
237 		rte_bbdev_log(ERR,
238 			"Not enough queues for device %s! Requested: %u, available: %u",
239 			dev->device->name, num_queues, hw_q_num);
240 		return -EINVAL;
241 	}
242 
243 	ring_size = FPGA_RING_MAX_SIZE * sizeof(struct fpga_dma_dec_desc);
244 
245 	/* Enforce 32 byte alignment */
246 	RTE_BUILD_BUG_ON((RTE_CACHE_LINE_SIZE % 32) != 0);
247 
248 	/* Allocate memory for SW descriptor rings */
249 	fpga_dev->sw_rings = rte_zmalloc_socket(dev->device->driver->name,
250 			num_queues * ring_size, RTE_CACHE_LINE_SIZE,
251 			socket_id);
252 	if (fpga_dev->sw_rings == NULL) {
253 		rte_bbdev_log(ERR,
254 				"Failed to allocate memory for %s:%u sw_rings",
255 				dev->device->driver->name, dev->data->dev_id);
256 		return -ENOMEM;
257 	}
258 
259 	fpga_dev->sw_rings_phys = rte_malloc_virt2iova(fpga_dev->sw_rings);
260 	fpga_dev->sw_ring_size = ring_size;
261 	fpga_dev->sw_ring_max_depth = FPGA_RING_MAX_SIZE;
262 
263 	/* Allocate memory for ring flush status */
264 	fpga_dev->flush_queue_status = rte_zmalloc_socket(NULL,
265 			sizeof(uint64_t), RTE_CACHE_LINE_SIZE, socket_id);
266 	if (fpga_dev->flush_queue_status == NULL) {
267 		rte_bbdev_log(ERR,
268 				"Failed to allocate memory for %s:%u flush_queue_status",
269 				dev->device->driver->name, dev->data->dev_id);
270 		return -ENOMEM;
271 	}
272 
273 	/* Set the flush status address registers */
274 	phys_addr = rte_malloc_virt2iova(fpga_dev->flush_queue_status);
275 
276 	address = FPGA_5GNR_FEC_VFQ_FLUSH_STATUS_LW;
277 	payload = (uint32_t)(phys_addr);
278 	fpga_reg_write_32(fpga_dev->mmio_base, address, payload);
279 
280 	address = FPGA_5GNR_FEC_VFQ_FLUSH_STATUS_HI;
281 	payload = (uint32_t)(phys_addr >> 32);
282 	fpga_reg_write_32(fpga_dev->mmio_base, address, payload);
283 
284 	return 0;
285 }
286 
287 static int
288 fpga_dev_close(struct rte_bbdev *dev)
289 {
290 	struct fpga_5gnr_fec_device *fpga_dev = dev->data->dev_private;
291 
292 	rte_free(fpga_dev->sw_rings);
293 	rte_free(fpga_dev->flush_queue_status);
294 
295 	return 0;
296 }
297 
298 static void
299 fpga_dev_info_get(struct rte_bbdev *dev,
300 		struct rte_bbdev_driver_info *dev_info)
301 {
302 	struct fpga_5gnr_fec_device *d = dev->data->dev_private;
303 	uint32_t q_id = 0;
304 
305 	static const struct rte_bbdev_op_cap bbdev_capabilities[] = {
306 		{
307 			.type   = RTE_BBDEV_OP_LDPC_ENC,
308 			.cap.ldpc_enc = {
309 				.capability_flags =
310 						RTE_BBDEV_LDPC_RATE_MATCH |
311 						RTE_BBDEV_LDPC_ENC_INTERRUPTS |
312 						RTE_BBDEV_LDPC_CRC_24B_ATTACH,
313 				.num_buffers_src =
314 						RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
315 				.num_buffers_dst =
316 						RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
317 			}
318 		},
319 		{
320 		.type   = RTE_BBDEV_OP_LDPC_DEC,
321 		.cap.ldpc_dec = {
322 			.capability_flags =
323 				RTE_BBDEV_LDPC_CRC_TYPE_24B_CHECK |
324 				RTE_BBDEV_LDPC_CRC_TYPE_24B_DROP |
325 				RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE |
326 				RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE |
327 				RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE |
328 				RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_IN_ENABLE |
329 				RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_OUT_ENABLE |
330 				RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK |
331 				RTE_BBDEV_LDPC_DEC_INTERRUPTS |
332 				RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_FILLERS,
333 			.llr_size = 6,
334 			.llr_decimals = 2,
335 			.num_buffers_src =
336 					RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
337 			.num_buffers_hard_out =
338 					RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
339 			.num_buffers_soft_out = 0,
340 		}
341 		},
342 		RTE_BBDEV_END_OF_CAPABILITIES_LIST()
343 	};
344 
345 	/* Check the HARQ DDR size available */
346 	uint8_t timeout_counter = 0;
347 	uint32_t harq_buf_ready = fpga_reg_read_32(d->mmio_base,
348 			FPGA_5GNR_FEC_HARQ_BUF_SIZE_RDY_REGS);
349 	while (harq_buf_ready != 1) {
350 		usleep(FPGA_TIMEOUT_CHECK_INTERVAL);
351 		timeout_counter++;
352 		harq_buf_ready = fpga_reg_read_32(d->mmio_base,
353 				FPGA_5GNR_FEC_HARQ_BUF_SIZE_RDY_REGS);
354 		if (timeout_counter > FPGA_HARQ_RDY_TIMEOUT) {
355 			rte_bbdev_log(ERR, "HARQ Buffer not ready %d",
356 					harq_buf_ready);
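			/* Stop waiting after the timeout and fall through
			 * to read the size register as-is.
			 */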
357 			harq_buf_ready = 1;
358 		}
359 	}
360 	uint32_t harq_buf_size = fpga_reg_read_32(d->mmio_base,
361 			FPGA_5GNR_FEC_HARQ_BUF_SIZE_REGS);
362 
363 	static struct rte_bbdev_queue_conf default_queue_conf;
364 	default_queue_conf.socket = dev->data->socket_id;
365 	default_queue_conf.queue_size = FPGA_RING_MAX_SIZE;
366 
367 	dev_info->driver_name = dev->device->driver->name;
368 	dev_info->queue_size_lim = FPGA_RING_MAX_SIZE;
369 	dev_info->hardware_accelerated = true;
370 	dev_info->min_alignment = 64;
371 	dev_info->harq_buffer_size = (harq_buf_size >> 10) + 1;
372 	dev_info->default_queue_conf = default_queue_conf;
373 	dev_info->capabilities = bbdev_capabilities;
374 	dev_info->cpu_flag_reqs = NULL;
375 	dev_info->data_endianness = RTE_LITTLE_ENDIAN;
376 
377 	/* Count the number of queues assigned to the device */
378 	dev_info->max_num_queues = 0;
379 	for (q_id = 0; q_id < FPGA_TOTAL_NUM_QUEUES; ++q_id) {
380 		uint32_t hw_q_id = fpga_reg_read_32(d->mmio_base,
381 				FPGA_5GNR_FEC_QUEUE_MAP + (q_id << 2));
382 		if (hw_q_id != FPGA_INVALID_HW_QUEUE_ID)
383 			dev_info->max_num_queues++;
384 	}
385 }
386 
387 /**
388  * Find the index of a queue bound to the current PF/VF that is not yet
389  * assigned. Return -1 when there is no available queue.
390  */
391 static inline int
392 fpga_find_free_queue_idx(struct rte_bbdev *dev,
393 		const struct rte_bbdev_queue_conf *conf)
394 {
395 	struct fpga_5gnr_fec_device *d = dev->data->dev_private;
396 	uint64_t q_idx;
397 	uint8_t i = 0;
398 	uint8_t range = FPGA_TOTAL_NUM_QUEUES >> 1;
399 
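	/* Decode queues occupy the lower half of the queue map; encode
	 * queues start at FPGA_NUM_DL_QUEUES and use the upper half.
	 */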
400 	if (conf->op_type == RTE_BBDEV_OP_LDPC_ENC) {
401 		i = FPGA_NUM_DL_QUEUES;
402 		range = FPGA_TOTAL_NUM_QUEUES;
403 	}
404 
405 	for (; i < range; ++i) {
406 		q_idx = 1ULL << i;
407 		/* Check if index of queue is bound to current PF/VF */
408 		if (d->q_bound_bit_map & q_idx)
409 			/* Check if found queue was not already assigned */
410 			if (!(d->q_assigned_bit_map & q_idx)) {
411 				d->q_assigned_bit_map |= q_idx;
412 				return i;
413 			}
414 	}
415 
416 	rte_bbdev_log(INFO, "Failed to find free queue on %s", dev->data->name);
417 
418 	return -1;
419 }
420 
421 static int
422 fpga_queue_setup(struct rte_bbdev *dev, uint16_t queue_id,
423 		const struct rte_bbdev_queue_conf *conf)
424 {
425 	uint32_t address, ring_offset;
426 	struct fpga_5gnr_fec_device *d = dev->data->dev_private;
427 	struct fpga_queue *q;
428 	int8_t q_idx;
429 
430 	/* Check if there is a free queue to assign */
431 	q_idx = fpga_find_free_queue_idx(dev, conf);
432 	if (q_idx == -1)
433 		return -1;
434 
435 	/* Allocate the queue data structure. */
436 	q = rte_zmalloc_socket(dev->device->driver->name, sizeof(*q),
437 			RTE_CACHE_LINE_SIZE, conf->socket);
438 	if (q == NULL) {
439 		/* Mark queue as un-assigned */
440 		d->q_assigned_bit_map &= ~(1ULL << q_idx);
441 		rte_bbdev_log(ERR, "Failed to allocate queue memory");
442 		return -ENOMEM;
443 	}
444 
445 	q->d = d;
446 	q->q_idx = q_idx;
447 
448 	/* Set ring_base_addr */
449 	q->ring_addr = RTE_PTR_ADD(d->sw_rings, (d->sw_ring_size * queue_id));
450 	q->ring_ctrl_reg.ring_base_addr = d->sw_rings_phys +
451 			(d->sw_ring_size * queue_id);
452 
453 	/* Allocate memory for the completion head variable */
454 	q->ring_head_addr = rte_zmalloc_socket(dev->device->driver->name,
455 			sizeof(uint64_t), RTE_CACHE_LINE_SIZE, conf->socket);
456 	if (q->ring_head_addr == NULL) {
457 		/* Mark queue as un-assigned */
458 		d->q_assigned_bit_map &= ~(1ULL << q_idx);
459 		rte_free(q);
460 		rte_bbdev_log(ERR,
461 				"Failed to allocate memory for %s:%u completion_head",
462 				dev->device->driver->name, dev->data->dev_id);
463 		return -ENOMEM;
464 	}
465 	/* Set ring_head_addr */
466 	q->ring_ctrl_reg.ring_head_addr =
467 			rte_malloc_virt2iova(q->ring_head_addr);
468 
469 	/* Clear shadow_completion_head */
470 	q->shadow_completion_head = 0;
471 
472 	/* Set ring_size */
473 	if (conf->queue_size > FPGA_RING_MAX_SIZE) {
474 		/* Mark queue as un-assigned */
475 		d->q_assigned_bit_map &= ~(1ULL << q_idx);
476 		rte_free(q->ring_head_addr);
477 		rte_free(q);
478 		rte_bbdev_log(ERR,
479 				"Size of queue is too big %d (MAX: %d ) for %s:%u",
480 				conf->queue_size, FPGA_RING_MAX_SIZE,
481 				dev->device->driver->name, dev->data->dev_id);
482 		return -EINVAL;
483 	}
484 	q->ring_ctrl_reg.ring_size = conf->queue_size;
485 
486 	/* Set miscellaneous FPGA registers */
487 	/* Max iteration number for TTI mitigation - todo */
488 	q->ring_ctrl_reg.max_ul_dec = 0;
489 	/* Enable max iteration number for TTI - todo */
490 	q->ring_ctrl_reg.max_ul_dec_en = 0;
491 
492 	/* Enable the ring */
493 	q->ring_ctrl_reg.enable = 1;
494 
495 	/* Set FPGA head_point and tail registers */
496 	q->ring_ctrl_reg.head_point = q->tail = 0;
497 
498 	/* Set FPGA shadow_tail register */
499 	q->ring_ctrl_reg.shadow_tail = q->tail;
500 
501 	/* Calculates the ring offset for found queue */
502 	ring_offset = FPGA_5GNR_FEC_RING_CTRL_REGS +
503 			(sizeof(struct fpga_ring_ctrl_reg) * q_idx);
504 
505 	/* Set FPGA Ring Control Registers */
506 	fpga_ring_reg_write(d->mmio_base, ring_offset, q->ring_ctrl_reg);
507 
508 	/* Store MMIO register of shadow_tail */
509 	address = ring_offset + FPGA_5GNR_FEC_RING_SHADOW_TAIL;
510 	q->shadow_tail_addr = RTE_PTR_ADD(d->mmio_base, address);
511 
512 	q->head_free_desc = q->tail;
513 
514 	/* Set wrap mask */
515 	q->sw_ring_wrap_mask = conf->queue_size - 1;
516 
517 	rte_bbdev_log_debug("Setup dev%u q%u: queue_idx=%u",
518 			dev->data->dev_id, queue_id, q->q_idx);
519 
520 	dev->data->queues[queue_id].queue_private = q;
521 
522 	rte_bbdev_log_debug("BBDEV queue[%d] set up for FPGA queue[%d]",
523 			queue_id, q_idx);
524 
525 #ifdef RTE_LIBRTE_BBDEV_DEBUG
526 	/* Read FPGA Ring Control Registers after configuration*/
527 	print_ring_reg_debug_info(d->mmio_base, ring_offset);
528 #endif
529 	return 0;
530 }
531 
532 static int
533 fpga_queue_release(struct rte_bbdev *dev, uint16_t queue_id)
534 {
535 	struct fpga_5gnr_fec_device *d = dev->data->dev_private;
536 	struct fpga_queue *q = dev->data->queues[queue_id].queue_private;
537 	struct fpga_ring_ctrl_reg ring_reg;
538 	uint32_t offset;
539 
540 	rte_bbdev_log_debug("FPGA Queue[%d] released", queue_id);
541 
542 	if (q != NULL) {
543 		memset(&ring_reg, 0, sizeof(struct fpga_ring_ctrl_reg));
544 		offset = FPGA_5GNR_FEC_RING_CTRL_REGS +
545 			(sizeof(struct fpga_ring_ctrl_reg) * q->q_idx);
546 		/* Disable queue */
547 		fpga_reg_write_8(d->mmio_base,
548 				offset + FPGA_5GNR_FEC_RING_ENABLE, 0x00);
549 		/* Clear queue registers */
550 		fpga_ring_reg_write(d->mmio_base, offset, ring_reg);
551 
552 		/* Mark the Queue as un-assigned */
553 		d->q_assigned_bit_map &= ~(1ULL << q->q_idx);
554 		rte_free(q->ring_head_addr);
555 		rte_free(q);
556 		dev->data->queues[queue_id].queue_private = NULL;
557 	}
558 
559 	return 0;
560 }
561 
562 /* Function starts a device queue. */
563 static int
564 fpga_queue_start(struct rte_bbdev *dev, uint16_t queue_id)
565 {
566 	struct fpga_5gnr_fec_device *d = dev->data->dev_private;
567 #ifdef RTE_LIBRTE_BBDEV_DEBUG
568 	if (d == NULL) {
569 		rte_bbdev_log(ERR, "Invalid device pointer");
570 		return -1;
571 	}
572 #endif
573 	struct fpga_queue *q = dev->data->queues[queue_id].queue_private;
574 	uint32_t offset = FPGA_5GNR_FEC_RING_CTRL_REGS +
575 			(sizeof(struct fpga_ring_ctrl_reg) * q->q_idx);
576 	uint8_t enable = 0x01;
577 	uint16_t zero = 0x0000;
578 
579 	/* Clear queue head and tail variables */
580 	q->tail = q->head_free_desc = 0;
581 
582 	/* Clear FPGA head_point and tail registers */
583 	fpga_reg_write_16(d->mmio_base, offset + FPGA_5GNR_FEC_RING_HEAD_POINT,
584 			zero);
585 	fpga_reg_write_16(d->mmio_base, offset + FPGA_5GNR_FEC_RING_SHADOW_TAIL,
586 			zero);
587 
588 	/* Enable queue */
589 	fpga_reg_write_8(d->mmio_base, offset + FPGA_5GNR_FEC_RING_ENABLE,
590 			enable);
591 
592 	rte_bbdev_log_debug("FPGA Queue[%d] started", queue_id);
593 	return 0;
594 }
595 
596 /* Function stops a device queue. */
597 static int
598 fpga_queue_stop(struct rte_bbdev *dev, uint16_t queue_id)
599 {
600 	struct fpga_5gnr_fec_device *d = dev->data->dev_private;
601 #ifdef RTE_LIBRTE_BBDEV_DEBUG
602 	if (d == NULL) {
603 		rte_bbdev_log(ERR, "Invalid device pointer");
604 		return -1;
605 	}
606 #endif
607 	struct fpga_queue *q = dev->data->queues[queue_id].queue_private;
608 	uint32_t offset = FPGA_5GNR_FEC_RING_CTRL_REGS +
609 			(sizeof(struct fpga_ring_ctrl_reg) * q->q_idx);
610 	uint8_t payload = 0x01;
611 	uint8_t counter = 0;
612 	uint8_t timeout = FPGA_QUEUE_FLUSH_TIMEOUT_US /
613 			FPGA_TIMEOUT_CHECK_INTERVAL;
614 
615 	/* Set flush_queue_en bit to trigger queue flushing */
616 	fpga_reg_write_8(d->mmio_base,
617 			offset + FPGA_5GNR_FEC_RING_FLUSH_QUEUE_EN, payload);
618 
619 	/* Check whether the queue flush has completed.
620 	 * The FPGA updates the completion flag once queue flushing is done.
621 	 * If the flag is not updated within 1 ms, the flush is considered
622 	 * a failure.
623 	 */
624 	while (!(*((volatile uint8_t *)d->flush_queue_status + q->q_idx)
625 			& payload)) {
626 		if (counter > timeout) {
627 			rte_bbdev_log(ERR, "FPGA Queue Flush failed for queue %d",
628 					queue_id);
629 			return -1;
630 		}
631 		usleep(FPGA_TIMEOUT_CHECK_INTERVAL);
632 		counter++;
633 	}
634 
635 	/* Disable queue */
636 	payload = 0x00;
637 	fpga_reg_write_8(d->mmio_base, offset + FPGA_5GNR_FEC_RING_ENABLE,
638 			payload);
639 
640 	rte_bbdev_log_debug("FPGA Queue[%d] stopped", queue_id);
641 	return 0;
642 }
643 
644 static inline uint16_t
645 get_queue_id(struct rte_bbdev_data *data, uint8_t q_idx)
646 {
647 	uint16_t queue_id;
648 
649 	for (queue_id = 0; queue_id < data->num_queues; ++queue_id) {
650 		struct fpga_queue *q = data->queues[queue_id].queue_private;
651 		if (q != NULL && q->q_idx == q_idx)
652 			return queue_id;
653 	}
654 
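	/* Not found: -1 wraps to 0xFFFF in the uint16_t return value,
	 * which is what the interrupt handler checks for.
	 */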
655 	return -1;
656 }
657 
658 /* Interrupt handler triggered by FPGA dev for handling specific interrupt */
659 static void
660 fpga_dev_interrupt_handler(void *cb_arg)
661 {
662 	struct rte_bbdev *dev = cb_arg;
663 	struct fpga_5gnr_fec_device *fpga_dev = dev->data->dev_private;
664 	struct fpga_queue *q;
665 	uint64_t ring_head;
666 	uint64_t q_idx;
667 	uint16_t queue_id;
668 	uint8_t i;
669 
670 	/* Scan queue assigned to this device */
671 	for (i = 0; i < FPGA_TOTAL_NUM_QUEUES; ++i) {
672 		q_idx = 1ULL << i;
673 		if (fpga_dev->q_bound_bit_map & q_idx) {
674 			queue_id = get_queue_id(dev->data, i);
675 			if (queue_id == (uint16_t) -1)
676 				continue;
677 
678 			/* Check if completion head was changed */
679 			q = dev->data->queues[queue_id].queue_private;
680 			ring_head = *q->ring_head_addr;
681 			if (q->shadow_completion_head != ring_head &&
682 				q->irq_enable == 1) {
683 				q->shadow_completion_head = ring_head;
684 				rte_bbdev_pmd_callback_process(
685 						dev,
686 						RTE_BBDEV_EVENT_DEQUEUE,
687 						&queue_id);
688 			}
689 		}
690 	}
691 }
692 
693 static int
694 fpga_queue_intr_enable(struct rte_bbdev *dev, uint16_t queue_id)
695 {
696 	struct fpga_queue *q = dev->data->queues[queue_id].queue_private;
697 
698 	if (!rte_intr_cap_multiple(dev->intr_handle))
699 		return -ENOTSUP;
700 
701 	q->irq_enable = 1;
702 
703 	return 0;
704 }
705 
706 static int
707 fpga_queue_intr_disable(struct rte_bbdev *dev, uint16_t queue_id)
708 {
709 	struct fpga_queue *q = dev->data->queues[queue_id].queue_private;
710 	q->irq_enable = 0;
711 
712 	return 0;
713 }
714 
715 static int
716 fpga_intr_enable(struct rte_bbdev *dev)
717 {
718 	int ret;
719 	uint8_t i;
720 
721 	if (!rte_intr_cap_multiple(dev->intr_handle)) {
722 		rte_bbdev_log(ERR, "Multiple intr vector is not supported by FPGA (%s)",
723 				dev->data->name);
724 		return -ENOTSUP;
725 	}
726 
727 	/* Create an event file descriptor for each of the 64 queues. Event fds
728 	 * are mapped to FPGA IRQs in rte_intr_enable(). This is a 1:1 mapping
729 	 * where the IRQ number translates directly to the queue number.
730 	 *
731 	 * Only 63 (FPGA_NUM_INTR_VEC) event fds are created, because
732 	 * rte_intr_enable() maps the first IRQ to the already created
733 	 * interrupt event file descriptor (intr_handle->fd).
734 	 */
735 	if (rte_intr_efd_enable(dev->intr_handle, FPGA_NUM_INTR_VEC)) {
736 		rte_bbdev_log(ERR, "Failed to create fds for %u queues",
737 				dev->data->num_queues);
738 		return -1;
739 	}
740 
741 	/* TODO: Each event file descriptor is overwritten by the interrupt
742 	 * event file descriptor, which is added to the epoll observed list.
743 	 * This ensures that the callback assigned to that descriptor is
744 	 * invoked when any FPGA queue issues an interrupt.
745 	 */
746 	for (i = 0; i < FPGA_NUM_INTR_VEC; ++i)
747 		dev->intr_handle->efds[i] = dev->intr_handle->fd;
748 
749 	if (!dev->intr_handle->intr_vec) {
750 		dev->intr_handle->intr_vec = rte_zmalloc("intr_vec",
751 				dev->data->num_queues * sizeof(int), 0);
752 		if (!dev->intr_handle->intr_vec) {
753 			rte_bbdev_log(ERR, "Failed to allocate %u vectors",
754 					dev->data->num_queues);
755 			return -ENOMEM;
756 		}
757 	}
758 
759 	ret = rte_intr_enable(dev->intr_handle);
760 	if (ret < 0) {
761 		rte_bbdev_log(ERR,
762 				"Couldn't enable interrupts for device: %s",
763 				dev->data->name);
764 		return ret;
765 	}
766 
767 	ret = rte_intr_callback_register(dev->intr_handle,
768 			fpga_dev_interrupt_handler, dev);
769 	if (ret < 0) {
770 		rte_bbdev_log(ERR,
771 				"Couldn't register interrupt callback for device: %s",
772 				dev->data->name);
773 		return ret;
774 	}
775 
776 	return 0;
777 }
778 
779 static const struct rte_bbdev_ops fpga_ops = {
780 	.setup_queues = fpga_setup_queues,
781 	.intr_enable = fpga_intr_enable,
782 	.close = fpga_dev_close,
783 	.info_get = fpga_dev_info_get,
784 	.queue_setup = fpga_queue_setup,
785 	.queue_stop = fpga_queue_stop,
786 	.queue_start = fpga_queue_start,
787 	.queue_release = fpga_queue_release,
788 	.queue_intr_enable = fpga_queue_intr_enable,
789 	.queue_intr_disable = fpga_queue_intr_disable
790 };
791 
792 static inline void
793 fpga_dma_enqueue(struct fpga_queue *q, uint16_t num_desc,
794 		struct rte_bbdev_stats *queue_stats)
795 {
796 #ifdef RTE_BBDEV_OFFLOAD_COST
797 	uint64_t start_time = 0;
798 	queue_stats->acc_offload_cycles = 0;
799 #else
800 	RTE_SET_USED(queue_stats);
801 #endif
802 
803 	/* Update tail and shadow_tail register */
804 	q->tail = (q->tail + num_desc) & q->sw_ring_wrap_mask;
805 
806 	rte_wmb();
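	/* Make the descriptor writes visible to HW before the doorbell write below */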
807 
808 #ifdef RTE_BBDEV_OFFLOAD_COST
809 	/* Start time measurement for enqueue function offload. */
810 	start_time = rte_rdtsc_precise();
811 #endif
812 	mmio_write_16(q->shadow_tail_addr, q->tail);
813 
814 #ifdef RTE_BBDEV_OFFLOAD_COST
815 	rte_wmb();
816 	queue_stats->acc_offload_cycles += rte_rdtsc_precise() - start_time;
817 #endif
818 }
819 
820 /* Read flag value 0/1/ from bitmap */
821 static inline bool
822 check_bit(uint32_t bitmap, uint32_t bitmask)
823 {
824 	return bitmap & bitmask;
825 }
826 
827 /* Print an error if a descriptor error has occurred.
828  * Return 0 on success, 1 on failure.
829  */
830 static inline int
831 check_desc_error(uint32_t error_code) {
832 	switch (error_code) {
833 	case DESC_ERR_NO_ERR:
834 		return 0;
835 	case DESC_ERR_K_P_OUT_OF_RANGE:
836 		rte_bbdev_log(ERR, "Encode block size K' is out of range");
837 		break;
838 	case DESC_ERR_Z_C_NOT_LEGAL:
839 		rte_bbdev_log(ERR, "Zc is illegal");
840 		break;
841 	case DESC_ERR_DESC_OFFSET_ERR:
842 		rte_bbdev_log(ERR,
843 				"Queue offset does not meet the expectation in the FPGA"
844 				);
845 		break;
846 	case DESC_ERR_DESC_READ_FAIL:
847 		rte_bbdev_log(ERR, "Unsuccessful completion for descriptor read");
848 		break;
849 	case DESC_ERR_DESC_READ_TIMEOUT:
850 		rte_bbdev_log(ERR, "Descriptor read time-out");
851 		break;
852 	case DESC_ERR_DESC_READ_TLP_POISONED:
853 		rte_bbdev_log(ERR, "Descriptor read TLP poisoned");
854 		break;
855 	case DESC_ERR_CB_READ_FAIL:
856 		rte_bbdev_log(ERR, "Unsuccessful completion for code block");
857 		break;
858 	case DESC_ERR_CB_READ_TIMEOUT:
859 		rte_bbdev_log(ERR, "Code block read time-out");
860 		break;
861 	case DESC_ERR_CB_READ_TLP_POISONED:
862 		rte_bbdev_log(ERR, "Code block read TLP poisoned");
863 		break;
864 	case DESC_ERR_HBSTORE_ERR:
865 		rte_bbdev_log(ERR, "Hbstroe exceeds HARQ buffer size.");
866 		break;
867 	default:
868 		rte_bbdev_log(ERR, "Descriptor error unknown error code %u",
869 				error_code);
870 		break;
871 	}
872 	return 1;
873 }
874 
875 /* Compute value of k0.
876  * Based on 3GPP 38.212 Table 5.4.2.1-2
877  * Starting position of different redundancy versions, k0
878  */
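/* For example, with BG1 and a full soft buffer (n_cb == 66 * Zc), rv_index 2
 * gives k0 = K0_2_1 * Zc; in the LBRM case the same constant is first scaled
 * by n_cb / N (constants assumed to match the Table 5.4.2.1-2 numerators).
 */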
879 static inline uint16_t
880 get_k0(uint16_t n_cb, uint16_t z_c, uint8_t bg, uint8_t rv_index)
881 {
882 	if (rv_index == 0)
883 		return 0;
884 	uint16_t n = (bg == 1 ? N_ZC_1 : N_ZC_2) * z_c;
885 	if (n_cb == n) {
886 		if (rv_index == 1)
887 			return (bg == 1 ? K0_1_1 : K0_1_2) * z_c;
888 		else if (rv_index == 2)
889 			return (bg == 1 ? K0_2_1 : K0_2_2) * z_c;
890 		else
891 			return (bg == 1 ? K0_3_1 : K0_3_2) * z_c;
892 	}
893 	/* LBRM case - includes a division by N */
894 	if (rv_index == 1)
895 		return (((bg == 1 ? K0_1_1 : K0_1_2) * n_cb)
896 				/ n) * z_c;
897 	else if (rv_index == 2)
898 		return (((bg == 1 ? K0_2_1 : K0_2_2) * n_cb)
899 				/ n) * z_c;
900 	else
901 		return (((bg == 1 ? K0_3_1 : K0_3_2) * n_cb)
902 				/ n) * z_c;
903 }
904 
905 /**
906  * Set DMA descriptor for encode operation (1 Code Block)
907  *
908  * @param op
909  *   Pointer to a single encode operation.
910  * @param desc
911  *   Pointer to DMA descriptor.
912  * @param input
913  *   Pointer to the input data which will be encoded.
914  * @param output
915  *   Pointer to the output buffer.
916  * @param k_
917  *   K' value (CB size in bits, without filler bits).
918  * @param e
919  *   E value (length of the rate-matched output in bits).
920  * @param in_offset
921  *   Input offset in the rte_mbuf, i.e. where the input data starts.
922  * @param out_offset
923  *   Output offset in the rte_mbuf, i.e. where the output will be stored.
924  * @param desc_offset
925  *   Offset of this descriptor in the ring.
926  * @param cbs_in_op
927  *   Number of CBs contained in one operation.
928  */
929 static inline int
930 fpga_dma_desc_te_fill(struct rte_bbdev_enc_op *op,
931 		struct fpga_dma_enc_desc *desc, struct rte_mbuf *input,
932 		struct rte_mbuf *output, uint16_t k_,  uint16_t e,
933 		uint32_t in_offset, uint32_t out_offset, uint16_t desc_offset,
934 		uint8_t cbs_in_op)
935 {
936 	/* reset */
937 	desc->done = 0;
938 	desc->error = 0;
939 	desc->k_ = k_;
940 	desc->rm_e = e;
941 	desc->desc_idx = desc_offset;
942 	desc->zc = op->ldpc_enc.z_c;
943 	desc->bg_idx = op->ldpc_enc.basegraph - 1;
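	/* HW encodes modulation order as Qm/2, e.g. 64QAM (Qm = 6) maps to 3 */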
944 	desc->qm_idx = op->ldpc_enc.q_m / 2;
945 	desc->crc_en = check_bit(op->ldpc_enc.op_flags,
946 			RTE_BBDEV_LDPC_CRC_24B_ATTACH);
947 	desc->irq_en = 0;
948 	desc->k0 = get_k0(op->ldpc_enc.n_cb, op->ldpc_enc.z_c,
949 			op->ldpc_enc.basegraph, op->ldpc_enc.rv_index);
950 	desc->ncb = op->ldpc_enc.n_cb;
951 	desc->num_null = op->ldpc_enc.n_filler;
952 	/* Set inbound data buffer address */
953 	desc->in_addr_hi = (uint32_t)(
954 			rte_pktmbuf_iova_offset(input, in_offset) >> 32);
955 	desc->in_addr_lw = (uint32_t)(
956 			rte_pktmbuf_iova_offset(input, in_offset));
957 
958 	desc->out_addr_hi = (uint32_t)(
959 			rte_pktmbuf_iova_offset(output, out_offset) >> 32);
960 	desc->out_addr_lw = (uint32_t)(
961 			rte_pktmbuf_iova_offset(output, out_offset));
962 	/* Save software context needed for dequeue */
963 	desc->op_addr = op;
964 	/* Set total number of CBs in an op */
965 	desc->cbs_in_op = cbs_in_op;
966 	return 0;
967 }
968 
969 /**
970  * Set DMA descriptor for decode operation (1 Code Block)
971  *
972  * @param op
973  *   Pointer to a single decode operation.
974  * @param desc
975  *   Pointer to DMA descriptor.
976  * @param input
977  *   Pointer to the input data which will be decoded.
978  * @param output
979  *   Pointer to the hard output buffer.
980  * @param harq_in_length
981  *   Length of the HARQ combined input.
982  * @param in_offset
983  *   Input offset in the rte_mbuf, i.e. where the input data starts.
984  * @param out_offset
985  *   Output offset in the rte_mbuf, i.e. where the hard output is stored.
986  * @param harq_offset
987  *   Offset into the HARQ memory (must be 1 kB aligned).
988  * @param desc_offset
989  *   Offset of this descriptor in the ring.
990  * @param cbs_in_op
991  *   Number of CBs contained in one operation.
992  */
987 static inline int
988 fpga_dma_desc_ld_fill(struct rte_bbdev_dec_op *op,
989 		struct fpga_dma_dec_desc *desc,
990 		struct rte_mbuf *input,	struct rte_mbuf *output,
991 		uint16_t harq_in_length,
992 		uint32_t in_offset, uint32_t out_offset,
993 		uint32_t harq_offset,
994 		uint16_t desc_offset,
995 		uint8_t cbs_in_op)
996 {
997 	/* reset */
998 	desc->done = 0;
999 	desc->error = 0;
1000 	/* Set inbound data buffer address */
1001 	desc->in_addr_hi = (uint32_t)(
1002 			rte_pktmbuf_iova_offset(input, in_offset) >> 32);
1003 	desc->in_addr_lw = (uint32_t)(
1004 			rte_pktmbuf_iova_offset(input, in_offset));
1005 	desc->rm_e = op->ldpc_dec.cb_params.e;
1006 	desc->harq_input_length = harq_in_length;
1007 	desc->et_dis = !check_bit(op->ldpc_dec.op_flags,
1008 			RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE);
1009 	desc->rv = op->ldpc_dec.rv_index;
1010 	desc->crc24b_ind = check_bit(op->ldpc_dec.op_flags,
1011 			RTE_BBDEV_LDPC_CRC_TYPE_24B_CHECK);
1012 	desc->drop_crc24b = check_bit(op->ldpc_dec.op_flags,
1013 			RTE_BBDEV_LDPC_CRC_TYPE_24B_DROP);
1014 	desc->desc_idx = desc_offset;
1015 	desc->ncb = op->ldpc_dec.n_cb;
1016 	desc->num_null = op->ldpc_dec.n_filler;
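	/* The HARQ buffer store offset is programmed in 1 kB granularity */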
1017 	desc->hbstroe_offset = harq_offset >> 10;
1018 	desc->zc = op->ldpc_dec.z_c;
1019 	desc->harqin_en = check_bit(op->ldpc_dec.op_flags,
1020 			RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE);
1021 	desc->bg_idx = op->ldpc_dec.basegraph - 1;
1022 	desc->max_iter = op->ldpc_dec.iter_max;
1023 	desc->qm_idx = op->ldpc_dec.q_m / 2;
1024 	desc->out_addr_hi = (uint32_t)(
1025 			rte_pktmbuf_iova_offset(output, out_offset) >> 32);
1026 	desc->out_addr_lw = (uint32_t)(
1027 			rte_pktmbuf_iova_offset(output, out_offset));
1028 	/* Save software context needed for dequeue */
1029 	desc->op_addr = op;
1030 	/* Set total number of CBs in an op */
1031 	desc->cbs_in_op = cbs_in_op;
1032 
1033 	return 0;
1034 }
1035 
1036 #ifdef RTE_LIBRTE_BBDEV_DEBUG
1037 /* Validates LDPC encoder parameters */
1038 static int
1039 validate_enc_op(struct rte_bbdev_enc_op *op __rte_unused)
1040 {
1041 	struct rte_bbdev_op_ldpc_enc *ldpc_enc = &op->ldpc_enc;
1042 	struct rte_bbdev_op_enc_ldpc_cb_params *cb = NULL;
1043 	struct rte_bbdev_op_enc_ldpc_tb_params *tb = NULL;
1044 
1046 	if (ldpc_enc->input.length >
1047 			RTE_BBDEV_LDPC_MAX_CB_SIZE >> 3) {
1048 		rte_bbdev_log(ERR, "CB size (%u) is too big, max: %d",
1049 				ldpc_enc->input.length,
1050 				RTE_BBDEV_LDPC_MAX_CB_SIZE);
1051 		return -1;
1052 	}
1053 
1054 	if (op->mempool == NULL) {
1055 		rte_bbdev_log(ERR, "Invalid mempool pointer");
1056 		return -1;
1057 	}
1058 	if (ldpc_enc->input.data == NULL) {
1059 		rte_bbdev_log(ERR, "Invalid input pointer");
1060 		return -1;
1061 	}
1062 	if (ldpc_enc->output.data == NULL) {
1063 		rte_bbdev_log(ERR, "Invalid output pointer");
1064 		return -1;
1065 	}
1066 	if ((ldpc_enc->basegraph > 2) || (ldpc_enc->basegraph == 0)) {
1067 		rte_bbdev_log(ERR,
1068 				"basegraph (%u) is out of range 1 <= value <= 2",
1069 				ldpc_enc->basegraph);
1070 		return -1;
1071 	}
1072 	if (ldpc_enc->code_block_mode > RTE_BBDEV_CODE_BLOCK) {
1073 		rte_bbdev_log(ERR,
1074 				"code_block_mode (%u) is out of range 0:Tb 1:CB",
1075 				ldpc_enc->code_block_mode);
1076 		return -1;
1077 	}
1078 
1079 	if (ldpc_enc->code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK) {
1080 		tb = &ldpc_enc->tb_params;
1081 		if (tb->c == 0) {
1082 			rte_bbdev_log(ERR,
1083 					"c (%u) is out of range 1 <= value <= %u",
1084 					tb->c, RTE_BBDEV_LDPC_MAX_CODE_BLOCKS);
1085 			return -1;
1086 		}
1087 		if (tb->cab > tb->c) {
1088 			rte_bbdev_log(ERR,
1089 					"cab (%u) is greater than c (%u)",
1090 					tb->cab, tb->c);
1091 			return -1;
1092 		}
1093 		if ((tb->ea < RTE_BBDEV_LDPC_MIN_CB_SIZE)
1094 				&& tb->r < tb->cab) {
1095 			rte_bbdev_log(ERR,
1096 					"ea (%u) is less than %u or it is not even",
1097 					tb->ea, RTE_BBDEV_LDPC_MIN_CB_SIZE);
1098 			return -1;
1099 		}
1100 		if ((tb->eb < RTE_BBDEV_LDPC_MIN_CB_SIZE)
1101 				&& tb->c > tb->cab) {
1102 			rte_bbdev_log(ERR,
1103 					"eb (%u) is less than %u",
1104 					tb->eb, RTE_BBDEV_LDPC_MIN_CB_SIZE);
1105 			return -1;
1106 		}
1107 		if (tb->r > (tb->c - 1)) {
1108 			rte_bbdev_log(ERR,
1109 					"r (%u) is greater than c - 1 (%u)",
1110 					tb->r, tb->c - 1);
1111 			return -1;
1112 		}
1113 	} else {
1114 		cb = &ldpc_enc->cb_params;
1115 		if (cb->e < RTE_BBDEV_LDPC_MIN_CB_SIZE) {
1116 			rte_bbdev_log(ERR,
1117 					"e (%u) is less than %u or it is not even",
1118 					cb->e, RTE_BBDEV_LDPC_MIN_CB_SIZE);
1119 			return -1;
1120 		}
1121 	}
1122 	return 0;
1123 }
1124 #endif
1125 
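/* Append len bytes to mbuf m and update the head mbuf's total packet length.
 * Return a pointer to the appended region, or NULL when tailroom is too small.
 */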
1126 static inline char *
1127 mbuf_append(struct rte_mbuf *m_head, struct rte_mbuf *m, uint16_t len)
1128 {
1129 	if (unlikely(len > rte_pktmbuf_tailroom(m)))
1130 		return NULL;
1131 
1132 	char *tail = (char *)m->buf_addr + m->data_off + m->data_len;
1133 	m->data_len = (uint16_t)(m->data_len + len);
1134 	m_head->pkt_len  = (m_head->pkt_len + len);
1135 	return tail;
1136 }
1137 
1138 #ifdef RTE_LIBRTE_BBDEV_DEBUG
1139 /* Validates LDPC decoder parameters */
1140 static int
1141 validate_dec_op(struct rte_bbdev_dec_op *op __rte_unused)
1142 {
1143 	struct rte_bbdev_op_ldpc_dec *ldpc_dec = &op->ldpc_dec;
1144 	struct rte_bbdev_op_dec_ldpc_cb_params *cb = NULL;
1145 	struct rte_bbdev_op_dec_ldpc_tb_params *tb = NULL;
1146 
1147 	if (op->mempool == NULL) {
1148 		rte_bbdev_log(ERR, "Invalid mempool pointer");
1149 		return -1;
1150 	}
1151 	if (ldpc_dec->rv_index > 3) {
1152 		rte_bbdev_log(ERR,
1153 				"rv_index (%u) is out of range 0 <= value <= 3",
1154 				ldpc_dec->rv_index);
1155 		return -1;
1156 	}
1157 
1158 	if (ldpc_dec->iter_max == 0) {
1159 		rte_bbdev_log(ERR,
1160 				"iter_max (%u) is equal to 0",
1161 				ldpc_dec->iter_max);
1162 		return -1;
1163 	}
1164 
1165 	if (ldpc_dec->code_block_mode > RTE_BBDEV_CODE_BLOCK) {
1166 		rte_bbdev_log(ERR,
1167 				"code_block_mode (%u) is out of range 0 <= value <= 1",
1168 				ldpc_dec->code_block_mode);
1169 		return -1;
1170 	}
1171 
1172 	if (ldpc_dec->code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK) {
1173 		tb = &ldpc_dec->tb_params;
1174 		if (tb->c < 1) {
1175 			rte_bbdev_log(ERR,
1176 					"c (%u) is out of range 1 <= value <= %u",
1177 					tb->c, RTE_BBDEV_LDPC_MAX_CODE_BLOCKS);
1178 			return -1;
1179 		}
1180 		if (tb->cab > tb->c) {
1181 			rte_bbdev_log(ERR,
1182 					"cab (%u) is greater than c (%u)",
1183 					tb->cab, tb->c);
1184 			return -1;
1185 		}
1186 	} else {
1187 		cb = &ldpc_dec->cb_params;
1188 		if (cb->e < RTE_BBDEV_LDPC_MIN_CB_SIZE) {
1189 			rte_bbdev_log(ERR,
1190 					"e (%u) is out of range %u <= value <= %u",
1191 					cb->e, RTE_BBDEV_LDPC_MIN_CB_SIZE,
1192 					RTE_BBDEV_LDPC_MAX_CB_SIZE);
1193 			return -1;
1194 		}
1195 	}
1196 
1197 	return 0;
1198 }
1199 #endif
1200 
1201 static inline int
1202 fpga_harq_write_loopback(struct fpga_5gnr_fec_device *fpga_dev,
1203 		struct rte_mbuf *harq_input, uint16_t harq_in_length,
1204 		uint32_t harq_in_offset, uint32_t harq_out_offset)
1205 {
1206 	uint32_t out_offset = harq_out_offset;
1207 	uint32_t in_offset = harq_in_offset;
1208 	uint32_t left_length = harq_in_length;
1209 	uint32_t reg_32, increment = 0;
1210 	uint64_t *input = NULL;
1211 	uint32_t last_transaction = left_length
1212 			% FPGA_5GNR_FEC_DDR_WR_DATA_LEN_IN_BYTES;
1213 	uint64_t last_word;
1214 
1215 	if (last_transaction > 0)
1216 		left_length -= last_transaction;
1217 
1218 	/*
1219 	 * Get HARQ buffer size for each VF/PF: When 0x00, there is no
1220 	 * available DDR space for the corresponding VF/PF.
1221 	 */
1222 	reg_32 = fpga_reg_read_32(fpga_dev->mmio_base,
1223 			FPGA_5GNR_FEC_HARQ_BUF_SIZE_REGS);
1224 	if (reg_32 < harq_in_length) {
1225 		left_length = reg_32;
1226 		rte_bbdev_log(ERR, "HARQ in length > HARQ buffer size\n");
1227 	}
1228 
1229 	input = (uint64_t *)rte_pktmbuf_mtod_offset(harq_input,
1230 			uint8_t *, in_offset);
1231 
1232 	while (left_length > 0) {
1233 		if (fpga_reg_read_8(fpga_dev->mmio_base,
1234 				FPGA_5GNR_FEC_DDR4_ADDR_RDY_REGS) ==  1) {
1235 			fpga_reg_write_32(fpga_dev->mmio_base,
1236 					FPGA_5GNR_FEC_DDR4_WR_ADDR_REGS,
1237 					out_offset);
1238 			fpga_reg_write_64(fpga_dev->mmio_base,
1239 					FPGA_5GNR_FEC_DDR4_WR_DATA_REGS,
1240 					input[increment]);
1241 			left_length -= FPGA_5GNR_FEC_DDR_WR_DATA_LEN_IN_BYTES;
1242 			out_offset += FPGA_5GNR_FEC_DDR_WR_DATA_LEN_IN_BYTES;
1243 			increment++;
1244 			fpga_reg_write_8(fpga_dev->mmio_base,
1245 					FPGA_5GNR_FEC_DDR4_WR_DONE_REGS, 1);
1246 		}
1247 	}
1248 	while (last_transaction > 0) {
1249 		if (fpga_reg_read_8(fpga_dev->mmio_base,
1250 				FPGA_5GNR_FEC_DDR4_ADDR_RDY_REGS) ==  1) {
1251 			fpga_reg_write_32(fpga_dev->mmio_base,
1252 					FPGA_5GNR_FEC_DDR4_WR_ADDR_REGS,
1253 					out_offset);
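			/* Mask the final partial word so only the remaining
			 * valid bits are written.
			 */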
1254 			last_word = input[increment];
1255 			last_word &= (uint64_t)(1 << (last_transaction * 4))
1256 					- 1;
1257 			fpga_reg_write_64(fpga_dev->mmio_base,
1258 					FPGA_5GNR_FEC_DDR4_WR_DATA_REGS,
1259 					last_word);
1260 			fpga_reg_write_8(fpga_dev->mmio_base,
1261 					FPGA_5GNR_FEC_DDR4_WR_DONE_REGS, 1);
1262 			last_transaction = 0;
1263 		}
1264 	}
1265 	return 1;
1266 }
1267 
1268 static inline int
1269 fpga_harq_read_loopback(struct fpga_5gnr_fec_device *fpga_dev,
1270 		struct rte_mbuf *harq_output, uint16_t harq_in_length,
1271 		uint32_t harq_in_offset, uint32_t harq_out_offset)
1272 {
1273 	uint32_t left_length, in_offset = harq_in_offset;
1274 	uint64_t reg;
1275 	uint32_t increment = 0;
1276 	uint64_t *input = NULL;
1277 	uint32_t last_transaction = harq_in_length
1278 			% FPGA_5GNR_FEC_DDR_WR_DATA_LEN_IN_BYTES;
1279 
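	/* Round the read length up to a whole 8-byte DDR word */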
1280 	if (last_transaction > 0)
1281 		harq_in_length += (8 - last_transaction);
1282 
1283 	reg = fpga_reg_read_32(fpga_dev->mmio_base,
1284 			FPGA_5GNR_FEC_HARQ_BUF_SIZE_REGS);
1285 	if (reg < harq_in_length) {
1286 		harq_in_length = reg;
1287 		rte_bbdev_log(ERR, "HARQ in length > HARQ buffer size\n");
1288 	}
1289 
1290 	if (!mbuf_append(harq_output, harq_output, harq_in_length)) {
1291 		rte_bbdev_log(ERR, "HARQ output buffer warning %d %d\n",
1292 				harq_output->buf_len -
1293 				rte_pktmbuf_headroom(harq_output),
1294 				harq_in_length);
1295 		harq_in_length = harq_output->buf_len -
1296 				rte_pktmbuf_headroom(harq_output);
1297 		if (!mbuf_append(harq_output, harq_output, harq_in_length)) {
1298 			rte_bbdev_log(ERR, "HARQ output buffer issue %d %d\n",
1299 					harq_output->buf_len, harq_in_length);
1300 			return -1;
1301 		}
1302 	}
1303 	left_length = harq_in_length;
1304 
1305 	input = (uint64_t *)rte_pktmbuf_mtod_offset(harq_output,
1306 			uint8_t *, harq_out_offset);
1307 
1308 	while (left_length > 0) {
1309 		fpga_reg_write_32(fpga_dev->mmio_base,
1310 			FPGA_5GNR_FEC_DDR4_RD_ADDR_REGS, in_offset);
1311 		fpga_reg_write_8(fpga_dev->mmio_base,
1312 				FPGA_5GNR_FEC_DDR4_RD_DONE_REGS, 1);
1313 		reg = fpga_reg_read_8(fpga_dev->mmio_base,
1314 			FPGA_5GNR_FEC_DDR4_RD_RDY_REGS);
1315 		while (reg != 1) {
1316 			reg = fpga_reg_read_8(fpga_dev->mmio_base,
1317 				FPGA_5GNR_FEC_DDR4_RD_RDY_REGS);
1318 			if (reg == FPGA_DDR_OVERFLOW) {
1319 				rte_bbdev_log(ERR,
1320 						"Read address is overflow!\n");
1321 				return -1;
1322 			}
1323 		}
1324 		input[increment] = fpga_reg_read_64(fpga_dev->mmio_base,
1325 			FPGA_5GNR_FEC_DDR4_RD_DATA_REGS);
1326 		left_length -= FPGA_5GNR_FEC_DDR_RD_DATA_LEN_IN_BYTES;
1327 		in_offset += FPGA_5GNR_FEC_DDR_WR_DATA_LEN_IN_BYTES;
1328 		increment++;
1329 		fpga_reg_write_8(fpga_dev->mmio_base,
1330 				FPGA_5GNR_FEC_DDR4_RD_DONE_REGS, 0);
1331 	}
1332 	return 1;
1333 }
1334 
1335 static inline int
1336 enqueue_ldpc_enc_one_op_cb(struct fpga_queue *q, struct rte_bbdev_enc_op *op,
1337 		uint16_t desc_offset)
1338 {
1339 	union fpga_dma_desc *desc;
1340 	int ret;
1341 	uint8_t c, crc24_bits = 0;
1342 	struct rte_bbdev_op_ldpc_enc *enc = &op->ldpc_enc;
1343 	uint16_t in_offset = enc->input.offset;
1344 	uint16_t out_offset = enc->output.offset;
1345 	struct rte_mbuf *m_in = enc->input.data;
1346 	struct rte_mbuf *m_out = enc->output.data;
1347 	struct rte_mbuf *m_out_head = enc->output.data;
1348 	uint32_t in_length, out_length, e;
1349 	uint16_t total_left = enc->input.length;
1350 	uint16_t ring_offset;
1351 	uint16_t K, k_;
1352 
1353 #ifdef RTE_LIBRTE_BBDEV_DEBUG
1354 	/* Validate op structure */
1355 	/* FIXME */
1356 	if (validate_enc_op(op) == -1) {
1357 		rte_bbdev_log(ERR, "LDPC encoder validation failed");
1358 		return -EINVAL;
1359 	}
1360 #endif
1361 
1362 	/* Clear op status */
1363 	op->status = 0;
1364 
1365 	if (m_in == NULL || m_out == NULL) {
1366 		rte_bbdev_log(ERR, "Invalid mbuf pointer");
1367 		op->status = 1 << RTE_BBDEV_DATA_ERROR;
1368 		return -EINVAL;
1369 	}
1370 
1371 	if (enc->op_flags & RTE_BBDEV_LDPC_CRC_24B_ATTACH)
1372 		crc24_bits = 24;
1373 
1374 	if (enc->code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK) {
1375 		/* For Transport Block mode */
1376 		/* FIXME */
1377 		c = enc->tb_params.c;
1378 		e = enc->tb_params.ea;
1379 	} else { /* For Code Block mode */
1380 		c = 1;
1381 		e = enc->cb_params.e;
1382 	}
1383 
1384 	/* Derive K (22 or 10 systematic columns times Zc), K' and CB lengths */
1385 	K = (enc->basegraph == 1 ? 22 : 10) * enc->z_c;
1386 	k_ = K - enc->n_filler;
1387 	in_length = (k_ - crc24_bits) >> 3;
1388 	out_length = (e + 7) >> 3;
1389 
1390 	total_left = rte_pktmbuf_data_len(m_in) - in_offset;
1391 
1392 	/* Update offsets */
1393 	if (total_left != in_length) {
1394 		op->status |= 1 << RTE_BBDEV_DATA_ERROR;
1395 		rte_bbdev_log(ERR,
1396 				"Mismatch between mbuf length and included CBs sizes %d",
1397 				total_left);
1398 	}
1399 
1400 	mbuf_append(m_out_head, m_out, out_length);
1401 
1402 	/* Offset into the ring */
1403 	ring_offset = ((q->tail + desc_offset) & q->sw_ring_wrap_mask);
1404 	/* Setup DMA Descriptor */
1405 	desc = q->ring_addr + ring_offset;
1406 
1407 	ret = fpga_dma_desc_te_fill(op, &desc->enc_req, m_in, m_out,
1408 			k_, e, in_offset, out_offset, ring_offset, c);
1409 	if (unlikely(ret < 0))
1410 		return ret;
1411 
1412 	/* Update lengths */
1413 	total_left -= in_length;
1414 	op->ldpc_enc.output.length += out_length;
1415 
1416 	if (total_left > 0) {
1417 		rte_bbdev_log(ERR,
1418 			"Mismatch between mbuf length and included CB sizes: mbuf len %u, cb len %u",
1419 				total_left, in_length);
1420 		return -1;
1421 	}
1422 
1423 #ifdef RTE_LIBRTE_BBDEV_DEBUG
1424 	print_dma_enc_desc_debug_info(desc);
1425 #endif
1426 	return 1;
1427 }
1428 
1429 static inline int
1430 enqueue_ldpc_dec_one_op_cb(struct fpga_queue *q, struct rte_bbdev_dec_op *op,
1431 		uint16_t desc_offset)
1432 {
1433 	union fpga_dma_desc *desc;
1434 	int ret;
1435 	uint16_t ring_offset;
1436 	uint8_t c;
1437 	uint16_t e, in_length, out_length, k0, l, seg_total_left, sys_cols;
1438 	uint16_t K, parity_offset, harq_in_length = 0, harq_out_length = 0;
1439 	uint16_t crc24_overlap = 0;
1440 	struct rte_bbdev_op_ldpc_dec *dec = &op->ldpc_dec;
1441 	struct rte_mbuf *m_in = dec->input.data;
1442 	struct rte_mbuf *m_out = dec->hard_output.data;
1443 	struct rte_mbuf *m_out_head = dec->hard_output.data;
1444 	uint16_t in_offset = dec->input.offset;
1445 	uint16_t out_offset = dec->hard_output.offset;
1446 	uint32_t harq_offset = 0;
1447 
1448 #ifdef RTE_LIBRTE_BBDEV_DEBUG
1449 	/* Validate op structure */
1450 	if (validate_dec_op(op) == -1) {
1451 		rte_bbdev_log(ERR, "LDPC decoder validation failed");
1452 		return -EINVAL;
1453 	}
1454 #endif
1455 
1456 	/* Clear op status */
1457 	op->status = 0;
1458 
1459 	/* Setup DMA Descriptor */
1460 	ring_offset = ((q->tail + desc_offset) & q->sw_ring_wrap_mask);
1461 	desc = q->ring_addr + ring_offset;
1462 
1463 	if (check_bit(dec->op_flags,
1464 			RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK)) {
1465 		struct rte_mbuf *harq_in = dec->harq_combined_input.data;
1466 		struct rte_mbuf *harq_out = dec->harq_combined_output.data;
1467 		harq_in_length = dec->harq_combined_input.length;
1468 		uint32_t harq_in_offset = dec->harq_combined_input.offset;
1469 		uint32_t harq_out_offset = dec->harq_combined_output.offset;
1470 
1471 		if (check_bit(dec->op_flags,
1472 				RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_OUT_ENABLE
1473 				)) {
1474 			ret = fpga_harq_write_loopback(q->d, harq_in,
1475 					harq_in_length, harq_in_offset,
1476 					harq_out_offset);
1477 		} else if (check_bit(dec->op_flags,
1478 				RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_IN_ENABLE
1479 				)) {
1480 			ret = fpga_harq_read_loopback(q->d, harq_out,
1481 				harq_in_length, harq_in_offset,
1482 				harq_out_offset);
1483 			dec->harq_combined_output.length = harq_in_length;
1484 		} else {
1485 			rte_bbdev_log(ERR, "OP flag Err!");
1486 			ret = -1;
1487 		}
1488 		/* Set descriptor for dequeue */
1489 		desc->dec_req.done = 1;
1490 		desc->dec_req.error = 0;
1491 		desc->dec_req.op_addr = op;
1492 		desc->dec_req.cbs_in_op = 1;
1493 		/* Mark this dummy descriptor to be dropped by HW */
1494 		desc->dec_req.desc_idx = (ring_offset + 1)
1495 				& q->sw_ring_wrap_mask;
1496 		return ret; /* Error or number of CB */
1497 	}
1498 
1499 	if (m_in == NULL || m_out == NULL) {
1500 		rte_bbdev_log(ERR, "Invalid mbuf pointer");
1501 		op->status = 1 << RTE_BBDEV_DATA_ERROR;
1502 		return -1;
1503 	}
1504 
1505 	c = 1;
1506 	e = dec->cb_params.e;
1507 
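	/* When HW drops the CRC24B, those 24 bits are excluded from the output */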
1508 	if (check_bit(dec->op_flags, RTE_BBDEV_LDPC_CRC_TYPE_24B_DROP))
1509 		crc24_overlap = 24;
1510 
1511 	sys_cols = (dec->basegraph == 1) ? 22 : 10;
1512 	K = sys_cols * dec->z_c;
1513 	parity_offset = K - 2 * dec->z_c;
1514 
1515 	out_length = ((K - crc24_overlap - dec->n_filler) >> 3);
1516 	in_length = e;
1517 	seg_total_left = dec->input.length;
1518 
1519 	if (check_bit(dec->op_flags, RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE)) {
1520 		harq_in_length = RTE_MIN(dec->harq_combined_input.length,
1521 				(uint32_t)dec->n_cb);
1522 	}
1523 
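	/* The HARQ output spans up to the last LLR written this pass (k0 + E,
	 * plus filler bits when k0 lies before the filler region), capped at
	 * Ncb minus the filler bits.
	 */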
1524 	if (check_bit(dec->op_flags, RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE)) {
1525 		k0 = get_k0(dec->n_cb, dec->z_c,
1526 				dec->basegraph, dec->rv_index);
1527 		if (k0 > parity_offset)
1528 			l = k0 + e;
1529 		else
1530 			l = k0 + e + dec->n_filler;
1531 		harq_out_length = RTE_MIN(RTE_MAX(harq_in_length, l),
1532 				dec->n_cb - dec->n_filler);
1533 		dec->harq_combined_output.length = harq_out_length;
1534 	}
1535 
1536 	mbuf_append(m_out_head, m_out, out_length);
1537 	if (check_bit(dec->op_flags, RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE))
1538 		harq_offset = dec->harq_combined_input.offset;
1539 	else if (check_bit(dec->op_flags, RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE))
1540 		harq_offset = dec->harq_combined_output.offset;
1541 
1542 	if ((harq_offset & 0x3FF) > 0) {
1543 		rte_bbdev_log(ERR, "Invalid HARQ offset %d", harq_offset);
1544 		op->status = 1 << RTE_BBDEV_DATA_ERROR;
1545 		return -1;
1546 	}
1547 
1548 	ret = fpga_dma_desc_ld_fill(op, &desc->dec_req, m_in, m_out,
1549 		harq_in_length, in_offset, out_offset, harq_offset,
1550 		ring_offset, c);
1551 	if (unlikely(ret < 0))
1552 		return ret;
1553 	/* Update lengths */
1554 	seg_total_left -= in_length;
1555 	op->ldpc_dec.hard_output.length += out_length;
1556 	if (seg_total_left > 0) {
1557 		rte_bbdev_log(ERR,
1558 				"Mismatch between mbuf length and included CB sizes: mbuf len %u, cb len %u",
1559 				seg_total_left, in_length);
1560 		return -1;
1561 	}
1562 
1563 #ifdef RTE_LIBRTE_BBDEV_DEBUG
1564 	print_dma_dec_desc_debug_info(desc);
1565 #endif
1566 
1567 	return 1;
1568 }
1569 
1570 static uint16_t
1571 fpga_enqueue_ldpc_enc(struct rte_bbdev_queue_data *q_data,
1572 		struct rte_bbdev_enc_op **ops, uint16_t num)
1573 {
1574 	uint16_t i, total_enqueued_cbs = 0;
1575 	int32_t avail;
1576 	int enqueued_cbs;
1577 	struct fpga_queue *q = q_data->queue_private;
1578 	union fpga_dma_desc *desc;
1579 
1580 	/* Check if queue is not full */
1581 	if (unlikely(((q->tail + 1) & q->sw_ring_wrap_mask) ==
1582 			q->head_free_desc))
1583 		return 0;
1584 
1585 	/* Calculates available space */
1586 	avail = (q->head_free_desc > q->tail) ?
1587 		q->head_free_desc - q->tail - 1 :
1588 		q->ring_ctrl_reg.ring_size + q->head_free_desc - q->tail - 1;
1589 
1590 	for (i = 0; i < num; ++i) {
1591 
1592 		/* Check if there is available space for further
1593 		 * processing
1594 		 */
1595 		if (unlikely(avail - 1 < 0))
1596 			break;
1597 		avail -= 1;
1598 		enqueued_cbs = enqueue_ldpc_enc_one_op_cb(q, ops[i],
1599 				total_enqueued_cbs);
1600 
1601 		if (enqueued_cbs < 0)
1602 			break;
1603 
1604 		total_enqueued_cbs += enqueued_cbs;
1605 
1606 		rte_bbdev_log_debug("enqueuing enc ops [%d/%d] | head %d | tail %d",
1607 				total_enqueued_cbs, num,
1608 				q->head_free_desc, q->tail);
1609 	}
1610 
1611 	/* Set interrupt bit for last CB in enqueued ops. FPGA issues interrupt
1612 	 * only when all previous CBs were already processed.
1613 	 */
1614 	desc = q->ring_addr + ((q->tail + total_enqueued_cbs - 1)
1615 			& q->sw_ring_wrap_mask);
1616 	desc->enc_req.irq_en = q->irq_enable;
1617 
1618 	fpga_dma_enqueue(q, total_enqueued_cbs, &q_data->queue_stats);
1619 
1620 	/* Update stats */
1621 	q_data->queue_stats.enqueued_count += i;
1622 	q_data->queue_stats.enqueue_err_count += num - i;
1623 
1624 	return i;
1625 }
1626 
1627 static uint16_t
1628 fpga_enqueue_ldpc_dec(struct rte_bbdev_queue_data *q_data,
1629 		struct rte_bbdev_dec_op **ops, uint16_t num)
1630 {
1631 	uint16_t i, total_enqueued_cbs = 0;
1632 	int32_t avail;
1633 	int enqueued_cbs;
1634 	struct fpga_queue *q = q_data->queue_private;
1635 	union fpga_dma_desc *desc;
1636 
1637 	/* Check if queue is not full */
1638 	if (unlikely(((q->tail + 1) & q->sw_ring_wrap_mask) ==
1639 			q->head_free_desc))
1640 		return 0;
1641 
1642 	/* Calculates available space */
1643 	avail = (q->head_free_desc > q->tail) ?
1644 		q->head_free_desc - q->tail - 1 :
1645 		q->ring_ctrl_reg.ring_size + q->head_free_desc - q->tail - 1;
1646 
1647 	for (i = 0; i < num; ++i) {
1648 
1649 		/* Check if there is available space for further
1650 		 * processing
1651 		 */
1652 		if (unlikely(avail - 1 < 0))
1653 			break;
1654 		avail -= 1;
1655 		enqueued_cbs = enqueue_ldpc_dec_one_op_cb(q, ops[i],
1656 				total_enqueued_cbs);
1657 
1658 		if (enqueued_cbs < 0)
1659 			break;
1660 
1661 		total_enqueued_cbs += enqueued_cbs;
1662 
1663 		rte_bbdev_log_debug("enqueuing dec ops [%d/%d] | head %d | tail %d",
1664 				total_enqueued_cbs, num,
1665 				q->head_free_desc, q->tail);
1666 	}
1667 
1668 	/* Update stats */
1669 	q_data->queue_stats.enqueued_count += i;
1670 	q_data->queue_stats.enqueue_err_count += num - i;
1671 
1672 	/* Set interrupt bit for last CB in enqueued ops. FPGA issues interrupt
1673 	 * only when all previous CBs were already processed.
1674 	 */
1675 	desc = q->ring_addr + ((q->tail + total_enqueued_cbs - 1)
1676 			& q->sw_ring_wrap_mask);
1677 	desc->enc_req.irq_en = q->irq_enable;
1678 	fpga_dma_enqueue(q, total_enqueued_cbs, &q_data->queue_stats);
1679 	return i;
1680 }
1681 
1682 
1683 static inline int
1684 dequeue_ldpc_enc_one_op_cb(struct fpga_queue *q,
1685 		struct rte_bbdev_enc_op **op,
1686 		uint16_t desc_offset)
1687 {
1688 	union fpga_dma_desc *desc;
1689 	int desc_error;
1690 	/* Set current desc */
1691 	desc = q->ring_addr + ((q->head_free_desc + desc_offset)
1692 			& q->sw_ring_wrap_mask);
1693 
1694 	/* Check if done */
1695 	if (desc->enc_req.done == 0)
1696 		return -1;
1697 
1698 	/* make sure the response is read atomically */
1699 	rte_smp_rmb();
1700 
1701 	rte_bbdev_log_debug("DMA response desc %p", desc);
1702 
1703 #ifdef RTE_LIBRTE_BBDEV_DEBUG
1704 	print_dma_enc_desc_debug_info(desc);
1705 #endif
1706 
1707 	*op = desc->enc_req.op_addr;
1708 	/* Check the descriptor error field, return 1 on error */
1709 	desc_error = check_desc_error(desc->enc_req.error);
1710 	(*op)->status = desc_error << RTE_BBDEV_DATA_ERROR;
1711 
1712 	return 1;
1713 }
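
/*
 * Illustrative sketch, not referenced by the driver: op->status is a
 * bit field indexed by enum rte_bbdev_op_error, so a dequeued encode
 * operation succeeded only if the data-error bit set above is clear.
 */
static __rte_unused int
example_enc_op_ok(const struct rte_bbdev_enc_op *op)
{
	return (op->status & (1 << RTE_BBDEV_DATA_ERROR)) == 0;
}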
1714 
1715 
1716 static inline int
1717 dequeue_ldpc_dec_one_op_cb(struct fpga_queue *q, struct rte_bbdev_dec_op **op,
1718 		uint16_t desc_offset)
1719 {
1720 	union fpga_dma_desc *desc;
1721 	int desc_error;
1722 	/* Set descriptor */
1723 	desc = q->ring_addr + ((q->head_free_desc + desc_offset)
1724 			& q->sw_ring_wrap_mask);
1725 
1726 	/* Verify done bit is set */
1727 	if (desc->dec_req.done == 0)
1728 		return -1;
1729 
	/* Read barrier: load the response fields only after the done bit */
1731 	rte_smp_rmb();
1732 
1733 #ifdef RTE_LIBRTE_BBDEV_DEBUG
1734 	print_dma_dec_desc_debug_info(desc);
1735 #endif
1736 
1737 	*op = desc->dec_req.op_addr;
1738 
1739 	if (check_bit((*op)->ldpc_dec.op_flags,
1740 			RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK)) {
1741 		(*op)->status = 0;
1742 		return 1;
1743 	}
1744 
	/* FPGA reports the rounded-up iteration count minus one; compensate */
1746 	(*op)->ldpc_dec.iter_count = desc->dec_req.iter + 1;
	/* Flag a CRC error when CRC24B checking was requested and failed */
1748 	if (desc->dec_req.crc24b_ind && !(desc->dec_req.crcb_pass))
1749 		(*op)->status = 1 << RTE_BBDEV_CRC_ERROR;
	/* et_pass == 0 when the decoder fails the syndrome check */
1751 	(*op)->status |= !(desc->dec_req.et_pass) << RTE_BBDEV_SYNDROME_ERROR;
1752 	/* Check the descriptor error field, return 1 on error */
1753 	desc_error = check_desc_error(desc->dec_req.error);
1754 	(*op)->status |= desc_error << RTE_BBDEV_DATA_ERROR;
1755 	return 1;
1756 }
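
/*
 * Illustrative sketch, not referenced by the driver: how a caller
 * might fold the CRC, syndrome and data error bits set by the decode
 * dequeue path above into a single pass/fail verdict.
 */
static __rte_unused int
example_dec_op_ok(const struct rte_bbdev_dec_op *op)
{
	uint32_t err_mask = (1 << RTE_BBDEV_CRC_ERROR) |
			(1 << RTE_BBDEV_SYNDROME_ERROR) |
			(1 << RTE_BBDEV_DATA_ERROR);

	return (op->status & err_mask) == 0;
}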
1757 
1758 static uint16_t
1759 fpga_dequeue_ldpc_enc(struct rte_bbdev_queue_data *q_data,
1760 		struct rte_bbdev_enc_op **ops, uint16_t num)
1761 {
1762 	struct fpga_queue *q = q_data->queue_private;
1763 	uint32_t avail = (q->tail - q->head_free_desc) & q->sw_ring_wrap_mask;
1764 	uint16_t i;
1765 	uint16_t dequeued_cbs = 0;
1766 	int ret;
1767 
1768 	for (i = 0; (i < num) && (dequeued_cbs < avail); ++i) {
1769 		ret = dequeue_ldpc_enc_one_op_cb(q, &ops[i], dequeued_cbs);
1770 
1771 		if (ret < 0)
1772 			break;
1773 
1774 		dequeued_cbs += ret;
1775 
1776 		rte_bbdev_log_debug("dequeuing enc ops [%d/%d] | head %d | tail %d",
1777 				dequeued_cbs, num, q->head_free_desc, q->tail);
1778 	}
1779 
1780 	/* Update head */
1781 	q->head_free_desc = (q->head_free_desc + dequeued_cbs) &
1782 			q->sw_ring_wrap_mask;
1783 
1784 	/* Update stats */
1785 	q_data->queue_stats.dequeued_count += i;
1786 
1787 	return i;
1788 }
1789 
1790 static uint16_t
1791 fpga_dequeue_ldpc_dec(struct rte_bbdev_queue_data *q_data,
1792 		struct rte_bbdev_dec_op **ops, uint16_t num)
1793 {
1794 	struct fpga_queue *q = q_data->queue_private;
1795 	uint32_t avail = (q->tail - q->head_free_desc) & q->sw_ring_wrap_mask;
1796 	uint16_t i;
1797 	uint16_t dequeued_cbs = 0;
1798 	int ret;
1799 
1800 	for (i = 0; (i < num) && (dequeued_cbs < avail); ++i) {
1801 		ret = dequeue_ldpc_dec_one_op_cb(q, &ops[i], dequeued_cbs);
1802 
1803 		if (ret < 0)
1804 			break;
1805 
1806 		dequeued_cbs += ret;
1807 
1808 		rte_bbdev_log_debug("dequeuing dec ops [%d/%d] | head %d | tail %d",
1809 				dequeued_cbs, num, q->head_free_desc, q->tail);
1810 	}
1811 
1812 	/* Update head */
1813 	q->head_free_desc = (q->head_free_desc + dequeued_cbs) &
1814 			q->sw_ring_wrap_mask;
1815 
1816 	/* Update stats */
1817 	q_data->queue_stats.dequeued_count += i;
1818 
1819 	return i;
1820 }
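
/*
 * Illustrative sketch, not referenced by the driver: the dequeue
 * functions above return only operations whose done bit is set, so a
 * caller polls until every outstanding operation has been collected.
 * dev_id and queue_id are hypothetical; a real application would also
 * handle timeouts.
 */
static __rte_unused void
example_dequeue_all(uint16_t dev_id, uint16_t queue_id,
		struct rte_bbdev_dec_op **ops, uint16_t num)
{
	uint16_t recv = 0;

	while (recv < num)
		recv += rte_bbdev_dequeue_ldpc_dec_ops(dev_id, queue_id,
				&ops[recv], num - recv);
}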
1821 
1822 
1823 /* Initialization Function */
1824 static void
1825 fpga_5gnr_fec_init(struct rte_bbdev *dev, struct rte_pci_driver *drv)
1826 {
1827 	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
1828 
1829 	dev->dev_ops = &fpga_ops;
1830 	dev->enqueue_ldpc_enc_ops = fpga_enqueue_ldpc_enc;
1831 	dev->enqueue_ldpc_dec_ops = fpga_enqueue_ldpc_dec;
1832 	dev->dequeue_ldpc_enc_ops = fpga_dequeue_ldpc_enc;
1833 	dev->dequeue_ldpc_dec_ops = fpga_dequeue_ldpc_dec;
1834 
1835 	((struct fpga_5gnr_fec_device *) dev->data->dev_private)->pf_device =
1836 			!strcmp(drv->driver.name,
1837 					RTE_STR(FPGA_5GNR_FEC_PF_DRIVER_NAME));
1838 	((struct fpga_5gnr_fec_device *) dev->data->dev_private)->mmio_base =
1839 			pci_dev->mem_resource[0].addr;
1840 
1841 	rte_bbdev_log_debug(
1842 			"Init device %s [%s] @ virtaddr %p phyaddr %#"PRIx64,
1843 			drv->driver.name, dev->data->name,
1844 			(void *)pci_dev->mem_resource[0].addr,
1845 			pci_dev->mem_resource[0].phys_addr);
1846 }
1847 
1848 static int
1849 fpga_5gnr_fec_probe(struct rte_pci_driver *pci_drv,
1850 	struct rte_pci_device *pci_dev)
1851 {
1852 	struct rte_bbdev *bbdev = NULL;
1853 	char dev_name[RTE_BBDEV_NAME_MAX_LEN];
1854 
1855 	if (pci_dev == NULL) {
1856 		rte_bbdev_log(ERR, "NULL PCI device");
1857 		return -EINVAL;
1858 	}
1859 
1860 	rte_pci_device_name(&pci_dev->addr, dev_name, sizeof(dev_name));
1861 
	/* Allocate and register a new bbdev device */
1863 	bbdev = rte_bbdev_allocate(pci_dev->device.name);
1864 	if (bbdev == NULL)
1865 		return -ENODEV;
1866 
	/* Allocate device private memory */
1868 	bbdev->data->dev_private = rte_zmalloc_socket(dev_name,
1869 			sizeof(struct fpga_5gnr_fec_device),
1870 			RTE_CACHE_LINE_SIZE,
1871 			pci_dev->device.numa_node);
1872 
1873 	if (bbdev->data->dev_private == NULL) {
		rte_bbdev_log(CRIT,
				"Allocation of %zu bytes for device \"%s\" failed",
				sizeof(struct fpga_5gnr_fec_device), dev_name);
		rte_bbdev_release(bbdev);
		return -ENOMEM;
1879 	}
1880 
1881 	/* Fill HW specific part of device structure */
1882 	bbdev->device = &pci_dev->device;
1883 	bbdev->intr_handle = &pci_dev->intr_handle;
1884 	bbdev->data->socket_id = pci_dev->device.numa_node;
1885 
1886 	/* Invoke FEC FPGA device initialization function */
1887 	fpga_5gnr_fec_init(bbdev, pci_drv);
1888 
1889 	rte_bbdev_log_debug("bbdev id = %u [%s]",
1890 			bbdev->data->dev_id, dev_name);
1891 
1892 	struct fpga_5gnr_fec_device *d = bbdev->data->dev_private;
1893 	uint32_t version_id = fpga_reg_read_32(d->mmio_base,
1894 			FPGA_5GNR_FEC_VERSION_ID);
1895 	rte_bbdev_log(INFO, "FEC FPGA RTL v%u.%u",
1896 		((uint16_t)(version_id >> 16)), ((uint16_t)version_id));
1897 
1898 #ifdef RTE_LIBRTE_BBDEV_DEBUG
1899 	if (!strcmp(pci_drv->driver.name,
1900 			RTE_STR(FPGA_5GNR_FEC_PF_DRIVER_NAME)))
1901 		print_static_reg_debug_info(d->mmio_base);
1902 #endif
1903 	return 0;
1904 }
1905 
1906 static int
1907 fpga_5gnr_fec_remove(struct rte_pci_device *pci_dev)
1908 {
1909 	struct rte_bbdev *bbdev;
1910 	int ret;
1911 	uint8_t dev_id;
1912 
1913 	if (pci_dev == NULL)
1914 		return -EINVAL;
1915 
1916 	/* Find device */
1917 	bbdev = rte_bbdev_get_named_dev(pci_dev->device.name);
1918 	if (bbdev == NULL) {
1919 		rte_bbdev_log(CRIT,
1920 				"Couldn't find HW dev \"%s\" to uninitialise it",
1921 				pci_dev->device.name);
1922 		return -ENODEV;
1923 	}
1924 	dev_id = bbdev->data->dev_id;
1925 
1926 	/* free device private memory before close */
1927 	rte_free(bbdev->data->dev_private);
1928 
1929 	/* Close device */
1930 	ret = rte_bbdev_close(dev_id);
1931 	if (ret < 0)
1932 		rte_bbdev_log(ERR,
1933 				"Device %i failed to close during uninit: %i",
1934 				dev_id, ret);
1935 
1936 	/* release bbdev from library */
1937 	ret = rte_bbdev_release(bbdev);
1938 	if (ret)
1939 		rte_bbdev_log(ERR, "Device %i failed to uninit: %i", dev_id,
1940 				ret);
1941 
1942 	rte_bbdev_log_debug("Destroyed bbdev = %u", dev_id);
1943 
1944 	return 0;
1945 }
1946 
1947 static inline void
1948 set_default_fpga_conf(struct rte_fpga_5gnr_fec_conf *def_conf)
1949 {
1950 	/* clear default configuration before initialization */
1951 	memset(def_conf, 0, sizeof(struct rte_fpga_5gnr_fec_conf));
1952 	/* Set pf mode to true */
1953 	def_conf->pf_mode_en = true;
1954 
1955 	/* Set ratio between UL and DL to 1:1 (unit of weight is 3 CBs) */
1956 	def_conf->ul_bandwidth = 3;
1957 	def_conf->dl_bandwidth = 3;
1958 
1959 	/* Set Load Balance Factor to 64 */
1960 	def_conf->dl_load_balance = 64;
1961 	def_conf->ul_load_balance = 64;
1962 }
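
/*
 * Illustrative sketch, not referenced by the driver: building an
 * explicit VF-mode configuration instead of the PF defaults above.
 * The per-VF queue counts are hypothetical and must respect the
 * 32 UL / 32 DL FPGA limits checked in rte_fpga_5gnr_fec_configure().
 */
static __rte_unused void
example_fill_vf_conf(struct rte_fpga_5gnr_fec_conf *conf)
{
	memset(conf, 0, sizeof(*conf));
	conf->pf_mode_en = false;
	/* 4 UL and 4 DL queues for VF0, 2 and 2 for VF1 */
	conf->vf_ul_queues_number[0] = 4;
	conf->vf_dl_queues_number[0] = 4;
	conf->vf_ul_queues_number[1] = 2;
	conf->vf_dl_queues_number[1] = 2;
	/* Same UL:DL ratio and load balance as the defaults above */
	conf->ul_bandwidth = 3;
	conf->dl_bandwidth = 3;
	conf->ul_load_balance = 64;
	conf->dl_load_balance = 64;
}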
1963 
1964 /* Initial configuration of FPGA 5GNR FEC device */
1965 int
1966 rte_fpga_5gnr_fec_configure(const char *dev_name,
1967 		const struct rte_fpga_5gnr_fec_conf *conf)
1968 {
1969 	uint32_t payload_32, address;
1970 	uint16_t payload_16;
1971 	uint8_t payload_8;
1972 	uint16_t q_id, vf_id, total_q_id, total_ul_q_id, total_dl_q_id;
1973 	struct rte_bbdev *bbdev = rte_bbdev_get_named_dev(dev_name);
1974 	struct rte_fpga_5gnr_fec_conf def_conf;
1975 
1976 	if (bbdev == NULL) {
1977 		rte_bbdev_log(ERR,
1978 				"Invalid dev_name (%s), or device is not yet initialised",
1979 				dev_name);
1980 		return -ENODEV;
1981 	}
1982 
1983 	struct fpga_5gnr_fec_device *d = bbdev->data->dev_private;
1984 
1985 	if (conf == NULL) {
		rte_bbdev_log(ERR,
				"FPGA configuration not provided; loading default configuration");
1988 		set_default_fpga_conf(&def_conf);
1989 		conf = &def_conf;
1990 	}
1991 
1992 	/*
1993 	 * Configure UL:DL ratio.
1994 	 * [7:0]: UL weight
1995 	 * [15:8]: DL weight
1996 	 */
1997 	payload_16 = (conf->dl_bandwidth << 8) | conf->ul_bandwidth;
1998 	address = FPGA_5GNR_FEC_CONFIGURATION;
1999 	fpga_reg_write_16(d->mmio_base, address, payload_16);
2000 
	/* Clear all queue registers */
2002 	payload_32 = FPGA_INVALID_HW_QUEUE_ID;
2003 	for (q_id = 0; q_id < FPGA_TOTAL_NUM_QUEUES; ++q_id) {
2004 		address = (q_id << 2) + FPGA_5GNR_FEC_QUEUE_MAP;
2005 		fpga_reg_write_32(d->mmio_base, address, payload_32);
2006 	}
2007 
	/*
	 * If PF mode is enabled allocate all queues for PF only.
	 *
	 * For VF mode each VF can have a different number of UL and DL
	 * queues. The total number of queues to configure cannot exceed the
	 * FPGA capability of 64 queues - 32 queues for UL and 32 queues for
	 * DL. Queues are assigned contiguously, VF by VF:
	 *
	 * UL queues (Q_ID 0..31):
	 * |                Q_ID              | VF_ID |
	 * |                 0                |   0   |
	 * |                ...               |   0   |
	 * | conf->vf_ul_queues_number[0] - 1 |   0   |
	 * | conf->vf_ul_queues_number[0]     |   1   |
	 * |                ...               |   1   |
	 * |                ...               |  ...  |
	 * |  sum(vf_ul_queues_number[]) - 1  |   7   |
	 *
	 * DL queues (Q_ID 32..63):
	 * |                Q_ID              | VF_ID |
	 * |                 32               |   0   |
	 * |                ...               |   0   |
	 * | 31 + conf->vf_dl_queues_number[0]|   0   |
	 * |                ...               |   1   |
	 * |                ...               |  ...  |
	 * |  31 + sum(vf_dl_queues_number[]) |   7   |
	 *
	 * Example configuration:
	 * conf->vf_ul_queues_number[0] = 4;  -> 4 UL queues for VF0
	 * conf->vf_dl_queues_number[0] = 4;  -> 4 DL queues for VF0
	 * conf->vf_ul_queues_number[1] = 2;  -> 2 UL queues for VF1
	 * conf->vf_dl_queues_number[1] = 2;  -> 2 DL queues for VF1
	 *
	 * UL:
	 * | Q_ID | VF_ID |
	 * |   0  |   0   |
	 * |   1  |   0   |
	 * |   2  |   0   |
	 * |   3  |   0   |
	 * |   4  |   1   |
	 * |   5  |   1   |
	 *
	 * DL:
	 * | Q_ID | VF_ID |
	 * |  32  |   0   |
	 * |  33  |   0   |
	 * |  34  |   0   |
	 * |  35  |   0   |
	 * |  36  |   1   |
	 * |  37  |   1   |
	 */
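	/*
	 * Worked example for the register writes below (hypothetical
	 * values): UL queue 4 owned by VF1 is mapped by writing
	 *   address = (4 << 2) + FPGA_5GNR_FEC_QUEUE_MAP
	 *   payload = ((0x80 + 1) << 16) | 0x1 = 0x00810001
	 * where bit 0 enables the queue and bits [23:16] carry the
	 * routing ID (0x80 + vf_id). DL queues use the same encoding at
	 * q_id offset FPGA_NUM_UL_QUEUES.
	 */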
2062 	if (conf->pf_mode_en) {
2063 		payload_32 = 0x1;
2064 		for (q_id = 0; q_id < FPGA_TOTAL_NUM_QUEUES; ++q_id) {
2065 			address = (q_id << 2) + FPGA_5GNR_FEC_QUEUE_MAP;
2066 			fpga_reg_write_32(d->mmio_base, address, payload_32);
2067 		}
2068 	} else {
2069 		/* Calculate total number of UL and DL queues to configure */
2070 		total_ul_q_id = total_dl_q_id = 0;
2071 		for (vf_id = 0; vf_id < FPGA_5GNR_FEC_NUM_VFS; ++vf_id) {
2072 			total_ul_q_id += conf->vf_ul_queues_number[vf_id];
2073 			total_dl_q_id += conf->vf_dl_queues_number[vf_id];
2074 		}
2075 		total_q_id = total_dl_q_id + total_ul_q_id;
2076 		/*
2077 		 * Check if total number of queues to configure does not exceed
2078 		 * FPGA capabilities (64 queues - 32 UL and 32 DL queues)
2079 		 */
2080 		if ((total_ul_q_id > FPGA_NUM_UL_QUEUES) ||
2081 			(total_dl_q_id > FPGA_NUM_DL_QUEUES) ||
2082 			(total_q_id > FPGA_TOTAL_NUM_QUEUES)) {
2083 			rte_bbdev_log(ERR,
2084 					"FPGA Configuration failed. Too many queues to configure: UL_Q %u, DL_Q %u, FPGA_Q %u",
2085 					total_ul_q_id, total_dl_q_id,
2086 					FPGA_TOTAL_NUM_QUEUES);
2087 			return -EINVAL;
2088 		}
2089 		total_ul_q_id = 0;
2090 		for (vf_id = 0; vf_id < FPGA_5GNR_FEC_NUM_VFS; ++vf_id) {
2091 			for (q_id = 0; q_id < conf->vf_ul_queues_number[vf_id];
2092 					++q_id, ++total_ul_q_id) {
2093 				address = (total_ul_q_id << 2) +
2094 						FPGA_5GNR_FEC_QUEUE_MAP;
2095 				payload_32 = ((0x80 + vf_id) << 16) | 0x1;
2096 				fpga_reg_write_32(d->mmio_base, address,
2097 						payload_32);
2098 			}
2099 		}
2100 		total_dl_q_id = 0;
2101 		for (vf_id = 0; vf_id < FPGA_5GNR_FEC_NUM_VFS; ++vf_id) {
2102 			for (q_id = 0; q_id < conf->vf_dl_queues_number[vf_id];
2103 					++q_id, ++total_dl_q_id) {
2104 				address = ((total_dl_q_id + FPGA_NUM_UL_QUEUES)
2105 						<< 2) + FPGA_5GNR_FEC_QUEUE_MAP;
2106 				payload_32 = ((0x80 + vf_id) << 16) | 0x1;
2107 				fpga_reg_write_32(d->mmio_base, address,
2108 						payload_32);
2109 			}
2110 		}
2111 	}
2112 
2113 	/* Setting Load Balance Factor */
2114 	payload_16 = (conf->dl_load_balance << 8) | (conf->ul_load_balance);
2115 	address = FPGA_5GNR_FEC_LOAD_BALANCE_FACTOR;
2116 	fpga_reg_write_16(d->mmio_base, address, payload_16);
2117 
2118 	/* Setting length of ring descriptor entry */
2119 	payload_16 = FPGA_RING_DESC_ENTRY_LENGTH;
2120 	address = FPGA_5GNR_FEC_RING_DESC_LEN;
2121 	fpga_reg_write_16(d->mmio_base, address, payload_16);
2122 
2123 	/* Setting FLR timeout value */
2124 	payload_16 = conf->flr_time_out;
2125 	address = FPGA_5GNR_FEC_FLR_TIME_OUT;
2126 	fpga_reg_write_16(d->mmio_base, address, payload_16);
2127 
2128 	/* Queue PF/VF mapping table is ready */
2129 	payload_8 = 0x1;
2130 	address = FPGA_5GNR_FEC_QUEUE_PF_VF_MAP_DONE;
2131 	fpga_reg_write_8(d->mmio_base, address, payload_8);
2132 
2133 	rte_bbdev_log_debug("PF FPGA 5GNR FEC configuration complete for %s",
2134 			dev_name);
2135 
2136 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2137 	print_static_reg_debug_info(d->mmio_base);
2138 #endif
2139 	return 0;
2140 }
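
/*
 * Illustrative sketch, not referenced by the driver: typical PF-side
 * usage of rte_fpga_5gnr_fec_configure() with the hypothetical VF
 * configuration built by example_fill_vf_conf() above. The PCI
 * address string is hypothetical; passing NULL instead of &conf
 * loads the defaults from set_default_fpga_conf().
 */
static __rte_unused int
example_configure_device(void)
{
	struct rte_fpga_5gnr_fec_conf conf;

	example_fill_vf_conf(&conf);
	return rte_fpga_5gnr_fec_configure("0000:1f:00.0", &conf);
}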
2141 
2142 /* FPGA 5GNR FEC PCI PF address map */
2143 static struct rte_pci_id pci_id_fpga_5gnr_fec_pf_map[] = {
2144 	{
2145 		RTE_PCI_DEVICE(FPGA_5GNR_FEC_VENDOR_ID,
2146 				FPGA_5GNR_FEC_PF_DEVICE_ID)
2147 	},
2148 	{.device_id = 0},
2149 };
2150 
2151 static struct rte_pci_driver fpga_5gnr_fec_pci_pf_driver = {
2152 	.probe = fpga_5gnr_fec_probe,
2153 	.remove = fpga_5gnr_fec_remove,
2154 	.id_table = pci_id_fpga_5gnr_fec_pf_map,
2155 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING
2156 };
2157 
2158 /* FPGA 5GNR FEC PCI VF address map */
2159 static struct rte_pci_id pci_id_fpga_5gnr_fec_vf_map[] = {
2160 	{
2161 		RTE_PCI_DEVICE(FPGA_5GNR_FEC_VENDOR_ID,
2162 				FPGA_5GNR_FEC_VF_DEVICE_ID)
2163 	},
2164 	{.device_id = 0},
2165 };
2166 
2167 static struct rte_pci_driver fpga_5gnr_fec_pci_vf_driver = {
2168 	.probe = fpga_5gnr_fec_probe,
2169 	.remove = fpga_5gnr_fec_remove,
2170 	.id_table = pci_id_fpga_5gnr_fec_vf_map,
2171 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING
2172 };
2173 
2174 
2175 RTE_PMD_REGISTER_PCI(FPGA_5GNR_FEC_PF_DRIVER_NAME, fpga_5gnr_fec_pci_pf_driver);
2176 RTE_PMD_REGISTER_PCI_TABLE(FPGA_5GNR_FEC_PF_DRIVER_NAME,
2177 		pci_id_fpga_5gnr_fec_pf_map);
2178 RTE_PMD_REGISTER_PCI(FPGA_5GNR_FEC_VF_DRIVER_NAME, fpga_5gnr_fec_pci_vf_driver);
2179 RTE_PMD_REGISTER_PCI_TABLE(FPGA_5GNR_FEC_VF_DRIVER_NAME,
2180 		pci_id_fpga_5gnr_fec_vf_map);
2181