/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include <unistd.h>

#include <rte_common.h>
#include <rte_log.h>
#include <dev_driver.h>
#include <rte_malloc.h>
#include <rte_mempool.h>
#include <rte_errno.h>
#include <rte_pci.h>
#include <bus_pci_driver.h>
#include <rte_byteorder.h>
#include <rte_cycles.h>

#include <rte_bbdev.h>
#include <rte_bbdev_pmd.h>

#include "fpga_5gnr_fec.h"
#include "rte_pmd_fpga_5gnr_fec.h"

#ifdef RTE_LIBRTE_BBDEV_DEBUG
RTE_LOG_REGISTER_DEFAULT(fpga_5gnr_fec_logtype, DEBUG);
#else
RTE_LOG_REGISTER_DEFAULT(fpga_5gnr_fec_logtype, NOTICE);
#endif

#ifdef RTE_LIBRTE_BBDEV_DEBUG

/* Read Ring Control Register of FPGA 5GNR FEC device */
static inline void
print_ring_reg_debug_info(void *mmio_base, uint32_t offset)
{
	rte_bbdev_log_debug(
		"FPGA MMIO base address @ %p | Ring Control Register @ offset = 0x%08"
		PRIx32, mmio_base, offset);
	rte_bbdev_log_debug(
		"RING_BASE_ADDR = 0x%016"PRIx64,
		fpga_reg_read_64(mmio_base, offset));
	rte_bbdev_log_debug(
		"RING_HEAD_ADDR = 0x%016"PRIx64,
		fpga_reg_read_64(mmio_base, offset +
				FPGA_5GNR_FEC_RING_HEAD_ADDR));
	rte_bbdev_log_debug(
		"RING_SIZE = 0x%04"PRIx16,
		fpga_reg_read_16(mmio_base, offset +
				FPGA_5GNR_FEC_RING_SIZE));
	rte_bbdev_log_debug(
		"RING_MISC = 0x%02"PRIx8,
		fpga_reg_read_8(mmio_base, offset +
				FPGA_5GNR_FEC_RING_MISC));
	rte_bbdev_log_debug(
		"RING_ENABLE = 0x%02"PRIx8,
		fpga_reg_read_8(mmio_base, offset +
				FPGA_5GNR_FEC_RING_ENABLE));
	rte_bbdev_log_debug(
		"RING_FLUSH_QUEUE_EN = 0x%02"PRIx8,
		fpga_reg_read_8(mmio_base, offset +
				FPGA_5GNR_FEC_RING_FLUSH_QUEUE_EN));
	rte_bbdev_log_debug(
		"RING_SHADOW_TAIL = 0x%04"PRIx16,
		fpga_reg_read_16(mmio_base, offset +
				FPGA_5GNR_FEC_RING_SHADOW_TAIL));
	rte_bbdev_log_debug(
		"RING_HEAD_POINT = 0x%04"PRIx16,
		fpga_reg_read_16(mmio_base, offset +
				FPGA_5GNR_FEC_RING_HEAD_POINT));
}

/* Read Static Register of FPGA 5GNR FEC device */
static inline void
print_static_reg_debug_info(void *mmio_base)
{
	uint16_t config = fpga_reg_read_16(mmio_base,
			FPGA_5GNR_FEC_CONFIGURATION);
	uint8_t qmap_done = fpga_reg_read_8(mmio_base,
			FPGA_5GNR_FEC_QUEUE_PF_VF_MAP_DONE);
	uint16_t lb_factor = fpga_reg_read_16(mmio_base,
			FPGA_5GNR_FEC_LOAD_BALANCE_FACTOR);
	uint16_t ring_desc_len = fpga_reg_read_16(mmio_base,
			FPGA_5GNR_FEC_RING_DESC_LEN);

	rte_bbdev_log_debug("UL.DL Weights = %u.%u",
			((uint8_t)config), ((uint8_t)(config >> 8)));
	rte_bbdev_log_debug("UL.DL Load Balance = %u.%u",
			((uint8_t)lb_factor), ((uint8_t)(lb_factor >> 8)));
	rte_bbdev_log_debug("Queue-PF/VF Mapping Table = %s",
			(qmap_done > 0) ? "READY" : "NOT-READY");
	rte_bbdev_log_debug("Ring Descriptor Size = %u bytes",
			ring_desc_len*FPGA_RING_DESC_LEN_UNIT_BYTES);
}

/* Print decode DMA Descriptor of FPGA 5GNR decoder device */
static void
print_dma_dec_desc_debug_info(union fpga_dma_desc *desc)
{
	rte_bbdev_log_debug("DMA response desc %p\n"
		"\t-- done(%"PRIu32") | iter(%"PRIu32") | et_pass(%"PRIu32")"
		" | crcb_pass (%"PRIu32") | error(%"PRIu32")\n"
		"\t-- qm_idx(%"PRIu32") | max_iter(%"PRIu32") | "
		"bg_idx (%"PRIu32") | harqin_en(%"PRIu32") | zc(%"PRIu32")\n"
		"\t-- hbstroe_offset(%"PRIu32") | num_null (%"PRIu32") "
		"| irq_en(%"PRIu32")\n"
		"\t-- ncb(%"PRIu32") | desc_idx (%"PRIu32") | "
		"drop_crc24b(%"PRIu32") | RV (%"PRIu32")\n"
		"\t-- crc24b_ind(%"PRIu32") | et_dis (%"PRIu32")\n"
		"\t-- harq_input_length(%"PRIu32") | rm_e(%"PRIu32")\n"
		"\t-- cbs_in_op(%"PRIu32") | in_addr (0x%08"PRIx32"%08"PRIx32")"
		" | out_addr (0x%08"PRIx32"%08"PRIx32")",
		desc,
		(uint32_t)desc->dec_req.done,
		(uint32_t)desc->dec_req.iter,
		(uint32_t)desc->dec_req.et_pass,
		(uint32_t)desc->dec_req.crcb_pass,
		(uint32_t)desc->dec_req.error,
		(uint32_t)desc->dec_req.qm_idx,
		(uint32_t)desc->dec_req.max_iter,
		(uint32_t)desc->dec_req.bg_idx,
		(uint32_t)desc->dec_req.harqin_en,
		(uint32_t)desc->dec_req.zc,
		(uint32_t)desc->dec_req.hbstroe_offset,
		(uint32_t)desc->dec_req.num_null,
		(uint32_t)desc->dec_req.irq_en,
		(uint32_t)desc->dec_req.ncb,
		(uint32_t)desc->dec_req.desc_idx,
		(uint32_t)desc->dec_req.drop_crc24b,
		(uint32_t)desc->dec_req.rv,
		(uint32_t)desc->dec_req.crc24b_ind,
		(uint32_t)desc->dec_req.et_dis,
		(uint32_t)desc->dec_req.harq_input_length,
		(uint32_t)desc->dec_req.rm_e,
		(uint32_t)desc->dec_req.cbs_in_op,
		(uint32_t)desc->dec_req.in_addr_hi,
		(uint32_t)desc->dec_req.in_addr_lw,
		(uint32_t)desc->dec_req.out_addr_hi,
		(uint32_t)desc->dec_req.out_addr_lw);
	uint32_t *word = (uint32_t *) desc;
	rte_bbdev_log_debug("%08"PRIx32"\n%08"PRIx32"\n%08"PRIx32"\n%08"PRIx32"\n"
			"%08"PRIx32"\n%08"PRIx32"\n%08"PRIx32"\n%08"PRIx32"\n",
			word[0], word[1], word[2], word[3],
			word[4], word[5], word[6], word[7]);
}

/* Print encode DMA Descriptor of FPGA 5GNR encoder device */
static void
print_dma_enc_desc_debug_info(union fpga_dma_desc *desc)
{
	rte_bbdev_log_debug("DMA response desc %p\n"
			"done %"PRIu32" error %"PRIu32"\n"
			"K' %"PRIu32" E %"PRIu32" desc %"PRIu32" Z %"PRIu32"\n"
			"BG %"PRIu32" Qm %"PRIu32" CRC %"PRIu32" IRQ %"PRIu32"\n"
			"k0 %"PRIu32" Ncb %"PRIu32" F %"PRIu32"\n",
			desc,
			(uint32_t)desc->enc_req.done,
			(uint32_t)desc->enc_req.error,

			(uint32_t)desc->enc_req.k_,
			(uint32_t)desc->enc_req.rm_e,
			(uint32_t)desc->enc_req.desc_idx,
			(uint32_t)desc->enc_req.zc,

			(uint32_t)desc->enc_req.bg_idx,
			(uint32_t)desc->enc_req.qm_idx,
			(uint32_t)desc->enc_req.crc_en,
			(uint32_t)desc->enc_req.irq_en,

			(uint32_t)desc->enc_req.k0,
			(uint32_t)desc->enc_req.ncb,
			(uint32_t)desc->enc_req.num_null);
	uint32_t *word = (uint32_t *) desc;
	rte_bbdev_log_debug("%08"PRIx32"\n%08"PRIx32"\n%08"PRIx32"\n%08"PRIx32"\n"
			"%08"PRIx32"\n%08"PRIx32"\n%08"PRIx32"\n%08"PRIx32"\n",
			word[0], word[1], word[2], word[3],
			word[4], word[5], word[6], word[7]);
}

#endif

static int
fpga_setup_queues(struct rte_bbdev *dev, uint16_t num_queues, int socket_id)
{
	/* Number of queues bound to a PF/VF */
	uint32_t hw_q_num = 0;
	uint32_t ring_size, payload, address, q_id, offset;
	rte_iova_t phys_addr;
	struct fpga_ring_ctrl_reg ring_reg;
	struct fpga_5gnr_fec_device *fpga_dev = dev->data->dev_private;

	address = FPGA_5GNR_FEC_QUEUE_PF_VF_MAP_DONE;
	if (!(fpga_reg_read_32(fpga_dev->mmio_base, address) & 0x1)) {
		rte_bbdev_log(ERR,
				"Queue-PF/VF mapping is not set! Was PF configured for device (%s) ?",
				dev->data->name);
		return -EPERM;
	}

	/* Clear queue registers structure */
	memset(&ring_reg, 0, sizeof(struct fpga_ring_ctrl_reg));

	/* Scan queue map.
	 * If a queue is valid and mapped to the calling PF/VF, the read
	 * returns its hardware queue ID; otherwise FPGA_INVALID_HW_QUEUE_ID
	 * is returned.
	 */
	for (q_id = 0; q_id < FPGA_TOTAL_NUM_QUEUES; ++q_id) {
		uint32_t hw_q_id = fpga_reg_read_32(fpga_dev->mmio_base,
				FPGA_5GNR_FEC_QUEUE_MAP + (q_id << 2));

		rte_bbdev_log_debug("%s: queue ID: %u, hardware queue ID: %u",
				dev->device->name, q_id, hw_q_id);

		if (hw_q_id != FPGA_INVALID_HW_QUEUE_ID) {
			fpga_dev->q_bound_bit_map |= (1ULL << q_id);
			/* Clear queue register of found queue */
			offset = FPGA_5GNR_FEC_RING_CTRL_REGS +
				(sizeof(struct fpga_ring_ctrl_reg) * q_id);
			fpga_ring_reg_write(fpga_dev->mmio_base,
					offset, ring_reg);
			++hw_q_num;
		}
	}
	if (hw_q_num == 0) {
		rte_bbdev_log(ERR,
			"No HW queues assigned to this device. Probably this is a VF configured for PF mode. Check device configuration!");
		return -ENODEV;
	}

	if (num_queues > hw_q_num) {
		rte_bbdev_log(ERR,
			"Not enough queues for device %s! Requested: %u, available: %u",
			dev->device->name, num_queues, hw_q_num);
		return -EINVAL;
	}

	ring_size = FPGA_RING_MAX_SIZE * sizeof(struct fpga_dma_dec_desc);

	/* Enforce 32 byte alignment */
	RTE_BUILD_BUG_ON((RTE_CACHE_LINE_SIZE % 32) != 0);

	/* Allocate memory for SW descriptor rings */
	fpga_dev->sw_rings = rte_zmalloc_socket(dev->device->driver->name,
			num_queues * ring_size, RTE_CACHE_LINE_SIZE,
			socket_id);
	if (fpga_dev->sw_rings == NULL) {
		rte_bbdev_log(ERR,
				"Failed to allocate memory for %s:%u sw_rings",
				dev->device->driver->name, dev->data->dev_id);
		return -ENOMEM;
	}

	fpga_dev->sw_rings_phys = rte_malloc_virt2iova(fpga_dev->sw_rings);
	fpga_dev->sw_ring_size = ring_size;
	fpga_dev->sw_ring_max_depth = FPGA_RING_MAX_SIZE;

	/* Allocate memory for ring flush status */
	fpga_dev->flush_queue_status = rte_zmalloc_socket(NULL,
			sizeof(uint64_t), RTE_CACHE_LINE_SIZE, socket_id);
	if (fpga_dev->flush_queue_status == NULL) {
		rte_bbdev_log(ERR,
				"Failed to allocate memory for %s:%u flush_queue_status",
				dev->device->driver->name, dev->data->dev_id);
		return -ENOMEM;
	}

	/* Set the flush status address registers */
	phys_addr = rte_malloc_virt2iova(fpga_dev->flush_queue_status);

	address = FPGA_5GNR_FEC_VFQ_FLUSH_STATUS_LW;
	payload = (uint32_t)(phys_addr);
	fpga_reg_write_32(fpga_dev->mmio_base, address, payload);

	address = FPGA_5GNR_FEC_VFQ_FLUSH_STATUS_HI;
	payload = (uint32_t)(phys_addr >> 32);
	fpga_reg_write_32(fpga_dev->mmio_base, address, payload);

	return 0;
}
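
/*
 * Address-map sketch implied by the scan above (inferred from this file,
 * not from a datasheet): the queue map is an array of
 * FPGA_TOTAL_NUM_QUEUES consecutive 32-bit words starting at
 * FPGA_5GNR_FEC_QUEUE_MAP (hence the "q_id << 2" byte offset), and each
 * queue owns one struct fpga_ring_ctrl_reg worth of MMIO space:
 *
 *	ring_ctrl_offset(q) = FPGA_5GNR_FEC_RING_CTRL_REGS +
 *			q * sizeof(struct fpga_ring_ctrl_reg);
 */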

static int
fpga_dev_close(struct rte_bbdev *dev)
{
	struct fpga_5gnr_fec_device *fpga_dev = dev->data->dev_private;

	rte_free(fpga_dev->sw_rings);
	rte_free(fpga_dev->flush_queue_status);

	return 0;
}

static void
fpga_dev_info_get(struct rte_bbdev *dev,
		struct rte_bbdev_driver_info *dev_info)
{
	struct fpga_5gnr_fec_device *d = dev->data->dev_private;
	uint32_t q_id = 0;

	static const struct rte_bbdev_op_cap bbdev_capabilities[] = {
		{
			.type   = RTE_BBDEV_OP_LDPC_ENC,
			.cap.ldpc_enc = {
				.capability_flags =
						RTE_BBDEV_LDPC_RATE_MATCH |
						RTE_BBDEV_LDPC_ENC_INTERRUPTS |
						RTE_BBDEV_LDPC_CRC_24B_ATTACH,
				.num_buffers_src =
						RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
				.num_buffers_dst =
						RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
			}
		},
		{
		.type   = RTE_BBDEV_OP_LDPC_DEC,
		.cap.ldpc_dec = {
			.capability_flags =
				RTE_BBDEV_LDPC_CRC_TYPE_24B_CHECK |
				RTE_BBDEV_LDPC_CRC_TYPE_24B_DROP |
				RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE |
				RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE |
				RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE |
				RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_IN_ENABLE |
				RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_OUT_ENABLE |
				RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK |
				RTE_BBDEV_LDPC_DEC_INTERRUPTS |
				RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_FILLERS,
			.llr_size = 6,
			.llr_decimals = 2,
			.num_buffers_src =
					RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
			.num_buffers_hard_out =
					RTE_BBDEV_LDPC_MAX_CODE_BLOCKS,
			.num_buffers_soft_out = 0,
		}
		},
		RTE_BBDEV_END_OF_CAPABILITIES_LIST()
	};

	/* Check the HARQ DDR size available */
	uint8_t timeout_counter = 0;
	uint32_t harq_buf_ready = fpga_reg_read_32(d->mmio_base,
			FPGA_5GNR_FEC_HARQ_BUF_SIZE_RDY_REGS);
	while (harq_buf_ready != 1) {
		usleep(FPGA_TIMEOUT_CHECK_INTERVAL);
		timeout_counter++;
		harq_buf_ready = fpga_reg_read_32(d->mmio_base,
				FPGA_5GNR_FEC_HARQ_BUF_SIZE_RDY_REGS);
		if (timeout_counter > FPGA_HARQ_RDY_TIMEOUT) {
			rte_bbdev_log(ERR, "HARQ Buffer not ready %d",
					harq_buf_ready);
			harq_buf_ready = 1;
		}
	}
	uint32_t harq_buf_size = fpga_reg_read_32(d->mmio_base,
			FPGA_5GNR_FEC_HARQ_BUF_SIZE_REGS);

	static struct rte_bbdev_queue_conf default_queue_conf;
	default_queue_conf.socket = dev->data->socket_id;
	default_queue_conf.queue_size = FPGA_RING_MAX_SIZE;

	dev_info->driver_name = dev->device->driver->name;
	dev_info->queue_size_lim = FPGA_RING_MAX_SIZE;
	dev_info->hardware_accelerated = true;
	dev_info->min_alignment = 64;
	dev_info->harq_buffer_size = (harq_buf_size >> 10) + 1;
	dev_info->default_queue_conf = default_queue_conf;
	dev_info->capabilities = bbdev_capabilities;
	dev_info->cpu_flag_reqs = NULL;
	dev_info->data_endianness = RTE_LITTLE_ENDIAN;
	dev_info->device_status = RTE_BBDEV_DEV_NOT_SUPPORTED;

	/* Calculate the number of queues assigned to the device */
	dev_info->max_num_queues = 0;
	for (q_id = 0; q_id < FPGA_TOTAL_NUM_QUEUES; ++q_id) {
		uint32_t hw_q_id = fpga_reg_read_32(d->mmio_base,
				FPGA_5GNR_FEC_QUEUE_MAP + (q_id << 2));
		if (hw_q_id != FPGA_INVALID_HW_QUEUE_ID)
			dev_info->max_num_queues++;
	}
	/* Expose the number of queues per operation type */
	dev_info->num_queues[RTE_BBDEV_OP_NONE] = 0;
	dev_info->num_queues[RTE_BBDEV_OP_TURBO_DEC] = 0;
	dev_info->num_queues[RTE_BBDEV_OP_TURBO_ENC] = 0;
	dev_info->num_queues[RTE_BBDEV_OP_LDPC_DEC] = dev_info->max_num_queues / 2;
	dev_info->num_queues[RTE_BBDEV_OP_LDPC_ENC] = dev_info->max_num_queues / 2;
	dev_info->queue_priority[RTE_BBDEV_OP_LDPC_DEC] = 1;
	dev_info->queue_priority[RTE_BBDEV_OP_LDPC_ENC] = 1;
}

/**
 * Find the index of a queue bound to the current PF/VF that is not yet
 * assigned. Return -1 when there is no available queue.
 */
static inline int
fpga_find_free_queue_idx(struct rte_bbdev *dev,
		const struct rte_bbdev_queue_conf *conf)
{
	struct fpga_5gnr_fec_device *d = dev->data->dev_private;
	uint64_t q_idx;
	uint8_t i = 0;
	uint8_t range = FPGA_TOTAL_NUM_QUEUES >> 1;

	if (conf->op_type == RTE_BBDEV_OP_LDPC_ENC) {
		i = FPGA_NUM_DL_QUEUES;
		range = FPGA_TOTAL_NUM_QUEUES;
	}

	for (; i < range; ++i) {
		q_idx = 1ULL << i;
		/* Check if index of queue is bound to current PF/VF */
		if (d->q_bound_bit_map & q_idx)
			/* Check if found queue was not already assigned */
			if (!(d->q_assigned_bit_map & q_idx)) {
				d->q_assigned_bit_map |= q_idx;
				return i;
			}
	}

	rte_bbdev_log(INFO, "Failed to find free queue on %s", dev->data->name);

	return -1;
}

static int
fpga_queue_setup(struct rte_bbdev *dev, uint16_t queue_id,
		const struct rte_bbdev_queue_conf *conf)
{
	uint32_t address, ring_offset;
	struct fpga_5gnr_fec_device *d = dev->data->dev_private;
	struct fpga_queue *q;
	int8_t q_idx;

	/* Check if there is a free queue to assign */
	q_idx = fpga_find_free_queue_idx(dev, conf);
	if (q_idx == -1)
		return -1;

	/* Allocate the queue data structure. */
	q = rte_zmalloc_socket(dev->device->driver->name, sizeof(*q),
			RTE_CACHE_LINE_SIZE, conf->socket);
	if (q == NULL) {
		/* Mark queue as un-assigned */
		d->q_assigned_bit_map &= ~(1ULL << q_idx);
		rte_bbdev_log(ERR, "Failed to allocate queue memory");
		return -ENOMEM;
	}

	q->d = d;
	q->q_idx = q_idx;

	/* Set ring_base_addr */
	q->ring_addr = RTE_PTR_ADD(d->sw_rings, (d->sw_ring_size * queue_id));
	q->ring_ctrl_reg.ring_base_addr = d->sw_rings_phys +
			(d->sw_ring_size * queue_id);

	/* Allocate memory for Completion Head variable */
	q->ring_head_addr = rte_zmalloc_socket(dev->device->driver->name,
			sizeof(uint64_t), RTE_CACHE_LINE_SIZE, conf->socket);
	if (q->ring_head_addr == NULL) {
		/* Mark queue as un-assigned */
		d->q_assigned_bit_map &= ~(1ULL << q_idx);
		rte_free(q);
		rte_bbdev_log(ERR,
				"Failed to allocate memory for %s:%u completion_head",
				dev->device->driver->name, dev->data->dev_id);
		return -ENOMEM;
	}
	/* Set ring_head_addr */
	q->ring_ctrl_reg.ring_head_addr =
			rte_malloc_virt2iova(q->ring_head_addr);

	/* Clear shadow_completion_head */
	q->shadow_completion_head = 0;

	/* Set ring_size */
	if (conf->queue_size > FPGA_RING_MAX_SIZE) {
		/* Mark queue as un-assigned */
		d->q_assigned_bit_map &= ~(1ULL << q_idx);
		rte_free(q->ring_head_addr);
		rte_free(q);
		rte_bbdev_log(ERR,
				"Size of queue is too big %d (MAX: %d) for %s:%u",
				conf->queue_size, FPGA_RING_MAX_SIZE,
				dev->device->driver->name, dev->data->dev_id);
		return -EINVAL;
	}
	q->ring_ctrl_reg.ring_size = conf->queue_size;

	/* Set Miscellaneous FPGA register */
	/* Max iteration number for TTI mitigation - todo */
	q->ring_ctrl_reg.max_ul_dec = 0;
	/* Enable max iteration number for TTI - todo */
	q->ring_ctrl_reg.max_ul_dec_en = 0;

	/* Enable the ring */
	q->ring_ctrl_reg.enable = 1;

	/* Set FPGA head_point and tail registers */
	q->ring_ctrl_reg.head_point = q->tail = 0;

	/* Set FPGA shadow_tail register */
	q->ring_ctrl_reg.shadow_tail = q->tail;

	/* Calculate the ring offset for the found queue */
	ring_offset = FPGA_5GNR_FEC_RING_CTRL_REGS +
			(sizeof(struct fpga_ring_ctrl_reg) * q_idx);

	/* Set FPGA Ring Control Registers */
	fpga_ring_reg_write(d->mmio_base, ring_offset, q->ring_ctrl_reg);

	/* Store MMIO register of shadow_tail */
	address = ring_offset + FPGA_5GNR_FEC_RING_SHADOW_TAIL;
	q->shadow_tail_addr = RTE_PTR_ADD(d->mmio_base, address);

	q->head_free_desc = q->tail;

	/* Set wrap mask */
	q->sw_ring_wrap_mask = conf->queue_size - 1;

	rte_bbdev_log_debug("Setup dev%u q%u: queue_idx=%u",
			dev->data->dev_id, queue_id, q->q_idx);

	dev->data->queues[queue_id].queue_private = q;

	rte_bbdev_log_debug("BBDEV queue[%d] set up for FPGA queue[%d]",
			queue_id, q_idx);

#ifdef RTE_LIBRTE_BBDEV_DEBUG
	/* Read FPGA Ring Control Registers after configuration */
	print_ring_reg_debug_info(d->mmio_base, ring_offset);
#endif
	return 0;
}

static int
fpga_queue_release(struct rte_bbdev *dev, uint16_t queue_id)
{
	struct fpga_5gnr_fec_device *d = dev->data->dev_private;
	struct fpga_queue *q = dev->data->queues[queue_id].queue_private;
	struct fpga_ring_ctrl_reg ring_reg;
	uint32_t offset;

	rte_bbdev_log_debug("FPGA Queue[%d] released", queue_id);

	if (q != NULL) {
		memset(&ring_reg, 0, sizeof(struct fpga_ring_ctrl_reg));
		offset = FPGA_5GNR_FEC_RING_CTRL_REGS +
			(sizeof(struct fpga_ring_ctrl_reg) * q->q_idx);
		/* Disable queue */
		fpga_reg_write_8(d->mmio_base,
				offset + FPGA_5GNR_FEC_RING_ENABLE, 0x00);
		/* Clear queue registers */
		fpga_ring_reg_write(d->mmio_base, offset, ring_reg);

		/* Mark the Queue as un-assigned */
		d->q_assigned_bit_map &= ~(1ULL << q->q_idx);
		rte_free(q->ring_head_addr);
		rte_free(q);
		dev->data->queues[queue_id].queue_private = NULL;
	}

	return 0;
}

/* Function starts a device queue. */
static int
fpga_queue_start(struct rte_bbdev *dev, uint16_t queue_id)
{
	struct fpga_5gnr_fec_device *d = dev->data->dev_private;
	struct fpga_queue *q = dev->data->queues[queue_id].queue_private;
	uint32_t offset = FPGA_5GNR_FEC_RING_CTRL_REGS +
			(sizeof(struct fpga_ring_ctrl_reg) * q->q_idx);
	uint8_t enable = 0x01;
	uint16_t zero = 0x0000;
#ifdef RTE_LIBRTE_BBDEV_DEBUG
	if (d == NULL) {
		rte_bbdev_log(ERR, "Invalid device pointer");
		return -1;
	}
#endif
	if (dev->data->queues[queue_id].queue_private == NULL) {
		rte_bbdev_log(ERR, "Cannot start invalid queue %d", queue_id);
		return -1;
	}

	/* Clear queue head and tail variables */
	q->tail = q->head_free_desc = 0;

	/* Clear FPGA head_point and tail registers */
	fpga_reg_write_16(d->mmio_base, offset + FPGA_5GNR_FEC_RING_HEAD_POINT,
			zero);
	fpga_reg_write_16(d->mmio_base, offset + FPGA_5GNR_FEC_RING_SHADOW_TAIL,
			zero);

	/* Enable queue */
	fpga_reg_write_8(d->mmio_base, offset + FPGA_5GNR_FEC_RING_ENABLE,
			enable);

	rte_bbdev_log_debug("FPGA Queue[%d] started", queue_id);
	return 0;
}

/* Function stops a device queue. */
static int
fpga_queue_stop(struct rte_bbdev *dev, uint16_t queue_id)
{
	struct fpga_5gnr_fec_device *d = dev->data->dev_private;
#ifdef RTE_LIBRTE_BBDEV_DEBUG
	if (d == NULL) {
		rte_bbdev_log(ERR, "Invalid device pointer");
		return -1;
	}
#endif
	struct fpga_queue *q = dev->data->queues[queue_id].queue_private;
	uint32_t offset = FPGA_5GNR_FEC_RING_CTRL_REGS +
			(sizeof(struct fpga_ring_ctrl_reg) * q->q_idx);
	uint8_t payload = 0x01;
	uint8_t counter = 0;
	uint8_t timeout = FPGA_QUEUE_FLUSH_TIMEOUT_US /
			FPGA_TIMEOUT_CHECK_INTERVAL;

	/* Set flush_queue_en bit to trigger queue flushing */
	fpga_reg_write_8(d->mmio_base,
			offset + FPGA_5GNR_FEC_RING_FLUSH_QUEUE_EN, payload);

	/* Check if queue flush is completed.
	 * The FPGA updates the completion flag after queue flushing is done.
	 * If the flag is not updated within 1 ms, it is considered a failure.
	 */
	while (!(*((volatile uint8_t *)d->flush_queue_status + q->q_idx)
			& payload)) {
		if (counter > timeout) {
			rte_bbdev_log(ERR, "FPGA Queue Flush failed for queue %d",
					queue_id);
			return -1;
		}
		usleep(FPGA_TIMEOUT_CHECK_INTERVAL);
		counter++;
	}

	/* Disable queue */
	payload = 0x00;
	fpga_reg_write_8(d->mmio_base, offset + FPGA_5GNR_FEC_RING_ENABLE,
			payload);

	rte_bbdev_log_debug("FPGA Queue[%d] stopped", queue_id);
	return 0;
}

static inline uint16_t
get_queue_id(struct rte_bbdev_data *data, uint8_t q_idx)
{
	uint16_t queue_id;

	for (queue_id = 0; queue_id < data->num_queues; ++queue_id) {
		struct fpga_queue *q = data->queues[queue_id].queue_private;
		if (q != NULL && q->q_idx == q_idx)
			return queue_id;
	}

	return -1;
}
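
/*
 * Note: the -1 "not found" value wraps to UINT16_MAX in the uint16_t
 * return type; the interrupt handler below compares against (uint16_t)-1
 * accordingly.
 */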

/* Interrupt handler triggered by the FPGA device to handle a specific
 * interrupt.
 */
static void
fpga_dev_interrupt_handler(void *cb_arg)
{
	struct rte_bbdev *dev = cb_arg;
	struct fpga_5gnr_fec_device *fpga_dev = dev->data->dev_private;
	struct fpga_queue *q;
	uint64_t ring_head;
	uint64_t q_idx;
	uint16_t queue_id;
	uint8_t i;

	/* Scan queues assigned to this device */
	for (i = 0; i < FPGA_TOTAL_NUM_QUEUES; ++i) {
		q_idx = 1ULL << i;
		if (fpga_dev->q_bound_bit_map & q_idx) {
			queue_id = get_queue_id(dev->data, i);
			if (queue_id == (uint16_t) -1)
				continue;

			/* Check if completion head was changed */
			q = dev->data->queues[queue_id].queue_private;
			ring_head = *q->ring_head_addr;
			if (q->shadow_completion_head != ring_head &&
				q->irq_enable == 1) {
				q->shadow_completion_head = ring_head;
				rte_bbdev_pmd_callback_process(
						dev,
						RTE_BBDEV_EVENT_DEQUEUE,
						&queue_id);
			}
		}
	}
}

static int
fpga_queue_intr_enable(struct rte_bbdev *dev, uint16_t queue_id)
{
	struct fpga_queue *q = dev->data->queues[queue_id].queue_private;

	if (!rte_intr_cap_multiple(dev->intr_handle))
		return -ENOTSUP;

	q->irq_enable = 1;

	return 0;
}

static int
fpga_queue_intr_disable(struct rte_bbdev *dev, uint16_t queue_id)
{
	struct fpga_queue *q = dev->data->queues[queue_id].queue_private;
	q->irq_enable = 0;

	return 0;
}

static int
fpga_intr_enable(struct rte_bbdev *dev)
{
	int ret;
	uint8_t i;

	if (!rte_intr_cap_multiple(dev->intr_handle)) {
		rte_bbdev_log(ERR, "Multiple intr vector is not supported by FPGA (%s)",
				dev->data->name);
		return -ENOTSUP;
	}

	/* Create event file descriptors for each of the 64 queues. Event fds
	 * will be mapped to FPGA IRQs in rte_intr_enable(). This is a 1:1
	 * mapping where the IRQ number is a direct translation to the queue
	 * number.
	 *
	 * Only 63 (FPGA_NUM_INTR_VEC) event fds are created, as
	 * rte_intr_enable() maps the first IRQ to the already created
	 * interrupt event file descriptor (intr_handle->fd).
	 */
	if (rte_intr_efd_enable(dev->intr_handle, FPGA_NUM_INTR_VEC)) {
		rte_bbdev_log(ERR, "Failed to create fds for %u queues",
				dev->data->num_queues);
		return -1;
	}

	/* TODO Each event file descriptor is overwritten by the interrupt
	 * event file descriptor. That descriptor is added to the epoll
	 * observed list. It ensures that the callback function assigned to
	 * that descriptor will be invoked when any FPGA queue issues an
	 * interrupt.
	 */
	for (i = 0; i < FPGA_NUM_INTR_VEC; ++i) {
		if (rte_intr_efds_index_set(dev->intr_handle, i,
				rte_intr_fd_get(dev->intr_handle)))
			return -rte_errno;
	}

	if (rte_intr_vec_list_alloc(dev->intr_handle, "intr_vec",
			dev->data->num_queues)) {
		rte_bbdev_log(ERR, "Failed to allocate %u vectors",
				dev->data->num_queues);
		return -ENOMEM;
	}

	ret = rte_intr_enable(dev->intr_handle);
	if (ret < 0) {
		rte_bbdev_log(ERR,
				"Couldn't enable interrupts for device: %s",
				dev->data->name);
		return ret;
	}

	ret = rte_intr_callback_register(dev->intr_handle,
			fpga_dev_interrupt_handler, dev);
	if (ret < 0) {
		rte_bbdev_log(ERR,
				"Couldn't register interrupt callback for device: %s",
				dev->data->name);
		return ret;
	}

	return 0;
}

static const struct rte_bbdev_ops fpga_ops = {
	.setup_queues = fpga_setup_queues,
	.intr_enable = fpga_intr_enable,
	.close = fpga_dev_close,
	.info_get = fpga_dev_info_get,
	.queue_setup = fpga_queue_setup,
	.queue_stop = fpga_queue_stop,
	.queue_start = fpga_queue_start,
	.queue_release = fpga_queue_release,
	.queue_intr_enable = fpga_queue_intr_enable,
	.queue_intr_disable = fpga_queue_intr_disable
};

static inline void
fpga_dma_enqueue(struct fpga_queue *q, uint16_t num_desc,
		struct rte_bbdev_stats *queue_stats)
{
	uint64_t start_time = 0;
	queue_stats->acc_offload_cycles = 0;

	/* Update tail and shadow_tail register */
	q->tail = (q->tail + num_desc) & q->sw_ring_wrap_mask;

	rte_wmb();

	/* Start time measurement for enqueue function offload. */
	start_time = rte_rdtsc_precise();
	mmio_write_16(q->shadow_tail_addr, q->tail);

	rte_wmb();
	queue_stats->acc_offload_cycles += rte_rdtsc_precise() - start_time;
}
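
/*
 * Illustrative usage sketch (an assumption, not part of the driver API):
 * the enqueue paths below batch descriptor fills and ring the doorbell
 * once per burst, conceptually
 *
 *	for (i = 0; i < num; ++i)
 *		fill_one_desc(q, ops[i], i);	// hypothetical helper
 *	fpga_dma_enqueue(q, num, &q_data->queue_stats);
 *
 * so the single 16-bit MMIO write (the measured offload cost) is not
 * paid per descriptor.
 */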

/* Read flag value 0/1 from bitmap */
static inline bool
check_bit(uint32_t bitmap, uint32_t bitmask)
{
	return bitmap & bitmask;
}

/* Print an error if a descriptor error has occurred.
 * Return 0 on success, 1 on failure.
 */
static inline int
check_desc_error(uint32_t error_code) {
	switch (error_code) {
	case DESC_ERR_NO_ERR:
		return 0;
	case DESC_ERR_K_P_OUT_OF_RANGE:
		rte_bbdev_log(ERR, "Encode block size K' is out of range");
		break;
	case DESC_ERR_Z_C_NOT_LEGAL:
		rte_bbdev_log(ERR, "Zc is illegal");
		break;
	case DESC_ERR_DESC_OFFSET_ERR:
		rte_bbdev_log(ERR,
				"Queue offset does not meet the expectation in the FPGA");
		break;
	case DESC_ERR_DESC_READ_FAIL:
		rte_bbdev_log(ERR, "Unsuccessful completion for descriptor read");
		break;
	case DESC_ERR_DESC_READ_TIMEOUT:
		rte_bbdev_log(ERR, "Descriptor read time-out");
		break;
	case DESC_ERR_DESC_READ_TLP_POISONED:
		rte_bbdev_log(ERR, "Descriptor read TLP poisoned");
		break;
	case DESC_ERR_HARQ_INPUT_LEN:
		rte_bbdev_log(ERR, "HARQ input length is invalid");
		break;
	case DESC_ERR_CB_READ_FAIL:
		rte_bbdev_log(ERR, "Unsuccessful completion for code block");
		break;
	case DESC_ERR_CB_READ_TIMEOUT:
		rte_bbdev_log(ERR, "Code block read time-out");
		break;
	case DESC_ERR_CB_READ_TLP_POISONED:
		rte_bbdev_log(ERR, "Code block read TLP poisoned");
		break;
	case DESC_ERR_HBSTORE_ERR:
		rte_bbdev_log(ERR, "HARQ buffer store offset (hbstroe) exceeds HARQ buffer size.");
		break;
	default:
		rte_bbdev_log(ERR, "Unknown descriptor error code %u",
				error_code);
		break;
	}
	return 1;
}

/* Compute value of k0.
 * Based on 3GPP 38.212 Table 5.4.2.1-2
 * Starting position of different redundancy versions, k0
 */
static inline uint16_t
get_k0(uint16_t n_cb, uint16_t z_c, uint8_t bg, uint8_t rv_index)
{
	uint16_t n = (bg == 1 ? N_ZC_1 : N_ZC_2) * z_c;
	if (rv_index == 0)
		return 0;
	if (z_c == 0)
		return 0;
	if (n_cb == n) {
		if (rv_index == 1)
			return (bg == 1 ? K0_1_1 : K0_1_2) * z_c;
		else if (rv_index == 2)
			return (bg == 1 ? K0_2_1 : K0_2_2) * z_c;
		else
			return (bg == 1 ? K0_3_1 : K0_3_2) * z_c;
	}
	/* LBRM case - includes a division by N */
	if (rv_index == 1)
		return (((bg == 1 ? K0_1_1 : K0_1_2) * n_cb)
				/ n) * z_c;
	else if (rv_index == 2)
		return (((bg == 1 ? K0_2_1 : K0_2_2) * n_cb)
				/ n) * z_c;
	else
		return (((bg == 1 ? K0_3_1 : K0_3_2) * n_cb)
				/ n) * z_c;
}
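
/*
 * Worked example for get_k0() (illustrative values; the K0_x_y constants
 * are assumed to carry the 3GPP 38.212 Table 5.4.2.1-2 numerators, e.g.
 * K0_2_1 = 33 for BG1/rv2): with bg = 1, z_c = 224 and a full circular
 * buffer n_cb = N = 66 * 224 = 14784, rv_index = 2 takes the non-LBRM
 * branch and returns k0 = K0_2_1 * z_c = 33 * 224 = 7392, i.e. half-way
 * through the buffer.
 */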

/**
 * Set DMA descriptor for encode operation (1 Code Block)
 *
 * @param op
 *   Pointer to a single encode operation.
 * @param desc
 *   Pointer to DMA descriptor.
 * @param input
 *   Pointer to input data which will be encoded.
 * @param output
 *   Pointer to output mbuf where the encoded data will be stored.
 * @param k_
 *   K' value (code block size minus filler bits).
 * @param e
 *   E value (length of output in bits).
 * @param in_offset
 *   Input offset in rte_mbuf structure. It is used for calculating the point
 *   where data is starting.
 * @param out_offset
 *   Output offset in rte_mbuf structure. It is used for calculating the point
 *   where output data will be stored.
 * @param desc_offset
 *   Offset of the descriptor in the ring.
 * @param cbs_in_op
 *   Number of CBs contained in one operation.
 */
static inline int
fpga_dma_desc_te_fill(struct rte_bbdev_enc_op *op,
		struct fpga_dma_enc_desc *desc, struct rte_mbuf *input,
		struct rte_mbuf *output, uint16_t k_, uint16_t e,
		uint32_t in_offset, uint32_t out_offset, uint16_t desc_offset,
		uint8_t cbs_in_op)
{
	/* reset */
	desc->done = 0;
	desc->error = 0;
	desc->k_ = k_;
	desc->rm_e = e;
	desc->desc_idx = desc_offset;
	desc->zc = op->ldpc_enc.z_c;
	desc->bg_idx = op->ldpc_enc.basegraph - 1;
	desc->qm_idx = op->ldpc_enc.q_m / 2;
	desc->crc_en = check_bit(op->ldpc_enc.op_flags,
			RTE_BBDEV_LDPC_CRC_24B_ATTACH);
	desc->irq_en = 0;
	desc->k0 = get_k0(op->ldpc_enc.n_cb, op->ldpc_enc.z_c,
			op->ldpc_enc.basegraph, op->ldpc_enc.rv_index);
	desc->ncb = op->ldpc_enc.n_cb;
	desc->num_null = op->ldpc_enc.n_filler;
	/* Set inbound data buffer address */
	desc->in_addr_hi = (uint32_t)(
			rte_pktmbuf_iova_offset(input, in_offset) >> 32);
	desc->in_addr_lw = (uint32_t)(
			rte_pktmbuf_iova_offset(input, in_offset));

	desc->out_addr_hi = (uint32_t)(
			rte_pktmbuf_iova_offset(output, out_offset) >> 32);
	desc->out_addr_lw = (uint32_t)(
			rte_pktmbuf_iova_offset(output, out_offset));
	/* Save software context needed for dequeue */
	desc->op_addr = op;
	/* Set total number of CBs in an op */
	desc->cbs_in_op = cbs_in_op;
	return 0;
}
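
/*
 * Worked example for the field mappings above (values chosen for
 * illustration): 64QAM has q_m = 6, so qm_idx = 6 / 2 = 3, and
 * basegraph 2 gives bg_idx = 2 - 1 = 1. The buffer IOVAs are split into
 * the _hi/_lw pairs because the descriptor stores 64-bit bus addresses
 * as two 32-bit words.
 */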

/**
 * Set DMA descriptor for decode operation (1 Code Block)
 *
 * @param op
 *   Pointer to a single decode operation.
 * @param desc
 *   Pointer to DMA descriptor.
 * @param input
 *   Pointer to input data which will be decoded.
 * @param output
 *   Pointer to output mbuf where the hard output will be stored.
 * @param harq_in_length
 *   Length of the HARQ combined input.
 * @param in_offset
 *   Input offset in rte_mbuf structure. It is used for calculating the point
 *   where data is starting.
 * @param out_offset
 *   Output offset in rte_mbuf structure. It is used for calculating the point
 *   where hard output data will be stored.
 * @param harq_offset
 *   Byte offset into the device HARQ buffer region.
 * @param desc_offset
 *   Offset of the descriptor in the ring.
 * @param cbs_in_op
 *   Number of CBs contained in one operation.
 */
static inline int
fpga_dma_desc_ld_fill(struct rte_bbdev_dec_op *op,
		struct fpga_dma_dec_desc *desc,
		struct rte_mbuf *input, struct rte_mbuf *output,
		uint16_t harq_in_length,
		uint32_t in_offset, uint32_t out_offset,
		uint32_t harq_offset,
		uint16_t desc_offset,
		uint8_t cbs_in_op)
{
	/* reset */
	desc->done = 0;
	desc->error = 0;
	/* Set inbound data buffer address */
	desc->in_addr_hi = (uint32_t)(
			rte_pktmbuf_iova_offset(input, in_offset) >> 32);
	desc->in_addr_lw = (uint32_t)(
			rte_pktmbuf_iova_offset(input, in_offset));
	desc->rm_e = op->ldpc_dec.cb_params.e;
	desc->harq_input_length = harq_in_length;
	desc->et_dis = !check_bit(op->ldpc_dec.op_flags,
			RTE_BBDEV_LDPC_ITERATION_STOP_ENABLE);
	desc->rv = op->ldpc_dec.rv_index;
	desc->crc24b_ind = check_bit(op->ldpc_dec.op_flags,
			RTE_BBDEV_LDPC_CRC_TYPE_24B_CHECK);
	desc->drop_crc24b = check_bit(op->ldpc_dec.op_flags,
			RTE_BBDEV_LDPC_CRC_TYPE_24B_DROP);
	desc->desc_idx = desc_offset;
	desc->ncb = op->ldpc_dec.n_cb;
	desc->num_null = op->ldpc_dec.n_filler;
	desc->hbstroe_offset = harq_offset >> 10;
	desc->zc = op->ldpc_dec.z_c;
	desc->harqin_en = check_bit(op->ldpc_dec.op_flags,
			RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE);
	desc->bg_idx = op->ldpc_dec.basegraph - 1;
	desc->max_iter = op->ldpc_dec.iter_max;
	desc->qm_idx = op->ldpc_dec.q_m / 2;
	desc->out_addr_hi = (uint32_t)(
			rte_pktmbuf_iova_offset(output, out_offset) >> 32);
	desc->out_addr_lw = (uint32_t)(
			rte_pktmbuf_iova_offset(output, out_offset));
	/* Save software context needed for dequeue */
	desc->op_addr = op;
	/* Set total number of CBs in an op */
	desc->cbs_in_op = cbs_in_op;

	return 0;
}
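
/*
 * Note on the HARQ offset above: hbstroe_offset = harq_offset >> 10, so
 * HARQ buffer locations are programmed in 1 kB units; e.g. (illustrative
 * value) a byte offset of 0x80000 (512 kB) into the DDR HARQ region is
 * written to the descriptor as 0x200.
 */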

/* Validates LDPC encoder parameters */
static inline int
validate_ldpc_enc_op(struct rte_bbdev_enc_op *op)
{
	struct rte_bbdev_op_ldpc_enc *ldpc_enc = &op->ldpc_enc;

	if (op->mempool == NULL) {
		rte_bbdev_log(ERR, "Invalid mempool pointer");
		return -1;
	}
	if (ldpc_enc->input.data == NULL) {
		rte_bbdev_log(ERR, "Invalid input pointer");
		return -1;
	}
	if (ldpc_enc->output.data == NULL) {
		rte_bbdev_log(ERR, "Invalid output pointer");
		return -1;
	}
	if (ldpc_enc->input.length == 0) {
		rte_bbdev_log(ERR, "CB size (%u) is null",
				ldpc_enc->input.length);
		return -1;
	}
	if ((ldpc_enc->basegraph > 2) || (ldpc_enc->basegraph == 0)) {
		rte_bbdev_log(ERR,
				"BG (%u) is out of range 1 <= value <= 2",
				ldpc_enc->basegraph);
		return -1;
	}
	if (ldpc_enc->rv_index > 3) {
		rte_bbdev_log(ERR,
				"rv_index (%u) is out of range 0 <= value <= 3",
				ldpc_enc->rv_index);
		return -1;
	}
	if (ldpc_enc->code_block_mode > RTE_BBDEV_CODE_BLOCK) {
		rte_bbdev_log(ERR,
				"code_block_mode (%u) is out of range 0 <= value <= 1",
				ldpc_enc->code_block_mode);
		return -1;
	}

	if (ldpc_enc->input.length >
		RTE_BBDEV_LDPC_MAX_CB_SIZE >> 3) {
		rte_bbdev_log(ERR, "CB size (%u) is too big, max: %d",
				ldpc_enc->input.length,
				RTE_BBDEV_LDPC_MAX_CB_SIZE);
		return -1;
	}
	int z_c = ldpc_enc->z_c;
	/* Check Zc is valid value */
	if ((z_c > 384) || (z_c < 4)) {
		rte_bbdev_log(ERR, "Zc (%u) is out of range", z_c);
		return -1;
	}
	if (z_c > 256) {
		if ((z_c % 32) != 0) {
			rte_bbdev_log(ERR, "Invalid Zc %d", z_c);
			return -1;
		}
	} else if (z_c > 128) {
		if ((z_c % 16) != 0) {
			rte_bbdev_log(ERR, "Invalid Zc %d", z_c);
			return -1;
		}
	} else if (z_c > 64) {
		if ((z_c % 8) != 0) {
			rte_bbdev_log(ERR, "Invalid Zc %d", z_c);
			return -1;
		}
	} else if (z_c > 32) {
		if ((z_c % 4) != 0) {
			rte_bbdev_log(ERR, "Invalid Zc %d", z_c);
			return -1;
		}
	} else if (z_c > 16) {
		if ((z_c % 2) != 0) {
			rte_bbdev_log(ERR, "Invalid Zc %d", z_c);
			return -1;
		}
	}

	int n_filler = ldpc_enc->n_filler;
	int K = (ldpc_enc->basegraph == 1 ? 22 : 10) * ldpc_enc->z_c;
	int Kp = K - n_filler;
	int q_m = ldpc_enc->q_m;
	int n_cb = ldpc_enc->n_cb;
	int N = (ldpc_enc->basegraph == 1 ? N_ZC_1 : N_ZC_2) * z_c;
	int k0 = get_k0(n_cb, z_c, ldpc_enc->basegraph,
			ldpc_enc->rv_index);
	int crc24 = 0;
	int32_t L, Lcb, cw, cw_rm;
	int32_t e = ldpc_enc->cb_params.e;
	if (check_bit(op->ldpc_enc.op_flags,
			RTE_BBDEV_LDPC_CRC_24B_ATTACH))
		crc24 = 24;

	if (K < (int) (ldpc_enc->input.length * 8 + n_filler) + crc24) {
		rte_bbdev_log(ERR, "K and F do not match the input size: K %u, F %u, input length %u",
				K, n_filler, ldpc_enc->input.length);
		return -1;
	}
	if (ldpc_enc->code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK) {
		rte_bbdev_log(ERR, "TB mode not supported");
		return -1;
	}

	/* K' range check */
	if (Kp % 8 > 0) {
		rte_bbdev_log(ERR, "K' not byte aligned %u", Kp);
		return -1;
	}
	if ((crc24 > 0) && (Kp < 292)) {
		rte_bbdev_log(ERR, "Invalid CRC24 for small block %u", Kp);
		return -1;
	}
	if (Kp < 24) {
		rte_bbdev_log(ERR, "K' too small %u", Kp);
		return -1;
	}
	if (n_filler >= (K - 2 * z_c)) {
		rte_bbdev_log(ERR, "K - F invalid %u %u", K, n_filler);
		return -1;
	}
	/* Ncb range check */
	if ((n_cb > N) || (n_cb < 32) || (n_cb <= (Kp - crc24))) {
		rte_bbdev_log(ERR, "Ncb (%u) is out of range K %d N %d", n_cb, K, N);
		return -1;
	}
	/* Qm range check */
	if (!check_bit(op->ldpc_enc.op_flags, RTE_BBDEV_LDPC_INTERLEAVER_BYPASS) &&
			((q_m == 0) || ((q_m > 2) && ((q_m % 2) == 1)) || (q_m > 8))) {
		rte_bbdev_log(ERR, "Qm (%u) is out of range", q_m);
		return -1;
	}
	/* K0 range check */
	if (((k0 % z_c) > 0) || (k0 >= n_cb) || ((k0 >= (Kp - 2 * z_c))
			&& (k0 < (K - 2 * z_c)))) {
		rte_bbdev_log(ERR, "K0 (%u) is out of range", k0);
		return -1;
	}
	/* E range check */
	if (e <= RTE_MAX(32, z_c)) {
		rte_bbdev_log(ERR, "E is too small %"PRIu32"", e);
		return -1;
	}
	if ((e > 0xFFFF)) {
		rte_bbdev_log(ERR, "E is too large for N3000 %"PRIu32" > 64k", e);
		return -1;
	}
	if (q_m > 0) {
		if (e % q_m > 0) {
			rte_bbdev_log(ERR, "E %"PRIu32" not multiple of qm %d", e, q_m);
			return -1;
		}
	}
	/* Code word in RM range check */
	if (k0 > (Kp - 2 * z_c))
		L = k0 + e;
	else
		L = k0 + e + n_filler;
	Lcb = RTE_MIN(L, n_cb);
	if (ldpc_enc->basegraph == 1) {
		if (Lcb <= 25 * z_c)
			cw = 25 * z_c;
		else if (Lcb <= 27 * z_c)
			cw = 27 * z_c;
		else if (Lcb <= 30 * z_c)
			cw = 30 * z_c;
		else if (Lcb <= 33 * z_c)
			cw = 33 * z_c;
		else if (Lcb <= 44 * z_c)
			cw = 44 * z_c;
		else if (Lcb <= 55 * z_c)
			cw = 55 * z_c;
		else
			cw = 66 * z_c;
	} else {
		if (Lcb <= 15 * z_c)
			cw = 15 * z_c;
		else if (Lcb <= 20 * z_c)
			cw = 20 * z_c;
		else if (Lcb <= 25 * z_c)
			cw = 25 * z_c;
		else if (Lcb <= 30 * z_c)
			cw = 30 * z_c;
		else
			cw = 50 * z_c;
	}
	if (n_cb < Kp - 2 * z_c)
		cw_rm = n_cb;
	else if ((Kp - 2 * z_c <= n_cb) && (n_cb < K - 2 * z_c))
		cw_rm = Kp - 2 * z_c;
	else if ((K - 2 * z_c <= n_cb) && (n_cb < cw))
		cw_rm = n_cb - n_filler;
	else
		cw_rm = cw - n_filler;
	if (cw_rm <= 32) {
		rte_bbdev_log(ERR, "Invalid rate matching");
		return -1;
	}
	return 0;
}
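
/*
 * Illustrative parameter set accepted by the checks above (assumed
 * values, for reference only): basegraph 1 with z_c = 224 gives
 * K = 22 * 224 = 4928 bits; with n_filler = 0 and CRC24B attach the
 * input is (4928 - 24) / 8 = 613 bytes; n_cb = N = 66 * 224 = 14784,
 * q_m = 2, rv_index = 0 (so k0 = 0) and e = 3968 then pass every range
 * check, including the final rate-matching bound (cw_rm = 5600 > 32).
 */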

/* Validates LDPC decoder parameters */
static inline int
validate_ldpc_dec_op(struct rte_bbdev_dec_op *op)
{
	struct rte_bbdev_op_ldpc_dec *ldpc_dec = &op->ldpc_dec;
	if (check_bit(ldpc_dec->op_flags,
			RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK))
		return 0;
	if (ldpc_dec->input.data == NULL) {
		rte_bbdev_log(ERR, "Invalid input pointer");
		return -1;
	}
	if (ldpc_dec->hard_output.data == NULL) {
		rte_bbdev_log(ERR, "Invalid output pointer");
		return -1;
	}
	if (ldpc_dec->input.length == 0) {
		rte_bbdev_log(ERR, "input length is zero");
		return -1;
	}
	if ((ldpc_dec->basegraph > 2) || (ldpc_dec->basegraph == 0)) {
		rte_bbdev_log(ERR,
				"BG (%u) is out of range 1 <= value <= 2",
				ldpc_dec->basegraph);
		return -1;
	}
	if (ldpc_dec->iter_max == 0) {
		rte_bbdev_log(ERR,
				"iter_max (%u) is equal to 0",
				ldpc_dec->iter_max);
		return -1;
	}
	if (ldpc_dec->rv_index > 3) {
		rte_bbdev_log(ERR,
				"rv_index (%u) is out of range 0 <= value <= 3",
				ldpc_dec->rv_index);
		return -1;
	}
	if (ldpc_dec->code_block_mode > RTE_BBDEV_CODE_BLOCK) {
		rte_bbdev_log(ERR,
				"code_block_mode (%u) is out of range 0 <= value <= 1",
				ldpc_dec->code_block_mode);
		return -1;
	}
	if (check_bit(op->ldpc_dec.op_flags,
			RTE_BBDEV_LDPC_DECODE_BYPASS)) {
		rte_bbdev_log(ERR, "Avoid LDPC Decode bypass");
		return -1;
	}
	int z_c = ldpc_dec->z_c;
	/* Check Zc is valid value */
	if ((z_c > 384) || (z_c < 4)) {
		rte_bbdev_log(ERR, "Zc (%u) is out of range", z_c);
		return -1;
	}
	if (z_c > 256) {
		if ((z_c % 32) != 0) {
			rte_bbdev_log(ERR, "Invalid Zc %d", z_c);
			return -1;
		}
	} else if (z_c > 128) {
		if ((z_c % 16) != 0) {
			rte_bbdev_log(ERR, "Invalid Zc %d", z_c);
			return -1;
		}
	} else if (z_c > 64) {
		if ((z_c % 8) != 0) {
			rte_bbdev_log(ERR, "Invalid Zc %d", z_c);
			return -1;
		}
	} else if (z_c > 32) {
		if ((z_c % 4) != 0) {
			rte_bbdev_log(ERR, "Invalid Zc %d", z_c);
			return -1;
		}
	} else if (z_c > 16) {
		if ((z_c % 2) != 0) {
			rte_bbdev_log(ERR, "Invalid Zc %d", z_c);
			return -1;
		}
	}

	int n_filler = ldpc_dec->n_filler;
	int K = (ldpc_dec->basegraph == 1 ? 22 : 10) * ldpc_dec->z_c;
	int Kp = K - n_filler;
	int q_m = ldpc_dec->q_m;
	int n_cb = ldpc_dec->n_cb;
	int N = (ldpc_dec->basegraph == 1 ? N_ZC_1 : N_ZC_2) * z_c;
	int k0 = get_k0(n_cb, z_c, ldpc_dec->basegraph,
			ldpc_dec->rv_index);
	int crc24 = 0;
	int32_t L, Lcb, cw, cw_rm;
	int32_t e = ldpc_dec->cb_params.e;
	if (check_bit(op->ldpc_dec.op_flags,
			RTE_BBDEV_LDPC_CRC_TYPE_24B_CHECK))
		crc24 = 24;

	if (ldpc_dec->code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK) {
		rte_bbdev_log(ERR, "TB mode not supported");
		return -1;
	}
	/* Enforce HARQ input length */
	ldpc_dec->harq_combined_input.length = RTE_MIN((uint32_t) n_cb,
			ldpc_dec->harq_combined_input.length);
	if ((ldpc_dec->harq_combined_input.length == 0) &&
			check_bit(ldpc_dec->op_flags,
			RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE)) {
		rte_bbdev_log(ERR,
				"HARQ input length (%u) should not be null",
				ldpc_dec->harq_combined_input.length);
		return -1;
	}
	if ((ldpc_dec->harq_combined_input.length > 0) &&
			!check_bit(ldpc_dec->op_flags,
			RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE)) {
		ldpc_dec->harq_combined_input.length = 0;
	}

	/* K' range check */
	if (Kp % 8 > 0) {
		rte_bbdev_log(ERR, "K' not byte aligned %u", Kp);
		return -1;
	}
	if ((crc24 > 0) && (Kp < 292)) {
		rte_bbdev_log(ERR, "Invalid CRC24 for small block %u", Kp);
		return -1;
	}
	if (Kp < 24) {
		rte_bbdev_log(ERR, "K' too small %u", Kp);
		return -1;
	}
	if (n_filler >= (K - 2 * z_c)) {
		rte_bbdev_log(ERR, "K - F invalid %u %u", K, n_filler);
		return -1;
	}
	/* Ncb range check */
	if (n_cb != N) {
		rte_bbdev_log(ERR, "Ncb (%u) is out of range K %d N %d", n_cb, K, N);
		return -1;
	}
	/* Qm range check */
	if (!check_bit(op->ldpc_dec.op_flags,
			RTE_BBDEV_LDPC_INTERLEAVER_BYPASS) &&
			((q_m == 0) || ((q_m > 2) && ((q_m % 2) == 1))
			|| (q_m > 8))) {
		rte_bbdev_log(ERR, "Qm (%u) is out of range", q_m);
		return -1;
	}
	/* K0 range check */
	if (((k0 % z_c) > 0) || (k0 >= n_cb) || ((k0 >= (Kp - 2 * z_c))
			&& (k0 < (K - 2 * z_c)))) {
		rte_bbdev_log(ERR, "K0 (%u) is out of range", k0);
		return -1;
	}
	/* E range check */
	if (e <= RTE_MAX(32, z_c)) {
		rte_bbdev_log(ERR, "E is too small");
		return -1;
	}
	if ((e > 0xFFFF)) {
		rte_bbdev_log(ERR, "E is too large");
		return -1;
	}
	if (q_m > 0) {
		if (e % q_m > 0) {
			rte_bbdev_log(ERR, "E not multiple of qm %d", q_m);
			return -1;
		}
	}
	/* Code word in RM range check */
	if (k0 > (Kp - 2 * z_c))
		L = k0 + e;
	else
		L = k0 + e + n_filler;
	Lcb = RTE_MIN(n_cb, RTE_MAX(L,
			(int32_t) ldpc_dec->harq_combined_input.length));
	if (ldpc_dec->basegraph == 1) {
		if (Lcb <= 25 * z_c)
			cw = 25 * z_c;
		else if (Lcb <= 27 * z_c)
			cw = 27 * z_c;
		else if (Lcb <= 30 * z_c)
			cw = 30 * z_c;
		else if (Lcb <= 33 * z_c)
			cw = 33 * z_c;
		else if (Lcb <= 44 * z_c)
			cw = 44 * z_c;
		else if (Lcb <= 55 * z_c)
			cw = 55 * z_c;
		else
			cw = 66 * z_c;
	} else {
		if (Lcb <= 15 * z_c)
			cw = 15 * z_c;
		else if (Lcb <= 20 * z_c)
			cw = 20 * z_c;
		else if (Lcb <= 25 * z_c)
			cw = 25 * z_c;
		else if (Lcb <= 30 * z_c)
			cw = 30 * z_c;
		else
			cw = 50 * z_c;
	}
	cw_rm = cw - n_filler;
	if (cw_rm <= 32) {
		rte_bbdev_log(ERR, "Invalid rate matching");
		return -1;
	}
	return 0;
}

static inline char *
mbuf_append(struct rte_mbuf *m_head, struct rte_mbuf *m, uint16_t len)
{
	if (unlikely(len > rte_pktmbuf_tailroom(m)))
		return NULL;

	char *tail = (char *)m->buf_addr + m->data_off + m->data_len;
	m->data_len = (uint16_t)(m->data_len + len);
	m_head->pkt_len = (m_head->pkt_len + len);
	return tail;
}
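
/*
 * Usage sketch (an assumption based on the callers below): mbuf_append()
 * behaves like rte_pktmbuf_append() but appends to segment "m" while
 * growing the packet length of the chain head "m_head"; for a
 * single-segment mbuf both arguments are the same mbuf, e.g.
 *
 *	char *tail = mbuf_append(m, m, out_length);
 */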

static inline void
fpga_mutex_acquisition(struct fpga_queue *q)
{
	uint32_t mutex_ctrl, mutex_read, cnt = 0;
	/* Assign a unique id for the duration of the DDR access */
	q->ddr_mutex_uuid = rand();
	/* Request and wait for acquisition of the mutex */
	mutex_ctrl = (q->ddr_mutex_uuid << 16) + 1;
	do {
		if (cnt > 0)
			usleep(FPGA_TIMEOUT_CHECK_INTERVAL);
		rte_bbdev_log_debug("Acquiring Mutex for %x",
				q->ddr_mutex_uuid);
		fpga_reg_write_32(q->d->mmio_base,
				FPGA_5GNR_FEC_MUTEX,
				mutex_ctrl);
		mutex_read = fpga_reg_read_32(q->d->mmio_base,
				FPGA_5GNR_FEC_MUTEX);
		rte_bbdev_log_debug("Mutex %x cnt %d owner %x",
				mutex_read, cnt, q->ddr_mutex_uuid);
		cnt++;
	} while ((mutex_read >> 16) != q->ddr_mutex_uuid);
}

static inline void
fpga_mutex_free(struct fpga_queue *q)
{
	uint32_t mutex_ctrl = q->ddr_mutex_uuid << 16;
	fpga_reg_write_32(q->d->mmio_base,
			FPGA_5GNR_FEC_MUTEX,
			mutex_ctrl);
}
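
/*
 * Sketch of the DDR mutex register protocol implied above (an assumption
 * from the code, not a datasheet): bits [31:16] hold the owner id and
 * bit [0] the request flag, so a caller effectively does
 *
 *	write(FPGA_5GNR_FEC_MUTEX, (uuid << 16) | 1);	// request
 *	while ((read(FPGA_5GNR_FEC_MUTEX) >> 16) != uuid)
 *		retry;					// other owner
 *	...exclusive DDR access window...
 *	write(FPGA_5GNR_FEC_MUTEX, uuid << 16);		// release
 */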

static inline int
fpga_harq_write_loopback(struct fpga_queue *q,
		struct rte_mbuf *harq_input, uint16_t harq_in_length,
		uint32_t harq_in_offset, uint32_t harq_out_offset)
{
	fpga_mutex_acquisition(q);
	uint32_t out_offset = harq_out_offset;
	uint32_t in_offset = harq_in_offset;
	uint32_t left_length = harq_in_length;
	uint32_t reg_32, increment = 0;
	uint64_t *input = NULL;
	uint32_t last_transaction = left_length
			% FPGA_5GNR_FEC_DDR_WR_DATA_LEN_IN_BYTES;
	uint64_t last_word;

	if (last_transaction > 0)
		left_length -= last_transaction;

	/*
	 * Get HARQ buffer size for each VF/PF: When 0x00, there is no
	 * available DDR space for the corresponding VF/PF.
	 */
	reg_32 = fpga_reg_read_32(q->d->mmio_base,
			FPGA_5GNR_FEC_HARQ_BUF_SIZE_REGS);
	if (reg_32 < harq_in_length) {
		left_length = reg_32;
		rte_bbdev_log(ERR, "HARQ in length > HARQ buffer size");
	}

	input = rte_pktmbuf_mtod_offset(harq_input, uint64_t *, in_offset);

	while (left_length > 0) {
		if (fpga_reg_read_8(q->d->mmio_base,
				FPGA_5GNR_FEC_DDR4_ADDR_RDY_REGS) == 1) {
			fpga_reg_write_32(q->d->mmio_base,
					FPGA_5GNR_FEC_DDR4_WR_ADDR_REGS,
					out_offset);
			fpga_reg_write_64(q->d->mmio_base,
					FPGA_5GNR_FEC_DDR4_WR_DATA_REGS,
					input[increment]);
			left_length -= FPGA_5GNR_FEC_DDR_WR_DATA_LEN_IN_BYTES;
			out_offset += FPGA_5GNR_FEC_DDR_WR_DATA_LEN_IN_BYTES;
			increment++;
			fpga_reg_write_8(q->d->mmio_base,
					FPGA_5GNR_FEC_DDR4_WR_DONE_REGS, 1);
		}
	}
	while (last_transaction > 0) {
		if (fpga_reg_read_8(q->d->mmio_base,
				FPGA_5GNR_FEC_DDR4_ADDR_RDY_REGS) == 1) {
			fpga_reg_write_32(q->d->mmio_base,
					FPGA_5GNR_FEC_DDR4_WR_ADDR_REGS,
					out_offset);
			last_word = input[increment];
			last_word &= (uint64_t)(1 << (last_transaction * 4))
					- 1;
			fpga_reg_write_64(q->d->mmio_base,
					FPGA_5GNR_FEC_DDR4_WR_DATA_REGS,
					last_word);
			fpga_reg_write_8(q->d->mmio_base,
					FPGA_5GNR_FEC_DDR4_WR_DONE_REGS, 1);
			last_transaction = 0;
		}
	}
	fpga_mutex_free(q);
	return 1;
}

static inline int
fpga_harq_read_loopback(struct fpga_queue *q,
		struct rte_mbuf *harq_output, uint16_t harq_in_length,
		uint32_t harq_in_offset, uint32_t harq_out_offset)
{
	fpga_mutex_acquisition(q);
	uint32_t left_length, in_offset = harq_in_offset;
	uint64_t reg;
	uint32_t increment = 0;
	uint64_t *input = NULL;
	uint32_t last_transaction = harq_in_length
			% FPGA_5GNR_FEC_DDR_WR_DATA_LEN_IN_BYTES;

	if (last_transaction > 0)
		harq_in_length += (8 - last_transaction);

	reg = fpga_reg_read_32(q->d->mmio_base,
			FPGA_5GNR_FEC_HARQ_BUF_SIZE_REGS);
	if (reg < harq_in_length) {
		harq_in_length = reg;
		rte_bbdev_log(ERR, "HARQ in length > HARQ buffer size");
	}

	if (!mbuf_append(harq_output, harq_output, harq_in_length)) {
		rte_bbdev_log(ERR, "HARQ output buffer warning %d %d",
				harq_output->buf_len -
				rte_pktmbuf_headroom(harq_output),
				harq_in_length);
		harq_in_length = harq_output->buf_len -
				rte_pktmbuf_headroom(harq_output);
		if (!mbuf_append(harq_output, harq_output, harq_in_length)) {
			rte_bbdev_log(ERR, "HARQ output buffer issue %d %d",
					harq_output->buf_len, harq_in_length);
			return -1;
		}
	}
	left_length = harq_in_length;

	input = rte_pktmbuf_mtod_offset(harq_output, uint64_t *,
					harq_out_offset);

	while (left_length > 0) {
		fpga_reg_write_32(q->d->mmio_base,
			FPGA_5GNR_FEC_DDR4_RD_ADDR_REGS, in_offset);
		fpga_reg_write_8(q->d->mmio_base,
				FPGA_5GNR_FEC_DDR4_RD_DONE_REGS, 1);
		reg = fpga_reg_read_8(q->d->mmio_base,
			FPGA_5GNR_FEC_DDR4_RD_RDY_REGS);
		while (reg != 1) {
			reg = fpga_reg_read_8(q->d->mmio_base,
				FPGA_5GNR_FEC_DDR4_RD_RDY_REGS);
			if (reg == FPGA_DDR_OVERFLOW) {
				rte_bbdev_log(ERR, "Read address overflow!");
				return -1;
			}
		}
		input[increment] = fpga_reg_read_64(q->d->mmio_base,
			FPGA_5GNR_FEC_DDR4_RD_DATA_REGS);
		left_length -= FPGA_5GNR_FEC_DDR_RD_DATA_LEN_IN_BYTES;
		in_offset += FPGA_5GNR_FEC_DDR_WR_DATA_LEN_IN_BYTES;
		increment++;
		fpga_reg_write_8(q->d->mmio_base,
				FPGA_5GNR_FEC_DDR4_RD_DONE_REGS, 0);
	}
	fpga_mutex_free(q);
	return 1;
}
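
/*
 * Sketch of the DDR read handshake used above (an assumption mirroring
 * the write path): program RD_ADDR, pulse RD_DONE = 1, poll RD_RDY until
 * it reads 1 (FPGA_DDR_OVERFLOW aborts), fetch the 64-bit word from
 * RD_DATA, then clear RD_DONE before the next beat.
 */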
1659 
1660 static inline int
1661 enqueue_ldpc_enc_one_op_cb(struct fpga_queue *q, struct rte_bbdev_enc_op *op,
1662 		uint16_t desc_offset)
1663 {
1664 	union fpga_dma_desc *desc;
1665 	int ret;
1666 	uint8_t c, crc24_bits = 0;
1667 	struct rte_bbdev_op_ldpc_enc *enc = &op->ldpc_enc;
1668 	uint16_t in_offset = enc->input.offset;
1669 	uint16_t out_offset = enc->output.offset;
1670 	struct rte_mbuf *m_in = enc->input.data;
1671 	struct rte_mbuf *m_out = enc->output.data;
1672 	struct rte_mbuf *m_out_head = enc->output.data;
1673 	uint32_t in_length, out_length, e;
1674 	uint16_t total_left = enc->input.length;
1675 	uint16_t ring_offset;
1676 	uint16_t K, k_;
1677 
1678 
1679 	if (validate_ldpc_enc_op(op) == -1) {
1680 		rte_bbdev_log(ERR, "LDPC encoder validation rejected");
1681 		return -EINVAL;
1682 	}
1683 
1684 	/* Clear op status */
1685 	op->status = 0;
1686 
1687 	if (m_in == NULL || m_out == NULL) {
1688 		rte_bbdev_log(ERR, "Invalid mbuf pointer");
1689 		op->status = 1 << RTE_BBDEV_DATA_ERROR;
1690 		return -EINVAL;
1691 	}
1692 
1693 	if (enc->op_flags & RTE_BBDEV_LDPC_CRC_24B_ATTACH)
1694 		crc24_bits = 24;
1695 
1696 	if (enc->code_block_mode == RTE_BBDEV_TRANSPORT_BLOCK) {
1697 		/* For Transport Block mode */
1698 		/* FIXME */
1699 		c = enc->tb_params.c;
1700 		e = enc->tb_params.ea;
1701 	} else { /* For Code Block mode */
1702 		c = 1;
1703 		e = enc->cb_params.e;
1704 	}
1705 
1706 	/* Update total_left */
1707 	K = (enc->basegraph == 1 ? 22 : 10) * enc->z_c;
1708 	k_ = K - enc->n_filler;
1709 	in_length = (k_ - crc24_bits) >> 3;
1710 	out_length = (e + 7) >> 3;
1711 
1712 	total_left = rte_pktmbuf_data_len(m_in) - in_offset;
1713 
1714 	/* Update offsets */
1715 	if (total_left != in_length) {
1716 		op->status |= 1 << RTE_BBDEV_DATA_ERROR;
1717 		rte_bbdev_log(ERR,
1718 				"Mismatch between mbuf length and included CBs sizes %d",
1719 				total_left);
1720 	}
1721 
1722 	mbuf_append(m_out_head, m_out, out_length);
1723 
1724 	/* Offset into the ring */
1725 	ring_offset = ((q->tail + desc_offset) & q->sw_ring_wrap_mask);
1726 	/* Setup DMA Descriptor */
1727 	desc = q->ring_addr + ring_offset;
1728 
1729 	ret = fpga_dma_desc_te_fill(op, &desc->enc_req, m_in, m_out,
1730 			k_, e, in_offset, out_offset, ring_offset, c);
1731 	if (unlikely(ret < 0))
1732 		return ret;
1733 
1734 	/* Update lengths */
1735 	total_left -= in_length;
1736 	op->ldpc_enc.output.length += out_length;
1737 
1738 	if (total_left > 0) {
		rte_bbdev_log(ERR,
				"Mismatch between mbuf length and included CB sizes: %u bytes left, CB len %u",
				total_left, in_length);
1742 		return -1;
1743 	}
1744 
1745 #ifdef RTE_LIBRTE_BBDEV_DEBUG
1746 	print_dma_enc_desc_debug_info(desc);
1747 #endif
1748 	return 1;
1749 }
1750 
1751 static inline int
1752 enqueue_ldpc_dec_one_op_cb(struct fpga_queue *q, struct rte_bbdev_dec_op *op,
1753 		uint16_t desc_offset)
1754 {
1755 	union fpga_dma_desc *desc;
1756 	int ret;
1757 	uint16_t ring_offset;
1758 	uint8_t c;
1759 	uint16_t e, in_length, out_length, k0, l, seg_total_left, sys_cols;
1760 	uint16_t K, parity_offset, harq_in_length = 0, harq_out_length = 0;
1761 	uint16_t crc24_overlap = 0;
1762 	struct rte_bbdev_op_ldpc_dec *dec = &op->ldpc_dec;
1763 	struct rte_mbuf *m_in = dec->input.data;
1764 	struct rte_mbuf *m_out = dec->hard_output.data;
1765 	struct rte_mbuf *m_out_head = dec->hard_output.data;
1766 	uint16_t in_offset = dec->input.offset;
1767 	uint16_t out_offset = dec->hard_output.offset;
1768 	uint32_t harq_offset = 0;
1769 
1770 	if (validate_ldpc_dec_op(op) == -1) {
1771 		rte_bbdev_log(ERR, "LDPC decoder validation rejected");
1772 		return -EINVAL;
1773 	}
1774 
1775 	/* Clear op status */
1776 	op->status = 0;
1777 
1778 	/* Setup DMA Descriptor */
1779 	ring_offset = ((q->tail + desc_offset) & q->sw_ring_wrap_mask);
1780 	desc = q->ring_addr + ring_offset;
1781 
1782 	if (check_bit(dec->op_flags,
1783 			RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK)) {
1784 		struct rte_mbuf *harq_in = dec->harq_combined_input.data;
1785 		struct rte_mbuf *harq_out = dec->harq_combined_output.data;
1786 		harq_in_length = dec->harq_combined_input.length;
1787 		uint32_t harq_in_offset = dec->harq_combined_input.offset;
1788 		uint32_t harq_out_offset = dec->harq_combined_output.offset;
1789 
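		/*
		 * HARQ memory loopback: OUT_ENABLE pushes the HARQ input into
		 * FPGA DDR through the write interface, IN_ENABLE reads it
		 * back through the read interface; exactly one of the two
		 * flags is expected here.
		 */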
1790 		if (check_bit(dec->op_flags,
1791 				RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_OUT_ENABLE
1792 				)) {
1793 			ret = fpga_harq_write_loopback(q, harq_in,
1794 					harq_in_length, harq_in_offset,
1795 					harq_out_offset);
1796 		} else if (check_bit(dec->op_flags,
1797 				RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_IN_ENABLE
1798 				)) {
1799 			ret = fpga_harq_read_loopback(q, harq_out,
1800 				harq_in_length, harq_in_offset,
1801 				harq_out_offset);
1802 			dec->harq_combined_output.length = harq_in_length;
1803 		} else {
			rte_bbdev_log(ERR, "Invalid HARQ loopback op flags");
1805 			ret = -1;
1806 		}
1807 		/* Set descriptor for dequeue */
1808 		desc->dec_req.done = 1;
1809 		desc->dec_req.error = 0;
1810 		desc->dec_req.op_addr = op;
1811 		desc->dec_req.cbs_in_op = 1;
1812 		/* Mark this dummy descriptor to be dropped by HW */
1813 		desc->dec_req.desc_idx = (ring_offset + 1)
1814 				& q->sw_ring_wrap_mask;
1815 		return ret; /* Error or number of CB */
1816 	}
1817 
1818 	if (m_in == NULL || m_out == NULL) {
1819 		rte_bbdev_log(ERR, "Invalid mbuf pointer");
1820 		op->status = 1 << RTE_BBDEV_DATA_ERROR;
1821 		return -1;
1822 	}
1823 
1824 	c = 1;
1825 	e = dec->cb_params.e;
1826 
1827 	if (check_bit(dec->op_flags, RTE_BBDEV_LDPC_CRC_TYPE_24B_DROP))
1828 		crc24_overlap = 24;
1829 
1830 	sys_cols = (dec->basegraph == 1) ? 22 : 10;
1831 	K = sys_cols * dec->z_c;
1832 	parity_offset = K - 2 * dec->z_c;
1833 
1834 	out_length = ((K - crc24_overlap - dec->n_filler) >> 3);
1835 	in_length = e;
1836 	seg_total_left = dec->input.length;
1837 
1838 	if (check_bit(dec->op_flags, RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE)) {
1839 		harq_in_length = RTE_MIN(dec->harq_combined_input.length,
1840 				(uint32_t)dec->n_cb);
1841 	}
1842 
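	/*
	 * Size the HARQ combined output: k0 is the rate-matching start
	 * position for this rv_index (TS 38.212), l approximates the highest
	 * circular-buffer position written by this transmission, and the
	 * result is clamped to the circular buffer size n_cb.
	 */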
1843 	if (check_bit(dec->op_flags, RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE)) {
1844 		k0 = get_k0(dec->n_cb, dec->z_c,
1845 				dec->basegraph, dec->rv_index);
1846 		if (k0 > parity_offset)
1847 			l = k0 + e;
1848 		else
1849 			l = k0 + e + dec->n_filler;
1850 		harq_out_length = RTE_MIN(RTE_MAX(harq_in_length, l),
1851 				dec->n_cb);
1852 		dec->harq_combined_output.length = harq_out_length;
1853 	}
1854 
1855 	mbuf_append(m_out_head, m_out, out_length);
1856 	if (check_bit(dec->op_flags, RTE_BBDEV_LDPC_HQ_COMBINE_IN_ENABLE))
1857 		harq_offset = dec->harq_combined_input.offset;
1858 	else if (check_bit(dec->op_flags, RTE_BBDEV_LDPC_HQ_COMBINE_OUT_ENABLE))
1859 		harq_offset = dec->harq_combined_output.offset;
1860 
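	/* The HARQ offset must be 1 KB aligned (lower 10 bits clear). */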
1861 	if ((harq_offset & 0x3FF) > 0) {
		rte_bbdev_log(ERR, "Invalid HARQ offset %u", harq_offset);
1863 		op->status = 1 << RTE_BBDEV_DATA_ERROR;
1864 		return -1;
1865 	}
1866 
1867 	ret = fpga_dma_desc_ld_fill(op, &desc->dec_req, m_in, m_out,
1868 		harq_in_length, in_offset, out_offset, harq_offset,
1869 		ring_offset, c);
1870 	if (unlikely(ret < 0))
1871 		return ret;
1872 	/* Update lengths */
1873 	seg_total_left -= in_length;
1874 	op->ldpc_dec.hard_output.length += out_length;
1875 	if (seg_total_left > 0) {
		rte_bbdev_log(ERR,
				"Mismatch between mbuf length and included CB sizes: %u bytes left, CB len %u",
				seg_total_left, in_length);
1879 		return -1;
1880 	}
1881 
1882 #ifdef RTE_LIBRTE_BBDEV_DEBUG
1883 	print_dma_dec_desc_debug_info(desc);
1884 #endif
1885 
1886 	return 1;
1887 }
1888 
1889 static uint16_t
1890 fpga_enqueue_ldpc_enc(struct rte_bbdev_queue_data *q_data,
1891 		struct rte_bbdev_enc_op **ops, uint16_t num)
1892 {
1893 	uint16_t i, total_enqueued_cbs = 0;
1894 	int32_t avail;
1895 	int enqueued_cbs;
1896 	struct fpga_queue *q = q_data->queue_private;
1897 	union fpga_dma_desc *desc;
1898 
	/* Return early if the queue ring is full */
1900 	if (unlikely(((q->tail + 1) & q->sw_ring_wrap_mask) ==
1901 			q->head_free_desc))
1902 		return 0;
1903 
	/* Calculate available ring space */
1905 	avail = (q->head_free_desc > q->tail) ?
1906 		q->head_free_desc - q->tail - 1 :
1907 		q->ring_ctrl_reg.ring_size + q->head_free_desc - q->tail - 1;
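	/*
	 * One descriptor is deliberately kept unused so that
	 * tail == head_free_desc can only mean an empty ring, hence
	 * the "- 1" in both branches.
	 */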
1908 
1909 	for (i = 0; i < num; ++i) {
1910 
1911 		/* Check if there is available space for further
1912 		 * processing
1913 		 */
1914 		if (unlikely(avail - 1 < 0))
1915 			break;
1916 		avail -= 1;
1917 		enqueued_cbs = enqueue_ldpc_enc_one_op_cb(q, ops[i],
1918 				total_enqueued_cbs);
1919 
1920 		if (enqueued_cbs < 0)
1921 			break;
1922 
1923 		total_enqueued_cbs += enqueued_cbs;
1924 
1925 		rte_bbdev_log_debug("enqueuing enc ops [%d/%d] | head %d | tail %d",
1926 				total_enqueued_cbs, num,
1927 				q->head_free_desc, q->tail);
1928 	}
1929 
1930 	/* Set interrupt bit for last CB in enqueued ops. FPGA issues interrupt
1931 	 * only when all previous CBs were already processed.
1932 	 */
1933 	desc = q->ring_addr + ((q->tail + total_enqueued_cbs - 1)
1934 			& q->sw_ring_wrap_mask);
1935 	desc->enc_req.irq_en = q->irq_enable;
1936 
1937 	fpga_dma_enqueue(q, total_enqueued_cbs, &q_data->queue_stats);
1938 
1939 	/* Update stats */
1940 	q_data->queue_stats.enqueued_count += i;
1941 	q_data->queue_stats.enqueue_err_count += num - i;
1942 
1943 	return i;
1944 }
1945 
1946 static uint16_t
1947 fpga_enqueue_ldpc_dec(struct rte_bbdev_queue_data *q_data,
1948 		struct rte_bbdev_dec_op **ops, uint16_t num)
1949 {
1950 	uint16_t i, total_enqueued_cbs = 0;
1951 	int32_t avail;
1952 	int enqueued_cbs;
1953 	struct fpga_queue *q = q_data->queue_private;
1954 	union fpga_dma_desc *desc;
1955 
	/* Return early if the queue ring is full */
1957 	if (unlikely(((q->tail + 1) & q->sw_ring_wrap_mask) ==
1958 			q->head_free_desc))
1959 		return 0;
1960 
	/* Calculate available ring space */
1962 	avail = (q->head_free_desc > q->tail) ?
1963 		q->head_free_desc - q->tail - 1 :
1964 		q->ring_ctrl_reg.ring_size + q->head_free_desc - q->tail - 1;
1965 
1966 	for (i = 0; i < num; ++i) {
1967 
1968 		/* Check if there is available space for further
1969 		 * processing
1970 		 */
1971 		if (unlikely(avail - 1 < 0))
1972 			break;
1973 		avail -= 1;
1974 		enqueued_cbs = enqueue_ldpc_dec_one_op_cb(q, ops[i],
1975 				total_enqueued_cbs);
1976 
1977 		if (enqueued_cbs < 0)
1978 			break;
1979 
1980 		total_enqueued_cbs += enqueued_cbs;
1981 
1982 		rte_bbdev_log_debug("enqueuing dec ops [%d/%d] | head %d | tail %d",
1983 				total_enqueued_cbs, num,
1984 				q->head_free_desc, q->tail);
1985 	}
1986 
1987 	/* Update stats */
1988 	q_data->queue_stats.enqueued_count += i;
1989 	q_data->queue_stats.enqueue_err_count += num - i;
1990 
1991 	/* Set interrupt bit for last CB in enqueued ops. FPGA issues interrupt
1992 	 * only when all previous CBs were already processed.
1993 	 */
1994 	desc = q->ring_addr + ((q->tail + total_enqueued_cbs - 1)
1995 			& q->sw_ring_wrap_mask);
1996 	desc->enc_req.irq_en = q->irq_enable;
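	/*
	 * Note: irq_en is written through enc_req on the decode path too;
	 * this relies on irq_en occupying the same bit position in both
	 * descriptor layouts of the union (an assumption about the layout).
	 */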
1997 	fpga_dma_enqueue(q, total_enqueued_cbs, &q_data->queue_stats);
1998 	return i;
1999 }
2000 
2001 
2002 static inline int
2003 dequeue_ldpc_enc_one_op_cb(struct fpga_queue *q, struct rte_bbdev_enc_op **op,
2004 		uint16_t desc_offset)
2005 {
2006 	union fpga_dma_desc *desc;
2007 	int desc_error;
2008 	/* Set current desc */
2009 	desc = q->ring_addr + ((q->head_free_desc + desc_offset)
2010 			& q->sw_ring_wrap_mask);
2011 
	/* Check if done */
2013 	if (desc->enc_req.done == 0)
2014 		return -1;
2015 
	/* Make sure the done bit is read before the rest of the response */
2017 	rte_smp_rmb();
2018 
2019 	rte_bbdev_log_debug("DMA response desc %p", desc);
2020 
2021 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2022 	print_dma_enc_desc_debug_info(desc);
2023 #endif
2024 
2025 	*op = desc->enc_req.op_addr;
2026 	/* Check the descriptor error field, return 1 on error */
2027 	desc_error = check_desc_error(desc->enc_req.error);
2028 	(*op)->status = desc_error << RTE_BBDEV_DATA_ERROR;
2029 
2030 	return 1;
2031 }
2032 
2033 
2034 static inline int
2035 dequeue_ldpc_dec_one_op_cb(struct fpga_queue *q, struct rte_bbdev_dec_op **op,
2036 		uint16_t desc_offset)
2037 {
2038 	union fpga_dma_desc *desc;
2039 	int desc_error;
2040 	/* Set descriptor */
2041 	desc = q->ring_addr + ((q->head_free_desc + desc_offset)
2042 			& q->sw_ring_wrap_mask);
2043 
2044 	/* Verify done bit is set */
2045 	if (desc->dec_req.done == 0)
2046 		return -1;
2047 
	/* Make sure the done bit is read before the rest of the response */
2049 	rte_smp_rmb();
2050 
2051 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2052 	print_dma_dec_desc_debug_info(desc);
2053 #endif
2054 
2055 	*op = desc->dec_req.op_addr;
2056 
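	/*
	 * Loopback ops carry no FEC result; report success and skip the
	 * iteration/CRC/syndrome status handling below.
	 */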
2057 	if (check_bit((*op)->ldpc_dec.op_flags,
2058 			RTE_BBDEV_LDPC_INTERNAL_HARQ_MEMORY_LOOPBACK)) {
2059 		(*op)->status = 0;
2060 		return 1;
2061 	}
2062 
	/* The FPGA reports the iteration count minus 1; add it back */
2064 	(*op)->ldpc_dec.iter_count = desc->dec_req.iter + 1;
2065 	/* CRC Check criteria */
2066 	if (desc->dec_req.crc24b_ind && !(desc->dec_req.crcb_pass))
2067 		(*op)->status = 1 << RTE_BBDEV_CRC_ERROR;
2068 	/* et_pass = 0 when decoder fails */
2069 	(*op)->status |= !(desc->dec_req.et_pass) << RTE_BBDEV_SYNDROME_ERROR;
2070 	/* Check the descriptor error field, return 1 on error */
2071 	desc_error = check_desc_error(desc->dec_req.error);
2072 	(*op)->status |= desc_error << RTE_BBDEV_DATA_ERROR;
2073 	return 1;
2074 }
2075 
2076 static uint16_t
2077 fpga_dequeue_ldpc_enc(struct rte_bbdev_queue_data *q_data,
2078 		struct rte_bbdev_enc_op **ops, uint16_t num)
2079 {
2080 	struct fpga_queue *q = q_data->queue_private;
2081 	uint32_t avail = (q->tail - q->head_free_desc) & q->sw_ring_wrap_mask;
2082 	uint16_t i;
2083 	uint16_t dequeued_cbs = 0;
2084 	int ret;
2085 
2086 	for (i = 0; (i < num) && (dequeued_cbs < avail); ++i) {
2087 		ret = dequeue_ldpc_enc_one_op_cb(q, &ops[i], dequeued_cbs);
2088 
2089 		if (ret < 0)
2090 			break;
2091 
2092 		dequeued_cbs += ret;
2093 
2094 		rte_bbdev_log_debug("dequeuing enc ops [%d/%d] | head %d | tail %d",
2095 				dequeued_cbs, num, q->head_free_desc, q->tail);
2096 	}
2097 
2098 	/* Update head */
2099 	q->head_free_desc = (q->head_free_desc + dequeued_cbs) &
2100 			q->sw_ring_wrap_mask;
2101 
2102 	/* Update stats */
2103 	q_data->queue_stats.dequeued_count += i;
2104 
2105 	return i;
2106 }
2107 
2108 static uint16_t
2109 fpga_dequeue_ldpc_dec(struct rte_bbdev_queue_data *q_data,
2110 		struct rte_bbdev_dec_op **ops, uint16_t num)
2111 {
2112 	struct fpga_queue *q = q_data->queue_private;
2113 	uint32_t avail = (q->tail - q->head_free_desc) & q->sw_ring_wrap_mask;
2114 	uint16_t i;
2115 	uint16_t dequeued_cbs = 0;
2116 	int ret;
2117 
2118 	for (i = 0; (i < num) && (dequeued_cbs < avail); ++i) {
2119 		ret = dequeue_ldpc_dec_one_op_cb(q, &ops[i], dequeued_cbs);
2120 
2121 		if (ret < 0)
2122 			break;
2123 
2124 		dequeued_cbs += ret;
2125 
2126 		rte_bbdev_log_debug("dequeuing dec ops [%d/%d] | head %d | tail %d",
2127 				dequeued_cbs, num, q->head_free_desc, q->tail);
2128 	}
2129 
2130 	/* Update head */
2131 	q->head_free_desc = (q->head_free_desc + dequeued_cbs) &
2132 			q->sw_ring_wrap_mask;
2133 
2134 	/* Update stats */
2135 	q_data->queue_stats.dequeued_count += i;
2136 
2137 	return i;
2138 }
2139 
2140 
2141 /* Initialization Function */
2142 static void
2143 fpga_5gnr_fec_init(struct rte_bbdev *dev, struct rte_pci_driver *drv)
2144 {
2145 	struct rte_pci_device *pci_dev = RTE_DEV_TO_PCI(dev->device);
2146 
2147 	dev->dev_ops = &fpga_ops;
2148 	dev->enqueue_ldpc_enc_ops = fpga_enqueue_ldpc_enc;
2149 	dev->enqueue_ldpc_dec_ops = fpga_enqueue_ldpc_dec;
2150 	dev->dequeue_ldpc_enc_ops = fpga_dequeue_ldpc_enc;
2151 	dev->dequeue_ldpc_dec_ops = fpga_dequeue_ldpc_dec;
2152 
2153 	((struct fpga_5gnr_fec_device *) dev->data->dev_private)->pf_device =
2154 			!strcmp(drv->driver.name,
2155 					RTE_STR(FPGA_5GNR_FEC_PF_DRIVER_NAME));
2156 	((struct fpga_5gnr_fec_device *) dev->data->dev_private)->mmio_base =
2157 			pci_dev->mem_resource[0].addr;
2158 
2159 	rte_bbdev_log_debug(
2160 			"Init device %s [%s] @ virtaddr %p phyaddr %#"PRIx64,
2161 			drv->driver.name, dev->data->name,
2162 			(void *)pci_dev->mem_resource[0].addr,
2163 			pci_dev->mem_resource[0].phys_addr);
2164 }
2165 
2166 static int
2167 fpga_5gnr_fec_probe(struct rte_pci_driver *pci_drv,
2168 	struct rte_pci_device *pci_dev)
2169 {
2170 	struct rte_bbdev *bbdev = NULL;
2171 	char dev_name[RTE_BBDEV_NAME_MAX_LEN];
2172 
2173 	if (pci_dev == NULL) {
2174 		rte_bbdev_log(ERR, "NULL PCI device");
2175 		return -EINVAL;
2176 	}
2177 
2178 	rte_pci_device_name(&pci_dev->addr, dev_name, sizeof(dev_name));
2179 
2180 	/* Allocate memory to be used privately by drivers */
2181 	bbdev = rte_bbdev_allocate(pci_dev->device.name);
2182 	if (bbdev == NULL)
2183 		return -ENODEV;
2184 
2185 	/* allocate device private memory */
2186 	bbdev->data->dev_private = rte_zmalloc_socket(dev_name,
2187 			sizeof(struct fpga_5gnr_fec_device),
2188 			RTE_CACHE_LINE_SIZE,
2189 			pci_dev->device.numa_node);
2190 
	if (bbdev->data->dev_private == NULL) {
		rte_bbdev_log(CRIT,
				"Allocation of %zu bytes for device \"%s\" failed",
				sizeof(struct fpga_5gnr_fec_device), dev_name);
		rte_bbdev_release(bbdev);
		return -ENOMEM;
	}
2198 
2199 	/* Fill HW specific part of device structure */
2200 	bbdev->device = &pci_dev->device;
2201 	bbdev->intr_handle = pci_dev->intr_handle;
2202 	bbdev->data->socket_id = pci_dev->device.numa_node;
2203 
2204 	/* Invoke FEC FPGA device initialization function */
2205 	fpga_5gnr_fec_init(bbdev, pci_drv);
2206 
2207 	rte_bbdev_log_debug("bbdev id = %u [%s]",
2208 			bbdev->data->dev_id, dev_name);
2209 
2210 	struct fpga_5gnr_fec_device *d = bbdev->data->dev_private;
2211 	uint32_t version_id = fpga_reg_read_32(d->mmio_base,
2212 			FPGA_5GNR_FEC_VERSION_ID);
2213 	rte_bbdev_log(INFO, "FEC FPGA RTL v%u.%u",
2214 		((uint16_t)(version_id >> 16)), ((uint16_t)version_id));
2215 
2216 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2217 	if (!strcmp(pci_drv->driver.name,
2218 			RTE_STR(FPGA_5GNR_FEC_PF_DRIVER_NAME)))
2219 		print_static_reg_debug_info(d->mmio_base);
2220 #endif
2221 	return 0;
2222 }
2223 
2224 static int
2225 fpga_5gnr_fec_remove(struct rte_pci_device *pci_dev)
2226 {
2227 	struct rte_bbdev *bbdev;
2228 	int ret;
2229 	uint8_t dev_id;
2230 
2231 	if (pci_dev == NULL)
2232 		return -EINVAL;
2233 
2234 	/* Find device */
2235 	bbdev = rte_bbdev_get_named_dev(pci_dev->device.name);
2236 	if (bbdev == NULL) {
2237 		rte_bbdev_log(CRIT,
2238 				"Couldn't find HW dev \"%s\" to uninitialise it",
2239 				pci_dev->device.name);
2240 		return -ENODEV;
2241 	}
2242 	dev_id = bbdev->data->dev_id;
2243 
2244 	/* free device private memory before close */
2245 	rte_free(bbdev->data->dev_private);
2246 
2247 	/* Close device */
2248 	ret = rte_bbdev_close(dev_id);
2249 	if (ret < 0)
2250 		rte_bbdev_log(ERR,
2251 				"Device %i failed to close during uninit: %i",
2252 				dev_id, ret);
2253 
2254 	/* release bbdev from library */
2255 	ret = rte_bbdev_release(bbdev);
2256 	if (ret)
2257 		rte_bbdev_log(ERR, "Device %i failed to uninit: %i", dev_id,
2258 				ret);
2259 
2260 	rte_bbdev_log_debug("Destroyed bbdev = %u", dev_id);
2261 
2262 	return 0;
2263 }
2264 
2265 static inline void
2266 set_default_fpga_conf(struct rte_fpga_5gnr_fec_conf *def_conf)
2267 {
2268 	/* clear default configuration before initialization */
2269 	memset(def_conf, 0, sizeof(struct rte_fpga_5gnr_fec_conf));
2270 	/* Set pf mode to true */
2271 	def_conf->pf_mode_en = true;
2272 
2273 	/* Set ratio between UL and DL to 1:1 (unit of weight is 3 CBs) */
2274 	def_conf->ul_bandwidth = 3;
2275 	def_conf->dl_bandwidth = 3;
2276 
2277 	/* Set Load Balance Factor to 64 */
2278 	def_conf->dl_load_balance = 64;
2279 	def_conf->ul_load_balance = 64;
2280 }
2281 
2282 /* Initial configuration of FPGA 5GNR FEC device */
2283 int
2284 rte_fpga_5gnr_fec_configure(const char *dev_name,
2285 		const struct rte_fpga_5gnr_fec_conf *conf)
2286 {
2287 	uint32_t payload_32, address;
2288 	uint16_t payload_16;
2289 	uint8_t payload_8;
2290 	uint16_t q_id, vf_id, total_q_id, total_ul_q_id, total_dl_q_id;
2291 	struct rte_bbdev *bbdev = rte_bbdev_get_named_dev(dev_name);
2292 	struct rte_fpga_5gnr_fec_conf def_conf;
2293 
2294 	if (bbdev == NULL) {
2295 		rte_bbdev_log(ERR,
2296 				"Invalid dev_name (%s), or device is not yet initialised",
2297 				dev_name);
2298 		return -ENODEV;
2299 	}
2300 
2301 	struct fpga_5gnr_fec_device *d = bbdev->data->dev_private;
2302 
2303 	if (conf == NULL) {
2304 		rte_bbdev_log(ERR,
2305 				"FPGA Configuration was not provided. Default configuration will be loaded.");
2306 		set_default_fpga_conf(&def_conf);
2307 		conf = &def_conf;
2308 	}
2309 
2310 	/*
2311 	 * Configure UL:DL ratio.
2312 	 * [7:0]: UL weight
2313 	 * [15:8]: DL weight
2314 	 */
2315 	payload_16 = (conf->dl_bandwidth << 8) | conf->ul_bandwidth;
2316 	address = FPGA_5GNR_FEC_CONFIGURATION;
2317 	fpga_reg_write_16(d->mmio_base, address, payload_16);
2318 
2319 	/* Clear all queues registers */
2320 	payload_32 = FPGA_INVALID_HW_QUEUE_ID;
2321 	for (q_id = 0; q_id < FPGA_TOTAL_NUM_QUEUES; ++q_id) {
2322 		address = (q_id << 2) + FPGA_5GNR_FEC_QUEUE_MAP;
2323 		fpga_reg_write_32(d->mmio_base, address, payload_32);
2324 	}
2325 
2326 	/*
2327 	 * If PF mode is enabled allocate all queues for PF only.
2328 	 *
2329 	 * For VF mode each VF can have different number of UL and DL queues.
2330 	 * Total number of queues to configure cannot exceed FPGA
2331 	 * capabilities - 64 queues - 32 queues for UL and 32 queues for DL.
2332 	 * Queues mapping is done according to configuration:
2333 	 *
	 * UL queues:
	 * |                Q_ID              | VF_ID |
	 * |                 0                |   0   |
	 * |                ...               |   0   |
	 * | conf->vf_ul_queues_number[0] - 1 |   0   |
	 * | conf->vf_ul_queues_number[0]     |   1   |
	 * |                ...               |   1   |
	 * | conf->vf_ul_queues_number[1] - 1 |   1   |
	 * |                ...               |  ...  |
	 * | conf->vf_ul_queues_number[7] - 1 |   7   |
	 *
	 * DL queues (Q_IDs offset by FPGA_NUM_UL_QUEUES = 32):
	 * |                Q_ID              | VF_ID |
	 * |                 32               |   0   |
	 * |                ...               |   0   |
	 * | conf->vf_dl_queues_number[0] - 1 |   0   |
	 * | conf->vf_dl_queues_number[0]     |   1   |
	 * |                ...               |   1   |
	 * | conf->vf_dl_queues_number[1] - 1 |   1   |
	 * |                ...               |  ...  |
	 * | conf->vf_dl_queues_number[7] - 1 |   7   |
2355 	 *
2356 	 * Example of configuration:
2357 	 * conf->vf_ul_queues_number[0] = 4;  -> 4 UL queues for VF0
2358 	 * conf->vf_dl_queues_number[0] = 4;  -> 4 DL queues for VF0
2359 	 * conf->vf_ul_queues_number[1] = 2;  -> 2 UL queues for VF1
2360 	 * conf->vf_dl_queues_number[1] = 2;  -> 2 DL queues for VF1
2361 	 *
2362 	 * UL:
2363 	 * | Q_ID | VF_ID |
2364 	 * |   0  |   0   |
2365 	 * |   1  |   0   |
2366 	 * |   2  |   0   |
2367 	 * |   3  |   0   |
2368 	 * |   4  |   1   |
2369 	 * |   5  |   1   |
2370 	 *
2371 	 * DL:
2372 	 * | Q_ID | VF_ID |
2373 	 * |  32  |   0   |
2374 	 * |  33  |   0   |
2375 	 * |  34  |   0   |
2376 	 * |  35  |   0   |
2377 	 * |  36  |   1   |
2378 	 * |  37  |   1   |
2379 	 */
2380 	if (conf->pf_mode_en) {
2381 		payload_32 = 0x1;
2382 		for (q_id = 0; q_id < FPGA_TOTAL_NUM_QUEUES; ++q_id) {
2383 			address = (q_id << 2) + FPGA_5GNR_FEC_QUEUE_MAP;
2384 			fpga_reg_write_32(d->mmio_base, address, payload_32);
2385 		}
2386 	} else {
2387 		/* Calculate total number of UL and DL queues to configure */
2388 		total_ul_q_id = total_dl_q_id = 0;
2389 		for (vf_id = 0; vf_id < FPGA_5GNR_FEC_NUM_VFS; ++vf_id) {
2390 			total_ul_q_id += conf->vf_ul_queues_number[vf_id];
2391 			total_dl_q_id += conf->vf_dl_queues_number[vf_id];
2392 		}
2393 		total_q_id = total_dl_q_id + total_ul_q_id;
2394 		/*
2395 		 * Check if total number of queues to configure does not exceed
2396 		 * FPGA capabilities (64 queues - 32 UL and 32 DL queues)
2397 		 */
2398 		if ((total_ul_q_id > FPGA_NUM_UL_QUEUES) ||
2399 			(total_dl_q_id > FPGA_NUM_DL_QUEUES) ||
2400 			(total_q_id > FPGA_TOTAL_NUM_QUEUES)) {
2401 			rte_bbdev_log(ERR,
2402 					"FPGA Configuration failed. Too many queues to configure: UL_Q %u, DL_Q %u, FPGA_Q %u",
2403 					total_ul_q_id, total_dl_q_id,
2404 					FPGA_TOTAL_NUM_QUEUES);
2405 			return -EINVAL;
2406 		}
2407 		total_ul_q_id = 0;
2408 		for (vf_id = 0; vf_id < FPGA_5GNR_FEC_NUM_VFS; ++vf_id) {
2409 			for (q_id = 0; q_id < conf->vf_ul_queues_number[vf_id];
2410 					++q_id, ++total_ul_q_id) {
2411 				address = (total_ul_q_id << 2) +
2412 						FPGA_5GNR_FEC_QUEUE_MAP;
2413 				payload_32 = ((0x80 + vf_id) << 16) | 0x1;
2414 				fpga_reg_write_32(d->mmio_base, address,
2415 						payload_32);
2416 			}
2417 		}
2418 		total_dl_q_id = 0;
2419 		for (vf_id = 0; vf_id < FPGA_5GNR_FEC_NUM_VFS; ++vf_id) {
2420 			for (q_id = 0; q_id < conf->vf_dl_queues_number[vf_id];
2421 					++q_id, ++total_dl_q_id) {
2422 				address = ((total_dl_q_id + FPGA_NUM_UL_QUEUES)
2423 						<< 2) + FPGA_5GNR_FEC_QUEUE_MAP;
2424 				payload_32 = ((0x80 + vf_id) << 16) | 0x1;
2425 				fpga_reg_write_32(d->mmio_base, address,
2426 						payload_32);
2427 			}
2428 		}
2429 	}
2430 
2431 	/* Setting Load Balance Factor */
2432 	payload_16 = (conf->dl_load_balance << 8) | (conf->ul_load_balance);
2433 	address = FPGA_5GNR_FEC_LOAD_BALANCE_FACTOR;
2434 	fpga_reg_write_16(d->mmio_base, address, payload_16);
2435 
2436 	/* Setting length of ring descriptor entry */
2437 	payload_16 = FPGA_RING_DESC_ENTRY_LENGTH;
2438 	address = FPGA_5GNR_FEC_RING_DESC_LEN;
2439 	fpga_reg_write_16(d->mmio_base, address, payload_16);
2440 
2441 	/* Queue PF/VF mapping table is ready */
2442 	payload_8 = 0x1;
2443 	address = FPGA_5GNR_FEC_QUEUE_PF_VF_MAP_DONE;
2444 	fpga_reg_write_8(d->mmio_base, address, payload_8);
2445 
2446 	rte_bbdev_log_debug("PF FPGA 5GNR FEC configuration complete for %s",
2447 			dev_name);
2448 
2449 #ifdef RTE_LIBRTE_BBDEV_DEBUG
2450 	print_static_reg_debug_info(d->mmio_base);
2451 #endif
2452 	return 0;
2453 }
2454 
2455 /* FPGA 5GNR FEC PCI PF address map */
2456 static struct rte_pci_id pci_id_fpga_5gnr_fec_pf_map[] = {
2457 	{
2458 		RTE_PCI_DEVICE(FPGA_5GNR_FEC_VENDOR_ID,
2459 				FPGA_5GNR_FEC_PF_DEVICE_ID)
2460 	},
2461 	{.device_id = 0},
2462 };
2463 
2464 static struct rte_pci_driver fpga_5gnr_fec_pci_pf_driver = {
2465 	.probe = fpga_5gnr_fec_probe,
2466 	.remove = fpga_5gnr_fec_remove,
2467 	.id_table = pci_id_fpga_5gnr_fec_pf_map,
2468 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING
2469 };
2470 
2471 /* FPGA 5GNR FEC PCI VF address map */
2472 static struct rte_pci_id pci_id_fpga_5gnr_fec_vf_map[] = {
2473 	{
2474 		RTE_PCI_DEVICE(FPGA_5GNR_FEC_VENDOR_ID,
2475 				FPGA_5GNR_FEC_VF_DEVICE_ID)
2476 	},
2477 	{.device_id = 0},
2478 };
2479 
2480 static struct rte_pci_driver fpga_5gnr_fec_pci_vf_driver = {
2481 	.probe = fpga_5gnr_fec_probe,
2482 	.remove = fpga_5gnr_fec_remove,
2483 	.id_table = pci_id_fpga_5gnr_fec_vf_map,
2484 	.drv_flags = RTE_PCI_DRV_NEED_MAPPING
2485 };
2486 
2487 
2488 RTE_PMD_REGISTER_PCI(FPGA_5GNR_FEC_PF_DRIVER_NAME, fpga_5gnr_fec_pci_pf_driver);
2489 RTE_PMD_REGISTER_PCI_TABLE(FPGA_5GNR_FEC_PF_DRIVER_NAME,
2490 		pci_id_fpga_5gnr_fec_pf_map);
2491 RTE_PMD_REGISTER_PCI(FPGA_5GNR_FEC_VF_DRIVER_NAME, fpga_5gnr_fec_pci_vf_driver);
2492 RTE_PMD_REGISTER_PCI_TABLE(FPGA_5GNR_FEC_VF_DRIVER_NAME,
2493 		pci_id_fpga_5gnr_fec_vf_map);
2494