/* dpdk/drivers/event/dlb2/pf/dlb2_pf.c (revision f665790a5dbad7b645ff46f31d65e977324e7bfc) */
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2016-2020 Intel Corporation
 */

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <sys/time.h>
#include <errno.h>
#include <assert.h>
#include <unistd.h>
#include <string.h>

#include <rte_debug.h>
#include <rte_log.h>
#include <dev_driver.h>
#include <rte_devargs.h>
#include <rte_mbuf.h>
#include <rte_ring.h>
#include <rte_errno.h>
#include <rte_kvargs.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_io.h>
#include <rte_pci.h>
#include <bus_pci_driver.h>
#include <rte_eventdev.h>
#include <eventdev_pmd.h>
#include <eventdev_pmd_pci.h>
#include <rte_memory.h>
#include <rte_string_fns.h>

#include "../dlb2_priv.h"
#include "../dlb2_iface.h"
#include "../dlb2_inline_fns.h"
#include "dlb2_main.h"
#include "base/dlb2_hw_types.h"
#include "base/dlb2_osdep.h"
#include "base/dlb2_resource.h"

static const char *event_dlb2_pf_name = RTE_STR(EVDEV_DLB2_NAME_PMD);
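
/*
 * Starvation-avoidance (SA) percentages used to derive the QE and QID arbiter
 * weights in dlb2_pf_hardware_init(). 0 disables starvation avoidance, 100
 * spreads the weights across the full 0..DLB2_MAX_WEIGHT range; both values
 * are fixed at their defaults in this file rather than taken from devargs.
 */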
static unsigned int dlb2_qe_sa_pct = 1;
static unsigned int dlb2_qid_sa_pct;

static void
dlb2_pf_low_level_io_init(void)
{
	int i;
	/* Addresses will be initialized at port create */
	for (i = 0; i < DLB2_MAX_NUM_PORTS(DLB2_HW_V2_5); i++) {
		/* First directed ports */
		dlb2_port[i][DLB2_DIR_PORT].pp_addr = NULL;
		dlb2_port[i][DLB2_DIR_PORT].cq_base = NULL;
		dlb2_port[i][DLB2_DIR_PORT].mmaped = true;

		/* Now load balanced ports */
		dlb2_port[i][DLB2_LDB_PORT].pp_addr = NULL;
		dlb2_port[i][DLB2_LDB_PORT].cq_base = NULL;
		dlb2_port[i][DLB2_LDB_PORT].mmaped = true;
	}
}

static int
dlb2_pf_open(struct dlb2_hw_dev *handle, const char *name)
{
	RTE_SET_USED(handle);
	RTE_SET_USED(name);

	return 0;
}

static int
dlb2_pf_get_device_version(struct dlb2_hw_dev *handle,
			   uint8_t *revision)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;

	*revision = dlb2_dev->revision;

	return 0;
}

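/*
 * Derive the eight arbiter weights from a starvation-avoidance percentage.
 * The weights descend linearly from DLB2_MAX_WEIGHT at the highest priority.
 * Worked example (assuming DLB2_MAX_WEIGHT == 0xff and DLB2_NUM_ARB_WEIGHTS
 * == 8, as the comments below imply):
 *   pct = 100: val = 32 -> weights = {31, 63, 95, 127, 159, 191, 223, 255}
 *   pct = 1:   val = 0, clamped to 1 -> weights = {248, 249, ..., 254, 255}
 */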
static void dlb2_pf_calc_arbiter_weights(u8 *weight,
					 unsigned int pct)
{
	int val, i;

	/* Largest possible weight (100% SA case): 32 */
	val = (DLB2_MAX_WEIGHT + 1) / DLB2_NUM_ARB_WEIGHTS;

	/* Scale val according to the starvation avoidance percentage */
	val = (val * pct) / 100;
	if (val == 0 && pct != 0)
		val = 1;

	/* Prio 7 always has weight 0xff */
	weight[DLB2_NUM_ARB_WEIGHTS - 1] = DLB2_MAX_WEIGHT;

	for (i = DLB2_NUM_ARB_WEIGHTS - 2; i >= 0; i--)
		weight[i] = weight[i + 1] - val;
}

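/*
 * One-time hardware setup for the PF: enable sparse CQ mode for both LDB and
 * DIR ports (the hardware then writes one QE per cache line, matching the
 * DLB2_CQ_POLL_MODE_SPARSE value reported by dlb2_pf_get_cq_poll_mode()
 * below) and program the QE/QID arbiter weights from the SA percentages.
 */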
static void
dlb2_pf_hardware_init(struct dlb2_hw_dev *handle)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;

	dlb2_hw_enable_sparse_ldb_cq_mode(&dlb2_dev->hw);
	dlb2_hw_enable_sparse_dir_cq_mode(&dlb2_dev->hw);

	/* Configure arbitration weights for QE selection */
	if (dlb2_qe_sa_pct <= 100) {
		u8 weight[DLB2_NUM_ARB_WEIGHTS];

		dlb2_pf_calc_arbiter_weights(weight,
					     dlb2_qe_sa_pct);

		dlb2_hw_set_qe_arbiter_weights(&dlb2_dev->hw, weight);
	}

	/* Configure arbitration weights for QID selection */
	if (dlb2_qid_sa_pct <= 100) {
		u8 weight[DLB2_NUM_ARB_WEIGHTS];

		dlb2_pf_calc_arbiter_weights(weight,
					     dlb2_qid_sa_pct);

		dlb2_hw_set_qid_arbiter_weights(&dlb2_dev->hw, weight);
	}
}

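/*
 * Note: the trailing (false, 0) arguments on the dlb2_hw_*() calls below tell
 * the resource layer that the request is issued directly by the PF and is not
 * being forwarded on behalf of a virtual device.
 */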
static int
dlb2_pf_get_num_resources(struct dlb2_hw_dev *handle,
			  struct dlb2_get_num_resources_args *rsrcs)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;

	return dlb2_hw_get_num_resources(&dlb2_dev->hw, rsrcs, false, 0);
}

static int
dlb2_pf_get_cq_poll_mode(struct dlb2_hw_dev *handle,
			 enum dlb2_cq_poll_modes *mode)
{
	RTE_SET_USED(handle);

	*mode = DLB2_CQ_POLL_MODE_SPARSE;

	return 0;
}

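/*
 * Create a scheduling domain. On success, response.id carries the new domain
 * ID; the common PMD layer records it (see qm_instance.domain_id in
 * dlb2_pf_domain_reset() below) and passes it back as handle->domain_id to
 * the per-domain calls that follow.
 */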
static int
dlb2_pf_sched_domain_create(struct dlb2_hw_dev *handle,
			    struct dlb2_create_sched_domain_args *arg)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
	struct dlb2_cmd_response response = {0};
	int ret;

	DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);

	if (dlb2_dev->domain_reset_failed) {
		response.status = DLB2_ST_DOMAIN_RESET_FAILED;
		ret = -EINVAL;
		goto done;
	}

	ret = dlb2_pf_create_sched_domain(&dlb2_dev->hw, arg, &response);
	if (ret)
		goto done;

done:

	arg->response = response;

	DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
		  __func__, ret);

	return ret;
}

static void
dlb2_pf_domain_reset(struct dlb2_eventdev *dlb2)
{
	struct dlb2_dev *dlb2_dev;
	int ret;

	dlb2_dev = (struct dlb2_dev *)dlb2->qm_instance.pf_dev;
	ret = dlb2_pf_reset_domain(&dlb2_dev->hw, dlb2->qm_instance.domain_id);
	if (ret)
		DLB2_LOG_ERR("dlb2_pf_reset_domain err %d", ret);
}

static int
dlb2_pf_ldb_queue_create(struct dlb2_hw_dev *handle,
			 struct dlb2_create_ldb_queue_args *cfg)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
	struct dlb2_cmd_response response = {0};
	int ret;

	DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);

	ret = dlb2_pf_create_ldb_queue(&dlb2_dev->hw,
				       handle->domain_id,
				       cfg,
				       &response);

	cfg->response = response;

	DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
		  __func__, ret);

	return ret;
}

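/*
 * Sequence-number (SN) group helpers, used when sizing ordered queues. For
 * the two getters the raw return value from the resource layer doubles as
 * response.id, so a negative value indicates an error and a non-negative
 * value is the per-group occupancy or allocation count.
 */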
static int
dlb2_pf_get_sn_occupancy(struct dlb2_hw_dev *handle,
			 struct dlb2_get_sn_occupancy_args *args)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
	struct dlb2_cmd_response response = {0};
	int ret;

	ret = dlb2_get_group_sequence_number_occupancy(&dlb2_dev->hw,
						       args->group);

	response.id = ret;
	response.status = 0;

	args->response = response;

	return ret;
}

static int
dlb2_pf_get_sn_allocation(struct dlb2_hw_dev *handle,
			  struct dlb2_get_sn_allocation_args *args)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
	struct dlb2_cmd_response response = {0};
	int ret;

	ret = dlb2_get_group_sequence_numbers(&dlb2_dev->hw, args->group);

	response.id = ret;
	response.status = 0;

	args->response = response;

	return ret;
}

static int
dlb2_pf_set_sn_allocation(struct dlb2_hw_dev *handle,
			  struct dlb2_set_sn_allocation_args *args)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
	struct dlb2_cmd_response response = {0};
	int ret;

	ret = dlb2_set_group_sequence_numbers(&dlb2_dev->hw, args->group,
					      args->num);

	response.status = 0;

	args->response = response;

	return ret;
}

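/*
 * Reserve IOVA-contiguous, aligned memory for a port's CQ ring. The memzone
 * name is derived from the timer-cycle counter so that repeated port creation
 * gets a unique name; the caller keeps the memzone pointer so the memory can
 * be released with rte_memzone_free() on error or teardown.
 */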
static void *
dlb2_alloc_coherent_aligned(const struct rte_memzone **mz, uintptr_t *phys,
			    size_t size, int align)
{
	char mz_name[RTE_MEMZONE_NAMESIZE];
	uint32_t core_id = rte_lcore_id();
	unsigned int socket_id;

	snprintf(mz_name, sizeof(mz_name) - 1, "event_dlb2_pf_%lx",
		 (unsigned long)rte_get_timer_cycles());
	if (core_id == (unsigned int)LCORE_ID_ANY)
		core_id = rte_get_main_lcore();
	socket_id = rte_lcore_to_socket_id(core_id);
	*mz = rte_memzone_reserve_aligned(mz_name, size, socket_id,
					 RTE_MEMZONE_IOVA_CONTIG, align);
	if (*mz == NULL) {
		DLB2_LOG_LINE_DBG("Unable to allocate DMA memory of size %zu bytes - %s",
			     size, rte_strerror(rte_errno));
		*phys = 0;
		return NULL;
	}
	*phys = (*mz)->iova;
	return (*mz)->addr;
}

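/*
 * Port creation: size the CQ for the poll mode (sizeof(struct
 * dlb2_dequeue_qe) per entry in standard mode, a full cache line per entry in
 * sparse mode), allocate and lock the CQ memory, then ask the resource layer
 * to configure the port. The producer-port (PP) MMIO window is page-strided
 * within the function BAR, so the per-port producer address is
 * func_kva + PP_BASE(is_dir) + page_size * port_id.
 */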
static int
dlb2_pf_ldb_port_create(struct dlb2_hw_dev *handle,
			struct dlb2_create_ldb_port_args *cfg,
			enum dlb2_cq_poll_modes poll_mode)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
	struct dlb2_cmd_response response = {0};
	struct dlb2_port_memory port_memory;
	int ret, cq_alloc_depth;
	uint8_t *port_base;
	const struct rte_memzone *mz;
	int alloc_sz, qe_sz;
	phys_addr_t cq_base;
	phys_addr_t pp_base;
	int is_dir = false;

	DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);

	if (poll_mode == DLB2_CQ_POLL_MODE_STD)
		qe_sz = sizeof(struct dlb2_dequeue_qe);
	else
		qe_sz = RTE_CACHE_LINE_SIZE;

	/* Calculate the port memory required, and round up to the nearest
	 * cache line.
	 */
	cq_alloc_depth = RTE_MAX(cfg->cq_depth, DLB2_MIN_HARDWARE_CQ_DEPTH);
	alloc_sz = cq_alloc_depth * qe_sz;
	alloc_sz = RTE_CACHE_LINE_ROUNDUP(alloc_sz);

	port_base = dlb2_alloc_coherent_aligned(&mz, &cq_base, alloc_sz,
						rte_mem_page_size());
	if (port_base == NULL)
		return -ENOMEM;

	/* Lock the page in memory */
	ret = rte_mem_lock_page(port_base);
	if (ret < 0) {
		DLB2_LOG_ERR("dlb2 pf pmd could not lock page for device i/o");
		goto create_port_err;
	}

	memset(port_base, 0, alloc_sz);

	ret = dlb2_pf_create_ldb_port(&dlb2_dev->hw,
				      handle->domain_id,
				      cfg,
				      cq_base,
				      &response);
	if (ret)
		goto create_port_err;

	pp_base = (uintptr_t)dlb2_dev->hw.func_kva + PP_BASE(is_dir);
	dlb2_port[response.id][DLB2_LDB_PORT].pp_addr =
		(void *)(pp_base + (rte_mem_page_size() * response.id));

	dlb2_port[response.id][DLB2_LDB_PORT].cq_base = (void *)(port_base);
	memset(&port_memory, 0, sizeof(port_memory));

	dlb2_port[response.id][DLB2_LDB_PORT].mz = mz;

	dlb2_list_init_head(&port_memory.list);

	cfg->response = response;

	return 0;

create_port_err:

	rte_memzone_free(mz);

	DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
		  __func__, ret);
	return ret;
}

static int
dlb2_pf_dir_port_create(struct dlb2_hw_dev *handle,
			struct dlb2_create_dir_port_args *cfg,
			enum dlb2_cq_poll_modes poll_mode)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
	struct dlb2_cmd_response response = {0};
	struct dlb2_port_memory port_memory;
	int ret;
	uint8_t *port_base;
	const struct rte_memzone *mz;
	int alloc_sz, qe_sz;
	phys_addr_t cq_base;
	phys_addr_t pp_base;
	int is_dir = true;

	DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);

	if (poll_mode == DLB2_CQ_POLL_MODE_STD)
		qe_sz = sizeof(struct dlb2_dequeue_qe);
	else
		qe_sz = RTE_CACHE_LINE_SIZE;

	/* Calculate the port memory required, and round up to the nearest
	 * cache line.
	 */
	alloc_sz = cfg->cq_depth * qe_sz;
	alloc_sz = RTE_CACHE_LINE_ROUNDUP(alloc_sz);

	port_base = dlb2_alloc_coherent_aligned(&mz, &cq_base, alloc_sz,
						rte_mem_page_size());
	if (port_base == NULL)
		return -ENOMEM;

	/* Lock the page in memory */
	ret = rte_mem_lock_page(port_base);
	if (ret < 0) {
		DLB2_LOG_ERR("dlb2 pf pmd could not lock page for device i/o");
		goto create_port_err;
	}

	memset(port_base, 0, alloc_sz);

	ret = dlb2_pf_create_dir_port(&dlb2_dev->hw,
				      handle->domain_id,
				      cfg,
				      cq_base,
				      &response);
	if (ret)
		goto create_port_err;

	pp_base = (uintptr_t)dlb2_dev->hw.func_kva + PP_BASE(is_dir);
	dlb2_port[response.id][DLB2_DIR_PORT].pp_addr =
		(void *)(pp_base + (rte_mem_page_size() * response.id));

	dlb2_port[response.id][DLB2_DIR_PORT].cq_base =
		(void *)(port_base);
	memset(&port_memory, 0, sizeof(port_memory));

	dlb2_port[response.id][DLB2_DIR_PORT].mz = mz;

	dlb2_list_init_head(&port_memory.list);

	cfg->response = response;

	return 0;

create_port_err:

	rte_memzone_free(mz);

	DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
		  __func__, ret);

	return ret;
}

static int
dlb2_pf_dir_queue_create(struct dlb2_hw_dev *handle,
			 struct dlb2_create_dir_queue_args *cfg)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
	struct dlb2_cmd_response response = {0};
	int ret;

	DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);

	ret = dlb2_pf_create_dir_queue(&dlb2_dev->hw,
				       handle->domain_id,
				       cfg,
				       &response);

	cfg->response = response;

	DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
		  __func__, ret);

	return ret;
}

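/*
 * QID map/unmap requests are not necessarily synchronous: the hardware can
 * defer an unmap until in-flight events for that queue have drained, so the
 * PMD polls dlb2_pf_pending_port_unmaps() below to learn how many unmap
 * operations are still outstanding for a port.
 */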
static int
dlb2_pf_map_qid(struct dlb2_hw_dev *handle,
		struct dlb2_map_qid_args *cfg)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
	struct dlb2_cmd_response response = {0};
	int ret;

	DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);

	ret = dlb2_hw_map_qid(&dlb2_dev->hw,
			      handle->domain_id,
			      cfg,
			      &response,
			      false,
			      0);

	cfg->response = response;

	DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
		  __func__, ret);

	return ret;
}

static int
dlb2_pf_unmap_qid(struct dlb2_hw_dev *handle,
		  struct dlb2_unmap_qid_args *cfg)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
	struct dlb2_cmd_response response = {0};
	int ret;

	DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);

	ret = dlb2_hw_unmap_qid(&dlb2_dev->hw,
				handle->domain_id,
				cfg,
				&response,
				false,
				0);

	cfg->response = response;

	DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
		  __func__, ret);

	return ret;
}

static int
dlb2_pf_pending_port_unmaps(struct dlb2_hw_dev *handle,
			    struct dlb2_pending_port_unmaps_args *args)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
	struct dlb2_cmd_response response = {0};
	int ret;

	DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);

	ret = dlb2_hw_pending_port_unmaps(&dlb2_dev->hw,
					  handle->domain_id,
					  args,
					  &response,
					  false,
					  0);

	args->response = response;

	DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
		  __func__, ret);

	return ret;
}

static int
dlb2_pf_sched_domain_start(struct dlb2_hw_dev *handle,
			   struct dlb2_start_domain_args *cfg)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
	struct dlb2_cmd_response response = {0};
	int ret;

	DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);

	ret = dlb2_pf_start_domain(&dlb2_dev->hw,
				   handle->domain_id,
				   cfg,
				   &response);

	cfg->response = response;

	DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
		  __func__, ret);

	return ret;
}

static int
dlb2_pf_get_ldb_queue_depth(struct dlb2_hw_dev *handle,
			    struct dlb2_get_ldb_queue_depth_args *args)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
	struct dlb2_cmd_response response = {0};
	int ret;

	DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);

	ret = dlb2_hw_get_ldb_queue_depth(&dlb2_dev->hw,
					  handle->domain_id,
					  args,
					  &response,
					  false,
					  0);

	args->response = response;

	DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
		  __func__, ret);

	return ret;
}

static int
dlb2_pf_get_dir_queue_depth(struct dlb2_hw_dev *handle,
			    struct dlb2_get_dir_queue_depth_args *args)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
	struct dlb2_cmd_response response = {0};
	int ret = 0;

	DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);

	ret = dlb2_hw_get_dir_queue_depth(&dlb2_dev->hw,
					  handle->domain_id,
					  args,
					  &response,
					  false,
					  0);

	args->response = response;

	DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
		  __func__, ret);

	return ret;
}

static int
dlb2_pf_enable_cq_weight(struct dlb2_hw_dev *handle,
			 struct dlb2_enable_cq_weight_args *args)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
	struct dlb2_cmd_response response = {0};
	int ret = 0;

	DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);

	ret = dlb2_hw_enable_cq_weight(&dlb2_dev->hw,
				       handle->domain_id,
				       args,
				       &response,
				       false,
				       0);
	args->response = response;

	DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
		  __func__, ret);

	return ret;
}

static int
dlb2_pf_set_cos_bandwidth(struct dlb2_hw_dev *handle,
			  struct dlb2_set_cos_bw_args *args)
{
	struct dlb2_dev *dlb2_dev = (struct dlb2_dev *)handle->pf_dev;
	int ret = 0;

	DLB2_INFO(dev->dlb2_device, "Entering %s()\n", __func__);

	ret = dlb2_hw_set_cos_bandwidth(&dlb2_dev->hw,
					args->cos_id,
					args->bandwidth);

	DLB2_INFO(dev->dlb2_device, "Exiting %s() with ret=%d\n",
		  __func__, ret);

	return ret;
}

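/*
 * Install the PF implementations of the dlb2_iface_* hooks declared in
 * ../dlb2_iface.h. The device-independent PMD code only ever calls through
 * these pointers, which keeps it agnostic of how the hardware is reached.
 */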
static void
dlb2_pf_iface_fn_ptrs_init(void)
{
	dlb2_iface_low_level_io_init = dlb2_pf_low_level_io_init;
	dlb2_iface_open = dlb2_pf_open;
	dlb2_iface_domain_reset = dlb2_pf_domain_reset;
	dlb2_iface_get_device_version = dlb2_pf_get_device_version;
	dlb2_iface_hardware_init = dlb2_pf_hardware_init;
	dlb2_iface_get_num_resources = dlb2_pf_get_num_resources;
	dlb2_iface_get_cq_poll_mode = dlb2_pf_get_cq_poll_mode;
	dlb2_iface_sched_domain_create = dlb2_pf_sched_domain_create;
	dlb2_iface_ldb_queue_create = dlb2_pf_ldb_queue_create;
	dlb2_iface_ldb_port_create = dlb2_pf_ldb_port_create;
	dlb2_iface_dir_queue_create = dlb2_pf_dir_queue_create;
	dlb2_iface_dir_port_create = dlb2_pf_dir_port_create;
	dlb2_iface_map_qid = dlb2_pf_map_qid;
	dlb2_iface_unmap_qid = dlb2_pf_unmap_qid;
	dlb2_iface_get_ldb_queue_depth = dlb2_pf_get_ldb_queue_depth;
	dlb2_iface_get_dir_queue_depth = dlb2_pf_get_dir_queue_depth;
	dlb2_iface_sched_domain_start = dlb2_pf_sched_domain_start;
	dlb2_iface_pending_port_unmaps = dlb2_pf_pending_port_unmaps;
	dlb2_iface_get_sn_allocation = dlb2_pf_get_sn_allocation;
	dlb2_iface_set_sn_allocation = dlb2_pf_set_sn_allocation;
	dlb2_iface_get_sn_occupancy = dlb2_pf_get_sn_occupancy;
	dlb2_iface_enable_cq_weight = dlb2_pf_enable_cq_weight;
	dlb2_iface_set_cos_bw = dlb2_pf_set_cos_bandwidth;
}

/* PCI DEV HOOKS */
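/*
 * Per-device eventdev init. The primary process parses optional devargs,
 * probes the PF hardware layer and runs the full eventdev probe; secondary
 * processes only attach to the already-probed device. Illustrative devargs
 * usage (key names are whatever dlb2_parse_params() accepts for this build;
 * the values below are examples only):
 *
 *   dpdk-test-eventdev -a <PF BDF>,max_num_events=2048,poll_interval=200 ...
 */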
static int
dlb2_eventdev_pci_init(struct rte_eventdev *eventdev)
{
	int ret = 0;
	struct rte_pci_device *pci_dev;
	struct dlb2_devargs dlb2_args = {
		.socket_id = rte_socket_id(),
		.max_num_events = DLB2_MAX_NUM_LDB_CREDITS,
		.producer_coremask = NULL,
		.num_dir_credits_override = -1,
		.qid_depth_thresholds = { {0} },
		.poll_interval = DLB2_POLL_INTERVAL_DEFAULT,
		.sw_credit_quanta = DLB2_SW_CREDIT_QUANTA_DEFAULT,
		.hw_credit_quanta = DLB2_SW_CREDIT_BATCH_SZ,
		.default_depth_thresh = DLB2_DEPTH_THRESH_DEFAULT,
		.max_cq_depth = DLB2_DEFAULT_CQ_DEPTH,
		.max_enq_depth = DLB2_MAX_ENQUEUE_DEPTH
	};
	struct dlb2_eventdev *dlb2;
	int q;
	const void *probe_args = NULL;

	DLB2_LOG_LINE_DBG("Enter with dev_id=%d socket_id=%d",
		     eventdev->data->dev_id, eventdev->data->socket_id);

	for (q = 0; q < DLB2_MAX_NUM_PORTS_ALL; q++)
		dlb2_args.port_cos.cos_id[q] = DLB2_COS_DEFAULT;

	dlb2_pf_iface_fn_ptrs_init();

	pci_dev = RTE_DEV_TO_PCI(eventdev->dev);

	if (rte_eal_process_type() == RTE_PROC_PRIMARY) {
		dlb2 = dlb2_pmd_priv(eventdev); /* rte_zmalloc_socket mem */
		dlb2->version = DLB2_HW_DEVICE_FROM_PCI_ID(pci_dev);

		/* Were we invoked with runtime parameters? */
		if (pci_dev->device.devargs) {
			ret = dlb2_parse_params(pci_dev->device.devargs->args,
						pci_dev->device.devargs->name,
						&dlb2_args,
						dlb2->version);
			if (ret) {
				DLB2_LOG_ERR("PFPMD failed to parse args ret=%d, errno=%d",
					     ret, rte_errno);
				goto dlb2_probe_failed;
			}
			probe_args = &dlb2_args;
		}

		/* Probe the DLB2 PF layer */
		dlb2->qm_instance.pf_dev = dlb2_probe(pci_dev, probe_args);

		if (dlb2->qm_instance.pf_dev == NULL) {
			DLB2_LOG_ERR("DLB2 PF Probe failed with error %d",
				     rte_errno);
			ret = -rte_errno;
			goto dlb2_probe_failed;
		}

		ret = dlb2_primary_eventdev_probe(eventdev,
						  event_dlb2_pf_name,
						  &dlb2_args);
	} else {
		dlb2 = dlb2_pmd_priv(eventdev);
		dlb2->version = DLB2_HW_DEVICE_FROM_PCI_ID(pci_dev);
		ret = dlb2_secondary_eventdev_probe(eventdev,
						    event_dlb2_pf_name);
	}
	if (ret)
		goto dlb2_probe_failed;

	DLB2_LOG_INFO("DLB2 PF Probe success");

	return 0;

dlb2_probe_failed:

	DLB2_LOG_INFO("DLB2 PF Probe failed, ret=%d", ret);

	return ret;
}

#define EVENTDEV_INTEL_VENDOR_ID 0x8086

static const struct rte_pci_id pci_id_dlb2_map[] = {
	{
		RTE_PCI_DEVICE(EVENTDEV_INTEL_VENDOR_ID,
			       PCI_DEVICE_ID_INTEL_DLB2_PF)
	},
	{
		.vendor_id = 0,
	},
};

static const struct rte_pci_id pci_id_dlb2_5_map[] = {
	{
		RTE_PCI_DEVICE(EVENTDEV_INTEL_VENDOR_ID,
			       PCI_DEVICE_ID_INTEL_DLB2_5_PF)
	},
	{
		.vendor_id = 0,
	},
};

static int
event_dlb2_pci_probe(struct rte_pci_driver *pci_drv,
		     struct rte_pci_device *pci_dev)
{
	int ret;

	ret = rte_event_pmd_pci_probe_named(pci_drv, pci_dev,
					     sizeof(struct dlb2_eventdev),
					     dlb2_eventdev_pci_init,
					     event_dlb2_pf_name);
	if (ret) {
		DLB2_LOG_INFO("rte_event_pmd_pci_probe_named() failed, "
				"ret=%d", ret);
	}

	return ret;
}

static int
event_dlb2_pci_remove(struct rte_pci_device *pci_dev)
{
	int ret;

	ret = rte_event_pmd_pci_remove(pci_dev, NULL);

	if (ret) {
		DLB2_LOG_INFO("rte_event_pmd_pci_remove() failed, "
				"ret=%d", ret);
	}

	return ret;
}

static int
event_dlb2_5_pci_probe(struct rte_pci_driver *pci_drv,
		       struct rte_pci_device *pci_dev)
{
	int ret;

	ret = rte_event_pmd_pci_probe_named(pci_drv, pci_dev,
					    sizeof(struct dlb2_eventdev),
					    dlb2_eventdev_pci_init,
					    event_dlb2_pf_name);
	if (ret) {
		DLB2_LOG_INFO("rte_event_pmd_pci_probe_named() failed, "
				"ret=%d", ret);
	}

	return ret;
}

static int
event_dlb2_5_pci_remove(struct rte_pci_device *pci_dev)
{
	int ret;

	ret = rte_event_pmd_pci_remove(pci_dev, NULL);

	if (ret) {
		DLB2_LOG_INFO("rte_event_pmd_pci_remove() failed, "
				"ret=%d", ret);
	}

	return ret;
}

static struct rte_pci_driver pci_eventdev_dlb2_pmd = {
	.id_table = pci_id_dlb2_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = event_dlb2_pci_probe,
	.remove = event_dlb2_pci_remove,
};

static struct rte_pci_driver pci_eventdev_dlb2_5_pmd = {
	.id_table = pci_id_dlb2_5_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING,
	.probe = event_dlb2_5_pci_probe,
	.remove = event_dlb2_5_pci_remove,
};

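/*
 * Two PCI drivers, one per PF device ID (DLB 2.0 and DLB 2.5), share the same
 * probe/remove path; dlb2_eventdev_pci_init() re-derives the hardware version
 * from the PCI ID via DLB2_HW_DEVICE_FROM_PCI_ID().
 */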
RTE_PMD_REGISTER_PCI(event_dlb2_pf, pci_eventdev_dlb2_pmd);
RTE_PMD_REGISTER_PCI_TABLE(event_dlb2_pf, pci_id_dlb2_map);

RTE_PMD_REGISTER_PCI(event_dlb2_5_pf, pci_eventdev_dlb2_5_pmd);
RTE_PMD_REGISTER_PCI_TABLE(event_dlb2_5_pf, pci_id_dlb2_5_map);