xref: /dpdk/drivers/event/octeontx/ssovf_evdev.c (revision c7aa67f5a9e4a59a816a6506aa87cfb133981315)
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(c) 2017 Cavium, Inc
3  */
4 
5 #include <inttypes.h>
6 
7 #include <rte_common.h>
8 #include <rte_debug.h>
9 #include <rte_dev.h>
10 #include <rte_eal.h>
11 #include <rte_ethdev.h>
12 #include <rte_event_eth_rx_adapter.h>
13 #include <rte_lcore.h>
14 #include <rte_log.h>
15 #include <rte_malloc.h>
16 #include <rte_memory.h>
17 #include <rte_bus_vdev.h>
18 
19 #include "ssovf_evdev.h"
20 
21 int otx_logtype_ssovf;
22 
/* Constructor: register this PMD's log type with the EAL at startup.
 * Default verbosity is NOTICE unless overridden by EAL log options. */
RTE_INIT(otx_ssovf_init_log);
static void
otx_ssovf_init_log(void)
{
	otx_logtype_ssovf = rte_log_register("pmd.otx.eventdev");
	if (otx_logtype_ssovf >= 0)
		rte_log_set_level(otx_logtype_ssovf, RTE_LOG_NOTICE);
}
31 
/* SSO PF mailbox messages */
33 
/* Response layout for the SSO_GET_DEV_INFO mailbox request. */
struct ssovf_mbox_dev_info {
	uint64_t min_deq_timeout_ns; /* Minimum supported dequeue timeout */
	uint64_t max_deq_timeout_ns; /* Maximum supported dequeue timeout */
	uint32_t max_num_events;     /* In-flight event credit limit */
};
39 
40 static int
41 ssovf_mbox_dev_info(struct ssovf_mbox_dev_info *info)
42 {
43 	struct octeontx_mbox_hdr hdr = {0};
44 	uint16_t len = sizeof(struct ssovf_mbox_dev_info);
45 
46 	hdr.coproc = SSO_COPROC;
47 	hdr.msg = SSO_GET_DEV_INFO;
48 	hdr.vfid = 0;
49 
50 	memset(info, 0, len);
51 	return octeontx_ssovf_mbox_send(&hdr, NULL, 0, info, len);
52 }
53 
/* Request layout for SSO_SET_GETWORK_WAIT: global getwork wait budget. */
struct ssovf_mbox_getwork_wait {
	uint64_t wait_ns; /* Wait duration in nanoseconds */
};
57 
58 static int
59 ssovf_mbox_getwork_tmo_set(uint32_t timeout_ns)
60 {
61 	struct octeontx_mbox_hdr hdr = {0};
62 	struct ssovf_mbox_getwork_wait tmo_set;
63 	uint16_t len = sizeof(struct ssovf_mbox_getwork_wait);
64 	int ret;
65 
66 	hdr.coproc = SSO_COPROC;
67 	hdr.msg = SSO_SET_GETWORK_WAIT;
68 	hdr.vfid = 0;
69 
70 	tmo_set.wait_ns = timeout_ns;
71 	ret = octeontx_ssovf_mbox_send(&hdr, &tmo_set, len, NULL, 0);
72 	if (ret)
73 		ssovf_log_err("Failed to set getwork timeout(%d)", ret);
74 
75 	return ret;
76 }
77 
/* Request layout for SSO_GRP_SET_PRIORITY: per-group scheduling knobs. */
struct ssovf_mbox_grp_pri {
	uint8_t wgt_left; /* Read only */
	uint8_t weight;   /* Scheduling weight (0xff = default) */
	uint8_t affinity; /* Scheduling affinity (0xff = default) */
	uint8_t priority; /* 0 (highest) .. 7 (lowest) */
};
84 
85 static int
86 ssovf_mbox_priority_set(uint8_t queue, uint8_t prio)
87 {
88 	struct octeontx_mbox_hdr hdr = {0};
89 	struct ssovf_mbox_grp_pri grp;
90 	uint16_t len = sizeof(struct ssovf_mbox_grp_pri);
91 	int ret;
92 
93 	hdr.coproc = SSO_COPROC;
94 	hdr.msg = SSO_GRP_SET_PRIORITY;
95 	hdr.vfid = queue;
96 
97 	grp.weight = 0xff;
98 	grp.affinity = 0xff;
99 	grp.priority = prio / 32; /* Normalize to 0 to 7 */
100 
101 	ret = octeontx_ssovf_mbox_send(&hdr, &grp, len, NULL, 0);
102 	if (ret)
103 		ssovf_log_err("Failed to set grp=%d prio=%d", queue, prio);
104 
105 	return ret;
106 }
107 
/* Request/response layout for SSO_CONVERT_NS_GETWORK_ITER. */
struct ssovf_mbox_convert_ns_getworks_iter {
	uint64_t wait_ns; /* Input: wait duration in nanoseconds */
	uint32_t getwork_iter;/* Get_work iterations for the given wait_ns */
};
112 
113 static int
114 ssovf_mbox_timeout_ticks(uint64_t ns, uint64_t *tmo_ticks)
115 {
116 	struct octeontx_mbox_hdr hdr = {0};
117 	struct ssovf_mbox_convert_ns_getworks_iter ns2iter;
118 	uint16_t len = sizeof(ns2iter);
119 	int ret;
120 
121 	hdr.coproc = SSO_COPROC;
122 	hdr.msg = SSO_CONVERT_NS_GETWORK_ITER;
123 	hdr.vfid = 0;
124 
125 	memset(&ns2iter, 0, len);
126 	ns2iter.wait_ns = ns;
127 	ret = octeontx_ssovf_mbox_send(&hdr, &ns2iter, len, &ns2iter, len);
128 	if (ret < 0 || (ret != len)) {
129 		ssovf_log_err("Failed to get tmo ticks ns=%"PRId64"", ns);
130 		return -EIO;
131 	}
132 
133 	*tmo_ticks = ns2iter.getwork_iter;
134 	return 0;
135 }
136 
137 static void
138 ssovf_fastpath_fns_set(struct rte_eventdev *dev)
139 {
140 	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
141 
142 	dev->enqueue       = ssows_enq;
143 	dev->enqueue_burst = ssows_enq_burst;
144 	dev->enqueue_new_burst = ssows_enq_new_burst;
145 	dev->enqueue_forward_burst = ssows_enq_fwd_burst;
146 	dev->dequeue       = ssows_deq;
147 	dev->dequeue_burst = ssows_deq_burst;
148 
149 	if (edev->is_timeout_deq) {
150 		dev->dequeue       = ssows_deq_timeout;
151 		dev->dequeue_burst = ssows_deq_timeout_burst;
152 	}
153 }
154 
155 static void
156 ssovf_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *dev_info)
157 {
158 	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
159 
160 	dev_info->driver_name = RTE_STR(EVENTDEV_NAME_OCTEONTX_PMD);
161 	dev_info->min_dequeue_timeout_ns = edev->min_deq_timeout_ns;
162 	dev_info->max_dequeue_timeout_ns = edev->max_deq_timeout_ns;
163 	dev_info->max_event_queues = edev->max_event_queues;
164 	dev_info->max_event_queue_flows = (1ULL << 20);
165 	dev_info->max_event_queue_priority_levels = 8;
166 	dev_info->max_event_priority_levels = 1;
167 	dev_info->max_event_ports = edev->max_event_ports;
168 	dev_info->max_event_port_dequeue_depth = 1;
169 	dev_info->max_event_port_enqueue_depth = 1;
170 	dev_info->max_num_events =  edev->max_num_events;
171 	dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
172 					RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
173 					RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES|
174 					RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
175 					RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
176 					RTE_EVENT_DEV_CAP_NONSEQ_MODE;
177 
178 }
179 
180 static int
181 ssovf_configure(const struct rte_eventdev *dev)
182 {
183 	struct rte_event_dev_config *conf = &dev->data->dev_conf;
184 	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
185 	uint64_t deq_tmo_ns;
186 
187 	ssovf_func_trace();
188 	deq_tmo_ns = conf->dequeue_timeout_ns;
189 	if (deq_tmo_ns == 0)
190 		deq_tmo_ns = edev->min_deq_timeout_ns;
191 
192 	if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
193 		edev->is_timeout_deq = 1;
194 		deq_tmo_ns = edev->min_deq_timeout_ns;
195 	}
196 	edev->nb_event_queues = conf->nb_event_queues;
197 	edev->nb_event_ports = conf->nb_event_ports;
198 
199 	return ssovf_mbox_getwork_tmo_set(deq_tmo_ns);
200 }
201 
202 static void
203 ssovf_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
204 				 struct rte_event_queue_conf *queue_conf)
205 {
206 	RTE_SET_USED(dev);
207 	RTE_SET_USED(queue_id);
208 
209 	queue_conf->nb_atomic_flows = (1ULL << 20);
210 	queue_conf->nb_atomic_order_sequences = (1ULL << 20);
211 	queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
212 	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
213 }
214 
/* Queue teardown: nothing to free, queues map directly to SSO groups. */
static void
ssovf_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);
}
221 
/* Queue setup reduces to programming the SSO group priority via mbox. */
static int
ssovf_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
			      const struct rte_event_queue_conf *queue_conf)
{
	RTE_SET_USED(dev);
	ssovf_func_trace("queue=%d prio=%d", queue_id, queue_conf->priority);

	return ssovf_mbox_priority_set(queue_id, queue_conf->priority);
}
231 
232 static void
233 ssovf_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
234 				 struct rte_event_port_conf *port_conf)
235 {
236 	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
237 
238 	RTE_SET_USED(port_id);
239 	port_conf->new_event_threshold = edev->max_num_events;
240 	port_conf->dequeue_depth = 1;
241 	port_conf->enqueue_depth = 1;
242 	port_conf->disable_implicit_release = 0;
243 }
244 
/* Free a port's ssows structure (rte_free(NULL) is a no-op). */
static void
ssovf_port_release(void *port)
{
	rte_free(port);
}
250 
251 static int
252 ssovf_port_setup(struct rte_eventdev *dev, uint8_t port_id,
253 				const struct rte_event_port_conf *port_conf)
254 {
255 	struct ssows *ws;
256 	uint32_t reg_off;
257 	uint8_t q;
258 	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
259 
260 	ssovf_func_trace("port=%d", port_id);
261 	RTE_SET_USED(port_conf);
262 
263 	/* Free memory prior to re-allocation if needed */
264 	if (dev->data->ports[port_id] != NULL) {
265 		ssovf_port_release(dev->data->ports[port_id]);
266 		dev->data->ports[port_id] = NULL;
267 	}
268 
269 	/* Allocate event port memory */
270 	ws = rte_zmalloc_socket("eventdev ssows",
271 			sizeof(struct ssows), RTE_CACHE_LINE_SIZE,
272 			dev->data->socket_id);
273 	if (ws == NULL) {
274 		ssovf_log_err("Failed to alloc memory for port=%d", port_id);
275 		return -ENOMEM;
276 	}
277 
278 	ws->base = octeontx_ssovf_bar(OCTEONTX_SSO_HWS, port_id, 0);
279 	if (ws->base == NULL) {
280 		rte_free(ws);
281 		ssovf_log_err("Failed to get hws base addr port=%d", port_id);
282 		return -EINVAL;
283 	}
284 
285 	reg_off = SSOW_VHWS_OP_GET_WORK0;
286 	reg_off |= 1 << 4; /* Index_ggrp_mask (Use maskset zero) */
287 	reg_off |= 1 << 16; /* Wait */
288 	ws->getwork = ws->base + reg_off;
289 	ws->port = port_id;
290 
291 	for (q = 0; q < edev->nb_event_queues; q++) {
292 		ws->grps[q] = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, q, 2);
293 		if (ws->grps[q] == NULL) {
294 			rte_free(ws);
295 			ssovf_log_err("Failed to get grp%d base addr", q);
296 			return -EINVAL;
297 		}
298 	}
299 
300 	dev->data->ports[port_id] = ws;
301 	ssovf_log_dbg("port=%d ws=%p", port_id, ws);
302 	return 0;
303 }
304 
305 static int
306 ssovf_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[],
307 		const uint8_t priorities[], uint16_t nb_links)
308 {
309 	uint16_t link;
310 	uint64_t val;
311 	struct ssows *ws = port;
312 
313 	ssovf_func_trace("port=%d nb_links=%d", ws->port, nb_links);
314 	RTE_SET_USED(dev);
315 	RTE_SET_USED(priorities);
316 
317 	for (link = 0; link < nb_links; link++) {
318 		val = queues[link];
319 		val |= (1ULL << 24); /* Set membership */
320 		ssovf_write64(val, ws->base + SSOW_VHWS_GRPMSK_CHGX(0));
321 	}
322 	return (int)nb_links;
323 }
324 
325 static int
326 ssovf_port_unlink(struct rte_eventdev *dev, void *port, uint8_t queues[],
327 			uint16_t nb_unlinks)
328 {
329 	uint16_t unlink;
330 	uint64_t val;
331 	struct ssows *ws = port;
332 
333 	ssovf_func_trace("port=%d nb_links=%d", ws->port, nb_unlinks);
334 	RTE_SET_USED(dev);
335 
336 	for (unlink = 0; unlink < nb_unlinks; unlink++) {
337 		val = queues[unlink];
338 		val &= ~(1ULL << 24); /* Clear membership */
339 		ssovf_write64(val, ws->base + SSOW_VHWS_GRPMSK_CHGX(0));
340 	}
341 	return (int)nb_unlinks;
342 }
343 
/* eventdev timeout_ticks op: delegate the ns -> getwork-iteration
 * conversion to the PF via mailbox. */
static int
ssovf_timeout_ticks(struct rte_eventdev *dev, uint64_t ns, uint64_t *tmo_ticks)
{
	RTE_SET_USED(dev);

	return ssovf_mbox_timeout_ticks(ns, tmo_ticks);
}
351 
/*
 * Dump one workslot's (HWS) debug registers to @f in decoded form.
 * The shift/mask constants decode the SSOW VHWS register bit-fields;
 * field positions presumably follow the OCTEONTX HRM — confirm against
 * the hardware manual before relying on individual fields.
 */
static void
ssows_dump(struct ssows *ws, FILE *f)
{
	uint8_t *base = ws->base;
	uint64_t val;

	fprintf(f, "\t---------------port%d---------------\n", ws->port);
	/* Current tag value, tag type and work-slot linkage state */
	val = ssovf_read64(base + SSOW_VHWS_TAG);
	fprintf(f, "\ttag=0x%x tt=%d head=%d tail=%d grp=%d index=%d tail=%d\n",
		(uint32_t)(val & 0xffffffff), (int)(val >> 32) & 0x3,
		(int)(val >> 34) & 0x1, (int)(val >> 35) & 0x1,
		(int)(val >> 36) & 0x3ff, (int)(val >> 48) & 0x3ff,
		(int)(val >> 63) & 0x1);

	/* Work-queue pointer currently held by this slot */
	val = ssovf_read64(base + SSOW_VHWS_WQP);
	fprintf(f, "\twqp=0x%"PRIx64"\n", val);

	/* Link list state for the slot */
	val = ssovf_read64(base + SSOW_VHWS_LINKS);
	fprintf(f, "\tindex=%d valid=%d revlink=%d tail=%d head=%d grp=%d\n",
		(int)(val & 0x3ff), (int)(val >> 10) & 0x1,
		(int)(val >> 11) & 0x3ff, (int)(val >> 26) & 0x1,
		(int)(val >> 27) & 0x1, (int)(val >> 28) & 0x3ff);

	/* Pending tag-switch state */
	val = ssovf_read64(base + SSOW_VHWS_PENDTAG);
	fprintf(f, "\tptag=0x%x ptt=%d pgwi=%d pdesc=%d pgw=%d pgww=%d ps=%d\n",
		(uint32_t)(val & 0xffffffff), (int)(val >> 32) & 0x3,
		(int)(val >> 56) & 0x1, (int)(val >> 58) & 0x1,
		(int)(val >> 61) & 0x1, (int)(val >> 62) & 0x1,
		(int)(val >> 63) & 0x1);

	/* Pending work-queue pointer */
	val = ssovf_read64(base + SSOW_VHWS_PENDWQP);
	fprintf(f, "\tpwqp=0x%"PRIx64"\n", val);
}
385 
386 static int
387 ssovf_eth_rx_adapter_caps_get(const struct rte_eventdev *dev,
388 		const struct rte_eth_dev *eth_dev, uint32_t *caps)
389 {
390 	int ret;
391 	RTE_SET_USED(dev);
392 
393 	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
394 	if (ret)
395 		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
396 	else
397 		*caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT;
398 
399 	return 0;
400 }
401 
402 static int
403 ssovf_eth_rx_adapter_queue_add(const struct rte_eventdev *dev,
404 		const struct rte_eth_dev *eth_dev, int32_t rx_queue_id,
405 		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
406 {
407 	int ret = 0;
408 	const struct octeontx_nic *nic = eth_dev->data->dev_private;
409 	pki_mod_qos_t pki_qos;
410 	RTE_SET_USED(dev);
411 
412 	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
413 	if (ret)
414 		return -EINVAL;
415 
416 	if (rx_queue_id >= 0)
417 		return -EINVAL;
418 
419 	if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL)
420 		return -ENOTSUP;
421 
422 	memset(&pki_qos, 0, sizeof(pki_mod_qos_t));
423 
424 	pki_qos.port_type = 0;
425 	pki_qos.index = 0;
426 	pki_qos.mmask.f_tag_type = 1;
427 	pki_qos.mmask.f_port_add = 1;
428 	pki_qos.mmask.f_grp_ok = 1;
429 	pki_qos.mmask.f_grp_bad = 1;
430 	pki_qos.mmask.f_grptag_ok = 1;
431 	pki_qos.mmask.f_grptag_bad = 1;
432 
433 	pki_qos.tag_type = queue_conf->ev.sched_type;
434 	pki_qos.qos_entry.port_add = 0;
435 	pki_qos.qos_entry.ggrp_ok = queue_conf->ev.queue_id;
436 	pki_qos.qos_entry.ggrp_bad = queue_conf->ev.queue_id;
437 	pki_qos.qos_entry.grptag_bad = 0;
438 	pki_qos.qos_entry.grptag_ok = 0;
439 
440 	ret = octeontx_pki_port_modify_qos(nic->port_id, &pki_qos);
441 	if (ret < 0)
442 		ssovf_log_err("failed to modify QOS, port=%d, q=%d",
443 				nic->port_id, queue_conf->ev.queue_id);
444 
445 	return ret;
446 }
447 
448 static int
449 ssovf_eth_rx_adapter_queue_del(const struct rte_eventdev *dev,
450 		const struct rte_eth_dev *eth_dev, int32_t rx_queue_id)
451 {
452 	int ret = 0;
453 	const struct octeontx_nic *nic = eth_dev->data->dev_private;
454 	pki_del_qos_t pki_qos;
455 	RTE_SET_USED(dev);
456 	RTE_SET_USED(rx_queue_id);
457 
458 	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
459 	if (ret)
460 		return -EINVAL;
461 
462 	pki_qos.port_type = 0;
463 	pki_qos.index = 0;
464 	memset(&pki_qos, 0, sizeof(pki_del_qos_t));
465 	ret = octeontx_pki_port_delete_qos(nic->port_id, &pki_qos);
466 	if (ret < 0)
467 		ssovf_log_err("Failed to delete QOS port=%d, q=%d",
468 				nic->port_id, queue_conf->ev.queue_id);
469 	return ret;
470 }
471 
472 static int
473 ssovf_eth_rx_adapter_start(const struct rte_eventdev *dev,
474 					const struct rte_eth_dev *eth_dev)
475 {
476 	int ret;
477 	const struct octeontx_nic *nic = eth_dev->data->dev_private;
478 	RTE_SET_USED(dev);
479 
480 	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
481 	if (ret)
482 		return 0;
483 	octeontx_pki_port_start(nic->port_id);
484 	return 0;
485 }
486 
487 
488 static int
489 ssovf_eth_rx_adapter_stop(const struct rte_eventdev *dev,
490 		const struct rte_eth_dev *eth_dev)
491 {
492 	int ret;
493 	const struct octeontx_nic *nic = eth_dev->data->dev_private;
494 	RTE_SET_USED(dev);
495 
496 	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
497 	if (ret)
498 		return 0;
499 	octeontx_pki_port_stop(nic->port_id);
500 	return 0;
501 }
502 
503 static void
504 ssovf_dump(struct rte_eventdev *dev, FILE *f)
505 {
506 	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
507 	uint8_t port;
508 
509 	/* Dump SSOWVF debug registers */
510 	for (port = 0; port < edev->nb_event_ports; port++)
511 		ssows_dump(dev->data->ports[port], f);
512 }
513 
514 static int
515 ssovf_start(struct rte_eventdev *dev)
516 {
517 	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
518 	struct ssows *ws;
519 	uint8_t *base;
520 	uint8_t i;
521 
522 	ssovf_func_trace();
523 	for (i = 0; i < edev->nb_event_ports; i++) {
524 		ws = dev->data->ports[i];
525 		ssows_reset(ws);
526 		ws->swtag_req = 0;
527 	}
528 
529 	for (i = 0; i < edev->nb_event_queues; i++) {
530 		/* Consume all the events through HWS0 */
531 		ssows_flush_events(dev->data->ports[0], i);
532 
533 		base = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, i, 0);
534 		base += SSO_VHGRP_QCTL;
535 		ssovf_write64(1, base); /* Enable SSO group */
536 	}
537 
538 	ssovf_fastpath_fns_set(dev);
539 	return 0;
540 }
541 
542 static void
543 ssovf_stop(struct rte_eventdev *dev)
544 {
545 	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
546 	struct ssows *ws;
547 	uint8_t *base;
548 	uint8_t i;
549 
550 	ssovf_func_trace();
551 	for (i = 0; i < edev->nb_event_ports; i++) {
552 		ws = dev->data->ports[i];
553 		ssows_reset(ws);
554 		ws->swtag_req = 0;
555 	}
556 
557 	for (i = 0; i < edev->nb_event_queues; i++) {
558 		/* Consume all the events through HWS0 */
559 		ssows_flush_events(dev->data->ports[0], i);
560 
561 		base = octeontx_ssovf_bar(OCTEONTX_SSO_GROUP, i, 0);
562 		base += SSO_VHGRP_QCTL;
563 		ssovf_write64(0, base); /* Disable SSO group */
564 	}
565 }
566 
567 static int
568 ssovf_close(struct rte_eventdev *dev)
569 {
570 	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
571 	uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
572 	uint8_t i;
573 
574 	for (i = 0; i < edev->nb_event_queues; i++)
575 		all_queues[i] = i;
576 
577 	for (i = 0; i < edev->nb_event_ports; i++)
578 		ssovf_port_unlink(dev, dev->data->ports[i], all_queues,
579 			edev->nb_event_queues);
580 	return 0;
581 }
582 
583 /* Initialize and register event driver with DPDK Application */
/* Initialize and register event driver with DPDK Application */
static const struct rte_eventdev_ops ssovf_ops = {
	/* Device and queue/port lifecycle */
	.dev_infos_get    = ssovf_info_get,
	.dev_configure    = ssovf_configure,
	.queue_def_conf   = ssovf_queue_def_conf,
	.queue_setup      = ssovf_queue_setup,
	.queue_release    = ssovf_queue_release,
	.port_def_conf    = ssovf_port_def_conf,
	.port_setup       = ssovf_port_setup,
	.port_release     = ssovf_port_release,
	.port_link        = ssovf_port_link,
	.port_unlink      = ssovf_port_unlink,
	.timeout_ticks    = ssovf_timeout_ticks,

	/* Ethernet Rx adapter offload (octeontx ethdev only) */
	.eth_rx_adapter_caps_get  = ssovf_eth_rx_adapter_caps_get,
	.eth_rx_adapter_queue_add = ssovf_eth_rx_adapter_queue_add,
	.eth_rx_adapter_queue_del = ssovf_eth_rx_adapter_queue_del,
	.eth_rx_adapter_start = ssovf_eth_rx_adapter_start,
	.eth_rx_adapter_stop = ssovf_eth_rx_adapter_stop,

	/* Debug and device start/stop/close */
	.dump             = ssovf_dump,
	.dev_start        = ssovf_start,
	.dev_stop         = ssovf_stop,
	.dev_close        = ssovf_close
};
608 
609 static int
610 ssovf_vdev_probe(struct rte_vdev_device *vdev)
611 {
612 	struct octeontx_ssovf_info oinfo;
613 	struct ssovf_mbox_dev_info info;
614 	struct ssovf_evdev *edev;
615 	struct rte_eventdev *eventdev;
616 	static int ssovf_init_once;
617 	const char *name;
618 	int ret;
619 
620 	name = rte_vdev_device_name(vdev);
621 	/* More than one instance is not supported */
622 	if (ssovf_init_once) {
623 		ssovf_log_err("Request to create >1 %s instance", name);
624 		return -EINVAL;
625 	}
626 
627 	eventdev = rte_event_pmd_vdev_init(name, sizeof(struct ssovf_evdev),
628 				rte_socket_id());
629 	if (eventdev == NULL) {
630 		ssovf_log_err("Failed to create eventdev vdev %s", name);
631 		return -ENOMEM;
632 	}
633 	eventdev->dev_ops = &ssovf_ops;
634 
635 	/* For secondary processes, the primary has done all the work */
636 	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
637 		ssovf_fastpath_fns_set(eventdev);
638 		return 0;
639 	}
640 
641 	ret = octeontx_ssovf_info(&oinfo);
642 	if (ret) {
643 		ssovf_log_err("Failed to probe and validate ssovfs %d", ret);
644 		goto error;
645 	}
646 
647 	edev = ssovf_pmd_priv(eventdev);
648 	edev->max_event_ports = oinfo.total_ssowvfs;
649 	edev->max_event_queues = oinfo.total_ssovfs;
650 	edev->is_timeout_deq = 0;
651 
652 	ret = ssovf_mbox_dev_info(&info);
653 	if (ret < 0 || ret != sizeof(struct ssovf_mbox_dev_info)) {
654 		ssovf_log_err("Failed to get mbox devinfo %d", ret);
655 		goto error;
656 	}
657 
658 	edev->min_deq_timeout_ns = info.min_deq_timeout_ns;
659 	edev->max_deq_timeout_ns = info.max_deq_timeout_ns;
660 	edev->max_num_events =  info.max_num_events;
661 	ssovf_log_dbg("min_deq_tmo=%"PRId64" max_deq_tmo=%"PRId64" max_evts=%d",
662 			info.min_deq_timeout_ns, info.max_deq_timeout_ns,
663 			info.max_num_events);
664 
665 	if (!edev->max_event_ports || !edev->max_event_queues) {
666 		ssovf_log_err("Not enough eventdev resource queues=%d ports=%d",
667 			edev->max_event_queues, edev->max_event_ports);
668 		ret = -ENODEV;
669 		goto error;
670 	}
671 
672 	ssovf_log_info("Initializing %s domain=%d max_queues=%d max_ports=%d",
673 			name, oinfo.domain, edev->max_event_queues,
674 			edev->max_event_ports);
675 
676 	ssovf_init_once = 1;
677 	return 0;
678 
679 error:
680 	rte_event_pmd_vdev_uninit(name);
681 	return ret;
682 }
683 
/* Remove the vdev: tear down the eventdev created at probe time. */
static int
ssovf_vdev_remove(struct rte_vdev_device *vdev)
{
	const char *name = rte_vdev_device_name(vdev);

	ssovf_log_info("Closing %s", name);
	return rte_event_pmd_vdev_uninit(name);
}
693 
/* Virtual-device driver hooks for the octeontx SSO eventdev PMD. */
static struct rte_vdev_driver vdev_ssovf_pmd = {
	.probe = ssovf_vdev_probe,
	.remove = ssovf_vdev_remove
};
698 
699 RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_OCTEONTX_PMD, vdev_ssovf_pmd);
700