/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include <errno.h>
#include <inttypes.h>
#include <stdlib.h>

#include <rte_common.h>
#include <cryptodev_pmd.h>
#include <rte_debug.h>
#include <dev_driver.h>
#include <rte_eal.h>
#include <ethdev_driver.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_kvargs.h>
#include <rte_lcore.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_memory.h>
#include <bus_vdev_driver.h>

#include "ssovf_evdev.h"
#include "timvf_evdev.h"
#include "otx_cryptodev_hw_access.h"

static uint8_t timvf_enable_stats;

RTE_LOG_REGISTER_DEFAULT(otx_logtype_ssovf, NOTICE);

/* SSOPF Mailbox messages */

struct ssovf_mbox_dev_info {
	uint64_t min_deq_timeout_ns;
	uint64_t max_deq_timeout_ns;
	uint32_t max_num_events;
};

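/*
 * Query the SSO PF over the mailbox for device-wide limits: the
 * supported dequeue timeout range and the maximum number of in-flight
 * events. The reply is written straight into *info; callers treat a
 * short reply as an error.
 */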
static int
ssovf_mbox_dev_info(struct ssovf_mbox_dev_info *info)
{
	struct octeontx_mbox_hdr hdr = {0};
	uint16_t len = sizeof(struct ssovf_mbox_dev_info);

	hdr.coproc = SSO_COPROC;
	hdr.msg = SSO_GET_DEV_INFO;
	hdr.vfid = 0;

	memset(info, 0, len);
	return octeontx_mbox_send(&hdr, NULL, 0, info, len);
}

struct ssovf_mbox_getwork_wait {
	uint64_t wait_ns;
};

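/*
 * Program the GETWORK wait time on the PF. This bounds how long a
 * hardware workslot waits for work before a dequeue returns empty,
 * and is derived from the configured dequeue_timeout_ns.
 */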
static int
ssovf_mbox_getwork_tmo_set(uint32_t timeout_ns)
{
	struct octeontx_mbox_hdr hdr = {0};
	struct ssovf_mbox_getwork_wait tmo_set;
	uint16_t len = sizeof(struct ssovf_mbox_getwork_wait);
	int ret;

	hdr.coproc = SSO_COPROC;
	hdr.msg = SSO_SET_GETWORK_WAIT;
	hdr.vfid = 0;

	tmo_set.wait_ns = timeout_ns;
	ret = octeontx_mbox_send(&hdr, &tmo_set, len, NULL, 0);
	if (ret)
		ssovf_log_err("Failed to set getwork timeout(%d)", ret);

	return ret;
}

struct ssovf_mbox_grp_pri {
	uint8_t vhgrp_id;
	uint8_t wgt_left; /* Read only */
	uint8_t weight;
	uint8_t affinity;
	uint8_t priority;
};

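/*
 * Set scheduling parameters of a group (event queue) via the mailbox.
 * Weight and affinity are pinned to their maximum (0xff); only the
 * priority comes from the application, scaled from eventdev's 0-255
 * range down to the eight hardware levels.
 */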
static int
ssovf_mbox_priority_set(uint8_t queue, uint8_t prio)
{
	struct octeontx_mbox_hdr hdr = {0};
	struct ssovf_mbox_grp_pri grp;
	uint16_t len = sizeof(struct ssovf_mbox_grp_pri);
	int ret;

	hdr.coproc = SSO_COPROC;
	hdr.msg = SSO_GRP_SET_PRIORITY;
	hdr.vfid = queue;

	grp.vhgrp_id = queue;
	grp.weight = 0xff;
	grp.affinity = 0xff;
	grp.priority = prio / 32; /* Normalize to 0 to 7 */

	ret = octeontx_mbox_send(&hdr, &grp, len, NULL, 0);
	if (ret)
		ssovf_log_err("Failed to set grp=%d prio=%d", queue, prio);

	return ret;
}

struct ssovf_mbox_convert_ns_getworks_iter {
	uint64_t wait_ns;
	uint32_t getwork_iter; /* Get_work iterations for the given wait_ns */
};

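/*
 * SSO dequeue timeouts are expressed as a number of GETWORK iterations
 * rather than in time units, so ask the PF to convert a nanosecond
 * value into that unit.
 */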
static int
ssovf_mbox_timeout_ticks(uint64_t ns, uint64_t *tmo_ticks)
{
	struct octeontx_mbox_hdr hdr = {0};
	struct ssovf_mbox_convert_ns_getworks_iter ns2iter;
	uint16_t len = sizeof(ns2iter);
	int ret;

	hdr.coproc = SSO_COPROC;
	hdr.msg = SSO_CONVERT_NS_GETWORK_ITER;
	hdr.vfid = 0;

	memset(&ns2iter, 0, len);
	ns2iter.wait_ns = ns;
	ret = octeontx_mbox_send(&hdr, &ns2iter, len, &ns2iter, len);
	if (ret != len) {
		ssovf_log_err("Failed to get tmo ticks ns=%"PRId64"", ns);
		return -EIO;
	}

	*tmo_ticks = ns2iter.getwork_iter;
	return 0;
}

static void
ssovf_info_get(struct rte_eventdev *dev, struct rte_event_dev_info *dev_info)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);

	dev_info->driver_name = RTE_STR(EVENTDEV_NAME_OCTEONTX_PMD);
	dev_info->min_dequeue_timeout_ns = edev->min_deq_timeout_ns;
	dev_info->max_dequeue_timeout_ns = edev->max_deq_timeout_ns;
	dev_info->max_event_queues = edev->max_event_queues;
	dev_info->max_event_queue_flows = (1ULL << 20);
	dev_info->max_event_queue_priority_levels = 8;
	dev_info->max_event_priority_levels = 1;
	dev_info->max_event_ports = edev->max_event_ports;
	dev_info->max_event_port_dequeue_depth = 1;
	dev_info->max_event_port_enqueue_depth = 1;
	dev_info->max_num_events = edev->max_num_events;
	dev_info->event_dev_cap = RTE_EVENT_DEV_CAP_QUEUE_QOS |
					RTE_EVENT_DEV_CAP_ATOMIC |
					RTE_EVENT_DEV_CAP_ORDERED |
					RTE_EVENT_DEV_CAP_PARALLEL |
					RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
					RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES |
					RTE_EVENT_DEV_CAP_RUNTIME_PORT_LINK |
					RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
					RTE_EVENT_DEV_CAP_NONSEQ_MODE |
					RTE_EVENT_DEV_CAP_CARRY_FLOW_ID |
					RTE_EVENT_DEV_CAP_MAINTENANCE_FREE;
	dev_info->max_profiles_per_port = 1;
}

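/*
 * Apply the device configuration. A dequeue timeout of zero selects the
 * hardware minimum; when per-dequeue timeouts are requested, the global
 * wait is likewise pinned to the minimum so the timeout can be honoured
 * per call in the dequeue path (see is_timeout_deq).
 */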
static int
ssovf_configure(const struct rte_eventdev *dev)
{
	struct rte_event_dev_config *conf = &dev->data->dev_conf;
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	uint64_t deq_tmo_ns;

	ssovf_func_trace();
	deq_tmo_ns = conf->dequeue_timeout_ns;
	if (deq_tmo_ns == 0)
		deq_tmo_ns = edev->min_deq_timeout_ns;

	if (conf->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
		edev->is_timeout_deq = 1;
		deq_tmo_ns = edev->min_deq_timeout_ns;
	}
	edev->nb_event_queues = conf->nb_event_queues;
	edev->nb_event_ports = conf->nb_event_ports;

	return ssovf_mbox_getwork_tmo_set(deq_tmo_ns);
}

static void
ssovf_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
				 struct rte_event_queue_conf *queue_conf)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);

	queue_conf->nb_atomic_flows = (1ULL << 20);
	queue_conf->nb_atomic_order_sequences = (1ULL << 20);
	queue_conf->event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES;
	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
}

static void
ssovf_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);
}

static int
ssovf_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
			      const struct rte_event_queue_conf *queue_conf)
{
	RTE_SET_USED(dev);
	ssovf_func_trace("queue=%d prio=%d", queue_id, queue_conf->priority);

	return ssovf_mbox_priority_set(queue_id, queue_conf->priority);
}

static void
ssovf_port_def_conf(struct rte_eventdev *dev, uint8_t port_id,
				 struct rte_event_port_conf *port_conf)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);

	RTE_SET_USED(port_id);
	port_conf->new_event_threshold = edev->max_num_events;
	port_conf->dequeue_depth = 1;
	port_conf->enqueue_depth = 1;
	port_conf->event_port_cfg = 0;
}

static void
ssovf_port_release(void *port)
{
	rte_free(port);
}

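/*
 * An event port maps 1:1 onto an SSOWVF hardware workslot. Setup
 * resolves the workslot BAR, precomputes the GETWORK load address
 * (maskset zero, wait mode) and caches each group's BAR so the fast
 * path never performs a lookup.
 */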
static int
ssovf_port_setup(struct rte_eventdev *dev, uint8_t port_id,
				const struct rte_event_port_conf *port_conf)
{
	struct ssows *ws;
	uint32_t reg_off;
	uint8_t q;
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);

	ssovf_func_trace("port=%d", port_id);
	RTE_SET_USED(port_conf);

	/* Free memory prior to re-allocation if needed */
	if (dev->data->ports[port_id] != NULL) {
		ssovf_port_release(dev->data->ports[port_id]);
		dev->data->ports[port_id] = NULL;
	}

	/* Allocate event port memory */
	ws = rte_zmalloc_socket("eventdev ssows",
			sizeof(struct ssows), RTE_CACHE_LINE_SIZE,
			dev->data->socket_id);
	if (ws == NULL) {
		ssovf_log_err("Failed to alloc memory for port=%d", port_id);
		return -ENOMEM;
	}

	ws->base = ssovf_bar(OCTEONTX_SSO_HWS, port_id, 0);
	if (ws->base == NULL) {
		rte_free(ws);
		ssovf_log_err("Failed to get hws base addr port=%d", port_id);
		return -EINVAL;
	}

	reg_off = SSOW_VHWS_OP_GET_WORK0;
	reg_off |= 1 << 4; /* Index_ggrp_mask (Use maskset zero) */
	reg_off |= 1 << 16; /* Wait */
	ws->getwork = ws->base + reg_off;
	ws->port = port_id;
	ws->lookup_mem = octeontx_fastpath_lookup_mem_get();

	for (q = 0; q < edev->nb_event_queues; q++) {
		ws->grps[q] = ssovf_bar(OCTEONTX_SSO_GROUP, q, 2);
		if (ws->grps[q] == NULL) {
			rte_free(ws);
			ssovf_log_err("Failed to get grp%d base addr", q);
			return -EINVAL;
		}
	}

	dev->data->ports[port_id] = ws;
	ssovf_log_dbg("port=%d ws=%p", port_id, ws);
	return 0;
}

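/*
 * Queue<->port links are realized as a per-workslot group membership
 * mask: writing a queue (group) id to GRPMSK_CHGX with bit 24 set adds
 * it to maskset zero, and with bit 24 clear removes it. Link priorities
 * are ignored; queue priority is global (see ssovf_mbox_priority_set()).
 */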
static int
ssovf_port_link(struct rte_eventdev *dev, void *port, const uint8_t queues[],
		const uint8_t priorities[], uint16_t nb_links)
{
	uint16_t link;
	uint64_t val;
	struct ssows *ws = port;

	ssovf_func_trace("port=%d nb_links=%d", ws->port, nb_links);
	RTE_SET_USED(dev);
	RTE_SET_USED(priorities);

	for (link = 0; link < nb_links; link++) {
		val = queues[link];
		val |= (1ULL << 24); /* Set membership */
		ssovf_write64(val, ws->base + SSOW_VHWS_GRPMSK_CHGX(0));
	}
	return (int)nb_links;
}

static int
ssovf_port_unlink(struct rte_eventdev *dev, void *port, uint8_t queues[],
			uint16_t nb_unlinks)
{
	uint16_t unlink;
	uint64_t val;
	struct ssows *ws = port;

	ssovf_func_trace("port=%d nb_unlinks=%d", ws->port, nb_unlinks);
	RTE_SET_USED(dev);

	for (unlink = 0; unlink < nb_unlinks; unlink++) {
		val = queues[unlink];
		val &= ~(1ULL << 24); /* Clear membership */
		ssovf_write64(val, ws->base + SSOW_VHWS_GRPMSK_CHGX(0));
	}
	return (int)nb_unlinks;
}

static int
ssovf_timeout_ticks(struct rte_eventdev *dev, uint64_t ns, uint64_t *tmo_ticks)
{
	RTE_SET_USED(dev);

	return ssovf_mbox_timeout_ticks(ns, tmo_ticks);
}

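/*
 * Decode and print the per-workslot debug registers: current tag, WQP,
 * links and pending state. The bit positions below follow the SSOW
 * register layout this driver uses throughout.
 */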
static void
ssows_dump(struct ssows *ws, FILE *f)
{
	uint8_t *base = ws->base;
	uint64_t val;

	fprintf(f, "\t---------------port%d---------------\n", ws->port);
	val = ssovf_read64(base + SSOW_VHWS_TAG);
	fprintf(f, "\ttag=0x%x tt=%d head=%d tail=%d grp=%d index=%d tail=%d\n",
		(uint32_t)(val & 0xffffffff), (int)(val >> 32) & 0x3,
		(int)(val >> 34) & 0x1, (int)(val >> 35) & 0x1,
		(int)(val >> 36) & 0x3ff, (int)(val >> 48) & 0x3ff,
		(int)(val >> 63) & 0x1);

	val = ssovf_read64(base + SSOW_VHWS_WQP);
	fprintf(f, "\twqp=0x%"PRIx64"\n", val);

	val = ssovf_read64(base + SSOW_VHWS_LINKS);
	fprintf(f, "\tindex=%d valid=%d revlink=%d tail=%d head=%d grp=%d\n",
		(int)(val & 0x3ff), (int)(val >> 10) & 0x1,
		(int)(val >> 11) & 0x3ff, (int)(val >> 26) & 0x1,
		(int)(val >> 27) & 0x1, (int)(val >> 28) & 0x3ff);

	val = ssovf_read64(base + SSOW_VHWS_PENDTAG);
	fprintf(f, "\tptag=0x%x ptt=%d pgwi=%d pdesc=%d pgw=%d pgww=%d ps=%d\n",
		(uint32_t)(val & 0xffffffff), (int)(val >> 32) & 0x3,
		(int)(val >> 56) & 0x1, (int)(val >> 58) & 0x1,
		(int)(val >> 61) & 0x1, (int)(val >> 62) & 0x1,
		(int)(val >> 63) & 0x1);

	val = ssovf_read64(base + SSOW_VHWS_PENDWQP);
	fprintf(f, "\tpwqp=0x%"PRIx64"\n", val);
}

static int
ssovf_eth_rx_adapter_caps_get(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev, uint32_t *caps)
{
	int ret;
	RTE_SET_USED(dev);

	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
	if (ret)
		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
	else
		*caps = RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT;

	return 0;
}

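/*
 * Bind an ethdev Rx queue to an event queue. Each distinct mempool
 * backing an Rx queue is reference counted so that its size is debited
 * from available_events exactly once; the PKI QOS entry is then
 * rewritten so received packets are tagged with the requested event
 * queue and schedule type.
 */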
static int
ssovf_eth_rx_adapter_queue_add(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev, int32_t rx_queue_id,
		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
{
	const struct octeontx_nic *nic = eth_dev->data->dev_private;
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	uint16_t free_idx = UINT16_MAX;
	struct octeontx_rxq *rxq;
	pki_mod_qos_t pki_qos;
	uint8_t found = false;
	int i, ret = 0;
	void *old_ptr;

	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
	if (ret)
		return -EINVAL;

	if (queue_conf->ev.sched_type == RTE_SCHED_TYPE_PARALLEL)
		return -ENOTSUP;

	/* eth_octeontx only supports one rq. */
	rx_queue_id = rx_queue_id == -1 ? 0 : rx_queue_id;
	rxq = eth_dev->data->rx_queues[rx_queue_id];
	/* Add rxq pool to list of used pools and reduce available events. */
	for (i = 0; i < edev->rxq_pools; i++) {
		if (edev->rxq_pool_array[i] == (uintptr_t)rxq->pool) {
			edev->rxq_pool_rcnt[i]++;
			found = true;
			break;
		} else if (free_idx == UINT16_MAX &&
			   edev->rxq_pool_array[i] == 0) {
			free_idx = i;
		}
	}

	if (!found) {
		uint16_t idx;

		if (edev->available_events < rxq->pool->size) {
			ssovf_log_err(
				"Not enough events: available %"PRIu32", rxq pool needs %"PRIu32,
				edev->available_events, rxq->pool->size);
			return -ENOMEM;
		}

		if (free_idx != UINT16_MAX) {
			idx = free_idx;
		} else {
			old_ptr = edev->rxq_pool_array;
			edev->rxq_pools++;
			edev->rxq_pool_array = rte_realloc(
				edev->rxq_pool_array,
				sizeof(uint64_t) * edev->rxq_pools, 0);
			if (edev->rxq_pool_array == NULL) {
				edev->rxq_pools--;
				edev->rxq_pool_array = old_ptr;
				return -ENOMEM;
			}

			old_ptr = edev->rxq_pool_rcnt;
			edev->rxq_pool_rcnt = rte_realloc(
				edev->rxq_pool_rcnt,
				sizeof(uint8_t) * edev->rxq_pools, 0);
			if (edev->rxq_pool_rcnt == NULL) {
				edev->rxq_pools--;
				edev->rxq_pool_rcnt = old_ptr;
				return -ENOMEM;
			}
			idx = edev->rxq_pools - 1;
		}

		edev->rxq_pool_array[idx] = (uintptr_t)rxq->pool;
		edev->rxq_pool_rcnt[idx] = 1;
		edev->available_events -= rxq->pool->size;
	}

	memset(&pki_qos, 0, sizeof(pki_mod_qos_t));

	pki_qos.port_type = 0;
	pki_qos.index = 0;
	pki_qos.mmask.f_tag_type = 1;
	pki_qos.mmask.f_port_add = 1;
	pki_qos.mmask.f_grp_ok = 1;
	pki_qos.mmask.f_grp_bad = 1;
	pki_qos.mmask.f_grptag_ok = 1;
	pki_qos.mmask.f_grptag_bad = 1;

	pki_qos.qos_entry.tag_type = queue_conf->ev.sched_type;
	pki_qos.qos_entry.port_add = 0;
	pki_qos.qos_entry.ggrp_ok = queue_conf->ev.queue_id;
	pki_qos.qos_entry.ggrp_bad = queue_conf->ev.queue_id;
	pki_qos.qos_entry.grptag_bad = 0;
	pki_qos.qos_entry.grptag_ok = 0;

	ret = octeontx_pki_port_modify_qos(nic->port_id, &pki_qos);
	if (ret < 0)
		ssovf_log_err("Failed to modify QOS, port=%d, q=%d",
				nic->port_id, queue_conf->ev.queue_id);

	edev->rx_offload_flags = nic->rx_offload_flags;
	edev->tx_offload_flags = nic->tx_offload_flags;
	return ret;
}

static int
ssovf_eth_rx_adapter_queue_del(const struct rte_eventdev *dev,
		const struct rte_eth_dev *eth_dev, int32_t rx_queue_id)
{
	const struct octeontx_nic *nic = eth_dev->data->dev_private;
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	struct octeontx_rxq *rxq;
	pki_del_qos_t pki_qos;
	uint8_t found = false;
	int i, ret = 0;

	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
	if (ret)
		return -EINVAL;

	rx_queue_id = rx_queue_id == -1 ? 0 : rx_queue_id;
	rxq = eth_dev->data->rx_queues[rx_queue_id];
	for (i = 0; i < edev->rxq_pools; i++) {
		if (edev->rxq_pool_array[i] == (uintptr_t)rxq->pool) {
			found = true;
			break;
		}
	}

	if (found) {
		edev->rxq_pool_rcnt[i]--;
		if (edev->rxq_pool_rcnt[i] == 0)
			edev->rxq_pool_array[i] = 0;
		edev->available_events += rxq->pool->size;
	}

	memset(&pki_qos, 0, sizeof(pki_del_qos_t));
	pki_qos.port_type = 0;
	pki_qos.index = 0;
	ret = octeontx_pki_port_delete_qos(nic->port_id, &pki_qos);
	if (ret < 0)
		ssovf_log_err("Failed to delete QOS port=%d, q=%d",
				nic->port_id, rx_queue_id);
	return ret;
}

537 ssovf_eth_rx_adapter_start(const struct rte_eventdev *dev,
538 					const struct rte_eth_dev *eth_dev)
539 {
540 	RTE_SET_USED(dev);
541 	RTE_SET_USED(eth_dev);
542 
543 	return 0;
544 }
545 
546 
547 static int
548 ssovf_eth_rx_adapter_stop(const struct rte_eventdev *dev,
549 		const struct rte_eth_dev *eth_dev)
550 {
551 	RTE_SET_USED(dev);
552 	RTE_SET_USED(eth_dev);
553 
554 	return 0;
555 }
556 
557 static int
558 ssovf_eth_tx_adapter_caps_get(const struct rte_eventdev *dev,
559 		const struct rte_eth_dev *eth_dev, uint32_t *caps)
560 {
561 	int ret;
562 	RTE_SET_USED(dev);
563 
564 	ret = strncmp(eth_dev->data->name, "eth_octeontx", 12);
565 	if (ret)
566 		*caps = 0;
567 	else
568 		*caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;
569 
570 	return 0;
571 }
572 
573 static int
574 ssovf_eth_tx_adapter_create(uint8_t id, const struct rte_eventdev *dev)
575 {
576 	RTE_SET_USED(id);
577 	RTE_SET_USED(dev);
578 	return 0;
579 }
580 
581 static int
582 ssovf_eth_tx_adapter_free(uint8_t id, const struct rte_eventdev *dev)
583 {
584 	RTE_SET_USED(id);
585 	RTE_SET_USED(dev);
586 	return 0;
587 }
588 
589 static int
590 ssovf_eth_tx_adapter_queue_add(uint8_t id, const struct rte_eventdev *dev,
591 		const struct rte_eth_dev *eth_dev, int32_t tx_queue_id)
592 {
593 	RTE_SET_USED(id);
594 	RTE_SET_USED(dev);
595 	RTE_SET_USED(eth_dev);
596 	RTE_SET_USED(tx_queue_id);
597 	return 0;
598 }
599 
600 static int
601 ssovf_eth_tx_adapter_queue_del(uint8_t id, const struct rte_eventdev *dev,
602 		const struct rte_eth_dev *eth_dev, int32_t tx_queue_id)
603 {
604 	RTE_SET_USED(id);
605 	RTE_SET_USED(dev);
606 	RTE_SET_USED(eth_dev);
607 	RTE_SET_USED(tx_queue_id);
608 	return 0;
609 }
610 
611 static int
612 ssovf_eth_tx_adapter_start(uint8_t id, const struct rte_eventdev *dev)
613 {
614 	RTE_SET_USED(id);
615 	RTE_SET_USED(dev);
616 	return 0;
617 }
618 
619 static int
620 ssovf_eth_tx_adapter_stop(uint8_t id, const struct rte_eventdev *dev)
621 {
622 	RTE_SET_USED(id);
623 	RTE_SET_USED(dev);
624 	return 0;
625 }
626 
627 
628 static void
629 ssovf_dump(struct rte_eventdev *dev, FILE *f)
630 {
631 	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
632 	uint8_t port;
633 
634 	/* Dump SSOWVF debug registers */
635 	for (port = 0; port < edev->nb_event_ports; port++)
636 		ssows_dump(dev->data->ports[port], f);
637 }
638 
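/*
 * Start: reset every workslot, drain stale events from each group
 * through workslot 0 (dropping them, hence the NULL callback), then
 * enable the groups via their QCTL register.
 */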
static int
ssovf_start(struct rte_eventdev *dev)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	struct ssows *ws;
	uint8_t *base;
	uint8_t i;

	ssovf_func_trace();
	for (i = 0; i < edev->nb_event_ports; i++) {
		ws = dev->data->ports[i];
		ssows_reset(ws);
		ws->swtag_req = 0;
	}

	for (i = 0; i < edev->nb_event_queues; i++) {
		/* Consume all the events through HWS0 */
		ssows_flush_events(dev->data->ports[0], i, NULL, NULL);

		base = ssovf_bar(OCTEONTX_SSO_GROUP, i, 0);
		base += SSO_VHGRP_QCTL;
		ssovf_write64(1, base); /* Enable SSO group */
	}

	ssovf_fastpath_fns_set(dev);
	return 0;
}

static void
ssows_handle_event(void *arg, struct rte_event event)
{
	struct rte_eventdev *dev = arg;

	if (dev->dev_ops->dev_stop_flush != NULL)
		dev->dev_ops->dev_stop_flush(dev->data->dev_id, event,
					dev->data->dev_stop_flush_arg);
}

static void
ssovf_stop(struct rte_eventdev *dev)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	struct ssows *ws;
	uint8_t *base;
	uint8_t i;

	ssovf_func_trace();
	for (i = 0; i < edev->nb_event_ports; i++) {
		ws = dev->data->ports[i];
		ssows_reset(ws);
		ws->swtag_req = 0;
	}

	for (i = 0; i < edev->nb_event_queues; i++) {
		/* Consume all the events through HWS0 */
		ssows_flush_events(dev->data->ports[0], i,
				ssows_handle_event, dev);

		base = ssovf_bar(OCTEONTX_SSO_GROUP, i, 0);
		base += SSO_VHGRP_QCTL;
		ssovf_write64(0, base); /* Disable SSO group */
	}
}

static int
ssovf_close(struct rte_eventdev *dev)
{
	struct ssovf_evdev *edev = ssovf_pmd_priv(dev);
	uint8_t all_queues[RTE_EVENT_MAX_QUEUES_PER_DEV];
	uint8_t i;

	for (i = 0; i < edev->nb_event_queues; i++)
		all_queues[i] = i;

	for (i = 0; i < edev->nb_event_ports; i++)
		ssovf_port_unlink(dev, dev->data->ports[i], all_queues,
			edev->nb_event_queues);
	return 0;
}

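/*
 * kvargs parser for boolean devargs: accepts "0" or "1" (in any base
 * form strtoul understands) and rejects everything else.
 */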
static int
ssovf_parsekv(const char *key, const char *value, void *opaque)
{
	uint8_t *flag = opaque;
	uint64_t v;
	char *end;

	errno = 0;
	v = strtoul(value, &end, 0);
	if ((errno != 0) || (value == end) || *end != '\0' || v > 1) {
		ssovf_log_err("invalid %s value %s", key, value);
		return -EINVAL;
	}

	*flag = !!v;
	return 0;
}

static int
ssovf_timvf_caps_get(const struct rte_eventdev *dev, uint64_t flags,
		     uint32_t *caps, const struct event_timer_adapter_ops **ops)
{
	return timvf_timer_adapter_caps_get(dev, flags, caps, ops,
			timvf_enable_stats);
}

static int
ssovf_crypto_adapter_caps_get(const struct rte_eventdev *dev,
			      const struct rte_cryptodev *cdev, uint32_t *caps)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(cdev);

	*caps = RTE_EVENT_CRYPTO_ADAPTER_CAP_INTERNAL_PORT_OP_FWD |
		RTE_EVENT_CRYPTO_ADAPTER_CAP_SESSION_PRIVATE_DATA;

	return 0;
}

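/*
 * Crypto adapter queue-pair add/del only toggle ca_enabled on the CPT
 * queue pair(s), which the crypto fast path consults; qp_add also
 * refreshes the fastpath function pointers so crypto-aware variants
 * are picked up.
 */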
static int
ssovf_crypto_adapter_qp_add(const struct rte_eventdev *dev,
			    const struct rte_cryptodev *cdev,
			    int32_t queue_pair_id,
			    const struct rte_event_crypto_adapter_queue_conf *conf)
{
	struct cpt_instance *qp;
	uint8_t qp_id;

	RTE_SET_USED(conf);

	if (queue_pair_id == -1) {
		for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
			qp = cdev->data->queue_pairs[qp_id];
			qp->ca_enabled = 1;
		}
	} else {
		qp = cdev->data->queue_pairs[queue_pair_id];
		qp->ca_enabled = 1;
	}

	ssovf_fastpath_fns_set((struct rte_eventdev *)(uintptr_t)dev);

	return 0;
}

static int
ssovf_crypto_adapter_qp_del(const struct rte_eventdev *dev,
			    const struct rte_cryptodev *cdev,
			    int32_t queue_pair_id)
{
	struct cpt_instance *qp;
	uint8_t qp_id;

	RTE_SET_USED(dev);

	if (queue_pair_id == -1) {
		for (qp_id = 0; qp_id < cdev->data->nb_queue_pairs; qp_id++) {
			qp = cdev->data->queue_pairs[qp_id];
			qp->ca_enabled = 0;
		}
	} else {
		qp = cdev->data->queue_pairs[queue_pair_id];
		qp->ca_enabled = 0;
	}

	return 0;
}

/* Initialize and register event driver with DPDK Application */
static struct eventdev_ops ssovf_ops = {
	.dev_infos_get    = ssovf_info_get,
	.dev_configure    = ssovf_configure,
	.queue_def_conf   = ssovf_queue_def_conf,
	.queue_setup      = ssovf_queue_setup,
	.queue_release    = ssovf_queue_release,
	.port_def_conf    = ssovf_port_def_conf,
	.port_setup       = ssovf_port_setup,
	.port_release     = ssovf_port_release,
	.port_link        = ssovf_port_link,
	.port_unlink      = ssovf_port_unlink,
	.timeout_ticks    = ssovf_timeout_ticks,

	.eth_rx_adapter_caps_get  = ssovf_eth_rx_adapter_caps_get,
	.eth_rx_adapter_queue_add = ssovf_eth_rx_adapter_queue_add,
	.eth_rx_adapter_queue_del = ssovf_eth_rx_adapter_queue_del,
	.eth_rx_adapter_start = ssovf_eth_rx_adapter_start,
	.eth_rx_adapter_stop = ssovf_eth_rx_adapter_stop,

	.eth_tx_adapter_caps_get = ssovf_eth_tx_adapter_caps_get,
	.eth_tx_adapter_create = ssovf_eth_tx_adapter_create,
	.eth_tx_adapter_free = ssovf_eth_tx_adapter_free,
	.eth_tx_adapter_queue_add = ssovf_eth_tx_adapter_queue_add,
	.eth_tx_adapter_queue_del = ssovf_eth_tx_adapter_queue_del,
	.eth_tx_adapter_start = ssovf_eth_tx_adapter_start,
	.eth_tx_adapter_stop = ssovf_eth_tx_adapter_stop,

	.timer_adapter_caps_get = ssovf_timvf_caps_get,

	.crypto_adapter_caps_get = ssovf_crypto_adapter_caps_get,
	.crypto_adapter_queue_pair_add = ssovf_crypto_adapter_qp_add,
	.crypto_adapter_queue_pair_del = ssovf_crypto_adapter_qp_del,

	.dev_selftest = test_eventdev_octeontx,

	.dump             = ssovf_dump,
	.dev_start        = ssovf_start,
	.dev_stop         = ssovf_stop,
	.dev_close        = ssovf_close
};

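/*
 * Probe entry point for the OCTEON TX SSO vdev. Only one instance is
 * supported. After common vdev init, the primary process queries the PF
 * for resource counts and timeout limits; secondary processes only wire
 * up the fastpath function pointers.
 *
 * Illustrative devargs usage, assuming EVENTDEV_NAME_OCTEONTX_PMD
 * expands to "event_octeontx" and TIMVF_ENABLE_STATS_ARG to
 * "timvf_stats":
 *   --vdev="event_octeontx,timvf_stats=1"
 */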
static int
ssovf_vdev_probe(struct rte_vdev_device *vdev)
{
	struct ssovf_info oinfo;
	struct ssovf_mbox_dev_info info;
	struct ssovf_evdev *edev;
	struct rte_eventdev *eventdev;
	static int ssovf_init_once;
	const char *name;
	const char *params;
	int ret;

	static const char *const args[] = {
		TIMVF_ENABLE_STATS_ARG,
		NULL
	};

	name = rte_vdev_device_name(vdev);
	/* More than one instance is not supported */
	if (ssovf_init_once) {
		ssovf_log_err("Request to create >1 %s instance", name);
		return -EINVAL;
	}

	params = rte_vdev_device_args(vdev);
	if (params != NULL && params[0] != '\0') {
		struct rte_kvargs *kvlist = rte_kvargs_parse(params, args);

		if (!kvlist) {
			ssovf_log_info(
				"Ignoring unsupported params supplied '%s'",
				name);
		} else {
			ret = rte_kvargs_process(kvlist, TIMVF_ENABLE_STATS_ARG,
						 ssovf_parsekv,
						 &timvf_enable_stats);
			if (ret != 0) {
				ssovf_log_err("%s: Error in timvf stats", name);
				rte_kvargs_free(kvlist);
				return ret;
			}
		}

		rte_kvargs_free(kvlist);
	}

	eventdev = rte_event_pmd_vdev_init(name, sizeof(struct ssovf_evdev),
				rte_socket_id(), vdev);
	if (eventdev == NULL) {
		ssovf_log_err("Failed to create eventdev vdev %s", name);
		return -ENOMEM;
	}
	eventdev->dev_ops = &ssovf_ops;

	timvf_set_eventdevice(eventdev);

	/* For secondary processes, the primary has done all the work */
	if (rte_eal_process_type() != RTE_PROC_PRIMARY) {
		ssovf_fastpath_fns_set(eventdev);
		return 0;
	}

	octeontx_mbox_init();
	ret = ssovf_info(&oinfo);
	if (ret) {
		ssovf_log_err("Failed to probe and validate ssovfs %d", ret);
		goto error;
	}

	edev = ssovf_pmd_priv(eventdev);
	edev->max_event_ports = oinfo.total_ssowvfs;
	edev->max_event_queues = oinfo.total_ssovfs;
	edev->is_timeout_deq = 0;

	ret = ssovf_mbox_dev_info(&info);
	if (ret != sizeof(struct ssovf_mbox_dev_info)) {
		ssovf_log_err("Failed to get mbox devinfo %d", ret);
		goto error;
	}

	edev->min_deq_timeout_ns = info.min_deq_timeout_ns;
	edev->max_deq_timeout_ns = info.max_deq_timeout_ns;
	edev->max_num_events = info.max_num_events;
	edev->available_events = info.max_num_events;

	ssovf_log_dbg("min_deq_tmo=%" PRId64 " max_deq_tmo=%" PRId64
		      " max_evts=%d",
		      info.min_deq_timeout_ns, info.max_deq_timeout_ns,
		      info.max_num_events);

	if (!edev->max_event_ports || !edev->max_event_queues) {
		ssovf_log_err("Not enough eventdev resource queues=%d ports=%d",
			edev->max_event_queues, edev->max_event_ports);
		ret = -ENODEV;
		goto error;
	}

	ssovf_log_info("Initializing %s domain=%d max_queues=%d max_ports=%d",
			name, oinfo.domain, edev->max_event_queues,
			edev->max_event_ports);

	ssovf_init_once = 1;
	event_dev_probing_finish(eventdev);
	return 0;

error:
	rte_event_pmd_vdev_uninit(name);
	return ret;
}

static int
ssovf_vdev_remove(struct rte_vdev_device *vdev)
{
	const char *name;

	name = rte_vdev_device_name(vdev);
	ssovf_log_info("Closing %s", name);
	return rte_event_pmd_vdev_uninit(name);
}

static struct rte_vdev_driver vdev_ssovf_pmd = {
	.probe = ssovf_vdev_probe,
	.remove = ssovf_vdev_remove
};

RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_OCTEONTX_PMD, vdev_ssovf_pmd);