xref: /dpdk/drivers/event/dpaa/dpaa_eventdev.c (revision e1b07dd581cb487e1138e60c21159c499173352e)
19caac5ddSSunil Kumar Kori /*   SPDX-License-Identifier:        BSD-3-Clause
2f513f620SSachin Saxena  *   Copyright 2017-2019 NXP
39caac5ddSSunil Kumar Kori  */
49caac5ddSSunil Kumar Kori 
59caac5ddSSunil Kumar Kori #include <assert.h>
69caac5ddSSunil Kumar Kori #include <stdio.h>
79caac5ddSSunil Kumar Kori #include <stdbool.h>
89caac5ddSSunil Kumar Kori #include <errno.h>
99caac5ddSSunil Kumar Kori #include <stdint.h>
109caac5ddSSunil Kumar Kori #include <string.h>
119caac5ddSSunil Kumar Kori #include <sys/epoll.h>
129caac5ddSSunil Kumar Kori 
139caac5ddSSunil Kumar Kori #include <rte_atomic.h>
149caac5ddSSunil Kumar Kori #include <rte_byteorder.h>
159caac5ddSSunil Kumar Kori #include <rte_common.h>
169caac5ddSSunil Kumar Kori #include <rte_debug.h>
171acb7f54SDavid Marchand #include <dev_driver.h>
189caac5ddSSunil Kumar Kori #include <rte_eal.h>
199caac5ddSSunil Kumar Kori #include <rte_lcore.h>
209caac5ddSSunil Kumar Kori #include <rte_log.h>
219caac5ddSSunil Kumar Kori #include <rte_malloc.h>
229caac5ddSSunil Kumar Kori #include <rte_memcpy.h>
239caac5ddSSunil Kumar Kori #include <rte_memory.h>
249caac5ddSSunil Kumar Kori #include <rte_memzone.h>
259caac5ddSSunil Kumar Kori #include <rte_pci.h>
269caac5ddSSunil Kumar Kori #include <rte_eventdev.h>
2725187042SBruce Richardson #include <eventdev_pmd_vdev.h>
289caac5ddSSunil Kumar Kori #include <rte_ethdev.h>
29c1749bc5SVolodymyr Fialko #include <rte_event_crypto_adapter.h>
309caac5ddSSunil Kumar Kori #include <rte_event_eth_rx_adapter.h>
31ba6c1aa2SNipun Gupta #include <rte_event_eth_tx_adapter.h>
3292cb1309SAkhil Goyal #include <cryptodev_pmd.h>
33a2f1da7dSDavid Marchand #include <bus_dpaa_driver.h>
349caac5ddSSunil Kumar Kori #include <rte_dpaa_logs.h>
3589cf9584SHemant Agrawal #include <rte_cycles.h>
3677b5311dSHemant Agrawal #include <rte_kvargs.h>
379caac5ddSSunil Kumar Kori 
389caac5ddSSunil Kumar Kori #include <dpaa_ethdev.h>
39b0f66a68SAkhil Goyal #include <dpaa_sec_event.h>
409caac5ddSSunil Kumar Kori #include "dpaa_eventdev.h"
419caac5ddSSunil Kumar Kori #include <dpaa_mempool.h>
429caac5ddSSunil Kumar Kori 
439caac5ddSSunil Kumar Kori /*
449caac5ddSSunil Kumar Kori  * Clarifications
 * Eventdev = Virtual Instance for SoC
469caac5ddSSunil Kumar Kori  * Eventport = Portal Instance
479caac5ddSSunil Kumar Kori  * Eventqueue = Channel Instance
489caac5ddSSunil Kumar Kori  * 1 Eventdev can have N Eventqueue
499caac5ddSSunil Kumar Kori  */
50eeded204SDavid Marchand RTE_LOG_REGISTER_DEFAULT(dpaa_logtype_eventdev, NOTICE);
512b843cacSDavid Marchand #define RTE_LOGTYPE_DPAA_EVENTDEV dpaa_logtype_eventdev
529caac5ddSSunil Kumar Kori 
5377b5311dSHemant Agrawal #define DISABLE_INTR_MODE "disable_intr"
5477b5311dSHemant Agrawal 
559caac5ddSSunil Kumar Kori static int
569caac5ddSSunil Kumar Kori dpaa_event_dequeue_timeout_ticks(struct rte_eventdev *dev, uint64_t ns,
579caac5ddSSunil Kumar Kori 				 uint64_t *timeout_ticks)
589caac5ddSSunil Kumar Kori {
59e7bbddb8SHemant Agrawal 	EVENTDEV_INIT_FUNC_TRACE();
609caac5ddSSunil Kumar Kori 
619caac5ddSSunil Kumar Kori 	RTE_SET_USED(dev);
629caac5ddSSunil Kumar Kori 
6377b5311dSHemant Agrawal 	uint64_t cycles_per_second;
649caac5ddSSunil Kumar Kori 
6577b5311dSHemant Agrawal 	cycles_per_second = rte_get_timer_hz();
6677b5311dSHemant Agrawal 	*timeout_ticks = (ns * cycles_per_second) / NS_PER_S;
6777b5311dSHemant Agrawal 
6877b5311dSHemant Agrawal 	return 0;
6977b5311dSHemant Agrawal }
7077b5311dSHemant Agrawal 
/* Convert a dequeue timeout in nanoseconds to microseconds (interrupt mode,
 * where the wait is implemented with select()).
 */
static int
dpaa_event_dequeue_timeout_ticks_intr(struct rte_eventdev *dev, uint64_t ns,
				 uint64_t *timeout_ticks)
{
	RTE_SET_USED(dev);

	*timeout_ticks = ns / 1000;

	return 0;
}
809caac5ddSSunil Kumar Kori 
819caac5ddSSunil Kumar Kori static void
820ee17f79SSunil Kumar Kori dpaa_eventq_portal_add(u16 ch_id)
830ee17f79SSunil Kumar Kori {
840ee17f79SSunil Kumar Kori 	uint32_t sdqcr;
850ee17f79SSunil Kumar Kori 
860ee17f79SSunil Kumar Kori 	sdqcr = QM_SDQCR_CHANNELS_POOL_CONV(ch_id);
870ee17f79SSunil Kumar Kori 	qman_static_dequeue_add(sdqcr, NULL);
880ee17f79SSunil Kumar Kori }
890ee17f79SSunil Kumar Kori 
/* Eventdev enqueue for this PMD: only RTE_EVENT_OP_RELEASE is acted on --
 * it discard-consumes (DCA) the held atomic DQRR entry so qman can retire
 * it.  Every other op is accepted and counted as enqueued with no action.
 *
 * @return nb_events (all events are always "accepted").
 */
static uint16_t
dpaa_event_enqueue_burst(void *port, const struct rte_event ev[],
			 uint16_t nb_events)
{
	uint16_t i;
	struct rte_mbuf *mbuf;

	RTE_SET_USED(port);
	/*Release all the contexts saved previously*/
	for (i = 0; i < nb_events; i++) {
		switch (ev[i].op) {
		case RTE_EVENT_OP_RELEASE:
			/* Consume the DQRR entry recorded in impl_opaque. */
			qman_dca_index(ev[i].impl_opaque, 0);
			/* NOTE(review): the per-lcore bookkeeping below is
			 * indexed by the burst position 'i', while the DCA
			 * above uses ev[i].impl_opaque -- confirm these
			 * always coincide for release bursts.
			 */
			mbuf = DPAA_PER_LCORE_DQRR_MBUF(i);
			*dpaa_seqn(mbuf) = DPAA_INVALID_MBUF_SEQN;
			DPAA_PER_LCORE_DQRR_HELD &= ~(1 << i);
			DPAA_PER_LCORE_DQRR_SIZE--;
			break;
		default:
			break;
		}
	}

	return nb_events;
}
1150ee17f79SSunil Kumar Kori 
/* If @fd is flagged readable in @fdset, consume the 4-byte UIO interrupt
 * count from it so the fd does not remain readable.
 *
 * Fix: the read previously went to qman_thread_fd() instead of the @fd
 * parameter that was tested with FD_ISSET; use @fd for both.
 */
static void drain_4_bytes(int fd, fd_set *fdset)
{
	if (FD_ISSET(fd, fdset)) {
		/* drain 4 bytes */
		uint32_t junk;
		ssize_t sjunk = read(fd, &junk, sizeof(junk));

		if (sjunk != sizeof(junk))
			DPAA_EVENTDEV_ERR("UIO irq read error");
	}
}
12677b5311dSHemant Agrawal 
/* Block until the qman portal UIO fd signals a dequeue-ready interrupt
 * (DQRI) or the timeout expires.
 *
 * @param timeout_ticks
 *   Wait budget, consumed as microseconds by select() (the intr-mode
 *   timeout_ticks callback produces microseconds).
 * @return
 *   select() result: >0 if the fd became readable, 0 on timeout,
 *   <0 on select() error.
 */
static inline int
dpaa_event_dequeue_wait(uint64_t timeout_ticks)
{
	int fd_qman, nfds;
	int ret;
	fd_set readset;

	/* Go into (and back out of) IRQ mode for each select,
	 * it simplifies exit-path considerations and other
	 * potential nastiness.
	 */
	struct timeval tv = {
		.tv_sec = timeout_ticks / 1000000,
		.tv_usec = timeout_ticks % 1000000
	};

	fd_qman = qman_thread_fd();
	nfds = fd_qman + 1;
	FD_ZERO(&readset);
	FD_SET(fd_qman, &readset);

	/* Arm the DQRI interrupt source only for the duration of the wait. */
	qman_irqsource_add(QM_PIRQ_DQRI);

	ret = select(nfds, &readset, NULL, NULL, &tv);
	if (ret < 0)
		return ret;
	/* Calling irqsource_remove() prior to thread_irq()
	 * means thread_irq() will not process whatever caused
	 * the interrupts, however it does ensure that, once
	 * thread_irq() re-enables interrupts, they won't fire
	 * again immediately.
	 */
	qman_irqsource_remove(~0);
	/* Consume the pending 4-byte UIO irq count, if any. */
	drain_4_bytes(fd_qman, &readset);
	qman_thread_irq();

	return ret;
}
16577b5311dSHemant Agrawal 
/* Poll-mode dequeue: lazily affine this thread to a qman portal and link
 * the port's channels on first use, release any atomic DQRR contexts still
 * held by this lcore, then poll qman until frames arrive or the timeout
 * (in timer cycles) elapses.
 *
 * @return number of events dequeued into @ev (0 on timeout), or a negative
 *   value if portal initialization fails.
 */
static uint16_t
dpaa_event_dequeue_burst(void *port, struct rte_event ev[],
			 uint16_t nb_events, uint64_t timeout_ticks)
{
	int ret;
	u16 ch_id;
	void *buffers[8];
	u32 num_frames, i;
	uint64_t cur_ticks = 0, wait_time_ticks = 0;
	struct dpaa_port *portal = (struct dpaa_port *)port;
	struct rte_mbuf *mbuf;

	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
		/* Affine current thread context to a qman portal */
		ret = rte_dpaa_portal_init((void *)0);
		if (ret) {
			DPAA_EVENTDEV_ERR("Unable to initialize portal");
			return ret;
		}
	}

	if (unlikely(!portal->is_port_linked)) {
		/*
		 * Affine event queue for current thread context
		 * to a qman portal.
		 */
		for (i = 0; i < portal->num_linked_evq; i++) {
			ch_id = portal->evq_info[i].ch_id;
			dpaa_eventq_portal_add(ch_id);
		}
		portal->is_port_linked = true;
	}

	/* Check if there are atomic contexts to be released */
	i = 0;
	while (DPAA_PER_LCORE_DQRR_SIZE) {
		if (DPAA_PER_LCORE_DQRR_HELD & (1 << i)) {
			qman_dca_index(i, 0);
			mbuf = DPAA_PER_LCORE_DQRR_MBUF(i);
			*dpaa_seqn(mbuf) = DPAA_INVALID_MBUF_SEQN;
			DPAA_PER_LCORE_DQRR_HELD &= ~(1 << i);
			DPAA_PER_LCORE_DQRR_SIZE--;
		}
		i++;
	}
	DPAA_PER_LCORE_DQRR_HELD = 0;

	/* timeout_ticks == 0 means use the port's configured timeout; in
	 * poll mode portal->timeout_us holds timer cycles (set at configure
	 * time), despite the field name.
	 */
	if (timeout_ticks)
		wait_time_ticks = timeout_ticks;
	else
		wait_time_ticks = portal->timeout_us;

	/* Turn the relative budget into an absolute deadline. */
	wait_time_ticks += rte_get_timer_cycles();
	do {
		/* Lets dequeue the frames */
		num_frames = qman_portal_dequeue(ev, nb_events, buffers);
		if (num_frames)
			break;
		cur_ticks = rte_get_timer_cycles();
	} while (cur_ticks < wait_time_ticks);

	return num_frames;
}
2290ee17f79SSunil Kumar Kori 
2300ee17f79SSunil Kumar Kori static uint16_t
23177b5311dSHemant Agrawal dpaa_event_dequeue_burst_intr(void *port, struct rte_event ev[],
23277b5311dSHemant Agrawal 			      uint16_t nb_events, uint64_t timeout_ticks)
23377b5311dSHemant Agrawal {
23477b5311dSHemant Agrawal 	int ret;
23577b5311dSHemant Agrawal 	u16 ch_id;
23677b5311dSHemant Agrawal 	void *buffers[8];
23777b5311dSHemant Agrawal 	u32 num_frames, i, irq = 0;
23877b5311dSHemant Agrawal 	uint64_t cur_ticks = 0, wait_time_ticks = 0;
23977b5311dSHemant Agrawal 	struct dpaa_port *portal = (struct dpaa_port *)port;
24077b5311dSHemant Agrawal 	struct rte_mbuf *mbuf;
24177b5311dSHemant Agrawal 
242e5872221SRohit Raj 	if (unlikely(!DPAA_PER_LCORE_PORTAL)) {
24377b5311dSHemant Agrawal 		/* Affine current thread context to a qman portal */
24477b5311dSHemant Agrawal 		ret = rte_dpaa_portal_init((void *)0);
24577b5311dSHemant Agrawal 		if (ret) {
24677b5311dSHemant Agrawal 			DPAA_EVENTDEV_ERR("Unable to initialize portal");
24777b5311dSHemant Agrawal 			return ret;
24877b5311dSHemant Agrawal 		}
24977b5311dSHemant Agrawal 	}
25077b5311dSHemant Agrawal 
25177b5311dSHemant Agrawal 	if (unlikely(!portal->is_port_linked)) {
25277b5311dSHemant Agrawal 		/*
25377b5311dSHemant Agrawal 		 * Affine event queue for current thread context
25477b5311dSHemant Agrawal 		 * to a qman portal.
25577b5311dSHemant Agrawal 		 */
25677b5311dSHemant Agrawal 		for (i = 0; i < portal->num_linked_evq; i++) {
25777b5311dSHemant Agrawal 			ch_id = portal->evq_info[i].ch_id;
25877b5311dSHemant Agrawal 			dpaa_eventq_portal_add(ch_id);
25977b5311dSHemant Agrawal 		}
26077b5311dSHemant Agrawal 		portal->is_port_linked = true;
26177b5311dSHemant Agrawal 	}
26277b5311dSHemant Agrawal 
26377b5311dSHemant Agrawal 	/* Check if there are atomic contexts to be released */
26477b5311dSHemant Agrawal 	i = 0;
26577b5311dSHemant Agrawal 	while (DPAA_PER_LCORE_DQRR_SIZE) {
26677b5311dSHemant Agrawal 		if (DPAA_PER_LCORE_DQRR_HELD & (1 << i)) {
26777b5311dSHemant Agrawal 			qman_dca_index(i, 0);
26877b5311dSHemant Agrawal 			mbuf = DPAA_PER_LCORE_DQRR_MBUF(i);
269c9a1c2e5SDavid Marchand 			*dpaa_seqn(mbuf) = DPAA_INVALID_MBUF_SEQN;
27077b5311dSHemant Agrawal 			DPAA_PER_LCORE_DQRR_HELD &= ~(1 << i);
27177b5311dSHemant Agrawal 			DPAA_PER_LCORE_DQRR_SIZE--;
27277b5311dSHemant Agrawal 		}
27377b5311dSHemant Agrawal 		i++;
27477b5311dSHemant Agrawal 	}
27577b5311dSHemant Agrawal 	DPAA_PER_LCORE_DQRR_HELD = 0;
27677b5311dSHemant Agrawal 
27777b5311dSHemant Agrawal 	if (timeout_ticks)
27877b5311dSHemant Agrawal 		wait_time_ticks = timeout_ticks;
27977b5311dSHemant Agrawal 	else
28077b5311dSHemant Agrawal 		wait_time_ticks = portal->timeout_us;
28177b5311dSHemant Agrawal 
28277b5311dSHemant Agrawal 	do {
28377b5311dSHemant Agrawal 		/* Lets dequeue the frames */
28477b5311dSHemant Agrawal 		num_frames = qman_portal_dequeue(ev, nb_events, buffers);
28577b5311dSHemant Agrawal 		if (irq)
28677b5311dSHemant Agrawal 			irq = 0;
28777b5311dSHemant Agrawal 		if (num_frames)
28877b5311dSHemant Agrawal 			break;
28977b5311dSHemant Agrawal 		if (wait_time_ticks) { /* wait for time */
29077b5311dSHemant Agrawal 			if (dpaa_event_dequeue_wait(wait_time_ticks) > 0) {
29177b5311dSHemant Agrawal 				irq = 1;
29277b5311dSHemant Agrawal 				continue;
29377b5311dSHemant Agrawal 			}
29477b5311dSHemant Agrawal 			break; /* no event after waiting */
29577b5311dSHemant Agrawal 		}
29677b5311dSHemant Agrawal 		cur_ticks = rte_get_timer_cycles();
29777b5311dSHemant Agrawal 	} while (cur_ticks < wait_time_ticks);
29877b5311dSHemant Agrawal 
29977b5311dSHemant Agrawal 	return num_frames;
30077b5311dSHemant Agrawal }
30177b5311dSHemant Agrawal 
3020ee17f79SSunil Kumar Kori static void
3039caac5ddSSunil Kumar Kori dpaa_event_dev_info_get(struct rte_eventdev *dev,
3049caac5ddSSunil Kumar Kori 			struct rte_event_dev_info *dev_info)
3059caac5ddSSunil Kumar Kori {
306e7bbddb8SHemant Agrawal 	EVENTDEV_INIT_FUNC_TRACE();
3079caac5ddSSunil Kumar Kori 
3089caac5ddSSunil Kumar Kori 	RTE_SET_USED(dev);
309b0f66a68SAkhil Goyal 	dev_info->driver_name = "event_dpaa1";
3109caac5ddSSunil Kumar Kori 	dev_info->min_dequeue_timeout_ns =
3119caac5ddSSunil Kumar Kori 		DPAA_EVENT_MIN_DEQUEUE_TIMEOUT;
3129caac5ddSSunil Kumar Kori 	dev_info->max_dequeue_timeout_ns =
3139caac5ddSSunil Kumar Kori 		DPAA_EVENT_MAX_DEQUEUE_TIMEOUT;
3149caac5ddSSunil Kumar Kori 	dev_info->dequeue_timeout_ns =
31577b5311dSHemant Agrawal 		DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
3169caac5ddSSunil Kumar Kori 	dev_info->max_event_queues =
3179caac5ddSSunil Kumar Kori 		DPAA_EVENT_MAX_QUEUES;
3189caac5ddSSunil Kumar Kori 	dev_info->max_event_queue_flows =
3199caac5ddSSunil Kumar Kori 		DPAA_EVENT_MAX_QUEUE_FLOWS;
3209caac5ddSSunil Kumar Kori 	dev_info->max_event_queue_priority_levels =
3219caac5ddSSunil Kumar Kori 		DPAA_EVENT_MAX_QUEUE_PRIORITY_LEVELS;
3229caac5ddSSunil Kumar Kori 	dev_info->max_event_priority_levels =
3239caac5ddSSunil Kumar Kori 		DPAA_EVENT_MAX_EVENT_PRIORITY_LEVELS;
3249caac5ddSSunil Kumar Kori 	dev_info->max_event_ports =
3259caac5ddSSunil Kumar Kori 		DPAA_EVENT_MAX_EVENT_PORT;
3269caac5ddSSunil Kumar Kori 	dev_info->max_event_port_dequeue_depth =
3279caac5ddSSunil Kumar Kori 		DPAA_EVENT_MAX_PORT_DEQUEUE_DEPTH;
3289caac5ddSSunil Kumar Kori 	dev_info->max_event_port_enqueue_depth =
3299caac5ddSSunil Kumar Kori 		DPAA_EVENT_MAX_PORT_ENQUEUE_DEPTH;
3309caac5ddSSunil Kumar Kori 	/*
3319caac5ddSSunil Kumar Kori 	 * TODO: Need to find out that how to fetch this info
3329caac5ddSSunil Kumar Kori 	 * from kernel or somewhere else.
3339caac5ddSSunil Kumar Kori 	 */
3349caac5ddSSunil Kumar Kori 	dev_info->max_num_events =
3359caac5ddSSunil Kumar Kori 		DPAA_EVENT_MAX_NUM_EVENTS;
3369caac5ddSSunil Kumar Kori 	dev_info->event_dev_cap =
33721662755SBruce Richardson 		RTE_EVENT_DEV_CAP_ATOMIC |
33821662755SBruce Richardson 		RTE_EVENT_DEV_CAP_PARALLEL |
3399caac5ddSSunil Kumar Kori 		RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED |
3409caac5ddSSunil Kumar Kori 		RTE_EVENT_DEV_CAP_BURST_MODE |
3419caac5ddSSunil Kumar Kori 		RTE_EVENT_DEV_CAP_MULTIPLE_QUEUE_PORT |
34275d11313STimothy McDaniel 		RTE_EVENT_DEV_CAP_NONSEQ_MODE |
343bd991897SMattias Rönnblom 		RTE_EVENT_DEV_CAP_CARRY_FLOW_ID |
344bd991897SMattias Rönnblom 		RTE_EVENT_DEV_CAP_MAINTENANCE_FREE;
345d007a7f3SPavan Nikhilesh 	dev_info->max_profiles_per_port = 1;
3469caac5ddSSunil Kumar Kori }
3479caac5ddSSunil Kumar Kori 
3489caac5ddSSunil Kumar Kori static int
3499caac5ddSSunil Kumar Kori dpaa_event_dev_configure(const struct rte_eventdev *dev)
3509caac5ddSSunil Kumar Kori {
3519caac5ddSSunil Kumar Kori 	struct dpaa_eventdev *priv = dev->data->dev_private;
3529caac5ddSSunil Kumar Kori 	struct rte_event_dev_config *conf = &dev->data->dev_conf;
3539caac5ddSSunil Kumar Kori 	int ret, i;
3549caac5ddSSunil Kumar Kori 	uint32_t *ch_id;
3559caac5ddSSunil Kumar Kori 
356e7bbddb8SHemant Agrawal 	EVENTDEV_INIT_FUNC_TRACE();
3579caac5ddSSunil Kumar Kori 	priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
3589caac5ddSSunil Kumar Kori 	priv->nb_events_limit = conf->nb_events_limit;
3599caac5ddSSunil Kumar Kori 	priv->nb_event_queues = conf->nb_event_queues;
3609caac5ddSSunil Kumar Kori 	priv->nb_event_ports = conf->nb_event_ports;
3619caac5ddSSunil Kumar Kori 	priv->nb_event_queue_flows = conf->nb_event_queue_flows;
3629caac5ddSSunil Kumar Kori 	priv->nb_event_port_dequeue_depth = conf->nb_event_port_dequeue_depth;
3639caac5ddSSunil Kumar Kori 	priv->nb_event_port_enqueue_depth = conf->nb_event_port_enqueue_depth;
3649caac5ddSSunil Kumar Kori 	priv->event_dev_cfg = conf->event_dev_cfg;
3659caac5ddSSunil Kumar Kori 
3669caac5ddSSunil Kumar Kori 	ch_id = rte_malloc("dpaa-channels",
3679caac5ddSSunil Kumar Kori 			  sizeof(uint32_t) * priv->nb_event_queues,
3689caac5ddSSunil Kumar Kori 			  RTE_CACHE_LINE_SIZE);
3699caac5ddSSunil Kumar Kori 	if (ch_id == NULL) {
3701ec9a3afSHemant Agrawal 		DPAA_EVENTDEV_ERR("Fail to allocate memory for dpaa channels");
3719caac5ddSSunil Kumar Kori 		return -ENOMEM;
3729caac5ddSSunil Kumar Kori 	}
3739caac5ddSSunil Kumar Kori 	/* Create requested event queues within the given event device */
3749caac5ddSSunil Kumar Kori 	ret = qman_alloc_pool_range(ch_id, priv->nb_event_queues, 1, 0);
3759caac5ddSSunil Kumar Kori 	if (ret < 0) {
3761ec9a3afSHemant Agrawal 		DPAA_EVENTDEV_ERR("qman_alloc_pool_range %u, err =%d",
377e7bbddb8SHemant Agrawal 				 priv->nb_event_queues, ret);
3789caac5ddSSunil Kumar Kori 		rte_free(ch_id);
3799caac5ddSSunil Kumar Kori 		return ret;
3809caac5ddSSunil Kumar Kori 	}
3819caac5ddSSunil Kumar Kori 	for (i = 0; i < priv->nb_event_queues; i++)
3829caac5ddSSunil Kumar Kori 		priv->evq_info[i].ch_id = (u16)ch_id[i];
3839caac5ddSSunil Kumar Kori 
3849caac5ddSSunil Kumar Kori 	/* Lets prepare event ports */
3859caac5ddSSunil Kumar Kori 	memset(&priv->ports[0], 0,
3869caac5ddSSunil Kumar Kori 	      sizeof(struct dpaa_port) * priv->nb_event_ports);
38777b5311dSHemant Agrawal 
38877b5311dSHemant Agrawal 	/* Check dequeue timeout method is per dequeue or global */
3899caac5ddSSunil Kumar Kori 	if (priv->event_dev_cfg & RTE_EVENT_DEV_CFG_PER_DEQUEUE_TIMEOUT) {
39077b5311dSHemant Agrawal 		/*
39177b5311dSHemant Agrawal 		 * Use timeout value as given in dequeue operation.
39277b5311dSHemant Agrawal 		 * So invalidating this timeout value.
39377b5311dSHemant Agrawal 		 */
39477b5311dSHemant Agrawal 		priv->dequeue_timeout_ns = 0;
39577b5311dSHemant Agrawal 
39677b5311dSHemant Agrawal 	} else if (conf->dequeue_timeout_ns == 0) {
39777b5311dSHemant Agrawal 		priv->dequeue_timeout_ns = DPAA_EVENT_PORT_DEQUEUE_TIMEOUT_NS;
3989caac5ddSSunil Kumar Kori 	} else {
39977b5311dSHemant Agrawal 		priv->dequeue_timeout_ns = conf->dequeue_timeout_ns;
40077b5311dSHemant Agrawal 	}
40177b5311dSHemant Agrawal 
4029caac5ddSSunil Kumar Kori 	for (i = 0; i < priv->nb_event_ports; i++) {
40377b5311dSHemant Agrawal 		if (priv->intr_mode) {
40477b5311dSHemant Agrawal 			priv->ports[i].timeout_us =
40577b5311dSHemant Agrawal 				priv->dequeue_timeout_ns/1000;
40677b5311dSHemant Agrawal 		} else {
40777b5311dSHemant Agrawal 			uint64_t cycles_per_second;
40877b5311dSHemant Agrawal 
40977b5311dSHemant Agrawal 			cycles_per_second = rte_get_timer_hz();
41077b5311dSHemant Agrawal 			priv->ports[i].timeout_us =
41177b5311dSHemant Agrawal 				(priv->dequeue_timeout_ns * cycles_per_second)
41277b5311dSHemant Agrawal 					/ NS_PER_S;
4139caac5ddSSunil Kumar Kori 		}
4149caac5ddSSunil Kumar Kori 	}
41577b5311dSHemant Agrawal 
4169caac5ddSSunil Kumar Kori 	/*
4179caac5ddSSunil Kumar Kori 	 * TODO: Currently portals are affined with threads. Maximum threads
4189caac5ddSSunil Kumar Kori 	 * can be created equals to number of lcore.
4199caac5ddSSunil Kumar Kori 	 */
4209caac5ddSSunil Kumar Kori 	rte_free(ch_id);
421e7bbddb8SHemant Agrawal 	DPAA_EVENTDEV_INFO("Configured eventdev devid=%d", dev->data->dev_id);
4229caac5ddSSunil Kumar Kori 
4239caac5ddSSunil Kumar Kori 	return 0;
4249caac5ddSSunil Kumar Kori }
4259caac5ddSSunil Kumar Kori 
/* Device start is a no-op for this PMD. */
static int
dpaa_event_dev_start(struct rte_eventdev *dev)
{
	RTE_SET_USED(dev);
	EVENTDEV_INIT_FUNC_TRACE();

	return 0;
}
4349caac5ddSSunil Kumar Kori 
/* Device stop is a no-op for this PMD. */
static void
dpaa_event_dev_stop(struct rte_eventdev *dev)
{
	RTE_SET_USED(dev);
	EVENTDEV_INIT_FUNC_TRACE();
}
4419caac5ddSSunil Kumar Kori 
/* Device close is a no-op for this PMD. */
static int
dpaa_event_dev_close(struct rte_eventdev *dev)
{
	RTE_SET_USED(dev);
	EVENTDEV_INIT_FUNC_TRACE();

	return 0;
}
4509caac5ddSSunil Kumar Kori 
451b08dc643SSunil Kumar Kori static void
452b08dc643SSunil Kumar Kori dpaa_event_queue_def_conf(struct rte_eventdev *dev, uint8_t queue_id,
453b08dc643SSunil Kumar Kori 			  struct rte_event_queue_conf *queue_conf)
454b08dc643SSunil Kumar Kori {
455e7bbddb8SHemant Agrawal 	EVENTDEV_INIT_FUNC_TRACE();
4569caac5ddSSunil Kumar Kori 
457b08dc643SSunil Kumar Kori 	RTE_SET_USED(dev);
458b08dc643SSunil Kumar Kori 	RTE_SET_USED(queue_id);
459b08dc643SSunil Kumar Kori 
460b08dc643SSunil Kumar Kori 	memset(queue_conf, 0, sizeof(struct rte_event_queue_conf));
461c37421a2SNipun Gupta 	queue_conf->nb_atomic_flows = DPAA_EVENT_QUEUE_ATOMIC_FLOWS;
462b08dc643SSunil Kumar Kori 	queue_conf->schedule_type = RTE_SCHED_TYPE_PARALLEL;
463b08dc643SSunil Kumar Kori 	queue_conf->priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
464b08dc643SSunil Kumar Kori }
465b08dc643SSunil Kumar Kori 
466b08dc643SSunil Kumar Kori static int
467b08dc643SSunil Kumar Kori dpaa_event_queue_setup(struct rte_eventdev *dev, uint8_t queue_id,
468b08dc643SSunil Kumar Kori 		       const struct rte_event_queue_conf *queue_conf)
469b08dc643SSunil Kumar Kori {
470b08dc643SSunil Kumar Kori 	struct dpaa_eventdev *priv = dev->data->dev_private;
471b08dc643SSunil Kumar Kori 	struct dpaa_eventq *evq_info = &priv->evq_info[queue_id];
472b08dc643SSunil Kumar Kori 
473e7bbddb8SHemant Agrawal 	EVENTDEV_INIT_FUNC_TRACE();
474b08dc643SSunil Kumar Kori 
475b08dc643SSunil Kumar Kori 	switch (queue_conf->schedule_type) {
476b08dc643SSunil Kumar Kori 	case RTE_SCHED_TYPE_PARALLEL:
477b08dc643SSunil Kumar Kori 	case RTE_SCHED_TYPE_ATOMIC:
478b08dc643SSunil Kumar Kori 		break;
479b08dc643SSunil Kumar Kori 	case RTE_SCHED_TYPE_ORDERED:
480e7bbddb8SHemant Agrawal 		DPAA_EVENTDEV_ERR("Schedule type is not supported.");
481b08dc643SSunil Kumar Kori 		return -1;
482b08dc643SSunil Kumar Kori 	}
483b08dc643SSunil Kumar Kori 	evq_info->event_queue_cfg = queue_conf->event_queue_cfg;
484b08dc643SSunil Kumar Kori 	evq_info->event_queue_id = queue_id;
485b08dc643SSunil Kumar Kori 
486b08dc643SSunil Kumar Kori 	return 0;
487b08dc643SSunil Kumar Kori }
488b08dc643SSunil Kumar Kori 
/* Queue release is a no-op for this PMD. */
static void
dpaa_event_queue_release(struct rte_eventdev *dev, uint8_t queue_id)
{
	RTE_SET_USED(dev);
	RTE_SET_USED(queue_id);

	EVENTDEV_INIT_FUNC_TRACE();
}
4979caac5ddSSunil Kumar Kori 
4981d99bc35SSunil Kumar Kori static void
4991d99bc35SSunil Kumar Kori dpaa_event_port_default_conf_get(struct rte_eventdev *dev, uint8_t port_id,
5001d99bc35SSunil Kumar Kori 				 struct rte_event_port_conf *port_conf)
5011d99bc35SSunil Kumar Kori {
502e7bbddb8SHemant Agrawal 	EVENTDEV_INIT_FUNC_TRACE();
5031d99bc35SSunil Kumar Kori 
5041d99bc35SSunil Kumar Kori 	RTE_SET_USED(dev);
5051d99bc35SSunil Kumar Kori 	RTE_SET_USED(port_id);
5061d99bc35SSunil Kumar Kori 
5071d99bc35SSunil Kumar Kori 	port_conf->new_event_threshold = DPAA_EVENT_MAX_NUM_EVENTS;
5081d99bc35SSunil Kumar Kori 	port_conf->dequeue_depth = DPAA_EVENT_MAX_PORT_DEQUEUE_DEPTH;
5091d99bc35SSunil Kumar Kori 	port_conf->enqueue_depth = DPAA_EVENT_MAX_PORT_ENQUEUE_DEPTH;
5101d99bc35SSunil Kumar Kori }
5111d99bc35SSunil Kumar Kori 
5121d99bc35SSunil Kumar Kori static int
5131d99bc35SSunil Kumar Kori dpaa_event_port_setup(struct rte_eventdev *dev, uint8_t port_id,
5141d99bc35SSunil Kumar Kori 		      const struct rte_event_port_conf *port_conf)
5151d99bc35SSunil Kumar Kori {
5161d99bc35SSunil Kumar Kori 	struct dpaa_eventdev *eventdev = dev->data->dev_private;
5171d99bc35SSunil Kumar Kori 
518e7bbddb8SHemant Agrawal 	EVENTDEV_INIT_FUNC_TRACE();
5191d99bc35SSunil Kumar Kori 
5201d99bc35SSunil Kumar Kori 	RTE_SET_USED(port_conf);
5211d99bc35SSunil Kumar Kori 	dev->data->ports[port_id] = &eventdev->ports[port_id];
5221d99bc35SSunil Kumar Kori 
5231d99bc35SSunil Kumar Kori 	return 0;
5241d99bc35SSunil Kumar Kori }
5251d99bc35SSunil Kumar Kori 
/* Port release is a no-op: ports live inside the device private data. */
static void
dpaa_event_port_release(void *port)
{
	RTE_SET_USED(port);

	EVENTDEV_INIT_FUNC_TRACE();
}
5331d99bc35SSunil Kumar Kori 
/* Link event queues to an event port.
 *
 * A validation pass runs first: a queue flagged SINGLE_LINK that is already
 * bound to some port cannot be linked again.  Each linked queue's channel
 * id is then recorded in the port's evq_info[], indexed by link order.
 *
 * @return number of queues linked, or -EINVAL on a single-link violation.
 */
static int
dpaa_event_port_link(struct rte_eventdev *dev, void *port,
		     const uint8_t queues[], const uint8_t priorities[],
		     uint16_t nb_links)
{
	struct dpaa_eventdev *priv = dev->data->dev_private;
	struct dpaa_port *event_port = (struct dpaa_port *)port;
	struct dpaa_eventq *event_queue;
	uint8_t eventq_id;
	int i;

	RTE_SET_USED(dev);
	RTE_SET_USED(priorities);

	/* First check that input configuration are valid */
	for (i = 0; i < nb_links; i++) {
		eventq_id = queues[i];
		event_queue = &priv->evq_info[eventq_id];
		if ((event_queue->event_queue_cfg
			& RTE_EVENT_QUEUE_CFG_SINGLE_LINK)
			&& (event_queue->event_port)) {
			return -EINVAL;
		}
	}

	/* NOTE(review): evq_info[] is indexed by link position 'i' here,
	 * but by queue id in dpaa_event_port_unlink() -- confirm the two
	 * conventions are reconciled by callers.
	 */
	for (i = 0; i < nb_links; i++) {
		eventq_id = queues[i];
		event_queue = &priv->evq_info[eventq_id];
		event_port->evq_info[i].event_queue_id = eventq_id;
		event_port->evq_info[i].ch_id = event_queue->ch_id;
		event_queue->event_port = port;
	}

	event_port->num_linked_evq = event_port->num_linked_evq + i;

	return (int)i;
}
5711d99bc35SSunil Kumar Kori 
/* Unlink event queues from an event port, clearing both the port-side
 * evq_info[] slots and the queue-side back-pointer.
 *
 * @return number of queues unlinked; if the port has no links at all the
 *   whole request is reported as already done (nb_links).
 */
static int
dpaa_event_port_unlink(struct rte_eventdev *dev, void *port,
		       uint8_t queues[], uint16_t nb_links)
{
	int i;
	uint8_t eventq_id;
	struct dpaa_eventq *event_queue;
	struct dpaa_eventdev *priv = dev->data->dev_private;
	struct dpaa_port *event_port = (struct dpaa_port *)port;

	if (!event_port->num_linked_evq)
		return nb_links;

	for (i = 0; i < nb_links; i++) {
		eventq_id = queues[i];
		/* NOTE(review): evq_info[] is indexed by queue id here,
		 * while dpaa_event_port_link() fills it by link position --
		 * verify these agree for the intended usage.
		 */
		event_port->evq_info[eventq_id].event_queue_id = -1;
		event_port->evq_info[eventq_id].ch_id = 0;
		event_queue = &priv->evq_info[eventq_id];
		event_queue->event_port = NULL;
	}

	/* The early return above guarantees num_linked_evq != 0 here, so
	 * this guard is redundant but kept as-is.
	 */
	if (event_port->num_linked_evq)
		event_port->num_linked_evq = event_port->num_linked_evq - i;

	return (int)i;
}
5981d99bc35SSunil Kumar Kori 
5994ed80e63SSunil Kumar Kori static int
6004ed80e63SSunil Kumar Kori dpaa_event_eth_rx_adapter_caps_get(const struct rte_eventdev *dev,
6014ed80e63SSunil Kumar Kori 				   const struct rte_eth_dev *eth_dev,
6024ed80e63SSunil Kumar Kori 				   uint32_t *caps)
6034ed80e63SSunil Kumar Kori {
6044ed80e63SSunil Kumar Kori 	const char *ethdev_driver = eth_dev->device->driver->name;
6054ed80e63SSunil Kumar Kori 
606e7bbddb8SHemant Agrawal 	EVENTDEV_INIT_FUNC_TRACE();
6074ed80e63SSunil Kumar Kori 
6084ed80e63SSunil Kumar Kori 	RTE_SET_USED(dev);
6094ed80e63SSunil Kumar Kori 
6104ed80e63SSunil Kumar Kori 	if (!strcmp(ethdev_driver, "net_dpaa"))
6114ed80e63SSunil Kumar Kori 		*caps = RTE_EVENT_ETH_RX_ADAPTER_DPAA_CAP;
6124ed80e63SSunil Kumar Kori 	else
6134ed80e63SSunil Kumar Kori 		*caps = RTE_EVENT_ETH_RX_ADAPTER_SW_CAP;
6144ed80e63SSunil Kumar Kori 
6154ed80e63SSunil Kumar Kori 	return 0;
6164ed80e63SSunil Kumar Kori }
6174ed80e63SSunil Kumar Kori 
6184ed80e63SSunil Kumar Kori static int
6194ed80e63SSunil Kumar Kori dpaa_event_eth_rx_adapter_queue_add(
6204ed80e63SSunil Kumar Kori 		const struct rte_eventdev *dev,
6214ed80e63SSunil Kumar Kori 		const struct rte_eth_dev *eth_dev,
6224ed80e63SSunil Kumar Kori 		int32_t rx_queue_id,
6234ed80e63SSunil Kumar Kori 		const struct rte_event_eth_rx_adapter_queue_conf *queue_conf)
6244ed80e63SSunil Kumar Kori {
6254ed80e63SSunil Kumar Kori 	struct dpaa_eventdev *eventdev = dev->data->dev_private;
6264ed80e63SSunil Kumar Kori 	uint8_t ev_qid = queue_conf->ev.queue_id;
6274ed80e63SSunil Kumar Kori 	u16 ch_id = eventdev->evq_info[ev_qid].ch_id;
6284ed80e63SSunil Kumar Kori 	struct dpaa_if *dpaa_intf = eth_dev->data->dev_private;
6294ed80e63SSunil Kumar Kori 	int ret, i;
6304ed80e63SSunil Kumar Kori 
631e7bbddb8SHemant Agrawal 	EVENTDEV_INIT_FUNC_TRACE();
6324ed80e63SSunil Kumar Kori 
6334ed80e63SSunil Kumar Kori 	if (rx_queue_id == -1) {
6344ed80e63SSunil Kumar Kori 		for (i = 0; i < dpaa_intf->nb_rx_queues; i++) {
6354ed80e63SSunil Kumar Kori 			ret = dpaa_eth_eventq_attach(eth_dev, i, ch_id,
6364ed80e63SSunil Kumar Kori 						     queue_conf);
6374ed80e63SSunil Kumar Kori 			if (ret) {
638e7bbddb8SHemant Agrawal 				DPAA_EVENTDEV_ERR(
6391ec9a3afSHemant Agrawal 					"Event Queue attach failed:%d", ret);
6404ed80e63SSunil Kumar Kori 				goto detach_configured_queues;
6414ed80e63SSunil Kumar Kori 			}
6424ed80e63SSunil Kumar Kori 		}
6434ed80e63SSunil Kumar Kori 		return 0;
6444ed80e63SSunil Kumar Kori 	}
6454ed80e63SSunil Kumar Kori 
6464ed80e63SSunil Kumar Kori 	ret = dpaa_eth_eventq_attach(eth_dev, rx_queue_id, ch_id, queue_conf);
6474ed80e63SSunil Kumar Kori 	if (ret)
6481ec9a3afSHemant Agrawal 		DPAA_EVENTDEV_ERR("dpaa_eth_eventq_attach failed:%d", ret);
6494ed80e63SSunil Kumar Kori 	return ret;
6504ed80e63SSunil Kumar Kori 
6514ed80e63SSunil Kumar Kori detach_configured_queues:
6524ed80e63SSunil Kumar Kori 
6534ed80e63SSunil Kumar Kori 	for (i = (i - 1); i >= 0 ; i--)
6544ed80e63SSunil Kumar Kori 		dpaa_eth_eventq_detach(eth_dev, i);
6554ed80e63SSunil Kumar Kori 
6564ed80e63SSunil Kumar Kori 	return ret;
6574ed80e63SSunil Kumar Kori }
6584ed80e63SSunil Kumar Kori 
6594ed80e63SSunil Kumar Kori static int
6604ed80e63SSunil Kumar Kori dpaa_event_eth_rx_adapter_queue_del(const struct rte_eventdev *dev,
6614ed80e63SSunil Kumar Kori 				    const struct rte_eth_dev *eth_dev,
6624ed80e63SSunil Kumar Kori 				    int32_t rx_queue_id)
6634ed80e63SSunil Kumar Kori {
6644ed80e63SSunil Kumar Kori 	int ret, i;
6654ed80e63SSunil Kumar Kori 	struct dpaa_if *dpaa_intf = eth_dev->data->dev_private;
6664ed80e63SSunil Kumar Kori 
667e7bbddb8SHemant Agrawal 	EVENTDEV_INIT_FUNC_TRACE();
6684ed80e63SSunil Kumar Kori 
6694ed80e63SSunil Kumar Kori 	RTE_SET_USED(dev);
6704ed80e63SSunil Kumar Kori 	if (rx_queue_id == -1) {
6714ed80e63SSunil Kumar Kori 		for (i = 0; i < dpaa_intf->nb_rx_queues; i++) {
6724ed80e63SSunil Kumar Kori 			ret = dpaa_eth_eventq_detach(eth_dev, i);
6734ed80e63SSunil Kumar Kori 			if (ret)
674e7bbddb8SHemant Agrawal 				DPAA_EVENTDEV_ERR(
6751ec9a3afSHemant Agrawal 					"Event Queue detach failed:%d", ret);
6764ed80e63SSunil Kumar Kori 		}
6774ed80e63SSunil Kumar Kori 
6784ed80e63SSunil Kumar Kori 		return 0;
6794ed80e63SSunil Kumar Kori 	}
6804ed80e63SSunil Kumar Kori 
6814ed80e63SSunil Kumar Kori 	ret = dpaa_eth_eventq_detach(eth_dev, rx_queue_id);
6824ed80e63SSunil Kumar Kori 	if (ret)
6831ec9a3afSHemant Agrawal 		DPAA_EVENTDEV_ERR("dpaa_eth_eventq_detach failed:%d", ret);
6844ed80e63SSunil Kumar Kori 	return ret;
6854ed80e63SSunil Kumar Kori }
6864ed80e63SSunil Kumar Kori 
6874ed80e63SSunil Kumar Kori static int
6884ed80e63SSunil Kumar Kori dpaa_event_eth_rx_adapter_start(const struct rte_eventdev *dev,
6894ed80e63SSunil Kumar Kori 				const struct rte_eth_dev *eth_dev)
6904ed80e63SSunil Kumar Kori {
691e7bbddb8SHemant Agrawal 	EVENTDEV_INIT_FUNC_TRACE();
6924ed80e63SSunil Kumar Kori 
6934ed80e63SSunil Kumar Kori 	RTE_SET_USED(dev);
6944ed80e63SSunil Kumar Kori 	RTE_SET_USED(eth_dev);
6954ed80e63SSunil Kumar Kori 
6964ed80e63SSunil Kumar Kori 	return 0;
6974ed80e63SSunil Kumar Kori }
6984ed80e63SSunil Kumar Kori 
6994ed80e63SSunil Kumar Kori static int
7004ed80e63SSunil Kumar Kori dpaa_event_eth_rx_adapter_stop(const struct rte_eventdev *dev,
7014ed80e63SSunil Kumar Kori 			       const struct rte_eth_dev *eth_dev)
7024ed80e63SSunil Kumar Kori {
703e7bbddb8SHemant Agrawal 	EVENTDEV_INIT_FUNC_TRACE();
7044ed80e63SSunil Kumar Kori 
7054ed80e63SSunil Kumar Kori 	RTE_SET_USED(dev);
7064ed80e63SSunil Kumar Kori 	RTE_SET_USED(eth_dev);
7074ed80e63SSunil Kumar Kori 
7084ed80e63SSunil Kumar Kori 	return 0;
7094ed80e63SSunil Kumar Kori }
7104ed80e63SSunil Kumar Kori 
711b0f66a68SAkhil Goyal static int
712b0f66a68SAkhil Goyal dpaa_eventdev_crypto_caps_get(const struct rte_eventdev *dev,
713b0f66a68SAkhil Goyal 			    const struct rte_cryptodev *cdev,
714b0f66a68SAkhil Goyal 			    uint32_t *caps)
715b0f66a68SAkhil Goyal {
716b0f66a68SAkhil Goyal 	const char *name = cdev->data->name;
717b0f66a68SAkhil Goyal 
718b0f66a68SAkhil Goyal 	EVENTDEV_INIT_FUNC_TRACE();
719b0f66a68SAkhil Goyal 
720b0f66a68SAkhil Goyal 	RTE_SET_USED(dev);
721b0f66a68SAkhil Goyal 
722b0f66a68SAkhil Goyal 	if (!strncmp(name, "dpaa_sec-", 9))
723b0f66a68SAkhil Goyal 		*caps = RTE_EVENT_CRYPTO_ADAPTER_DPAA_CAP;
724b0f66a68SAkhil Goyal 	else
725b0f66a68SAkhil Goyal 		return -1;
726b0f66a68SAkhil Goyal 
727b0f66a68SAkhil Goyal 	return 0;
728b0f66a68SAkhil Goyal }
729b0f66a68SAkhil Goyal 
730b0f66a68SAkhil Goyal static int
731b0f66a68SAkhil Goyal dpaa_eventdev_crypto_queue_add_all(const struct rte_eventdev *dev,
732b0f66a68SAkhil Goyal 		const struct rte_cryptodev *cryptodev,
733b0f66a68SAkhil Goyal 		const struct rte_event *ev)
734b0f66a68SAkhil Goyal {
735b0f66a68SAkhil Goyal 	struct dpaa_eventdev *priv = dev->data->dev_private;
736b0f66a68SAkhil Goyal 	uint8_t ev_qid = ev->queue_id;
737b0f66a68SAkhil Goyal 	u16 ch_id = priv->evq_info[ev_qid].ch_id;
738b0f66a68SAkhil Goyal 	int i, ret;
739b0f66a68SAkhil Goyal 
740b0f66a68SAkhil Goyal 	EVENTDEV_INIT_FUNC_TRACE();
741b0f66a68SAkhil Goyal 
742b0f66a68SAkhil Goyal 	for (i = 0; i < cryptodev->data->nb_queue_pairs; i++) {
743b0f66a68SAkhil Goyal 		ret = dpaa_sec_eventq_attach(cryptodev, i,
744b0f66a68SAkhil Goyal 				ch_id, ev);
745b0f66a68SAkhil Goyal 		if (ret) {
7461ec9a3afSHemant Agrawal 			DPAA_EVENTDEV_ERR("dpaa_sec_eventq_attach failed: ret %d",
747b0f66a68SAkhil Goyal 				    ret);
748b0f66a68SAkhil Goyal 			goto fail;
749b0f66a68SAkhil Goyal 		}
750b0f66a68SAkhil Goyal 	}
751b0f66a68SAkhil Goyal 	return 0;
752b0f66a68SAkhil Goyal fail:
753b0f66a68SAkhil Goyal 	for (i = (i - 1); i >= 0 ; i--)
754b0f66a68SAkhil Goyal 		dpaa_sec_eventq_detach(cryptodev, i);
755b0f66a68SAkhil Goyal 
756b0f66a68SAkhil Goyal 	return ret;
757b0f66a68SAkhil Goyal }
758b0f66a68SAkhil Goyal 
759b0f66a68SAkhil Goyal static int
760b0f66a68SAkhil Goyal dpaa_eventdev_crypto_queue_add(const struct rte_eventdev *dev,
761b0f66a68SAkhil Goyal 		const struct rte_cryptodev *cryptodev,
762b0f66a68SAkhil Goyal 		int32_t rx_queue_id,
763c1749bc5SVolodymyr Fialko 		const struct rte_event_crypto_adapter_queue_conf *conf)
764b0f66a68SAkhil Goyal {
765b0f66a68SAkhil Goyal 	struct dpaa_eventdev *priv = dev->data->dev_private;
766c1749bc5SVolodymyr Fialko 	uint8_t ev_qid = conf->ev.queue_id;
767b0f66a68SAkhil Goyal 	u16 ch_id = priv->evq_info[ev_qid].ch_id;
768b0f66a68SAkhil Goyal 	int ret;
769b0f66a68SAkhil Goyal 
770b0f66a68SAkhil Goyal 	EVENTDEV_INIT_FUNC_TRACE();
771b0f66a68SAkhil Goyal 
772b0f66a68SAkhil Goyal 	if (rx_queue_id == -1)
773b0f66a68SAkhil Goyal 		return dpaa_eventdev_crypto_queue_add_all(dev,
774c1749bc5SVolodymyr Fialko 				cryptodev, &conf->ev);
775b0f66a68SAkhil Goyal 
776b0f66a68SAkhil Goyal 	ret = dpaa_sec_eventq_attach(cryptodev, rx_queue_id,
777c1749bc5SVolodymyr Fialko 			ch_id, &conf->ev);
778b0f66a68SAkhil Goyal 	if (ret) {
779b0f66a68SAkhil Goyal 		DPAA_EVENTDEV_ERR(
7801ec9a3afSHemant Agrawal 			"dpaa_sec_eventq_attach failed: ret: %d", ret);
781b0f66a68SAkhil Goyal 		return ret;
782b0f66a68SAkhil Goyal 	}
783b0f66a68SAkhil Goyal 	return 0;
784b0f66a68SAkhil Goyal }
785b0f66a68SAkhil Goyal 
786b0f66a68SAkhil Goyal static int
787b0f66a68SAkhil Goyal dpaa_eventdev_crypto_queue_del_all(const struct rte_eventdev *dev,
788b0f66a68SAkhil Goyal 			     const struct rte_cryptodev *cdev)
789b0f66a68SAkhil Goyal {
790b0f66a68SAkhil Goyal 	int i, ret;
791b0f66a68SAkhil Goyal 
792b0f66a68SAkhil Goyal 	EVENTDEV_INIT_FUNC_TRACE();
793b0f66a68SAkhil Goyal 
794b0f66a68SAkhil Goyal 	RTE_SET_USED(dev);
795b0f66a68SAkhil Goyal 
796b0f66a68SAkhil Goyal 	for (i = 0; i < cdev->data->nb_queue_pairs; i++) {
797b0f66a68SAkhil Goyal 		ret = dpaa_sec_eventq_detach(cdev, i);
798b0f66a68SAkhil Goyal 		if (ret) {
799b0f66a68SAkhil Goyal 			DPAA_EVENTDEV_ERR(
8001ec9a3afSHemant Agrawal 				"dpaa_sec_eventq_detach failed:ret %d", ret);
801b0f66a68SAkhil Goyal 			return ret;
802b0f66a68SAkhil Goyal 		}
803b0f66a68SAkhil Goyal 	}
804b0f66a68SAkhil Goyal 
805b0f66a68SAkhil Goyal 	return 0;
806b0f66a68SAkhil Goyal }
807b0f66a68SAkhil Goyal 
808b0f66a68SAkhil Goyal static int
809b0f66a68SAkhil Goyal dpaa_eventdev_crypto_queue_del(const struct rte_eventdev *dev,
810b0f66a68SAkhil Goyal 			     const struct rte_cryptodev *cryptodev,
811b0f66a68SAkhil Goyal 			     int32_t rx_queue_id)
812b0f66a68SAkhil Goyal {
813b0f66a68SAkhil Goyal 	int ret;
814b0f66a68SAkhil Goyal 
815b0f66a68SAkhil Goyal 	EVENTDEV_INIT_FUNC_TRACE();
816b0f66a68SAkhil Goyal 
817b0f66a68SAkhil Goyal 	if (rx_queue_id == -1)
818b0f66a68SAkhil Goyal 		return dpaa_eventdev_crypto_queue_del_all(dev, cryptodev);
819b0f66a68SAkhil Goyal 
820b0f66a68SAkhil Goyal 	ret = dpaa_sec_eventq_detach(cryptodev, rx_queue_id);
821b0f66a68SAkhil Goyal 	if (ret) {
822b0f66a68SAkhil Goyal 		DPAA_EVENTDEV_ERR(
8231ec9a3afSHemant Agrawal 			"dpaa_sec_eventq_detach failed: ret: %d", ret);
824b0f66a68SAkhil Goyal 		return ret;
825b0f66a68SAkhil Goyal 	}
826b0f66a68SAkhil Goyal 
827b0f66a68SAkhil Goyal 	return 0;
828b0f66a68SAkhil Goyal }
829b0f66a68SAkhil Goyal 
830b0f66a68SAkhil Goyal static int
831b0f66a68SAkhil Goyal dpaa_eventdev_crypto_start(const struct rte_eventdev *dev,
832b0f66a68SAkhil Goyal 			   const struct rte_cryptodev *cryptodev)
833b0f66a68SAkhil Goyal {
834b0f66a68SAkhil Goyal 	EVENTDEV_INIT_FUNC_TRACE();
835b0f66a68SAkhil Goyal 
836b0f66a68SAkhil Goyal 	RTE_SET_USED(dev);
837b0f66a68SAkhil Goyal 	RTE_SET_USED(cryptodev);
838b0f66a68SAkhil Goyal 
839b0f66a68SAkhil Goyal 	return 0;
840b0f66a68SAkhil Goyal }
841b0f66a68SAkhil Goyal 
842b0f66a68SAkhil Goyal static int
843b0f66a68SAkhil Goyal dpaa_eventdev_crypto_stop(const struct rte_eventdev *dev,
844b0f66a68SAkhil Goyal 			  const struct rte_cryptodev *cryptodev)
845b0f66a68SAkhil Goyal {
846b0f66a68SAkhil Goyal 	EVENTDEV_INIT_FUNC_TRACE();
847b0f66a68SAkhil Goyal 
848b0f66a68SAkhil Goyal 	RTE_SET_USED(dev);
849b0f66a68SAkhil Goyal 	RTE_SET_USED(cryptodev);
850b0f66a68SAkhil Goyal 
851b0f66a68SAkhil Goyal 	return 0;
852b0f66a68SAkhil Goyal }
853b0f66a68SAkhil Goyal 
854ba6c1aa2SNipun Gupta static int
855ba6c1aa2SNipun Gupta dpaa_eventdev_tx_adapter_create(uint8_t id,
856ba6c1aa2SNipun Gupta 				 const struct rte_eventdev *dev)
857ba6c1aa2SNipun Gupta {
858ba6c1aa2SNipun Gupta 	RTE_SET_USED(id);
859ba6c1aa2SNipun Gupta 	RTE_SET_USED(dev);
860ba6c1aa2SNipun Gupta 
861ba6c1aa2SNipun Gupta 	/* Nothing to do. Simply return. */
862ba6c1aa2SNipun Gupta 	return 0;
863ba6c1aa2SNipun Gupta }
864ba6c1aa2SNipun Gupta 
865ba6c1aa2SNipun Gupta static int
866ba6c1aa2SNipun Gupta dpaa_eventdev_tx_adapter_caps(const struct rte_eventdev *dev,
867ba6c1aa2SNipun Gupta 			       const struct rte_eth_dev *eth_dev,
868ba6c1aa2SNipun Gupta 			       uint32_t *caps)
869ba6c1aa2SNipun Gupta {
870ba6c1aa2SNipun Gupta 	RTE_SET_USED(dev);
871ba6c1aa2SNipun Gupta 	RTE_SET_USED(eth_dev);
872ba6c1aa2SNipun Gupta 
873ba6c1aa2SNipun Gupta 	*caps = RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT;
874ba6c1aa2SNipun Gupta 	return 0;
875ba6c1aa2SNipun Gupta }
876ba6c1aa2SNipun Gupta 
877ba6c1aa2SNipun Gupta static uint16_t
878ba6c1aa2SNipun Gupta dpaa_eventdev_txa_enqueue_same_dest(void *port,
879ba6c1aa2SNipun Gupta 				     struct rte_event ev[],
880ba6c1aa2SNipun Gupta 				     uint16_t nb_events)
881ba6c1aa2SNipun Gupta {
882ba6c1aa2SNipun Gupta 	struct rte_mbuf *m[DPAA_EVENT_MAX_PORT_ENQUEUE_DEPTH], *m0;
883ba6c1aa2SNipun Gupta 	uint8_t qid, i;
884ba6c1aa2SNipun Gupta 
885ba6c1aa2SNipun Gupta 	RTE_SET_USED(port);
886ba6c1aa2SNipun Gupta 
887ba6c1aa2SNipun Gupta 	m0 = (struct rte_mbuf *)ev[0].mbuf;
888ba6c1aa2SNipun Gupta 	qid = rte_event_eth_tx_adapter_txq_get(m0);
889ba6c1aa2SNipun Gupta 
890ba6c1aa2SNipun Gupta 	for (i = 0; i < nb_events; i++)
891ba6c1aa2SNipun Gupta 		m[i] = (struct rte_mbuf *)ev[i].mbuf;
892ba6c1aa2SNipun Gupta 
893ba6c1aa2SNipun Gupta 	return rte_eth_tx_burst(m0->port, qid, m, nb_events);
894ba6c1aa2SNipun Gupta }
895ba6c1aa2SNipun Gupta 
896ba6c1aa2SNipun Gupta static uint16_t
897ba6c1aa2SNipun Gupta dpaa_eventdev_txa_enqueue(void *port,
898ba6c1aa2SNipun Gupta 			   struct rte_event ev[],
899ba6c1aa2SNipun Gupta 			   uint16_t nb_events)
900ba6c1aa2SNipun Gupta {
901ba6c1aa2SNipun Gupta 	struct rte_mbuf *m = (struct rte_mbuf *)ev[0].mbuf;
902ba6c1aa2SNipun Gupta 	uint8_t qid, i;
903ba6c1aa2SNipun Gupta 
904ba6c1aa2SNipun Gupta 	RTE_SET_USED(port);
905ba6c1aa2SNipun Gupta 
906ba6c1aa2SNipun Gupta 	for (i = 0; i < nb_events; i++) {
907ba6c1aa2SNipun Gupta 		qid = rte_event_eth_tx_adapter_txq_get(m);
908ba6c1aa2SNipun Gupta 		rte_eth_tx_burst(m->port, qid, &m, 1);
909ba6c1aa2SNipun Gupta 	}
910ba6c1aa2SNipun Gupta 
911ba6c1aa2SNipun Gupta 	return nb_events;
912ba6c1aa2SNipun Gupta }
913ba6c1aa2SNipun Gupta 
91423d06e37SPavan Nikhilesh static struct eventdev_ops dpaa_eventdev_ops = {
9159caac5ddSSunil Kumar Kori 	.dev_infos_get    = dpaa_event_dev_info_get,
9169caac5ddSSunil Kumar Kori 	.dev_configure    = dpaa_event_dev_configure,
9179caac5ddSSunil Kumar Kori 	.dev_start        = dpaa_event_dev_start,
9189caac5ddSSunil Kumar Kori 	.dev_stop         = dpaa_event_dev_stop,
9199caac5ddSSunil Kumar Kori 	.dev_close        = dpaa_event_dev_close,
920b08dc643SSunil Kumar Kori 	.queue_def_conf   = dpaa_event_queue_def_conf,
921b08dc643SSunil Kumar Kori 	.queue_setup      = dpaa_event_queue_setup,
922b08dc643SSunil Kumar Kori 	.queue_release    = dpaa_event_queue_release,
9231d99bc35SSunil Kumar Kori 	.port_def_conf    = dpaa_event_port_default_conf_get,
9241d99bc35SSunil Kumar Kori 	.port_setup       = dpaa_event_port_setup,
9251d99bc35SSunil Kumar Kori 	.port_release       = dpaa_event_port_release,
9261d99bc35SSunil Kumar Kori 	.port_link        = dpaa_event_port_link,
9271d99bc35SSunil Kumar Kori 	.port_unlink      = dpaa_event_port_unlink,
9289caac5ddSSunil Kumar Kori 	.timeout_ticks    = dpaa_event_dequeue_timeout_ticks,
9294ed80e63SSunil Kumar Kori 	.eth_rx_adapter_caps_get	= dpaa_event_eth_rx_adapter_caps_get,
9304ed80e63SSunil Kumar Kori 	.eth_rx_adapter_queue_add	= dpaa_event_eth_rx_adapter_queue_add,
9314ed80e63SSunil Kumar Kori 	.eth_rx_adapter_queue_del	= dpaa_event_eth_rx_adapter_queue_del,
9324ed80e63SSunil Kumar Kori 	.eth_rx_adapter_start		= dpaa_event_eth_rx_adapter_start,
9334ed80e63SSunil Kumar Kori 	.eth_rx_adapter_stop		= dpaa_event_eth_rx_adapter_stop,
934ba6c1aa2SNipun Gupta 	.eth_tx_adapter_caps_get	= dpaa_eventdev_tx_adapter_caps,
935ba6c1aa2SNipun Gupta 	.eth_tx_adapter_create		= dpaa_eventdev_tx_adapter_create,
936b0f66a68SAkhil Goyal 	.crypto_adapter_caps_get	= dpaa_eventdev_crypto_caps_get,
937b0f66a68SAkhil Goyal 	.crypto_adapter_queue_pair_add	= dpaa_eventdev_crypto_queue_add,
938b0f66a68SAkhil Goyal 	.crypto_adapter_queue_pair_del	= dpaa_eventdev_crypto_queue_del,
939b0f66a68SAkhil Goyal 	.crypto_adapter_start		= dpaa_eventdev_crypto_start,
940b0f66a68SAkhil Goyal 	.crypto_adapter_stop		= dpaa_eventdev_crypto_stop,
9419caac5ddSSunil Kumar Kori };
9429caac5ddSSunil Kumar Kori 
94377b5311dSHemant Agrawal static int flag_check_handler(__rte_unused const char *key,
94477b5311dSHemant Agrawal 		const char *value, __rte_unused void *opaque)
94577b5311dSHemant Agrawal {
94677b5311dSHemant Agrawal 	if (strcmp(value, "1"))
94777b5311dSHemant Agrawal 		return -1;
94877b5311dSHemant Agrawal 
94977b5311dSHemant Agrawal 	return 0;
95077b5311dSHemant Agrawal }
95177b5311dSHemant Agrawal 
9529caac5ddSSunil Kumar Kori static int
95377b5311dSHemant Agrawal dpaa_event_check_flags(const char *params)
95477b5311dSHemant Agrawal {
95577b5311dSHemant Agrawal 	struct rte_kvargs *kvlist;
95677b5311dSHemant Agrawal 
95777b5311dSHemant Agrawal 	if (params == NULL || params[0] == '\0')
95877b5311dSHemant Agrawal 		return 0;
95977b5311dSHemant Agrawal 
96077b5311dSHemant Agrawal 	kvlist = rte_kvargs_parse(params, NULL);
96177b5311dSHemant Agrawal 	if (kvlist == NULL)
96277b5311dSHemant Agrawal 		return 0;
96377b5311dSHemant Agrawal 
96477b5311dSHemant Agrawal 	if (!rte_kvargs_count(kvlist, DISABLE_INTR_MODE)) {
96577b5311dSHemant Agrawal 		rte_kvargs_free(kvlist);
96677b5311dSHemant Agrawal 		return 0;
96777b5311dSHemant Agrawal 	}
96877b5311dSHemant Agrawal 	/* INTR MODE is disabled when there's key-value pair: disable_intr = 1*/
96977b5311dSHemant Agrawal 	if (rte_kvargs_process(kvlist, DISABLE_INTR_MODE,
97077b5311dSHemant Agrawal 				flag_check_handler, NULL) < 0) {
97177b5311dSHemant Agrawal 		rte_kvargs_free(kvlist);
97277b5311dSHemant Agrawal 		return 0;
97377b5311dSHemant Agrawal 	}
97477b5311dSHemant Agrawal 	rte_kvargs_free(kvlist);
97577b5311dSHemant Agrawal 
97677b5311dSHemant Agrawal 	return 1;
97777b5311dSHemant Agrawal }
97877b5311dSHemant Agrawal 
97977b5311dSHemant Agrawal static int
980928b5c70SBruce Richardson dpaa_event_dev_create(const char *name, const char *params, struct rte_vdev_device *vdev)
9819caac5ddSSunil Kumar Kori {
9829caac5ddSSunil Kumar Kori 	struct rte_eventdev *eventdev;
9839caac5ddSSunil Kumar Kori 	struct dpaa_eventdev *priv;
9849caac5ddSSunil Kumar Kori 
9859caac5ddSSunil Kumar Kori 	eventdev = rte_event_pmd_vdev_init(name,
9869caac5ddSSunil Kumar Kori 					   sizeof(struct dpaa_eventdev),
987928b5c70SBruce Richardson 					   rte_socket_id(), vdev);
9889caac5ddSSunil Kumar Kori 	if (eventdev == NULL) {
989e7bbddb8SHemant Agrawal 		DPAA_EVENTDEV_ERR("Failed to create eventdev vdev %s", name);
9909caac5ddSSunil Kumar Kori 		goto fail;
9919caac5ddSSunil Kumar Kori 	}
99277b5311dSHemant Agrawal 	priv = eventdev->data->dev_private;
9939caac5ddSSunil Kumar Kori 
9949caac5ddSSunil Kumar Kori 	eventdev->dev_ops       = &dpaa_eventdev_ops;
9950ee17f79SSunil Kumar Kori 	eventdev->enqueue_burst = dpaa_event_enqueue_burst;
99677b5311dSHemant Agrawal 
997*e1b07dd5SMattias Rönnblom 	if (dpaa_event_check_flags(params))
9980ee17f79SSunil Kumar Kori 		eventdev->dequeue_burst = dpaa_event_dequeue_burst;
999*e1b07dd5SMattias Rönnblom 	else {
100077b5311dSHemant Agrawal 		priv->intr_mode = 1;
100177b5311dSHemant Agrawal 		eventdev->dev_ops->timeout_ticks =
100277b5311dSHemant Agrawal 				dpaa_event_dequeue_timeout_ticks_intr;
100377b5311dSHemant Agrawal 		eventdev->dequeue_burst = dpaa_event_dequeue_burst_intr;
100477b5311dSHemant Agrawal 	}
1005ba6c1aa2SNipun Gupta 	eventdev->txa_enqueue = dpaa_eventdev_txa_enqueue;
1006ba6c1aa2SNipun Gupta 	eventdev->txa_enqueue_same_dest	= dpaa_eventdev_txa_enqueue_same_dest;
10079caac5ddSSunil Kumar Kori 
1008a247fcd9SStephen Hemminger 	DPAA_EVENTDEV_INFO("%s eventdev added", name);
1009b0f66a68SAkhil Goyal 
10109caac5ddSSunil Kumar Kori 	/* For secondary processes, the primary has done all the work */
10119caac5ddSSunil Kumar Kori 	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
101285be9971SPavan Nikhilesh 		goto done;
10139caac5ddSSunil Kumar Kori 
10149caac5ddSSunil Kumar Kori 	priv->max_event_queues = DPAA_EVENT_MAX_QUEUES;
10159caac5ddSSunil Kumar Kori 
101685be9971SPavan Nikhilesh done:
101785be9971SPavan Nikhilesh 	event_dev_probing_finish(eventdev);
10189caac5ddSSunil Kumar Kori 	return 0;
10199caac5ddSSunil Kumar Kori fail:
10209caac5ddSSunil Kumar Kori 	return -EFAULT;
10219caac5ddSSunil Kumar Kori }
10229caac5ddSSunil Kumar Kori 
10239caac5ddSSunil Kumar Kori static int
10249caac5ddSSunil Kumar Kori dpaa_event_dev_probe(struct rte_vdev_device *vdev)
10259caac5ddSSunil Kumar Kori {
10269caac5ddSSunil Kumar Kori 	const char *name;
102777b5311dSHemant Agrawal 	const char *params;
10289caac5ddSSunil Kumar Kori 
10299caac5ddSSunil Kumar Kori 	name = rte_vdev_device_name(vdev);
1030e7bbddb8SHemant Agrawal 	DPAA_EVENTDEV_INFO("Initializing %s", name);
10319caac5ddSSunil Kumar Kori 
103277b5311dSHemant Agrawal 	params = rte_vdev_device_args(vdev);
103377b5311dSHemant Agrawal 
1034928b5c70SBruce Richardson 	return dpaa_event_dev_create(name, params, vdev);
10359caac5ddSSunil Kumar Kori }
10369caac5ddSSunil Kumar Kori 
10379caac5ddSSunil Kumar Kori static int
10389caac5ddSSunil Kumar Kori dpaa_event_dev_remove(struct rte_vdev_device *vdev)
10399caac5ddSSunil Kumar Kori {
10409caac5ddSSunil Kumar Kori 	const char *name;
10419caac5ddSSunil Kumar Kori 
10429caac5ddSSunil Kumar Kori 	name = rte_vdev_device_name(vdev);
1043e7bbddb8SHemant Agrawal 	DPAA_EVENTDEV_INFO("Closing %s", name);
10449caac5ddSSunil Kumar Kori 
10459caac5ddSSunil Kumar Kori 	return rte_event_pmd_vdev_uninit(name);
10469caac5ddSSunil Kumar Kori }
10479caac5ddSSunil Kumar Kori 
10489caac5ddSSunil Kumar Kori static struct rte_vdev_driver vdev_eventdev_dpaa_pmd = {
10499caac5ddSSunil Kumar Kori 	.probe = dpaa_event_dev_probe,
10509caac5ddSSunil Kumar Kori 	.remove = dpaa_event_dev_remove
10519caac5ddSSunil Kumar Kori };
10529caac5ddSSunil Kumar Kori 
10539caac5ddSSunil Kumar Kori RTE_PMD_REGISTER_VDEV(EVENTDEV_NAME_DPAA_PMD, vdev_eventdev_dpaa_pmd);
105477b5311dSHemant Agrawal RTE_PMD_REGISTER_PARAM_STRING(EVENTDEV_NAME_DPAA_PMD,
105577b5311dSHemant Agrawal 		DISABLE_INTR_MODE "=<int>");
1056