/* xref: /dpdk/lib/eventdev/eventdev_private.c (revision 34e3ad3a1e423a874d0d2388efa04d5d6ebee340) */
1 /* SPDX-License-Identifier: BSD-3-Clause
2  * Copyright(C) 2021 Marvell.
3  */
4 
5 #include "eventdev_pmd.h"
6 #include "rte_eventdev.h"
7 
8 static uint16_t
9 dummy_event_enqueue_burst(__rte_unused void *port,
10 			  __rte_unused const struct rte_event ev[],
11 			  __rte_unused uint16_t nb_events)
12 {
13 	RTE_EDEV_LOG_ERR(
14 		"event enqueue burst requested for unconfigured event device");
15 	return 0;
16 }
17 
18 static uint16_t
19 dummy_event_dequeue_burst(__rte_unused void *port,
20 			  __rte_unused struct rte_event ev[],
21 			  __rte_unused uint16_t nb_events,
22 			  __rte_unused uint64_t timeout_ticks)
23 {
24 	RTE_EDEV_LOG_ERR(
25 		"event dequeue burst requested for unconfigured event device");
26 	return 0;
27 }
28 
29 static void
30 dummy_event_maintain(__rte_unused void *port, __rte_unused int op)
31 {
32 	RTE_EDEV_LOG_ERR(
33 		"maintenance requested for unconfigured event device");
34 }
35 
36 static uint16_t
37 dummy_event_tx_adapter_enqueue(__rte_unused void *port,
38 			       __rte_unused struct rte_event ev[],
39 			       __rte_unused uint16_t nb_events)
40 {
41 	RTE_EDEV_LOG_ERR(
42 		"event Tx adapter enqueue requested for unconfigured event device");
43 	return 0;
44 }
45 
46 static uint16_t
47 dummy_event_tx_adapter_enqueue_same_dest(__rte_unused void *port,
48 					 __rte_unused struct rte_event ev[],
49 					 __rte_unused uint16_t nb_events)
50 {
51 	RTE_EDEV_LOG_ERR(
52 		"event Tx adapter enqueue same destination requested for unconfigured event device");
53 	return 0;
54 }
55 
56 static uint16_t
57 dummy_event_crypto_adapter_enqueue(__rte_unused void *port,
58 				   __rte_unused struct rte_event ev[],
59 				   __rte_unused uint16_t nb_events)
60 {
61 	RTE_EDEV_LOG_ERR(
62 		"event crypto adapter enqueue requested for unconfigured event device");
63 	return 0;
64 }
65 
66 static uint16_t
67 dummy_event_dma_adapter_enqueue(__rte_unused void *port, __rte_unused struct rte_event ev[],
68 			       __rte_unused uint16_t nb_events)
69 {
70 	RTE_EDEV_LOG_ERR("event DMA adapter enqueue requested for unconfigured event device");
71 	return 0;
72 }
73 
74 static int
75 dummy_event_port_profile_switch(__rte_unused void *port, __rte_unused uint8_t profile_id)
76 {
77 	RTE_EDEV_LOG_ERR("change profile requested for unconfigured event device");
78 	return -EINVAL;
79 }
80 
81 static int
82 dummy_event_port_preschedule_modify(__rte_unused void *port,
83 				    __rte_unused enum rte_event_dev_preschedule_type preschedule)
84 {
85 	RTE_EDEV_LOG_ERR("modify pre-schedule requested for unconfigured event device");
86 	return -EINVAL;
87 }
88 
89 static int
90 dummy_event_port_preschedule_modify_hint(
91 	__rte_unused void *port, __rte_unused enum rte_event_dev_preschedule_type preschedule)
92 {
93 	return -ENOTSUP;
94 }
95 
96 static void
97 dummy_event_port_preschedule(__rte_unused void *port,
98 			     __rte_unused enum rte_event_dev_preschedule_type preschedule)
99 {
100 	RTE_EDEV_LOG_ERR("pre-schedule requested for unconfigured event device");
101 }
102 
103 static void
104 dummy_event_port_preschedule_hint(__rte_unused void *port,
105 				  __rte_unused enum rte_event_dev_preschedule_type preschedule)
106 {
107 }
108 
109 void
110 event_dev_fp_ops_reset(struct rte_event_fp_ops *fp_op)
111 {
112 	static void *dummy_data[RTE_MAX_QUEUES_PER_PORT];
113 	static const struct rte_event_fp_ops dummy = {
114 		.enqueue_burst = dummy_event_enqueue_burst,
115 		.enqueue_new_burst = dummy_event_enqueue_burst,
116 		.enqueue_forward_burst = dummy_event_enqueue_burst,
117 		.dequeue_burst = dummy_event_dequeue_burst,
118 		.maintain = dummy_event_maintain,
119 		.txa_enqueue = dummy_event_tx_adapter_enqueue,
120 		.txa_enqueue_same_dest = dummy_event_tx_adapter_enqueue_same_dest,
121 		.ca_enqueue = dummy_event_crypto_adapter_enqueue,
122 		.dma_enqueue = dummy_event_dma_adapter_enqueue,
123 		.profile_switch = dummy_event_port_profile_switch,
124 		.preschedule_modify = dummy_event_port_preschedule_modify,
125 		.preschedule = dummy_event_port_preschedule,
126 		.data = dummy_data,
127 	};
128 
129 	*fp_op = dummy;
130 }
131 
132 void
133 event_dev_fp_ops_set(struct rte_event_fp_ops *fp_op,
134 		     const struct rte_eventdev *dev)
135 {
136 	fp_op->enqueue_burst = dev->enqueue_burst;
137 	fp_op->enqueue_new_burst = dev->enqueue_new_burst;
138 	fp_op->enqueue_forward_burst = dev->enqueue_forward_burst;
139 	fp_op->dequeue_burst = dev->dequeue_burst;
140 	fp_op->maintain = dev->maintain;
141 	fp_op->txa_enqueue = dev->txa_enqueue;
142 	fp_op->txa_enqueue_same_dest = dev->txa_enqueue_same_dest;
143 	fp_op->ca_enqueue = dev->ca_enqueue;
144 	fp_op->dma_enqueue = dev->dma_enqueue;
145 	fp_op->profile_switch = dev->profile_switch;
146 	fp_op->preschedule_modify = dev->preschedule_modify;
147 	fp_op->preschedule = dev->preschedule;
148 	fp_op->data = dev->data->ports;
149 
150 	if (fp_op->preschedule_modify == NULL)
151 		fp_op->preschedule_modify = dummy_event_port_preschedule_modify_hint;
152 
153 	if (fp_op->preschedule == NULL)
154 		fp_op->preschedule = dummy_event_port_preschedule_hint;
155 }
156