/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#ifdef RTE_LIB_EVENTDEV
#include <stdbool.h>

#include "l3fwd.h"
#include "l3fwd_event.h"

/*
 * Configure the event device. The requested queue, port and depth values are
 * capped to the limits reported by rte_event_dev_info_get().
 */
static uint32_t
l3fwd_event_device_setup_internal_port(void)
{
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
	struct rte_event_dev_config event_d_conf = {
		.nb_events_limit = 4096,
		.nb_event_queue_flows = 1024,
		.nb_event_port_dequeue_depth = 128,
		.nb_event_port_enqueue_depth = 128
	};
	struct rte_event_dev_info dev_info;
	const uint8_t event_d_id = 0; /* Always use first event device only */
	uint32_t event_queue_cfg = 0;
	uint16_t ethdev_count = 0;
	uint16_t num_workers = 0;
	uint16_t port_id;
	int ret;

	RTE_ETH_FOREACH_DEV(port_id) {
		if ((evt_rsrc->port_mask & (1 << port_id)) == 0)
			continue;
		ethdev_count++;
	}

	/* Event device configuration */
	rte_event_dev_info_get(event_d_id, &dev_info);

	/* Enable implicit release */
	if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)
		evt_rsrc->disable_implicit_release = 0;

	if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES)
		event_queue_cfg |= RTE_EVENT_QUEUE_CFG_ALL_TYPES;

	event_d_conf.nb_event_queues = ethdev_count;
	if (dev_info.max_event_queues < event_d_conf.nb_event_queues)
		event_d_conf.nb_event_queues = dev_info.max_event_queues;

	if (dev_info.max_num_events < event_d_conf.nb_events_limit)
		event_d_conf.nb_events_limit = dev_info.max_num_events;

	if (dev_info.max_event_queue_flows < event_d_conf.nb_event_queue_flows)
		event_d_conf.nb_event_queue_flows =
					dev_info.max_event_queue_flows;

	if (dev_info.max_event_port_dequeue_depth <
				event_d_conf.nb_event_port_dequeue_depth)
		event_d_conf.nb_event_port_dequeue_depth =
					dev_info.max_event_port_dequeue_depth;

	if (dev_info.max_event_port_enqueue_depth <
				event_d_conf.nb_event_port_enqueue_depth)
		event_d_conf.nb_event_port_enqueue_depth =
					dev_info.max_event_port_enqueue_depth;

	num_workers = rte_lcore_count();
	if (dev_info.max_event_ports < num_workers)
		num_workers = dev_info.max_event_ports;

	event_d_conf.nb_event_ports = num_workers;
	evt_rsrc->evp.nb_ports = num_workers;
	evt_rsrc->evq.nb_queues = event_d_conf.nb_event_queues;
	evt_rsrc->has_burst = !!(dev_info.event_dev_cap &
					RTE_EVENT_DEV_CAP_BURST_MODE);

	if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_EVENT_PRESCHEDULE)
		event_d_conf.preschedule_type = RTE_EVENT_PRESCHEDULE;

	if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_EVENT_PRESCHEDULE_ADAPTIVE)
		event_d_conf.preschedule_type = RTE_EVENT_PRESCHEDULE_ADAPTIVE;

	ret = rte_event_dev_configure(event_d_id, &event_d_conf);
	if (ret < 0)
		rte_panic("Error in configuring event device\n");

	evt_rsrc->event_d_id = event_d_id;
	return event_queue_cfg;
}

/* Set up one event port per lcore and link each port to all event queues. */
static void
l3fwd_event_port_setup_internal_port(void)
{
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
	uint8_t event_d_id = evt_rsrc->event_d_id;
	struct rte_event_port_conf event_p_conf = {
		.dequeue_depth = 32,
		.enqueue_depth = 32,
		.new_event_threshold = 4096
	};
	struct rte_event_port_conf def_p_conf;
	uint8_t event_p_id;
	int32_t ret;

	evt_rsrc->evp.event_p_id = (uint8_t *)malloc(sizeof(uint8_t) *
					evt_rsrc->evp.nb_ports);
	if (!evt_rsrc->evp.event_p_id)
		rte_panic("Failed to allocate memory for Event Ports\n");

	ret = rte_event_port_default_conf_get(event_d_id, 0, &def_p_conf);
	if (ret < 0)
		rte_panic("Error to get default configuration of event port\n");

	if (def_p_conf.new_event_threshold < event_p_conf.new_event_threshold)
		event_p_conf.new_event_threshold =
					def_p_conf.new_event_threshold;

	if (def_p_conf.dequeue_depth < event_p_conf.dequeue_depth)
		event_p_conf.dequeue_depth = def_p_conf.dequeue_depth;

	if (def_p_conf.enqueue_depth < event_p_conf.enqueue_depth)
		event_p_conf.enqueue_depth = def_p_conf.enqueue_depth;

	event_p_conf.event_port_cfg = 0;
	if (evt_rsrc->disable_implicit_release)
		event_p_conf.event_port_cfg |=
					RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;

	evt_rsrc->deq_depth = def_p_conf.dequeue_depth;

	for (event_p_id = 0; event_p_id < evt_rsrc->evp.nb_ports;
							event_p_id++) {
		ret = rte_event_port_setup(event_d_id, event_p_id,
					   &event_p_conf);
		if (ret < 0)
			rte_panic("Error in configuring event port %d\n",
				  event_p_id);

		ret = rte_event_port_link(event_d_id, event_p_id, NULL,
					  NULL, 0);
		if (ret < 0)
			rte_panic("Error in linking event port %d to queue\n",
				  event_p_id);
		evt_rsrc->evp.event_p_id[event_p_id] = event_p_id;

		/* init spinlock */
		rte_spinlock_init(&evt_rsrc->evp.lock);
	}

	evt_rsrc->def_p_conf = event_p_conf;
}

/* Set up the event queues with the configured scheduling type. */
static void
l3fwd_event_queue_setup_internal_port(uint32_t event_queue_cfg)
{
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
	uint8_t event_d_id = evt_rsrc->event_d_id;
	struct rte_event_queue_conf event_q_conf = {
		.nb_atomic_flows = 1024,
		.nb_atomic_order_sequences = 1024,
		.event_queue_cfg = event_queue_cfg,
		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL
	};
	struct rte_event_queue_conf def_q_conf;
	uint8_t event_q_id = 0;
	int32_t ret;

	ret = rte_event_queue_default_conf_get(event_d_id, event_q_id,
					       &def_q_conf);
	if (ret < 0)
		rte_panic("Error to get default config of event queue\n");

	if (def_q_conf.nb_atomic_flows < event_q_conf.nb_atomic_flows)
		event_q_conf.nb_atomic_flows = def_q_conf.nb_atomic_flows;

	if (def_q_conf.nb_atomic_order_sequences <
				event_q_conf.nb_atomic_order_sequences)
		event_q_conf.nb_atomic_order_sequences =
				def_q_conf.nb_atomic_order_sequences;

	event_q_conf.event_queue_cfg = event_queue_cfg;
	event_q_conf.schedule_type = evt_rsrc->sched_type;
	evt_rsrc->evq.event_q_id = (uint8_t *)malloc(sizeof(uint8_t) *
					evt_rsrc->evq.nb_queues);
	if (!evt_rsrc->evq.event_q_id)
		rte_panic("Memory allocation failure\n");

	for (event_q_id = 0; event_q_id < evt_rsrc->evq.nb_queues;
							event_q_id++) {
		ret = rte_event_queue_setup(event_d_id, event_q_id,
					    &event_q_conf);
		if (ret < 0)
			rte_panic("Error in configuring event queue\n");
		evt_rsrc->evq.event_q_id[event_q_id] = event_q_id;
	}
}

/*
 * Create and start one Rx and one Tx adapter for each ethdev enabled in the
 * port mask.
 */
static void
l3fwd_rx_tx_adapter_setup_internal_port(void)
{
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
	struct rte_event_eth_rx_adapter_queue_conf eth_q_conf;
	uint8_t event_d_id = evt_rsrc->event_d_id;
	uint16_t adapter_id = 0;
	uint16_t nb_adapter = 0;
	uint16_t port_id;
	uint8_t q_id = 0;
	int ret;

	memset(&eth_q_conf, 0, sizeof(eth_q_conf));
	eth_q_conf.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;

	RTE_ETH_FOREACH_DEV(port_id) {
		if ((evt_rsrc->port_mask & (1 << port_id)) == 0)
			continue;
		nb_adapter++;
	}

	evt_rsrc->rx_adptr.nb_rx_adptr = nb_adapter;
	evt_rsrc->rx_adptr.rx_adptr = (uint8_t *)malloc(sizeof(uint8_t) *
					evt_rsrc->rx_adptr.nb_rx_adptr);
	if (!evt_rsrc->rx_adptr.rx_adptr) {
		free(evt_rsrc->evp.event_p_id);
		free(evt_rsrc->evq.event_q_id);
		rte_panic("Failed to allocate memory for Rx adapter\n");
	}

	RTE_ETH_FOREACH_DEV(port_id) {
		if ((evt_rsrc->port_mask & (1 << port_id)) == 0)
			continue;

		if (evt_rsrc->vector_enabled) {
			uint32_t cap;

			if (rte_event_eth_rx_adapter_caps_get(event_d_id,
							      port_id, &cap))
				rte_panic(
					"Failed to get event rx adapter capability");

			if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR) {
				eth_q_conf.vector_sz = evt_rsrc->vector_size;
				eth_q_conf.vector_timeout_ns =
					evt_rsrc->vector_tmo_ns;
				eth_q_conf.vector_mp =
					evt_rsrc->per_port_pool ?
					evt_rsrc->vec_pool[port_id] :
					evt_rsrc->vec_pool[0];
				eth_q_conf.rx_queue_flags |=
				RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR;
			} else {
				rte_panic(
					"Rx adapter doesn't support event vector");
			}
		}

		ret = rte_event_eth_rx_adapter_create(adapter_id, event_d_id,
						      &evt_rsrc->def_p_conf);
		if (ret)
			rte_panic("Failed to create rx adapter[%d]\n",
				  adapter_id);

		/* Configure user requested sched type */
		eth_q_conf.ev.sched_type = evt_rsrc->sched_type;
		eth_q_conf.ev.queue_id = evt_rsrc->evq.event_q_id[q_id];
		ret = rte_event_eth_rx_adapter_queue_add(adapter_id, port_id,
							 -1, &eth_q_conf);
		if (ret)
			rte_panic("Failed to add queues to Rx adapter\n");

		ret = rte_event_eth_rx_adapter_start(adapter_id);
		if (ret)
			rte_panic("Rx adapter[%d] start Failed\n", adapter_id);

		evt_rsrc->rx_adptr.rx_adptr[adapter_id] = adapter_id;
		adapter_id++;
		if (q_id < evt_rsrc->evq.nb_queues)
			q_id++;
	}

	evt_rsrc->tx_adptr.nb_tx_adptr = nb_adapter;
	evt_rsrc->tx_adptr.tx_adptr = (uint8_t *)malloc(sizeof(uint8_t) *
					evt_rsrc->tx_adptr.nb_tx_adptr);
	if (!evt_rsrc->tx_adptr.tx_adptr) {
		free(evt_rsrc->rx_adptr.rx_adptr);
		free(evt_rsrc->evp.event_p_id);
		free(evt_rsrc->evq.event_q_id);
		rte_panic("Failed to allocate memory for Tx adapter\n");
	}

	adapter_id = 0;
	RTE_ETH_FOREACH_DEV(port_id) {
		if ((evt_rsrc->port_mask & (1 << port_id)) == 0)
			continue;
		ret = rte_event_eth_tx_adapter_create(adapter_id, event_d_id,
						      &evt_rsrc->def_p_conf);
		if (ret)
			rte_panic("Failed to create tx adapter[%d]\n",
				  adapter_id);

		ret = rte_event_eth_tx_adapter_queue_add(adapter_id, port_id,
							 -1);
		if (ret)
			rte_panic("Failed to add queues to Tx adapter\n");

		ret = rte_event_eth_tx_adapter_start(adapter_id);
		if (ret)
			rte_panic("Tx adapter[%d] start Failed\n", adapter_id);

		evt_rsrc->tx_adptr.tx_adptr[adapter_id] = adapter_id;
		adapter_id++;
	}
}

void
l3fwd_event_set_internal_port_ops(struct l3fwd_event_setup_ops *ops)
{
	ops->event_device_setup = l3fwd_event_device_setup_internal_port;
	ops->event_queue_setup = l3fwd_event_queue_setup_internal_port;
	ops->event_port_setup = l3fwd_event_port_setup_internal_port;
	ops->adapter_setup = l3fwd_rx_tx_adapter_setup_internal_port;
}
#endif /* RTE_LIB_EVENTDEV */