/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <stdbool.h>
#include <getopt.h>

#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_event_eth_tx_adapter.h>
#include <rte_lcore.h>
#include <rte_spinlock.h>

#include "l2fwd_common.h"
#include "l2fwd_event.h"

static uint32_t
l2fwd_event_device_setup_internal_port(struct l2fwd_resources *rsrc)
{
        struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
        struct rte_event_dev_config event_d_conf = {
                .nb_events_limit = 4096,
                .nb_event_queue_flows = 1024,
                .nb_event_port_dequeue_depth = 128,
                .nb_event_port_enqueue_depth = 128
        };
        struct rte_event_dev_info dev_info;
        const uint8_t event_d_id = 0; /* Always use first event device only */
        uint32_t event_queue_cfg = 0;
        uint16_t ethdev_count = 0;
        uint16_t num_workers = 0;
        uint16_t port_id;
        int ret;

        RTE_ETH_FOREACH_DEV(port_id) {
                if ((rsrc->enabled_port_mask & (1 << port_id)) == 0)
                        continue;
                ethdev_count++;
        }

        /* Event device configuration */
        rte_event_dev_info_get(event_d_id, &dev_info);

        /* Enable implicit release */
        if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)
                evt_rsrc->disable_implicit_release = 0;

        if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES)
                event_queue_cfg |= RTE_EVENT_QUEUE_CFG_ALL_TYPES;

        event_d_conf.nb_event_queues = ethdev_count;
        if (dev_info.max_event_queues < event_d_conf.nb_event_queues)
                event_d_conf.nb_event_queues = dev_info.max_event_queues;

        if (dev_info.max_num_events < event_d_conf.nb_events_limit)
                event_d_conf.nb_events_limit = dev_info.max_num_events;

        if (dev_info.max_event_queue_flows < event_d_conf.nb_event_queue_flows)
                event_d_conf.nb_event_queue_flows =
                                        dev_info.max_event_queue_flows;

        if (dev_info.max_event_port_dequeue_depth <
                                event_d_conf.nb_event_port_dequeue_depth)
                event_d_conf.nb_event_port_dequeue_depth =
                                dev_info.max_event_port_dequeue_depth;

        if (dev_info.max_event_port_enqueue_depth <
                                event_d_conf.nb_event_port_enqueue_depth)
                event_d_conf.nb_event_port_enqueue_depth =
                                dev_info.max_event_port_enqueue_depth;

        /* Ignore Main core. */
        num_workers = rte_lcore_count() - 1;
        if (dev_info.max_event_ports < num_workers)
                num_workers = dev_info.max_event_ports;

        event_d_conf.nb_event_ports = num_workers;
        evt_rsrc->evp.nb_ports = num_workers;
        evt_rsrc->evq.nb_queues = event_d_conf.nb_event_queues;
        evt_rsrc->has_burst = !!(dev_info.event_dev_cap &
                                RTE_EVENT_DEV_CAP_BURST_MODE);

        if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_EVENT_PRESCHEDULE)
                event_d_conf.preschedule_type = RTE_EVENT_PRESCHEDULE;

        if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_EVENT_PRESCHEDULE_ADAPTIVE)
                event_d_conf.preschedule_type = RTE_EVENT_PRESCHEDULE_ADAPTIVE;

        ret = rte_event_dev_configure(event_d_id, &event_d_conf);
        if (ret < 0)
                rte_panic("Error in configuring event device\n");

        evt_rsrc->event_d_id = event_d_id;
        return event_queue_cfg;
}

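/*
 * Set up one event port per worker lcore and link each port to all event
 * queues (NULL link list). Port dequeue/enqueue depths and the new-event
 * threshold are clamped to the defaults reported by
 * rte_event_port_default_conf_get().
 */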
static void
l2fwd_event_port_setup_internal_port(struct l2fwd_resources *rsrc)
{
        struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
        uint8_t event_d_id = evt_rsrc->event_d_id;
        struct rte_event_port_conf event_p_conf = {
                .dequeue_depth = 32,
                .enqueue_depth = 32,
                .new_event_threshold = 4096
        };
        struct rte_event_port_conf def_p_conf;
        uint8_t event_p_id;
        int32_t ret;

        evt_rsrc->evp.event_p_id = (uint8_t *)malloc(sizeof(uint8_t) *
                                        evt_rsrc->evp.nb_ports);
        if (!evt_rsrc->evp.event_p_id)
                rte_panic("Failed to allocate memory for Event Ports\n");

        ret = rte_event_port_default_conf_get(event_d_id, 0, &def_p_conf);
        if (ret < 0)
                rte_panic("Failed to get default configuration of event port\n");

        if (def_p_conf.new_event_threshold < event_p_conf.new_event_threshold)
                event_p_conf.new_event_threshold =
                                        def_p_conf.new_event_threshold;

        if (def_p_conf.dequeue_depth < event_p_conf.dequeue_depth)
                event_p_conf.dequeue_depth = def_p_conf.dequeue_depth;

        if (def_p_conf.enqueue_depth < event_p_conf.enqueue_depth)
                event_p_conf.enqueue_depth = def_p_conf.enqueue_depth;

        event_p_conf.event_port_cfg = 0;
        if (evt_rsrc->disable_implicit_release)
                event_p_conf.event_port_cfg |=
                        RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;

        for (event_p_id = 0; event_p_id < evt_rsrc->evp.nb_ports;
                                                        event_p_id++) {
                ret = rte_event_port_setup(event_d_id, event_p_id,
                                           &event_p_conf);
                if (ret < 0)
                        rte_panic("Error in configuring event port %d\n",
                                  event_p_id);

                ret = rte_event_port_link(event_d_id, event_p_id, NULL,
                                          NULL, 0);
                if (ret < 0)
                        rte_panic("Error in linking event port %d to queue\n",
                                  event_p_id);
                evt_rsrc->evp.event_p_id[event_p_id] = event_p_id;

                /* init spinlock */
                rte_spinlock_init(&evt_rsrc->evp.lock);
        }

        evt_rsrc->def_p_conf = event_p_conf;
}

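/*
 * Create one event queue per enabled ethernet port with the scheduling type
 * requested by the application (rsrc->sched_type); atomic flow and ordering
 * limits are clamped to the device defaults.
 */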
static void
l2fwd_event_queue_setup_internal_port(struct l2fwd_resources *rsrc,
                                      uint32_t event_queue_cfg)
{
        struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
        uint8_t event_d_id = evt_rsrc->event_d_id;
        struct rte_event_queue_conf event_q_conf = {
                .nb_atomic_flows = 1024,
                .nb_atomic_order_sequences = 1024,
                .event_queue_cfg = event_queue_cfg,
                .priority = RTE_EVENT_DEV_PRIORITY_NORMAL
        };
        struct rte_event_queue_conf def_q_conf;
        uint8_t event_q_id = 0;
        int32_t ret;

        ret = rte_event_queue_default_conf_get(event_d_id, event_q_id,
                                               &def_q_conf);
        if (ret < 0)
                rte_panic("Failed to get default config of event queue\n");

        if (def_q_conf.nb_atomic_flows < event_q_conf.nb_atomic_flows)
                event_q_conf.nb_atomic_flows = def_q_conf.nb_atomic_flows;

        if (def_q_conf.nb_atomic_order_sequences <
                                        event_q_conf.nb_atomic_order_sequences)
                event_q_conf.nb_atomic_order_sequences =
                                        def_q_conf.nb_atomic_order_sequences;

        event_q_conf.event_queue_cfg = event_queue_cfg;
        event_q_conf.schedule_type = rsrc->sched_type;
        evt_rsrc->evq.event_q_id = (uint8_t *)malloc(sizeof(uint8_t) *
                                        evt_rsrc->evq.nb_queues);
        if (!evt_rsrc->evq.event_q_id)
                rte_panic("Memory allocation failure\n");

        for (event_q_id = 0; event_q_id < evt_rsrc->evq.nb_queues;
                                                        event_q_id++) {
                ret = rte_event_queue_setup(event_d_id, event_q_id,
                                            &event_q_conf);
                if (ret < 0)
                        rte_panic("Error in configuring event queue\n");
                evt_rsrc->evq.event_q_id[event_q_id] = event_q_id;
        }
}

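/*
 * Create and start one Rx and one Tx adapter per enabled ethernet port.
 * With internal event ports the adapters move packets between the ethdev
 * and the event device in hardware, so no adapter service core is needed.
 * Event vectorization is configured on the Rx adapter when requested and
 * supported by the device.
 */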
static void
l2fwd_rx_tx_adapter_setup_internal_port(struct l2fwd_resources *rsrc)
{
        struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
        struct rte_event_eth_rx_adapter_queue_conf eth_q_conf;
        uint8_t event_d_id = evt_rsrc->event_d_id;
        uint16_t adapter_id = 0;
        uint16_t nb_adapter = 0;
        uint16_t port_id;
        uint8_t q_id = 0;
        int ret;

        memset(&eth_q_conf, 0, sizeof(eth_q_conf));
        eth_q_conf.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;

        RTE_ETH_FOREACH_DEV(port_id) {
                if ((rsrc->enabled_port_mask & (1 << port_id)) == 0)
                        continue;
                nb_adapter++;
        }

        evt_rsrc->rx_adptr.nb_rx_adptr = nb_adapter;
        evt_rsrc->rx_adptr.rx_adptr = (uint8_t *)malloc(sizeof(uint8_t) *
                                        evt_rsrc->rx_adptr.nb_rx_adptr);
        if (!evt_rsrc->rx_adptr.rx_adptr) {
                free(evt_rsrc->evp.event_p_id);
                free(evt_rsrc->evq.event_q_id);
                rte_panic("Failed to allocate memory for Rx adapter\n");
        }

        /* Assigned ethernet port. 8< */
        RTE_ETH_FOREACH_DEV(port_id) {
                if ((rsrc->enabled_port_mask & (1 << port_id)) == 0)
                        continue;

                if (rsrc->evt_vec.enabled) {
                        uint32_t cap;

                        if (rte_event_eth_rx_adapter_caps_get(event_d_id,
                                                              port_id, &cap))
                                rte_panic(
                                        "Failed to get event rx adapter capability");

                        if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR) {
                                eth_q_conf.vector_sz = rsrc->evt_vec.size;
                                eth_q_conf.vector_timeout_ns =
                                        rsrc->evt_vec.timeout_ns;
                                eth_q_conf.vector_mp = rsrc->evt_vec_pool;
                                eth_q_conf.rx_queue_flags |=
                                RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR;
                        } else {
                                rte_panic(
                                        "Rx adapter doesn't support event vector");
                        }
                }

                ret = rte_event_eth_rx_adapter_create(adapter_id, event_d_id,
                                                      &evt_rsrc->def_p_conf);
                if (ret)
                        rte_panic("Failed to create rx adapter[%d]\n",
                                  adapter_id);

                /* Configure user requested sched type */
                eth_q_conf.ev.sched_type = rsrc->sched_type;
                eth_q_conf.ev.queue_id = evt_rsrc->evq.event_q_id[q_id];
                ret = rte_event_eth_rx_adapter_queue_add(adapter_id, port_id,
                                                         -1, &eth_q_conf);
                if (ret)
                        rte_panic("Failed to add queues to Rx adapter\n");

                ret = rte_event_eth_rx_adapter_start(adapter_id);
                if (ret)
                        rte_panic("Rx adapter[%d] start failed\n", adapter_id);

                evt_rsrc->rx_adptr.rx_adptr[adapter_id] = adapter_id;
                adapter_id++;
                if (q_id < evt_rsrc->evq.nb_queues)
                        q_id++;
        }

        evt_rsrc->tx_adptr.nb_tx_adptr = nb_adapter;
        evt_rsrc->tx_adptr.tx_adptr = (uint8_t *)malloc(sizeof(uint8_t) *
                                        evt_rsrc->tx_adptr.nb_tx_adptr);
        if (!evt_rsrc->tx_adptr.tx_adptr) {
                free(evt_rsrc->rx_adptr.rx_adptr);
                free(evt_rsrc->evp.event_p_id);
                free(evt_rsrc->evq.event_q_id);
                rte_panic("Failed to allocate memory for Tx adapter\n");
        }

        adapter_id = 0;
        RTE_ETH_FOREACH_DEV(port_id) {
                if ((rsrc->enabled_port_mask & (1 << port_id)) == 0)
                        continue;
                ret = rte_event_eth_tx_adapter_create(adapter_id, event_d_id,
                                                      &evt_rsrc->def_p_conf);
                if (ret)
                        rte_panic("Failed to create tx adapter[%d]\n",
                                  adapter_id);

                ret = rte_event_eth_tx_adapter_queue_add(adapter_id, port_id,
                                                         -1);
                if (ret)
                        rte_panic("Failed to add queues to Tx adapter\n");

                ret = rte_event_eth_tx_adapter_start(adapter_id);
                if (ret)
                        rte_panic("Tx adapter[%d] start failed\n", adapter_id);

                evt_rsrc->tx_adptr.tx_adptr[adapter_id] = adapter_id;
                adapter_id++;
        }
        /* >8 End of assigned ethernet port. */
}

void
l2fwd_event_set_internal_port_ops(struct event_setup_ops *ops)
{
        ops->event_device_setup = l2fwd_event_device_setup_internal_port;
        ops->event_queue_setup = l2fwd_event_queue_setup_internal_port;
        ops->event_port_setup = l2fwd_event_port_setup_internal_port;
        ops->adapter_setup = l2fwd_rx_tx_adapter_setup_internal_port;
}