/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <stdbool.h>
#include <getopt.h>

#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_event_eth_tx_adapter.h>
#include <rte_lcore.h>
#include <rte_spinlock.h>

#include "l2fwd_common.h"
#include "l2fwd_event.h"

static uint32_t
l2fwd_event_device_setup_generic(struct l2fwd_resources *rsrc)
{
	struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
	/* Configures event device as per below configuration. 8< */
	struct rte_event_dev_config event_d_conf = {
		.nb_events_limit = 4096,
		.nb_event_queue_flows = 1024,
		.nb_event_port_dequeue_depth = 128,
		.nb_event_port_enqueue_depth = 128
	};
	/* >8 End of event device configuration. */
	struct rte_event_dev_info dev_info;
	const uint8_t event_d_id = 0; /* Always use first event device only */
	uint32_t event_queue_cfg = 0;
	uint16_t ethdev_count = 0;
	uint16_t num_workers = 0;
	uint16_t port_id;
	int ret;

	/* Count the ethdev ports enabled via the port mask. */
	RTE_ETH_FOREACH_DEV(port_id) {
		if ((rsrc->enabled_port_mask & (1 << port_id)) == 0)
			continue;
		ethdev_count++;
	}

	/* Event device configuration */
	rte_event_dev_info_get(event_d_id, &dev_info);

	/* Enable implicit release */
	if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)
		evt_rsrc->disable_implicit_release = 0;

	if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES)
		event_queue_cfg |= RTE_EVENT_QUEUE_CFG_ALL_TYPES;

	/* One queue for each ethdev port + one Tx adapter single link queue. */
	event_d_conf.nb_event_queues = ethdev_count + 1;
	if (dev_info.max_event_queues < event_d_conf.nb_event_queues)
		event_d_conf.nb_event_queues = dev_info.max_event_queues;

	/* Clamp the requested configuration to the device capabilities. */
	if (dev_info.max_num_events < event_d_conf.nb_events_limit)
		event_d_conf.nb_events_limit = dev_info.max_num_events;

	if (dev_info.max_event_queue_flows < event_d_conf.nb_event_queue_flows)
		event_d_conf.nb_event_queue_flows =
					dev_info.max_event_queue_flows;

	if (dev_info.max_event_port_dequeue_depth <
				event_d_conf.nb_event_port_dequeue_depth)
		event_d_conf.nb_event_port_dequeue_depth =
				dev_info.max_event_port_dequeue_depth;

	if (dev_info.max_event_port_enqueue_depth <
				event_d_conf.nb_event_port_enqueue_depth)
		event_d_conf.nb_event_port_enqueue_depth =
				dev_info.max_event_port_enqueue_depth;

	/* Ignore main core and service cores. */
	num_workers = rte_lcore_count() - 1 - rte_service_lcore_count();
	if (dev_info.max_event_ports < num_workers)
		num_workers = dev_info.max_event_ports;

	event_d_conf.nb_event_ports = num_workers;
	evt_rsrc->evp.nb_ports = num_workers;
	evt_rsrc->evq.nb_queues = event_d_conf.nb_event_queues;

	evt_rsrc->has_burst = !!(dev_info.event_dev_cap &
				 RTE_EVENT_DEV_CAP_BURST_MODE);

	ret = rte_event_dev_configure(event_d_id, &event_d_conf);
	if (ret < 0)
		rte_panic("Error in configuring event device\n");

	evt_rsrc->event_d_id = event_d_id;
	return event_queue_cfg;
}

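/*
 * Event port setup: create one event port per worker lcore and link each
 * port to every ethdev Rx event queue. The last event queue is reserved
 * for the Tx adapter and is linked separately during adapter setup.
 */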
static void
l2fwd_event_port_setup_generic(struct l2fwd_resources *rsrc)
{
	struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
	uint8_t event_d_id = evt_rsrc->event_d_id;
	/* Event port initialization. 8< */
	struct rte_event_port_conf event_p_conf = {
		.dequeue_depth = 32,
		.enqueue_depth = 32,
		.new_event_threshold = 4096
	};
	struct rte_event_port_conf def_p_conf;
	uint8_t event_p_id;
	int32_t ret;

	evt_rsrc->evp.event_p_id = (uint8_t *)malloc(sizeof(uint8_t) *
					evt_rsrc->evp.nb_ports);
	if (!evt_rsrc->evp.event_p_id)
		rte_panic("Failed to allocate memory for event port IDs\n");

	memset(&def_p_conf, 0, sizeof(struct rte_event_port_conf));
	ret = rte_event_port_default_conf_get(event_d_id, 0, &def_p_conf);
	if (ret < 0)
		rte_panic("Failed to get default configuration of event port\n");

	/* Clamp the requested port configuration to the device defaults. */
	if (def_p_conf.new_event_threshold < event_p_conf.new_event_threshold)
		event_p_conf.new_event_threshold =
			def_p_conf.new_event_threshold;

	if (def_p_conf.dequeue_depth < event_p_conf.dequeue_depth)
		event_p_conf.dequeue_depth = def_p_conf.dequeue_depth;

	if (def_p_conf.enqueue_depth < event_p_conf.enqueue_depth)
		event_p_conf.enqueue_depth = def_p_conf.enqueue_depth;

	event_p_conf.event_port_cfg = 0;
	if (evt_rsrc->disable_implicit_release)
		event_p_conf.event_port_cfg |=
			RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;

	evt_rsrc->deq_depth = def_p_conf.dequeue_depth;

	for (event_p_id = 0; event_p_id < evt_rsrc->evp.nb_ports;
								event_p_id++) {
		ret = rte_event_port_setup(event_d_id, event_p_id,
					   &event_p_conf);
		if (ret < 0)
			rte_panic("Error in configuring event port %d\n",
				  event_p_id);

		ret = rte_event_port_link(event_d_id, event_p_id,
					  evt_rsrc->evq.event_q_id,
					  NULL,
					  evt_rsrc->evq.nb_queues - 1);
		if (ret != (evt_rsrc->evq.nb_queues - 1))
			rte_panic("Error in linking event port %d to queues\n",
				  event_p_id);
		evt_rsrc->evp.event_p_id[event_p_id] = event_p_id;
		/* >8 End of event port initialization. */
	}
	/* init spinlock */
	rte_spinlock_init(&evt_rsrc->evp.lock);

	evt_rsrc->def_p_conf = event_p_conf;
}

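/*
 * Event queue setup: create one event queue per enabled ethdev port with
 * the user-requested schedule type, plus a final single-link queue at
 * highest priority that feeds the Tx adapter event port.
 */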
static void
l2fwd_event_queue_setup_generic(struct l2fwd_resources *rsrc,
				uint32_t event_queue_cfg)
{
	struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
	uint8_t event_d_id = evt_rsrc->event_d_id;
	/* Event queue initialization. 8< */
	struct rte_event_queue_conf event_q_conf = {
		.nb_atomic_flows = 1024,
		.nb_atomic_order_sequences = 1024,
		.event_queue_cfg = event_queue_cfg,
		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL
	};
	struct rte_event_queue_conf def_q_conf;
	uint8_t event_q_id;
	int32_t ret;

	event_q_conf.schedule_type = rsrc->sched_type;
	evt_rsrc->evq.event_q_id = (uint8_t *)malloc(sizeof(uint8_t) *
					evt_rsrc->evq.nb_queues);
	if (!evt_rsrc->evq.event_q_id)
		rte_panic("Memory allocation failure\n");

	ret = rte_event_queue_default_conf_get(event_d_id, 0, &def_q_conf);
	if (ret < 0)
		rte_panic("Failed to get default config of event queue\n");
	/* >8 End of event queue initialization. */

	if (def_q_conf.nb_atomic_flows < event_q_conf.nb_atomic_flows)
		event_q_conf.nb_atomic_flows = def_q_conf.nb_atomic_flows;

	for (event_q_id = 0; event_q_id < (evt_rsrc->evq.nb_queues - 1);
								event_q_id++) {
		ret = rte_event_queue_setup(event_d_id, event_q_id,
					    &event_q_conf);
		if (ret < 0)
			rte_panic("Error in configuring event queue\n");
		evt_rsrc->evq.event_q_id[event_q_id] = event_q_id;
	}

	/* Last queue: single link, highest priority, feeds the Tx adapter. */
	event_q_conf.event_queue_cfg |= RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
	event_q_conf.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
	ret = rte_event_queue_setup(event_d_id, event_q_id, &event_q_conf);
	if (ret < 0)
		rte_panic("Error in configuring event queue for Tx adapter\n");
	evt_rsrc->evq.event_q_id[event_q_id] = event_q_id;
}

static void
l2fwd_rx_tx_adapter_setup_generic(struct l2fwd_resources *rsrc)
{
	struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
	struct rte_event_eth_rx_adapter_queue_conf eth_q_conf;
	uint8_t event_d_id = evt_rsrc->event_d_id;
	uint8_t rx_adptr_id = 0;
	uint8_t tx_adptr_id = 0;
	uint8_t tx_port_id = 0;
	uint16_t port_id;
	uint32_t service_id;
	int32_t ret, i = 0;

	memset(&eth_q_conf, 0, sizeof(eth_q_conf));
	eth_q_conf.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;

	/* Rx adapter setup */
	evt_rsrc->rx_adptr.nb_rx_adptr = 1;
	evt_rsrc->rx_adptr.rx_adptr = (uint8_t *)malloc(sizeof(uint8_t) *
					evt_rsrc->rx_adptr.nb_rx_adptr);
	if (!evt_rsrc->rx_adptr.rx_adptr) {
		free(evt_rsrc->evp.event_p_id);
		free(evt_rsrc->evq.event_q_id);
		rte_panic("Failed to allocate memory for Rx adapter\n");
	}

	ret = rte_event_eth_rx_adapter_create(rx_adptr_id, event_d_id,
					      &evt_rsrc->def_p_conf);
	if (ret)
		rte_panic("Failed to create Rx adapter\n");

	/* Configure user requested sched type */
	eth_q_conf.ev.sched_type = rsrc->sched_type;
	RTE_ETH_FOREACH_DEV(port_id) {
		if ((rsrc->enabled_port_mask & (1 << port_id)) == 0)
			continue;
		eth_q_conf.ev.queue_id = evt_rsrc->evq.event_q_id[i];
		ret = rte_event_eth_rx_adapter_queue_add(rx_adptr_id, port_id,
							 -1, &eth_q_conf);
		if (ret)
			rte_panic("Failed to add queues to Rx adapter\n");
		if (i < evt_rsrc->evq.nb_queues)
			i++;
	}

	ret = rte_event_eth_rx_adapter_service_id_get(rx_adptr_id, &service_id);
	if (ret != -ESRCH && ret != 0)
		rte_panic("Error getting the service ID for Rx adapter\n");

	rte_service_runstate_set(service_id, 1);
	rte_service_set_runstate_mapped_check(service_id, 0);
	evt_rsrc->rx_adptr.service_id = service_id;

	ret = rte_event_eth_rx_adapter_start(rx_adptr_id);
	if (ret)
		rte_panic("Rx adapter[%d] start failed\n", rx_adptr_id);

	evt_rsrc->rx_adptr.rx_adptr[0] = rx_adptr_id;

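	/*
	 * Tx adapter setup below mirrors the Rx adapter: if the adapter is
	 * backed by a software service, its service ID is stored so the
	 * application can map it to a service lcore, and the adapter's
	 * dedicated event port is linked to the single-link queue created
	 * last during queue setup.
	 */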
	/* Tx adapter setup */
	evt_rsrc->tx_adptr.nb_tx_adptr = 1;
	evt_rsrc->tx_adptr.tx_adptr = (uint8_t *)malloc(sizeof(uint8_t) *
					evt_rsrc->tx_adptr.nb_tx_adptr);
	if (!evt_rsrc->tx_adptr.tx_adptr) {
		free(evt_rsrc->rx_adptr.rx_adptr);
		free(evt_rsrc->evp.event_p_id);
		free(evt_rsrc->evq.event_q_id);
		rte_panic("Failed to allocate memory for Tx adapter\n");
	}

	ret = rte_event_eth_tx_adapter_create(tx_adptr_id, event_d_id,
					      &evt_rsrc->def_p_conf);
	if (ret)
		rte_panic("Failed to create Tx adapter\n");

	RTE_ETH_FOREACH_DEV(port_id) {
		if ((rsrc->enabled_port_mask & (1 << port_id)) == 0)
			continue;
		ret = rte_event_eth_tx_adapter_queue_add(tx_adptr_id, port_id,
							 -1);
		if (ret)
			rte_panic("Failed to add queues to Tx adapter\n");
	}

	ret = rte_event_eth_tx_adapter_service_id_get(tx_adptr_id, &service_id);
	if (ret != -ESRCH && ret != 0)
		rte_panic("Failed to get Tx adapter service ID\n");

	rte_service_runstate_set(service_id, 1);
	rte_service_set_runstate_mapped_check(service_id, 0);
	evt_rsrc->tx_adptr.service_id = service_id;

	/* Extra port created. 8< */
	ret = rte_event_eth_tx_adapter_event_port_get(tx_adptr_id, &tx_port_id);
	if (ret)
		rte_panic("Failed to get Tx adapter port id: %d\n", ret);

	ret = rte_event_port_link(event_d_id, tx_port_id,
				  &evt_rsrc->evq.event_q_id[
						evt_rsrc->evq.nb_queues - 1],
				  NULL, 1);
	if (ret != 1)
		rte_panic("Unable to link Tx adapter port to Tx queue: err=%d\n",
			  ret);
	/* >8 End of extra port created. */

	ret = rte_event_eth_tx_adapter_start(tx_adptr_id);
	if (ret)
		rte_panic("Tx adapter[%d] start failed\n", tx_adptr_id);

	evt_rsrc->tx_adptr.tx_adptr[0] = tx_adptr_id;
}

/* Register the generic-mode setup callbacks. */
void
l2fwd_event_set_generic_ops(struct event_setup_ops *ops)
{
	ops->event_device_setup = l2fwd_event_device_setup_generic;
	ops->event_queue_setup = l2fwd_event_queue_setup_generic;
	ops->event_port_setup = l2fwd_event_port_setup_generic;
	ops->adapter_setup = l2fwd_rx_tx_adapter_setup_generic;
}