/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <stdbool.h>
#include <getopt.h>

#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_event_eth_tx_adapter.h>
#include <rte_lcore.h>
#include <rte_spinlock.h>

#include "l2fwd_common.h"
#include "l2fwd_event.h"

static uint32_t
l2fwd_event_device_setup_internal_port(struct l2fwd_resources *rsrc)
{
	struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
	struct rte_event_dev_config event_d_conf = {
		.nb_events_limit = 4096,
		.nb_event_queue_flows = 1024,
		.nb_event_port_dequeue_depth = 128,
		.nb_event_port_enqueue_depth = 128
	};
	struct rte_event_dev_info dev_info;
	const uint8_t event_d_id = 0; /* Always use first event device only */
	uint32_t event_queue_cfg = 0;
	uint16_t ethdev_count = 0;
	uint16_t num_workers = 0;
	uint16_t port_id;
	int ret;

	RTE_ETH_FOREACH_DEV(port_id) {
		if ((rsrc->enabled_port_mask & (1 << port_id)) == 0)
			continue;
		ethdev_count++;
	}

	/* Event device configuration */
	rte_event_dev_info_get(event_d_id, &dev_info);

	/* Enable implicit release */
	if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)
		evt_rsrc->disable_implicit_release = 0;

	if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES)
		event_queue_cfg |= RTE_EVENT_QUEUE_CFG_ALL_TYPES;

	/* Cap the requested configuration to the device capabilities. */
	event_d_conf.nb_event_queues = ethdev_count;
	if (dev_info.max_event_queues < event_d_conf.nb_event_queues)
		event_d_conf.nb_event_queues = dev_info.max_event_queues;

	if (dev_info.max_num_events < event_d_conf.nb_events_limit)
		event_d_conf.nb_events_limit = dev_info.max_num_events;

	if (dev_info.max_event_queue_flows < event_d_conf.nb_event_queue_flows)
		event_d_conf.nb_event_queue_flows =
						dev_info.max_event_queue_flows;

	if (dev_info.max_event_port_dequeue_depth <
				event_d_conf.nb_event_port_dequeue_depth)
		event_d_conf.nb_event_port_dequeue_depth =
				dev_info.max_event_port_dequeue_depth;

	if (dev_info.max_event_port_enqueue_depth <
				event_d_conf.nb_event_port_enqueue_depth)
		event_d_conf.nb_event_port_enqueue_depth =
				dev_info.max_event_port_enqueue_depth;

	/* Ignore Main core. */
	num_workers = rte_lcore_count() - 1;
	if (dev_info.max_event_ports < num_workers)
		num_workers = dev_info.max_event_ports;

	event_d_conf.nb_event_ports = num_workers;
	evt_rsrc->evp.nb_ports = num_workers;
	evt_rsrc->evq.nb_queues = event_d_conf.nb_event_queues;
	evt_rsrc->has_burst = !!(dev_info.event_dev_cap &
				RTE_EVENT_DEV_CAP_BURST_MODE);

	ret = rte_event_dev_configure(event_d_id, &event_d_conf);
	if (ret < 0)
		rte_panic("Error in configuring event device\n");

	evt_rsrc->event_d_id = event_d_id;
	return event_queue_cfg;
}

static void
l2fwd_event_port_setup_internal_port(struct l2fwd_resources *rsrc)
{
	struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
	uint8_t event_d_id = evt_rsrc->event_d_id;
	struct rte_event_port_conf event_p_conf = {
		.dequeue_depth = 32,
		.enqueue_depth = 32,
		.new_event_threshold = 4096
	};
	struct rte_event_port_conf def_p_conf;
	uint8_t event_p_id;
	int32_t ret;

	evt_rsrc->evp.event_p_id = (uint8_t *)malloc(sizeof(uint8_t) *
					evt_rsrc->evp.nb_ports);
	if (!evt_rsrc->evp.event_p_id)
		rte_panic("Failed to allocate memory for event ports\n");

	ret = rte_event_port_default_conf_get(event_d_id, 0, &def_p_conf);
	if (ret < 0)
		rte_panic("Failed to get default event port configuration\n");

	if (def_p_conf.new_event_threshold < event_p_conf.new_event_threshold)
		event_p_conf.new_event_threshold =
						def_p_conf.new_event_threshold;

	if (def_p_conf.dequeue_depth < event_p_conf.dequeue_depth)
		event_p_conf.dequeue_depth = def_p_conf.dequeue_depth;

	if (def_p_conf.enqueue_depth < event_p_conf.enqueue_depth)
		event_p_conf.enqueue_depth = def_p_conf.enqueue_depth;

	event_p_conf.event_port_cfg = 0;
	if (evt_rsrc->disable_implicit_release)
		event_p_conf.event_port_cfg |=
			RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;

	/* Set up each event port and link it to all event queues. */
	for (event_p_id = 0; event_p_id < evt_rsrc->evp.nb_ports;
								event_p_id++) {
		ret = rte_event_port_setup(event_d_id, event_p_id,
					   &event_p_conf);
		if (ret < 0)
			rte_panic("Error in configuring event port %d\n",
				  event_p_id);

		ret = rte_event_port_link(event_d_id, event_p_id, NULL,
					  NULL, 0);
		if (ret < 0)
			rte_panic("Error in linking event port %d to queue\n",
				  event_p_id);
		evt_rsrc->evp.event_p_id[event_p_id] = event_p_id;

		/* Init spinlock */
		rte_spinlock_init(&evt_rsrc->evp.lock);
	}

	evt_rsrc->def_p_conf = event_p_conf;
}

static void
l2fwd_event_queue_setup_internal_port(struct l2fwd_resources *rsrc,
				      uint32_t event_queue_cfg)
{
	struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
	uint8_t event_d_id = evt_rsrc->event_d_id;
	struct rte_event_queue_conf event_q_conf = {
		.nb_atomic_flows = 1024,
		.nb_atomic_order_sequences = 1024,
		.event_queue_cfg = event_queue_cfg,
		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL
	};
	struct rte_event_queue_conf def_q_conf;
	uint8_t event_q_id = 0;
	int32_t ret;

	ret = rte_event_queue_default_conf_get(event_d_id, event_q_id,
					       &def_q_conf);
	if (ret < 0)
		rte_panic("Failed to get default event queue configuration\n");

	if (def_q_conf.nb_atomic_flows < event_q_conf.nb_atomic_flows)
		event_q_conf.nb_atomic_flows = def_q_conf.nb_atomic_flows;

	if (def_q_conf.nb_atomic_order_sequences <
					event_q_conf.nb_atomic_order_sequences)
		event_q_conf.nb_atomic_order_sequences =
					def_q_conf.nb_atomic_order_sequences;

	event_q_conf.event_queue_cfg = event_queue_cfg;
	event_q_conf.schedule_type = rsrc->sched_type;
	evt_rsrc->evq.event_q_id = (uint8_t *)malloc(sizeof(uint8_t) *
					evt_rsrc->evq.nb_queues);
	if (!evt_rsrc->evq.event_q_id)
		rte_panic("Memory allocation failure\n");

	for (event_q_id = 0; event_q_id < evt_rsrc->evq.nb_queues;
								event_q_id++) {
		ret = rte_event_queue_setup(event_d_id, event_q_id,
					    &event_q_conf);
		if (ret < 0)
			rte_panic("Error in configuring event queue\n");
		evt_rsrc->evq.event_q_id[event_q_id] = event_q_id;
	}
}

static void
l2fwd_rx_tx_adapter_setup_internal_port(struct l2fwd_resources *rsrc)
{
	struct l2fwd_event_resources *evt_rsrc = rsrc->evt_rsrc;
	struct rte_event_eth_rx_adapter_queue_conf eth_q_conf;
	uint8_t event_d_id = evt_rsrc->event_d_id;
	uint16_t adapter_id = 0;
	uint16_t nb_adapter = 0;
	uint16_t port_id;
	uint8_t q_id = 0;
	int ret;

	memset(&eth_q_conf, 0, sizeof(eth_q_conf));
	eth_q_conf.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;

	/* One Rx/Tx adapter per enabled ethdev port. */
	RTE_ETH_FOREACH_DEV(port_id) {
		if ((rsrc->enabled_port_mask & (1 << port_id)) == 0)
			continue;
		nb_adapter++;
	}

	evt_rsrc->rx_adptr.nb_rx_adptr = nb_adapter;
	evt_rsrc->rx_adptr.rx_adptr = (uint8_t *)malloc(sizeof(uint8_t) *
					evt_rsrc->rx_adptr.nb_rx_adptr);
	if (!evt_rsrc->rx_adptr.rx_adptr) {
		free(evt_rsrc->evp.event_p_id);
		free(evt_rsrc->evq.event_q_id);
		rte_panic("Failed to allocate memory for Rx adapter\n");
	}

	/* Create, configure and start one Rx adapter per enabled port. */
	RTE_ETH_FOREACH_DEV(port_id) {
		if ((rsrc->enabled_port_mask & (1 << port_id)) == 0)
			continue;
		ret = rte_event_eth_rx_adapter_create(adapter_id, event_d_id,
						      &evt_rsrc->def_p_conf);
		if (ret)
			rte_panic("Failed to create Rx adapter[%d]\n",
				  adapter_id);

		/* Configure user requested sched type */
		eth_q_conf.ev.sched_type = rsrc->sched_type;
		eth_q_conf.ev.queue_id = evt_rsrc->evq.event_q_id[q_id];
		ret = rte_event_eth_rx_adapter_queue_add(adapter_id, port_id,
							 -1, &eth_q_conf);
		if (ret)
			rte_panic("Failed to add queues to Rx adapter\n");

		ret = rte_event_eth_rx_adapter_start(adapter_id);
		if (ret)
			rte_panic("Rx adapter[%d] start failed\n", adapter_id);

		evt_rsrc->rx_adptr.rx_adptr[adapter_id] = adapter_id;
		adapter_id++;
		if (q_id < evt_rsrc->evq.nb_queues)
			q_id++;
	}

	evt_rsrc->tx_adptr.nb_tx_adptr = nb_adapter;
	evt_rsrc->tx_adptr.tx_adptr = (uint8_t *)malloc(sizeof(uint8_t) *
					evt_rsrc->tx_adptr.nb_tx_adptr);
	if (!evt_rsrc->tx_adptr.tx_adptr) {
		free(evt_rsrc->rx_adptr.rx_adptr);
		free(evt_rsrc->evp.event_p_id);
		free(evt_rsrc->evq.event_q_id);
		rte_panic("Failed to allocate memory for Tx adapter\n");
	}

	/* Create, configure and start one Tx adapter per enabled port. */
	adapter_id = 0;
	RTE_ETH_FOREACH_DEV(port_id) {
		if ((rsrc->enabled_port_mask & (1 << port_id)) == 0)
			continue;
		ret = rte_event_eth_tx_adapter_create(adapter_id, event_d_id,
						      &evt_rsrc->def_p_conf);
		if (ret)
			rte_panic("Failed to create Tx adapter[%d]\n",
				  adapter_id);

		ret = rte_event_eth_tx_adapter_queue_add(adapter_id, port_id,
							 -1);
		if (ret)
			rte_panic("Failed to add queues to Tx adapter\n");

		ret = rte_event_eth_tx_adapter_start(adapter_id);
		if (ret)
			rte_panic("Tx adapter[%d] start failed\n", adapter_id);

		evt_rsrc->tx_adptr.tx_adptr[adapter_id] = adapter_id;
		adapter_id++;
	}
}

void
l2fwd_event_set_internal_port_ops(struct event_setup_ops *ops)
{
	ops->event_device_setup = l2fwd_event_device_setup_internal_port;
	ops->event_queue_setup = l2fwd_event_queue_setup_internal_port;
	ops->event_port_setup = l2fwd_event_port_setup_internal_port;
	ops->adapter_setup = l2fwd_rx_tx_adapter_setup_internal_port;
}