/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <stdbool.h>

#include "l3fwd.h"
#include "l3fwd_event.h"

static uint32_t
l3fwd_event_device_setup_generic(void)
{
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
	struct rte_event_dev_config event_d_conf = {
		.nb_events_limit = 4096,
		.nb_event_queue_flows = 1024,
		.nb_event_port_dequeue_depth = 128,
		.nb_event_port_enqueue_depth = 128
	};
	struct rte_event_dev_info dev_info;
	const uint8_t event_d_id = 0; /* Always use first event device only */
	uint32_t event_queue_cfg = 0;
	uint16_t ethdev_count = 0;
	uint16_t num_workers = 0;
	uint16_t port_id;
	int ret;

	RTE_ETH_FOREACH_DEV(port_id) {
		if ((evt_rsrc->port_mask & (1 << port_id)) == 0)
			continue;
		ethdev_count++;
	}

	/* Event device configuration */
	rte_event_dev_info_get(event_d_id, &dev_info);
	/* Enable implicit release */
	if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_IMPLICIT_RELEASE_DISABLE)
		evt_rsrc->disable_implicit_release = 0;

	if (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES)
		event_queue_cfg |= RTE_EVENT_QUEUE_CFG_ALL_TYPES;

	/* One queue for each ethdev port + one Tx adapter single link queue. */
	event_d_conf.nb_event_queues = ethdev_count + 1;
	if (dev_info.max_event_queues < event_d_conf.nb_event_queues)
		event_d_conf.nb_event_queues = dev_info.max_event_queues;

	if (dev_info.max_num_events < event_d_conf.nb_events_limit)
		event_d_conf.nb_events_limit = dev_info.max_num_events;

	if (dev_info.max_event_queue_flows < event_d_conf.nb_event_queue_flows)
		event_d_conf.nb_event_queue_flows =
				dev_info.max_event_queue_flows;

	if (dev_info.max_event_port_dequeue_depth <
				event_d_conf.nb_event_port_dequeue_depth)
		event_d_conf.nb_event_port_dequeue_depth =
				dev_info.max_event_port_dequeue_depth;

	if (dev_info.max_event_port_enqueue_depth <
				event_d_conf.nb_event_port_enqueue_depth)
		event_d_conf.nb_event_port_enqueue_depth =
				dev_info.max_event_port_enqueue_depth;

	num_workers = rte_lcore_count() - rte_service_lcore_count();
	if (dev_info.max_event_ports < num_workers)
		num_workers = dev_info.max_event_ports;

	event_d_conf.nb_event_ports = num_workers;
	evt_rsrc->evp.nb_ports = num_workers;
	evt_rsrc->evq.nb_queues = event_d_conf.nb_event_queues;

	evt_rsrc->has_burst = !!(dev_info.event_dev_cap &
					RTE_EVENT_DEV_CAP_BURST_MODE);

	ret = rte_event_dev_configure(event_d_id, &event_d_conf);
	if (ret < 0)
		rte_panic("Error in configuring event device\n");

	evt_rsrc->event_d_id = event_d_id;
	return event_queue_cfg;
}

static void
l3fwd_event_port_setup_generic(void)
{
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
	uint8_t event_d_id = evt_rsrc->event_d_id;
	struct rte_event_port_conf event_p_conf = {
		.dequeue_depth = 32,
		.enqueue_depth = 32,
		.new_event_threshold = 4096
	};
	struct rte_event_port_conf def_p_conf;
	uint8_t event_p_id;
	int32_t ret;

	evt_rsrc->evp.event_p_id = (uint8_t *)malloc(sizeof(uint8_t) *
					evt_rsrc->evp.nb_ports);
	if (!evt_rsrc->evp.event_p_id)
		rte_panic("Failed to allocate memory for event ports\n");

	memset(&def_p_conf, 0, sizeof(struct rte_event_port_conf));
	ret = rte_event_port_default_conf_get(event_d_id, 0, &def_p_conf);
	if (ret < 0)
		rte_panic("Error getting default configuration of event port\n");

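	/*
	 * Cap the requested dequeue/enqueue depths and new-event threshold
	 * to the defaults reported by the driver, so the generic setup also
	 * works on event devices with smaller limits than requested above.
	 */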
	if (def_p_conf.new_event_threshold < event_p_conf.new_event_threshold)
		event_p_conf.new_event_threshold =
				def_p_conf.new_event_threshold;

	if (def_p_conf.dequeue_depth < event_p_conf.dequeue_depth)
		event_p_conf.dequeue_depth = def_p_conf.dequeue_depth;

	if (def_p_conf.enqueue_depth < event_p_conf.enqueue_depth)
		event_p_conf.enqueue_depth = def_p_conf.enqueue_depth;

	event_p_conf.event_port_cfg = 0;
	if (evt_rsrc->disable_implicit_release)
		event_p_conf.event_port_cfg |=
				RTE_EVENT_PORT_CFG_DISABLE_IMPL_REL;

	evt_rsrc->deq_depth = def_p_conf.dequeue_depth;

	for (event_p_id = 0; event_p_id < evt_rsrc->evp.nb_ports;
							event_p_id++) {
		ret = rte_event_port_setup(event_d_id, event_p_id,
					   &event_p_conf);
		if (ret < 0)
			rte_panic("Error in configuring event port %d\n",
				  event_p_id);

		ret = rte_event_port_link(event_d_id, event_p_id,
					  evt_rsrc->evq.event_q_id,
					  NULL,
					  evt_rsrc->evq.nb_queues - 1);
		if (ret != (evt_rsrc->evq.nb_queues - 1))
			rte_panic("Error in linking event port %d to queues\n",
				  event_p_id);
		evt_rsrc->evp.event_p_id[event_p_id] = event_p_id;
	}
	/* init spinlock */
	rte_spinlock_init(&evt_rsrc->evp.lock);

	evt_rsrc->def_p_conf = event_p_conf;
}

static void
l3fwd_event_queue_setup_generic(uint32_t event_queue_cfg)
{
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
	uint8_t event_d_id = evt_rsrc->event_d_id;
	struct rte_event_queue_conf event_q_conf = {
		.nb_atomic_flows = 1024,
		.nb_atomic_order_sequences = 1024,
		.event_queue_cfg = event_queue_cfg,
		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL
	};
	struct rte_event_queue_conf def_q_conf;
	uint8_t event_q_id;
	int32_t ret;

	event_q_conf.schedule_type = evt_rsrc->sched_type;
	evt_rsrc->evq.event_q_id = (uint8_t *)malloc(sizeof(uint8_t) *
					evt_rsrc->evq.nb_queues);
	if (!evt_rsrc->evq.event_q_id)
		rte_panic("Failed to allocate memory for event queues\n");

	ret = rte_event_queue_default_conf_get(event_d_id, 0, &def_q_conf);
	if (ret < 0)
		rte_panic("Error getting default configuration of event queue\n");

	if (def_q_conf.nb_atomic_flows < event_q_conf.nb_atomic_flows)
		event_q_conf.nb_atomic_flows = def_q_conf.nb_atomic_flows;

	for (event_q_id = 0; event_q_id < (evt_rsrc->evq.nb_queues - 1);
							event_q_id++) {
		ret = rte_event_queue_setup(event_d_id, event_q_id,
					    &event_q_conf);
		if (ret < 0)
			rte_panic("Error in configuring event queue\n");
		evt_rsrc->evq.event_q_id[event_q_id] = event_q_id;
	}

	event_q_conf.event_queue_cfg |= RTE_EVENT_QUEUE_CFG_SINGLE_LINK;
	event_q_conf.priority = RTE_EVENT_DEV_PRIORITY_HIGHEST;
	ret = rte_event_queue_setup(event_d_id, event_q_id, &event_q_conf);
	if (ret < 0)
		rte_panic("Error in configuring event queue for Tx adapter\n");
	evt_rsrc->evq.event_q_id[event_q_id] = event_q_id;
}

static void
l3fwd_rx_tx_adapter_setup_generic(void)
{
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
	struct rte_event_eth_rx_adapter_queue_conf eth_q_conf;
	uint8_t event_d_id = evt_rsrc->event_d_id;
	uint8_t rx_adptr_id = 0;
	uint8_t tx_adptr_id = 0;
	uint8_t tx_port_id = 0;
	uint16_t port_id;
	uint32_t service_id;
	int32_t ret, i = 0;

	memset(&eth_q_conf, 0, sizeof(eth_q_conf));
	eth_q_conf.ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;

	/* Rx adapter setup */
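	/*
	 * A single Rx adapter instance serves every ethdev selected by
	 * port_mask: each port has all of its Rx queues added (an
	 * rx_queue_id of -1 means "all queues") and steered to that port's
	 * event queue created in l3fwd_event_queue_setup_generic().
	 */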
	evt_rsrc->rx_adptr.nb_rx_adptr = 1;
	evt_rsrc->rx_adptr.rx_adptr = (uint8_t *)malloc(sizeof(uint8_t) *
					evt_rsrc->rx_adptr.nb_rx_adptr);
	if (!evt_rsrc->rx_adptr.rx_adptr) {
		free(evt_rsrc->evp.event_p_id);
		free(evt_rsrc->evq.event_q_id);
		rte_panic("Failed to allocate memory for Rx adapter\n");
	}

	ret = rte_event_eth_rx_adapter_create(rx_adptr_id, event_d_id,
					      &evt_rsrc->def_p_conf);
	if (ret)
		rte_panic("Failed to create Rx adapter\n");

	/* Configure user requested sched type */
	eth_q_conf.ev.sched_type = evt_rsrc->sched_type;
	RTE_ETH_FOREACH_DEV(port_id) {
		if ((evt_rsrc->port_mask & (1 << port_id)) == 0)
			continue;
		eth_q_conf.ev.queue_id = evt_rsrc->evq.event_q_id[i];
		ret = rte_event_eth_rx_adapter_queue_add(rx_adptr_id, port_id,
							 -1, &eth_q_conf);
		if (ret)
			rte_panic("Failed to add queues to Rx adapter\n");
		if (i < evt_rsrc->evq.nb_queues)
			i++;
	}

	ret = rte_event_eth_rx_adapter_service_id_get(rx_adptr_id, &service_id);
	if (ret != -ESRCH && ret != 0)
		rte_panic("Error getting service ID for Rx adapter\n");

	rte_service_runstate_set(service_id, 1);
	rte_service_set_runstate_mapped_check(service_id, 0);
	evt_rsrc->rx_adptr.service_id = service_id;

	ret = rte_event_eth_rx_adapter_start(rx_adptr_id);
	if (ret)
		rte_panic("Rx adapter[%d] start failed\n", rx_adptr_id);

	evt_rsrc->rx_adptr.rx_adptr[0] = rx_adptr_id;

	/* Tx adapter setup */
	evt_rsrc->tx_adptr.nb_tx_adptr = 1;
	evt_rsrc->tx_adptr.tx_adptr = (uint8_t *)malloc(sizeof(uint8_t) *
					evt_rsrc->tx_adptr.nb_tx_adptr);
	if (!evt_rsrc->tx_adptr.tx_adptr) {
		free(evt_rsrc->rx_adptr.rx_adptr);
		free(evt_rsrc->evp.event_p_id);
		free(evt_rsrc->evq.event_q_id);
		rte_panic("Failed to allocate memory for Tx adapter\n");
	}

	ret = rte_event_eth_tx_adapter_create(tx_adptr_id, event_d_id,
					      &evt_rsrc->def_p_conf);
	if (ret)
		rte_panic("Failed to create Tx adapter\n");

	RTE_ETH_FOREACH_DEV(port_id) {
		if ((evt_rsrc->port_mask & (1 << port_id)) == 0)
			continue;
		ret = rte_event_eth_tx_adapter_queue_add(tx_adptr_id, port_id,
							 -1);
		if (ret)
			rte_panic("Failed to add queues to Tx adapter\n");
	}

	ret = rte_event_eth_tx_adapter_service_id_get(tx_adptr_id, &service_id);
	if (ret != -ESRCH && ret != 0)
		rte_panic("Failed to get Tx adapter service ID\n");

	rte_service_runstate_set(service_id, 1);
	rte_service_set_runstate_mapped_check(service_id, 0);
	evt_rsrc->tx_adptr.service_id = service_id;

	ret = rte_event_eth_tx_adapter_event_port_get(tx_adptr_id, &tx_port_id);
	if (ret)
		rte_panic("Failed to get Tx adapter port id: %d\n", ret);

	ret = rte_event_port_link(event_d_id, tx_port_id,
				  &evt_rsrc->evq.event_q_id[
					evt_rsrc->evq.nb_queues - 1],
				  NULL, 1);
	if (ret != 1)
		rte_panic("Unable to link Tx adapter port to Tx queue: err=%d\n",
			  ret);

	ret = rte_event_eth_tx_adapter_start(tx_adptr_id);
	if (ret)
		rte_panic("Tx adapter[%d] start failed\n", tx_adptr_id);

	evt_rsrc->tx_adptr.tx_adptr[0] = tx_adptr_id;
}

void
l3fwd_event_set_generic_ops(struct l3fwd_event_setup_ops *ops)
{
	ops->event_device_setup = l3fwd_event_device_setup_generic;
	ops->event_queue_setup = l3fwd_event_queue_setup_generic;
	ops->event_port_setup = l3fwd_event_port_setup_generic;
	ops->adapter_setup = l3fwd_rx_tx_adapter_setup_generic;
}
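
/*
 * Usage sketch (illustration only, not part of this file): the ops installed
 * above are expected to be driven by the event resource setup path in
 * l3fwd_event.c. The "evt_rsrc->ops" member and the exact call site are
 * assumptions about that caller; only the call order below follows from the
 * dependencies in this file (queues must exist before ports link to them,
 * and adapters link to the last queue).
 *
 *	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
 *	uint32_t event_queue_cfg;
 *
 *	l3fwd_event_set_generic_ops(&evt_rsrc->ops);
 *	event_queue_cfg = evt_rsrc->ops.event_device_setup();
 *	evt_rsrc->ops.event_queue_setup(event_queue_cfg);
 *	evt_rsrc->ops.event_port_setup();
 *	evt_rsrc->ops.adapter_setup();
 *	rte_event_dev_start(evt_rsrc->event_d_id);
 */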