/*
 * SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 Cavium, Inc.
 */

#include "test_pipeline_common.h"

int
pipeline_test_result(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);
	int i;
	uint64_t total = 0;
	struct test_pipeline *t = evt_test_priv(test);

	evt_info("Packet distribution across worker cores:");
	for (i = 0; i < t->nb_workers; i++)
		total += t->worker[i].processed_pkts;
	for (i = 0; i < t->nb_workers; i++)
		evt_info("Worker %d packets: "CLGRN"%"PRIx64""CLNRM" percentage:"
				CLGRN" %3.2f"CLNRM, i,
				t->worker[i].processed_pkts,
				(((double)t->worker[i].processed_pkts)/total)
				* 100);
	return t->result;
}

void
pipeline_opt_dump(struct evt_options *opt, uint8_t nb_queues)
{
	evt_dump("nb_worker_lcores", "%d", evt_nr_active_lcores(opt->wlcores));
	evt_dump_worker_lcores(opt);
	evt_dump_nb_stages(opt);
	evt_dump("nb_evdev_ports", "%d", pipeline_nb_event_ports(opt));
	evt_dump("nb_evdev_queues", "%d", nb_queues);
	evt_dump_queue_priority(opt);
	evt_dump_sched_type_list(opt);
	evt_dump_producer_type(opt);
}

/* Sum of packets processed so far by all worker lcores. */
static inline uint64_t
processed_pkts(struct test_pipeline *t)
{
	uint8_t i;
	uint64_t total = 0;

	rte_smp_rmb();
	for (i = 0; i < t->nb_workers; i++)
		total += t->worker[i].processed_pkts;

	return total;
}

int
pipeline_launch_lcores(struct evt_test *test, struct evt_options *opt,
		int (*worker)(void *))
{
	int ret, lcore_id;
	struct test_pipeline *t = evt_test_priv(test);

	int port_idx = 0;
	/* launch workers */
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (!(opt->wlcores[lcore_id]))
			continue;

		ret = rte_eal_remote_launch(worker,
				&t->worker[port_idx], lcore_id);
		if (ret) {
			evt_err("failed to launch worker %d", lcore_id);
			return ret;
		}
		port_idx++;
	}

	uint64_t perf_cycles = rte_get_timer_cycles();
	const uint64_t perf_sample = rte_get_timer_hz();

	static float total_mpps;
	static uint64_t samples;

	uint64_t prev_pkts = 0;

	/* Print throughput roughly once per second until a worker sets t->done. */
	while (t->done == false) {
		const uint64_t new_cycles = rte_get_timer_cycles();

		if ((new_cycles - perf_cycles) > perf_sample) {
			const uint64_t curr_pkts = processed_pkts(t);

			float mpps = (float)(curr_pkts - prev_pkts)/1000000;

			prev_pkts = curr_pkts;
			perf_cycles = new_cycles;
			total_mpps += mpps;
			++samples;
			printf(CLGRN"\r%.3f mpps avg %.3f mpps"CLNRM,
					mpps, total_mpps/samples);
			fflush(stdout);
		}
	}
	printf("\n");
	return 0;
}
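/*
 * Validate the parsed options before any setup is done: the test needs at
 * least one ethernet device and two lcores (master plus one worker), worker
 * lcores must be enabled and must not overlap the master lcore, and the
 * queue/port counts must stay within EVT_MAX_QUEUES/EVT_MAX_PORTS.
 */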
int
pipeline_opt_check(struct evt_options *opt, uint64_t nb_queues)
{
	unsigned int lcores;
	/*
	 * N worker + 1 master
	 */
	lcores = 2;

	if (!rte_eth_dev_count_avail()) {
		evt_err("test needs minimum 1 ethernet dev");
		return -1;
	}

	if (rte_lcore_count() < lcores) {
		evt_err("test needs minimum %d lcores", lcores);
		return -1;
	}

	/* Validate worker lcores */
	if (evt_lcores_has_overlap(opt->wlcores, rte_get_master_lcore())) {
		evt_err("worker lcores overlap with master lcore");
		return -1;
	}
	if (evt_has_disabled_lcore(opt->wlcores)) {
		evt_err("one or more worker lcores are not enabled");
		return -1;
	}
	if (!evt_has_active_lcore(opt->wlcores)) {
		evt_err("minimum one worker is required");
		return -1;
	}

	if (nb_queues > EVT_MAX_QUEUES) {
		evt_err("number of queues exceeds %d", EVT_MAX_QUEUES);
		return -1;
	}
	if (pipeline_nb_event_ports(opt) > EVT_MAX_PORTS) {
		evt_err("number of ports exceeds %d", EVT_MAX_PORTS);
		return -1;
	}

	if (evt_has_invalid_stage(opt))
		return -1;

	if (evt_has_invalid_sched_type(opt))
		return -1;

	return 0;
}

#define NB_RX_DESC 128
#define NB_TX_DESC 512
int
pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt)
{
	uint16_t i;
	int ret;
	uint8_t nb_queues = 1;
	struct test_pipeline *t = evt_test_priv(test);
	struct rte_eth_rxconf rx_conf;
	struct rte_eth_conf port_conf = {
		.rxmode = {
			.mq_mode = ETH_MQ_RX_RSS,
			.max_rx_pkt_len = RTE_ETHER_MAX_LEN,
		},
		.rx_adv_conf = {
			.rss_conf = {
				.rss_key = NULL,
				.rss_hf = ETH_RSS_IP,
			},
		},
	};

	RTE_SET_USED(opt);
	if (!rte_eth_dev_count_avail()) {
		evt_err("No ethernet ports found.");
		return -ENODEV;
	}

	t->internal_port = 1;
	RTE_ETH_FOREACH_DEV(i) {
		struct rte_eth_dev_info dev_info;
		struct rte_eth_conf local_port_conf = port_conf;
		uint32_t caps = 0;

		/* If any ethdev lacks the Tx adapter internal port
		 * capability, fall back to the generic Tx adapter path.
		 */
		rte_event_eth_tx_adapter_caps_get(opt->dev_id, i, &caps);
		if (!(caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT))
			t->internal_port = 0;

		ret = rte_eth_dev_info_get(i, &dev_info);
		if (ret != 0) {
			evt_err("Error during getting device (port %u) info: %s\n",
					i, strerror(-ret));
			return ret;
		}

		rx_conf = dev_info.default_rxconf;
		rx_conf.offloads = port_conf.rxmode.offloads;

		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
				dev_info.flow_type_rss_offloads;
		if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
				port_conf.rx_adv_conf.rss_conf.rss_hf) {
			evt_info("Port %u modified RSS hash function based on hardware support,"
					" requested:%#"PRIx64" configured:%#"PRIx64"",
					i,
					port_conf.rx_adv_conf.rss_conf.rss_hf,
					local_port_conf.rx_adv_conf.rss_conf.rss_hf);
		}

		if (rte_eth_dev_configure(i, nb_queues, nb_queues,
					&local_port_conf) < 0) {
			evt_err("Failed to configure eth port [%d]", i);
			return -EINVAL;
		}

		if (rte_eth_rx_queue_setup(i, 0, NB_RX_DESC,
				rte_socket_id(), &rx_conf, t->pool) < 0) {
			evt_err("Failed to setup eth port [%d] rx_queue: %d.",
					i, 0);
			return -EINVAL;
		}
		if (rte_eth_tx_queue_setup(i, 0, NB_TX_DESC,
					rte_socket_id(), NULL) < 0) {
			evt_err("Failed to setup eth port [%d] tx_queue: %d.",
					i, 0);
			return -EINVAL;
		}

		ret = rte_eth_promiscuous_enable(i);
		if (ret != 0) {
			evt_err("Failed to enable promiscuous mode for eth port [%d]: %s",
					i, rte_strerror(-ret));
			return ret;
		}
	}

	return 0;
}

int
pipeline_event_port_setup(struct evt_test *test, struct evt_options *opt,
		uint8_t *queue_arr, uint8_t nb_queues,
		const struct rte_event_port_conf p_conf)
{
	int ret;
	uint8_t port;
	struct test_pipeline *t = evt_test_priv(test);

	/* setup one port per worker, linking to all queues */
	for (port = 0; port < evt_nr_active_lcores(opt->wlcores); port++) {
		struct worker_data *w = &t->worker[port];

		w->dev_id = opt->dev_id;
		w->port_id = port;
		w->t = t;
		w->processed_pkts = 0;

		ret = rte_event_port_setup(opt->dev_id, port, &p_conf);
		if (ret) {
			evt_err("failed to setup port %d", port);
			return ret;
		}

		if (rte_event_port_link(opt->dev_id, port, queue_arr, NULL,
					nb_queues) != nb_queues)
			goto link_fail;
	}

	return 0;

link_fail:
	evt_err("failed to link queues to port %d", port);
	return -EINVAL;
}
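/*
 * Create one Rx adapter per ethernet device and add all of its Rx queues to
 * the event device. Each ethdev feeds event queue (port_id * stride) using
 * the first scheduling type from the stage list. When an adapter lacks the
 * internal port capability, a service core is set up to run it.
 */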
int
pipeline_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
		struct rte_event_port_conf prod_conf)
{
	int ret = 0;
	uint16_t prod;
	struct rte_event_eth_rx_adapter_queue_conf queue_conf;

	memset(&queue_conf, 0,
			sizeof(struct rte_event_eth_rx_adapter_queue_conf));
	queue_conf.ev.sched_type = opt->sched_type_list[0];
	RTE_ETH_FOREACH_DEV(prod) {
		uint32_t cap;

		ret = rte_event_eth_rx_adapter_caps_get(opt->dev_id,
				prod, &cap);
		if (ret) {
			evt_err("failed to get event rx adapter[%d]"
					" capabilities",
					prod);
			return ret;
		}
		queue_conf.ev.queue_id = prod * stride;
		ret = rte_event_eth_rx_adapter_create(prod, opt->dev_id,
				&prod_conf);
		if (ret) {
			evt_err("failed to create rx adapter[%d]", prod);
			return ret;
		}
		ret = rte_event_eth_rx_adapter_queue_add(prod, prod, -1,
				&queue_conf);
		if (ret) {
			evt_err("failed to add rx queues to adapter[%d]", prod);
			return ret;
		}

		if (!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
			uint32_t service_id;

			rte_event_eth_rx_adapter_service_id_get(prod,
					&service_id);
			ret = evt_service_setup(service_id);
			if (ret) {
				evt_err("Failed to setup service core"
						" for Rx adapter");
				return ret;
			}
		}

		evt_info("Port[%d] configured with Rx adapter[%d]", prod,
				prod);
	}

	return ret;
}

int
pipeline_event_tx_adapter_setup(struct evt_options *opt,
		struct rte_event_port_conf port_conf)
{
	int ret = 0;
	uint16_t consm;

	RTE_ETH_FOREACH_DEV(consm) {
		uint32_t cap;

		ret = rte_event_eth_tx_adapter_caps_get(opt->dev_id,
				consm, &cap);
		if (ret) {
			evt_err("failed to get event tx adapter[%d] caps",
					consm);
			return ret;
		}

		ret = rte_event_eth_tx_adapter_create(consm, opt->dev_id,
				&port_conf);
		if (ret) {
			evt_err("failed to create tx adapter[%d]", consm);
			return ret;
		}

		ret = rte_event_eth_tx_adapter_queue_add(consm, consm, -1);
		if (ret) {
			evt_err("failed to add tx queues to adapter[%d]",
					consm);
			return ret;
		}

		if (!(cap & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT)) {
			uint32_t service_id;

			rte_event_eth_tx_adapter_service_id_get(consm,
					&service_id);
			ret = evt_service_setup(service_id);
			if (ret) {
				evt_err("Failed to setup service core"
						" for Tx adapter\n");
				return ret;
			}
		}

		evt_info("Port[%d] configured with Tx adapter[%d]", consm,
				consm);
	}

	return ret;
}

void
pipeline_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
{
	uint16_t i;
	RTE_SET_USED(test);
	RTE_SET_USED(opt);

	RTE_ETH_FOREACH_DEV(i) {
		rte_event_eth_rx_adapter_stop(i);
		rte_event_eth_tx_adapter_stop(i);
		rte_eth_dev_stop(i);
	}
}

void
pipeline_eventdev_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(test);

	rte_event_dev_stop(opt->dev_id);
	rte_event_dev_close(opt->dev_id);
}
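/*
 * Create the packet mbuf pool shared by all ethdev Rx queues, sized from
 * opt->pool_sz and allocated on opt->socket_id.
 */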
int
pipeline_mempool_setup(struct evt_test *test, struct evt_options *opt)
{
	struct test_pipeline *t = evt_test_priv(test);

	t->pool = rte_pktmbuf_pool_create(test->name, /* mempool name */
			opt->pool_sz, /* number of elements */
			512, /* cache size */
			0, /* private data size */
			RTE_MBUF_DEFAULT_BUF_SIZE,
			opt->socket_id); /* socket id */

	if (t->pool == NULL) {
		evt_err("failed to create mempool");
		return -ENOMEM;
	}

	return 0;
}

void
pipeline_mempool_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);
	struct test_pipeline *t = evt_test_priv(test);

	rte_mempool_free(t->pool);
}

int
pipeline_test_setup(struct evt_test *test, struct evt_options *opt)
{
	void *test_pipeline;

	test_pipeline = rte_zmalloc_socket(test->name,
			sizeof(struct test_pipeline), RTE_CACHE_LINE_SIZE,
			opt->socket_id);
	if (test_pipeline == NULL) {
		evt_err("failed to allocate test_pipeline memory");
		goto nomem;
	}
	test->test_priv = test_pipeline;

	struct test_pipeline *t = evt_test_priv(test);

	t->nb_workers = evt_nr_active_lcores(opt->wlcores);
	t->outstand_pkts = opt->nb_pkts * evt_nr_active_lcores(opt->wlcores);
	t->done = false;
	t->nb_flows = opt->nb_flows;
	t->result = EVT_TEST_FAILED;
	t->opt = opt;
	opt->prod_type = EVT_PROD_TYPE_ETH_RX_ADPTR;
	memcpy(t->sched_type_list, opt->sched_type_list,
			sizeof(opt->sched_type_list));
	return 0;
nomem:
	return -ENOMEM;
}

void
pipeline_test_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);

	rte_free(test->test_priv);
}
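/*
 * Note: nothing in this file calls these helpers directly. Each concrete
 * pipeline test (the queue and all-types-queue variants) plugs them into its
 * test ops vector, and the test-eventdev framework then drives them in
 * roughly the following order (the exact sequencing lives in the framework's
 * main loop and may differ between releases):
 *
 *   opt_check -> test_setup -> mempool_setup -> ethdev_setup ->
 *   eventdev_setup -> launch_lcores -> test_result ->
 *   eventdev_destroy -> ethdev_destroy -> mempool_destroy -> test_destroy
 */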