/*
 * SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 Cavium, Inc.
 */

#include "test_pipeline_common.h"

int
pipeline_test_result(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);
	int i;
	uint64_t total = 0;
	struct test_pipeline *t = evt_test_priv(test);

	evt_info("Packet distribution across worker cores :");
	for (i = 0; i < t->nb_workers; i++)
		total += t->worker[i].processed_pkts;
	for (i = 0; i < t->nb_workers; i++)
		evt_info("Worker %d packets: "CLGRN"%"PRIx64""CLNRM" percentage:"
				CLGRN" %3.2f"CLNRM, i,
				t->worker[i].processed_pkts,
				(((double)t->worker[i].processed_pkts)/total)
				* 100);
	return t->result;
}

void
pipeline_opt_dump(struct evt_options *opt, uint8_t nb_queues)
{
	evt_dump("nb_worker_lcores", "%d", evt_nr_active_lcores(opt->wlcores));
	evt_dump_worker_lcores(opt);
	evt_dump_nb_stages(opt);
	evt_dump("nb_evdev_ports", "%d", pipeline_nb_event_ports(opt));
	evt_dump("nb_evdev_queues", "%d", nb_queues);
	evt_dump_queue_priority(opt);
	evt_dump_sched_type_list(opt);
	evt_dump_producer_type(opt);
}

/* Sum the per-worker packet counters to get the test-wide total. */
static inline uint64_t
processed_pkts(struct test_pipeline *t)
{
	uint8_t i;
	uint64_t total = 0;

	rte_smp_rmb();
	for (i = 0; i < t->nb_workers; i++)
		total += t->worker[i].processed_pkts;

	return total;
}

int
pipeline_launch_lcores(struct evt_test *test, struct evt_options *opt,
		int (*worker)(void *))
{
	int ret, lcore_id;
	struct test_pipeline *t = evt_test_priv(test);

	int port_idx = 0;
	/* launch workers */
	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		if (!(opt->wlcores[lcore_id]))
			continue;

		ret = rte_eal_remote_launch(worker,
				&t->worker[port_idx], lcore_id);
		if (ret) {
			evt_err("failed to launch worker %d", lcore_id);
			return ret;
		}
		port_idx++;
	}

	uint64_t perf_cycles = rte_get_timer_cycles();
	const uint64_t perf_sample = rte_get_timer_hz();

	static float total_mpps;
	static uint64_t samples;

	uint64_t prev_pkts = 0;

	/* Print current and average throughput roughly once per second
	 * until the test signals completion.
	 */
	while (t->done == false) {
		const uint64_t new_cycles = rte_get_timer_cycles();

		if ((new_cycles - perf_cycles) > perf_sample) {
			const uint64_t curr_pkts = processed_pkts(t);

			float mpps = (float)(curr_pkts - prev_pkts)/1000000;

			prev_pkts = curr_pkts;
			perf_cycles = new_cycles;
			total_mpps += mpps;
			++samples;
			printf(CLGRN"\r%.3f mpps avg %.3f mpps"CLNRM,
					mpps, total_mpps/samples);
			fflush(stdout);
		}
	}
	printf("\n");
	return 0;
}

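/*
 * Sanity-check the options before any setup work: the pipeline test only
 * supports the ethdev Rx adapter producer and needs at least one ethdev
 * plus two lcores (main + one worker).
 */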
int
pipeline_opt_check(struct evt_options *opt, uint64_t nb_queues)
{
	unsigned int lcores;

	/* minimum lcores: one worker + main */
	lcores = 2;

	if (opt->prod_type != EVT_PROD_TYPE_ETH_RX_ADPTR) {
		evt_err("invalid producer type '%s', valid producer: '%s'",
				evt_prod_id_to_name(opt->prod_type),
				evt_prod_id_to_name(EVT_PROD_TYPE_ETH_RX_ADPTR));
		return -1;
	}

	if (!rte_eth_dev_count_avail()) {
		evt_err("test needs minimum 1 ethernet dev");
		return -1;
	}

	if (rte_lcore_count() < lcores) {
		evt_err("test needs minimum %d lcores", lcores);
		return -1;
	}

	/* Validate worker lcores */
	if (evt_lcores_has_overlap(opt->wlcores, rte_get_main_lcore())) {
		evt_err("worker lcores overlap with the main lcore");
		return -1;
	}
	if (evt_has_disabled_lcore(opt->wlcores)) {
		evt_err("one or more worker lcores are not enabled");
		return -1;
	}
	if (!evt_has_active_lcore(opt->wlcores)) {
		evt_err("minimum one worker is required");
		return -1;
	}

	if (nb_queues > EVT_MAX_QUEUES) {
		evt_err("number of queues exceeds %d", EVT_MAX_QUEUES);
		return -1;
	}
	if (pipeline_nb_event_ports(opt) > EVT_MAX_PORTS) {
		evt_err("number of ports exceeds %d", EVT_MAX_PORTS);
		return -1;
	}

	if (evt_has_invalid_stage(opt))
		return -1;

	if (evt_has_invalid_sched_type(opt))
		return -1;

	return 0;
}

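/*
 * Configure every available ethdev with a single Rx/Tx queue pair, RSS on
 * IP and promiscuous mode. t->internal_port stays set only if every
 * port's Tx adapter reports RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT.
 */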
#define NB_RX_DESC 128
#define NB_TX_DESC 512
int
pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt)
{
	uint16_t i;
	int ret;
	uint8_t nb_queues = 1;
	struct test_pipeline *t = evt_test_priv(test);
	struct rte_eth_rxconf rx_conf;
	struct rte_eth_conf port_conf = {
		.rxmode = {
			.mq_mode = ETH_MQ_RX_RSS,
		},
		.rx_adv_conf = {
			.rss_conf = {
				.rss_key = NULL,
				.rss_hf = ETH_RSS_IP,
			},
		},
	};

	if (!rte_eth_dev_count_avail()) {
		evt_err("No ethernet ports found.");
		return -ENODEV;
	}

	if (opt->max_pkt_sz < RTE_ETHER_MIN_LEN) {
		evt_err("max_pkt_sz cannot be less than %d",
				RTE_ETHER_MIN_LEN);
		return -EINVAL;
	}

	port_conf.rxmode.max_rx_pkt_len = opt->max_pkt_sz;
	if (opt->max_pkt_sz > RTE_ETHER_MAX_LEN)
		port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;

	t->internal_port = 1;
	RTE_ETH_FOREACH_DEV(i) {
		struct rte_eth_dev_info dev_info;
		struct rte_eth_conf local_port_conf = port_conf;
		uint32_t caps = 0;

		ret = rte_event_eth_tx_adapter_caps_get(opt->dev_id, i, &caps);
		if (ret != 0) {
			evt_err("failed to get event tx adapter[%d] caps", i);
			return ret;
		}

		if (!(caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT))
			t->internal_port = 0;

		ret = rte_eth_dev_info_get(i, &dev_info);
		if (ret != 0) {
			evt_err("Error during getting device (port %u) info: %s\n",
					i, strerror(-ret));
			return ret;
		}

		/* Enable mbuf fast free if PMD has the capability. */
		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
			local_port_conf.txmode.offloads |=
				DEV_TX_OFFLOAD_MBUF_FAST_FREE;

		rx_conf = dev_info.default_rxconf;
		rx_conf.offloads = port_conf.rxmode.offloads;

		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
				dev_info.flow_type_rss_offloads;
		if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
				port_conf.rx_adv_conf.rss_conf.rss_hf) {
			evt_info("Port %u modified RSS hash function based on hardware support, "
					"requested:%#"PRIx64" configured:%#"PRIx64"",
					i,
					port_conf.rx_adv_conf.rss_conf.rss_hf,
					local_port_conf.rx_adv_conf.rss_conf.rss_hf);
		}

		if (rte_eth_dev_configure(i, nb_queues, nb_queues,
					&local_port_conf) < 0) {
			evt_err("Failed to configure eth port [%d]", i);
			return -EINVAL;
		}

		if (rte_eth_rx_queue_setup(i, 0, NB_RX_DESC,
				rte_socket_id(), &rx_conf, t->pool) < 0) {
			evt_err("Failed to setup eth port [%d] rx_queue: %d.",
					i, 0);
			return -EINVAL;
		}
		if (rte_eth_tx_queue_setup(i, 0, NB_TX_DESC,
					rte_socket_id(), NULL) < 0) {
			evt_err("Failed to setup eth port [%d] tx_queue: %d.",
					i, 0);
			return -EINVAL;
		}

		ret = rte_eth_promiscuous_enable(i);
		if (ret != 0) {
			evt_err("Failed to enable promiscuous mode for eth port [%d]: %s",
					i, rte_strerror(-ret));
			return ret;
		}
	}

	return 0;
}

int
pipeline_event_port_setup(struct evt_test *test, struct evt_options *opt,
		uint8_t *queue_arr, uint8_t nb_queues,
		const struct rte_event_port_conf p_conf)
{
	int ret;
	uint8_t port;
	struct test_pipeline *t = evt_test_priv(test);

	/* setup one port per worker, linking to all queues */
	for (port = 0; port < evt_nr_active_lcores(opt->wlcores); port++) {
		struct worker_data *w = &t->worker[port];

		w->dev_id = opt->dev_id;
		w->port_id = port;
		w->t = t;
		w->processed_pkts = 0;

		ret = rte_event_port_setup(opt->dev_id, port, &p_conf);
		if (ret) {
			evt_err("failed to setup port %d", port);
			return ret;
		}

		if (rte_event_port_link(opt->dev_id, port, queue_arr, NULL,
					nb_queues) != nb_queues)
			goto link_fail;
	}

	return 0;

link_fail:
	evt_err("failed to link queues to port %d", port);
	return -EINVAL;
}

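/*
 * Create one Rx adapter per ethdev and add all of its Rx queues (queue id
 * -1) with events directed to event queue 'prod * stride'. When the PMD
 * lacks an internal event port, the adapter runs as a service that must
 * be mapped to a service core.
 */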
int
pipeline_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
		struct rte_event_port_conf prod_conf)
{
	int ret = 0;
	uint16_t prod;
	struct rte_event_eth_rx_adapter_queue_conf queue_conf;

	memset(&queue_conf, 0,
			sizeof(struct rte_event_eth_rx_adapter_queue_conf));
	queue_conf.ev.sched_type = opt->sched_type_list[0];
	RTE_ETH_FOREACH_DEV(prod) {
		uint32_t cap;

		ret = rte_event_eth_rx_adapter_caps_get(opt->dev_id,
				prod, &cap);
		if (ret) {
			evt_err("failed to get event rx adapter[%d] capabilities",
					prod);
			return ret;
		}
		queue_conf.ev.queue_id = prod * stride;
		ret = rte_event_eth_rx_adapter_create(prod, opt->dev_id,
				&prod_conf);
		if (ret) {
			evt_err("failed to create rx adapter[%d]", prod);
			return ret;
		}
		ret = rte_event_eth_rx_adapter_queue_add(prod, prod, -1,
				&queue_conf);
		if (ret) {
			evt_err("failed to add rx queues to adapter[%d]", prod);
			return ret;
		}

		if (!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
			uint32_t service_id = -1U;

			rte_event_eth_rx_adapter_service_id_get(prod,
					&service_id);
			ret = evt_service_setup(service_id);
			if (ret) {
				evt_err("Failed to setup service core"
						" for Rx adapter");
				return ret;
			}
		}

		evt_info("Rx adapter[%d] configured for port[%d]", prod,
				prod);
	}

	return ret;
}

int
pipeline_event_tx_adapter_setup(struct evt_options *opt,
		struct rte_event_port_conf port_conf)
{
	int ret = 0;
	uint16_t consm;

	RTE_ETH_FOREACH_DEV(consm) {
		uint32_t cap;

		ret = rte_event_eth_tx_adapter_caps_get(opt->dev_id,
				consm, &cap);
		if (ret) {
			evt_err("failed to get event tx adapter[%d] caps",
					consm);
			return ret;
		}

		ret = rte_event_eth_tx_adapter_create(consm, opt->dev_id,
				&port_conf);
		if (ret) {
			evt_err("failed to create tx adapter[%d]", consm);
			return ret;
		}

		ret = rte_event_eth_tx_adapter_queue_add(consm, consm, -1);
		if (ret) {
			evt_err("failed to add tx queues to adapter[%d]",
					consm);
			return ret;
		}

		if (!(cap & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT)) {
			uint32_t service_id = -1U;

			ret = rte_event_eth_tx_adapter_service_id_get(consm,
					&service_id);
			if (ret != -ESRCH && ret != 0) {
				evt_err("Failed to get Tx adapter service ID");
				return ret;
			}
			ret = evt_service_setup(service_id);
			if (ret) {
				evt_err("Failed to setup service core"
						" for Tx adapter");
				return ret;
			}
		}

		evt_info("Tx adapter[%d] configured for port[%d]", consm,
				consm);
	}

	return ret;
}

void
pipeline_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
{
	uint16_t i;
	RTE_SET_USED(test);
	RTE_SET_USED(opt);

	RTE_ETH_FOREACH_DEV(i) {
		rte_event_eth_rx_adapter_stop(i);
		rte_event_eth_tx_adapter_stop(i);
		rte_eth_dev_stop(i);
	}
}

void
pipeline_eventdev_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(test);

	rte_event_dev_stop(opt->dev_id);
	rte_event_dev_close(opt->dev_id);
}

int
pipeline_mempool_setup(struct evt_test *test, struct evt_options *opt)
{
	struct test_pipeline *t = evt_test_priv(test);
	int i, ret;

	if (!opt->mbuf_sz)
		opt->mbuf_sz = RTE_MBUF_DEFAULT_BUF_SIZE;

	if (!opt->max_pkt_sz)
		opt->max_pkt_sz = RTE_ETHER_MAX_LEN;

	RTE_ETH_FOREACH_DEV(i) {
		struct rte_eth_dev_info dev_info;
		uint16_t data_size = 0;

		memset(&dev_info, 0, sizeof(dev_info));
		ret = rte_eth_dev_info_get(i, &dev_info);
		if (ret != 0) {
			evt_err("Error during getting device (port %u) info: %s\n",
					i, strerror(-ret));
			return ret;
		}

		/* Grow the mbuf size if a port cannot receive max_pkt_sz
		 * within its per-packet segment limit.
		 */
		if (dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
				dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
			data_size = opt->max_pkt_sz /
				dev_info.rx_desc_lim.nb_mtu_seg_max;
			data_size += RTE_PKTMBUF_HEADROOM;

			if (data_size > opt->mbuf_sz)
				opt->mbuf_sz = data_size;
		}
	}

	t->pool = rte_pktmbuf_pool_create(test->name, /* mempool name */
			opt->pool_sz, /* number of elements */
			512, /* cache size */
			0, /* private data size */
			opt->mbuf_sz, /* data room size */
			opt->socket_id); /* socket id */

	if (t->pool == NULL) {
		evt_err("failed to create mempool");
		return -ENOMEM;
	}

	return 0;
}

void
pipeline_mempool_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);
	struct test_pipeline *t = evt_test_priv(test);

	rte_mempool_free(t->pool);
}

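/*
 * Allocate and initialise the per-test private state. In the evt_test
 * life cycle this runs first; the queue and atq pipeline test variants
 * then call the mempool, ethdev, eventdev port and adapter setup helpers
 * above before launching the worker lcores.
 */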
int
pipeline_test_setup(struct evt_test *test, struct evt_options *opt)
{
	void *test_pipeline;

	test_pipeline = rte_zmalloc_socket(test->name,
			sizeof(struct test_pipeline), RTE_CACHE_LINE_SIZE,
			opt->socket_id);
	if (test_pipeline == NULL) {
		evt_err("failed to allocate test_pipeline memory");
		goto nomem;
	}
	test->test_priv = test_pipeline;

	struct test_pipeline *t = evt_test_priv(test);

	t->nb_workers = evt_nr_active_lcores(opt->wlcores);
	t->outstand_pkts = opt->nb_pkts * evt_nr_active_lcores(opt->wlcores);
	t->done = false;
	t->nb_flows = opt->nb_flows;
	t->result = EVT_TEST_FAILED;
	t->opt = opt;
	opt->prod_type = EVT_PROD_TYPE_ETH_RX_ADPTR;
	memcpy(t->sched_type_list, opt->sched_type_list,
			sizeof(opt->sched_type_list));
	return 0;
nomem:
	return -ENOMEM;
}

void
pipeline_test_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);

	rte_free(test->test_priv);
}