/*
 * SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 Cavium, Inc.
 */

#include "test_pipeline_common.h"

int
pipeline_test_result(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);
	int i;
	uint64_t total = 0;
	struct test_pipeline *t = evt_test_priv(test);

	evt_info("Packet distribution across worker cores:");
	for (i = 0; i < t->nb_workers; i++)
		total += t->worker[i].processed_pkts;
	for (i = 0; i < t->nb_workers; i++)
		evt_info("Worker %d packets: "CLGRN"%"PRIx64""CLNRM" percentage:"
				CLGRN" %3.2f"CLNRM, i,
				t->worker[i].processed_pkts,
				(((double)t->worker[i].processed_pkts)/total)
				* 100);
	return t->result;
}

void
pipeline_opt_dump(struct evt_options *opt, uint8_t nb_queues)
{
	evt_dump("nb_worker_lcores", "%d", evt_nr_active_lcores(opt->wlcores));
	evt_dump_worker_lcores(opt);
	evt_dump_nb_stages(opt);
	evt_dump("nb_evdev_ports", "%d", pipeline_nb_event_ports(opt));
	evt_dump("nb_evdev_queues", "%d", nb_queues);
	evt_dump_queue_priority(opt);
	evt_dump_sched_type_list(opt);
	evt_dump_producer_type(opt);
}

static inline uint64_t
processed_pkts(struct test_pipeline *t)
{
	uint8_t i;
	uint64_t total = 0;

	for (i = 0; i < t->nb_workers; i++)
		total += t->worker[i].processed_pkts;

	return total;
}

int
pipeline_launch_lcores(struct evt_test *test, struct evt_options *opt,
		int (*worker)(void *))
{
	int ret, lcore_id;
	struct test_pipeline *t = evt_test_priv(test);

	int port_idx = 0;
	/* launch workers */
	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		if (!(opt->wlcores[lcore_id]))
			continue;

		ret = rte_eal_remote_launch(worker,
				&t->worker[port_idx], lcore_id);
		if (ret) {
			evt_err("failed to launch worker %d", lcore_id);
			return ret;
		}
		port_idx++;
	}

	uint64_t perf_cycles = rte_get_timer_cycles();
	const uint64_t perf_sample = rte_get_timer_hz();

	static float total_mpps;
	static uint64_t samples;

	uint64_t prev_pkts = 0;

	while (t->done == false) {
		const uint64_t new_cycles = rte_get_timer_cycles();

		/* print throughput roughly once per second */
		if ((new_cycles - perf_cycles) > perf_sample) {
			const uint64_t curr_pkts = processed_pkts(t);

			float mpps = (float)(curr_pkts - prev_pkts)/1000000;

			prev_pkts = curr_pkts;
			perf_cycles = new_cycles;
			total_mpps += mpps;
			++samples;
			printf(CLGRN"\r%.3f mpps avg %.3f mpps"CLNRM,
					mpps, total_mpps/samples);
			fflush(stdout);
		}
	}
	printf("\n");
	return 0;
}
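/*
 * Validate the test options: the pipeline test only supports the ethdev
 * Rx adapter producer, needs at least one ethdev and two lcores (the main
 * lcore plus one worker), and must fit within the eventdev queue and port
 * limits.
 */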
int
pipeline_opt_check(struct evt_options *opt, uint64_t nb_queues)
{
	unsigned int lcores;

	/* minimum: one worker lcore + one main lcore */
	lcores = 2;

	if (opt->prod_type != EVT_PROD_TYPE_ETH_RX_ADPTR) {
		evt_err("Invalid producer type '%s', valid producer type is '%s'",
				evt_prod_id_to_name(opt->prod_type),
				evt_prod_id_to_name(EVT_PROD_TYPE_ETH_RX_ADPTR));
		return -1;
	}

	if (!rte_eth_dev_count_avail()) {
		evt_err("test needs at least 1 ethernet dev");
		return -1;
	}

	if (rte_lcore_count() < lcores) {
		evt_err("test needs at least %d lcores", lcores);
		return -1;
	}

	/* Validate worker lcores */
	if (evt_lcores_has_overlap(opt->wlcores, rte_get_main_lcore())) {
		evt_err("worker lcores overlap with the main lcore");
		return -1;
	}
	if (evt_has_disabled_lcore(opt->wlcores)) {
		evt_err("one or more worker lcores are not enabled");
		return -1;
	}
	if (!evt_has_active_lcore(opt->wlcores)) {
		evt_err("at least one worker lcore is required");
		return -1;
	}

	if (nb_queues > EVT_MAX_QUEUES) {
		evt_err("number of queues exceeds %d", EVT_MAX_QUEUES);
		return -1;
	}
	if (pipeline_nb_event_ports(opt) > EVT_MAX_PORTS) {
		evt_err("number of ports exceeds %d", EVT_MAX_PORTS);
		return -1;
	}

	if (evt_has_invalid_stage(opt))
		return -1;

	if (evt_has_invalid_sched_type(opt))
		return -1;

	return 0;
}

#define NB_RX_DESC 128
#define NB_TX_DESC 512
int
pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt)
{
	uint16_t i;
	int ret;
	uint8_t nb_queues = 1;
	struct test_pipeline *t = evt_test_priv(test);
	struct rte_eth_rxconf rx_conf;
	struct rte_eth_conf port_conf = {
		.rxmode = {
			.mq_mode = ETH_MQ_RX_RSS,
		},
		.rx_adv_conf = {
			.rss_conf = {
				.rss_key = NULL,
				.rss_hf = ETH_RSS_IP,
			},
		},
	};

	if (!rte_eth_dev_count_avail()) {
		evt_err("No ethernet ports found.");
		return -ENODEV;
	}

	if (opt->max_pkt_sz < RTE_ETHER_MIN_LEN) {
		evt_err("max_pkt_sz cannot be less than %d",
				RTE_ETHER_MIN_LEN);
		return -EINVAL;
	}

	port_conf.rxmode.max_rx_pkt_len = opt->max_pkt_sz;
	if (opt->max_pkt_sz > RTE_ETHER_MAX_LEN)
		port_conf.rxmode.offloads |= DEV_RX_OFFLOAD_JUMBO_FRAME;

	t->internal_port = 1;
	RTE_ETH_FOREACH_DEV(i) {
		struct rte_eth_dev_info dev_info;
		struct rte_eth_conf local_port_conf = port_conf;
		uint32_t caps = 0;

		ret = rte_event_eth_tx_adapter_caps_get(opt->dev_id, i, &caps);
		if (ret != 0) {
			evt_err("failed to get event tx adapter[%d] caps", i);
			return ret;
		}

		if (!(caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT))
			t->internal_port = 0;

		ret = rte_eth_dev_info_get(i, &dev_info);
		if (ret != 0) {
			evt_err("Error during getting device (port %u) info: %s\n",
					i, strerror(-ret));
			return ret;
		}

		/* Enable mbuf fast free if PMD has the capability. */
		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
			local_port_conf.txmode.offloads |=
				DEV_TX_OFFLOAD_MBUF_FAST_FREE;

		rx_conf = dev_info.default_rxconf;
		rx_conf.offloads = port_conf.rxmode.offloads;

		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
			dev_info.flow_type_rss_offloads;
		if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
				port_conf.rx_adv_conf.rss_conf.rss_hf) {
			evt_info("Port %u modified RSS hash function based on hardware support, "
					"requested:%#"PRIx64" configured:%#"PRIx64"",
					i,
					port_conf.rx_adv_conf.rss_conf.rss_hf,
					local_port_conf.rx_adv_conf.rss_conf.rss_hf);
		}

		if (rte_eth_dev_configure(i, nb_queues, nb_queues,
					&local_port_conf) < 0) {
			evt_err("Failed to configure eth port [%d]", i);
			return -EINVAL;
		}

		if (rte_eth_rx_queue_setup(i, 0, NB_RX_DESC,
				rte_socket_id(), &rx_conf, t->pool) < 0) {
			evt_err("Failed to setup eth port [%d] rx_queue: %d.",
					i, 0);
			return -EINVAL;
		}
		if (rte_eth_tx_queue_setup(i, 0, NB_TX_DESC,
					rte_socket_id(), NULL) < 0) {
			evt_err("Failed to setup eth port [%d] tx_queue: %d.",
					i, 0);
			return -EINVAL;
		}

		ret = rte_eth_promiscuous_enable(i);
		if (ret != 0) {
			evt_err("Failed to enable promiscuous mode for eth port [%d]: %s",
					i, rte_strerror(-ret));
			return ret;
		}
	}

	return 0;
}
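/*
 * Set up one event port per active worker lcore and link each port to all
 * of the event queues in queue_arr, so any worker can dequeue events from
 * any pipeline stage.
 */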
int
pipeline_event_port_setup(struct evt_test *test, struct evt_options *opt,
		uint8_t *queue_arr, uint8_t nb_queues,
		const struct rte_event_port_conf p_conf)
{
	int ret;
	uint8_t port;
	struct test_pipeline *t = evt_test_priv(test);

	/* setup one port per worker, linking to all queues */
	for (port = 0; port < evt_nr_active_lcores(opt->wlcores); port++) {
		struct worker_data *w = &t->worker[port];

		w->dev_id = opt->dev_id;
		w->port_id = port;
		w->t = t;
		w->processed_pkts = 0;

		ret = rte_event_port_setup(opt->dev_id, port, &p_conf);
		if (ret) {
			evt_err("failed to setup port %d", port);
			return ret;
		}

		if (rte_event_port_link(opt->dev_id, port, queue_arr, NULL,
					nb_queues) != nb_queues)
			goto link_fail;
	}

	return 0;

link_fail:
	evt_err("failed to link queues to port %d", port);
	return -EINVAL;
}

int
pipeline_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
		struct rte_event_port_conf prod_conf)
{
	int ret = 0;
	uint16_t prod;
	struct rte_event_eth_rx_adapter_queue_conf queue_conf;

	memset(&queue_conf, 0,
			sizeof(struct rte_event_eth_rx_adapter_queue_conf));
	queue_conf.ev.sched_type = opt->sched_type_list[0];
	RTE_ETH_FOREACH_DEV(prod) {
		uint32_t cap;

		ret = rte_event_eth_rx_adapter_caps_get(opt->dev_id,
				prod, &cap);
		if (ret) {
			evt_err("failed to get event rx adapter[%d] capabilities",
					prod);
			return ret;
		}
		queue_conf.ev.queue_id = prod * stride;
		ret = rte_event_eth_rx_adapter_create(prod, opt->dev_id,
				&prod_conf);
		if (ret) {
			evt_err("failed to create rx adapter[%d]", prod);
			return ret;
		}
		ret = rte_event_eth_rx_adapter_queue_add(prod, prod, -1,
				&queue_conf);
		if (ret) {
			evt_err("failed to add rx queues to adapter[%d]", prod);
			return ret;
		}

		if (!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
			uint32_t service_id = -1U;

			ret = rte_event_eth_rx_adapter_service_id_get(prod,
					&service_id);
			if (ret != -ESRCH && ret != 0) {
				evt_err("Failed to get Rx adapter service ID");
				return ret;
			}
			ret = evt_service_setup(service_id);
			if (ret) {
				evt_err("Failed to setup service core for Rx adapter");
				return ret;
			}
		}

		evt_info("Port[%d] using Rx adapter[%d] configured", prod,
				prod);
	}

	return ret;
}
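/*
 * Create one Tx adapter per ethdev and add all of its Tx queues to the
 * adapter. When the adapter lacks the internal-port capability, event
 * transmission runs as a service, which is mapped to a service core here.
 */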
int
pipeline_event_tx_adapter_setup(struct evt_options *opt,
		struct rte_event_port_conf port_conf)
{
	int ret = 0;
	uint16_t consm;

	RTE_ETH_FOREACH_DEV(consm) {
		uint32_t cap;

		ret = rte_event_eth_tx_adapter_caps_get(opt->dev_id,
				consm, &cap);
		if (ret) {
			evt_err("failed to get event tx adapter[%d] caps",
					consm);
			return ret;
		}

		ret = rte_event_eth_tx_adapter_create(consm, opt->dev_id,
				&port_conf);
		if (ret) {
			evt_err("failed to create tx adapter[%d]", consm);
			return ret;
		}

		ret = rte_event_eth_tx_adapter_queue_add(consm, consm, -1);
		if (ret) {
			evt_err("failed to add tx queues to adapter[%d]",
					consm);
			return ret;
		}

		if (!(cap & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT)) {
			uint32_t service_id = -1U;

			ret = rte_event_eth_tx_adapter_service_id_get(consm,
					&service_id);
			if (ret != -ESRCH && ret != 0) {
				evt_err("Failed to get Tx adapter service ID");
				return ret;
			}
			ret = evt_service_setup(service_id);
			if (ret) {
				evt_err("Failed to setup service core for Tx adapter");
				return ret;
			}
		}

		evt_info("Port[%d] using Tx adapter[%d] configured", consm,
				consm);
	}

	return ret;
}

void
pipeline_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
{
	uint16_t i;
	RTE_SET_USED(test);
	RTE_SET_USED(opt);

	RTE_ETH_FOREACH_DEV(i) {
		rte_event_eth_rx_adapter_stop(i);
		rte_event_eth_tx_adapter_stop(i);
		rte_eth_dev_stop(i);
	}
}

void
pipeline_eventdev_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(test);

	rte_event_dev_stop(opt->dev_id);
	rte_event_dev_close(opt->dev_id);
}

int
pipeline_mempool_setup(struct evt_test *test, struct evt_options *opt)
{
	struct test_pipeline *t = evt_test_priv(test);
	int i, ret;

	if (!opt->mbuf_sz)
		opt->mbuf_sz = RTE_MBUF_DEFAULT_BUF_SIZE;

	if (!opt->max_pkt_sz)
		opt->max_pkt_sz = RTE_ETHER_MAX_LEN;

	RTE_ETH_FOREACH_DEV(i) {
		struct rte_eth_dev_info dev_info;
		uint16_t data_size = 0;

		memset(&dev_info, 0, sizeof(dev_info));
		ret = rte_eth_dev_info_get(i, &dev_info);
		if (ret != 0) {
			evt_err("Error during getting device (port %u) info: %s\n",
					i, strerror(-ret));
			return ret;
		}

		if (dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
				dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
			data_size = opt->max_pkt_sz /
				dev_info.rx_desc_lim.nb_mtu_seg_max;
			data_size += RTE_PKTMBUF_HEADROOM;

			if (data_size > opt->mbuf_sz)
				opt->mbuf_sz = data_size;
		}
	}

	t->pool = rte_pktmbuf_pool_create(test->name, /* mempool name */
			opt->pool_sz, /* number of elements */
			512, /* per-lcore cache size */
			0, /* private data size */
			opt->mbuf_sz, /* data room size */
			opt->socket_id); /* socket id */

	if (t->pool == NULL) {
		evt_err("failed to create mempool");
		return -ENOMEM;
	}

	return 0;
}

void
pipeline_mempool_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);
	struct test_pipeline *t = evt_test_priv(test);

	rte_mempool_free(t->pool);
}
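/*
 * Allocate the per-test private state on the requested socket and seed it
 * from the options. The producer type is forced to the ethdev Rx adapter,
 * the only producer this test supports (see pipeline_opt_check()).
 */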
int
pipeline_test_setup(struct evt_test *test, struct evt_options *opt)
{
	void *test_pipeline;

	test_pipeline = rte_zmalloc_socket(test->name,
			sizeof(struct test_pipeline), RTE_CACHE_LINE_SIZE,
			opt->socket_id);
	if (test_pipeline == NULL) {
		evt_err("failed to allocate test_pipeline memory");
		goto nomem;
	}
	test->test_priv = test_pipeline;

	struct test_pipeline *t = evt_test_priv(test);

	t->nb_workers = evt_nr_active_lcores(opt->wlcores);
	t->outstand_pkts = opt->nb_pkts * evt_nr_active_lcores(opt->wlcores);
	t->done = false;
	t->nb_flows = opt->nb_flows;
	t->result = EVT_TEST_FAILED;
	t->opt = opt;
	opt->prod_type = EVT_PROD_TYPE_ETH_RX_ADPTR;
	memcpy(t->sched_type_list, opt->sched_type_list,
			sizeof(opt->sched_type_list));
	return 0;
nomem:
	return -ENOMEM;
}

void
pipeline_test_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);

	rte_free(test->test_priv);
}