/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include "test_perf_common.h"

int
perf_test_result(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);
	struct test_perf *t = evt_test_priv(test);

	return t->result;
}

static inline int
perf_producer(void *arg)
{
	struct prod_data *p = arg;
	struct test_perf *t = p->t;
	struct evt_options *opt = t->opt;
	const uint8_t dev_id = p->dev_id;
	const uint8_t port = p->port_id;
	struct rte_mempool *pool = t->pool;
	const uint64_t nb_pkts = t->nb_pkts;
	const uint32_t nb_flows = t->nb_flows;
	uint32_t flow_counter = 0;
	uint64_t count = 0;
	struct perf_elt *m;
	struct rte_event ev;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d dev_id %d port=%d queue %d\n", __func__,
				rte_lcore_id(), dev_id, port, p->queue_id);

	ev.event = 0;
	ev.op = RTE_EVENT_OP_NEW;
	ev.queue_id = p->queue_id;
	ev.sched_type = t->opt->sched_type_list[0];
	ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
	ev.event_type = RTE_EVENT_TYPE_CPU;
	ev.sub_event_type = 0; /* stage 0 */

	while (count < nb_pkts && t->done == false) {
		if (rte_mempool_get(pool, (void **)&m) < 0)
			continue;

		ev.flow_id = flow_counter++ % nb_flows;
		ev.event_ptr = m;
		m->timestamp = rte_get_timer_cycles();
		while (rte_event_enqueue_burst(dev_id, port, &ev, 1) != 1) {
			if (t->done)
				break;
			rte_pause();
			m->timestamp = rte_get_timer_cycles();
		}
		count++;
	}

	return 0;
}

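/*
 * Producers are launched through this wrapper: only the synthetic
 * producer type runs a CPU producer loop here. For the other producer
 * types (e.g. ethdev Rx adapter) the lcore returns immediately, as the
 * events are injected by the adapter rather than by this function.
 */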
static int
perf_producer_wrapper(void *arg)
{
	struct prod_data *p = arg;
	struct test_perf *t = p->t;
	/* Launch the producer function only in case of synthetic producer. */
	if (t->opt->prod_type == EVT_PROD_TYPE_SYNT)
		return perf_producer(arg);
	return 0;
}

static inline uint64_t
processed_pkts(struct test_perf *t)
{
	uint8_t i;
	uint64_t total = 0;

	rte_smp_rmb();
	for (i = 0; i < t->nb_workers; i++)
		total += t->worker[i].processed_pkts;

	return total;
}

static inline uint64_t
total_latency(struct test_perf *t)
{
	uint8_t i;
	uint64_t total = 0;

	rte_smp_rmb();
	for (i = 0; i < t->nb_workers; i++)
		total += t->worker[i].latency;

	return total;
}


int
perf_launch_lcores(struct evt_test *test, struct evt_options *opt,
		int (*worker)(void *))
{
	int ret, lcore_id;
	struct test_perf *t = evt_test_priv(test);

	int port_idx = 0;
	/* launch workers */
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (!(opt->wlcores[lcore_id]))
			continue;

		ret = rte_eal_remote_launch(worker,
				&t->worker[port_idx], lcore_id);
		if (ret) {
			evt_err("failed to launch worker %d", lcore_id);
			return ret;
		}
		port_idx++;
	}

	/* launch producers */
	RTE_LCORE_FOREACH_SLAVE(lcore_id) {
		if (!(opt->plcores[lcore_id]))
			continue;

		ret = rte_eal_remote_launch(perf_producer_wrapper,
				&t->prod[port_idx], lcore_id);
		if (ret) {
			evt_err("failed to launch perf_producer %d", lcore_id);
			return ret;
		}
		port_idx++;
	}

	const uint64_t total_pkts = opt->nb_pkts *
			evt_nr_active_lcores(opt->plcores);

	uint64_t dead_lock_cycles = rte_get_timer_cycles();
	int64_t dead_lock_remaining = total_pkts;
	const uint64_t dead_lock_sample = rte_get_timer_hz() * 5;

	uint64_t perf_cycles = rte_get_timer_cycles();
	int64_t perf_remaining = total_pkts;
	const uint64_t perf_sample = rte_get_timer_hz();

	static float total_mpps;
	static uint64_t samples;

	const uint64_t freq_mhz = rte_get_timer_hz() / 1000000;
	int64_t remaining = t->outstand_pkts - processed_pkts(t);
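	/*
	 * Master lcore: sample and print the throughput (and, when enabled,
	 * the average forward latency) roughly once per second, and flag a
	 * deadlock if the synthetic producers make no progress for about
	 * five seconds.
	 */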
	while (t->done == false) {
		const uint64_t new_cycles = rte_get_timer_cycles();

		if ((new_cycles - perf_cycles) > perf_sample) {
			const uint64_t latency = total_latency(t);
			const uint64_t pkts = processed_pkts(t);

			remaining = t->outstand_pkts - pkts;
			float mpps = (float)(perf_remaining-remaining)/1000000;

			perf_remaining = remaining;
			perf_cycles = new_cycles;
			total_mpps += mpps;
			++samples;
			if (opt->fwd_latency && pkts > 0) {
				printf(CLGRN"\r%.3f mpps avg %.3f mpps [avg fwd latency %.3f us] "CLNRM,
					mpps, total_mpps/samples,
					(float)(latency/pkts)/freq_mhz);
			} else {
				printf(CLGRN"\r%.3f mpps avg %.3f mpps"CLNRM,
					mpps, total_mpps/samples);
			}
			fflush(stdout);

			if (remaining <= 0) {
				t->result = EVT_TEST_SUCCESS;
				if (opt->prod_type == EVT_PROD_TYPE_SYNT) {
					t->done = true;
					rte_smp_wmb();
					break;
				}
			}
		}

		if (new_cycles - dead_lock_cycles > dead_lock_sample &&
				opt->prod_type == EVT_PROD_TYPE_SYNT) {
			remaining = t->outstand_pkts - processed_pkts(t);
			if (dead_lock_remaining == remaining) {
				rte_event_dev_dump(opt->dev_id, stdout);
				evt_err("No schedules for seconds, deadlock");
				t->done = true;
				rte_smp_wmb();
				break;
			}
			dead_lock_remaining = remaining;
			dead_lock_cycles = new_cycles;
		}
	}
	printf("\n");
	return 0;
}

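/*
 * Event port layout: one port per worker lcore, linked to all queues,
 * followed by one port per producer lcore, left unlinked; producer
 * port 'n' is paired with queue 'n * stride'.
 */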
int
perf_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
				uint8_t stride, uint8_t nb_queues)
{
	struct test_perf *t = evt_test_priv(test);
	uint8_t port, prod;
	int ret = -1;

	/* port configuration */
	const struct rte_event_port_conf wkr_p_conf = {
			.dequeue_depth = opt->wkr_deq_dep,
			.enqueue_depth = 64,
			.new_event_threshold = 4096,
	};

	/* setup one port per worker, linking to all queues */
	for (port = 0; port < evt_nr_active_lcores(opt->wlcores);
				port++) {
		struct worker_data *w = &t->worker[port];

		w->dev_id = opt->dev_id;
		w->port_id = port;
		w->t = t;
		w->processed_pkts = 0;
		w->latency = 0;

		ret = rte_event_port_setup(opt->dev_id, port, &wkr_p_conf);
		if (ret) {
			evt_err("failed to setup port %d", port);
			return ret;
		}

		ret = rte_event_port_link(opt->dev_id, port, NULL, NULL, 0);
		if (ret != nb_queues) {
			evt_err("failed to link all queues to port %d", port);
			return -EINVAL;
		}
	}

	/* port for producers, no links */
	const struct rte_event_port_conf prod_conf = {
			.dequeue_depth = 8,
			.enqueue_depth = 32,
			.new_event_threshold = 1200,
	};
	prod = 0;
	for ( ; port < perf_nb_event_ports(opt); port++) {
		struct prod_data *p = &t->prod[port];

		p->dev_id = opt->dev_id;
		p->port_id = port;
		p->queue_id = prod * stride;
		p->t = t;

		ret = rte_event_port_setup(opt->dev_id, port, &prod_conf);
		if (ret) {
			evt_err("failed to setup port %d", port);
			return ret;
		}
		prod++;
	}

	return ret;
}

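/*
 * Validate the option set before device setup: worker and producer lcore
 * masks must not overlap each other or the master lcore, the stage and
 * sched type lists must be valid, and the queue and port counts must stay
 * within EVT_MAX_QUEUES/EVT_MAX_PORTS.
 */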
int
perf_opt_check(struct evt_options *opt, uint64_t nb_queues)
{
	unsigned int lcores;

	/* N producer + N worker + 1 master when producer cores are used
	 * Else N worker + 1 master when Rx adapter is used
	 */
	lcores = opt->prod_type == EVT_PROD_TYPE_SYNT ? 3 : 2;

	if (rte_lcore_count() < lcores) {
		evt_err("test need minimum %d lcores", lcores);
		return -1;
	}

	/* Validate worker lcores */
	if (evt_lcores_has_overlap(opt->wlcores, rte_get_master_lcore())) {
		evt_err("worker lcores overlaps with master lcore");
		return -1;
	}
	if (evt_lcores_has_overlap_multi(opt->wlcores, opt->plcores)) {
		evt_err("worker lcores overlaps producer lcores");
		return -1;
	}
	if (evt_has_disabled_lcore(opt->wlcores)) {
		evt_err("one or more workers lcores are not enabled");
		return -1;
	}
	if (!evt_has_active_lcore(opt->wlcores)) {
		evt_err("minimum one worker is required");
		return -1;
	}

	if (opt->prod_type == EVT_PROD_TYPE_SYNT) {
		/* Validate producer lcores */
		if (evt_lcores_has_overlap(opt->plcores,
					rte_get_master_lcore())) {
			evt_err("producer lcores overlaps with master lcore");
			return -1;
		}
		if (evt_has_disabled_lcore(opt->plcores)) {
			evt_err("one or more producer lcores are not enabled");
			return -1;
		}
		if (!evt_has_active_lcore(opt->plcores)) {
			evt_err("minimum one producer is required");
			return -1;
		}
	}

	if (evt_has_invalid_stage(opt))
		return -1;

	if (evt_has_invalid_sched_type(opt))
		return -1;

	if (nb_queues > EVT_MAX_QUEUES) {
		evt_err("number of queues exceeds %d", EVT_MAX_QUEUES);
		return -1;
	}
	if (perf_nb_event_ports(opt) > EVT_MAX_PORTS) {
		evt_err("number of ports exceeds %d", EVT_MAX_PORTS);
		return -1;
	}

	/* Fixups */
	if (opt->nb_stages == 1 && opt->fwd_latency) {
		evt_info("fwd_latency is valid when nb_stages > 1, disabling");
		opt->fwd_latency = 0;
	}
	if (opt->fwd_latency && !opt->q_priority) {
		evt_info("enabled queue priority for latency measurement");
		opt->q_priority = 1;
	}
	if (opt->nb_pkts == 0)
		opt->nb_pkts = INT64_MAX/evt_nr_active_lcores(opt->plcores);

	return 0;
}

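/* Dump the effective test configuration (lcores, stages, ports, queues). */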
void
perf_opt_dump(struct evt_options *opt, uint8_t nb_queues)
{
	evt_dump("nb_prod_lcores", "%d", evt_nr_active_lcores(opt->plcores));
	evt_dump_producer_lcores(opt);
	evt_dump("nb_worker_lcores", "%d", evt_nr_active_lcores(opt->wlcores));
	evt_dump_worker_lcores(opt);
	evt_dump_nb_stages(opt);
	evt_dump("nb_evdev_ports", "%d", perf_nb_event_ports(opt));
	evt_dump("nb_evdev_queues", "%d", nb_queues);
	evt_dump_queue_priority(opt);
	evt_dump_sched_type_list(opt);
	evt_dump_producer_type(opt);
}

void
perf_eventdev_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(test);

	rte_event_dev_stop(opt->dev_id);
	rte_event_dev_close(opt->dev_id);
}

static inline void
perf_elt_init(struct rte_mempool *mp, void *arg __rte_unused,
		void *obj, unsigned i __rte_unused)
{
	memset(obj, 0, mp->elt_size);
}

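/*
 * Ethdev setup used with the Rx adapter producer type: every available
 * port gets a single RSS Rx queue and a single Tx queue and is put into
 * promiscuous mode. For the synthetic producer type this is a no-op.
 */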
#define NB_RX_DESC	128
#define NB_TX_DESC	512
int
perf_ethdev_setup(struct evt_test *test, struct evt_options *opt)
{
	int i;
	struct test_perf *t = evt_test_priv(test);
	struct rte_eth_conf port_conf = {
		.rxmode = {
			.mq_mode = ETH_MQ_RX_RSS,
			.max_rx_pkt_len = ETHER_MAX_LEN,
			.split_hdr_size = 0,
			.header_split = 0,
			.hw_ip_checksum = 0,
			.hw_vlan_filter = 0,
			.hw_vlan_strip = 0,
			.hw_vlan_extend = 0,
			.jumbo_frame = 0,
			.hw_strip_crc = 1,
		},
		.rx_adv_conf = {
			.rss_conf = {
				.rss_key = NULL,
				.rss_hf = ETH_RSS_IP,
			},
		},
	};

	if (opt->prod_type == EVT_PROD_TYPE_SYNT)
		return 0;

	if (!rte_eth_dev_count()) {
		evt_err("No ethernet ports found.");
		return -ENODEV;
	}

	for (i = 0; i < rte_eth_dev_count(); i++) {

		if (rte_eth_dev_configure(i, 1, 1, &port_conf) < 0) {
			evt_err("Failed to configure eth port [%d]", i);
			return -EINVAL;
		}

		if (rte_eth_rx_queue_setup(i, 0, NB_RX_DESC,
				rte_socket_id(), NULL, t->pool) < 0) {
			evt_err("Failed to setup eth port [%d] rx_queue: %d.",
					i, 0);
			return -EINVAL;
		}

		if (rte_eth_tx_queue_setup(i, 0, NB_TX_DESC,
				rte_socket_id(), NULL) < 0) {
			evt_err("Failed to setup eth port [%d] tx_queue: %d.",
					i, 0);
			return -EINVAL;
		}

		rte_eth_promiscuous_enable(i);
	}

	return 0;
}

void perf_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
{
	int i;
	RTE_SET_USED(test);

	if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
		for (i = 0; i < rte_eth_dev_count(); i++) {
			rte_eth_dev_stop(i);
			rte_eth_dev_close(i);
		}
	}
}

int
perf_mempool_setup(struct evt_test *test, struct evt_options *opt)
{
	struct test_perf *t = evt_test_priv(test);

	if (opt->prod_type == EVT_PROD_TYPE_SYNT) {
		t->pool = rte_mempool_create(test->name, /* mempool name */
				opt->pool_sz, /* number of elements*/
				sizeof(struct perf_elt), /* element size*/
				512, /* cache size*/
				0, NULL, NULL,
				perf_elt_init, /* obj constructor */
				NULL, opt->socket_id, 0); /* flags */
	} else {
		t->pool = rte_pktmbuf_pool_create(test->name, /* mempool name */
				opt->pool_sz, /* number of elements*/
				512, /* cache size*/
				0, /* private data size */
				RTE_MBUF_DEFAULT_BUF_SIZE,
				opt->socket_id); /* socket id */
	}

	if (t->pool == NULL) {
		evt_err("failed to create mempool");
		return -ENOMEM;
	}

	return 0;
}

void
perf_mempool_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);
	struct test_perf *t = evt_test_priv(test);

	rte_mempool_free(t->pool);
}

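/*
 * Allocate the per-test private state and seed it from the options;
 * outstand_pkts is the total number of events expected from all producers.
 */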
int
perf_test_setup(struct evt_test *test, struct evt_options *opt)
{
	void *test_perf;

	test_perf = rte_zmalloc_socket(test->name, sizeof(struct test_perf),
				RTE_CACHE_LINE_SIZE, opt->socket_id);
	if (test_perf == NULL) {
		evt_err("failed to allocate test_perf memory");
		goto nomem;
	}
	test->test_priv = test_perf;

	struct test_perf *t = evt_test_priv(test);

	t->outstand_pkts = opt->nb_pkts * evt_nr_active_lcores(opt->plcores);
	t->nb_workers = evt_nr_active_lcores(opt->wlcores);
	t->done = false;
	t->nb_pkts = opt->nb_pkts;
	t->nb_flows = opt->nb_flows;
	t->result = EVT_TEST_FAILED;
	t->opt = opt;
	memcpy(t->sched_type_list, opt->sched_type_list,
			sizeof(opt->sched_type_list));
	return 0;
nomem:
	return -ENOMEM;
}

void
perf_test_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);

	rte_free(test->test_priv);
}