/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include <stdio.h>
#include <unistd.h>

#include "test_order_common.h"

/* See http://doc.dpdk.org/guides/tools/testeventdev.html for test details */

static inline __attribute__((always_inline)) void
order_queue_process_stage_0(struct rte_event *const ev)
{
	ev->queue_id = 1; /* q1 atomic queue */
	ev->op = RTE_EVENT_OP_FORWARD;
	ev->sched_type = RTE_SCHED_TYPE_ATOMIC;
	ev->event_type = RTE_EVENT_TYPE_CPU;
}

static int
order_queue_worker(void *arg)
{
	ORDER_WORKER_INIT;
	struct rte_event ev;

	while (t->err == false) {
		uint16_t event = rte_event_dequeue_burst(dev_id, port,
					&ev, 1, 0);
		if (!event) {
			if (rte_atomic64_read(outstand_pkts) <= 0)
				break;
			rte_pause();
			continue;
		}

		if (ev.queue_id == 0) { /* from ordered queue */
			order_queue_process_stage_0(&ev);
			while (rte_event_enqueue_burst(dev_id, port, &ev, 1)
					!= 1)
				rte_pause();
		} else if (ev.queue_id == 1) { /* from atomic queue */
			order_process_stage_1(t, &ev, nb_flows,
					expected_flow_seq, outstand_pkts);
		} else {
			order_process_stage_invalid(t, &ev);
		}
	}
	return 0;
}
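/*
 * Burst-mode variant of the worker above: events are dequeued and
 * enqueued up to BURST_SIZE at a time. Stage 0 events are forwarded to
 * q1, while completed stage 1 events are marked RTE_EVENT_OP_RELEASE so
 * the enqueue also releases the atomic flow context held by the port.
 */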
static int
order_queue_worker_burst(void *arg)
{
	ORDER_WORKER_INIT;
	struct rte_event ev[BURST_SIZE];
	uint16_t i;

	while (t->err == false) {
		uint16_t const nb_rx = rte_event_dequeue_burst(dev_id, port, ev,
				BURST_SIZE, 0);

		if (nb_rx == 0) {
			if (rte_atomic64_read(outstand_pkts) <= 0)
				break;
			rte_pause();
			continue;
		}

		for (i = 0; i < nb_rx; i++) {
			if (ev[i].queue_id == 0) { /* from ordered queue */
				order_queue_process_stage_0(&ev[i]);
			} else if (ev[i].queue_id == 1) { /* from atomic queue */
				order_process_stage_1(t, &ev[i], nb_flows,
					expected_flow_seq, outstand_pkts);
				ev[i].op = RTE_EVENT_OP_RELEASE;
			} else {
				order_process_stage_invalid(t, &ev[i]);
			}
		}

		uint16_t enq;

		enq = rte_event_enqueue_burst(dev_id, port, ev, nb_rx);
		while (enq < nb_rx) {
			enq += rte_event_enqueue_burst(dev_id, port,
							ev + enq, nb_rx - enq);
		}
	}
	return 0;
}

static int
worker_wrapper(void *arg)
{
	struct worker_data *w = arg;
	const bool burst = evt_has_burst_mode(w->dev_id);

	if (burst)
		return order_queue_worker_burst(arg);
	else
		return order_queue_worker(arg);
}

static int
order_queue_launch_lcores(struct evt_test *test, struct evt_options *opt)
{
	return order_launch_lcores(test, opt, worker_wrapper);
}
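/*
 * Event device topology: q0 is an ordered queue fed by the producer and
 * q1 is an atomic queue that serializes each flow so stage 1 can verify
 * per-flow sequence numbers. Each worker port is linked to both queues;
 * one extra port is reserved for the producer.
 */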
#define NB_QUEUES 2
static int
order_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
{
	int ret;

	const uint8_t nb_workers = evt_nr_active_lcores(opt->wlcores);
	/* number of active worker cores + 1 producer */
	const uint8_t nb_ports = nb_workers + 1;

	ret = evt_configure_eventdev(opt, NB_QUEUES, nb_ports);
	if (ret) {
		evt_err("failed to configure eventdev %d", opt->dev_id);
		return ret;
	}

	/* q0 (ordered queue) configuration */
	struct rte_event_queue_conf q0_ordered_conf = {
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
			.schedule_type = RTE_SCHED_TYPE_ORDERED,
			.nb_atomic_flows = opt->nb_flows,
			.nb_atomic_order_sequences = opt->nb_flows,
	};
	ret = rte_event_queue_setup(opt->dev_id, 0, &q0_ordered_conf);
	if (ret) {
		evt_err("failed to setup queue0 eventdev %d", opt->dev_id);
		return ret;
	}

	/* q1 (atomic queue) configuration */
	struct rte_event_queue_conf q1_atomic_conf = {
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
			.schedule_type = RTE_SCHED_TYPE_ATOMIC,
			.nb_atomic_flows = opt->nb_flows,
			.nb_atomic_order_sequences = opt->nb_flows,
	};
	ret = rte_event_queue_setup(opt->dev_id, 1, &q1_atomic_conf);
	if (ret) {
		evt_err("failed to setup queue1 eventdev %d", opt->dev_id);
		return ret;
	}

	/* setup one port per worker, linking to all queues */
	ret = order_event_dev_port_setup(test, opt, nb_workers, NB_QUEUES);
	if (ret)
		return ret;

	if (!evt_has_distributed_sched(opt->dev_id)) {
		uint32_t service_id;

		rte_event_dev_service_id_get(opt->dev_id, &service_id);
		ret = evt_service_setup(service_id);
		if (ret) {
			evt_err("No service lcore found to run event dev.");
			return ret;
		}
	}

	ret = rte_event_dev_start(opt->dev_id);
	if (ret) {
		evt_err("failed to start eventdev %d", opt->dev_id);
		return ret;
	}

	return 0;
}

static void
order_queue_opt_dump(struct evt_options *opt)
{
	order_opt_dump(opt);
	evt_dump("nb_evdev_queues", "%d", NB_QUEUES);
}

static bool
order_queue_capability_check(struct evt_options *opt)
{
	struct rte_event_dev_info dev_info;

	rte_event_dev_info_get(opt->dev_id, &dev_info);
	if (dev_info.max_event_queues < NB_QUEUES || dev_info.max_event_ports <
			order_nb_event_ports(opt)) {
		evt_err("not enough eventdev queues=%d/%d or ports=%d/%d",
			NB_QUEUES, dev_info.max_event_queues,
			order_nb_event_ports(opt), dev_info.max_event_ports);
		return false;
	}

	return true;
}

static const struct evt_test_ops order_queue = {
	.cap_check          = order_queue_capability_check,
	.opt_check          = order_opt_check,
	.opt_dump           = order_queue_opt_dump,
	.test_setup         = order_test_setup,
	.mempool_setup      = order_mempool_setup,
	.eventdev_setup     = order_queue_eventdev_setup,
	.launch_lcores      = order_queue_launch_lcores,
	.eventdev_destroy   = order_eventdev_destroy,
	.mempool_destroy    = order_mempool_destroy,
	.test_result        = order_test_result,
	.test_destroy       = order_test_destroy,
};

EVT_TEST_REGISTER(order_queue);