/*
 * BSD LICENSE
 *
 * Copyright (C) Cavium 2017.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Cavium nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdio.h>
#include <unistd.h>

#include "test_order_common.h"

/* See http://dpdk.org/doc/guides/tools/testeventdev.html for test details */

#define NB_QUEUES 2
static int
order_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
{
	int ret;

	const uint8_t nb_workers = evt_nr_active_lcores(opt->wlcores);
	/* number of active worker cores + 1 producer */
	const uint8_t nb_ports = nb_workers + 1;

	const struct rte_event_dev_config config = {
			.nb_event_queues = NB_QUEUES, /* q0 ordered, q1 atomic */
			.nb_event_ports = nb_ports,
			.nb_events_limit = 4096,
			.nb_event_queue_flows = opt->nb_flows,
			.nb_event_port_dequeue_depth = 128,
			.nb_event_port_enqueue_depth = 128,
	};

	ret = rte_event_dev_configure(opt->dev_id, &config);
	if (ret) {
		evt_err("failed to configure eventdev %d", opt->dev_id);
		return ret;
	}

	/* q0 (ordered queue) configuration */
	struct rte_event_queue_conf q0_ordered_conf = {
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
			.event_queue_cfg = RTE_EVENT_QUEUE_CFG_ORDERED_ONLY,
			.nb_atomic_flows = opt->nb_flows,
			.nb_atomic_order_sequences = opt->nb_flows,
	};
	ret = rte_event_queue_setup(opt->dev_id, 0, &q0_ordered_conf);
	if (ret) {
		evt_err("failed to setup queue0 eventdev %d", opt->dev_id);
		return ret;
	}

	/* q1 (atomic queue) configuration */
	struct rte_event_queue_conf q1_atomic_conf = {
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
			.event_queue_cfg = RTE_EVENT_QUEUE_CFG_ATOMIC_ONLY,
			.nb_atomic_flows = opt->nb_flows,
			.nb_atomic_order_sequences = opt->nb_flows,
	};
	ret = rte_event_queue_setup(opt->dev_id, 1, &q1_atomic_conf);
	if (ret) {
		evt_err("failed to setup queue1 eventdev %d", opt->dev_id);
		return ret;
	}

	/* setup one port per worker, linking to all queues */
	ret = order_event_dev_port_setup(test, opt, nb_workers, NB_QUEUES);
	if (ret)
		return ret;

	ret = rte_event_dev_start(opt->dev_id);
	if (ret) {
		evt_err("failed to start eventdev %d", opt->dev_id);
		return ret;
	}

	return 0;
}
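/*
 * Event flow in this test (per the testeventdev guide): the producer port
 * injects events carrying per-flow sequence numbers into q0 (ordered);
 * workers dequeue them and forward them to q1 (atomic), where the sequence
 * is verified per flow. A minimal sketch of the forwarding step a worker
 * performs, assuming the generic rte_event API (init, teardown, and the
 * stage-1 order check are omitted; dev_id and port are illustrative):
 *
 *	struct rte_event ev;
 *	if (rte_event_dequeue_burst(dev_id, port, &ev, 1, 0) &&
 *			ev.queue_id == 0) {
 *		ev.queue_id = 1;
 *		ev.op = RTE_EVENT_OP_FORWARD;
 *		while (rte_event_enqueue_burst(dev_id, port, &ev, 1) != 1)
 *			rte_pause();
 *	}
 */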

static void
order_queue_opt_dump(struct evt_options *opt)
{
	order_opt_dump(opt);
	evt_dump("nb_evdev_queues", "%d", NB_QUEUES);
}

static bool
order_queue_capability_check(struct evt_options *opt)
{
	struct rte_event_dev_info dev_info;

	rte_event_dev_info_get(opt->dev_id, &dev_info);
	if (dev_info.max_event_queues < NB_QUEUES || dev_info.max_event_ports <
			order_nb_event_ports(opt)) {
		evt_err("not enough eventdev queues=%d/%d or ports=%d/%d",
			NB_QUEUES, dev_info.max_event_queues,
			order_nb_event_ports(opt), dev_info.max_event_ports);
		return false;
	}

	return true;
}

static const struct evt_test_ops order_queue = {
	.cap_check        = order_queue_capability_check,
	.opt_check        = order_opt_check,
	.opt_dump         = order_queue_opt_dump,
	.test_setup       = order_test_setup,
	.mempool_setup    = order_mempool_setup,
	.eventdev_setup   = order_queue_eventdev_setup,
	.eventdev_destroy = order_eventdev_destroy,
	.mempool_destroy  = order_mempool_destroy,
	.test_result      = order_test_result,
	.test_destroy     = order_test_destroy,
};

EVT_TEST_REGISTER(order_queue);
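/*
 * Example invocation of the registered test, based on the testeventdev
 * guide linked above; the SW eventdev vdev, binary path, and core
 * assignments are illustrative, not requirements:
 *
 *	sudo ./dpdk-test-eventdev --vdev=event_sw0 -- \
 *		--test=order_queue --plcores 1 --wlcores 2,3
 */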