/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include "test_perf_common.h"

/* See http://doc.dpdk.org/guides/tools/testeventdev.html for test details */

static inline int
atq_nb_event_queues(struct evt_options *opt)
{
	/* nb_queues = number of producers */
	return opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ?
		rte_eth_dev_count_avail() : evt_nr_active_lcores(opt->plcores);
}

static inline __attribute__((always_inline)) void
atq_mark_fwd_latency(struct rte_event *const ev)
{
	if (unlikely(ev->sub_event_type == 0)) {
		struct perf_elt *const m = ev->event_ptr;

		m->timestamp = rte_get_timer_cycles();
	}
}

static inline __attribute__((always_inline)) void
atq_fwd_event(struct rte_event *const ev, uint8_t *const sched_type_list,
		const uint8_t nb_stages)
{
	ev->sub_event_type++;
	ev->sched_type = sched_type_list[ev->sub_event_type % nb_stages];
	ev->op = RTE_EVENT_OP_FORWARD;
	ev->event_type = RTE_EVENT_TYPE_CPU;
}

static int
perf_atq_worker(void *arg, const int enable_fwd_latency)
{
	PERF_WORKER_INIT;
	struct rte_event ev;

	while (t->done == false) {
		uint16_t event = rte_event_dequeue_burst(dev, port, &ev, 1, 0);

		if (!event) {
			rte_pause();
			continue;
		}

		if (enable_fwd_latency && !prod_timer_type)
			/* first stage in pipeline, mark ts to compute fwd latency */
			atq_mark_fwd_latency(&ev);

		/* last stage in pipeline */
		if (unlikely((ev.sub_event_type % nb_stages) == laststage)) {
			if (enable_fwd_latency)
				cnt = perf_process_last_stage_latency(pool,
					&ev, w, bufs, sz, cnt);
			else
				cnt = perf_process_last_stage(pool, &ev, w,
					bufs, sz, cnt);
		} else {
			atq_fwd_event(&ev, sched_type_list, nb_stages);
			while (rte_event_enqueue_burst(dev, port, &ev, 1) != 1)
				rte_pause();
		}
	}
	return 0;
}

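/*
 * Burst variant of the worker above: dequeue up to BURST_SIZE events at a
 * time and, when forward-latency measurement is on, prefetch the next
 * event's payload before stamping the current one. Forwarded and released
 * events are re-enqueued as a single burst at the end of the loop.
 */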
static int
perf_atq_worker_burst(void *arg, const int enable_fwd_latency)
{
	PERF_WORKER_INIT;
	uint16_t i;
	/* +1 to avoid prefetch out of array check */
	struct rte_event ev[BURST_SIZE + 1];

	while (t->done == false) {
		uint16_t const nb_rx = rte_event_dequeue_burst(dev, port, ev,
				BURST_SIZE, 0);

		if (!nb_rx) {
			rte_pause();
			continue;
		}

		for (i = 0; i < nb_rx; i++) {
			if (enable_fwd_latency && !prod_timer_type) {
				rte_prefetch0(ev[i+1].event_ptr);
				/* first stage in pipeline.
				 * mark time stamp to compute fwd latency
				 */
				atq_mark_fwd_latency(&ev[i]);
			}
			/* last stage in pipeline */
			if (unlikely((ev[i].sub_event_type % nb_stages)
						== laststage)) {
				if (enable_fwd_latency)
					cnt = perf_process_last_stage_latency(
						pool, &ev[i], w, bufs, sz, cnt);
				else
					cnt = perf_process_last_stage(pool,
						&ev[i], w, bufs, sz, cnt);

				ev[i].op = RTE_EVENT_OP_RELEASE;
			} else {
				atq_fwd_event(&ev[i], sched_type_list,
						nb_stages);
			}
		}

		uint16_t enq;

		enq = rte_event_enqueue_burst(dev, port, ev, nb_rx);
		while (enq < nb_rx) {
			enq += rte_event_enqueue_burst(dev, port,
					ev + enq, nb_rx - enq);
		}
	}
	return 0;
}

static int
worker_wrapper(void *arg)
{
	struct worker_data *w = arg;
	struct evt_options *opt = w->t->opt;

	const bool burst = evt_has_burst_mode(w->dev_id);
	const int fwd_latency = opt->fwd_latency;

	/* allow compiler to optimize */
	if (!burst && !fwd_latency)
		return perf_atq_worker(arg, 0);
	else if (!burst && fwd_latency)
		return perf_atq_worker(arg, 1);
	else if (burst && !fwd_latency)
		return perf_atq_worker_burst(arg, 0);
	else if (burst && fwd_latency)
		return perf_atq_worker_burst(arg, 1);

	rte_panic("invalid worker\n");
}

static int
perf_atq_launch_lcores(struct evt_test *test, struct evt_options *opt)
{
	return perf_launch_lcores(test, opt, worker_wrapper);
}

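/*
 * Event device setup: one all-types (ATQ) queue per producer and one port
 * per worker lcore, plus producer ports only for synthetic producers; Rx
 * and event timer adapters inject events themselves and need no extra
 * ports.
 */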
static int
perf_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
{
	int ret;
	uint8_t queue;
	uint8_t nb_queues;
	uint8_t nb_ports;
	uint16_t prod;
	struct rte_event_dev_info dev_info;
	struct test_perf *t = evt_test_priv(test);

	nb_ports = evt_nr_active_lcores(opt->wlcores);
	nb_ports += (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ||
			opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) ?
		0 : evt_nr_active_lcores(opt->plcores);

	nb_queues = atq_nb_event_queues(opt);

	memset(&dev_info, 0, sizeof(struct rte_event_dev_info));
	ret = rte_event_dev_info_get(opt->dev_id, &dev_info);
	if (ret) {
		evt_err("failed to get eventdev info %d", opt->dev_id);
		return ret;
	}

	const struct rte_event_dev_config config = {
		.nb_event_queues = nb_queues,
		.nb_event_ports = nb_ports,
		.nb_events_limit = dev_info.max_num_events,
		.nb_event_queue_flows = opt->nb_flows,
		.nb_event_port_dequeue_depth =
			dev_info.max_event_port_dequeue_depth,
		.nb_event_port_enqueue_depth =
			dev_info.max_event_port_enqueue_depth,
	};

	ret = rte_event_dev_configure(opt->dev_id, &config);
	if (ret) {
		evt_err("failed to configure eventdev %d", opt->dev_id);
		return ret;
	}

	struct rte_event_queue_conf q_conf = {
		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES,
		.nb_atomic_flows = opt->nb_flows,
		.nb_atomic_order_sequences = opt->nb_flows,
	};
	/* queue configurations */
	for (queue = 0; queue < nb_queues; queue++) {
		ret = rte_event_queue_setup(opt->dev_id, queue, &q_conf);
		if (ret) {
			evt_err("failed to setup queue=%d", queue);
			return ret;
		}
	}

	if (opt->wkr_deq_dep > dev_info.max_event_port_dequeue_depth)
		opt->wkr_deq_dep = dev_info.max_event_port_dequeue_depth;

	/* port configuration */
	const struct rte_event_port_conf p_conf = {
		.dequeue_depth = opt->wkr_deq_dep,
		.enqueue_depth = dev_info.max_event_port_dequeue_depth,
		.new_event_threshold = dev_info.max_num_events,
	};

	ret = perf_event_dev_port_setup(test, opt, 1 /* stride */, nb_queues,
			&p_conf);
	if (ret)
		return ret;

	if (!evt_has_distributed_sched(opt->dev_id)) {
		uint32_t service_id;

		rte_event_dev_service_id_get(opt->dev_id, &service_id);
		ret = evt_service_setup(service_id);
		if (ret) {
			evt_err("No service lcore found to run event dev.");
			return ret;
		}
	}

	ret = rte_event_dev_start(opt->dev_id);
	if (ret) {
		evt_err("failed to start eventdev %d", opt->dev_id);
		return ret;
	}

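	/* The event device is running; start the producers last: ethdev
	 * ports feeding Rx adapters, event timer adapters, or nothing
	 * extra for synthetic CPU producers.
	 */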
	if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
		RTE_ETH_FOREACH_DEV(prod) {
			ret = rte_eth_dev_start(prod);
			if (ret) {
				evt_err("Ethernet dev [%d] failed to start. Using synthetic producer",
						prod);
				return ret;
			}

			ret = rte_event_eth_rx_adapter_start(prod);
			if (ret) {
				evt_err("Rx adapter[%d] start failed", prod);
				return ret;
			}
			printf("%s: Port[%d] using Rx adapter[%d] started\n",
					__func__, prod, prod);
		}
	} else if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
		for (prod = 0; prod < opt->nb_timer_adptrs; prod++) {
			ret = rte_event_timer_adapter_start(
					t->timer_adptr[prod]);
			if (ret) {
				evt_err("failed to start event timer adapter %d",
						prod);
				return ret;
			}
		}
	}

	return 0;
}

static void
perf_atq_opt_dump(struct evt_options *opt)
{
	perf_opt_dump(opt, atq_nb_event_queues(opt));
}

static int
perf_atq_opt_check(struct evt_options *opt)
{
	return perf_opt_check(opt, atq_nb_event_queues(opt));
}

static bool
perf_atq_capability_check(struct evt_options *opt)
{
	struct rte_event_dev_info dev_info;

	rte_event_dev_info_get(opt->dev_id, &dev_info);
	if (dev_info.max_event_queues < atq_nb_event_queues(opt) ||
			dev_info.max_event_ports < perf_nb_event_ports(opt)) {
		evt_err("not enough eventdev queues=%d/%d or ports=%d/%d",
			atq_nb_event_queues(opt), dev_info.max_event_queues,
			perf_nb_event_ports(opt), dev_info.max_event_ports);
		return false;
	}
	if (!evt_has_all_types_queue(opt->dev_id))
		return false;

	return true;
}

static const struct evt_test_ops perf_atq = {
	.cap_check = perf_atq_capability_check,
	.opt_check = perf_atq_opt_check,
	.opt_dump = perf_atq_opt_dump,
	.test_setup = perf_test_setup,
	.ethdev_setup = perf_ethdev_setup,
	.mempool_setup = perf_mempool_setup,
	.eventdev_setup = perf_atq_eventdev_setup,
	.launch_lcores = perf_atq_launch_lcores,
	.eventdev_destroy = perf_eventdev_destroy,
	.mempool_destroy = perf_mempool_destroy,
	.ethdev_destroy = perf_ethdev_destroy,
	.test_result = perf_test_result,
	.test_destroy = perf_test_destroy,
};

EVT_TEST_REGISTER(perf_atq);