/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include "test_perf_common.h"

/* See http://doc.dpdk.org/guides/tools/testeventdev.html for test details */

static inline int
atq_nb_event_queues(struct evt_options *opt)
{
	/* nb_queues = number of producers */
	return opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ?
		rte_eth_dev_count_avail() : evt_nr_active_lcores(opt->plcores);
}
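/*
 * Note: with all-types queues a single queue carries every pipeline stage
 * (the stage is encoded in ev.sub_event_type by the workers below), so
 * nb_queues scales only with the producer count. For example, 2 producer
 * lcores and 4 stages need just 2 queues here, whereas the perf_queue
 * variant of this test allocates one queue per stage per producer.
 */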
static __rte_always_inline void
atq_fwd_event(struct rte_event *const ev, uint8_t *const sched_type_list,
		const uint8_t nb_stages)
{
	ev->sub_event_type++;
	ev->sched_type = sched_type_list[ev->sub_event_type % nb_stages];
	ev->op = RTE_EVENT_OP_FORWARD;
	ev->event_type = RTE_EVENT_TYPE_CPU;
}

static int
perf_atq_worker(void *arg, const int enable_fwd_latency)
{
	struct perf_elt *pe = NULL;
	uint16_t enq = 0, deq = 0;
	struct rte_event ev;
	PERF_WORKER_INIT;
	uint8_t stage;

	while (t->done == false) {
		deq = rte_event_dequeue_burst(dev, port, &ev, 1, 0);

		if (!deq) {
			rte_pause();
			continue;
		}

		if (prod_crypto_type && (ev.event_type == RTE_EVENT_TYPE_CRYPTODEV)) {
			if (perf_handle_crypto_ev(&ev, &pe, enable_fwd_latency))
				continue;
		}

		stage = ev.sub_event_type % nb_stages;
		if (enable_fwd_latency && !prod_timer_type && stage == 0)
			/* first stage in pipeline, mark ts to compute fwd latency */
			perf_mark_fwd_latency(ev.event_ptr);

		/* last stage in pipeline */
		if (unlikely(stage == laststage)) {
			if (enable_fwd_latency)
				cnt = perf_process_last_stage_latency(pool, prod_crypto_type,
					&ev, w, bufs, sz, cnt);
			else
				cnt = perf_process_last_stage(pool, prod_crypto_type, &ev, w,
					bufs, sz, cnt);
		} else {
			atq_fwd_event(&ev, sched_type_list, nb_stages);
			do {
				enq = rte_event_enqueue_burst(dev, port, &ev, 1);
			} while (!enq && !t->done);
		}
	}

	perf_worker_cleanup(pool, dev, port, &ev, enq, deq);

	return 0;
}

static int
perf_atq_worker_burst(void *arg, const int enable_fwd_latency)
{
	/* +1 to avoid prefetch out of array check */
	struct rte_event ev[BURST_SIZE + 1];
	uint16_t enq = 0, nb_rx = 0;
	struct perf_elt *pe = NULL;
	PERF_WORKER_INIT;
	uint8_t stage;
	uint16_t i;

	while (t->done == false) {
		nb_rx = rte_event_dequeue_burst(dev, port, ev, BURST_SIZE, 0);

		if (!nb_rx) {
			rte_pause();
			continue;
		}

		for (i = 0; i < nb_rx; i++) {
			if (prod_crypto_type && (ev[i].event_type == RTE_EVENT_TYPE_CRYPTODEV)) {
				if (perf_handle_crypto_ev(&ev[i], &pe, enable_fwd_latency))
					continue;
			}

			stage = ev[i].sub_event_type % nb_stages;
			if (enable_fwd_latency && !prod_timer_type && stage == 0) {
				rte_prefetch0(ev[i+1].event_ptr);
				/* first stage in pipeline.
				 * mark time stamp to compute fwd latency
				 */
				perf_mark_fwd_latency(ev[i].event_ptr);
			}
			/* last stage in pipeline */
			if (unlikely(stage == laststage)) {
				if (enable_fwd_latency)
					cnt = perf_process_last_stage_latency(pool,
						prod_crypto_type, &ev[i], w, bufs, sz, cnt);
				else
					cnt = perf_process_last_stage(pool, prod_crypto_type,
						&ev[i], w, bufs, sz, cnt);

				ev[i].op = RTE_EVENT_OP_RELEASE;
			} else {
				atq_fwd_event(&ev[i], sched_type_list,
					nb_stages);
			}
		}

		enq = rte_event_enqueue_burst(dev, port, ev, nb_rx);
		while ((enq < nb_rx) && !t->done) {
			enq += rte_event_enqueue_burst(dev, port,
					ev + enq, nb_rx - enq);
		}
	}

	perf_worker_cleanup(pool, dev, port, ev, enq, nb_rx);

	return 0;
}
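/*
 * Illustrative sketch only (never called by this test): the shape of the
 * initial NEW event the workers above expect from a producer. The real
 * producers live in test_perf_common.c; this helper and its parameters
 * are assumptions made for illustration.
 */
static __rte_unused void
atq_example_first_event(struct rte_event *ev, uint8_t queue_id,
		uint8_t sched_type, void *obj)
{
	ev->op = RTE_EVENT_OP_NEW;
	ev->queue_id = queue_id;	/* one all-types queue per producer */
	ev->sub_event_type = 0;		/* stage 0; atq_fwd_event() bumps it per hop */
	ev->sched_type = sched_type;	/* sched_type_list[0], e.g. atomic */
	ev->event_type = RTE_EVENT_TYPE_CPU;
	ev->event_ptr = obj;		/* perf_elt from the test mempool */
}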
static int
worker_wrapper(void *arg)
{
	struct worker_data *w = arg;
	struct evt_options *opt = w->t->opt;

	const bool burst = evt_has_burst_mode(w->dev_id);
	const int fwd_latency = opt->fwd_latency;

	/* allow compiler to optimize */
	if (!burst && !fwd_latency)
		return perf_atq_worker(arg, 0);
	else if (!burst && fwd_latency)
		return perf_atq_worker(arg, 1);
	else if (burst && !fwd_latency)
		return perf_atq_worker_burst(arg, 0);
	else if (burst && fwd_latency)
		return perf_atq_worker_burst(arg, 1);

	rte_panic("invalid worker\n");
}

static int
perf_atq_launch_lcores(struct evt_test *test, struct evt_options *opt)
{
	return perf_launch_lcores(test, opt, worker_wrapper);
}

static int
perf_atq_eventdev_setup(struct evt_test *test, struct evt_options *opt)
{
	int ret;
	uint8_t queue;
	uint8_t nb_queues;
	uint8_t nb_ports;
	uint16_t prod;
	struct rte_event_dev_info dev_info;
	struct test_perf *t = evt_test_priv(test);

	nb_ports = evt_nr_active_lcores(opt->wlcores);
	nb_ports += (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR ||
			opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) ? 0 :
		evt_nr_active_lcores(opt->plcores);

	nb_queues = atq_nb_event_queues(opt);

	ret = rte_event_dev_info_get(opt->dev_id, &dev_info);
	if (ret) {
		evt_err("failed to get eventdev info %d", opt->dev_id);
		return ret;
	}

	ret = evt_configure_eventdev(opt, nb_queues, nb_ports);
	if (ret) {
		evt_err("failed to configure eventdev %d", opt->dev_id);
		return ret;
	}

	struct rte_event_queue_conf q_conf = {
		.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
		.event_queue_cfg = RTE_EVENT_QUEUE_CFG_ALL_TYPES,
		.nb_atomic_flows = opt->nb_flows,
		.nb_atomic_order_sequences = opt->nb_flows,
	};
	/* queue configurations */
	for (queue = 0; queue < nb_queues; queue++) {
		ret = rte_event_queue_setup(opt->dev_id, queue, &q_conf);
		if (ret) {
			evt_err("failed to setup queue=%d", queue);
			return ret;
		}
	}

	if (opt->wkr_deq_dep > dev_info.max_event_port_dequeue_depth)
		opt->wkr_deq_dep = dev_info.max_event_port_dequeue_depth;

	/* port configuration */
	const struct rte_event_port_conf p_conf = {
		.dequeue_depth = opt->wkr_deq_dep,
		.enqueue_depth = dev_info.max_event_port_dequeue_depth,
		.new_event_threshold = dev_info.max_num_events,
	};

	ret = perf_event_dev_port_setup(test, opt, 1 /* stride */, nb_queues,
			&p_conf);
	if (ret)
		return ret;

	if (!evt_has_distributed_sched(opt->dev_id)) {
		uint32_t service_id;

		rte_event_dev_service_id_get(opt->dev_id, &service_id);
		ret = evt_service_setup(service_id);
		if (ret) {
			evt_err("No service lcore found to run event dev.");
			return ret;
		}
	}

	ret = rte_event_dev_start(opt->dev_id);
	if (ret) {
		evt_err("failed to start eventdev %d", opt->dev_id);
		return ret;
	}

	if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
		RTE_ETH_FOREACH_DEV(prod) {
			ret = rte_eth_dev_start(prod);
			if (ret) {
				evt_err("Ethernet dev [%d] failed to start. Using synthetic producer",
						prod);
				return ret;
			}

			ret = rte_event_eth_rx_adapter_start(prod);
			if (ret) {
				evt_err("Rx adapter[%d] start failed", prod);
				return ret;
			}
			printf("%s: Port[%d] using Rx adapter[%d] started\n",
					__func__, prod, prod);
		}
	} else if (opt->prod_type == EVT_PROD_TYPE_EVENT_TIMER_ADPTR) {
		for (prod = 0; prod < opt->nb_timer_adptrs; prod++) {
			ret = rte_event_timer_adapter_start(
					t->timer_adptr[prod]);
			if (ret) {
				evt_err("failed to start event timer adapter %d",
						prod);
				return ret;
			}
		}
	} else if (opt->prod_type == EVT_PROD_TYPE_EVENT_CRYPTO_ADPTR) {
		uint8_t cdev_id, cdev_count;

		cdev_count = rte_cryptodev_count();
		for (cdev_id = 0; cdev_id < cdev_count; cdev_id++) {
			ret = rte_cryptodev_start(cdev_id);
			if (ret) {
				evt_err("Failed to start cryptodev %u",
						cdev_id);
				return ret;
			}
		}
	}

	return 0;
}
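/*
 * Rough sketch (an assumption, for illustration only): evt_has_all_types_queue(),
 * used by the capability check below, is expected to reduce to a probe of the
 * standard RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES device capability, which is what
 * the RTE_EVENT_QUEUE_CFG_ALL_TYPES queue setup above relies on.
 */
static __rte_unused bool
atq_example_has_all_types(uint8_t dev_id)
{
	struct rte_event_dev_info info;

	if (rte_event_dev_info_get(dev_id, &info))
		return false;
	return !!(info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES);
}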
static void
perf_atq_opt_dump(struct evt_options *opt)
{
	perf_opt_dump(opt, atq_nb_event_queues(opt));
}

static int
perf_atq_opt_check(struct evt_options *opt)
{
	return perf_opt_check(opt, atq_nb_event_queues(opt));
}

static bool
perf_atq_capability_check(struct evt_options *opt)
{
	struct rte_event_dev_info dev_info;

	rte_event_dev_info_get(opt->dev_id, &dev_info);
	if (dev_info.max_event_queues < atq_nb_event_queues(opt) ||
			dev_info.max_event_ports < perf_nb_event_ports(opt)) {
		evt_err("not enough eventdev queues=%d/%d or ports=%d/%d",
			atq_nb_event_queues(opt), dev_info.max_event_queues,
			perf_nb_event_ports(opt), dev_info.max_event_ports);
		return false;
	}
	if (!evt_has_all_types_queue(opt->dev_id))
		return false;

	return true;
}

static const struct evt_test_ops perf_atq = {
	.cap_check = perf_atq_capability_check,
	.opt_check = perf_atq_opt_check,
	.opt_dump = perf_atq_opt_dump,
	.test_setup = perf_test_setup,
	.ethdev_setup = perf_ethdev_setup,
	.cryptodev_setup = perf_cryptodev_setup,
	.ethdev_rx_stop = perf_ethdev_rx_stop,
	.mempool_setup = perf_mempool_setup,
	.eventdev_setup = perf_atq_eventdev_setup,
	.launch_lcores = perf_atq_launch_lcores,
	.eventdev_destroy = perf_eventdev_destroy,
	.mempool_destroy = perf_mempool_destroy,
	.ethdev_destroy = perf_ethdev_destroy,
	.cryptodev_destroy = perf_cryptodev_destroy,
	.test_result = perf_test_result,
	.test_destroy = perf_test_destroy,
};

EVT_TEST_REGISTER(perf_atq);
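/*
 * Example invocation (see the testeventdev guide linked at the top; the EAL
 * core/device arguments are platform-dependent assumptions):
 *
 *   dpdk-test-eventdev -l 0-3 -- --test=perf_atq --plcores=1 \
 *       --wlcores=2,3 --stlist=a,a --nb_pkts=1000000 --fwd_latency
 *
 * This selects the test registered above, with one producer lcore, two
 * worker lcores, and a two-stage atomic pipeline, exercising the
 * enable_fwd_latency worker paths to report per-event forward latency.
 */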