/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include "test_order_common.h"

int
order_test_result(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);
	struct test_order *t = evt_test_priv(test);

	return t->result;
}

static inline int
order_producer(void *arg)
{
	struct prod_data *p = arg;
	struct test_order *t = p->t;
	struct evt_options *opt = t->opt;
	const uint8_t dev_id = p->dev_id;
	const uint8_t port = p->port_id;
	struct rte_mempool *pool = t->pool;
	const uint64_t nb_pkts = t->nb_pkts;
	uint32_t *producer_flow_seq = t->producer_flow_seq;
	const uint32_t nb_flows = t->nb_flows;
	uint64_t count = 0;
	struct rte_mbuf *m;
	struct rte_event ev;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d dev_id %d port=%d queue=%d\n",
			__func__, rte_lcore_id(), dev_id, port, p->queue_id);

	ev.event = 0;
	ev.op = RTE_EVENT_OP_NEW;
	ev.queue_id = p->queue_id;
	ev.sched_type = RTE_SCHED_TYPE_ORDERED;
	ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
	ev.event_type = RTE_EVENT_TYPE_CPU;
	ev.sub_event_type = 0; /* stage 0 */

	while (count < nb_pkts && t->err == false) {
		m = rte_pktmbuf_alloc(pool);
		if (m == NULL)
			continue;

		const uint32_t flow = (uintptr_t)m % nb_flows;
		/* Maintain seq number per flow */
		m->seqn = producer_flow_seq[flow]++;
		m->udata64 = flow;

		ev.flow_id = flow;
		ev.mbuf = m;

		while (rte_event_enqueue_burst(dev_id, port, &ev, 1) != 1) {
			if (t->err)
				break;
			rte_pause();
		}

		count++;
	}
	return 0;
}

int
order_opt_check(struct evt_options *opt)
{
	if (opt->prod_type != EVT_PROD_TYPE_SYNT) {
		evt_err("Invalid producer type '%s', valid producer '%s'",
			evt_prod_id_to_name(opt->prod_type),
			evt_prod_id_to_name(EVT_PROD_TYPE_SYNT));
		return -1;
	}

	/* 1 producer + N workers + main */
	if (rte_lcore_count() < 3) {
		evt_err("test needs a minimum of 3 lcores");
		return -1;
	}

	/* Validate worker lcores */
	if (evt_lcores_has_overlap(opt->wlcores, rte_get_main_lcore())) {
		evt_err("worker lcores overlap with the main lcore");
		return -1;
	}

	if (evt_nr_active_lcores(opt->plcores) == 0) {
		evt_err("missing the producer lcore");
		return -1;
	}

	if (evt_nr_active_lcores(opt->plcores) != 1) {
		evt_err("only one producer lcore must be selected");
		return -1;
	}

	int plcore = evt_get_first_active_lcore(opt->plcores);

	if (plcore < 0) {
		evt_err("failed to find active producer");
		return plcore;
	}

	if (evt_lcores_has_overlap(opt->wlcores, plcore)) {
		evt_err("worker lcores overlap with the producer lcore");
		return -1;
	}
	if (evt_has_disabled_lcore(opt->wlcores)) {
		evt_err("one or more worker lcores are not enabled");
		return -1;
	}
	if (!evt_has_active_lcore(opt->wlcores)) {
		evt_err("minimum one worker is required");
		return -1;
	}

	/* Validate producer lcore */
	if (plcore == (int)rte_get_main_lcore()) {
		evt_err("producer lcore and main lcore should be different");
		return -1;
	}
	if (!rte_lcore_is_enabled(plcore)) {
		evt_err("producer lcore is not enabled");
		return -1;
	}

	/* Fixups */
	if (opt->nb_pkts == 0)
		opt->nb_pkts = INT64_MAX;

	return 0;
}

int
order_test_setup(struct evt_test *test, struct evt_options *opt)
{
	void *test_order;

	test_order = rte_zmalloc_socket(test->name, sizeof(struct test_order),
				RTE_CACHE_LINE_SIZE, opt->socket_id);
	if (test_order == NULL) {
		evt_err("failed to allocate test_order memory");
		goto nomem;
	}
	test->test_priv = test_order;

	struct test_order *t = evt_test_priv(test);

	t->producer_flow_seq = rte_zmalloc_socket("test_producer_flow_seq",
				sizeof(*t->producer_flow_seq) * opt->nb_flows,
				RTE_CACHE_LINE_SIZE, opt->socket_id);

	if (t->producer_flow_seq == NULL) {
		evt_err("failed to allocate t->producer_flow_seq memory");
		goto prod_nomem;
	}

	t->expected_flow_seq = rte_zmalloc_socket("test_expected_flow_seq",
				sizeof(*t->expected_flow_seq) * opt->nb_flows,
				RTE_CACHE_LINE_SIZE, opt->socket_id);

	if (t->expected_flow_seq == NULL) {
		evt_err("failed to allocate t->expected_flow_seq memory");
		goto exp_nomem;
	}
	rte_atomic64_set(&t->outstand_pkts, opt->nb_pkts);
	t->err = false;
	t->nb_pkts = opt->nb_pkts;
	t->nb_flows = opt->nb_flows;
	t->result = EVT_TEST_FAILED;
	t->opt = opt;
	return 0;

exp_nomem:
	rte_free(t->producer_flow_seq);
prod_nomem:
	rte_free(test->test_priv);
nomem:
	return -ENOMEM;
}

void
order_test_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);
	struct test_order *t = evt_test_priv(test);

	rte_free(t->expected_flow_seq);
	rte_free(t->producer_flow_seq);
	rte_free(test->test_priv);
}

int
order_mempool_setup(struct evt_test *test, struct evt_options *opt)
{
	struct test_order *t = evt_test_priv(test);

	t->pool = rte_pktmbuf_pool_create(test->name, opt->pool_sz,
			256 /* Cache */, 0,
			512, /* Use very small mbufs */
			opt->socket_id);
	if (t->pool == NULL) {
		evt_err("failed to create mempool");
		return -ENOMEM;
	}

	return 0;
}

void
order_mempool_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);
	struct test_order *t = evt_test_priv(test);

	rte_mempool_free(t->pool);
}

void
order_eventdev_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(test);

	rte_event_dev_stop(opt->dev_id);
	rte_event_dev_close(opt->dev_id);
}

void
order_opt_dump(struct evt_options *opt)
{
	evt_dump_producer_lcores(opt);
	evt_dump("nb_worker_lcores", "%d", evt_nr_active_lcores(opt->wlcores));
	evt_dump_worker_lcores(opt);
	evt_dump("nb_evdev_ports", "%d", order_nb_event_ports(opt));
}

int
order_launch_lcores(struct evt_test *test, struct evt_options *opt,
		int (*worker)(void *))
{
	int ret, lcore_id;
	struct test_order *t = evt_test_priv(test);

	int wkr_idx = 0;
	/* launch workers */
	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		if (!(opt->wlcores[lcore_id]))
			continue;

		ret = rte_eal_remote_launch(worker, &t->worker[wkr_idx],
				lcore_id);
		if (ret) {
			evt_err("failed to launch worker %d", lcore_id);
			return ret;
		}
		wkr_idx++;
	}

	/* launch producer */
	int plcore = evt_get_first_active_lcore(opt->plcores);

	ret = rte_eal_remote_launch(order_producer, &t->prod, plcore);
	if (ret) {
		evt_err("failed to launch order_producer %d", plcore);
		return ret;
	}

	uint64_t cycles = rte_get_timer_cycles();
	int64_t old_remaining = -1;

	while (t->err == false) {
		uint64_t new_cycles = rte_get_timer_cycles();
		int64_t remaining = rte_atomic64_read(&t->outstand_pkts);

		if (remaining <= 0) {
			t->result = EVT_TEST_SUCCESS;
			break;
		}

		if (new_cycles - cycles > rte_get_timer_hz() * 1) {
			printf(CLGRN"\r%"PRId64""CLNRM, remaining);
			fflush(stdout);
			if (old_remaining == remaining) {
				rte_event_dev_dump(opt->dev_id, stdout);
				evt_err("No schedules for a second, deadlock");
				t->err = true;
				rte_smp_wmb();
				break;
			}
			old_remaining = remaining;
			cycles = new_cycles;
		}
	}
	printf("\r");

	return 0;
}

int
order_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
		uint8_t nb_workers, uint8_t nb_queues)
{
	int ret;
	uint8_t port;
	struct test_order *t = evt_test_priv(test);
	struct rte_event_dev_info dev_info;

	memset(&dev_info, 0, sizeof(struct rte_event_dev_info));
	ret = rte_event_dev_info_get(opt->dev_id, &dev_info);
	if (ret) {
		evt_err("failed to get eventdev info %d", opt->dev_id);
		return ret;
	}

	if (opt->wkr_deq_dep > dev_info.max_event_port_dequeue_depth)
		opt->wkr_deq_dep = dev_info.max_event_port_dequeue_depth;

	/* port configuration */
	const struct rte_event_port_conf p_conf = {
		.dequeue_depth = opt->wkr_deq_dep,
		.enqueue_depth = dev_info.max_event_port_dequeue_depth,
		.new_event_threshold = dev_info.max_num_events,
	};

	/* setup one port per worker, linking to all queues */
	for (port = 0; port < nb_workers; port++) {
		struct worker_data *w = &t->worker[port];

		w->dev_id = opt->dev_id;
		w->port_id = port;
		w->t = t;

		ret = rte_event_port_setup(opt->dev_id, port, &p_conf);
		if (ret) {
			evt_err("failed to setup port %d", port);
			return ret;
		}

		ret = rte_event_port_link(opt->dev_id, port, NULL, NULL, 0);
		if (ret != nb_queues) {
			evt_err("failed to link all queues to port %d", port);
			return -EINVAL;
		}
	}
	struct prod_data *p = &t->prod;

	p->dev_id = opt->dev_id;
	p->port_id = port; /* last port */
	p->queue_id = 0;
	p->t = t;

	ret = rte_event_port_setup(opt->dev_id, port, &p_conf);
	if (ret) {
		evt_err("failed to setup producer port %d", port);
		return ret;
	}

	return ret;
}