/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include "test_order_common.h"

int
order_test_result(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);
	struct test_order *t = evt_test_priv(test);

	return t->result;
}

static inline int
order_producer(void *arg)
{
	struct prod_data *p = arg;
	struct test_order *t = p->t;
	struct evt_options *opt = t->opt;
	const uint8_t dev_id = p->dev_id;
	const uint8_t port = p->port_id;
	struct rte_mempool *pool = t->pool;
	const uint64_t nb_pkts = t->nb_pkts;
	uint32_t *producer_flow_seq = t->producer_flow_seq;
	const uint32_t nb_flows = t->nb_flows;
	uint64_t count = 0;
	struct rte_mbuf *m;
	struct rte_event ev;

	if (opt->verbose_level > 1)
		printf("%s(): lcore %d dev_id %d port=%d queue=%d\n",
			__func__, rte_lcore_id(), dev_id, port, p->queue_id);

	ev.event = 0;
	ev.op = RTE_EVENT_OP_NEW;
	ev.queue_id = p->queue_id;
	ev.sched_type = RTE_SCHED_TYPE_ORDERED;
	ev.priority = RTE_EVENT_DEV_PRIORITY_NORMAL;
	ev.event_type = RTE_EVENT_TYPE_CPU;
	ev.sub_event_type = 0; /* stage 0 */

	while (count < nb_pkts && t->err == false) {
		m = rte_pktmbuf_alloc(pool);
		if (m == NULL)
			continue;

		const uint32_t flow = (uintptr_t)m % nb_flows;
		/* Maintain seq number per flow */
		m->seqn = producer_flow_seq[flow]++;

		ev.flow_id = flow;
		ev.mbuf = m;

		while (rte_event_enqueue_burst(dev_id, port, &ev, 1) != 1) {
			if (t->err)
				break;
			rte_pause();
		}

		count++;
	}
	return 0;
}

int
order_opt_check(struct evt_options *opt)
{
	if (opt->prod_type != EVT_PROD_TYPE_SYNT) {
		evt_err("Invalid producer type");
		return -EINVAL;
	}

	/* 1 producer + N workers + 1 master */
	if (rte_lcore_count() < 3) {
		evt_err("test needs minimum 3 lcores");
		return -1;
	}

	/* Validate worker lcores */
	if (evt_lcores_has_overlap(opt->wlcores, rte_get_master_lcore())) {
		evt_err("worker lcores overlap with master lcore");
		return -1;
	}

	if (evt_nr_active_lcores(opt->plcores) == 0) {
		evt_err("missing the producer lcore");
		return -1;
	}

	if (evt_nr_active_lcores(opt->plcores) != 1) {
		evt_err("only one producer lcore must be selected");
		return -1;
	}

	int plcore = evt_get_first_active_lcore(opt->plcores);

	if (plcore < 0) {
		evt_err("failed to find active producer");
		return plcore;
	}

	if (evt_lcores_has_overlap(opt->wlcores, plcore)) {
		evt_err("worker lcores overlap with producer lcore");
		return -1;
	}
	if (evt_has_disabled_lcore(opt->wlcores)) {
		evt_err("one or more worker lcores are not enabled");
		return -1;
	}
	if (!evt_has_active_lcore(opt->wlcores)) {
		evt_err("minimum one worker is required");
		return -1;
	}

	/* Validate producer lcore */
	if (plcore == (int)rte_get_master_lcore()) {
		evt_err("producer lcore and master lcore should be different");
		return -1;
	}
	if (!rte_lcore_is_enabled(plcore)) {
		evt_err("producer lcore is not enabled");
		return -1;
	}

	/* Fixups */
	if (opt->nb_pkts == 0)
		opt->nb_pkts = INT64_MAX;

	return 0;
}

int
order_test_setup(struct evt_test *test, struct evt_options *opt)
{
	void *test_order;

	test_order = rte_zmalloc_socket(test->name, sizeof(struct test_order),
				RTE_CACHE_LINE_SIZE, opt->socket_id);
	if (test_order == NULL) {
		evt_err("failed to allocate test_order memory");
test_order memory"); 143 goto nomem; 144 } 145 test->test_priv = test_order; 146 147 struct test_order *t = evt_test_priv(test); 148 149 t->producer_flow_seq = rte_zmalloc_socket("test_producer_flow_seq", 150 sizeof(*t->producer_flow_seq) * opt->nb_flows, 151 RTE_CACHE_LINE_SIZE, opt->socket_id); 152 153 if (t->producer_flow_seq == NULL) { 154 evt_err("failed to allocate t->producer_flow_seq memory"); 155 goto prod_nomem; 156 } 157 158 t->expected_flow_seq = rte_zmalloc_socket("test_expected_flow_seq", 159 sizeof(*t->expected_flow_seq) * opt->nb_flows, 160 RTE_CACHE_LINE_SIZE, opt->socket_id); 161 162 if (t->expected_flow_seq == NULL) { 163 evt_err("failed to allocate t->expected_flow_seq memory"); 164 goto exp_nomem; 165 } 166 rte_atomic64_set(&t->outstand_pkts, opt->nb_pkts); 167 t->err = false; 168 t->nb_pkts = opt->nb_pkts; 169 t->nb_flows = opt->nb_flows; 170 t->result = EVT_TEST_FAILED; 171 t->opt = opt; 172 return 0; 173 174 exp_nomem: 175 rte_free(t->producer_flow_seq); 176 prod_nomem: 177 rte_free(test->test_priv); 178 nomem: 179 return -ENOMEM; 180 } 181 182 void 183 order_test_destroy(struct evt_test *test, struct evt_options *opt) 184 { 185 RTE_SET_USED(opt); 186 struct test_order *t = evt_test_priv(test); 187 188 rte_free(t->expected_flow_seq); 189 rte_free(t->producer_flow_seq); 190 rte_free(test->test_priv); 191 } 192 193 int 194 order_mempool_setup(struct evt_test *test, struct evt_options *opt) 195 { 196 struct test_order *t = evt_test_priv(test); 197 198 t->pool = rte_pktmbuf_pool_create(test->name, opt->pool_sz, 199 256 /* Cache */, 0, 200 512, /* Use very small mbufs */ 201 opt->socket_id); 202 if (t->pool == NULL) { 203 evt_err("failed to create mempool"); 204 return -ENOMEM; 205 } 206 207 return 0; 208 } 209 210 void 211 order_mempool_destroy(struct evt_test *test, struct evt_options *opt) 212 { 213 RTE_SET_USED(opt); 214 struct test_order *t = evt_test_priv(test); 215 216 rte_mempool_free(t->pool); 217 } 218 219 void 220 order_eventdev_destroy(struct evt_test *test, struct evt_options *opt) 221 { 222 RTE_SET_USED(test); 223 224 rte_event_dev_stop(opt->dev_id); 225 rte_event_dev_close(opt->dev_id); 226 } 227 228 void 229 order_opt_dump(struct evt_options *opt) 230 { 231 evt_dump_producer_lcores(opt); 232 evt_dump("nb_wrker_lcores", "%d", evt_nr_active_lcores(opt->wlcores)); 233 evt_dump_worker_lcores(opt); 234 evt_dump("nb_evdev_ports", "%d", order_nb_event_ports(opt)); 235 } 236 237 int 238 order_launch_lcores(struct evt_test *test, struct evt_options *opt, 239 int (*worker)(void *)) 240 { 241 int ret, lcore_id; 242 struct test_order *t = evt_test_priv(test); 243 244 int wkr_idx = 0; 245 /* launch workers */ 246 RTE_LCORE_FOREACH_SLAVE(lcore_id) { 247 if (!(opt->wlcores[lcore_id])) 248 continue; 249 250 ret = rte_eal_remote_launch(worker, &t->worker[wkr_idx], 251 lcore_id); 252 if (ret) { 253 evt_err("failed to launch worker %d", lcore_id); 254 return ret; 255 } 256 wkr_idx++; 257 } 258 259 /* launch producer */ 260 int plcore = evt_get_first_active_lcore(opt->plcores); 261 262 ret = rte_eal_remote_launch(order_producer, &t->prod, plcore); 263 if (ret) { 264 evt_err("failed to launch order_producer %d", plcore); 265 return ret; 266 } 267 268 uint64_t cycles = rte_get_timer_cycles(); 269 int64_t old_remaining = -1; 270 271 while (t->err == false) { 272 uint64_t new_cycles = rte_get_timer_cycles(); 273 int64_t remaining = rte_atomic64_read(&t->outstand_pkts); 274 275 if (remaining <= 0) { 276 t->result = EVT_TEST_SUCCESS; 277 break; 278 } 279 280 if (new_cycles - 
		/* Once per second, print progress; if the outstanding
		 * packet count has not moved, dump the device state and
		 * flag a deadlock.
		 */
		if (new_cycles - cycles > rte_get_timer_hz() * 1) {
			printf(CLGRN"\r%"PRId64""CLNRM, remaining);
			fflush(stdout);
			if (old_remaining == remaining) {
				rte_event_dev_dump(opt->dev_id, stdout);
				evt_err("no schedule progress for a second, deadlock");
				t->err = true;
				rte_smp_wmb();
				break;
			}
			old_remaining = remaining;
			cycles = new_cycles;
		}
	}
	printf("\r");

	return 0;
}

int
order_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
				uint8_t nb_workers, uint8_t nb_queues)
{
	int ret;
	uint8_t port;
	struct test_order *t = evt_test_priv(test);
	struct rte_event_dev_info dev_info;

	memset(&dev_info, 0, sizeof(struct rte_event_dev_info));
	ret = rte_event_dev_info_get(opt->dev_id, &dev_info);
	if (ret) {
		evt_err("failed to get eventdev info %d", opt->dev_id);
		return ret;
	}

	if (opt->wkr_deq_dep > dev_info.max_event_port_dequeue_depth)
		opt->wkr_deq_dep = dev_info.max_event_port_dequeue_depth;

	/* port configuration */
	const struct rte_event_port_conf p_conf = {
			.dequeue_depth = opt->wkr_deq_dep,
			.enqueue_depth = dev_info.max_event_port_dequeue_depth,
			.new_event_threshold = dev_info.max_num_events,
	};

	/* setup one port per worker, linking to all queues */
	for (port = 0; port < nb_workers; port++) {
		struct worker_data *w = &t->worker[port];

		w->dev_id = opt->dev_id;
		w->port_id = port;
		w->t = t;

		ret = rte_event_port_setup(opt->dev_id, port, &p_conf);
		if (ret) {
			evt_err("failed to setup port %d", port);
			return ret;
		}

		ret = rte_event_port_link(opt->dev_id, port, NULL, NULL, 0);
		if (ret != nb_queues) {
			evt_err("failed to link all queues to port %d", port);
			return -EINVAL;
		}
	}
	struct prod_data *p = &t->prod;

	p->dev_id = opt->dev_id;
	p->port_id = port; /* last port */
	p->queue_id = 0;
	p->t = t;

	ret = rte_event_port_setup(opt->dev_id, port, &p_conf);
	if (ret) {
		evt_err("failed to setup producer port %d", port);
		return ret;
	}

	return ret;
}
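
/*
 * Illustrative sketch of a worker callback of the sort passed to
 * order_launch_lcores(). The real workers live in the per-test files
 * (e.g. test_order_queue.c); the name order_worker_sketch and the
 * single-stage flow below are assumptions, not part of this file. It shows
 * how the state set up above is meant to be consumed: each flow's mbuf seqn
 * is checked against expected_flow_seq[], and outstand_pkts is decremented
 * so the launcher loop can detect completion.
 */
static __rte_unused int
order_worker_sketch(void *arg)
{
	struct worker_data *w = arg;
	struct test_order *t = w->t;
	struct rte_event ev;

	while (t->err == false) {
		uint16_t nb_rx = rte_event_dequeue_burst(w->dev_id, w->port_id,
				&ev, 1, 0);

		if (!nb_rx) {
			/* Nothing scheduled; exit once every packet is done */
			if (rte_atomic64_read(&t->outstand_pkts) <= 0)
				break;
			rte_pause();
			continue;
		}

		/* Same flow mapping as the producer */
		const uint32_t flow = (uintptr_t)ev.mbuf % t->nb_flows;

		/*
		 * Per-flow order check. Assumes the scheduler delivers a
		 * given flow to only one worker at a time, so
		 * expected_flow_seq[] needs no atomics.
		 */
		if (ev.mbuf->seqn != t->expected_flow_seq[flow]) {
			evt_err("flow=%x seqn mismatch got=%x expected=%x",
				flow, ev.mbuf->seqn, t->expected_flow_seq[flow]);
			t->err = true;
			rte_smp_wmb();
		}
		t->expected_flow_seq[flow]++;

		rte_pktmbuf_free(ev.mbuf);
		rte_atomic64_sub(&t->outstand_pkts, 1);
	}
	return 0;
}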