/*
 * SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 Cavium, Inc.
 */

#include "test_pipeline_common.h"

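/*
 * Print the per-worker packet distribution (count and percentage of the
 * total) and return the overall result recorded by the test.
 */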
int
pipeline_test_result(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);
	int i;
	uint64_t total = 0;
	struct test_pipeline *t = evt_test_priv(test);

	evt_info("Packet distribution across worker cores:");
	for (i = 0; i < t->nb_workers; i++)
		total += t->worker[i].processed_pkts;
	for (i = 0; i < t->nb_workers; i++)
		evt_info("Worker %d packets: "CLGRN"%"PRIx64""CLNRM" percentage:"
				CLGRN" %3.2f"CLNRM, i,
				t->worker[i].processed_pkts,
				(((double)t->worker[i].processed_pkts)/total)
				* 100);
	return t->result;
}

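/*
 * Dump the pipeline test configuration: worker lcores, stages, event
 * device ports/queues, producer type and, when enabled, the event
 * vector parameters.
 */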
void
pipeline_opt_dump(struct evt_options *opt, uint8_t nb_queues)
{
	evt_dump("nb_worker_lcores", "%d", evt_nr_active_lcores(opt->wlcores));
	evt_dump_worker_lcores(opt);
	evt_dump_nb_stages(opt);
	evt_dump("nb_evdev_ports", "%d", pipeline_nb_event_ports(opt));
	evt_dump("nb_evdev_queues", "%d", nb_queues);
	evt_dump_queue_priority(opt);
	evt_dump_sched_type_list(opt);
	evt_dump_producer_type(opt);
	evt_dump("nb_eth_rx_queues", "%d", opt->eth_queues);
	evt_dump("event_vector", "%d", opt->ena_vector);
	if (opt->ena_vector) {
		evt_dump("vector_size", "%d", opt->vector_size);
		evt_dump("vector_tmo_ns", "%" PRIu64, opt->vector_tmo_nsec);
	}
}

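/* Sum the packets processed so far across all worker threads. */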
static inline uint64_t
processed_pkts(struct test_pipeline *t)
{
	uint8_t i;
	uint64_t total = 0;

	for (i = 0; i < t->nb_workers; i++)
		total += t->worker[i].processed_pkts;

	return total;
}

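/*
 * Launch the worker function on every enabled worker lcore, then poll
 * from the main lcore until the test signals completion. Roughly once
 * per second (one timer-hz worth of cycles) the loop samples the packet
 * counters and prints the current and running-average throughput in
 * Mpps, i.e. (curr_pkts - prev_pkts) / 1e6 per sample interval.
 */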
int
pipeline_launch_lcores(struct evt_test *test, struct evt_options *opt,
		int (*worker)(void *))
{
	int ret, lcore_id;
	struct test_pipeline *t = evt_test_priv(test);

	int port_idx = 0;
	/* launch workers */
	RTE_LCORE_FOREACH_WORKER(lcore_id) {
		if (!(opt->wlcores[lcore_id]))
			continue;

		ret = rte_eal_remote_launch(worker,
				 &t->worker[port_idx], lcore_id);
		if (ret) {
			evt_err("failed to launch worker %d", lcore_id);
			return ret;
		}
		port_idx++;
	}

	uint64_t perf_cycles = rte_get_timer_cycles();
	const uint64_t perf_sample = rte_get_timer_hz();

	static float total_mpps;
	static uint64_t samples;

	uint64_t prev_pkts = 0;

	while (t->done == false) {
		const uint64_t new_cycles = rte_get_timer_cycles();

		if ((new_cycles - perf_cycles) > perf_sample) {
			const uint64_t curr_pkts = processed_pkts(t);

			float mpps = (float)(curr_pkts - prev_pkts)/1000000;

			prev_pkts = curr_pkts;
			perf_cycles = new_cycles;
			total_mpps += mpps;
			++samples;
			printf(CLGRN"\r%.3f mpps avg %.3f mpps"CLNRM,
					mpps, total_mpps/samples);
			fflush(stdout);
		}
	}
	printf("\n");
	return 0;
}

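/*
 * Validate the command-line options for the pipeline tests: producer
 * type, presence of at least one ethdev, lcore availability and
 * assignment, and queue/port/stage/sched-type limits.
 */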
int
pipeline_opt_check(struct evt_options *opt, uint64_t nb_queues)
{
	unsigned int lcores;

	/* minimum: one worker + main */
	lcores = 2;

	if (opt->prod_type != EVT_PROD_TYPE_ETH_RX_ADPTR) {
		evt_err("Invalid producer type '%s', valid producer '%s'",
			evt_prod_id_to_name(opt->prod_type),
			evt_prod_id_to_name(EVT_PROD_TYPE_ETH_RX_ADPTR));
		return -1;
	}

	if (!rte_eth_dev_count_avail()) {
		evt_err("test needs at least 1 ethernet dev");
		return -1;
	}

	if (rte_lcore_count() < lcores) {
		evt_err("test needs at least %d lcores", lcores);
		return -1;
	}

	/* Validate worker lcores */
	if (evt_lcores_has_overlap(opt->wlcores, rte_get_main_lcore())) {
		evt_err("worker lcores overlap with the main lcore");
		return -1;
	}
	if (evt_has_disabled_lcore(opt->wlcores)) {
		evt_err("one or more worker lcores are not enabled");
		return -1;
	}
	if (!evt_has_active_lcore(opt->wlcores)) {
		evt_err("at least one worker lcore is required");
		return -1;
	}

	if (nb_queues > EVT_MAX_QUEUES) {
		evt_err("number of queues exceeds %d", EVT_MAX_QUEUES);
		return -1;
	}
	if (pipeline_nb_event_ports(opt) > EVT_MAX_PORTS) {
		evt_err("number of ports exceeds %d", EVT_MAX_PORTS);
		return -1;
	}

	if (evt_has_invalid_stage(opt))
		return -1;

	if (evt_has_invalid_sched_type(opt))
		return -1;

	return 0;
}

#define NB_RX_DESC			128
#define NB_TX_DESC			512
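/*
 * Configure every available ethdev for the pipeline test: derive the
 * MTU from max_pkt_sz, enable RSS, probe the Rx/Tx adapter capabilities
 * to decide whether the event device has an internal port, and set up
 * the Rx/Tx queues with promiscuous mode enabled.
 */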
int
pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt)
{
	uint16_t i, j;
	int ret;
	uint8_t nb_queues = 1;
	struct test_pipeline *t = evt_test_priv(test);
	struct rte_eth_rxconf rx_conf;
	struct rte_eth_conf port_conf = {
		.rxmode = {
			.mq_mode = RTE_ETH_MQ_RX_RSS,
		},
		.rx_adv_conf = {
			.rss_conf = {
				.rss_key = NULL,
				.rss_hf = RTE_ETH_RSS_IP,
			},
		},
	};

	if (!rte_eth_dev_count_avail()) {
		evt_err("No ethernet ports found.");
		return -ENODEV;
	}

	if (opt->max_pkt_sz < RTE_ETHER_MIN_LEN) {
		evt_err("max_pkt_sz cannot be less than %d",
			RTE_ETHER_MIN_LEN);
		return -EINVAL;
	}

	port_conf.rxmode.mtu = opt->max_pkt_sz - RTE_ETHER_HDR_LEN -
		RTE_ETHER_CRC_LEN;

	t->internal_port = 1;
	RTE_ETH_FOREACH_DEV(i) {
		struct rte_eth_dev_info dev_info;
		struct rte_eth_conf local_port_conf = port_conf;
		uint32_t caps = 0;

		ret = rte_event_eth_tx_adapter_caps_get(opt->dev_id, i, &caps);
		if (ret != 0) {
			evt_err("failed to get event tx adapter[%d] caps", i);
			return ret;
		}

		if (!(caps & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT))
			t->internal_port = 0;

		ret = rte_event_eth_rx_adapter_caps_get(opt->dev_id, i, &caps);
		if (ret != 0) {
			evt_err("failed to get event rx adapter[%d] caps", i);
			return ret;
		}

		if (!(caps & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT))
			local_port_conf.rxmode.offloads |=
				RTE_ETH_RX_OFFLOAD_RSS_HASH;

		ret = rte_eth_dev_info_get(i, &dev_info);
		if (ret != 0) {
			evt_err("Error during getting device (port %u) info: %s\n",
				i, strerror(-ret));
			return ret;
		}

		/* Enable mbuf fast free if PMD has the capability. */
		if (dev_info.tx_offload_capa & RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE)
			local_port_conf.txmode.offloads |=
				RTE_ETH_TX_OFFLOAD_MBUF_FAST_FREE;

		rx_conf = dev_info.default_rxconf;
		rx_conf.offloads = local_port_conf.rxmode.offloads;

		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
			dev_info.flow_type_rss_offloads;
		if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
				port_conf.rx_adv_conf.rss_conf.rss_hf) {
			evt_info("Port %u modified RSS hash function based on hardware support, "
				"requested:%#"PRIx64" configured:%#"PRIx64"",
				i,
				port_conf.rx_adv_conf.rss_conf.rss_hf,
				local_port_conf.rx_adv_conf.rss_conf.rss_hf);
		}

		if (rte_eth_dev_configure(i, opt->eth_queues, nb_queues,
					  &local_port_conf) < 0) {
			evt_err("Failed to configure eth port [%d]", i);
			return -EINVAL;
		}

		for (j = 0; j < opt->eth_queues; j++) {
			if (rte_eth_rx_queue_setup(
				    i, j, NB_RX_DESC, rte_socket_id(), &rx_conf,
				    opt->per_port_pool ? t->pool[i] :
							      t->pool[0]) < 0) {
				evt_err("Failed to setup eth port [%d] rx_queue: %d.",
					i, j);
				return -EINVAL;
			}
		}

		if (rte_eth_tx_queue_setup(i, 0, NB_TX_DESC,
					rte_socket_id(), NULL) < 0) {
			evt_err("Failed to setup eth port [%d] tx_queue: %d.",
					i, 0);
			return -EINVAL;
		}

		ret = rte_eth_promiscuous_enable(i);
		if (ret != 0) {
			evt_err("Failed to enable promiscuous mode for eth port [%d]: %s",
				i, rte_strerror(-ret));
			return ret;
		}
	}

	return 0;
}

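/*
 * Set up one event port per worker lcore and link it to all the given
 * event queues, initialising the per-worker bookkeeping as we go.
 */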
int
pipeline_event_port_setup(struct evt_test *test, struct evt_options *opt,
		uint8_t *queue_arr, uint8_t nb_queues,
		const struct rte_event_port_conf p_conf)
{
	int ret;
	uint8_t port;
	struct test_pipeline *t = evt_test_priv(test);

	/* setup one port per worker, linking to all queues */
	for (port = 0; port < evt_nr_active_lcores(opt->wlcores); port++) {
		struct worker_data *w = &t->worker[port];

		w->dev_id = opt->dev_id;
		w->port_id = port;
		w->t = t;
		w->processed_pkts = 0;

		ret = rte_event_port_setup(opt->dev_id, port, &p_conf);
		if (ret) {
			evt_err("failed to setup port %d", port);
			return ret;
		}

		if (rte_event_port_link(opt->dev_id, port, queue_arr, NULL,
					nb_queues) != nb_queues)
			goto link_fail;
	}

	return 0;

link_fail:
	evt_err("failed to link queues to port %d", port);
	return -EINVAL;
}

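/*
 * Create one Rx adapter per ethdev and connect all of its Rx queues to
 * the event queue at prod * stride. When event vectorization is
 * enabled, validate the requested vector size/timeout against the
 * adapter limits and allocate the event vector mempool. Adapters that
 * lack an internal port are driven by a service core instead.
 */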
int
pipeline_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
		struct rte_event_port_conf prod_conf)
{
	int ret = 0;
	uint16_t prod;
	struct rte_mempool *vector_pool = NULL;
	struct rte_event_eth_rx_adapter_queue_conf queue_conf;

	memset(&queue_conf, 0,
			sizeof(struct rte_event_eth_rx_adapter_queue_conf));
	queue_conf.ev.sched_type = opt->sched_type_list[0];
	if (opt->ena_vector) {
		unsigned int nb_elem = (opt->pool_sz / opt->vector_size) << 1;

		nb_elem = RTE_MAX(512U, nb_elem);
		nb_elem += evt_nr_active_lcores(opt->wlcores) * 32;
		vector_pool = rte_event_vector_pool_create(
			"vector_pool", nb_elem, 32, opt->vector_size,
			opt->socket_id);
		if (vector_pool == NULL) {
			evt_err("failed to create event vector pool");
			return -ENOMEM;
		}
	}
	RTE_ETH_FOREACH_DEV(prod) {
		struct rte_event_eth_rx_adapter_vector_limits limits;
		uint32_t cap;

		ret = rte_event_eth_rx_adapter_caps_get(opt->dev_id,
				prod, &cap);
		if (ret) {
			evt_err("failed to get event rx adapter[%d] capabilities",
					prod);
			return ret;
		}

		if (opt->ena_vector) {
			memset(&limits, 0, sizeof(limits));
			ret = rte_event_eth_rx_adapter_vector_limits_get(
				opt->dev_id, prod, &limits);
			if (ret) {
				evt_err("failed to get vector limits");
				return ret;
			}

			if (opt->vector_size < limits.min_sz ||
			    opt->vector_size > limits.max_sz) {
				evt_err("Vector size [%d] not within limits min[%d] max[%d]",
					opt->vector_size, limits.min_sz,
					limits.max_sz);
				return -EINVAL;
			}

			if (limits.log2_sz &&
			    !rte_is_power_of_2(opt->vector_size)) {
				evt_err("Vector size [%d] not a power of 2",
					opt->vector_size);
				return -EINVAL;
			}

			if (opt->vector_tmo_nsec > limits.max_timeout_ns ||
			    opt->vector_tmo_nsec < limits.min_timeout_ns) {
				evt_err("Vector timeout [%" PRIu64
					"] not within limits min[%" PRIu64
					"] max[%" PRIu64 "]",
					opt->vector_tmo_nsec,
					limits.min_timeout_ns,
					limits.max_timeout_ns);
				return -EINVAL;
			}

			if (cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_EVENT_VECTOR) {
				queue_conf.vector_sz = opt->vector_size;
				queue_conf.vector_timeout_ns =
					opt->vector_tmo_nsec;
				queue_conf.rx_queue_flags |=
				RTE_EVENT_ETH_RX_ADAPTER_QUEUE_EVENT_VECTOR;
				queue_conf.vector_mp = vector_pool;
			} else {
				evt_err("Rx adapter doesn't support event vector");
				return -EINVAL;
			}
		}
		queue_conf.ev.queue_id = prod * stride;
		ret = rte_event_eth_rx_adapter_create(prod, opt->dev_id,
				&prod_conf);
		if (ret) {
			evt_err("failed to create rx adapter[%d]", prod);
			return ret;
		}
		ret = rte_event_eth_rx_adapter_queue_add(prod, prod, -1,
				&queue_conf);
		if (ret) {
			evt_err("failed to add rx queues to adapter[%d]", prod);
			return ret;
		}

		if (!(cap & RTE_EVENT_ETH_RX_ADAPTER_CAP_INTERNAL_PORT)) {
			uint32_t service_id = -1U;

			rte_event_eth_rx_adapter_service_id_get(prod,
					&service_id);
			ret = evt_service_setup(service_id);
			if (ret) {
				evt_err("Failed to setup service core for Rx adapter");
				return ret;
			}
		}

		evt_info("Port[%d] configured with Rx adapter[%d]", prod,
				prod);
	}

	return ret;
}

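/*
 * Create one Tx adapter per ethdev and register all of its Tx queues.
 * When event vectorization is enabled the adapter must advertise event
 * vector support; adapters without an internal port are driven by a
 * service core.
 */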
int
pipeline_event_tx_adapter_setup(struct evt_options *opt,
		struct rte_event_port_conf port_conf)
{
	int ret = 0;
	uint16_t consm;

	RTE_ETH_FOREACH_DEV(consm) {
		uint32_t cap;

		ret = rte_event_eth_tx_adapter_caps_get(opt->dev_id,
				consm, &cap);
		if (ret) {
			evt_err("failed to get event tx adapter[%d] caps",
					consm);
			return ret;
		}

		if (opt->ena_vector) {
			if (!(cap &
			      RTE_EVENT_ETH_TX_ADAPTER_CAP_EVENT_VECTOR)) {
				evt_err("Tx adapter doesn't support event vector");
				return -EINVAL;
			}
		}

		ret = rte_event_eth_tx_adapter_create(consm, opt->dev_id,
				&port_conf);
		if (ret) {
			evt_err("failed to create tx adapter[%d]", consm);
			return ret;
		}

		ret = rte_event_eth_tx_adapter_queue_add(consm, consm, -1);
		if (ret) {
			evt_err("failed to add tx queues to adapter[%d]",
					consm);
			return ret;
		}

		if (!(cap & RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT)) {
			uint32_t service_id = -1U;

			ret = rte_event_eth_tx_adapter_service_id_get(consm,
								   &service_id);
			if (ret != -ESRCH && ret != 0) {
				evt_err("Failed to get Tx adapter service ID");
				return ret;
			}
			ret = evt_service_setup(service_id);
			if (ret) {
				evt_err("Failed to setup service core for Tx adapter");
				return ret;
			}
		}

		evt_info("Port[%d] configured with Tx adapter[%d]", consm,
				consm);
	}

	return ret;
}

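/*
 * Free all mbufs carried by a burst of vector events and return each
 * vector back to the mempool it was allocated from.
 */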
static void
pipeline_vector_array_free(struct rte_event events[], uint16_t num)
{
	uint16_t i;

	for (i = 0; i < num; i++) {
		rte_pktmbuf_free_bulk(events[i].vec->mbufs,
				      events[i].vec->nb_elem);
		rte_mempool_put(rte_mempool_from_obj(events[i].vec),
				events[i].vec);
	}
}

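/*
 * rte_event_port_quiesce() callback: drop a single event still held by
 * the port, freeing either the vector or the mbuf it carries.
 */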
static void
pipeline_event_port_flush(uint8_t dev_id __rte_unused, struct rte_event ev,
			  void *args __rte_unused)
{
	if (ev.event_type & RTE_EVENT_TYPE_VECTOR)
		pipeline_vector_array_free(&ev, 1);
	else
		rte_pktmbuf_free(ev.mbuf);
}

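/*
 * Release the events a worker still holds when the test stops: free the
 * payload of every dequeued-but-not-enqueued event, mark the whole burst
 * RTE_EVENT_OP_RELEASE so the scheduler drops any held contexts, and
 * finally quiesce the port to flush events buffered inside it.
 */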
void
pipeline_worker_cleanup(uint8_t dev, uint8_t port, struct rte_event ev[],
			uint16_t enq, uint16_t deq)
{
	int i;

	if (!(deq - enq))
		return;

	if (deq) {
		for (i = enq; i < deq; i++) {
			if (ev[i].op == RTE_EVENT_OP_RELEASE)
				continue;
			if (ev[i].event_type & RTE_EVENT_TYPE_VECTOR)
				pipeline_vector_array_free(&ev[i], 1);
			else
				rte_pktmbuf_free(ev[i].mbuf);
		}

		for (i = 0; i < deq; i++)
			ev[i].op = RTE_EVENT_OP_RELEASE;

		rte_event_enqueue_burst(dev, port, ev, deq);
	}

	rte_event_port_quiesce(dev, port, pipeline_event_port_flush, NULL);
}

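/*
 * Stop the Rx side: halt each Rx adapter, detach its queues and stop
 * the ethdev Rx queues so no new packets enter the pipeline.
 */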
void
pipeline_ethdev_rx_stop(struct evt_test *test, struct evt_options *opt)
{
	uint16_t i, j;
	RTE_SET_USED(test);

	if (opt->prod_type == EVT_PROD_TYPE_ETH_RX_ADPTR) {
		RTE_ETH_FOREACH_DEV(i) {
			rte_event_eth_rx_adapter_stop(i);
			rte_event_eth_rx_adapter_queue_del(i, i, -1);
			for (j = 0; j < opt->eth_queues; j++)
				rte_eth_dev_rx_queue_stop(i, j);
		}
	}
}

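/* Tear down the Tx side and stop every ethdev used by the test. */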
void
pipeline_ethdev_destroy(struct evt_test *test, struct evt_options *opt)
{
	uint16_t i;
	RTE_SET_USED(test);
	RTE_SET_USED(opt);

	RTE_ETH_FOREACH_DEV(i) {
		rte_event_eth_tx_adapter_stop(i);
		rte_event_eth_tx_adapter_queue_del(i, i, -1);
		rte_eth_dev_tx_queue_stop(i, 0);
		rte_eth_dev_stop(i);
	}
}

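/* Stop and close the event device. */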
void
pipeline_eventdev_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(test);

	rte_event_dev_stop(opt->dev_id);
	rte_event_dev_close(opt->dev_id);
}

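/*
 * Create the packet mempool(s). The mbuf data room is grown when a
 * device limits the number of Rx segments per MTU (nb_mtu_seg_max) so
 * that max_pkt_sz always fits. When per-port pools are requested
 * (opt->per_port_pool) each ethdev gets its own pool; otherwise a
 * single shared pool is created.
 */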
int
pipeline_mempool_setup(struct evt_test *test, struct evt_options *opt)
{
	struct test_pipeline *t = evt_test_priv(test);
	int i, ret;

	if (!opt->mbuf_sz)
		opt->mbuf_sz = RTE_MBUF_DEFAULT_BUF_SIZE;

	if (!opt->max_pkt_sz)
		opt->max_pkt_sz = RTE_ETHER_MAX_LEN;

	RTE_ETH_FOREACH_DEV(i) {
		struct rte_eth_dev_info dev_info;
		uint16_t data_size = 0;

		memset(&dev_info, 0, sizeof(dev_info));
		ret = rte_eth_dev_info_get(i, &dev_info);
		if (ret != 0) {
			evt_err("Error during getting device (port %u) info: %s\n",
				i, strerror(-ret));
			return ret;
		}

		if (dev_info.rx_desc_lim.nb_mtu_seg_max != UINT16_MAX &&
				dev_info.rx_desc_lim.nb_mtu_seg_max != 0) {
			data_size = opt->max_pkt_sz /
				dev_info.rx_desc_lim.nb_mtu_seg_max;
			data_size += RTE_PKTMBUF_HEADROOM;

			if (data_size > opt->mbuf_sz)
				opt->mbuf_sz = data_size;
		}
		if (opt->per_port_pool) {
			char name[RTE_MEMPOOL_NAMESIZE];

			snprintf(name, RTE_MEMPOOL_NAMESIZE, "%s-%d",
				 test->name, i);
			t->pool[i] = rte_pktmbuf_pool_create(
				name,	      /* mempool name */
				opt->pool_sz, /* number of elements */
				0,	      /* cache size */
				0,	      /* private data size */
				opt->mbuf_sz, /* data room size */
				opt->socket_id);

			if (t->pool[i] == NULL) {
				evt_err("failed to create mempool %s", name);
				return -ENOMEM;
			}
		}
	}

	if (!opt->per_port_pool) {
		t->pool[0] = rte_pktmbuf_pool_create(
			test->name,   /* mempool name */
			opt->pool_sz, /* number of elements */
			0,	      /* cache size */
			0,	      /* private data size */
			opt->mbuf_sz, /* data room size */
			opt->socket_id);

		if (t->pool[0] == NULL) {
			evt_err("failed to create mempool");
			return -ENOMEM;
		}
	}

	return 0;
}

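/* Free the packet mempool(s) created by pipeline_mempool_setup(). */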
void
pipeline_mempool_destroy(struct evt_test *test, struct evt_options *opt)
{
	struct test_pipeline *t = evt_test_priv(test);
	int i;

	if (opt->per_port_pool) {
		RTE_ETH_FOREACH_DEV(i)
			rte_mempool_free(t->pool[i]);
	} else {
		rte_mempool_free(t->pool[0]);
	}
}

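/*
 * Allocate and initialise the per-test state. The producer type is
 * forced to the ethdev Rx adapter since the pipeline test always pulls
 * packets from ethernet ports.
 */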
int
pipeline_test_setup(struct evt_test *test, struct evt_options *opt)
{
	void *test_pipeline;

	test_pipeline = rte_zmalloc_socket(test->name,
			sizeof(struct test_pipeline), RTE_CACHE_LINE_SIZE,
			opt->socket_id);
	if (test_pipeline == NULL) {
		evt_err("failed to allocate test_pipeline memory");
		goto nomem;
	}
	test->test_priv = test_pipeline;

	struct test_pipeline *t = evt_test_priv(test);

	t->nb_workers = evt_nr_active_lcores(opt->wlcores);
	t->outstand_pkts = opt->nb_pkts * evt_nr_active_lcores(opt->wlcores);
	t->done = false;
	t->nb_flows = opt->nb_flows;
	t->result = EVT_TEST_FAILED;
	t->opt = opt;
	opt->prod_type = EVT_PROD_TYPE_ETH_RX_ADPTR;
	memcpy(t->sched_type_list, opt->sched_type_list,
			sizeof(opt->sched_type_list));
	return 0;
nomem:
	return -ENOMEM;
}

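/* Free the per-test state allocated in pipeline_test_setup(). */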
void
pipeline_test_destroy(struct evt_test *test, struct evt_options *opt)
{
	RTE_SET_USED(opt);

	rte_free(test->test_priv);
}
719