/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#include <stdio.h>
#include <unistd.h>

#include "test_order_common.h"

/* See http://doc.dpdk.org/guides/tools/testeventdev.html for test details */

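/*
 * Stage 0: an event arriving from the ordered queue (q0) is forwarded
 * to the atomic queue (q1), where flow ordering is verified.
 */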
static __rte_always_inline void
order_queue_process_stage_0(struct rte_event *const ev)
{
	ev->queue_id = 1; /* q1 atomic queue */
	ev->op = RTE_EVENT_OP_FORWARD;
	ev->sched_type = RTE_SCHED_TYPE_ATOMIC;
	ev->event_type = RTE_EVENT_TYPE_CPU;
}

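/*
 * Single-event worker: dequeue one event at a time and run it through
 * the two-stage (ordered -> atomic) pipeline until all outstanding
 * packets are consumed or the test flags an error.
 */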
static int
order_queue_worker(void *arg, const bool flow_id_cap)
{
	ORDER_WORKER_INIT;
	struct rte_event ev;

	while (t->err == false) {
		uint16_t event = rte_event_dequeue_burst(dev_id, port,
					&ev, 1, 0);
		if (!event) {
			if (rte_atomic_load_explicit(outstand_pkts, rte_memory_order_relaxed) <= 0)
				break;
			rte_pause();
			continue;
		}

		if (!flow_id_cap)
			order_flow_id_copy_from_mbuf(t, &ev);

		if (ev.queue_id == 0) { /* from ordered queue */
			order_queue_process_stage_0(&ev);
			while (rte_event_enqueue_burst(dev_id, port, &ev, 1)
					!= 1)
				rte_pause();
		} else if (ev.queue_id == 1) { /* from atomic queue */
			order_process_stage_1(t, &ev, nb_flows,
					expected_flow_seq, outstand_pkts);
		} else {
			order_process_stage_invalid(t, &ev);
		}
	}
	return 0;
}

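/*
 * Burst worker: same pipeline as order_queue_worker(), but moves up to
 * BURST_SIZE events per dequeue/enqueue call. Stage-1 events are marked
 * RTE_EVENT_OP_RELEASE so the whole burst can be enqueued back in one go.
 */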
static int
order_queue_worker_burst(void *arg, const bool flow_id_cap)
{
	ORDER_WORKER_INIT;
	struct rte_event ev[BURST_SIZE];
	uint16_t i;

	while (t->err == false) {
		uint16_t const nb_rx = rte_event_dequeue_burst(dev_id, port, ev,
				BURST_SIZE, 0);

		if (nb_rx == 0) {
			if (rte_atomic_load_explicit(outstand_pkts, rte_memory_order_relaxed) <= 0)
				break;
			rte_pause();
			continue;
		}

		for (i = 0; i < nb_rx; i++) {
			if (!flow_id_cap)
				order_flow_id_copy_from_mbuf(t, &ev[i]);

			if (ev[i].queue_id == 0) { /* from ordered queue */
				order_queue_process_stage_0(&ev[i]);
			} else if (ev[i].queue_id == 1) { /* from atomic queue */
				order_process_stage_1(t, &ev[i], nb_flows,
					expected_flow_seq, outstand_pkts);
				ev[i].op = RTE_EVENT_OP_RELEASE;
			} else {
				order_process_stage_invalid(t, &ev[i]);
			}
		}

		uint16_t enq;

		enq = rte_event_enqueue_burst(dev_id, port, ev, nb_rx);
		while (enq < nb_rx) {
			enq += rte_event_enqueue_burst(dev_id, port,
							ev + enq, nb_rx - enq);
		}
	}
	return 0;
}

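/*
 * Pick the worker variant from device capabilities at launch time.
 * Passing flow_id_cap as a compile-time constant lets the compiler
 * specialize each variant instead of branching per event.
 */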
static int
worker_wrapper(void *arg)
{
	struct worker_data *w = arg;
	const bool burst = evt_has_burst_mode(w->dev_id);
	const bool flow_id_cap = evt_has_flow_id(w->dev_id);

	if (burst) {
		if (flow_id_cap)
			return order_queue_worker_burst(arg, true);
		else
			return order_queue_worker_burst(arg, false);
	} else {
		if (flow_id_cap)
			return order_queue_worker(arg, true);
		else
			return order_queue_worker(arg, false);
	}
}

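/* Launch one worker per configured lcore via the common order-test helper. */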
static int
order_queue_launch_lcores(struct evt_test *test, struct evt_options *opt)
{
	return order_launch_lcores(test, opt, worker_wrapper);
}

#define NB_QUEUES 2
static int
order_queue_eventdev_setup(struct evt_test *test, struct evt_options *opt)
{
	int ret;

	const uint8_t nb_workers = evt_nr_active_lcores(opt->wlcores);
	/* number of active worker cores + 1 producer */
	const uint8_t nb_ports = nb_workers + 1;

	ret = evt_configure_eventdev(opt, NB_QUEUES, nb_ports);
	if (ret) {
		evt_err("failed to configure eventdev %d", opt->dev_id);
		return ret;
	}

	/* q0 (ordered queue) configuration */
	struct rte_event_queue_conf q0_ordered_conf = {
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
			.schedule_type = RTE_SCHED_TYPE_ORDERED,
			.nb_atomic_flows = opt->nb_flows,
			.nb_atomic_order_sequences = opt->nb_flows,
	};
	ret = rte_event_queue_setup(opt->dev_id, 0, &q0_ordered_conf);
	if (ret) {
		evt_err("failed to setup queue0 eventdev %d", opt->dev_id);
		return ret;
	}

	/* q1 (atomic queue) configuration */
	struct rte_event_queue_conf q1_atomic_conf = {
			.priority = RTE_EVENT_DEV_PRIORITY_NORMAL,
			.schedule_type = RTE_SCHED_TYPE_ATOMIC,
			.nb_atomic_flows = opt->nb_flows,
			.nb_atomic_order_sequences = opt->nb_flows,
	};
	ret = rte_event_queue_setup(opt->dev_id, 1, &q1_atomic_conf);
	if (ret) {
		evt_err("failed to setup queue1 eventdev %d", opt->dev_id);
		return ret;
	}

	/* setup one port per worker, linking to all queues */
	ret = order_event_dev_port_setup(test, opt, nb_workers, NB_QUEUES);
	if (ret)
		return ret;

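	/* Devices without distributed scheduling need a dedicated service
	 * core to run the event scheduler.
	 */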
	if (!evt_has_distributed_sched(opt->dev_id)) {
		uint32_t service_id;
		rte_event_dev_service_id_get(opt->dev_id, &service_id);
		ret = evt_service_setup(service_id);
		if (ret) {
			evt_err("No service lcore found to run event dev.");
			return ret;
		}
	}

	ret = rte_event_dev_start(opt->dev_id);
	if (ret) {
		evt_err("failed to start eventdev %d", opt->dev_id);
		return ret;
	}

	return 0;
}

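/* Dump the common order-test options plus the number of event queues. */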
static void
order_queue_opt_dump(struct evt_options *opt)
{
	order_opt_dump(opt);
	evt_dump("nb_evdev_queues", "%d", NB_QUEUES);
}

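/*
 * Check that the device exposes enough event queues and ports for the
 * two-queue topology before the test is configured.
 */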
static bool
order_queue_capability_check(struct evt_options *opt)
{
	struct rte_event_dev_info dev_info;

	rte_event_dev_info_get(opt->dev_id, &dev_info);
	if (dev_info.max_event_queues < NB_QUEUES || dev_info.max_event_ports <
			order_nb_event_ports(opt)) {
		evt_err("not enough eventdev queues=%d/%d or ports=%d/%d",
			NB_QUEUES, dev_info.max_event_queues,
			order_nb_event_ports(opt), dev_info.max_event_ports);
		return false;
	}

	return true;
}

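/* Test ops registered with the test-eventdev framework. */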
static const struct evt_test_ops order_queue = {
	.cap_check          = order_queue_capability_check,
	.opt_check          = order_opt_check,
	.opt_dump           = order_queue_opt_dump,
	.test_setup         = order_test_setup,
	.mempool_setup      = order_mempool_setup,
	.eventdev_setup     = order_queue_eventdev_setup,
	.launch_lcores      = order_queue_launch_lcores,
	.eventdev_destroy   = order_eventdev_destroy,
	.mempool_destroy    = order_mempool_destroy,
	.test_result        = order_test_result,
	.test_destroy       = order_test_destroy,
};

EVT_TEST_REGISTER(order_queue);