/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#ifndef _TEST_ORDER_COMMON_
#define _TEST_ORDER_COMMON_

#include <stdio.h>
#include <stdbool.h>

#include <rte_cycles.h>
#include <rte_eventdev.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mbuf_dyn.h>

#include "evt_common.h"
#include "evt_options.h"
#include "evt_test.h"

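/* Event burst size used by the burst-mode worker loops. */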
#define BURST_SIZE 16

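/* Types of the per-packet flow id and sequence number mbuf dynamic fields. */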
typedef uint32_t flow_id_t;
typedef uint32_t seqn_t;

struct test_order;

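/* Per-worker context: event device/port pair and a back-pointer to the test. */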
struct worker_data {
	uint8_t dev_id;
	uint8_t port_id;
	struct test_order *t;
};

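/* Producer context: event device, port and target queue, plus test back-pointer. */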
struct prod_data {
	uint8_t dev_id;
	uint8_t port_id;
	uint8_t queue_id;
	struct test_order *t;
};

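/* State shared by the producer and all workers; cache aligned to avoid
 * false sharing between lcores.
 */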
struct __rte_cache_aligned test_order {
	/* Don't change the offset of "err". The signal handler uses this
	 * memory to terminate the work on all lcores.
	 */
	int err;
	/*
	 * Atomic operations are expensive. Since this is a functional test,
	 * we use them to reduce the code complexity.
	 */
	RTE_ATOMIC(uint64_t) outstand_pkts;
	enum evt_test_result result;
	uint32_t nb_flows;
	uint64_t nb_pkts;
	struct rte_mempool *pool;
	int flow_id_dynfield_offset;
	int seqn_dynfield_offset;
	struct prod_data prod;
	struct worker_data worker[EVT_MAX_PORTS];
	uint32_t *producer_flow_seq;
	uint32_t *expected_flow_seq;
	struct evt_options *opt;
};

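/* Read the flow id back from the mbuf dynamic field into the event. */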
static inline void
order_flow_id_copy_from_mbuf(struct test_order *t, struct rte_event *event)
{
	event->flow_id = *RTE_MBUF_DYNFIELD(event->mbuf,
			t->flow_id_dynfield_offset, flow_id_t *);
}

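/* Stash the flow id in the mbuf dynamic field and fill in the event. */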
static inline void
order_flow_id_save(struct test_order *t, flow_id_t flow_id,
		struct rte_mbuf *mbuf, struct rte_event *event)
{
	*RTE_MBUF_DYNFIELD(mbuf,
			t->flow_id_dynfield_offset, flow_id_t *) = flow_id;
	event->flow_id = flow_id;
	event->mbuf = mbuf;
}

static inline seqn_t *
order_mbuf_seqn(struct test_order *t, struct rte_mbuf *mbuf)
{
	return RTE_MBUF_DYNFIELD(mbuf, t->seqn_dynfield_offset, seqn_t *);
}

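/* One event port per worker lcore, plus one for the producer. */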
static inline int
order_nb_event_ports(struct evt_options *opt)
{
	return evt_nr_active_lcores(opt->wlcores) + 1 /* producer */;
}

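/* Final stage: verify the per-flow sequence number, free the mbuf and
 * decrement the count of outstanding packets.
 */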
static __rte_always_inline void
order_process_stage_1(struct test_order *const t,
		struct rte_event *const ev, const uint32_t nb_flows,
		uint32_t *const expected_flow_seq,
		RTE_ATOMIC(uint64_t) *const outstand_pkts)
{
	const uint32_t flow = (uintptr_t)ev->mbuf % nb_flows;
	/* Compare the seqn against the expected value. */
	if (*order_mbuf_seqn(t, ev->mbuf) != expected_flow_seq[flow]) {
		evt_err("flow=%x seqn mismatch got=%x expected=%x",
			flow, *order_mbuf_seqn(t, ev->mbuf),
			expected_flow_seq[flow]);
		t->err = true;
	}
	/*
	 * Events from an atomic flow of an event queue can be scheduled only
	 * to a single port at a time. The port is guaranteed to have
	 * exclusive (atomic) access to the given atomic flow, so we don't
	 * need to update expected_flow_seq in a critical section.
	 */
	expected_flow_seq[flow]++;
	rte_pktmbuf_free(ev->mbuf);
	rte_atomic_fetch_sub_explicit(outstand_pkts, 1, rte_memory_order_relaxed);
}

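/* Catch-all stage: report an event that arrived on an unexpected queue. */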
static __rte_always_inline void
order_process_stage_invalid(struct test_order *const t,
			struct rte_event *const ev)
{
	evt_err("invalid queue %d", ev->queue_id);
	t->err = true;
}

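/*
 * Declare and initialize the locals every order worker loop relies on
 * (test state, device/port ids, flow count, expected sequence array and
 * the outstanding packet counter). A minimal worker sketch, illustrative
 * only and not lifted verbatim from the test sources:
 *
 *	static int
 *	worker(void *arg)
 *	{
 *		ORDER_WORKER_INIT;
 *		struct rte_event ev;
 *
 *		while (t->err == false) {
 *			if (!rte_event_dequeue_burst(dev_id, port, &ev, 1, 0))
 *				continue;
 *			order_process_stage_1(t, &ev, nb_flows,
 *					expected_flow_seq, outstand_pkts);
 *		}
 *		return 0;
 *	}
 */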
#define ORDER_WORKER_INIT\
	struct worker_data *w = arg;\
	struct test_order *t = w->t;\
	struct evt_options *opt = t->opt;\
	const uint8_t dev_id = w->dev_id;\
	const uint8_t port = w->port_id;\
	const uint32_t nb_flows = t->nb_flows;\
	uint32_t *expected_flow_seq = t->expected_flow_seq;\
	RTE_ATOMIC(uint64_t) *outstand_pkts = &t->outstand_pkts;\
	if (opt->verbose_level > 1)\
		printf("%s(): lcore %d dev_id %d port=%d\n",\
			__func__, rte_lcore_id(), dev_id, port)

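/* Shared setup, run and teardown helpers used by the order test variants. */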
int order_test_result(struct evt_test *test, struct evt_options *opt);
int order_opt_check(struct evt_options *opt);
int order_test_setup(struct evt_test *test, struct evt_options *opt);
int order_mempool_setup(struct evt_test *test, struct evt_options *opt);
int order_launch_lcores(struct evt_test *test, struct evt_options *opt,
		int (*worker)(void *));
int order_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
		uint8_t nb_workers, uint8_t nb_queues);
void order_test_destroy(struct evt_test *test, struct evt_options *opt);
void order_opt_dump(struct evt_options *opt);
void order_mempool_destroy(struct evt_test *test, struct evt_options *opt);
void order_eventdev_destroy(struct evt_test *test, struct evt_options *opt);

#endif /* _TEST_ORDER_COMMON_ */