/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#ifndef _TEST_ORDER_COMMON_
#define _TEST_ORDER_COMMON_

#include <stdio.h>
#include <stdbool.h>

#include <rte_cycles.h>
#include <rte_eventdev.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mbuf_dyn.h>

#include "evt_common.h"
#include "evt_options.h"
#include "evt_test.h"

#define BURST_SIZE 16

typedef uint32_t flow_id_t;
typedef uint32_t seqn_t;

struct test_order;

struct worker_data {
        uint8_t dev_id;
        uint8_t port_id;
        struct test_order *t;
};

struct prod_data {
        uint8_t dev_id;
        uint8_t port_id;
        uint8_t queue_id;
        struct test_order *t;
};

struct __rte_cache_aligned test_order {
        /* Don't change the offset of "err". The signal handler uses this
         * memory location to signal all lcores to terminate their work.
         */
        int err;
        /*
         * Atomic operations are expensive, but since this is a functional
         * test we use them anyway to keep the code simple.
         */
        RTE_ATOMIC(uint64_t) outstand_pkts;
        enum evt_test_result result;
        uint32_t nb_flows;
        uint64_t nb_pkts;
        struct rte_mempool *pool;
        int flow_id_dynfield_offset;
        int seqn_dynfield_offset;
        struct prod_data prod;
        struct worker_data worker[EVT_MAX_PORTS];
        uint32_t *producer_flow_seq;
        uint32_t *expected_flow_seq;
        struct evt_options *opt;
};

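/*
 * Setup sketch (illustrative only, not compiled here): the dynfield offsets
 * above are expected to be filled in during test setup via
 * rte_mbuf_dynfield_register(). A minimal sketch, with the descriptor name
 * chosen for illustration:
 *
 *        static const struct rte_mbuf_dynfield flow_id_dynfield_desc = {
 *                .name = "test_event_dynfield_flow_id",
 *                .size = sizeof(flow_id_t),
 *                .align = alignof(flow_id_t),
 *        };
 *        t->flow_id_dynfield_offset =
 *                rte_mbuf_dynfield_register(&flow_id_dynfield_desc);
 *        if (t->flow_id_dynfield_offset == -1)
 *                return -rte_errno;
 */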
static inline void
order_flow_id_copy_from_mbuf(struct test_order *t, struct rte_event *event)
{
        event->flow_id = *RTE_MBUF_DYNFIELD(event->mbuf,
                        t->flow_id_dynfield_offset, flow_id_t *);
}

static inline void
order_flow_id_save(struct test_order *t, flow_id_t flow_id,
                struct rte_mbuf *mbuf, struct rte_event *event)
{
        *RTE_MBUF_DYNFIELD(mbuf,
                        t->flow_id_dynfield_offset, flow_id_t *) = flow_id;
        event->flow_id = flow_id;
        event->mbuf = mbuf;
}

static inline seqn_t *
order_mbuf_seqn(struct test_order *t, struct rte_mbuf *mbuf)
{
        return RTE_MBUF_DYNFIELD(mbuf, t->seqn_dynfield_offset, seqn_t *);
}
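
/*
 * Producer-side sketch (illustrative): a producer is expected to stamp a
 * per-flow sequence number into the seqn dynfield and record the flow id
 * with order_flow_id_save() before enqueuing. A minimal sketch, assuming a
 * populated struct prod_data *p and an ordered first-stage queue:
 *
 *        struct test_order *t = p->t;
 *        struct rte_mbuf *m = rte_pktmbuf_alloc(t->pool);
 *        struct rte_event ev = {
 *                .queue_id = p->queue_id,
 *                .op = RTE_EVENT_OP_NEW,
 *                .sched_type = RTE_SCHED_TYPE_ORDERED,
 *        };
 *        const flow_id_t flow = (uintptr_t)m % t->nb_flows;
 *
 *        *order_mbuf_seqn(t, m) = t->producer_flow_seq[flow]++;
 *        order_flow_id_save(t, flow, m, &ev);
 *        while (rte_event_enqueue_burst(p->dev_id, p->port_id, &ev, 1) != 1)
 *                rte_pause();
 */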

static inline int
order_nb_event_ports(struct evt_options *opt)
{
        return evt_nr_active_lcores(opt->wlcores) + 1 /* producer */;
}

static __rte_always_inline void
order_process_stage_1(struct test_order *const t,
                struct rte_event *const ev, const uint32_t nb_flows,
                uint32_t *const expected_flow_seq,
                RTE_ATOMIC(uint64_t) *const outstand_pkts)
{
        const uint32_t flow = (uintptr_t)ev->mbuf % nb_flows;
        /* compare the seqn against the expected value */
        if (*order_mbuf_seqn(t, ev->mbuf) != expected_flow_seq[flow]) {
                evt_err("flow=%x seqn mismatch got=%x expected=%x",
                        flow, *order_mbuf_seqn(t, ev->mbuf),
                        expected_flow_seq[flow]);
                t->err = true;
        }
        /*
         * Events from an atomic flow of an event queue can be scheduled only
         * to a single port at a time, and that port is guaranteed exclusive
         * (atomic) access to the given flow. So we don't need to update
         * expected_flow_seq in a critical section.
         */
        expected_flow_seq[flow]++;
        rte_pktmbuf_free(ev->mbuf);
        rte_atomic_fetch_sub_explicit(outstand_pkts, 1, rte_memory_order_relaxed);
}

static __rte_always_inline void
order_process_stage_invalid(struct test_order *const t,
                        struct rte_event *const ev)
{
        evt_err("invalid queue %d", ev->queue_id);
        t->err = true;
}

#define ORDER_WORKER_INIT\
        struct worker_data *w = arg;\
        struct test_order *t = w->t;\
        struct evt_options *opt = t->opt;\
        const uint8_t dev_id = w->dev_id;\
        const uint8_t port = w->port_id;\
        const uint32_t nb_flows = t->nb_flows;\
        uint32_t *expected_flow_seq = t->expected_flow_seq;\
        RTE_ATOMIC(uint64_t) *outstand_pkts = &t->outstand_pkts;\
        if (opt->verbose_level > 1)\
                printf("%s(): lcore %d dev_id %d port=%d\n",\
                        __func__, rte_lcore_id(), dev_id, port)
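
/*
 * Worker sketch (illustrative): a worker function passed to
 * order_launch_lcores() would expand ORDER_WORKER_INIT and then poll until
 * the producer's packets are drained. The queue-id-to-stage mapping below
 * (queue 1 as the last, atomic stage) is an assumption; the real workers
 * live in the individual order tests:
 *
 *        static int
 *        order_worker(void *arg)
 *        {
 *                ORDER_WORKER_INIT;
 *                struct rte_event ev;
 *
 *                while (t->err == false &&
 *                       rte_atomic_load_explicit(outstand_pkts,
 *                                rte_memory_order_relaxed) > 0) {
 *                        if (!rte_event_dequeue_burst(dev_id, port,
 *                                        &ev, 1, 0)) {
 *                                rte_pause();
 *                                continue;
 *                        }
 *                        if (ev.queue_id == 1)
 *                                order_process_stage_1(t, &ev, nb_flows,
 *                                        expected_flow_seq, outstand_pkts);
 *                        else
 *                                order_process_stage_invalid(t, &ev);
 *                }
 *                return 0;
 *        }
 */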

int order_test_result(struct evt_test *test, struct evt_options *opt);
int order_opt_check(struct evt_options *opt);
int order_test_setup(struct evt_test *test, struct evt_options *opt);
int order_mempool_setup(struct evt_test *test, struct evt_options *opt);
int order_launch_lcores(struct evt_test *test, struct evt_options *opt,
                        int (*worker)(void *));
int order_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
                        uint8_t nb_workers, uint8_t nb_queues);
void order_test_destroy(struct evt_test *test, struct evt_options *opt);
void order_opt_dump(struct evt_options *opt);
void order_mempool_destroy(struct evt_test *test, struct evt_options *opt);
void order_eventdev_destroy(struct evt_test *test, struct evt_options *opt);

#endif /* _TEST_ORDER_COMMON_ */