/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#ifndef _TEST_ORDER_COMMON_
#define _TEST_ORDER_COMMON_

#include <stdio.h>
#include <stdbool.h>

#include <rte_cycles.h>
#include <rte_eventdev.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>
#include <rte_mbuf_dyn.h>

#include "evt_common.h"
#include "evt_options.h"
#include "evt_test.h"

#define BURST_SIZE 16

typedef uint32_t flow_id_t;
typedef uint32_t seqn_t;

struct test_order;

struct worker_data {
	uint8_t dev_id;
	uint8_t port_id;
	struct test_order *t;
};

struct prod_data {
	uint8_t dev_id;
	uint8_t port_id;
	uint8_t queue_id;
	struct test_order *t;
};

struct test_order {
	/* Don't change the offset of "err". The signal handler uses this
	 * memory to terminate the work on all lcores.
	 */
	int err;
	/*
	 * The atomic_* operations are expensive. Since this is a functional
	 * test, we use them to keep the code simple.
	 */
	rte_atomic64_t outstand_pkts;
	enum evt_test_result result;
	uint32_t nb_flows;
	uint64_t nb_pkts;
	struct rte_mempool *pool;
	int flow_id_dynfield_offset;
	int seqn_dynfield_offset;
	struct prod_data prod;
	struct worker_data worker[EVT_MAX_PORTS];
	uint32_t *producer_flow_seq;
	uint32_t *expected_flow_seq;
	struct evt_options *opt;
} __rte_cache_aligned;

/* Copy the flow id stored in the mbuf dynamic field back into the event. */
static inline void
order_flow_id_copy_from_mbuf(struct test_order *t, struct rte_event *event)
{
	event->flow_id = *RTE_MBUF_DYNFIELD(event->mbuf,
			t->flow_id_dynfield_offset, flow_id_t *);
}

/* Store the flow id in the mbuf dynamic field and fill the event with it. */
static inline void
order_flow_id_save(struct test_order *t, flow_id_t flow_id,
		struct rte_mbuf *mbuf, struct rte_event *event)
{
	*RTE_MBUF_DYNFIELD(mbuf,
			t->flow_id_dynfield_offset, flow_id_t *) = flow_id;
	event->flow_id = flow_id;
	event->mbuf = mbuf;
}

/* Return a pointer to the per-mbuf sequence number dynamic field. */
static inline seqn_t *
order_mbuf_seqn(struct test_order *t, struct rte_mbuf *mbuf)
{
	return RTE_MBUF_DYNFIELD(mbuf, t->seqn_dynfield_offset, seqn_t *);
}

/* One event port per worker lcore plus one port for the producer. */
static inline int
order_nb_event_ports(struct evt_options *opt)
{
	return evt_nr_active_lcores(opt->wlcores) + 1 /* producer */;
}

static __rte_always_inline void
order_process_stage_1(struct test_order *const t,
		struct rte_event *const ev, const uint32_t nb_flows,
		uint32_t *const expected_flow_seq,
		rte_atomic64_t *const outstand_pkts)
{
	const uint32_t flow = (uintptr_t)ev->mbuf % nb_flows;
	/* Compare the received seqn against the expected value for the flow */
	if (*order_mbuf_seqn(t, ev->mbuf) != expected_flow_seq[flow]) {
		evt_err("flow=%x seqn mismatch got=%x expected=%x",
			flow, *order_mbuf_seqn(t, ev->mbuf),
			expected_flow_seq[flow]);
		t->err = true;
		rte_smp_wmb();
	}
	/*
	 * Events from an atomic flow of an event queue can be scheduled only
	 * to a single port at a time. The port is guaranteed to have exclusive
	 * (atomic) access to the given atomic flow, so expected_flow_seq does
	 * not need to be updated in a critical section.
	 */
	expected_flow_seq[flow]++;
	rte_pktmbuf_free(ev->mbuf);
	rte_atomic64_sub(outstand_pkts, 1);
}

static __rte_always_inline void
order_process_stage_invalid(struct test_order *const t,
			struct rte_event *const ev)
{
	evt_err("invalid queue %d", ev->queue_id);
	t->err = true;
	rte_smp_wmb();
}

#define ORDER_WORKER_INIT\
	struct worker_data *w = arg;\
	struct test_order *t = w->t;\
	struct evt_options *opt = t->opt;\
	const uint8_t dev_id = w->dev_id;\
	const uint8_t port = w->port_id;\
	const uint32_t nb_flows = t->nb_flows;\
	uint32_t *expected_flow_seq = t->expected_flow_seq;\
	rte_atomic64_t *outstand_pkts = &t->outstand_pkts;\
	if (opt->verbose_level > 1)\
		printf("%s(): lcore %d dev_id %d port=%d\n",\
			__func__, rte_lcore_id(), dev_id, port)
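
/*
 * Illustrative sketch only (not compiled as part of the test): a worker
 * function handed to order_launch_lcores() typically expands
 * ORDER_WORKER_INIT to set up its local state and then drains events until
 * an error is flagged or all outstanding packets are accounted for. The
 * example_worker name and the single-stage handling below are hypothetical
 * simplifications; the real order_queue/order_atq workers additionally
 * forward events through an ordered stage before calling
 * order_process_stage_1().
 *
 *	static int
 *	example_worker(void *arg)
 *	{
 *		ORDER_WORKER_INIT;
 *		struct rte_event ev;
 *
 *		while (t->err == false) {
 *			uint16_t nb = rte_event_dequeue_burst(dev_id, port,
 *					&ev, 1, 0);
 *			if (!nb) {
 *				if (rte_atomic64_read(outstand_pkts) <= 0)
 *					break;
 *				rte_pause();
 *				continue;
 *			}
 *			if (ev.queue_id == 0)
 *				order_process_stage_1(t, &ev, nb_flows,
 *						expected_flow_seq,
 *						outstand_pkts);
 *			else
 *				order_process_stage_invalid(t, &ev);
 *		}
 *		return 0;
 *	}
 */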

int order_test_result(struct evt_test *test, struct evt_options *opt);
int order_opt_check(struct evt_options *opt);
int order_test_setup(struct evt_test *test, struct evt_options *opt);
int order_mempool_setup(struct evt_test *test, struct evt_options *opt);
int order_launch_lcores(struct evt_test *test, struct evt_options *opt,
			int (*worker)(void *));
int order_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
			uint8_t nb_workers, uint8_t nb_queues);
void order_test_destroy(struct evt_test *test, struct evt_options *opt);
void order_opt_dump(struct evt_options *opt);
void order_mempool_destroy(struct evt_test *test, struct evt_options *opt);
void order_eventdev_destroy(struct evt_test *test, struct evt_options *opt);

#endif /* _TEST_ORDER_COMMON_ */