/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#ifndef _TEST_ORDER_COMMON_
#define _TEST_ORDER_COMMON_

#include <stdio.h>
#include <stdbool.h>

#include <rte_cycles.h>
#include <rte_eventdev.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_mbuf.h>

#include "evt_common.h"
#include "evt_options.h"
#include "evt_test.h"

#define BURST_SIZE 16

struct test_order;

struct worker_data {
	uint8_t dev_id;
	uint8_t port_id;
	struct test_order *t;
};

struct prod_data {
	uint8_t dev_id;
	uint8_t port_id;
	uint8_t queue_id;
	struct test_order *t;
};

struct test_order {
	/* Don't change the offset of "err". The signal handler uses this
	 * memory to terminate all lcores' work.
	 */
	int err;
	/*
	 * The atomic_* operations are expensive, but since this is a
	 * functional test, we use them to keep the code simple.
	 */
	rte_atomic64_t outstand_pkts;
	enum evt_test_result result;
	uint32_t nb_flows;
	uint64_t nb_pkts;
	struct rte_mempool *pool;
	struct prod_data prod;
	struct worker_data worker[EVT_MAX_PORTS];
	uint32_t *producer_flow_seq;
	uint32_t *expected_flow_seq;
	struct evt_options *opt;
} __rte_cache_aligned;

static inline int
order_nb_event_ports(struct evt_options *opt)
{
	return evt_nr_active_lcores(opt->wlcores) + 1 /* producer */;
}

static inline __attribute__((always_inline)) void
order_process_stage_1(struct test_order *const t,
		struct rte_event *const ev, const uint32_t nb_flows,
		uint32_t *const expected_flow_seq,
		rte_atomic64_t *const outstand_pkts)
{
	const uint32_t flow = (uintptr_t)ev->mbuf % nb_flows;
	/* Compare the seqn against the expected value */
	if (ev->mbuf->seqn != expected_flow_seq[flow]) {
		evt_err("flow=%x seqn mismatch got=%x expected=%x",
			flow, ev->mbuf->seqn, expected_flow_seq[flow]);
		t->err = true;
		rte_smp_wmb();
	}
	/*
	 * Events from an atomic flow of an event queue can be scheduled only
	 * to a single port at a time. The port is guaranteed to have
	 * exclusive (atomic) access to the given atomic flow, so we don't
	 * need to update expected_flow_seq in a critical section.
	 */
	expected_flow_seq[flow]++;
	rte_pktmbuf_free(ev->mbuf);
	rte_atomic64_sub(outstand_pkts, 1);
}

static inline __attribute__((always_inline)) void
order_process_stage_invalid(struct test_order *const t,
			struct rte_event *const ev)
{
	evt_err("invalid queue %d", ev->queue_id);
	t->err = true;
	rte_smp_wmb();
}

#define ORDER_WORKER_INIT\
	struct worker_data *w = arg;\
	struct test_order *t = w->t;\
	struct evt_options *opt = t->opt;\
	const uint8_t dev_id = w->dev_id;\
	const uint8_t port = w->port_id;\
	const uint32_t nb_flows = t->nb_flows;\
	uint32_t *expected_flow_seq = t->expected_flow_seq;\
	rte_atomic64_t *outstand_pkts = &t->outstand_pkts;\
	if (opt->verbose_level > 1)\
		printf("%s(): lcore %d dev_id %d port=%d\n",\
			__func__, rte_lcore_id(), dev_id, port)

int order_test_result(struct evt_test *test, struct evt_options *opt);
int order_opt_check(struct evt_options *opt);
int order_test_setup(struct evt_test *test, struct evt_options *opt);
int order_mempool_setup(struct evt_test *test, struct evt_options *opt);
int order_launch_lcores(struct evt_test *test, struct evt_options *opt,
			int (*worker)(void *));
int order_event_dev_port_setup(struct evt_test *test, struct evt_options *opt,
			uint8_t nb_workers, uint8_t nb_queues);
void order_test_destroy(struct evt_test *test, struct evt_options *opt);
void order_opt_dump(struct evt_options *opt);
void order_mempool_destroy(struct evt_test *test, struct evt_options *opt);
void order_eventdev_destroy(struct evt_test *test, struct evt_options *opt);

#endif /* _TEST_ORDER_COMMON_ */
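/*
 * Illustrative sketch (compiled out, not part of this header): a minimal
 * worker loop built from ORDER_WORKER_INIT, order_process_stage_1() and
 * order_process_stage_invalid(), showing how the pieces above fit together.
 * It assumes a two-queue layout where queue 0 carries the events under test
 * and forwards them to an atomic queue 1 for sequence verification; the
 * function name order_worker_sketch and that exact queue layout are
 * assumptions for illustration, not definitions from this header.
 */
#if 0
static int
order_worker_sketch(void *arg)
{
	ORDER_WORKER_INIT;
	struct rte_event ev;

	while (t->err == false) {
		uint16_t event = rte_event_dequeue_burst(dev_id, port,
					&ev, 1, 0);
		if (!event) {
			/* Exit once the producer's packets are all drained */
			if (rte_atomic64_read(outstand_pkts) <= 0)
				break;
			rte_pause();
			continue;
		}

		if (ev.queue_id == 0) {
			/* Forward to the atomic queue for verification */
			ev.queue_id = 1;
			ev.op = RTE_EVENT_OP_FORWARD;
			ev.sched_type = RTE_SCHED_TYPE_ATOMIC;
			while (rte_event_enqueue_burst(dev_id, port,
						&ev, 1) != 1)
				rte_pause();
		} else if (ev.queue_id == 1) {
			/* Verify per-flow sequence, free mbuf, decrement
			 * the outstanding-packet counter.
			 */
			order_process_stage_1(t, &ev, nb_flows,
					expected_flow_seq, outstand_pkts);
		} else {
			order_process_stage_invalid(t, &ev);
		}
	}
	return 0;
}
#endif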