/*
 * SPDX-License-Identifier: BSD-3-Clause
 * Copyright 2017 Cavium, Inc.
 */

#ifndef _TEST_PIPELINE_COMMON_
#define _TEST_PIPELINE_COMMON_

/*
 * Shared state, per-worker initialization macros, and enqueue/transmit
 * helpers used by the eventdev "pipeline" test cases.
 */

#include <stdio.h>
#include <stdbool.h>
#include <unistd.h>

#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_eventdev.h>
#include <rte_event_eth_rx_adapter.h>
#include <rte_event_eth_tx_adapter.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_mempool.h>
#include <rte_prefetch.h>
#include <rte_spinlock.h>
#include <rte_service.h>
#include <rte_service_component.h>

#include "evt_common.h"
#include "evt_options.h"
#include "evt_test.h"

struct test_pipeline;

/* Per-worker (per-lcore) context passed as the argument to each worker
 * function; the PIPELINE_WORKER_*_INIT macros below unpack it.
 */
struct worker_data {
	uint64_t processed_pkts;	/* presumably a per-worker packet counter — confirm in worker loops */
	uint8_t dev_id;			/* event device id used for enqueue/dequeue calls */
	uint8_t port_id;		/* event port this worker enqueues/dequeues on */
	struct test_pipeline *t;	/* back-pointer to the shared test state */
} __rte_cache_aligned;

/* Shared test state for one pipeline test run. */
struct test_pipeline {
	/* Don't change the offset of "done". Signal handler use this memory
	 * to terminate all lcores work.
	 */
	int done;
	uint8_t nb_workers;
	uint8_t internal_port;	/* NOTE(review): not referenced in this header — presumably a capability flag; verify in the .c file */
	uint8_t tx_evqueue_id[RTE_MAX_ETHPORTS];	/* presumably per-ethdev TX event queue id — confirm against setup code */
	enum evt_test_result result;
	uint32_t nb_flows;
	uint64_t outstand_pkts;
	struct rte_mempool *pool;
	struct worker_data worker[EVT_MAX_PORTS];
	struct evt_options *opt;
	/* Cache-aligned so the hot per-stage schedule lookup table starts on
	 * its own cache line.
	 */
	uint8_t sched_type_list[EVT_MAX_STAGES] __rte_cache_aligned;
} __rte_cache_aligned;

#define BURST_SIZE 16

/*
 * Declare the locals (w, t, dev, port, ev) common to single-stage,
 * single-event worker loops. Expects a "void *arg" parameter pointing to
 * this worker's struct worker_data.
 */
#define PIPELINE_WORKER_SINGLE_STAGE_INIT \
	struct worker_data *w = arg;      \
	struct test_pipeline *t = w->t;   \
	const uint8_t dev = w->dev_id;    \
	const uint8_t port = w->port_id;  \
	struct rte_event ev __rte_cache_aligned

/*
 * Burst variant of the above: adds a loop index "i" and an event array.
 * The array holds BURST_SIZE + 1 entries; the reason for the extra slot is
 * not visible in this header — TODO confirm in the worker implementations.
 */
#define PIPELINE_WORKER_SINGLE_STAGE_BURST_INIT \
	int i;                                  \
	struct worker_data *w = arg;            \
	struct test_pipeline *t = w->t;         \
	const uint8_t dev = w->dev_id;          \
	const uint8_t port = w->port_id;        \
	struct rte_event ev[BURST_SIZE + 1] __rte_cache_aligned

/*
 * Declare the locals for multi-stage, single-event worker loops. On top of
 * the single-stage set it adds:
 *   cq_id           - current queue id scratch variable
 *   last_queue      - index of the final stage queue (nb_stages - 1)
 *   sched_type_list - per-stage schedule types from the shared state
 *   nb_stages       - opt->nb_stages + 1; the +1 presumably accounts for
 *                     an extra TX stage — confirm against queue setup
 */
#define PIPELINE_WORKER_MULTI_STAGE_INIT \
	struct worker_data *w = arg;     \
	struct test_pipeline *t = w->t;  \
	uint8_t cq_id;                   \
	const uint8_t dev = w->dev_id;   \
	const uint8_t port = w->port_id; \
	const uint8_t last_queue = t->opt->nb_stages - 1; \
	uint8_t *const sched_type_list = &t->sched_type_list[0]; \
	const uint8_t nb_stages = t->opt->nb_stages + 1; \
	struct rte_event ev __rte_cache_aligned

/* Burst variant of PIPELINE_WORKER_MULTI_STAGE_INIT (see notes above on
 * "i", the extra array slot, and nb_stages).
 */
#define PIPELINE_WORKER_MULTI_STAGE_BURST_INIT \
	int i;                                 \
	struct worker_data *w = arg;           \
	struct test_pipeline *t = w->t;        \
	uint8_t cq_id;                         \
	const uint8_t dev = w->dev_id;         \
	const uint8_t port = w->port_id;       \
	const uint8_t last_queue = t->opt->nb_stages - 1; \
	uint8_t *const sched_type_list = &t->sched_type_list[0]; \
	const uint8_t nb_stages = t->opt->nb_stages + 1; \
	struct rte_event ev[BURST_SIZE + 1] __rte_cache_aligned

/* Rewrite "ev" in place so it is forwarded to the next pipeline stage with
 * the given schedule type.
 */
static __rte_always_inline void
pipeline_fwd_event(struct rte_event *ev, uint8_t sched)
{
	ev->event_type = RTE_EVENT_TYPE_CPU;
	ev->op = RTE_EVENT_OP_FORWARD;
	ev->sched_type = sched;
}

/* Transmit a single event's mbuf on ethdev TX queue 0 through the TX
 * adapter, spinning (with rte_pause) until the adapter accepts it.
 */
static __rte_always_inline void
pipeline_event_tx(const uint8_t dev, const uint8_t port,
		struct rte_event * const ev)
{
	rte_event_eth_tx_adapter_txq_set(ev->mbuf, 0);
	while (!rte_event_eth_tx_adapter_enqueue(dev, port, ev, 1))
		rte_pause();
}

/* Transmit a burst of nb_rx events through the TX adapter, retrying the
 * unaccepted tail until all are enqueued. Note: unlike the single-event
 * variant this retry loop does not rte_pause() between attempts.
 */
static __rte_always_inline void
pipeline_event_tx_burst(const uint8_t dev, const uint8_t port,
		struct rte_event *ev, const uint16_t nb_rx)
{
	uint16_t enq;

	enq = rte_event_eth_tx_adapter_enqueue(dev, port, ev, nb_rx);
	while (enq < nb_rx) {
		enq += rte_event_eth_tx_adapter_enqueue(dev, port,
				ev + enq, nb_rx - enq);
	}
}

/* Enqueue a single event to the event device, spinning until accepted. */
static __rte_always_inline void
pipeline_event_enqueue(const uint8_t dev, const uint8_t port,
		struct rte_event *ev)
{
	while (rte_event_enqueue_burst(dev, port, ev, 1) != 1)
		rte_pause();
}

/* Enqueue a burst of nb_rx events, retrying the unaccepted tail until all
 * are enqueued (no rte_pause() between retries, as in the TX burst helper).
 */
static __rte_always_inline void
pipeline_event_enqueue_burst(const uint8_t dev, const uint8_t port,
		struct rte_event *ev, const uint16_t nb_rx)
{
	uint16_t enq;

	enq = rte_event_enqueue_burst(dev, port, ev, nb_rx);
	while (enq < nb_rx) {
		enq += rte_event_enqueue_burst(dev, port,
				ev + enq, nb_rx - enq);
	}
}

/* Number of event ports needed: one per active worker lcore. */
static inline int
pipeline_nb_event_ports(struct evt_options *opt)
{
	return evt_nr_active_lcores(opt->wlcores);
}

/* Setup/teardown and run helpers shared by the pipeline tests; presumably
 * implemented in the matching .c file.
 */
int pipeline_test_result(struct evt_test *test, struct evt_options *opt);
int pipeline_opt_check(struct evt_options *opt, uint64_t nb_queues);
int pipeline_test_setup(struct evt_test *test, struct evt_options *opt);
int pipeline_ethdev_setup(struct evt_test *test, struct evt_options *opt);
int pipeline_event_rx_adapter_setup(struct evt_options *opt, uint8_t stride,
		struct rte_event_port_conf prod_conf);
int pipeline_event_tx_adapter_setup(struct evt_options *opt,
		struct rte_event_port_conf prod_conf);
int pipeline_mempool_setup(struct evt_test *test, struct evt_options *opt);
int pipeline_event_port_setup(struct evt_test *test, struct evt_options *opt,
		uint8_t *queue_arr, uint8_t nb_queues,
		const struct rte_event_port_conf p_conf);
int pipeline_launch_lcores(struct evt_test *test, struct evt_options *opt,
		int (*worker)(void *));
void pipeline_opt_dump(struct evt_options *opt, uint8_t nb_queues);
void pipeline_test_destroy(struct evt_test *test, struct evt_options *opt);
void pipeline_eventdev_destroy(struct evt_test *test, struct evt_options *opt);
void pipeline_ethdev_destroy(struct evt_test *test, struct evt_options *opt);
void pipeline_mempool_destroy(struct evt_test *test, struct evt_options *opt);

#endif /* _TEST_PIPELINE_COMMON_ */