/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2017 Cavium, Inc
 */

#ifndef _EVT_COMMON_
#define _EVT_COMMON_

#include <errno.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#include <rte_common.h>
#include <rte_debug.h>
#include <rte_eventdev.h>
#include <rte_service.h>

/* ANSI color escape sequences used by the log helpers below. */
#define CLNRM "\x1b[0m"
#define CLRED "\x1b[31m"
#define CLGRN "\x1b[32m"
#define CLYEL "\x1b[33m"

#define evt_err(fmt, args...) \
	fprintf(stderr, CLRED"error: %s() "fmt CLNRM "\n", __func__, ## args)

#define evt_info(fmt, args...) \
	fprintf(stdout, CLYEL""fmt CLNRM "\n", ## args)

/* Field width used to align the "name : value" dump output. */
#define EVT_STR_FMT 20

#define evt_dump(str, fmt, val...) \
	printf("\t%-*s : "fmt"\n", EVT_STR_FMT, str, ## val)

#define evt_dump_begin(str) printf("\t%-*s : {", EVT_STR_FMT, str)

#define evt_dump_end printf("\b}\n")

#define EVT_MAX_STAGES 64
#define EVT_MAX_PORTS 256
#define EVT_MAX_QUEUES 256

enum evt_prod_type {
	EVT_PROD_TYPE_NONE,
	EVT_PROD_TYPE_SYNT,              /* Producer type Synthetic i.e. CPU. */
	EVT_PROD_TYPE_ETH_RX_ADPTR,      /* Producer type Eth Rx Adapter. */
	EVT_PROD_TYPE_EVENT_TIMER_ADPTR, /* Producer type Timer Adapter. */
	EVT_PROD_TYPE_MAX,
};

struct evt_options {
#define EVT_TEST_NAME_MAX_LEN 32
	char test_name[EVT_TEST_NAME_MAX_LEN];
	bool plcores[RTE_MAX_LCORE];
	bool wlcores[RTE_MAX_LCORE];
	int pool_sz;
	int socket_id;
	int nb_stages;
	int verbose_level;
	uint8_t dev_id;
	uint8_t timdev_cnt;
	uint8_t nb_timer_adptrs;
	uint8_t timdev_use_burst;
	uint8_t sched_type_list[EVT_MAX_STAGES];
	uint16_t mbuf_sz;
	uint16_t wkr_deq_dep;
	uint32_t nb_flows;
	uint32_t tx_first;
	uint32_t max_pkt_sz;
	uint32_t deq_tmo_nsec;
	uint32_t q_priority:1;
	uint32_t fwd_latency:1;
	uint64_t nb_pkts;
	uint64_t nb_timers;
	uint64_t expiry_nsec;
	uint64_t max_tmo_nsec;
	uint64_t timer_tick_nsec;
	uint64_t optm_timer_tick_nsec;
	enum evt_prod_type prod_type;
};

/* Return true if the device schedules events internally, i.e. it does not
 * need a dedicated scheduler service core.
 */
static inline bool
evt_has_distributed_sched(uint8_t dev_id)
{
	struct rte_event_dev_info dev_info;

	rte_event_dev_info_get(dev_id, &dev_info);
	return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_DISTRIBUTED_SCHED) ?
			true : false;
}

/* Return true if the device ports support burst enqueue/dequeue. */
static inline bool
evt_has_burst_mode(uint8_t dev_id)
{
	struct rte_event_dev_info dev_info;

	rte_event_dev_info_get(dev_id, &dev_info);
	return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_BURST_MODE) ?
			true : false;
}

/* Return true if a single event queue can carry all scheduling types
 * (atomic, ordered and parallel).
 */
static inline bool
evt_has_all_types_queue(uint8_t dev_id)
{
	struct rte_event_dev_info dev_info;

	rte_event_dev_info_get(dev_id, &dev_info);
	return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_QUEUE_ALL_TYPES) ?
			true : false;
}

/* Return true if the device carries the event flow_id from enqueue to
 * dequeue unchanged.
 */
static inline bool
evt_has_flow_id(uint8_t dev_id)
{
	struct rte_event_dev_info dev_info;

	rte_event_dev_info_get(dev_id, &dev_info);
	return (dev_info.event_dev_cap & RTE_EVENT_DEV_CAP_CARRY_FLOW_ID) ?
			true : false;
}

/* Map the given service to the service lcore that currently runs the
 * fewest services.
 */
static inline int
evt_service_setup(uint32_t service_id)
{
	int32_t core_cnt;
	unsigned int lcore = 0;
	uint32_t core_array[RTE_MAX_LCORE];
	uint8_t cnt;
	uint8_t min_cnt = UINT8_MAX;

	if (!rte_service_lcore_count())
		return -ENOENT;

	core_cnt = rte_service_lcore_list(core_array, RTE_MAX_LCORE);
	if (core_cnt < 0)
		return -ENOENT;
	/* Get the core which has the least number of services running. */
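	/*
	 * Note: the service is first unmapped from each candidate core so
	 * that rte_service_lcore_count_services() reports only the services
	 * other components have already pinned there; the service is then
	 * mapped onto the least-loaded core found.
	 */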
	while (core_cnt--) {
		/* Reset the default mapping. */
		rte_service_map_lcore_set(service_id,
				core_array[core_cnt], 0);
		cnt = rte_service_lcore_count_services(
				core_array[core_cnt]);
		if (cnt < min_cnt) {
			lcore = core_array[core_cnt];
			min_cnt = cnt;
		}
	}
	if (rte_service_map_lcore_set(service_id, lcore, 1))
		return -ENOENT;

	return 0;
}

/* Configure the event device with defaults derived from its capabilities,
 * clamping the requested dequeue timeout to the supported range.
 */
static inline int
evt_configure_eventdev(struct evt_options *opt, uint8_t nb_queues,
		uint8_t nb_ports)
{
	struct rte_event_dev_info info;
	int ret;

	memset(&info, 0, sizeof(struct rte_event_dev_info));
	ret = rte_event_dev_info_get(opt->dev_id, &info);
	if (ret) {
		evt_err("failed to get eventdev info, dev_id %d", opt->dev_id);
		return ret;
	}

	if (opt->deq_tmo_nsec) {
		if (opt->deq_tmo_nsec < info.min_dequeue_timeout_ns) {
			opt->deq_tmo_nsec = info.min_dequeue_timeout_ns;
			evt_info("dequeue_timeout_ns too low, using %u",
					opt->deq_tmo_nsec);
		}
		if (opt->deq_tmo_nsec > info.max_dequeue_timeout_ns) {
			opt->deq_tmo_nsec = info.max_dequeue_timeout_ns;
			evt_info("dequeue_timeout_ns too high, using %u",
					opt->deq_tmo_nsec);
		}
	}

	const struct rte_event_dev_config config = {
		.dequeue_timeout_ns = opt->deq_tmo_nsec,
		.nb_event_queues = nb_queues,
		.nb_event_ports = nb_ports,
		.nb_single_link_event_port_queues = 0,
		.nb_events_limit = info.max_num_events,
		.nb_event_queue_flows = opt->nb_flows,
		.nb_event_port_dequeue_depth =
				info.max_event_port_dequeue_depth,
		.nb_event_port_enqueue_depth =
				info.max_event_port_enqueue_depth,
	};

	return rte_event_dev_configure(opt->dev_id, &config);
}

#endif /* _EVT_COMMON_ */
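/*
 * Usage sketch (illustrative only, not part of the original header; the
 * option values below are hypothetical): a test would typically configure
 * the device, then, for devices without distributed scheduling, bind the
 * scheduler service to the least-loaded service core:
 *
 *	struct evt_options opt;
 *	uint32_t service_id;
 *
 *	memset(&opt, 0, sizeof(opt));
 *	opt.dev_id = 0;
 *	opt.nb_flows = 1024;
 *	if (evt_configure_eventdev(&opt, 2, 2))
 *		rte_panic("failed to configure eventdev %d\n", opt.dev_id);
 *	if (!evt_has_distributed_sched(opt.dev_id) &&
 *	    rte_event_dev_service_id_get(opt.dev_id, &service_id) == 0)
 *		evt_service_setup(service_id);
 */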