/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <unistd.h>
#include <stdint.h>
#include <inttypes.h>

#include <rte_log.h>
#include <rte_mbuf.h>
#include <rte_malloc.h>
#include <rte_cycles.h>
#include <rte_ethdev.h>
#include <rte_memcpy.h>
#include <rte_byteorder.h>
#include <rte_branch_prediction.h>

#include <rte_sched.h>

#include "main.h"

#define APP_MODE_NONE 0
#define APP_RX_MODE   1
#define APP_WT_MODE   2
#define APP_TX_MODE   4

uint8_t interactive = APP_INTERACTIVE_DEFAULT;
uint32_t qavg_period = APP_QAVG_PERIOD;
uint32_t qavg_ntimes = APP_QAVG_NTIMES;

/* main processing loop */
static int
app_main_loop(__attribute__((unused)) void *dummy)
{
	uint32_t lcore_id;
	uint32_t i, mode;
	uint32_t rx_idx = 0;
	uint32_t wt_idx = 0;
	uint32_t tx_idx = 0;
	struct thread_conf *rx_confs[MAX_DATA_STREAMS];
	struct thread_conf *wt_confs[MAX_DATA_STREAMS];
	struct thread_conf *tx_confs[MAX_DATA_STREAMS];

	memset(rx_confs, 0, sizeof(rx_confs));
	memset(wt_confs, 0, sizeof(wt_confs));
	memset(tx_confs, 0, sizeof(tx_confs));

	mode = APP_MODE_NONE;
	lcore_id = rte_lcore_id();

	/* collect the RX, worker and TX roles assigned to this lcore */
	for (i = 0; i < nb_pfc; i++) {
		struct flow_conf *flow = &qos_conf[i];

		if (flow->rx_core == lcore_id) {
			flow->rx_thread.rx_port = flow->rx_port;
			flow->rx_thread.rx_ring = flow->rx_ring;
			flow->rx_thread.rx_queue = flow->rx_queue;
			flow->rx_thread.sched_port = flow->sched_port;

			rx_confs[rx_idx++] = &flow->rx_thread;

			mode |= APP_RX_MODE;
		}
		if (flow->tx_core == lcore_id) {
			flow->tx_thread.tx_port = flow->tx_port;
			flow->tx_thread.tx_ring = flow->tx_ring;
			flow->tx_thread.tx_queue = flow->tx_queue;

			tx_confs[tx_idx++] = &flow->tx_thread;

			mode |= APP_TX_MODE;
		}
		if (flow->wt_core == lcore_id) {
			flow->wt_thread.rx_ring = flow->rx_ring;
			flow->wt_thread.tx_ring = flow->tx_ring;
			flow->wt_thread.tx_port = flow->tx_port;
			flow->wt_thread.sched_port = flow->sched_port;

			wt_confs[wt_idx++] = &flow->wt_thread;

			mode |= APP_WT_MODE;
		}
	}

	if (mode == APP_MODE_NONE) {
		RTE_LOG(INFO, APP, "lcore %u has nothing to do\n", lcore_id);
		return -1;
	}

	if (mode == (APP_RX_MODE | APP_WT_MODE)) {
		RTE_LOG(INFO, APP,
			"lcore %u was configured for both RX and WT!\n",
			lcore_id);
		return -1;
	}

	RTE_LOG(INFO, APP, "entering main loop on lcore %u\n", lcore_id);

	/* dispatch to the thread function matching this lcore's role */
	if (mode == APP_RX_MODE) {
		for (i = 0; i < rx_idx; i++) {
			RTE_LOG(INFO, APP,
				"flow %u lcoreid %u reading port %u\n",
				i, lcore_id, rx_confs[i]->rx_port);
		}

		app_rx_thread(rx_confs);
	} else if (mode == (APP_TX_MODE | APP_WT_MODE)) {
		for (i = 0; i < wt_idx; i++) {
			wt_confs[i]->m_table = rte_malloc("table_wt",
				sizeof(struct rte_mbuf *) * burst_conf.tx_burst,
				RTE_CACHE_LINE_SIZE);

			if (wt_confs[i]->m_table == NULL)
				rte_panic("flow %u unable to allocate memory buffer\n", i);

			RTE_LOG(INFO, APP,
				"flow %u lcoreid %u sched+write port %u\n",
				i, lcore_id, wt_confs[i]->tx_port);
		}

		app_mixed_thread(wt_confs);
	} else if (mode == APP_TX_MODE) {
		for (i = 0; i < tx_idx; i++) {
			tx_confs[i]->m_table = rte_malloc("table_tx",
				sizeof(struct rte_mbuf *) * burst_conf.tx_burst,
				RTE_CACHE_LINE_SIZE);

			if (tx_confs[i]->m_table == NULL)
				rte_panic("flow %u unable to allocate memory buffer\n", i);

			RTE_LOG(INFO, APP,
				"flow %u lcoreid %u write port %u\n",
				i, lcore_id, tx_confs[i]->tx_port);
		}

		app_tx_thread(tx_confs);
	} else if (mode == APP_WT_MODE) {
		for (i = 0; i < wt_idx; i++) {
			RTE_LOG(INFO, APP, "flow %u lcoreid %u scheduling\n",
				i, lcore_id);
		}

		app_worker_thread(wt_confs);
	}

	return 0;
}

void
app_stat(void)
{
	uint32_t i;
	struct rte_eth_stats stats;
	static struct rte_eth_stats rx_stats[MAX_DATA_STREAMS];
	static struct rte_eth_stats tx_stats[MAX_DATA_STREAMS];

	/* print per-flow port statistics as deltas since the previous call */
	for (i = 0; i < nb_pfc; i++) {
		struct flow_conf *flow = &qos_conf[i];

		rte_eth_stats_get(flow->rx_port, &stats);
		printf("\nRX port %"PRIu16": rx: %"PRIu64 " err: %"PRIu64
			" no_mbuf: %"PRIu64 "\n",
			flow->rx_port,
			stats.ipackets - rx_stats[i].ipackets,
			stats.ierrors - rx_stats[i].ierrors,
			stats.rx_nombuf - rx_stats[i].rx_nombuf);
		memcpy(&rx_stats[i], &stats, sizeof(stats));

		rte_eth_stats_get(flow->tx_port, &stats);
		printf("TX port %"PRIu16": tx: %" PRIu64 " err: %" PRIu64 "\n",
			flow->tx_port,
			stats.opackets - tx_stats[i].opackets,
			stats.oerrors - tx_stats[i].oerrors);
		memcpy(&tx_stats[i], &stats, sizeof(stats));

#if APP_COLLECT_STAT
		printf("-------+------------+------------+\n");
		printf("       |  received  |   dropped  |\n");
		printf("-------+------------+------------+\n");
		printf("  RX   | %10" PRIu64 " | %10" PRIu64 " |\n",
			flow->rx_thread.stat.nb_rx,
			flow->rx_thread.stat.nb_drop);
		printf("QOS+TX | %10" PRIu64 " | %10" PRIu64 " | pps: %"PRIu64 "\n",
			flow->wt_thread.stat.nb_rx,
			flow->wt_thread.stat.nb_drop,
			flow->wt_thread.stat.nb_rx - flow->wt_thread.stat.nb_drop);
		printf("-------+------------+------------+\n");

		memset(&flow->rx_thread.stat, 0, sizeof(struct thread_stat));
		memset(&flow->wt_thread.stat, 0, sizeof(struct thread_stat));
#endif
	}
}

int
main(int argc, char **argv)
{
	int ret;

	ret = app_parse_args(argc, argv);
	if (ret < 0)
		return -1;

	ret = app_init();
	if (ret < 0)
		return -1;

	/* launch the per-lcore processing loop on every worker lcore */
	rte_eal_mp_remote_launch(app_main_loop, NULL, SKIP_MASTER);

	if (interactive) {
		sleep(1);
		prompt();
	} else {
		/* print statistics every second */
		while (1) {
			sleep(1);
			app_stat();
		}
	}

	return 0;
}