/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(C) 2019 Marvell International Ltd.
 */

#include <stdbool.h>
#include <getopt.h>

#include <rte_malloc.h>

#include "l3fwd.h"
#include "l3fwd_event.h"

static void
print_ethaddr(const char *name, const struct rte_ether_addr *eth_addr)
{
	char buf[RTE_ETHER_ADDR_FMT_SIZE];
	rte_ether_format_addr(buf, RTE_ETHER_ADDR_FMT_SIZE, eth_addr);
	printf("%s%s", name, buf);
}

struct l3fwd_event_resources *
l3fwd_get_eventdev_rsrc(void)
{
	static struct l3fwd_event_resources *rsrc;

	if (rsrc != NULL)
		return rsrc;

	rsrc = rte_zmalloc("l3fwd", sizeof(struct l3fwd_event_resources), 0);
	if (rsrc != NULL) {
		rsrc->sched_type = RTE_SCHED_TYPE_ATOMIC;
		rsrc->eth_rx_queues = 1;
		return rsrc;
	}

	rte_exit(EXIT_FAILURE, "Unable to allocate memory for eventdev cfg\n");

	return NULL;
}

static void
l3fwd_eth_dev_port_setup(struct rte_eth_conf *port_conf)
{
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
	uint16_t nb_ports = rte_eth_dev_count_avail();
	uint16_t nb_rxd = RTE_TEST_RX_DESC_DEFAULT;
	uint16_t nb_txd = RTE_TEST_TX_DESC_DEFAULT;
	unsigned int nb_lcores = rte_lcore_count();
	struct rte_eth_conf local_port_conf;
	struct rte_eth_dev_info dev_info;
	struct rte_eth_txconf txconf;
	struct rte_eth_rxconf rxconf;
	unsigned int nb_mbuf;
	uint16_t port_id;
	uint8_t eth_qid;
	int32_t ret;

	/* initialize all ports */
	RTE_ETH_FOREACH_DEV(port_id) {
		local_port_conf = *port_conf;
		/* skip ports that are not enabled */
		if ((evt_rsrc->port_mask & (1 << port_id)) == 0) {
			printf("\nSkipping disabled port %d\n", port_id);
			continue;
		}

		/* init port */
		printf("Initializing port %d ... ", port_id);
		fflush(stdout);
		printf("Creating queues: nb_rxq=%d nb_txq=1...\n",
		       evt_rsrc->eth_rx_queues);

		ret = rte_eth_dev_info_get(port_id, &dev_info);
		if (ret != 0)
			rte_panic("Error getting device (port %u) info: %s\n",
				  port_id, strerror(-ret));

		if (dev_info.tx_offload_capa & DEV_TX_OFFLOAD_MBUF_FAST_FREE)
			local_port_conf.txmode.offloads |=
						DEV_TX_OFFLOAD_MBUF_FAST_FREE;

		local_port_conf.rx_adv_conf.rss_conf.rss_hf &=
					dev_info.flow_type_rss_offloads;
		if (local_port_conf.rx_adv_conf.rss_conf.rss_hf !=
				port_conf->rx_adv_conf.rss_conf.rss_hf) {
			printf("Port %u modified RSS hash function "
			       "based on hardware support, "
			       "requested:%#"PRIx64" configured:%#"PRIx64"\n",
			       port_id,
			       port_conf->rx_adv_conf.rss_conf.rss_hf,
			       local_port_conf.rx_adv_conf.rss_conf.rss_hf);
		}

		ret = rte_eth_dev_configure(port_id, evt_rsrc->eth_rx_queues,
					    1, &local_port_conf);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				 "Cannot configure device: err=%d, port=%d\n",
				 ret, port_id);

		ret = rte_eth_dev_adjust_nb_rx_tx_desc(port_id, &nb_rxd,
						       &nb_txd);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				 "Cannot adjust number of descriptors: "
				 "err=%d, port=%d\n", ret, port_id);

		rte_eth_macaddr_get(port_id, &ports_eth_addr[port_id]);
		print_ethaddr(" Address:", &ports_eth_addr[port_id]);
		printf(", ");
		print_ethaddr("Destination:",
			(const struct rte_ether_addr *)&dest_eth_addr[port_id]);
		printf(", ");

		/* prepare source MAC for each port. */
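		/* val_eth[port_id] packs the destination and source MAC
		 * addresses back to back as a prebuilt Ethernet header
		 * fragment; the "+ 1" below steps over the destination
		 * address (assumed to occupy the first rte_ether_addr slot)
		 * so the port's own MAC lands in the source position.
		 */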
		rte_ether_addr_copy(&ports_eth_addr[port_id],
			(struct rte_ether_addr *)(val_eth + port_id) + 1);

		/* init memory */
		if (!evt_rsrc->per_port_pool) {
			/* port_id = 0; this is *not* signifying the first
			 * port, rather, it signifies that port_id is ignored.
			 */
			nb_mbuf = RTE_MAX(nb_ports * nb_rxd +
					  nb_ports * nb_txd +
					  nb_ports * nb_lcores *
							MAX_PKT_BURST +
					  nb_lcores * MEMPOOL_CACHE_SIZE,
					  8192u);
			ret = init_mem(0, nb_mbuf);
		} else {
			nb_mbuf = RTE_MAX(nb_rxd + nb_txd +
					  nb_lcores * MAX_PKT_BURST +
					  nb_lcores * MEMPOOL_CACHE_SIZE,
					  8192u);
			ret = init_mem(port_id, nb_mbuf);
		}
		/* init Rx queues per port */
		rxconf = dev_info.default_rxconf;
		rxconf.offloads = local_port_conf.rxmode.offloads;

		for (eth_qid = 0; eth_qid < evt_rsrc->eth_rx_queues;
		     eth_qid++) {
			if (!evt_rsrc->per_port_pool)
				ret = rte_eth_rx_queue_setup(port_id, eth_qid,
						nb_rxd, 0, &rxconf,
						evt_rsrc->pkt_pool[0][0]);
			else
				ret = rte_eth_rx_queue_setup(port_id, eth_qid,
						nb_rxd, 0, &rxconf,
						evt_rsrc->pkt_pool[port_id][0]);
			if (ret < 0)
				rte_exit(EXIT_FAILURE,
					 "rte_eth_rx_queue_setup: err=%d, "
					 "port=%d, eth_qid: %d\n",
					 ret, port_id, eth_qid);
		}

		/* init one Tx queue per port */
		txconf = dev_info.default_txconf;
		txconf.offloads = local_port_conf.txmode.offloads;
		ret = rte_eth_tx_queue_setup(port_id, 0, nb_txd, 0, &txconf);
		if (ret < 0)
			rte_exit(EXIT_FAILURE,
				 "rte_eth_tx_queue_setup: err=%d, port=%d\n",
				 ret, port_id);
	}
}

static void
l3fwd_event_capability_setup(void)
{
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
	uint32_t caps = 0;
	uint16_t i;
	int ret;

	RTE_ETH_FOREACH_DEV(i) {
		ret = rte_event_eth_tx_adapter_caps_get(0, i, &caps);
		if (ret)
			rte_exit(EXIT_FAILURE,
				 "Invalid capability for Tx adapter port %d\n",
				 i);

		evt_rsrc->tx_mode_q |= !(caps &
				RTE_EVENT_ETH_TX_ADAPTER_CAP_INTERNAL_PORT);
	}

	if (evt_rsrc->tx_mode_q)
		l3fwd_event_set_generic_ops(&evt_rsrc->ops);
	else
		l3fwd_event_set_internal_port_ops(&evt_rsrc->ops);
}
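
/*
 * Hand out event ports to callers one at a time, in the order they
 * were stored in evp.event_p_id[]. Returns -1 once all evp.nb_ports
 * ports have been claimed.
 */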
int
l3fwd_get_free_event_port(struct l3fwd_event_resources *evt_rsrc)
{
	static int index;
	int port_id;

	rte_spinlock_lock(&evt_rsrc->evp.lock);
	if (index >= evt_rsrc->evp.nb_ports) {
		printf("No free event port is available\n");
		rte_spinlock_unlock(&evt_rsrc->evp.lock);
		return -1;
	}

	port_id = evt_rsrc->evp.event_p_id[index];
	index++;
	rte_spinlock_unlock(&evt_rsrc->evp.lock);

	return port_id;
}

void
l3fwd_event_resource_setup(struct rte_eth_conf *port_conf)
{
	struct l3fwd_event_resources *evt_rsrc = l3fwd_get_eventdev_rsrc();
	/* Loop variants indexed as [tx_mode_q][has_burst]: tx_mode_q (set
	 * when the Tx adapter lacks an internal event port) selects the
	 * tx_q loops that transmit through an event queue instead of the
	 * tx_d loops; has_burst selects the burst-dequeue variants.
	 */
	const event_loop_cb lpm_event_loop[2][2] = {
		[0][0] = lpm_event_main_loop_tx_d,
		[0][1] = lpm_event_main_loop_tx_d_burst,
		[1][0] = lpm_event_main_loop_tx_q,
		[1][1] = lpm_event_main_loop_tx_q_burst,
	};
	const event_loop_cb em_event_loop[2][2] = {
		[0][0] = em_event_main_loop_tx_d,
		[0][1] = em_event_main_loop_tx_d_burst,
		[1][0] = em_event_main_loop_tx_q,
		[1][1] = em_event_main_loop_tx_q_burst,
	};
	uint32_t event_queue_cfg;
	int ret;

	if (!evt_rsrc->enabled)
		return;

	if (!rte_event_dev_count())
		rte_exit(EXIT_FAILURE, "No Eventdev found\n");

	/* Setup eventdev capability callbacks */
	l3fwd_event_capability_setup();

	/* Ethernet device configuration */
	l3fwd_eth_dev_port_setup(port_conf);

	/* Event device configuration */
	event_queue_cfg = evt_rsrc->ops.event_device_setup();

	/* Event queue configuration */
	evt_rsrc->ops.event_queue_setup(event_queue_cfg);

	/* Event port configuration */
	evt_rsrc->ops.event_port_setup();

	/* Rx/Tx adapters configuration */
	evt_rsrc->ops.adapter_setup();

	/* Start event device */
	ret = rte_event_dev_start(evt_rsrc->event_d_id);
	if (ret < 0)
		rte_exit(EXIT_FAILURE, "Error in starting eventdev\n");

	evt_rsrc->ops.lpm_event_loop =
		lpm_event_loop[evt_rsrc->tx_mode_q][evt_rsrc->has_burst];

	evt_rsrc->ops.em_event_loop =
		em_event_loop[evt_rsrc->tx_mode_q][evt_rsrc->has_burst];
}