/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include <stdlib.h>
#include <string.h>
#include <netinet/in.h>
#ifdef RTE_EXEC_ENV_LINUX
#include <linux/if.h>
#include <linux/if_tun.h>
#endif
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>

#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_swx_ctl.h>

#include "obj.h"

/*
 * mempool
 */
TAILQ_HEAD(mempool_list, mempool);

/*
 * link
 */
TAILQ_HEAD(link_list, link);

/*
 * ring
 */
TAILQ_HEAD(ring_list, ring);

/*
 * obj
 */
struct obj {
	struct mempool_list mempool_list;
	struct link_list link_list;
	struct ring_list ring_list;
};

/*
 * mempool
 */
#define BUFFER_SIZE_MIN (sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)

struct mempool *
mempool_create(struct obj *obj, const char *name, struct mempool_params *params)
{
	struct mempool *mempool;
	struct rte_mempool *m;

	/* Check input params */
	if ((name == NULL) ||
		mempool_find(obj, name) ||
		(params == NULL) ||
		(params->buffer_size < BUFFER_SIZE_MIN) ||
		(params->pool_size == 0))
		return NULL;

	/* Resource create */
	m = rte_pktmbuf_pool_create(
		name,
		params->pool_size,
		params->cache_size,
		0,
		params->buffer_size - sizeof(struct rte_mbuf),
		params->cpu_id);

	if (m == NULL)
		return NULL;

	/* Node allocation */
	mempool = calloc(1, sizeof(struct mempool));
	if (mempool == NULL) {
		rte_mempool_free(m);
		return NULL;
	}

	/* Node fill in */
	strlcpy(mempool->name, name, sizeof(mempool->name));
	mempool->m = m;
	mempool->buffer_size = params->buffer_size;

	/* Node add to list */
	TAILQ_INSERT_TAIL(&obj->mempool_list, mempool, node);

	return mempool;
}

struct mempool *
mempool_find(struct obj *obj, const char *name)
{
	struct mempool *mempool;

	if (!obj || !name)
		return NULL;

	TAILQ_FOREACH(mempool, &obj->mempool_list, node)
		if (strcmp(mempool->name, name) == 0)
			return mempool;

	return NULL;
}
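/*
 * Usage sketch for the mempool API above (illustrative only: the pool name
 * "MEMPOOL0" and the sizing values are hypothetical, not taken from this
 * file). buffer_size must be at least BUFFER_SIZE_MIN and pool_size must be
 * non-zero, otherwise mempool_create() rejects the request and returns NULL:
 *
 *	struct mempool_params mp_params = {
 *		.buffer_size = 2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM,
 *		.pool_size = 16 * 1024,
 *		.cache_size = 256,
 *		.cpu_id = 0,
 *	};
 *
 *	struct mempool *mp = mempool_create(obj, "MEMPOOL0", &mp_params);
 */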
/*
 * link
 */
static struct rte_eth_conf port_conf_default = {
	.link_speeds = 0,
	.rxmode = {
		.mq_mode = RTE_ETH_MQ_RX_NONE,
		.mtu = 9000 - (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN), /* Jumbo frame MTU */
	},
	.rx_adv_conf = {
		.rss_conf = {
			.rss_key = NULL,
			.rss_key_len = 40,
			.rss_hf = 0,
		},
	},
	.txmode = {
		.mq_mode = RTE_ETH_MQ_TX_NONE,
	},
	.lpbk_mode = 0,
};

#define RETA_CONF_SIZE (RTE_ETH_RSS_RETA_SIZE_512 / RTE_ETH_RETA_GROUP_SIZE)

static int
rss_setup(uint16_t port_id,
	uint16_t reta_size,
	struct link_params_rss *rss)
{
	struct rte_eth_rss_reta_entry64 reta_conf[RETA_CONF_SIZE];
	uint32_t i;
	int status;

	/* RETA setting */
	memset(reta_conf, 0, sizeof(reta_conf));

	for (i = 0; i < reta_size; i++)
		reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].mask = UINT64_MAX;

	for (i = 0; i < reta_size; i++) {
		uint32_t reta_id = i / RTE_ETH_RETA_GROUP_SIZE;
		uint32_t reta_pos = i % RTE_ETH_RETA_GROUP_SIZE;
		uint32_t rss_qs_pos = i % rss->n_queues;

		reta_conf[reta_id].reta[reta_pos] =
			(uint16_t) rss->queue_id[rss_qs_pos];
	}

	/* RETA update */
	status = rte_eth_dev_rss_reta_update(port_id,
		reta_conf,
		reta_size);

	return status;
}

struct link *
link_create(struct obj *obj, const char *name, struct link_params *params)
{
	struct rte_eth_dev_info port_info;
	struct rte_eth_conf port_conf;
	struct link *link;
	struct link_params_rss *rss;
	struct mempool *mempool;
	uint32_t cpu_id, i;
	int status;
	uint16_t port_id = 0;

	/* Check input params */
	if ((name == NULL) ||
		link_find(obj, name) ||
		(params == NULL) ||
		(params->rx.n_queues == 0) ||
		(params->rx.queue_size == 0) ||
		(params->tx.n_queues == 0) ||
		(params->tx.queue_size == 0))
		return NULL;

	status = rte_eth_dev_get_port_by_name(name, &port_id);
	if (status)
		return NULL;

	if (rte_eth_dev_info_get(port_id, &port_info) != 0)
		return NULL;

	mempool = mempool_find(obj, params->rx.mempool_name);
	if (mempool == NULL)
		return NULL;

	rss = params->rx.rss;
	if (rss) {
		if ((port_info.reta_size == 0) ||
			(port_info.reta_size > RTE_ETH_RSS_RETA_SIZE_512))
			return NULL;

		if ((rss->n_queues == 0) ||
			(rss->n_queues >= LINK_RXQ_RSS_MAX))
			return NULL;

		for (i = 0; i < rss->n_queues; i++)
			if (rss->queue_id[i] >= port_info.max_rx_queues)
				return NULL;
	}

	/**
	 * Resource create
	 */
	/* Port */
	memcpy(&port_conf, &port_conf_default, sizeof(port_conf));
	if (rss) {
		port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
		port_conf.rx_adv_conf.rss_conf.rss_hf =
			(RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP | RTE_ETH_RSS_UDP) &
			port_info.flow_type_rss_offloads;
	}

	cpu_id = (uint32_t) rte_eth_dev_socket_id(port_id);
	if (cpu_id == (uint32_t) SOCKET_ID_ANY)
		cpu_id = 0;

	status = rte_eth_dev_configure(
		port_id,
		params->rx.n_queues,
		params->tx.n_queues,
		&port_conf);

	if (status < 0)
		return NULL;

	if (params->promiscuous) {
		status = rte_eth_promiscuous_enable(port_id);
		if (status != 0)
			return NULL;
	}

	/* Port RX */
	for (i = 0; i < params->rx.n_queues; i++) {
		status = rte_eth_rx_queue_setup(
			port_id,
			i,
			params->rx.queue_size,
			cpu_id,
			NULL,
			mempool->m);

		if (status < 0)
			return NULL;
	}

	/* Port TX */
	for (i = 0; i < params->tx.n_queues; i++) {
		status = rte_eth_tx_queue_setup(
			port_id,
			i,
			params->tx.queue_size,
			cpu_id,
			NULL);

		if (status < 0)
			return NULL;
	}

	/* Port start */
	status = rte_eth_dev_start(port_id);
	if (status < 0)
		return NULL;

	if (rss) {
		status = rss_setup(port_id, port_info.reta_size, rss);

		if (status) {
			rte_eth_dev_stop(port_id);
			return NULL;
		}
	}

	/* Port link up */
	status = rte_eth_dev_set_link_up(port_id);
	if ((status < 0) && (status != -ENOTSUP)) {
		rte_eth_dev_stop(port_id);
		return NULL;
	}

	/* Node allocation */
	link = calloc(1, sizeof(struct link));
	if (link == NULL) {
		rte_eth_dev_stop(port_id);
		return NULL;
	}

	/* Node fill in */
	strlcpy(link->name, name, sizeof(link->name));
	link->port_id = port_id;
	link->n_rxq = params->rx.n_queues;
	link->n_txq = params->tx.n_queues;

	/* Node add to list */
	TAILQ_INSERT_TAIL(&obj->link_list, link, node);

	return link;
}
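/*
 * Usage sketch for link_create() above (illustrative only: the device name,
 * queue counts and queue sizes are hypothetical). The name is resolved
 * through rte_eth_dev_get_port_by_name(), so it must identify an existing
 * ethdev port, and rx.mempool_name must refer to a mempool previously
 * registered with mempool_create(). Pointing rx.rss at a populated
 * struct link_params_rss enables RSS and programs the RETA via rss_setup():
 *
 *	struct link_params l_params = {
 *		.rx = {
 *			.n_queues = 1,
 *			.queue_size = 1024,
 *			.mempool_name = "MEMPOOL0",
 *			.rss = NULL,
 *		},
 *		.tx = {
 *			.n_queues = 1,
 *			.queue_size = 1024,
 *		},
 *		.promiscuous = 1,
 *	};
 *
 *	struct link *l = link_create(obj, "0000:18:00.0", &l_params);
 */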
int
link_is_up(struct obj *obj, const char *name)
{
	struct rte_eth_link link_params;
	struct link *link;

	/* Check input params */
	if (!obj || !name)
		return 0;

	link = link_find(obj, name);
	if (link == NULL)
		return 0;

	/* Resource */
	if (rte_eth_link_get(link->port_id, &link_params) < 0)
		return 0;

	return (link_params.link_status == RTE_ETH_LINK_DOWN) ? 0 : 1;
}

struct link *
link_find(struct obj *obj, const char *name)
{
	struct link *link;

	if (!obj || !name)
		return NULL;

	TAILQ_FOREACH(link, &obj->link_list, node)
		if (strcmp(link->name, name) == 0)
			return link;

	return NULL;
}

struct link *
link_next(struct obj *obj, struct link *link)
{
	return (link == NULL) ?
		TAILQ_FIRST(&obj->link_list) : TAILQ_NEXT(link, node);
}

/*
 * ring
 */
struct ring *
ring_create(struct obj *obj, const char *name, struct ring_params *params)
{
	struct ring *ring;
	struct rte_ring *r;
	unsigned int flags = RING_F_SP_ENQ | RING_F_SC_DEQ;

	/* Check input params */
	if (!name || ring_find(obj, name) || !params || !params->size)
		return NULL;

	/**
	 * Resource create
	 */
	r = rte_ring_create(
		name,
		params->size,
		params->numa_node,
		flags);
	if (!r)
		return NULL;

	/* Node allocation */
	ring = calloc(1, sizeof(struct ring));
	if (!ring) {
		rte_ring_free(r);
		return NULL;
	}

	/* Node fill in */
	strlcpy(ring->name, name, sizeof(ring->name));

	/* Node add to list */
	TAILQ_INSERT_TAIL(&obj->ring_list, ring, node);

	return ring;
}

struct ring *
ring_find(struct obj *obj, const char *name)
{
	struct ring *ring;

	if (!obj || !name)
		return NULL;

	TAILQ_FOREACH(ring, &obj->ring_list, node)
		if (strcmp(ring->name, name) == 0)
			return ring;

	return NULL;
}

/*
 * obj
 */
struct obj *
obj_init(void)
{
	struct obj *obj;

	obj = calloc(1, sizeof(struct obj));
	if (!obj)
		return NULL;

	TAILQ_INIT(&obj->mempool_list);
	TAILQ_INIT(&obj->link_list);
	TAILQ_INIT(&obj->ring_list);

	return obj;
}
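/*
 * Usage sketch for the obj and ring APIs above (illustrative only: the ring
 * name and sizing values are hypothetical). obj_init() creates the container
 * that all the create/find helpers in this file operate on. ring_create()
 * requires a non-zero size, and since RING_F_EXACT_SZ is not passed,
 * rte_ring_create() expects that size to be a power of two:
 *
 *	struct obj *obj = obj_init();
 *
 *	struct ring_params r_params = {
 *		.size = 4096,
 *		.numa_node = 0,
 *	};
 *
 *	struct ring *r = ring_create(obj, "RING0", &r_params);
 */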