/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include <stdlib.h>
#include <string.h>
#include <netinet/in.h>
#ifdef RTE_EXEC_ENV_LINUX
#include <linux/if.h>
#include <linux/if_tun.h>
#endif
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>

#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_swx_ctl.h>

#include "obj.h"

/*
 * mempool
 */
TAILQ_HEAD(mempool_list, mempool);

/*
 * link
 */
TAILQ_HEAD(link_list, link);

/*
 * ring
 */
TAILQ_HEAD(ring_list, ring);

/*
 * tap
 */
TAILQ_HEAD(tap_list, tap);

/*
 * obj
 */
struct obj {
	struct mempool_list mempool_list;
	struct link_list link_list;
	struct ring_list ring_list;
	struct tap_list tap_list;
};

/*
 * mempool
 */
#define BUFFER_SIZE_MIN (sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)

struct mempool *
mempool_create(struct obj *obj, const char *name, struct mempool_params *params)
{
	struct mempool *mempool;
	struct rte_mempool *m;

	/* Check input params */
	if ((name == NULL) ||
	    mempool_find(obj, name) ||
	    (params == NULL) ||
	    (params->buffer_size < BUFFER_SIZE_MIN) ||
	    (params->pool_size == 0))
		return NULL;

	/* Resource create */
	m = rte_pktmbuf_pool_create(
		name,
		params->pool_size,
		params->cache_size,
		0,
		params->buffer_size - sizeof(struct rte_mbuf),
		params->cpu_id);

	if (m == NULL)
		return NULL;

	/* Node allocation */
	mempool = calloc(1, sizeof(struct mempool));
	if (mempool == NULL) {
		rte_mempool_free(m);
		return NULL;
	}

	/* Node fill in */
	strlcpy(mempool->name, name, sizeof(mempool->name));
	mempool->m = m;
	mempool->buffer_size = params->buffer_size;

	/* Node add to list */
	TAILQ_INSERT_TAIL(&obj->mempool_list, mempool, node);

	return mempool;
}

struct mempool *
mempool_find(struct obj *obj, const char *name)
{
	struct mempool *mempool;

	if (!obj || !name)
		return NULL;

	TAILQ_FOREACH(mempool, &obj->mempool_list, node)
		if (strcmp(mempool->name, name) == 0)
			return mempool;

	return NULL;
}

/*
 * link
 */
static struct rte_eth_conf port_conf_default = {
	.link_speeds = 0,
	.rxmode = {
		.mq_mode = RTE_ETH_MQ_RX_NONE,
		.mtu = 9000 - (RTE_ETHER_HDR_LEN + RTE_ETHER_CRC_LEN), /* Jumbo frame MTU */
		.split_hdr_size = 0, /* Header split buffer size */
	},
	.rx_adv_conf = {
		.rss_conf = {
			.rss_key = NULL,
			.rss_key_len = 40,
			.rss_hf = 0,
		},
	},
	.txmode = {
		.mq_mode = RTE_ETH_MQ_TX_NONE,
	},
	.lpbk_mode = 0,
};

#define RETA_CONF_SIZE (RTE_ETH_RSS_RETA_SIZE_512 / RTE_ETH_RETA_GROUP_SIZE)

static int
rss_setup(uint16_t port_id,
	uint16_t reta_size,
	struct link_params_rss *rss)
{
	struct rte_eth_rss_reta_entry64 reta_conf[RETA_CONF_SIZE];
	uint32_t i;
	int status;

	/* RETA setting */
	memset(reta_conf, 0, sizeof(reta_conf));

	for (i = 0; i < reta_size; i++)
		reta_conf[i / RTE_ETH_RETA_GROUP_SIZE].mask = UINT64_MAX;

	for (i = 0; i < reta_size; i++) {
		uint32_t reta_id = i / RTE_ETH_RETA_GROUP_SIZE;
		uint32_t reta_pos = i % RTE_ETH_RETA_GROUP_SIZE;
		uint32_t rss_qs_pos = i % rss->n_queues;

		reta_conf[reta_id].reta[reta_pos] =
			(uint16_t) rss->queue_id[rss_qs_pos];
	}

	/* RETA update */
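	/* The redirection table maps RSS hash values to RX queue IDs: the
	 * loop above spreads the configured queues round-robin across all
	 * reta_size entries, and the call below programs the table into the
	 * port.
	 */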
	status = rte_eth_dev_rss_reta_update(port_id,
		reta_conf,
		reta_size);

	return status;
}

struct link *
link_create(struct obj *obj, const char *name, struct link_params *params)
{
	struct rte_eth_dev_info port_info;
	struct rte_eth_conf port_conf;
	struct link *link;
	struct link_params_rss *rss;
	struct mempool *mempool;
	uint32_t cpu_id, i;
	int status;
	uint16_t port_id;

	/* Check input params */
	if ((name == NULL) ||
	    link_find(obj, name) ||
	    (params == NULL) ||
	    (params->rx.n_queues == 0) ||
	    (params->rx.queue_size == 0) ||
	    (params->tx.n_queues == 0) ||
	    (params->tx.queue_size == 0))
		return NULL;

	port_id = params->port_id;
	if (params->dev_name) {
		status = rte_eth_dev_get_port_by_name(params->dev_name,
			&port_id);

		if (status)
			return NULL;
	} else
		if (!rte_eth_dev_is_valid_port(port_id))
			return NULL;

	if (rte_eth_dev_info_get(port_id, &port_info) != 0)
		return NULL;

	mempool = mempool_find(obj, params->rx.mempool_name);
	if (mempool == NULL)
		return NULL;

	rss = params->rx.rss;
	if (rss) {
		if ((port_info.reta_size == 0) ||
		    (port_info.reta_size > RTE_ETH_RSS_RETA_SIZE_512))
			return NULL;

		if ((rss->n_queues == 0) ||
		    (rss->n_queues >= LINK_RXQ_RSS_MAX))
			return NULL;

		for (i = 0; i < rss->n_queues; i++)
			if (rss->queue_id[i] >= port_info.max_rx_queues)
				return NULL;
	}

	/**
	 * Resource create
	 */
	/* Port */
	memcpy(&port_conf, &port_conf_default, sizeof(port_conf));
	if (rss) {
		port_conf.rxmode.mq_mode = RTE_ETH_MQ_RX_RSS;
		port_conf.rx_adv_conf.rss_conf.rss_hf =
			(RTE_ETH_RSS_IP | RTE_ETH_RSS_TCP | RTE_ETH_RSS_UDP) &
			port_info.flow_type_rss_offloads;
	}

	cpu_id = (uint32_t) rte_eth_dev_socket_id(port_id);
	if (cpu_id == (uint32_t) SOCKET_ID_ANY)
		cpu_id = 0;

	status = rte_eth_dev_configure(
		port_id,
		params->rx.n_queues,
		params->tx.n_queues,
		&port_conf);

	if (status < 0)
		return NULL;

	if (params->promiscuous) {
		status = rte_eth_promiscuous_enable(port_id);
		if (status != 0)
			return NULL;
	}

	/* Port RX */
	for (i = 0; i < params->rx.n_queues; i++) {
		status = rte_eth_rx_queue_setup(
			port_id,
			i,
			params->rx.queue_size,
			cpu_id,
			NULL,
			mempool->m);

		if (status < 0)
			return NULL;
	}

	/* Port TX */
	for (i = 0; i < params->tx.n_queues; i++) {
		status = rte_eth_tx_queue_setup(
			port_id,
			i,
			params->tx.queue_size,
			cpu_id,
			NULL);

		if (status < 0)
			return NULL;
	}

	/* Port start */
	status = rte_eth_dev_start(port_id);
	if (status < 0)
		return NULL;

	if (rss) {
		status = rss_setup(port_id, port_info.reta_size, rss);

		if (status) {
			rte_eth_dev_stop(port_id);
			return NULL;
		}
	}

	/* Port link up */
	status = rte_eth_dev_set_link_up(port_id);
	if ((status < 0) && (status != -ENOTSUP)) {
		rte_eth_dev_stop(port_id);
		return NULL;
	}
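
	/* From this point on the port is configured and started; if anything
	 * below fails, the port is stopped before returning so the device is
	 * not left running without a tracking node.
	 */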
	/* Node allocation */
	link = calloc(1, sizeof(struct link));
	if (link == NULL) {
		rte_eth_dev_stop(port_id);
		return NULL;
	}

	/* Node fill in */
	strlcpy(link->name, name, sizeof(link->name));
	link->port_id = port_id;
	rte_eth_dev_get_name_by_port(port_id, link->dev_name);
	link->n_rxq = params->rx.n_queues;
	link->n_txq = params->tx.n_queues;

	/* Node add to list */
	TAILQ_INSERT_TAIL(&obj->link_list, link, node);

	return link;
}

int
link_is_up(struct obj *obj, const char *name)
{
	struct rte_eth_link link_params;
	struct link *link;

	/* Check input params */
	if (!obj || !name)
		return 0;

	link = link_find(obj, name);
	if (link == NULL)
		return 0;

	/* Resource */
	if (rte_eth_link_get(link->port_id, &link_params) < 0)
		return 0;

	return (link_params.link_status == RTE_ETH_LINK_DOWN) ? 0 : 1;
}

struct link *
link_find(struct obj *obj, const char *name)
{
	struct link *link;

	if (!obj || !name)
		return NULL;

	TAILQ_FOREACH(link, &obj->link_list, node)
		if (strcmp(link->name, name) == 0)
			return link;

	return NULL;
}

struct link *
link_next(struct obj *obj, struct link *link)
{
	return (link == NULL) ?
		TAILQ_FIRST(&obj->link_list) : TAILQ_NEXT(link, node);
}

/*
 * ring
 */
struct ring *
ring_create(struct obj *obj, const char *name, struct ring_params *params)
{
	struct ring *ring;
	struct rte_ring *r;
	unsigned int flags = RING_F_SP_ENQ | RING_F_SC_DEQ;

	/* Check input params */
	if (!name || ring_find(obj, name) || !params || !params->size)
		return NULL;

	/**
	 * Resource create
	 */
	r = rte_ring_create(
		name,
		params->size,
		params->numa_node,
		flags);
	if (!r)
		return NULL;

	/* Node allocation */
	ring = calloc(1, sizeof(struct ring));
	if (!ring) {
		rte_ring_free(r);
		return NULL;
	}

	/* Node fill in */
	strlcpy(ring->name, name, sizeof(ring->name));

	/* Node add to list */
	TAILQ_INSERT_TAIL(&obj->ring_list, ring, node);

	return ring;
}

struct ring *
ring_find(struct obj *obj, const char *name)
{
	struct ring *ring;

	if (!obj || !name)
		return NULL;

	TAILQ_FOREACH(ring, &obj->ring_list, node)
		if (strcmp(ring->name, name) == 0)
			return ring;

	return NULL;
}

/*
 * tap
 */
#define TAP_DEV "/dev/net/tun"

struct tap *
tap_find(struct obj *obj, const char *name)
{
	struct tap *tap;

	if (!obj || !name)
		return NULL;

	TAILQ_FOREACH(tap, &obj->tap_list, node)
		if (strcmp(tap->name, name) == 0)
			return tap;

	return NULL;
}

struct tap *
tap_next(struct obj *obj, struct tap *tap)
{
	return (tap == NULL) ?
		TAILQ_FIRST(&obj->tap_list) : TAILQ_NEXT(tap, node);
}
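
/* On Linux, tap_create() builds a kernel TAP interface by opening
 * /dev/net/tun and issuing the TUNSETIFF ioctl with IFF_TAP | IFF_NO_PI;
 * on other execution environments the stub below always fails.
 */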

#ifndef RTE_EXEC_ENV_LINUX

struct tap *
tap_create(struct obj *obj __rte_unused, const char *name __rte_unused)
{
	return NULL;
}

#else

struct tap *
tap_create(struct obj *obj, const char *name)
{
	struct tap *tap;
	struct ifreq ifr;
	int fd, status;

	/* Check input params */
	if ((name == NULL) ||
	    tap_find(obj, name))
		return NULL;

	/* Resource create */
	fd = open(TAP_DEV, O_RDWR | O_NONBLOCK);
	if (fd < 0)
		return NULL;

	memset(&ifr, 0, sizeof(ifr));
	ifr.ifr_flags = IFF_TAP | IFF_NO_PI; /* No packet information */
	strlcpy(ifr.ifr_name, name, IFNAMSIZ);

	status = ioctl(fd, TUNSETIFF, (void *) &ifr);
	if (status < 0) {
		close(fd);
		return NULL;
	}

	/* Node allocation */
	tap = calloc(1, sizeof(struct tap));
	if (tap == NULL) {
		close(fd);
		return NULL;
	}

	/* Node fill in */
	strlcpy(tap->name, name, sizeof(tap->name));
	tap->fd = fd;

	/* Node add to list */
	TAILQ_INSERT_TAIL(&obj->tap_list, tap, node);

	return tap;
}

#endif

/*
 * obj
 */
struct obj *
obj_init(void)
{
	struct obj *obj;

	obj = calloc(1, sizeof(struct obj));
	if (!obj)
		return NULL;

	TAILQ_INIT(&obj->mempool_list);
	TAILQ_INIT(&obj->link_list);
	TAILQ_INIT(&obj->ring_list);
	TAILQ_INIT(&obj->tap_list);

	return obj;
}
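
/*
 * Illustrative usage sketch: a minimal call sequence for the APIs above.
 * Object names and parameter values are hypothetical.
 *
 *	struct obj *obj = obj_init();
 *
 *	struct mempool_params mp = {
 *		.buffer_size = 2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM,
 *		.pool_size = 4096,
 *		.cache_size = 256,
 *		.cpu_id = 0,
 *	};
 *	mempool_create(obj, "MEMPOOL0", &mp);
 *
 *	struct link_params lp = {
 *		.dev_name = "0000:18:00.0",
 *		.rx = {.n_queues = 1, .queue_size = 1024,
 *			.mempool_name = "MEMPOOL0", .rss = NULL},
 *		.tx = {.n_queues = 1, .queue_size = 1024},
 *		.promiscuous = 1,
 *	};
 *	link_create(obj, "LINK0", &lp);
 */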