/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */

#include <stdlib.h>
#include <string.h>

#include <rte_mempool.h>
#include <rte_mbuf.h>
#include <rte_ethdev.h>
#include <rte_swx_port_ethdev.h>
#include <rte_swx_port_ring.h>
#include <rte_swx_port_source_sink.h>
#include <rte_swx_table_em.h>
#include <rte_swx_table_wm.h>
#include <rte_swx_pipeline.h>
#include <rte_swx_ctl.h>

#include "obj.h"

/*
 * mempool
 */
TAILQ_HEAD(mempool_list, mempool);

/*
 * link
 */
TAILQ_HEAD(link_list, link);

/*
 * ring
 */
TAILQ_HEAD(ring_list, ring);

/*
 * pipeline
 */
TAILQ_HEAD(pipeline_list, pipeline);

/*
 * obj
 */
struct obj {
	struct mempool_list mempool_list;
	struct link_list link_list;
	struct ring_list ring_list;
	struct pipeline_list pipeline_list;
};

/*
 * mempool
 */
#define BUFFER_SIZE_MIN (sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM)

struct mempool *
mempool_create(struct obj *obj, const char *name, struct mempool_params *params)
{
	struct mempool *mempool;
	struct rte_mempool *m;

	/* Check input params */
	if ((name == NULL) ||
		mempool_find(obj, name) ||
		(params == NULL) ||
		(params->buffer_size < BUFFER_SIZE_MIN) ||
		(params->pool_size == 0))
		return NULL;

	/* Resource create */
	m = rte_pktmbuf_pool_create(
		name,
		params->pool_size,
		params->cache_size,
		0,
		params->buffer_size - sizeof(struct rte_mbuf),
		params->cpu_id);

	if (m == NULL)
		return NULL;

	/* Node allocation */
	mempool = calloc(1, sizeof(struct mempool));
	if (mempool == NULL) {
		rte_mempool_free(m);
		return NULL;
	}

	/* Node fill in */
	strlcpy(mempool->name, name, sizeof(mempool->name));
	mempool->m = m;
	mempool->buffer_size = params->buffer_size;

	/* Node add to list */
	TAILQ_INSERT_TAIL(&obj->mempool_list, mempool, node);

	return mempool;
}

struct mempool *
mempool_find(struct obj *obj, const char *name)
{
	struct mempool *mempool;

	if (!obj || !name)
		return NULL;

	TAILQ_FOREACH(mempool, &obj->mempool_list, node)
		if (strcmp(mempool->name, name) == 0)
			return mempool;

	return NULL;
}

/*
 * link
 */
static struct rte_eth_conf port_conf_default = {
	.link_speeds = 0,
	.rxmode = {
		.mq_mode = ETH_MQ_RX_NONE,
		.max_rx_pkt_len = 9000, /* Jumbo frame max packet len */
		.split_hdr_size = 0, /* Header split buffer size */
	},
	.rx_adv_conf = {
		.rss_conf = {
			.rss_key = NULL,
			.rss_key_len = 40,
			.rss_hf = 0,
		},
	},
	.txmode = {
		.mq_mode = ETH_MQ_TX_NONE,
	},
	.lpbk_mode = 0,
};

#define RETA_CONF_SIZE (ETH_RSS_RETA_SIZE_512 / RTE_RETA_GROUP_SIZE)

static int
rss_setup(uint16_t port_id,
	uint16_t reta_size,
	struct link_params_rss *rss)
{
	struct rte_eth_rss_reta_entry64 reta_conf[RETA_CONF_SIZE];
	uint32_t i;
	int status;

	/* RETA setting */
	memset(reta_conf, 0, sizeof(reta_conf));

	for (i = 0; i < reta_size; i++)
		reta_conf[i / RTE_RETA_GROUP_SIZE].mask = UINT64_MAX;

	for (i = 0; i < reta_size; i++) {
		uint32_t reta_id = i / RTE_RETA_GROUP_SIZE;
		uint32_t reta_pos = i % RTE_RETA_GROUP_SIZE;
		uint32_t rss_qs_pos = i % rss->n_queues;

		reta_conf[reta_id].reta[reta_pos] =
			(uint16_t) rss->queue_id[rss_qs_pos];
	}

	/* RETA update */
	status = rte_eth_dev_rss_reta_update(port_id,
		reta_conf,
		reta_size);

	return status;
}
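
/*
 * Example of the resulting RETA layout: with reta_size = 128 and
 * rss->n_queues = 4, rss_setup() programs the redirection table entries as
 * 0, 1, 2, 3, 0, 1, 2, 3, ..., i.e. RX queues are assigned to RETA slots
 * round-robin so the RSS hash space is spread evenly across the configured
 * queues.
 */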

struct link *
link_create(struct obj *obj, const char *name, struct link_params *params)
{
	struct rte_eth_dev_info port_info;
	struct rte_eth_conf port_conf;
	struct link *link;
	struct link_params_rss *rss;
	struct mempool *mempool;
	uint32_t cpu_id, i;
	int status;
	uint16_t port_id;

	/* Check input params */
	if ((name == NULL) ||
		link_find(obj, name) ||
		(params == NULL) ||
		(params->rx.n_queues == 0) ||
		(params->rx.queue_size == 0) ||
		(params->tx.n_queues == 0) ||
		(params->tx.queue_size == 0))
		return NULL;

	port_id = params->port_id;
	if (params->dev_name) {
		status = rte_eth_dev_get_port_by_name(params->dev_name,
			&port_id);

		if (status)
			return NULL;
	} else
		if (!rte_eth_dev_is_valid_port(port_id))
			return NULL;

	if (rte_eth_dev_info_get(port_id, &port_info) != 0)
		return NULL;

	mempool = mempool_find(obj, params->rx.mempool_name);
	if (mempool == NULL)
		return NULL;

	rss = params->rx.rss;
	if (rss) {
		if ((port_info.reta_size == 0) ||
			(port_info.reta_size > ETH_RSS_RETA_SIZE_512))
			return NULL;

		if ((rss->n_queues == 0) ||
			(rss->n_queues >= LINK_RXQ_RSS_MAX))
			return NULL;

		for (i = 0; i < rss->n_queues; i++)
			if (rss->queue_id[i] >= port_info.max_rx_queues)
				return NULL;
	}

	/**
	 * Resource create
	 */
	/* Port */
	memcpy(&port_conf, &port_conf_default, sizeof(port_conf));
	if (rss) {
		port_conf.rxmode.mq_mode = ETH_MQ_RX_RSS;
		port_conf.rx_adv_conf.rss_conf.rss_hf =
			(ETH_RSS_IP | ETH_RSS_TCP | ETH_RSS_UDP) &
			port_info.flow_type_rss_offloads;
	}

	cpu_id = (uint32_t) rte_eth_dev_socket_id(port_id);
	if (cpu_id == (uint32_t) SOCKET_ID_ANY)
		cpu_id = 0;

	status = rte_eth_dev_configure(
		port_id,
		params->rx.n_queues,
		params->tx.n_queues,
		&port_conf);

	if (status < 0)
		return NULL;

	if (params->promiscuous) {
		status = rte_eth_promiscuous_enable(port_id);
		if (status != 0)
			return NULL;
	}

	/* Port RX */
	for (i = 0; i < params->rx.n_queues; i++) {
		status = rte_eth_rx_queue_setup(
			port_id,
			i,
			params->rx.queue_size,
			cpu_id,
			NULL,
			mempool->m);

		if (status < 0)
			return NULL;
	}

	/* Port TX */
	for (i = 0; i < params->tx.n_queues; i++) {
		status = rte_eth_tx_queue_setup(
			port_id,
			i,
			params->tx.queue_size,
			cpu_id,
			NULL);

		if (status < 0)
			return NULL;
	}

	/* Port start */
	status = rte_eth_dev_start(port_id);
	if (status < 0)
		return NULL;

	if (rss) {
		status = rss_setup(port_id, port_info.reta_size, rss);

		if (status) {
			rte_eth_dev_stop(port_id);
			return NULL;
		}
	}

	/* Port link up */
	status = rte_eth_dev_set_link_up(port_id);
	if ((status < 0) && (status != -ENOTSUP)) {
		rte_eth_dev_stop(port_id);
		return NULL;
	}

	/* Node allocation */
	link = calloc(1, sizeof(struct link));
	if (link == NULL) {
		rte_eth_dev_stop(port_id);
		return NULL;
	}

	/* Node fill in */
	strlcpy(link->name, name, sizeof(link->name));
	link->port_id = port_id;
	rte_eth_dev_get_name_by_port(port_id, link->dev_name);
	link->n_rxq = params->rx.n_queues;
	link->n_txq = params->tx.n_queues;

	/* Node add to list */
	TAILQ_INSERT_TAIL(&obj->link_list, link, node);

	return link;
}
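
/*
 * Usage sketch for link_create(): configure an Ethernet device with 4 RX and
 * 4 TX queues drawing buffers from an existing mempool named "MEMPOOL0". The
 * struct link_params field layout is assumed from the way the fields are read
 * above; the device name, queue counts and queue sizes are illustrative only.
 */
#if 0
static struct link *
example_link_setup(struct obj *obj)
{
	struct link_params params = {
		.dev_name = "0000:18:00.0", /* hypothetical PCI device name */
		.port_id = 0, /* used only when dev_name is NULL */
		.rx = {
			.n_queues = 4,
			.queue_size = 1024,
			.mempool_name = "MEMPOOL0",
			.rss = NULL, /* skip RSS redirection table setup */
		},
		.tx = {
			.n_queues = 4,
			.queue_size = 1024,
		},
		.promiscuous = 1,
	};

	return link_create(obj, "LINK0", &params);
}
#endif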

int
link_is_up(struct obj *obj, const char *name)
{
	struct rte_eth_link link_params;
	struct link *link;

	/* Check input params */
	if (!obj || !name)
		return 0;

	link = link_find(obj, name);
	if (link == NULL)
		return 0;

	/* Resource */
	if (rte_eth_link_get(link->port_id, &link_params) < 0)
		return 0;

	return (link_params.link_status == ETH_LINK_DOWN) ? 0 : 1;
}

struct link *
link_find(struct obj *obj, const char *name)
{
	struct link *link;

	if (!obj || !name)
		return NULL;

	TAILQ_FOREACH(link, &obj->link_list, node)
		if (strcmp(link->name, name) == 0)
			return link;

	return NULL;
}

struct link *
link_next(struct obj *obj, struct link *link)
{
	return (link == NULL) ?
		TAILQ_FIRST(&obj->link_list) : TAILQ_NEXT(link, node);
}

/*
 * ring
 */
struct ring *
ring_create(struct obj *obj, const char *name, struct ring_params *params)
{
	struct ring *ring;
	struct rte_ring *r;
	unsigned int flags = RING_F_SP_ENQ | RING_F_SC_DEQ;

	/* Check input params */
	if (!name || ring_find(obj, name) || !params || !params->size)
		return NULL;

	/**
	 * Resource create
	 */
	r = rte_ring_create(
		name,
		params->size,
		params->numa_node,
		flags);
	if (!r)
		return NULL;

	/* Node allocation */
	ring = calloc(1, sizeof(struct ring));
	if (!ring) {
		rte_ring_free(r);
		return NULL;
	}

	/* Node fill in */
	strlcpy(ring->name, name, sizeof(ring->name));

	/* Node add to list */
	TAILQ_INSERT_TAIL(&obj->ring_list, ring, node);

	return ring;
}

struct ring *
ring_find(struct obj *obj, const char *name)
{
	struct ring *ring;

	if (!obj || !name)
		return NULL;

	TAILQ_FOREACH(ring, &obj->ring_list, node)
		if (strcmp(ring->name, name) == 0)
			return ring;

	return NULL;
}
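
/*
 * Usage sketch for ring_create(): build a single-producer/single-consumer
 * software queue that the application can later attach to a pipeline as a
 * "ring" input or output port. The struct ring_params field layout is assumed
 * from the fields read above; the size and NUMA node are illustrative only.
 */
#if 0
static struct ring *
example_ring_setup(struct obj *obj)
{
	struct ring_params params = {
		.size = 1024, /* number of ring slots (power of 2) */
		.numa_node = 0, /* socket where the ring memory is allocated */
	};

	return ring_create(obj, "RING0", &params);
}
#endif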

/*
 * pipeline
 */
#ifndef PIPELINE_MSGQ_SIZE
#define PIPELINE_MSGQ_SIZE 64
#endif

struct pipeline *
pipeline_create(struct obj *obj, const char *name, int numa_node)
{
	struct pipeline *pipeline;
	struct rte_swx_pipeline *p = NULL;
	int status;

	/* Check input params */
	if ((name == NULL) ||
		pipeline_find(obj, name))
		return NULL;

	/* Resource create */
	status = rte_swx_pipeline_config(&p, numa_node);
	if (status)
		goto error;

	status = rte_swx_pipeline_port_in_type_register(p,
		"ethdev",
		&rte_swx_port_ethdev_reader_ops);
	if (status)
		goto error;

	status = rte_swx_pipeline_port_out_type_register(p,
		"ethdev",
		&rte_swx_port_ethdev_writer_ops);
	if (status)
		goto error;

	status = rte_swx_pipeline_port_in_type_register(p,
		"ring",
		&rte_swx_port_ring_reader_ops);
	if (status)
		goto error;

	status = rte_swx_pipeline_port_out_type_register(p,
		"ring",
		&rte_swx_port_ring_writer_ops);
	if (status)
		goto error;

#ifdef RTE_PORT_PCAP
	status = rte_swx_pipeline_port_in_type_register(p,
		"source",
		&rte_swx_port_source_ops);
	if (status)
		goto error;
#endif

	status = rte_swx_pipeline_port_out_type_register(p,
		"sink",
		&rte_swx_port_sink_ops);
	if (status)
		goto error;

	status = rte_swx_pipeline_table_type_register(p,
		"exact",
		RTE_SWX_TABLE_MATCH_EXACT,
		&rte_swx_table_exact_match_ops);
	if (status)
		goto error;

	status = rte_swx_pipeline_table_type_register(p,
		"wildcard",
		RTE_SWX_TABLE_MATCH_WILDCARD,
		&rte_swx_table_wildcard_match_ops);
	if (status)
		goto error;

	/* Node allocation */
	pipeline = calloc(1, sizeof(struct pipeline));
	if (pipeline == NULL)
		goto error;

	/* Node fill in */
	strlcpy(pipeline->name, name, sizeof(pipeline->name));
	pipeline->p = p;
	pipeline->timer_period_ms = 10;

	/* Node add to list */
	TAILQ_INSERT_TAIL(&obj->pipeline_list, pipeline, node);

	return pipeline;

error:
	rte_swx_pipeline_free(p);
	return NULL;
}

struct pipeline *
pipeline_find(struct obj *obj, const char *name)
{
	struct pipeline *pipeline;

	if (!obj || !name)
		return NULL;

	TAILQ_FOREACH(pipeline, &obj->pipeline_list, node)
		if (strcmp(name, pipeline->name) == 0)
			return pipeline;

	return NULL;
}

/*
 * obj
 */
struct obj *
obj_init(void)
{
	struct obj *obj;

	obj = calloc(1, sizeof(struct obj));
	if (!obj)
		return NULL;

	TAILQ_INIT(&obj->mempool_list);
	TAILQ_INIT(&obj->link_list);
	TAILQ_INIT(&obj->ring_list);
	TAILQ_INIT(&obj->pipeline_list);

	return obj;
}
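
/*
 * End-to-end usage sketch: typical initialization order for the objects
 * defined above. The struct mempool_params field layout is assumed from the
 * fields read in mempool_create(); the names and sizes are illustrative only.
 * Loading a pipeline program and attaching ports to it are driven elsewhere
 * in the application and are omitted here.
 */
#if 0
static int
example_init(void)
{
	struct obj *obj;
	struct mempool_params mp_params = {
		.buffer_size = 2048 + sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM,
		.pool_size = 32 * 1024,
		.cache_size = 256,
		.cpu_id = 0,
	};

	obj = obj_init();
	if (obj == NULL)
		return -1;

	if (mempool_create(obj, "MEMPOOL0", &mp_params) == NULL)
		return -1;

	if (pipeline_create(obj, "PIPELINE0", 0 /* numa_node */) == NULL)
		return -1;

	return 0;
}
#endif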