/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2018 Intel Corporation
 */

#include <stdlib.h>
#include <string.h>

#include <rte_common.h>
#include <rte_ip.h>
#include <rte_tcp.h>

#include <rte_string_fns.h>
#include <rte_port_ethdev.h>
#include <rte_port_ring.h>
#include <rte_port_source_sink.h>
#include <rte_port_fd.h>
#include <rte_port_sched.h>
#include <rte_port_sym_crypto.h>

#include <rte_table_acl.h>
#include <rte_table_array.h>
#include <rte_table_hash.h>
#include <rte_table_hash_func.h>
#include <rte_table_lpm.h>
#include <rte_table_lpm_ipv6.h>
#include <rte_table_stub.h>

#include "link.h"
#include "mempool.h"
#include "pipeline.h"
#include "tap.h"
#include "tmgr.h"
#include "swq.h"
#include "cryptodev.h"

#ifndef PIPELINE_MSGQ_SIZE
#define PIPELINE_MSGQ_SIZE 64
#endif

#ifndef TABLE_LPM_NUMBER_TBL8
#define TABLE_LPM_NUMBER_TBL8 256
#endif

static struct pipeline_list pipeline_list;

int
pipeline_init(void)
{
	TAILQ_INIT(&pipeline_list);

	return 0;
}

struct pipeline *
pipeline_find(const char *name)
{
	struct pipeline *pipeline;

	if (name == NULL)
		return NULL;

	TAILQ_FOREACH(pipeline, &pipeline_list, node)
		if (strcmp(name, pipeline->name) == 0)
			return pipeline;

	return NULL;
}

struct pipeline *
pipeline_create(const char *name, struct pipeline_params *params)
{
	char msgq_name[NAME_MAX];
	struct rte_pipeline_params pp;
	struct pipeline *pipeline;
	struct rte_pipeline *p;
	struct rte_ring *msgq_req;
	struct rte_ring *msgq_rsp;

	/* Check input params */
	if ((name == NULL) ||
		pipeline_find(name) ||
		(params == NULL) ||
		(params->timer_period_ms == 0))
		return NULL;

	/* Resource create */
	snprintf(msgq_name, sizeof(msgq_name), "%s-MSGQ-REQ", name);

	msgq_req = rte_ring_create(msgq_name,
		PIPELINE_MSGQ_SIZE,
		params->cpu_id,
		RING_F_SP_ENQ | RING_F_SC_DEQ);
	if (msgq_req == NULL)
		return NULL;

	snprintf(msgq_name, sizeof(msgq_name), "%s-MSGQ-RSP", name);

	msgq_rsp = rte_ring_create(msgq_name,
		PIPELINE_MSGQ_SIZE,
		params->cpu_id,
		RING_F_SP_ENQ | RING_F_SC_DEQ);
	if (msgq_rsp == NULL) {
		rte_ring_free(msgq_req);
		return NULL;
	}

	pp.name = name;
	pp.socket_id = (int) params->cpu_id;
	pp.offset_port_id = params->offset_port_id;

	p = rte_pipeline_create(&pp);
	if (p == NULL) {
		rte_ring_free(msgq_rsp);
		rte_ring_free(msgq_req);
		return NULL;
	}

	/* Node allocation */
	pipeline = calloc(1, sizeof(struct pipeline));
	if (pipeline == NULL) {
		rte_pipeline_free(p);
		rte_ring_free(msgq_rsp);
		rte_ring_free(msgq_req);
		return NULL;
	}

	/* Node fill in */
	strlcpy(pipeline->name, name, sizeof(pipeline->name));
	pipeline->p = p;
	pipeline->n_ports_in = 0;
	pipeline->n_ports_out = 0;
	pipeline->n_tables = 0;
	pipeline->msgq_req = msgq_req;
	pipeline->msgq_rsp = msgq_rsp;
	pipeline->timer_period_ms = params->timer_period_ms;
	pipeline->enabled = 0;
	pipeline->cpu_id = params->cpu_id;

	/* Node add to list */
	TAILQ_INSERT_TAIL(&pipeline_list, pipeline, node);

	return pipeline;
}
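
/*
 * pipeline_create() above builds one pipeline node: it creates the
 * "<name>-MSGQ-REQ" and "<name>-MSGQ-RSP" message rings, the underlying
 * struct rte_pipeline and the node itself, rolling back everything already
 * created if any step fails, and finally appends the node to the global
 * pipeline list. The timer period must be non-zero and the name unique.
 */
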
int
pipeline_port_in_create(const char *pipeline_name,
	struct port_in_params *params,
	int enabled)
{
	struct rte_pipeline_port_in_params p;

	union {
		struct rte_port_ethdev_reader_params ethdev;
		struct rte_port_ring_reader_params ring;
		struct rte_port_sched_reader_params sched;
		struct rte_port_fd_reader_params fd;
		struct rte_port_source_params source;
		struct rte_port_sym_crypto_reader_params sym_crypto;
	} pp;

	struct pipeline *pipeline;
	struct port_in *port_in;
	struct port_in_action_profile *ap;
	struct rte_port_in_action *action;
	uint32_t port_id;
	int status;

	memset(&p, 0, sizeof(p));
	memset(&pp, 0, sizeof(pp));

	/* Check input params */
	if ((pipeline_name == NULL) ||
		(params == NULL) ||
		(params->burst_size == 0) ||
		(params->burst_size > RTE_PORT_IN_BURST_SIZE_MAX))
		return -1;

	pipeline = pipeline_find(pipeline_name);
	if (pipeline == NULL)
		return -1;

	ap = NULL;
	if (params->action_profile_name) {
		ap = port_in_action_profile_find(params->action_profile_name);
		if (ap == NULL)
			return -1;
	}

	switch (params->type) {
	case PORT_IN_RXQ:
	{
		struct link *link;

		link = link_find(params->dev_name);
		if (link == NULL)
			return -1;

		if (params->rxq.queue_id >= link->n_rxq)
			return -1;

		pp.ethdev.port_id = link->port_id;
		pp.ethdev.queue_id = params->rxq.queue_id;

		p.ops = &rte_port_ethdev_reader_ops;
		p.arg_create = &pp.ethdev;
		break;
	}

	case PORT_IN_SWQ:
	{
		struct swq *swq;

		swq = swq_find(params->dev_name);
		if (swq == NULL)
			return -1;

		pp.ring.ring = swq->r;

		p.ops = &rte_port_ring_reader_ops;
		p.arg_create = &pp.ring;
		break;
	}

	case PORT_IN_TMGR:
	{
		struct tmgr_port *tmgr_port;

		tmgr_port = tmgr_port_find(params->dev_name);
		if (tmgr_port == NULL)
			return -1;

		pp.sched.sched = tmgr_port->s;

		p.ops = &rte_port_sched_reader_ops;
		p.arg_create = &pp.sched;
		break;
	}

	case PORT_IN_TAP:
	{
		struct tap *tap;
		struct mempool *mempool;

		tap = tap_find(params->dev_name);
		mempool = mempool_find(params->tap.mempool_name);
		if ((tap == NULL) || (mempool == NULL))
			return -1;

		pp.fd.fd = tap->fd;
		pp.fd.mempool = mempool->m;
		pp.fd.mtu = params->tap.mtu;

		p.ops = &rte_port_fd_reader_ops;
		p.arg_create = &pp.fd;
		break;
	}

	case PORT_IN_SOURCE:
	{
		struct mempool *mempool;

		mempool = mempool_find(params->source.mempool_name);
		if (mempool == NULL)
			return -1;

		pp.source.mempool = mempool->m;
		pp.source.file_name = params->source.file_name;
		pp.source.n_bytes_per_pkt = params->source.n_bytes_per_pkt;

		p.ops = &rte_port_source_ops;
		p.arg_create = &pp.source;
		break;
	}

	case PORT_IN_CRYPTODEV:
	{
		struct cryptodev *cryptodev;

		cryptodev = cryptodev_find(params->dev_name);
		if (cryptodev == NULL)
			return -1;

		if (params->cryptodev.queue_id >= cryptodev->n_queues)
			return -1;

		pp.sym_crypto.cryptodev_id = cryptodev->dev_id;
		pp.sym_crypto.queue_id = params->cryptodev.queue_id;
		pp.sym_crypto.f_callback = params->cryptodev.f_callback;
		pp.sym_crypto.arg_callback = params->cryptodev.arg_callback;
		p.ops = &rte_port_sym_crypto_reader_ops;
		p.arg_create = &pp.sym_crypto;

		break;
	}

	default:
		return -1;
	}

	p.burst_size = params->burst_size;

	/* Resource create */
	action = NULL;
	p.f_action = NULL;
	p.arg_ah = NULL;

	if (ap) {
		action = rte_port_in_action_create(ap->ap,
			pipeline->cpu_id);
		if (action == NULL)
			return -1;

		status = rte_port_in_action_params_get(
			action,
			&p);
		if (status) {
			rte_port_in_action_free(action);
			return -1;
		}
	}

	status = rte_pipeline_port_in_create(pipeline->p,
		&p,
		&port_id);
	if (status) {
		rte_port_in_action_free(action);
		return -1;
	}

	if (enabled)
		rte_pipeline_port_in_enable(pipeline->p, port_id);

	/* Pipeline */
	port_in = &pipeline->port_in[pipeline->n_ports_in];
	memcpy(&port_in->params, params, sizeof(*params));
	port_in->ap = ap;
	port_in->a = action;
	pipeline->n_ports_in++;

	return 0;
}

int
pipeline_port_in_connect_to_table(const char *pipeline_name,
	uint32_t port_id,
	uint32_t table_id)
{
	struct pipeline *pipeline;
	int status;

	/* Check input params */
	if (pipeline_name == NULL)
		return -1;

	pipeline = pipeline_find(pipeline_name);
	if ((pipeline == NULL) ||
		(port_id >= pipeline->n_ports_in) ||
		(table_id >= pipeline->n_tables))
		return -1;

	/* Resource */
	status = rte_pipeline_port_in_connect_to_table(pipeline->p,
		port_id,
		table_id);

	return status;
}

int
pipeline_port_out_create(const char *pipeline_name,
	struct port_out_params *params)
{
	struct rte_pipeline_port_out_params p;

	union {
		struct rte_port_ethdev_writer_params ethdev;
		struct rte_port_ring_writer_params ring;
		struct rte_port_sched_writer_params sched;
		struct rte_port_fd_writer_params fd;
		struct rte_port_sink_params sink;
		struct rte_port_sym_crypto_writer_params sym_crypto;
	} pp;

	union {
		struct rte_port_ethdev_writer_nodrop_params ethdev;
		struct rte_port_ring_writer_nodrop_params ring;
		struct rte_port_fd_writer_nodrop_params fd;
		struct rte_port_sym_crypto_writer_nodrop_params sym_crypto;
	} pp_nodrop;

	struct pipeline *pipeline;
	uint32_t port_id;
	int status;

	memset(&p, 0, sizeof(p));
	memset(&pp, 0, sizeof(pp));
	memset(&pp_nodrop, 0, sizeof(pp_nodrop));

	/* Check input params */
	if ((pipeline_name == NULL) ||
		(params == NULL) ||
		(params->burst_size == 0) ||
		(params->burst_size > RTE_PORT_IN_BURST_SIZE_MAX))
		return -1;

	pipeline = pipeline_find(pipeline_name);
	if (pipeline == NULL)
		return -1;

	switch (params->type) {
	case PORT_OUT_TXQ:
	{
		struct link *link;

		link = link_find(params->dev_name);
		if (link == NULL)
			return -1;

		if (params->txq.queue_id >= link->n_txq)
			return -1;

		pp.ethdev.port_id = link->port_id;
		pp.ethdev.queue_id = params->txq.queue_id;
		pp.ethdev.tx_burst_sz = params->burst_size;

		pp_nodrop.ethdev.port_id = link->port_id;
		pp_nodrop.ethdev.queue_id = params->txq.queue_id;
		pp_nodrop.ethdev.tx_burst_sz = params->burst_size;
		pp_nodrop.ethdev.n_retries = params->n_retries;

		if (params->retry == 0) {
			p.ops = &rte_port_ethdev_writer_ops;
			p.arg_create = &pp.ethdev;
		} else {
			p.ops = &rte_port_ethdev_writer_nodrop_ops;
			p.arg_create = &pp_nodrop.ethdev;
		}
		break;
	}

	case PORT_OUT_SWQ:
	{
		struct swq *swq;

		swq = swq_find(params->dev_name);
		if (swq == NULL)
			return -1;

		pp.ring.ring = swq->r;
		pp.ring.tx_burst_sz = params->burst_size;

		pp_nodrop.ring.ring = swq->r;
		pp_nodrop.ring.tx_burst_sz = params->burst_size;
		pp_nodrop.ring.n_retries = params->n_retries;

		if (params->retry == 0) {
			p.ops = &rte_port_ring_writer_ops;
			p.arg_create = &pp.ring;
		} else {
			p.ops = &rte_port_ring_writer_nodrop_ops;
			p.arg_create = &pp_nodrop.ring;
		}
		break;
	}

	case PORT_OUT_TMGR:
	{
		struct tmgr_port *tmgr_port;

		tmgr_port = tmgr_port_find(params->dev_name);
		if (tmgr_port == NULL)
			return -1;

		pp.sched.sched = tmgr_port->s;
		pp.sched.tx_burst_sz = params->burst_size;

		p.ops = &rte_port_sched_writer_ops;
		p.arg_create = &pp.sched;
		break;
	}

	case PORT_OUT_TAP:
	{
		struct tap *tap;

		tap = tap_find(params->dev_name);
		if (tap == NULL)
			return -1;

		pp.fd.fd = tap->fd;
		pp.fd.tx_burst_sz = params->burst_size;

		pp_nodrop.fd.fd = tap->fd;
		pp_nodrop.fd.tx_burst_sz = params->burst_size;
		pp_nodrop.fd.n_retries = params->n_retries;

		if (params->retry == 0) {
			p.ops = &rte_port_fd_writer_ops;
			p.arg_create = &pp.fd;
		} else {
			p.ops = &rte_port_fd_writer_nodrop_ops;
			p.arg_create = &pp_nodrop.fd;
		}
		break;
	}

	case PORT_OUT_SINK:
	{
		pp.sink.file_name = params->sink.file_name;
		pp.sink.max_n_pkts = params->sink.max_n_pkts;

		p.ops = &rte_port_sink_ops;
		p.arg_create = &pp.sink;
		break;
	}

	case PORT_OUT_CRYPTODEV:
	{
		struct cryptodev *cryptodev;

		cryptodev = cryptodev_find(params->dev_name);
		if (cryptodev == NULL)
			return -1;

		if (params->cryptodev.queue_id >= cryptodev->n_queues)
			return -1;

		pp.sym_crypto.cryptodev_id = cryptodev->dev_id;
		pp.sym_crypto.queue_id = params->cryptodev.queue_id;
		pp.sym_crypto.tx_burst_sz = params->burst_size;
		pp.sym_crypto.crypto_op_offset = params->cryptodev.op_offset;

		pp_nodrop.sym_crypto.cryptodev_id = cryptodev->dev_id;
		pp_nodrop.sym_crypto.queue_id = params->cryptodev.queue_id;
		pp_nodrop.sym_crypto.tx_burst_sz = params->burst_size;
		pp_nodrop.sym_crypto.n_retries = params->n_retries;
		pp_nodrop.sym_crypto.crypto_op_offset =
			params->cryptodev.op_offset;

		if (params->retry == 0) {
			p.ops = &rte_port_sym_crypto_writer_ops;
			p.arg_create = &pp.sym_crypto;
		} else {
			p.ops = &rte_port_sym_crypto_writer_nodrop_ops;
			p.arg_create = &pp_nodrop.sym_crypto;
		}

		break;
	}

	default:
		return -1;
	}

	p.f_action = NULL;
	p.arg_ah = NULL;

	/* Resource create */
	status = rte_pipeline_port_out_create(pipeline->p,
		&p,
		&port_id);

	if (status)
		return -1;

	/* Pipeline */
	pipeline->n_ports_out++;

	return 0;
}
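
/*
 * ACL table key formats: the two arrays below describe the classic 5-tuple
 * (protocol, source/destination address, source/destination port) for IPv4
 * and IPv6. Offsets are expressed relative to the start of the IP header;
 * pipeline_table_create() shifts each offset by the headroom-corrected
 * ip_header_offset taken from the table parameters. Source and destination
 * ports share an input_index, so the ACL library reads them as a single
 * 32-bit input field.
 */
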
static const struct rte_acl_field_def table_acl_field_format_ipv4[] = {
	/* Protocol */
	[0] = {
		.type = RTE_ACL_FIELD_TYPE_BITMASK,
		.size = sizeof(uint8_t),
		.field_index = 0,
		.input_index = 0,
		.offset = offsetof(struct rte_ipv4_hdr, next_proto_id),
	},

	/* Source IP address (IPv4) */
	[1] = {
		.type = RTE_ACL_FIELD_TYPE_MASK,
		.size = sizeof(uint32_t),
		.field_index = 1,
		.input_index = 1,
		.offset = offsetof(struct rte_ipv4_hdr, src_addr),
	},

	/* Destination IP address (IPv4) */
	[2] = {
		.type = RTE_ACL_FIELD_TYPE_MASK,
		.size = sizeof(uint32_t),
		.field_index = 2,
		.input_index = 2,
		.offset = offsetof(struct rte_ipv4_hdr, dst_addr),
	},

	/* Source Port */
	[3] = {
		.type = RTE_ACL_FIELD_TYPE_RANGE,
		.size = sizeof(uint16_t),
		.field_index = 3,
		.input_index = 3,
		.offset = sizeof(struct rte_ipv4_hdr) +
			offsetof(struct rte_tcp_hdr, src_port),
	},

	/* Destination Port */
	[4] = {
		.type = RTE_ACL_FIELD_TYPE_RANGE,
		.size = sizeof(uint16_t),
		.field_index = 4,
		.input_index = 3,
		.offset = sizeof(struct rte_ipv4_hdr) +
			offsetof(struct rte_tcp_hdr, dst_port),
	},
};

static const struct rte_acl_field_def table_acl_field_format_ipv6[] = {
	/* Protocol */
	[0] = {
		.type = RTE_ACL_FIELD_TYPE_BITMASK,
		.size = sizeof(uint8_t),
		.field_index = 0,
		.input_index = 0,
		.offset = offsetof(struct rte_ipv6_hdr, proto),
	},

	/* Source IP address (IPv6) */
	[1] = {
		.type = RTE_ACL_FIELD_TYPE_MASK,
		.size = sizeof(uint32_t),
		.field_index = 1,
		.input_index = 1,
		.offset = offsetof(struct rte_ipv6_hdr, src_addr.a[0]),
	},

	[2] = {
		.type = RTE_ACL_FIELD_TYPE_MASK,
		.size = sizeof(uint32_t),
		.field_index = 2,
		.input_index = 2,
		.offset = offsetof(struct rte_ipv6_hdr, src_addr.a[4]),
	},

	[3] = {
		.type = RTE_ACL_FIELD_TYPE_MASK,
		.size = sizeof(uint32_t),
		.field_index = 3,
		.input_index = 3,
		.offset = offsetof(struct rte_ipv6_hdr, src_addr.a[8]),
	},

	[4] = {
		.type = RTE_ACL_FIELD_TYPE_MASK,
		.size = sizeof(uint32_t),
		.field_index = 4,
		.input_index = 4,
		.offset = offsetof(struct rte_ipv6_hdr, src_addr.a[12]),
	},

	/* Destination IP address (IPv6) */
	[5] = {
		.type = RTE_ACL_FIELD_TYPE_MASK,
		.size = sizeof(uint32_t),
		.field_index = 5,
		.input_index = 5,
		.offset = offsetof(struct rte_ipv6_hdr, dst_addr.a[0]),
	},

	[6] = {
		.type = RTE_ACL_FIELD_TYPE_MASK,
		.size = sizeof(uint32_t),
		.field_index = 6,
		.input_index = 6,
		.offset = offsetof(struct rte_ipv6_hdr, dst_addr.a[4]),
	},

	[7] = {
		.type = RTE_ACL_FIELD_TYPE_MASK,
		.size = sizeof(uint32_t),
		.field_index = 7,
		.input_index = 7,
		.offset = offsetof(struct rte_ipv6_hdr, dst_addr.a[8]),
	},

	[8] = {
		.type = RTE_ACL_FIELD_TYPE_MASK,
		.size = sizeof(uint32_t),
		.field_index = 8,
		.input_index = 8,
		.offset = offsetof(struct rte_ipv6_hdr, dst_addr.a[12]),
	},

	/* Source Port */
	[9] = {
		.type = RTE_ACL_FIELD_TYPE_RANGE,
		.size = sizeof(uint16_t),
		.field_index = 9,
		.input_index = 9,
		.offset = sizeof(struct rte_ipv6_hdr) +
			offsetof(struct rte_tcp_hdr, src_port),
	},

	/* Destination Port */
	[10] = {
		.type = RTE_ACL_FIELD_TYPE_RANGE,
		.size = sizeof(uint16_t),
		.field_index = 10,
		.input_index = 9,
		.offset = sizeof(struct rte_ipv6_hdr) +
			offsetof(struct rte_tcp_hdr, dst_port),
	},
};
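
/*
 * Usage sketch for pipeline_table_create(): a minimal exact-match table,
 * assuming illustrative values for all sizes and offsets (the initializer
 * shape follows the fields referenced by the function below).
 *
 *	struct table_params tp = {
 *		.match_type = TABLE_HASH,
 *		.match.hash = {
 *			.key_size = 16,
 *			.key_offset = 0,		// assumed
 *			.key_mask = NULL,		// assumed: no mask
 *			.n_keys = 1 << 10,		// assumed
 *			.n_buckets = 1 << 8,		// assumed
 *			.extendable_bucket = 0,		// assumed: LRU variant
 *		},
 *		.action_profile_name = NULL,		// assumed: no actions
 *	};
 *
 *	if (pipeline_table_create("PIPELINE0", &tp))
 *		// handle error
 */
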
int
pipeline_table_create(const char *pipeline_name,
	struct table_params *params)
{
	char name[NAME_MAX];
	struct rte_pipeline_table_params p;

	union {
		struct rte_table_acl_params acl;
		struct rte_table_array_params array;
		struct rte_table_hash_params hash;
		struct rte_table_lpm_params lpm;
		struct rte_table_lpm_ipv6_params lpm_ipv6;
	} pp;

	struct pipeline *pipeline;
	struct table *table;
	struct table_action_profile *ap;
	struct rte_table_action *action;
	uint32_t table_id;
	int status;

	memset(&p, 0, sizeof(p));
	memset(&pp, 0, sizeof(pp));

	/* Check input params */
	if ((pipeline_name == NULL) ||
		(params == NULL))
		return -1;

	pipeline = pipeline_find(pipeline_name);
	if ((pipeline == NULL) ||
		(pipeline->n_tables >= RTE_PIPELINE_TABLE_MAX))
		return -1;

	ap = NULL;
	if (params->action_profile_name) {
		ap = table_action_profile_find(params->action_profile_name);
		if (ap == NULL)
			return -1;
	}

	snprintf(name, NAME_MAX, "%s_table%u",
		pipeline_name, pipeline->n_tables);

	switch (params->match_type) {
	case TABLE_ACL:
	{
		uint32_t ip_header_offset = params->match.acl.ip_header_offset -
			(sizeof(struct rte_mbuf) + RTE_PKTMBUF_HEADROOM);
		uint32_t i;

		if (params->match.acl.n_rules == 0)
			return -1;

		pp.acl.name = name;
		pp.acl.n_rules = params->match.acl.n_rules;
		if (params->match.acl.ip_version) {
			memcpy(&pp.acl.field_format,
				&table_acl_field_format_ipv4,
				sizeof(table_acl_field_format_ipv4));
			pp.acl.n_rule_fields =
				RTE_DIM(table_acl_field_format_ipv4);
		} else {
			memcpy(&pp.acl.field_format,
				&table_acl_field_format_ipv6,
				sizeof(table_acl_field_format_ipv6));
			pp.acl.n_rule_fields =
				RTE_DIM(table_acl_field_format_ipv6);
		}

		for (i = 0; i < pp.acl.n_rule_fields; i++)
			pp.acl.field_format[i].offset += ip_header_offset;

		p.ops = &rte_table_acl_ops;
		p.arg_create = &pp.acl;
		break;
	}

	case TABLE_ARRAY:
	{
		if (params->match.array.n_keys == 0)
			return -1;

		pp.array.n_entries = params->match.array.n_keys;
		pp.array.offset = params->match.array.key_offset;

		p.ops = &rte_table_array_ops;
		p.arg_create = &pp.array;
		break;
	}

	case TABLE_HASH:
	{
		struct rte_table_ops *ops;
		rte_table_hash_op_hash f_hash;

		if (params->match.hash.n_keys == 0)
			return -1;

		switch (params->match.hash.key_size) {
		case 8:
			f_hash = rte_table_hash_crc_key8;
			break;
		case 16:
			f_hash = rte_table_hash_crc_key16;
			break;
		case 24:
			f_hash = rte_table_hash_crc_key24;
			break;
		case 32:
			f_hash = rte_table_hash_crc_key32;
			break;
		case 40:
			f_hash = rte_table_hash_crc_key40;
			break;
		case 48:
			f_hash = rte_table_hash_crc_key48;
			break;
		case 56:
			f_hash = rte_table_hash_crc_key56;
			break;
		case 64:
			f_hash = rte_table_hash_crc_key64;
			break;
		default:
			return -1;
		}

		pp.hash.name = name;
		pp.hash.key_size = params->match.hash.key_size;
		pp.hash.key_offset = params->match.hash.key_offset;
		pp.hash.key_mask = params->match.hash.key_mask;
		pp.hash.n_keys = params->match.hash.n_keys;
		pp.hash.n_buckets = params->match.hash.n_buckets;
		pp.hash.f_hash = f_hash;
		pp.hash.seed = 0;

		if (params->match.hash.extendable_bucket)
			switch (params->match.hash.key_size) {
			case 8:
				ops = &rte_table_hash_key8_ext_ops;
				break;
			case 16:
				ops = &rte_table_hash_key16_ext_ops;
				break;
			default:
				ops = &rte_table_hash_ext_ops;
			}
		else
			switch (params->match.hash.key_size) {
			case 8:
				ops = &rte_table_hash_key8_lru_ops;
				break;
			case 16:
				ops = &rte_table_hash_key16_lru_ops;
				break;
			default:
				ops = &rte_table_hash_lru_ops;
			}

		p.ops = ops;
		p.arg_create = &pp.hash;
		break;
	}

	case TABLE_LPM:
	{
		if (params->match.lpm.n_rules == 0)
			return -1;

		switch (params->match.lpm.key_size) {
		case 4:
		{
			pp.lpm.name = name;
			pp.lpm.n_rules = params->match.lpm.n_rules;
			pp.lpm.number_tbl8s = TABLE_LPM_NUMBER_TBL8;
			pp.lpm.flags = 0;
			pp.lpm.entry_unique_size = p.action_data_size +
				sizeof(struct rte_pipeline_table_entry);
			pp.lpm.offset = params->match.lpm.key_offset;

			p.ops = &rte_table_lpm_ops;
			p.arg_create = &pp.lpm;
			break;
		}

		case 16:
		{
			pp.lpm_ipv6.name = name;
			pp.lpm_ipv6.n_rules = params->match.lpm.n_rules;
			pp.lpm_ipv6.number_tbl8s = TABLE_LPM_NUMBER_TBL8;
			pp.lpm_ipv6.entry_unique_size = p.action_data_size +
				sizeof(struct rte_pipeline_table_entry);
			pp.lpm_ipv6.offset = params->match.lpm.key_offset;

			p.ops = &rte_table_lpm_ipv6_ops;
			p.arg_create = &pp.lpm_ipv6;
			break;
		}

		default:
			return -1;
		}

		break;
	}

	case TABLE_STUB:
	{
		p.ops = &rte_table_stub_ops;
		p.arg_create = NULL;
		break;
	}

	default:
		return -1;
	}

	/* Resource create */
	action = NULL;
	p.f_action_hit = NULL;
	p.f_action_miss = NULL;
	p.arg_ah = NULL;

	if (ap) {
		action = rte_table_action_create(ap->ap,
			pipeline->cpu_id);
		if (action == NULL)
			return -1;

		status = rte_table_action_table_params_get(
			action,
			&p);
		if (status ||
			((p.action_data_size +
			sizeof(struct rte_pipeline_table_entry)) >
			TABLE_RULE_ACTION_SIZE_MAX)) {
			rte_table_action_free(action);
			return -1;
		}
	}

	if (params->match_type == TABLE_LPM) {
		if (params->match.lpm.key_size == 4)
			pp.lpm.entry_unique_size = p.action_data_size +
				sizeof(struct rte_pipeline_table_entry);

		if (params->match.lpm.key_size == 16)
			pp.lpm_ipv6.entry_unique_size = p.action_data_size +
				sizeof(struct rte_pipeline_table_entry);
	}

	status = rte_pipeline_table_create(pipeline->p,
		&p,
		&table_id);
	if (status) {
		rte_table_action_free(action);
		return -1;
	}

	/* Pipeline */
	table = &pipeline->table[pipeline->n_tables];
	memcpy(&table->params, params, sizeof(*params));
	table->ap = ap;
	table->a = action;
	TAILQ_INIT(&table->rules);
	table->rule_default = NULL;

	pipeline->n_tables++;

	return 0;
}

struct table_rule *
table_rule_find(struct table *table,
	struct table_rule_match *match)
{
	struct table_rule *rule;

	TAILQ_FOREACH(rule, &table->rules, node)
		if (memcmp(&rule->match, match, sizeof(*match)) == 0)
			return rule;

	return NULL;
}

void
table_rule_add(struct table *table,
	struct table_rule *new_rule)
{
	struct table_rule *existing_rule;

	existing_rule = table_rule_find(table, &new_rule->match);
	if (existing_rule == NULL)
		TAILQ_INSERT_TAIL(&table->rules, new_rule, node);
	else {
		TAILQ_INSERT_AFTER(&table->rules, existing_rule, new_rule, node);
		TAILQ_REMOVE(&table->rules, existing_rule, node);
		free(existing_rule);
	}
}
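
/*
 * table_rule_add_bulk() below moves up to n_rules entries from the caller's
 * list into the table. As in table_rule_add() above, a new rule whose match
 * duplicates an existing rule takes that rule's place in the list and the
 * old rule is freed.
 */
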
void
table_rule_add_bulk(struct table *table,
	struct table_rule_list *list,
	uint32_t n_rules)
{
	uint32_t i;

	for (i = 0; i < n_rules; i++) {
		struct table_rule *existing_rule, *new_rule;

		new_rule = TAILQ_FIRST(list);
		if (new_rule == NULL)
			break;

		TAILQ_REMOVE(list, new_rule, node);

		existing_rule = table_rule_find(table, &new_rule->match);
		if (existing_rule == NULL)
			TAILQ_INSERT_TAIL(&table->rules, new_rule, node);
		else {
			TAILQ_INSERT_AFTER(&table->rules, existing_rule, new_rule, node);
			TAILQ_REMOVE(&table->rules, existing_rule, node);
			free(existing_rule);
		}
	}
}

void
table_rule_delete(struct table *table,
	struct table_rule_match *match)
{
	struct table_rule *rule;

	rule = table_rule_find(table, match);
	if (rule == NULL)
		return;

	TAILQ_REMOVE(&table->rules, rule, node);
	free(rule);
}

void
table_rule_default_add(struct table *table,
	struct table_rule *rule)
{
	free(table->rule_default);
	table->rule_default = rule;
}

void
table_rule_default_delete(struct table *table)
{
	free(table->rule_default);
	table->rule_default = NULL;
}
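
/*
 * End-to-end usage sketch (illustrative only: names, queue ids and sizes are
 * assumed, and error handling is elided). A typical control-path sequence
 * creates the pipeline, its ports and table, then connects the input port:
 *
 *	struct pipeline_params pp = {
 *		.timer_period_ms = 10, .offset_port_id = 0, .cpu_id = 0,
 *	};
 *	struct port_in_params in = {
 *		.type = PORT_IN_RXQ,
 *		.dev_name = "LINK0",		// assumed link name
 *		.rxq.queue_id = 0,
 *		.burst_size = 32,
 *	};
 *	struct port_out_params out = {
 *		.type = PORT_OUT_TXQ,
 *		.dev_name = "LINK0",		// assumed link name
 *		.txq.queue_id = 0,
 *		.burst_size = 32,
 *		.retry = 0,
 *	};
 *	struct table_params tbl = { .match_type = TABLE_STUB };
 *
 *	pipeline_create("PIPELINE0", &pp);
 *	pipeline_port_in_create("PIPELINE0", &in, 1);
 *	pipeline_port_out_create("PIPELINE0", &out);
 *	pipeline_table_create("PIPELINE0", &tbl);
 *	pipeline_port_in_connect_to_table("PIPELINE0", 0, 0);
 */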