/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2020 Intel Corporation
 */
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <errno.h>
#include <inttypes.h>
#include <sys/queue.h>
#include <arpa/inet.h>

#include <rte_common.h>
#include <rte_prefetch.h>
#include <rte_byteorder.h>
#include <rte_cycles.h>
#include <rte_meter.h>

#include <rte_swx_table_selector.h>

#include "rte_swx_pipeline.h"
#include "rte_swx_ctl.h"

#define CHECK(condition, err_code) \
do { \
	if (!(condition)) \
		return -(err_code); \
} while (0)

#define CHECK_NAME(name, err_code) \
	CHECK((name) && \
	      (name)[0] && \
	      (strnlen((name), RTE_SWX_NAME_SIZE) < RTE_SWX_NAME_SIZE), \
	      err_code)

#define CHECK_INSTRUCTION(instr, err_code) \
	CHECK((instr) && \
	      (instr)[0] && \
	      (strnlen((instr), RTE_SWX_INSTRUCTION_SIZE) < \
	       RTE_SWX_INSTRUCTION_SIZE), \
	      err_code)

#ifndef TRACE_LEVEL
#define TRACE_LEVEL 0
#endif

#if TRACE_LEVEL
#define TRACE(...) printf(__VA_ARGS__)
#else
#define TRACE(...)
#endif

/*
 * Environment.
 */
#define ntoh64(x) rte_be_to_cpu_64(x)
#define hton64(x) rte_cpu_to_be_64(x)

#ifndef RTE_SWX_PIPELINE_HUGE_PAGES_DISABLE

#include <rte_malloc.h>

static void *
env_malloc(size_t size, size_t alignment, int numa_node)
{
	return rte_zmalloc_socket(NULL, size, alignment, numa_node);
}

static void
env_free(void *start, size_t size __rte_unused)
{
	rte_free(start);
}

#else

#include <numa.h>

static void *
env_malloc(size_t size, size_t alignment __rte_unused, int numa_node)
{
	void *start;

	if (numa_available() == -1)
		return NULL;

	start = numa_alloc_onnode(size, numa_node);
	if (!start)
		return NULL;

	memset(start, 0, size);
	return start;
}

static void
env_free(void *start, size_t size)
{
	if (numa_available() == -1)
		return;

	numa_free(start, size);
}

#endif

/*
 * Struct.
 */
struct field {
	char name[RTE_SWX_NAME_SIZE];
	uint32_t n_bits;
	uint32_t offset;
};

struct struct_type {
	TAILQ_ENTRY(struct_type) node;
	char name[RTE_SWX_NAME_SIZE];
	struct field *fields;
	uint32_t n_fields;
	uint32_t n_bits;
};

TAILQ_HEAD(struct_type_tailq, struct_type);

/*
 * Input port.
 */
struct port_in_type {
	TAILQ_ENTRY(port_in_type) node;
	char name[RTE_SWX_NAME_SIZE];
	struct rte_swx_port_in_ops ops;
};

TAILQ_HEAD(port_in_type_tailq, port_in_type);

struct port_in {
	TAILQ_ENTRY(port_in) node;
	struct port_in_type *type;
	void *obj;
	uint32_t id;
};

TAILQ_HEAD(port_in_tailq, port_in);

struct port_in_runtime {
	rte_swx_port_in_pkt_rx_t pkt_rx;
	void *obj;
};

/*
 * Output port.
 */
struct port_out_type {
	TAILQ_ENTRY(port_out_type) node;
	char name[RTE_SWX_NAME_SIZE];
	struct rte_swx_port_out_ops ops;
};

TAILQ_HEAD(port_out_type_tailq, port_out_type);

struct port_out {
	TAILQ_ENTRY(port_out) node;
	struct port_out_type *type;
	void *obj;
	uint32_t id;
};

TAILQ_HEAD(port_out_tailq, port_out);

struct port_out_runtime {
	rte_swx_port_out_pkt_tx_t pkt_tx;
	rte_swx_port_out_flush_t flush;
	void *obj;
};

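/* Note (illustrative sketch only): the port_in_runtime/port_out_runtime
 * arrays above are the flat per-port handles used by the data path. A receive
 * step is conceptually along the lines of:
 *
 *	struct port_in_runtime *port = &p->in[port_id];
 *	pkt_received = port->pkt_rx(port->obj, &t->pkt);
 *
 * The "p", "t" and "port_id" names are assumptions made for this sketch; the
 * callbacks themselves come from the registered port type ops.
 */
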
/*
 * Extern object.
 */
struct extern_type_member_func {
	TAILQ_ENTRY(extern_type_member_func) node;
	char name[RTE_SWX_NAME_SIZE];
	rte_swx_extern_type_member_func_t func;
	uint32_t id;
};

TAILQ_HEAD(extern_type_member_func_tailq, extern_type_member_func);

struct extern_type {
	TAILQ_ENTRY(extern_type) node;
	char name[RTE_SWX_NAME_SIZE];
	struct struct_type *mailbox_struct_type;
	rte_swx_extern_type_constructor_t constructor;
	rte_swx_extern_type_destructor_t destructor;
	struct extern_type_member_func_tailq funcs;
	uint32_t n_funcs;
};

TAILQ_HEAD(extern_type_tailq, extern_type);

struct extern_obj {
	TAILQ_ENTRY(extern_obj) node;
	char name[RTE_SWX_NAME_SIZE];
	struct extern_type *type;
	void *obj;
	uint32_t struct_id;
	uint32_t id;
};

TAILQ_HEAD(extern_obj_tailq, extern_obj);

#ifndef RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX
#define RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX 8
#endif

struct extern_obj_runtime {
	void *obj;
	uint8_t *mailbox;
	rte_swx_extern_type_member_func_t funcs[RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX];
};

/*
 * Extern function.
 */
struct extern_func {
	TAILQ_ENTRY(extern_func) node;
	char name[RTE_SWX_NAME_SIZE];
	struct struct_type *mailbox_struct_type;
	rte_swx_extern_func_t func;
	uint32_t struct_id;
	uint32_t id;
};

TAILQ_HEAD(extern_func_tailq, extern_func);

struct extern_func_runtime {
	uint8_t *mailbox;
	rte_swx_extern_func_t func;
};

/*
 * Header.
 */
struct header {
	TAILQ_ENTRY(header) node;
	char name[RTE_SWX_NAME_SIZE];
	struct struct_type *st;
	uint32_t struct_id;
	uint32_t id;
};

TAILQ_HEAD(header_tailq, header);

struct header_runtime {
	uint8_t *ptr0;
};

struct header_out_runtime {
	uint8_t *ptr0;
	uint8_t *ptr;
	uint32_t n_bytes;
};

/*
 * Instruction.
 */

/* Packet headers are always in Network Byte Order (NBO), i.e. big endian.
 * Packet meta-data fields are always assumed to be in Host Byte Order (HBO).
 * Table entry fields can be in either NBO or HBO; they are assumed to be in HBO
 * when transferred to packet meta-data and in NBO when transferred to packet
 * headers.
 */

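/* Worked example (illustrative values): a 16-bit header field carrying the
 * value 0x1234 is stored in the packet as the byte sequence {0x12, 0x34}
 * (NBO), while a 16-bit meta-data field carrying the same value is stored in
 * host order (on a little-endian CPU: {0x34, 0x12}). This is why the
 * instruction variants below come in MH/HM/HH flavors that byte-swap whenever
 * a value crosses the header/meta-data boundary.
 */
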
/* Notation conventions:
 *    -Header field: H = h.header.field (dst/src)
 *    -Meta-data field: M = m.field (dst/src)
 *    -Extern object mailbox field: E = e.field (dst/src)
 *    -Extern function mailbox field: F = f.field (dst/src)
 *    -Table action data field: T = t.field (src only)
 *    -Immediate value: I = 32-bit unsigned value (src only)
 */

enum instruction_type {
	/* rx m.port_in */
	INSTR_RX,

	/* tx port_out
	 * port_out = MI
	 */
	INSTR_TX,   /* port_out = M */
	INSTR_TX_I, /* port_out = I */

	/* extract h.header */
	INSTR_HDR_EXTRACT,
	INSTR_HDR_EXTRACT2,
	INSTR_HDR_EXTRACT3,
	INSTR_HDR_EXTRACT4,
	INSTR_HDR_EXTRACT5,
	INSTR_HDR_EXTRACT6,
	INSTR_HDR_EXTRACT7,
	INSTR_HDR_EXTRACT8,

	/* emit h.header */
	INSTR_HDR_EMIT,
	INSTR_HDR_EMIT_TX,
	INSTR_HDR_EMIT2_TX,
	INSTR_HDR_EMIT3_TX,
	INSTR_HDR_EMIT4_TX,
	INSTR_HDR_EMIT5_TX,
	INSTR_HDR_EMIT6_TX,
	INSTR_HDR_EMIT7_TX,
	INSTR_HDR_EMIT8_TX,

	/* validate h.header */
	INSTR_HDR_VALIDATE,

	/* invalidate h.header */
	INSTR_HDR_INVALIDATE,

	/* mov dst src
	 * dst = src
	 * dst = HMEF, src = HMEFTI
	 */
	INSTR_MOV,    /* dst = MEF, src = MEFT */
	INSTR_MOV_MH, /* dst = MEF, src = H */
	INSTR_MOV_HM, /* dst = H, src = MEFT */
	INSTR_MOV_HH, /* dst = H, src = H */
	INSTR_MOV_I,  /* dst = HMEF, src = I */

	/* dma h.header t.field
	 * memcpy(h.header, t.field, sizeof(h.header))
	 */
	INSTR_DMA_HT,
	INSTR_DMA_HT2,
	INSTR_DMA_HT3,
	INSTR_DMA_HT4,
	INSTR_DMA_HT5,
	INSTR_DMA_HT6,
	INSTR_DMA_HT7,
	INSTR_DMA_HT8,

	/* add dst src
	 * dst += src
	 * dst = HMEF, src = HMEFTI
	 */
	INSTR_ALU_ADD,    /* dst = MEF, src = MEF */
	INSTR_ALU_ADD_MH, /* dst = MEF, src = H */
	INSTR_ALU_ADD_HM, /* dst = H, src = MEF */
	INSTR_ALU_ADD_HH, /* dst = H, src = H */
	INSTR_ALU_ADD_MI, /* dst = MEF, src = I */
	INSTR_ALU_ADD_HI, /* dst = H, src = I */

	/* sub dst src
	 * dst -= src
	 * dst = HMEF, src = HMEFTI
	 */
	INSTR_ALU_SUB,    /* dst = MEF, src = MEF */
	INSTR_ALU_SUB_MH, /* dst = MEF, src = H */
	INSTR_ALU_SUB_HM, /* dst = H, src = MEF */
	INSTR_ALU_SUB_HH, /* dst = H, src = H */
	INSTR_ALU_SUB_MI, /* dst = MEF, src = I */
	INSTR_ALU_SUB_HI, /* dst = H, src = I */

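	/* Example (illustrative only; field names are hypothetical): following
	 * the notation conventions above, an instruction such as
	 * "mov m.color h.ipv4.dscp" is encoded as INSTR_MOV_MH (dst = M,
	 * src = H), while "add h.ipv4.ttl 1" is encoded as INSTR_ALU_ADD_HI
	 * (dst = H, src = I).
	 */
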
	/* ckadd dst src
	 * dst = dst '+ src[0:1] '+ src[2:3] + ...
	 * dst = H, src = {H, h.header}
	 */
	INSTR_ALU_CKADD_FIELD,    /* src = H */
	INSTR_ALU_CKADD_STRUCT20, /* src = h.header, with sizeof(header) = 20 */
	INSTR_ALU_CKADD_STRUCT,   /* src = h.header, with any sizeof(header) */

	/* cksub dst src
	 * dst = dst '- src
	 * dst = H, src = H
	 */
	INSTR_ALU_CKSUB_FIELD,

	/* and dst src
	 * dst &= src
	 * dst = HMEF, src = HMEFTI
	 */
	INSTR_ALU_AND,    /* dst = MEF, src = MEFT */
	INSTR_ALU_AND_MH, /* dst = MEF, src = H */
	INSTR_ALU_AND_HM, /* dst = H, src = MEFT */
	INSTR_ALU_AND_HH, /* dst = H, src = H */
	INSTR_ALU_AND_I,  /* dst = HMEF, src = I */

	/* or dst src
	 * dst |= src
	 * dst = HMEF, src = HMEFTI
	 */
	INSTR_ALU_OR,    /* dst = MEF, src = MEFT */
	INSTR_ALU_OR_MH, /* dst = MEF, src = H */
	INSTR_ALU_OR_HM, /* dst = H, src = MEFT */
	INSTR_ALU_OR_HH, /* dst = H, src = H */
	INSTR_ALU_OR_I,  /* dst = HMEF, src = I */

	/* xor dst src
	 * dst ^= src
	 * dst = HMEF, src = HMEFTI
	 */
	INSTR_ALU_XOR,    /* dst = MEF, src = MEFT */
	INSTR_ALU_XOR_MH, /* dst = MEF, src = H */
	INSTR_ALU_XOR_HM, /* dst = H, src = MEFT */
	INSTR_ALU_XOR_HH, /* dst = H, src = H */
	INSTR_ALU_XOR_I,  /* dst = HMEF, src = I */

	/* shl dst src
	 * dst <<= src
	 * dst = HMEF, src = HMEFTI
	 */
	INSTR_ALU_SHL,    /* dst = MEF, src = MEF */
	INSTR_ALU_SHL_MH, /* dst = MEF, src = H */
	INSTR_ALU_SHL_HM, /* dst = H, src = MEF */
	INSTR_ALU_SHL_HH, /* dst = H, src = H */
	INSTR_ALU_SHL_MI, /* dst = MEF, src = I */
	INSTR_ALU_SHL_HI, /* dst = H, src = I */

	/* shr dst src
	 * dst >>= src
	 * dst = HMEF, src = HMEFTI
	 */
	INSTR_ALU_SHR,    /* dst = MEF, src = MEF */
	INSTR_ALU_SHR_MH, /* dst = MEF, src = H */
	INSTR_ALU_SHR_HM, /* dst = H, src = MEF */
	INSTR_ALU_SHR_HH, /* dst = H, src = H */
	INSTR_ALU_SHR_MI, /* dst = MEF, src = I */
	INSTR_ALU_SHR_HI, /* dst = H, src = I */

	/* regprefetch REGARRAY index
	 * prefetch REGARRAY[index]
	 * index = HMEFTI
	 */
	INSTR_REGPREFETCH_RH, /* index = H */
	INSTR_REGPREFETCH_RM, /* index = MEFT */
	INSTR_REGPREFETCH_RI, /* index = I */

	/* regrd dst REGARRAY index
	 * dst = REGARRAY[index]
	 * dst = HMEF, index = HMEFTI
	 */
	INSTR_REGRD_HRH, /* dst = H, index = H */
	INSTR_REGRD_HRM, /* dst = H, index = MEFT */
	INSTR_REGRD_HRI, /* dst = H, index = I */
	INSTR_REGRD_MRH, /* dst = MEF, index = H */
	INSTR_REGRD_MRM, /* dst = MEF, index = MEFT */
	INSTR_REGRD_MRI, /* dst = MEF, index = I */

	/* regwr REGARRAY index src
	 * REGARRAY[index] = src
	 * index = HMEFTI, src = HMEFTI
	 */
	INSTR_REGWR_RHH, /* index = H, src = H */
	INSTR_REGWR_RHM, /* index = H, src = MEFT */
	INSTR_REGWR_RHI, /* index = H, src = I */
	INSTR_REGWR_RMH, /* index = MEFT, src = H */
	INSTR_REGWR_RMM, /* index = MEFT, src = MEFT */
	INSTR_REGWR_RMI, /* index = MEFT, src = I */
	INSTR_REGWR_RIH, /* index = I, src = H */
	INSTR_REGWR_RIM, /* index = I, src = MEFT */
	INSTR_REGWR_RII, /* index = I, src = I */

	/* regadd REGARRAY index src
	 * REGARRAY[index] += src
	 * index = HMEFTI, src = HMEFTI
	 */
	INSTR_REGADD_RHH, /* index = H, src = H */
	INSTR_REGADD_RHM, /* index = H, src = MEFT */
	INSTR_REGADD_RHI, /* index = H, src = I */
	INSTR_REGADD_RMH, /* index = MEFT, src = H */
	INSTR_REGADD_RMM, /* index = MEFT, src = MEFT */
	INSTR_REGADD_RMI, /* index = MEFT, src = I */
	INSTR_REGADD_RIH, /* index = I, src = H */
	INSTR_REGADD_RIM, /* index = I, src = MEFT */
	INSTR_REGADD_RII, /* index = I, src = I */

	/* metprefetch METARRAY index
	 * prefetch METARRAY[index]
	 * index = HMEFTI
	 */
	INSTR_METPREFETCH_H, /* index = H */
	INSTR_METPREFETCH_M, /* index = MEFT */
	INSTR_METPREFETCH_I, /* index = I */

	/* meter METARRAY index length color_in color_out
	 * color_out = meter(METARRAY[index], length, color_in)
	 * index = HMEFTI, length = HMEFT, color_in = MEFTI, color_out = MEF
	 */
	INSTR_METER_HHM, /* index = H, length = H, color_in = MEFT */
	INSTR_METER_HHI, /* index = H, length = H, color_in = I */
	INSTR_METER_HMM, /* index = H, length = MEFT, color_in = MEFT */
	INSTR_METER_HMI, /* index = H, length = MEFT, color_in = I */
	INSTR_METER_MHM, /* index = MEFT, length = H, color_in = MEFT */
	INSTR_METER_MHI, /* index = MEFT, length = H, color_in = I */
	INSTR_METER_MMM, /* index = MEFT, length = MEFT, color_in = MEFT */
	INSTR_METER_MMI, /* index = MEFT, length = MEFT, color_in = I */
	INSTR_METER_IHM, /* index = I, length = H, color_in = MEFT */
	INSTR_METER_IHI, /* index = I, length = H, color_in = I */
	INSTR_METER_IMM, /* index = I, length = MEFT, color_in = MEFT */
	INSTR_METER_IMI, /* index = I, length = MEFT, color_in = I */

	/* table TABLE */
	INSTR_TABLE,
	INSTR_SELECTOR,

	/* extern e.obj.func */
	INSTR_EXTERN_OBJ,

	/* extern f.func */
	INSTR_EXTERN_FUNC,

	/* jmp LABEL
	 * Unconditional jump
	 */
	INSTR_JMP,

	/* jmpv LABEL h.header
	 * Jump if header is valid
	 */
	INSTR_JMP_VALID,

	/* jmpnv LABEL h.header
	 * Jump if header is invalid
	 */
	INSTR_JMP_INVALID,

	/* jmph LABEL
	 * Jump if table lookup hit
	 */
	INSTR_JMP_HIT,

	/* jmpnh LABEL
	 * Jump if table lookup miss
	 */
	INSTR_JMP_MISS,

	/* jmpa LABEL ACTION
	 * Jump if action run
	 */
	INSTR_JMP_ACTION_HIT,

	/* jmpna LABEL ACTION
	 * Jump if action not run
	 */
	INSTR_JMP_ACTION_MISS,

	/* jmpeq LABEL a b
	 * Jump if a is equal to b
	 * a = HMEFT, b = HMEFTI
	 */
	INSTR_JMP_EQ,    /* a = MEFT, b = MEFT */
	INSTR_JMP_EQ_MH, /* a = MEFT, b = H */
	INSTR_JMP_EQ_HM, /* a = H, b = MEFT */
	INSTR_JMP_EQ_HH, /* a = H, b = H */
	INSTR_JMP_EQ_I,  /* (a, b) = (MEFT, I) or (a, b) = (H, I) */

	/* jmpneq LABEL a b
	 * Jump if a is not equal to b
	 * a = HMEFT, b = HMEFTI
	 */
	INSTR_JMP_NEQ,    /* a = MEFT, b = MEFT */
	INSTR_JMP_NEQ_MH, /* a = MEFT, b = H */
	INSTR_JMP_NEQ_HM, /* a = H, b = MEFT */
	INSTR_JMP_NEQ_HH, /* a = H, b = H */
	INSTR_JMP_NEQ_I,  /* (a, b) = (MEFT, I) or (a, b) = (H, I) */

	/* jmplt LABEL a b
	 * Jump if a is less than b
	 * a = HMEFT, b = HMEFTI
	 */
	INSTR_JMP_LT,    /* a = MEFT, b = MEFT */
	INSTR_JMP_LT_MH, /* a = MEFT, b = H */
	INSTR_JMP_LT_HM, /* a = H, b = MEFT */
	INSTR_JMP_LT_HH, /* a = H, b = H */
	INSTR_JMP_LT_MI, /* a = MEFT, b = I */
	INSTR_JMP_LT_HI, /* a = H, b = I */

	/* jmpgt LABEL a b
	 * Jump if a is greater than b
	 * a = HMEFT, b = HMEFTI
	 */
	INSTR_JMP_GT,    /* a = MEFT, b = MEFT */
	INSTR_JMP_GT_MH, /* a = MEFT, b = H */
	INSTR_JMP_GT_HM, /* a = H, b = MEFT */
	INSTR_JMP_GT_HH, /* a = H, b = H */
	INSTR_JMP_GT_MI, /* a = MEFT, b = I */
	INSTR_JMP_GT_HI, /* a = H, b = I */

	/* return
	 * Return from action
	 */
	INSTR_RETURN,
};

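/* Example (illustrative sketch only): a minimal pass-through program written
 * with the mnemonics documented above could look like this, where
 * "h.ethernet", "m.port_in" and "m.port_out" are hypothetical header and
 * meta-data names defined by the application:
 *
 *	rx m.port_in
 *	extract h.ethernet
 *	mov m.port_out m.port_in
 *	emit h.ethernet
 *	tx m.port_out
 */
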
struct instr_operand {
	uint8_t struct_id;
	uint8_t n_bits;
	uint8_t offset;
	uint8_t pad;
};

struct instr_io {
	struct {
		union {
			struct {
				uint8_t offset;
				uint8_t n_bits;
				uint8_t pad[2];
			};

			uint32_t val;
		};
	} io;

	struct {
		uint8_t header_id[8];
		uint8_t struct_id[8];
		uint8_t n_bytes[8];
	} hdr;
};

struct instr_hdr_validity {
	uint8_t header_id;
};

struct instr_table {
	uint8_t table_id;
};

struct instr_extern_obj {
	uint8_t ext_obj_id;
	uint8_t func_id;
};

struct instr_extern_func {
	uint8_t ext_func_id;
};

struct instr_dst_src {
	struct instr_operand dst;
	union {
		struct instr_operand src;
		uint64_t src_val;
	};
};

struct instr_regarray {
	uint8_t regarray_id;
	uint8_t pad[3];

	union {
		struct instr_operand idx;
		uint32_t idx_val;
	};

	union {
		struct instr_operand dstsrc;
		uint64_t dstsrc_val;
	};
};

struct instr_meter {
	uint8_t metarray_id;
	uint8_t pad[3];

	union {
		struct instr_operand idx;
		uint32_t idx_val;
	};

	struct instr_operand length;

	union {
		struct instr_operand color_in;
		uint32_t color_in_val;
	};

	struct instr_operand color_out;
};

struct instr_dma {
	struct {
		uint8_t header_id[8];
		uint8_t struct_id[8];
	} dst;

	struct {
		uint8_t offset[8];
	} src;

	uint16_t n_bytes[8];
};

struct instr_jmp {
	struct instruction *ip;

	union {
		struct instr_operand a;
		uint8_t header_id;
		uint8_t action_id;
	};

	union {
		struct instr_operand b;
		uint64_t b_val;
	};
};

struct instruction {
	enum instruction_type type;
	union {
		struct instr_io io;
		struct instr_hdr_validity valid;
		struct instr_dst_src mov;
		struct instr_regarray regarray;
		struct instr_meter meter;
		struct instr_dma dma;
		struct instr_dst_src alu;
		struct instr_table table;
		struct instr_extern_obj ext_obj;
		struct instr_extern_func ext_func;
		struct instr_jmp jmp;
	};
};

struct instruction_data {
	char label[RTE_SWX_NAME_SIZE];
	char jmp_label[RTE_SWX_NAME_SIZE];
	uint32_t n_users; /* user = jmp instruction to this instruction. */
	int invalid;
};

/*
 * Action.
 */
struct action {
	TAILQ_ENTRY(action) node;
	char name[RTE_SWX_NAME_SIZE];
	struct struct_type *st;
	int *args_endianness; /* 0 = Host Byte Order (HBO). */
	struct instruction *instructions;
	uint32_t n_instructions;
	uint32_t id;
};

TAILQ_HEAD(action_tailq, action);

/*
 * Table.
 */
struct table_type {
	TAILQ_ENTRY(table_type) node;
	char name[RTE_SWX_NAME_SIZE];
	enum rte_swx_table_match_type match_type;
	struct rte_swx_table_ops ops;
};

TAILQ_HEAD(table_type_tailq, table_type);

struct match_field {
	enum rte_swx_table_match_type match_type;
	struct field *field;
};

struct table {
	TAILQ_ENTRY(table) node;
	char name[RTE_SWX_NAME_SIZE];
	char args[RTE_SWX_NAME_SIZE];
	struct table_type *type; /* NULL when n_fields == 0. */

	/* Match. */
	struct match_field *fields;
	uint32_t n_fields;
	struct header *header; /* Only valid when n_fields > 0. */

	/* Action. */
	struct action **actions;
	struct action *default_action;
	uint8_t *default_action_data;
	uint32_t n_actions;
	int default_action_is_const;
	uint32_t action_data_size_max;

	uint32_t size;
	uint32_t id;
};

TAILQ_HEAD(table_tailq, table);

struct table_runtime {
	rte_swx_table_lookup_t func;
	void *mailbox;
	uint8_t **key;
};

struct table_statistics {
	uint64_t n_pkts_hit[2]; /* 0 = Miss, 1 = Hit. */
	uint64_t *n_pkts_action;
};

/*
 * Selector.
 */
struct selector {
	TAILQ_ENTRY(selector) node;
	char name[RTE_SWX_NAME_SIZE];

	struct field *group_id_field;
	struct field **selector_fields;
	uint32_t n_selector_fields;
	struct header *selector_header;
	struct field *member_id_field;

	uint32_t n_groups_max;
	uint32_t n_members_per_group_max;

	uint32_t id;
};

TAILQ_HEAD(selector_tailq, selector);

struct selector_runtime {
	void *mailbox;
	uint8_t **group_id_buffer;
	uint8_t **selector_buffer;
	uint8_t **member_id_buffer;
};

struct selector_statistics {
	uint64_t n_pkts;
};

/*
 * Register array.
 */
struct regarray {
	TAILQ_ENTRY(regarray) node;
	char name[RTE_SWX_NAME_SIZE];
	uint64_t init_val;
	uint32_t size;
	uint32_t id;
};

TAILQ_HEAD(regarray_tailq, regarray);

struct regarray_runtime {
	uint64_t *regarray;
	uint32_t size_mask;
};

/*
 * Meter array.
 */
struct meter_profile {
	TAILQ_ENTRY(meter_profile) node;
	char name[RTE_SWX_NAME_SIZE];
	struct rte_meter_trtcm_params params;
	struct rte_meter_trtcm_profile profile;
	uint32_t n_users;
};

TAILQ_HEAD(meter_profile_tailq, meter_profile);

struct metarray {
	TAILQ_ENTRY(metarray) node;
	char name[RTE_SWX_NAME_SIZE];
	uint32_t size;
	uint32_t id;
};

TAILQ_HEAD(metarray_tailq, metarray);

struct meter {
	struct rte_meter_trtcm m;
	struct meter_profile *profile;
	enum rte_color color_mask;
	uint8_t pad[20];

	uint64_t n_pkts[RTE_COLORS];
	uint64_t n_bytes[RTE_COLORS];
};

struct metarray_runtime {
	struct meter *metarray;
	uint32_t size_mask;
};

/*
 * Pipeline.
 */
struct thread {
	/* Packet. */
	struct rte_swx_pkt pkt;
	uint8_t *ptr;

	/* Structures. */
	uint8_t **structs;

	/* Packet headers. */
	struct header_runtime *headers; /* Extracted or generated headers. */
	struct header_out_runtime *headers_out; /* Emitted headers. */
	uint8_t *header_storage;
	uint8_t *header_out_storage;
	uint64_t valid_headers;
	uint32_t n_headers_out;

	/* Packet meta-data. */
	uint8_t *metadata;

	/* Tables. */
	struct table_runtime *tables;
	struct selector_runtime *selectors;
	struct rte_swx_table_state *table_state;
	uint64_t action_id;
	int hit; /* 0 = Miss, 1 = Hit. */

	/* Extern objects and functions. */
	struct extern_obj_runtime *extern_objs;
	struct extern_func_runtime *extern_funcs;

	/* Instructions. */
	struct instruction *ip;
	struct instruction *ret;
};

#define MASK64_BIT_GET(mask, pos) ((mask) & (1LLU << (pos)))
#define MASK64_BIT_SET(mask, pos) ((mask) | (1LLU << (pos)))
#define MASK64_BIT_CLR(mask, pos) ((mask) & ~(1LLU << (pos)))

#define HEADER_VALID(thread, header_id) \
	MASK64_BIT_GET((thread)->valid_headers, header_id)

#define ALU(thread, ip, operator) \
{ \
	uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
	uint64_t dst64 = *dst64_ptr; \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
	uint64_t dst = dst64 & dst64_mask; \
\
	uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
	uint64_t src64 = *src64_ptr; \
	uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
	uint64_t src = src64 & src64_mask; \
\
	uint64_t result = dst operator src; \
\
	*dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
}

#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN

#define ALU_MH(thread, ip, operator) \
{ \
	uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
	uint64_t dst64 = *dst64_ptr; \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
	uint64_t dst = dst64 & dst64_mask; \
\
	uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
	uint64_t src64 = *src64_ptr; \
	uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits); \
\
	uint64_t result = dst operator src; \
\
	*dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
}

#define ALU_HM(thread, ip, operator) \
{ \
	uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
	uint64_t dst64 = *dst64_ptr; \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
	uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
\
	uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
	uint64_t src64 = *src64_ptr; \
	uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
	uint64_t src = src64 & src64_mask; \
\
	uint64_t result = dst operator src; \
	result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
\
	*dst64_ptr = (dst64 & ~dst64_mask) | result; \
}

#define ALU_HM_FAST(thread, ip, operator) \
{ \
	uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
	uint64_t dst64 = *dst64_ptr; \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
	uint64_t dst = dst64 & dst64_mask; \
\
	uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
	uint64_t src64 = *src64_ptr; \
	uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->alu.src.n_bits); \
	uint64_t src = hton64(src64 & src64_mask) >> (64 - (ip)->alu.dst.n_bits); \
\
	uint64_t result = dst operator src; \
\
	*dst64_ptr = (dst64 & ~dst64_mask) | result; \
}

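/* Worked example for ALU_MH above (illustrative values): with a 16-bit NBO
 * header source field holding 0x1234, the packet bytes at the field offset
 * are {0x12, 0x34}. On a little-endian CPU the 64-bit load places those bytes
 * in the low-order positions, so ntoh64() followed by the right shift by
 * (64 - 16) recovers 0x1234 in host order. The masked low bits of dst64
 * already hold the HBO destination value, so the operator is applied to two
 * host-order operands and only the destination's n_bits are written back.
 */
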
#define ALU_HH(thread, ip, operator) \
{ \
	uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
	uint64_t dst64 = *dst64_ptr; \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
	uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
\
	uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
	uint64_t src64 = *src64_ptr; \
	uint64_t src = ntoh64(src64) >> (64 - (ip)->alu.src.n_bits); \
\
	uint64_t result = dst operator src; \
	result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
\
	*dst64_ptr = (dst64 & ~dst64_mask) | result; \
}

#define ALU_HH_FAST(thread, ip, operator) \
{ \
	uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
	uint64_t dst64 = *dst64_ptr; \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
	uint64_t dst = dst64 & dst64_mask; \
\
	uint8_t *src_struct = (thread)->structs[(ip)->alu.src.struct_id]; \
	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->alu.src.offset]; \
	uint64_t src64 = *src64_ptr; \
	uint64_t src = (src64 << (64 - (ip)->alu.src.n_bits)) >> (64 - (ip)->alu.dst.n_bits); \
\
	uint64_t result = dst operator src; \
\
	*dst64_ptr = (dst64 & ~dst64_mask) | result; \
}

#else

#define ALU_MH ALU
#define ALU_HM ALU
#define ALU_HM_FAST ALU
#define ALU_HH ALU
#define ALU_HH_FAST ALU

#endif

#define ALU_I(thread, ip, operator) \
{ \
	uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
	uint64_t dst64 = *dst64_ptr; \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
	uint64_t dst = dst64 & dst64_mask; \
\
	uint64_t src = (ip)->alu.src_val; \
\
	uint64_t result = dst operator src; \
\
	*dst64_ptr = (dst64 & ~dst64_mask) | (result & dst64_mask); \
}

#define ALU_MI ALU_I

#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN

#define ALU_HI(thread, ip, operator) \
{ \
	uint8_t *dst_struct = (thread)->structs[(ip)->alu.dst.struct_id]; \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->alu.dst.offset]; \
	uint64_t dst64 = *dst64_ptr; \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->alu.dst.n_bits); \
	uint64_t dst = ntoh64(dst64) >> (64 - (ip)->alu.dst.n_bits); \
\
	uint64_t src = (ip)->alu.src_val; \
\
	uint64_t result = dst operator src; \
	result = hton64(result << (64 - (ip)->alu.dst.n_bits)); \
\
	*dst64_ptr = (dst64 & ~dst64_mask) | result; \
}

#else

#define ALU_HI ALU_I

#endif

#define MOV(thread, ip) \
{ \
	uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
	uint64_t dst64 = *dst64_ptr; \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
\
	uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
	uint64_t src64 = *src64_ptr; \
	uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->mov.src.n_bits); \
	uint64_t src = src64 & src64_mask; \
\
	*dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
}

#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN

#define MOV_MH(thread, ip) \
{ \
	uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
	uint64_t dst64 = *dst64_ptr; \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
\
	uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
	uint64_t src64 = *src64_ptr; \
	uint64_t src = ntoh64(src64) >> (64 - (ip)->mov.src.n_bits); \
\
	*dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
}

#define MOV_HM(thread, ip) \
{ \
	uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
	uint64_t dst64 = *dst64_ptr; \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
\
	uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
	uint64_t src64 = *src64_ptr; \
	uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->mov.src.n_bits); \
	uint64_t src = src64 & src64_mask; \
\
	src = hton64(src) >> (64 - (ip)->mov.dst.n_bits); \
	*dst64_ptr = (dst64 & ~dst64_mask) | src; \
}

#define MOV_HH(thread, ip) \
{ \
	uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
	uint64_t dst64 = *dst64_ptr; \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
\
	uint8_t *src_struct = (thread)->structs[(ip)->mov.src.struct_id]; \
	uint64_t *src64_ptr = (uint64_t *)&src_struct[(ip)->mov.src.offset]; \
	uint64_t src64 = *src64_ptr; \
\
	uint64_t src = src64 << (64 - (ip)->mov.src.n_bits); \
	src = src >> (64 - (ip)->mov.dst.n_bits); \
	*dst64_ptr = (dst64 & ~dst64_mask) | src; \
}

#else

#define MOV_MH MOV
#define MOV_HM MOV
#define MOV_HH MOV

#endif

#define MOV_I(thread, ip) \
{ \
	uint8_t *dst_struct = (thread)->structs[(ip)->mov.dst.struct_id]; \
	uint64_t *dst64_ptr = (uint64_t *)&dst_struct[(ip)->mov.dst.offset]; \
	uint64_t dst64 = *dst64_ptr; \
	uint64_t dst64_mask = UINT64_MAX >> (64 - (ip)->mov.dst.n_bits); \
\
	uint64_t src = (ip)->mov.src_val; \
\
	*dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); \
}

#define JMP_CMP(thread, ip, operator) \
{ \
	uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
	uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
	uint64_t a64 = *a64_ptr; \
	uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \
	uint64_t a = a64 & a64_mask; \
\
	uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
	uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
	uint64_t b64 = *b64_ptr; \
	uint64_t b64_mask = UINT64_MAX >> (64 - (ip)->jmp.b.n_bits); \
	uint64_t b = b64 & b64_mask; \
\
	(thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
}

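/* Note on MOV_HM above (illustrative values): when a 16-bit HBO source value
 * such as 0x1234 is moved into a 16-bit NBO header field, the masked value is
 * first byte-swapped by hton64(), which places it in the most significant
 * bytes of the 64-bit word, and then shifted back down by (64 - dst.n_bits),
 * so the bytes that land in the packet are {0x12, 0x34}.
 */
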
#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN

#define JMP_CMP_MH(thread, ip, operator) \
{ \
	uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
	uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
	uint64_t a64 = *a64_ptr; \
	uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \
	uint64_t a = a64 & a64_mask; \
\
	uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
	uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
	uint64_t b64 = *b64_ptr; \
	uint64_t b = ntoh64(b64) >> (64 - (ip)->jmp.b.n_bits); \
\
	(thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
}

#define JMP_CMP_HM(thread, ip, operator) \
{ \
	uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
	uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
	uint64_t a64 = *a64_ptr; \
	uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \
\
	uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
	uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
	uint64_t b64 = *b64_ptr; \
	uint64_t b64_mask = UINT64_MAX >> (64 - (ip)->jmp.b.n_bits); \
	uint64_t b = b64 & b64_mask; \
\
	(thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
}

#define JMP_CMP_HH(thread, ip, operator) \
{ \
	uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
	uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
	uint64_t a64 = *a64_ptr; \
	uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \
\
	uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
	uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
	uint64_t b64 = *b64_ptr; \
	uint64_t b = ntoh64(b64) >> (64 - (ip)->jmp.b.n_bits); \
\
	(thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
}

#define JMP_CMP_HH_FAST(thread, ip, operator) \
{ \
	uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
	uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
	uint64_t a64 = *a64_ptr; \
	uint64_t a = a64 << (64 - (ip)->jmp.a.n_bits); \
\
	uint8_t *b_struct = (thread)->structs[(ip)->jmp.b.struct_id]; \
	uint64_t *b64_ptr = (uint64_t *)&b_struct[(ip)->jmp.b.offset]; \
	uint64_t b64 = *b64_ptr; \
	uint64_t b = b64 << (64 - (ip)->jmp.b.n_bits); \
\
	(thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
}

#else

#define JMP_CMP_MH JMP_CMP
#define JMP_CMP_HM JMP_CMP
#define JMP_CMP_HH JMP_CMP
#define JMP_CMP_HH_FAST JMP_CMP

#endif

#define JMP_CMP_I(thread, ip, operator) \
{ \
	uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
	uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
	uint64_t a64 = *a64_ptr; \
	uint64_t a64_mask = UINT64_MAX >> (64 - (ip)->jmp.a.n_bits); \
	uint64_t a = a64 & a64_mask; \
\
	uint64_t b = (ip)->jmp.b_val; \
\
	(thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
}

#define JMP_CMP_MI JMP_CMP_I

#if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN

#define JMP_CMP_HI(thread, ip, operator) \
{ \
	uint8_t *a_struct = (thread)->structs[(ip)->jmp.a.struct_id]; \
	uint64_t *a64_ptr = (uint64_t *)&a_struct[(ip)->jmp.a.offset]; \
	uint64_t a64 = *a64_ptr; \
	uint64_t a = ntoh64(a64) >> (64 - (ip)->jmp.a.n_bits); \
\
	uint64_t b = (ip)->jmp.b_val; \
\
	(thread)->ip = (a operator b) ? (ip)->jmp.ip : ((thread)->ip + 1); \
}

#else

#define JMP_CMP_HI JMP_CMP_I

#endif

#define METADATA_READ(thread, offset, n_bits) \
({ \
	uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \
	uint64_t m64 = *m64_ptr; \
	uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \
	(m64 & m64_mask); \
})

#define METADATA_WRITE(thread, offset, n_bits, value) \
{ \
	uint64_t *m64_ptr = (uint64_t *)&(thread)->metadata[offset]; \
	uint64_t m64 = *m64_ptr; \
	uint64_t m64_mask = UINT64_MAX >> (64 - (n_bits)); \
\
	uint64_t m_new = value; \
\
	*m64_ptr = (m64 & ~m64_mask) | (m_new & m64_mask); \
}

#ifndef RTE_SWX_PIPELINE_THREADS_MAX
#define RTE_SWX_PIPELINE_THREADS_MAX 16
#endif

struct rte_swx_pipeline {
	struct struct_type_tailq struct_types;
	struct port_in_type_tailq port_in_types;
	struct port_in_tailq ports_in;
	struct port_out_type_tailq port_out_types;
	struct port_out_tailq ports_out;
	struct extern_type_tailq extern_types;
	struct extern_obj_tailq extern_objs;
	struct extern_func_tailq extern_funcs;
	struct header_tailq headers;
	struct struct_type *metadata_st;
	uint32_t metadata_struct_id;
	struct action_tailq actions;
	struct table_type_tailq table_types;
	struct table_tailq tables;
	struct selector_tailq selectors;
	struct regarray_tailq regarrays;
	struct meter_profile_tailq meter_profiles;
	struct metarray_tailq metarrays;

	struct port_in_runtime *in;
	struct port_out_runtime *out;
	struct instruction **action_instructions;
	struct rte_swx_table_state *table_state;
	struct table_statistics *table_stats;
	struct selector_statistics *selector_stats;
	struct regarray_runtime *regarray_runtime;
	struct metarray_runtime *metarray_runtime;
	struct instruction *instructions;
	struct thread threads[RTE_SWX_PIPELINE_THREADS_MAX];

	uint32_t n_structs;
	uint32_t n_ports_in;
	uint32_t n_ports_out;
	uint32_t n_extern_objs;
	uint32_t n_extern_funcs;
	uint32_t n_actions;
	uint32_t n_tables;
	uint32_t n_selectors;
	uint32_t n_regarrays;
	uint32_t n_metarrays;
	uint32_t n_headers;
	uint32_t thread_id;
	uint32_t port_id;
	uint32_t n_instructions;
	int build_done;
	int numa_node;
};

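/* Note: the TAILQ members at the top of the pipeline object above are the
 * build-time databases populated by the various *_register() and *_config()
 * calls below, while the pointer members that follow (in, out,
 * action_instructions, table_state, etc.) are the flat run-time arrays
 * derived from those lists by the corresponding *_build() helpers.
 */
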
/*
 * Struct.
 */
static struct struct_type *
struct_type_find(struct rte_swx_pipeline *p, const char *name)
{
	struct struct_type *elem;

	TAILQ_FOREACH(elem, &p->struct_types, node)
		if (strcmp(elem->name, name) == 0)
			return elem;

	return NULL;
}

static struct field *
struct_type_field_find(struct struct_type *st, const char *name)
{
	uint32_t i;

	for (i = 0; i < st->n_fields; i++) {
		struct field *f = &st->fields[i];

		if (strcmp(f->name, name) == 0)
			return f;
	}

	return NULL;
}

int
rte_swx_pipeline_struct_type_register(struct rte_swx_pipeline *p,
				      const char *name,
				      struct rte_swx_field_params *fields,
				      uint32_t n_fields)
{
	struct struct_type *st;
	uint32_t i;

	CHECK(p, EINVAL);
	CHECK_NAME(name, EINVAL);
	CHECK(fields, EINVAL);
	CHECK(n_fields, EINVAL);

	for (i = 0; i < n_fields; i++) {
		struct rte_swx_field_params *f = &fields[i];
		uint32_t j;

		CHECK_NAME(f->name, EINVAL);
		CHECK(f->n_bits, EINVAL);
		CHECK(f->n_bits <= 64, EINVAL);
		CHECK((f->n_bits & 7) == 0, EINVAL);

		for (j = 0; j < i; j++) {
			struct rte_swx_field_params *f_prev = &fields[j];

			CHECK(strcmp(f->name, f_prev->name), EINVAL);
		}
	}

	CHECK(!struct_type_find(p, name), EEXIST);

	/* Node allocation. */
	st = calloc(1, sizeof(struct struct_type));
	CHECK(st, ENOMEM);

	st->fields = calloc(n_fields, sizeof(struct field));
	if (!st->fields) {
		free(st);
		CHECK(0, ENOMEM);
	}

	/* Node initialization. */
	strcpy(st->name, name);
	for (i = 0; i < n_fields; i++) {
		struct field *dst = &st->fields[i];
		struct rte_swx_field_params *src = &fields[i];

		strcpy(dst->name, src->name);
		dst->n_bits = src->n_bits;
		dst->offset = st->n_bits;

		st->n_bits += src->n_bits;
	}
	st->n_fields = n_fields;

	/* Node add to tailq. */
	TAILQ_INSERT_TAIL(&p->struct_types, st, node);

	return 0;
}

static int
struct_build(struct rte_swx_pipeline *p)
{
	uint32_t i;

	for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
		struct thread *t = &p->threads[i];

		t->structs = calloc(p->n_structs, sizeof(uint8_t *));
		CHECK(t->structs, ENOMEM);
	}

	return 0;
}

static void
struct_build_free(struct rte_swx_pipeline *p)
{
	uint32_t i;

	for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
		struct thread *t = &p->threads[i];

		free(t->structs);
		t->structs = NULL;
	}
}

static void
struct_free(struct rte_swx_pipeline *p)
{
	struct_build_free(p);

	/* Struct types. */
	for ( ; ; ) {
		struct struct_type *elem;

		elem = TAILQ_FIRST(&p->struct_types);
		if (!elem)
			break;

		TAILQ_REMOVE(&p->struct_types, elem, node);
		free(elem->fields);
		free(elem);
	}
}

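/* Usage sketch (illustrative only; the struct/field names are hypothetical):
 *
 *	static struct rte_swx_field_params ethernet_h_fields[] = {
 *		{.name = "dst_addr", .n_bits = 48},
 *		{.name = "src_addr", .n_bits = 48},
 *		{.name = "ether_type", .n_bits = 16},
 *	};
 *
 *	status = rte_swx_pipeline_struct_type_register(p, "ethernet_h",
 *		ethernet_h_fields, RTE_DIM(ethernet_h_fields));
 *
 * Per the checks in the function above, field widths must be non-zero
 * multiples of 8 bits and at most 64 bits, and field names must be unique.
 */
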
/*
 * Input port.
 */
static struct port_in_type *
port_in_type_find(struct rte_swx_pipeline *p, const char *name)
{
	struct port_in_type *elem;

	if (!name)
		return NULL;

	TAILQ_FOREACH(elem, &p->port_in_types, node)
		if (strcmp(elem->name, name) == 0)
			return elem;

	return NULL;
}

int
rte_swx_pipeline_port_in_type_register(struct rte_swx_pipeline *p,
				       const char *name,
				       struct rte_swx_port_in_ops *ops)
{
	struct port_in_type *elem;

	CHECK(p, EINVAL);
	CHECK_NAME(name, EINVAL);
	CHECK(ops, EINVAL);
	CHECK(ops->create, EINVAL);
	CHECK(ops->free, EINVAL);
	CHECK(ops->pkt_rx, EINVAL);
	CHECK(ops->stats_read, EINVAL);

	CHECK(!port_in_type_find(p, name), EEXIST);

	/* Node allocation. */
	elem = calloc(1, sizeof(struct port_in_type));
	CHECK(elem, ENOMEM);

	/* Node initialization. */
	strcpy(elem->name, name);
	memcpy(&elem->ops, ops, sizeof(*ops));

	/* Node add to tailq. */
	TAILQ_INSERT_TAIL(&p->port_in_types, elem, node);

	return 0;
}

static struct port_in *
port_in_find(struct rte_swx_pipeline *p, uint32_t port_id)
{
	struct port_in *port;

	TAILQ_FOREACH(port, &p->ports_in, node)
		if (port->id == port_id)
			return port;

	return NULL;
}

int
rte_swx_pipeline_port_in_config(struct rte_swx_pipeline *p,
				uint32_t port_id,
				const char *port_type_name,
				void *args)
{
	struct port_in_type *type = NULL;
	struct port_in *port = NULL;
	void *obj = NULL;

	CHECK(p, EINVAL);

	CHECK(!port_in_find(p, port_id), EINVAL);

	CHECK_NAME(port_type_name, EINVAL);
	type = port_in_type_find(p, port_type_name);
	CHECK(type, EINVAL);

	obj = type->ops.create(args);
	CHECK(obj, ENODEV);

	/* Node allocation. */
	port = calloc(1, sizeof(struct port_in));
	CHECK(port, ENOMEM);

	/* Node initialization. */
	port->type = type;
	port->obj = obj;
	port->id = port_id;

	/* Node add to tailq. */
	TAILQ_INSERT_TAIL(&p->ports_in, port, node);
	if (p->n_ports_in < port_id + 1)
		p->n_ports_in = port_id + 1;

	return 0;
}

static int
port_in_build(struct rte_swx_pipeline *p)
{
	struct port_in *port;
	uint32_t i;

	CHECK(p->n_ports_in, EINVAL);
	CHECK(rte_is_power_of_2(p->n_ports_in), EINVAL);

	for (i = 0; i < p->n_ports_in; i++)
		CHECK(port_in_find(p, i), EINVAL);

	p->in = calloc(p->n_ports_in, sizeof(struct port_in_runtime));
	CHECK(p->in, ENOMEM);

	TAILQ_FOREACH(port, &p->ports_in, node) {
		struct port_in_runtime *in = &p->in[port->id];

		in->pkt_rx = port->type->ops.pkt_rx;
		in->obj = port->obj;
	}

	return 0;
}

static void
port_in_build_free(struct rte_swx_pipeline *p)
{
	free(p->in);
	p->in = NULL;
}

static void
port_in_free(struct rte_swx_pipeline *p)
{
	port_in_build_free(p);

	/* Input ports. */
	for ( ; ; ) {
		struct port_in *port;

		port = TAILQ_FIRST(&p->ports_in);
		if (!port)
			break;

		TAILQ_REMOVE(&p->ports_in, port, node);
		port->type->ops.free(port->obj);
		free(port);
	}

	/* Input port types. */
	for ( ; ; ) {
		struct port_in_type *elem;

		elem = TAILQ_FIRST(&p->port_in_types);
		if (!elem)
			break;

		TAILQ_REMOVE(&p->port_in_types, elem, node);
		free(elem);
	}
}

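/* Usage sketch (illustrative only; the "ring" type name, ops and args are
 * hypothetical, e.g. provided by a ring-based port implementation):
 *
 *	status = rte_swx_pipeline_port_in_type_register(p, "ring",
 *		&my_ring_port_in_ops);
 *	status = rte_swx_pipeline_port_in_config(p, 0, "ring",
 *		&my_ring_port_args);
 *
 * Note that port_in_build() above requires port IDs 0 .. n_ports_in - 1 to
 * all be configured and n_ports_in to be a power of two.
 */
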
/*
 * Output port.
 */
static struct port_out_type *
port_out_type_find(struct rte_swx_pipeline *p, const char *name)
{
	struct port_out_type *elem;

	if (!name)
		return NULL;

	TAILQ_FOREACH(elem, &p->port_out_types, node)
		if (!strcmp(elem->name, name))
			return elem;

	return NULL;
}

int
rte_swx_pipeline_port_out_type_register(struct rte_swx_pipeline *p,
					const char *name,
					struct rte_swx_port_out_ops *ops)
{
	struct port_out_type *elem;

	CHECK(p, EINVAL);
	CHECK_NAME(name, EINVAL);
	CHECK(ops, EINVAL);
	CHECK(ops->create, EINVAL);
	CHECK(ops->free, EINVAL);
	CHECK(ops->pkt_tx, EINVAL);
	CHECK(ops->stats_read, EINVAL);

	CHECK(!port_out_type_find(p, name), EEXIST);

	/* Node allocation. */
	elem = calloc(1, sizeof(struct port_out_type));
	CHECK(elem, ENOMEM);

	/* Node initialization. */
	strcpy(elem->name, name);
	memcpy(&elem->ops, ops, sizeof(*ops));

	/* Node add to tailq. */
	TAILQ_INSERT_TAIL(&p->port_out_types, elem, node);

	return 0;
}

static struct port_out *
port_out_find(struct rte_swx_pipeline *p, uint32_t port_id)
{
	struct port_out *port;

	TAILQ_FOREACH(port, &p->ports_out, node)
		if (port->id == port_id)
			return port;

	return NULL;
}

int
rte_swx_pipeline_port_out_config(struct rte_swx_pipeline *p,
				 uint32_t port_id,
				 const char *port_type_name,
				 void *args)
{
	struct port_out_type *type = NULL;
	struct port_out *port = NULL;
	void *obj = NULL;

	CHECK(p, EINVAL);

	CHECK(!port_out_find(p, port_id), EINVAL);

	CHECK_NAME(port_type_name, EINVAL);
	type = port_out_type_find(p, port_type_name);
	CHECK(type, EINVAL);

	obj = type->ops.create(args);
	CHECK(obj, ENODEV);

	/* Node allocation. */
	port = calloc(1, sizeof(struct port_out));
	CHECK(port, ENOMEM);

	/* Node initialization. */
	port->type = type;
	port->obj = obj;
	port->id = port_id;

	/* Node add to tailq. */
	TAILQ_INSERT_TAIL(&p->ports_out, port, node);
	if (p->n_ports_out < port_id + 1)
		p->n_ports_out = port_id + 1;

	return 0;
}

static int
port_out_build(struct rte_swx_pipeline *p)
{
	struct port_out *port;
	uint32_t i;

	CHECK(p->n_ports_out, EINVAL);

	for (i = 0; i < p->n_ports_out; i++)
		CHECK(port_out_find(p, i), EINVAL);

	p->out = calloc(p->n_ports_out, sizeof(struct port_out_runtime));
	CHECK(p->out, ENOMEM);

	TAILQ_FOREACH(port, &p->ports_out, node) {
		struct port_out_runtime *out = &p->out[port->id];

		out->pkt_tx = port->type->ops.pkt_tx;
		out->flush = port->type->ops.flush;
		out->obj = port->obj;
	}

	return 0;
}

static void
port_out_build_free(struct rte_swx_pipeline *p)
{
	free(p->out);
	p->out = NULL;
}

static void
port_out_free(struct rte_swx_pipeline *p)
{
	port_out_build_free(p);

	/* Output ports. */
	for ( ; ; ) {
		struct port_out *port;

		port = TAILQ_FIRST(&p->ports_out);
		if (!port)
			break;

		TAILQ_REMOVE(&p->ports_out, port, node);
		port->type->ops.free(port->obj);
		free(port);
	}

	/* Output port types. */
	for ( ; ; ) {
		struct port_out_type *elem;

		elem = TAILQ_FIRST(&p->port_out_types);
		if (!elem)
			break;

		TAILQ_REMOVE(&p->port_out_types, elem, node);
		free(elem);
	}
}

/*
 * Extern object.
 */
static struct extern_type *
extern_type_find(struct rte_swx_pipeline *p, const char *name)
{
	struct extern_type *elem;

	TAILQ_FOREACH(elem, &p->extern_types, node)
		if (strcmp(elem->name, name) == 0)
			return elem;

	return NULL;
}

static struct extern_type_member_func *
extern_type_member_func_find(struct extern_type *type, const char *name)
{
	struct extern_type_member_func *elem;

	TAILQ_FOREACH(elem, &type->funcs, node)
		if (strcmp(elem->name, name) == 0)
			return elem;

	return NULL;
}

static struct extern_obj *
extern_obj_find(struct rte_swx_pipeline *p, const char *name)
{
	struct extern_obj *elem;

	TAILQ_FOREACH(elem, &p->extern_objs, node)
		if (strcmp(elem->name, name) == 0)
			return elem;

	return NULL;
}

static struct extern_type_member_func *
extern_obj_member_func_parse(struct rte_swx_pipeline *p,
			     const char *name,
			     struct extern_obj **obj)
{
	struct extern_obj *object;
	struct extern_type_member_func *func;
	char *object_name, *func_name;

	if (name[0] != 'e' || name[1] != '.')
		return NULL;

	object_name = strdup(&name[2]);
	if (!object_name)
		return NULL;

	func_name = strchr(object_name, '.');
	if (!func_name) {
		free(object_name);
		return NULL;
	}

	*func_name = 0;
	func_name++;

	object = extern_obj_find(p, object_name);
	if (!object) {
		free(object_name);
		return NULL;
	}

	func = extern_type_member_func_find(object->type, func_name);
	if (!func) {
		free(object_name);
		return NULL;
	}

	if (obj)
		*obj = object;

	free(object_name);
	return func;
}

static struct field *
extern_obj_mailbox_field_parse(struct rte_swx_pipeline *p,
			       const char *name,
			       struct extern_obj **object)
{
	struct extern_obj *obj;
	struct field *f;
	char *obj_name, *field_name;

	if ((name[0] != 'e') || (name[1] != '.'))
		return NULL;

	obj_name = strdup(&name[2]);
	if (!obj_name)
		return NULL;

	field_name = strchr(obj_name, '.');
	if (!field_name) {
		free(obj_name);
		return NULL;
	}

	*field_name = 0;
	field_name++;

	obj = extern_obj_find(p, obj_name);
	if (!obj) {
		free(obj_name);
		return NULL;
	}

	f = struct_type_field_find(obj->type->mailbox_struct_type, field_name);
	if (!f) {
		free(obj_name);
		return NULL;
	}

	if (object)
		*object = obj;

	free(obj_name);
	return f;
}

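/* Example of the naming convention handled by the two parsers above
 * (illustrative names): given an extern object instance called "ipsec0", the
 * instruction operand "e.ipsec0.encrypt" resolves to the member function
 * "encrypt" of that object's type, while "e.ipsec0.key" resolves to the field
 * "key" of the object's mailbox struct.
 */
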
int
rte_swx_pipeline_extern_type_register(struct rte_swx_pipeline *p,
	const char *name,
	const char *mailbox_struct_type_name,
	rte_swx_extern_type_constructor_t constructor,
	rte_swx_extern_type_destructor_t destructor)
{
	struct extern_type *elem;
	struct struct_type *mailbox_struct_type;

	CHECK(p, EINVAL);

	CHECK_NAME(name, EINVAL);
	CHECK(!extern_type_find(p, name), EEXIST);

	CHECK_NAME(mailbox_struct_type_name, EINVAL);
	mailbox_struct_type = struct_type_find(p, mailbox_struct_type_name);
	CHECK(mailbox_struct_type, EINVAL);

	CHECK(constructor, EINVAL);
	CHECK(destructor, EINVAL);

	/* Node allocation. */
	elem = calloc(1, sizeof(struct extern_type));
	CHECK(elem, ENOMEM);

	/* Node initialization. */
	strcpy(elem->name, name);
	elem->mailbox_struct_type = mailbox_struct_type;
	elem->constructor = constructor;
	elem->destructor = destructor;
	TAILQ_INIT(&elem->funcs);

	/* Node add to tailq. */
	TAILQ_INSERT_TAIL(&p->extern_types, elem, node);

	return 0;
}

int
rte_swx_pipeline_extern_type_member_func_register(struct rte_swx_pipeline *p,
	const char *extern_type_name,
	const char *name,
	rte_swx_extern_type_member_func_t member_func)
{
	struct extern_type *type;
	struct extern_type_member_func *type_member;

	CHECK(p, EINVAL);

	CHECK_NAME(extern_type_name, EINVAL);
	type = extern_type_find(p, extern_type_name);
	CHECK(type, EINVAL);
	CHECK(type->n_funcs < RTE_SWX_EXTERN_TYPE_MEMBER_FUNCS_MAX, ENOSPC);

	CHECK_NAME(name, EINVAL);
	CHECK(!extern_type_member_func_find(type, name), EEXIST);

	CHECK(member_func, EINVAL);

	/* Node allocation. */
	type_member = calloc(1, sizeof(struct extern_type_member_func));
	CHECK(type_member, ENOMEM);

	/* Node initialization. */
	strcpy(type_member->name, name);
	type_member->func = member_func;
	type_member->id = type->n_funcs;

	/* Node add to tailq. */
	TAILQ_INSERT_TAIL(&type->funcs, type_member, node);
	type->n_funcs++;

	return 0;
}

int
rte_swx_pipeline_extern_object_config(struct rte_swx_pipeline *p,
	const char *extern_type_name,
	const char *name,
	const char *args)
{
	struct extern_type *type;
	struct extern_obj *obj;
	void *obj_handle;

	CHECK(p, EINVAL);

	CHECK_NAME(extern_type_name, EINVAL);
	type = extern_type_find(p, extern_type_name);
	CHECK(type, EINVAL);

	CHECK_NAME(name, EINVAL);
	CHECK(!extern_obj_find(p, name), EEXIST);

	/* Node allocation. */
	obj = calloc(1, sizeof(struct extern_obj));
	CHECK(obj, ENOMEM);

	/* Object construction. */
	obj_handle = type->constructor(args);
	if (!obj_handle) {
		free(obj);
		CHECK(0, ENODEV);
	}

	/* Node initialization. */
	strcpy(obj->name, name);
	obj->type = type;
	obj->obj = obj_handle;
	obj->struct_id = p->n_structs;
	obj->id = p->n_extern_objs;

	/* Node add to tailq. */
	TAILQ_INSERT_TAIL(&p->extern_objs, obj, node);
	p->n_extern_objs++;
	p->n_structs++;

	return 0;
}

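/* Usage sketch (illustrative only; the "ipsec" type, its mailbox struct type,
 * the callbacks and the "ipsec0" instance are hypothetical):
 *
 *	status = rte_swx_pipeline_extern_type_register(p, "ipsec",
 *		"ipsec_mailbox_t", ipsec_create, ipsec_destroy);
 *	status = rte_swx_pipeline_extern_type_member_func_register(p, "ipsec",
 *		"encrypt", ipsec_encrypt);
 *	status = rte_swx_pipeline_extern_object_config(p, "ipsec", "ipsec0",
 *		"size=1024");
 *
 * The mailbox struct type must have been registered beforehand with
 * rte_swx_pipeline_struct_type_register().
 */
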
static int
extern_obj_build(struct rte_swx_pipeline *p)
{
	uint32_t i;

	for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
		struct thread *t = &p->threads[i];
		struct extern_obj *obj;

		t->extern_objs = calloc(p->n_extern_objs,
					sizeof(struct extern_obj_runtime));
		CHECK(t->extern_objs, ENOMEM);

		TAILQ_FOREACH(obj, &p->extern_objs, node) {
			struct extern_obj_runtime *r =
				&t->extern_objs[obj->id];
			struct extern_type_member_func *func;
			uint32_t mailbox_size =
				obj->type->mailbox_struct_type->n_bits / 8;

			r->obj = obj->obj;

			r->mailbox = calloc(1, mailbox_size);
			CHECK(r->mailbox, ENOMEM);

			TAILQ_FOREACH(func, &obj->type->funcs, node)
				r->funcs[func->id] = func->func;

			t->structs[obj->struct_id] = r->mailbox;
		}
	}

	return 0;
}

static void
extern_obj_build_free(struct rte_swx_pipeline *p)
{
	uint32_t i;

	for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
		struct thread *t = &p->threads[i];
		uint32_t j;

		if (!t->extern_objs)
			continue;

		for (j = 0; j < p->n_extern_objs; j++) {
			struct extern_obj_runtime *r = &t->extern_objs[j];

			free(r->mailbox);
		}

		free(t->extern_objs);
		t->extern_objs = NULL;
	}
}

static void
extern_obj_free(struct rte_swx_pipeline *p)
{
	extern_obj_build_free(p);

	/* Extern objects. */
	for ( ; ; ) {
		struct extern_obj *elem;

		elem = TAILQ_FIRST(&p->extern_objs);
		if (!elem)
			break;

		TAILQ_REMOVE(&p->extern_objs, elem, node);
		if (elem->obj)
			elem->type->destructor(elem->obj);
		free(elem);
	}

	/* Extern types. */
	for ( ; ; ) {
		struct extern_type *elem;

		elem = TAILQ_FIRST(&p->extern_types);
		if (!elem)
			break;

		TAILQ_REMOVE(&p->extern_types, elem, node);

		for ( ; ; ) {
			struct extern_type_member_func *func;

			func = TAILQ_FIRST(&elem->funcs);
			if (!func)
				break;

			TAILQ_REMOVE(&elem->funcs, func, node);
			free(func);
		}

		free(elem);
	}
}

/*
 * Extern function.
 */
2196 */ 2197 static struct extern_func * 2198 extern_func_find(struct rte_swx_pipeline *p, const char *name) 2199 { 2200 struct extern_func *elem; 2201 2202 TAILQ_FOREACH(elem, &p->extern_funcs, node) 2203 if (strcmp(elem->name, name) == 0) 2204 return elem; 2205 2206 return NULL; 2207 } 2208 2209 static struct extern_func * 2210 extern_func_parse(struct rte_swx_pipeline *p, 2211 const char *name) 2212 { 2213 if (name[0] != 'f' || name[1] != '.') 2214 return NULL; 2215 2216 return extern_func_find(p, &name[2]); 2217 } 2218 2219 static struct field * 2220 extern_func_mailbox_field_parse(struct rte_swx_pipeline *p, 2221 const char *name, 2222 struct extern_func **function) 2223 { 2224 struct extern_func *func; 2225 struct field *f; 2226 char *func_name, *field_name; 2227 2228 if ((name[0] != 'f') || (name[1] != '.')) 2229 return NULL; 2230 2231 func_name = strdup(&name[2]); 2232 if (!func_name) 2233 return NULL; 2234 2235 field_name = strchr(func_name, '.'); 2236 if (!field_name) { 2237 free(func_name); 2238 return NULL; 2239 } 2240 2241 *field_name = 0; 2242 field_name++; 2243 2244 func = extern_func_find(p, func_name); 2245 if (!func) { 2246 free(func_name); 2247 return NULL; 2248 } 2249 2250 f = struct_type_field_find(func->mailbox_struct_type, field_name); 2251 if (!f) { 2252 free(func_name); 2253 return NULL; 2254 } 2255 2256 if (function) 2257 *function = func; 2258 2259 free(func_name); 2260 return f; 2261 } 2262 2263 int 2264 rte_swx_pipeline_extern_func_register(struct rte_swx_pipeline *p, 2265 const char *name, 2266 const char *mailbox_struct_type_name, 2267 rte_swx_extern_func_t func) 2268 { 2269 struct extern_func *f; 2270 struct struct_type *mailbox_struct_type; 2271 2272 CHECK(p, EINVAL); 2273 2274 CHECK_NAME(name, EINVAL); 2275 CHECK(!extern_func_find(p, name), EEXIST); 2276 2277 CHECK_NAME(mailbox_struct_type_name, EINVAL); 2278 mailbox_struct_type = struct_type_find(p, mailbox_struct_type_name); 2279 CHECK(mailbox_struct_type, EINVAL); 2280 2281 CHECK(func, EINVAL); 2282 2283 /* Node allocation. */ 2284 f = calloc(1, sizeof(struct extern_func)); 2285 CHECK(func, ENOMEM); 2286 2287 /* Node initialization. */ 2288 strcpy(f->name, name); 2289 f->mailbox_struct_type = mailbox_struct_type; 2290 f->func = func; 2291 f->struct_id = p->n_structs; 2292 f->id = p->n_extern_funcs; 2293 2294 /* Node add to tailq. */ 2295 TAILQ_INSERT_TAIL(&p->extern_funcs, f, node); 2296 p->n_extern_funcs++; 2297 p->n_structs++; 2298 2299 return 0; 2300 } 2301 2302 static int 2303 extern_func_build(struct rte_swx_pipeline *p) 2304 { 2305 uint32_t i; 2306 2307 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) { 2308 struct thread *t = &p->threads[i]; 2309 struct extern_func *func; 2310 2311 /* Memory allocation. */ 2312 t->extern_funcs = calloc(p->n_extern_funcs, 2313 sizeof(struct extern_func_runtime)); 2314 CHECK(t->extern_funcs, ENOMEM); 2315 2316 /* Extern function. 
*/ 2317 TAILQ_FOREACH(func, &p->extern_funcs, node) { 2318 struct extern_func_runtime *r = 2319 &t->extern_funcs[func->id]; 2320 uint32_t mailbox_size = 2321 func->mailbox_struct_type->n_bits / 8; 2322 2323 r->func = func->func; 2324 2325 r->mailbox = calloc(1, mailbox_size); 2326 CHECK(r->mailbox, ENOMEM); 2327 2328 t->structs[func->struct_id] = r->mailbox; 2329 } 2330 } 2331 2332 return 0; 2333 } 2334 2335 static void 2336 extern_func_build_free(struct rte_swx_pipeline *p) 2337 { 2338 uint32_t i; 2339 2340 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) { 2341 struct thread *t = &p->threads[i]; 2342 uint32_t j; 2343 2344 if (!t->extern_funcs) 2345 continue; 2346 2347 for (j = 0; j < p->n_extern_funcs; j++) { 2348 struct extern_func_runtime *r = &t->extern_funcs[j]; 2349 2350 free(r->mailbox); 2351 } 2352 2353 free(t->extern_funcs); 2354 t->extern_funcs = NULL; 2355 } 2356 } 2357 2358 static void 2359 extern_func_free(struct rte_swx_pipeline *p) 2360 { 2361 extern_func_build_free(p); 2362 2363 for ( ; ; ) { 2364 struct extern_func *elem; 2365 2366 elem = TAILQ_FIRST(&p->extern_funcs); 2367 if (!elem) 2368 break; 2369 2370 TAILQ_REMOVE(&p->extern_funcs, elem, node); 2371 free(elem); 2372 } 2373 } 2374 2375 /* 2376 * Header. 2377 */ 2378 static struct header * 2379 header_find(struct rte_swx_pipeline *p, const char *name) 2380 { 2381 struct header *elem; 2382 2383 TAILQ_FOREACH(elem, &p->headers, node) 2384 if (strcmp(elem->name, name) == 0) 2385 return elem; 2386 2387 return NULL; 2388 } 2389 2390 static struct header * 2391 header_find_by_struct_id(struct rte_swx_pipeline *p, uint32_t struct_id) 2392 { 2393 struct header *elem; 2394 2395 TAILQ_FOREACH(elem, &p->headers, node) 2396 if (elem->struct_id == struct_id) 2397 return elem; 2398 2399 return NULL; 2400 } 2401 2402 static struct header * 2403 header_parse(struct rte_swx_pipeline *p, 2404 const char *name) 2405 { 2406 if (name[0] != 'h' || name[1] != '.') 2407 return NULL; 2408 2409 return header_find(p, &name[2]); 2410 } 2411 2412 static struct field * 2413 header_field_parse(struct rte_swx_pipeline *p, 2414 const char *name, 2415 struct header **header) 2416 { 2417 struct header *h; 2418 struct field *f; 2419 char *header_name, *field_name; 2420 2421 if ((name[0] != 'h') || (name[1] != '.')) 2422 return NULL; 2423 2424 header_name = strdup(&name[2]); 2425 if (!header_name) 2426 return NULL; 2427 2428 field_name = strchr(header_name, '.'); 2429 if (!field_name) { 2430 free(header_name); 2431 return NULL; 2432 } 2433 2434 *field_name = 0; 2435 field_name++; 2436 2437 h = header_find(p, header_name); 2438 if (!h) { 2439 free(header_name); 2440 return NULL; 2441 } 2442 2443 f = struct_type_field_find(h->st, field_name); 2444 if (!f) { 2445 free(header_name); 2446 return NULL; 2447 } 2448 2449 if (header) 2450 *header = h; 2451 2452 free(header_name); 2453 return f; 2454 } 2455 2456 int 2457 rte_swx_pipeline_packet_header_register(struct rte_swx_pipeline *p, 2458 const char *name, 2459 const char *struct_type_name) 2460 { 2461 struct struct_type *st; 2462 struct header *h; 2463 size_t n_headers_max; 2464 2465 CHECK(p, EINVAL); 2466 CHECK_NAME(name, EINVAL); 2467 CHECK_NAME(struct_type_name, EINVAL); 2468 2469 CHECK(!header_find(p, name), EEXIST); 2470 2471 st = struct_type_find(p, struct_type_name); 2472 CHECK(st, EINVAL); 2473 2474 n_headers_max = RTE_SIZEOF_FIELD(struct thread, valid_headers) * 8; 2475 CHECK(p->n_headers < n_headers_max, ENOSPC); 2476 2477 /* Node allocation. 
*/ 2478 h = calloc(1, sizeof(struct header)); 2479 CHECK(h, ENOMEM); 2480 2481 /* Node initialization. */ 2482 strcpy(h->name, name); 2483 h->st = st; 2484 h->struct_id = p->n_structs; 2485 h->id = p->n_headers; 2486 2487 /* Node add to tailq. */ 2488 TAILQ_INSERT_TAIL(&p->headers, h, node); 2489 p->n_headers++; 2490 p->n_structs++; 2491 2492 return 0; 2493 } 2494 2495 static int 2496 header_build(struct rte_swx_pipeline *p) 2497 { 2498 struct header *h; 2499 uint32_t n_bytes = 0, i; 2500 2501 TAILQ_FOREACH(h, &p->headers, node) { 2502 n_bytes += h->st->n_bits / 8; 2503 } 2504 2505 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) { 2506 struct thread *t = &p->threads[i]; 2507 uint32_t offset = 0; 2508 2509 t->headers = calloc(p->n_headers, 2510 sizeof(struct header_runtime)); 2511 CHECK(t->headers, ENOMEM); 2512 2513 t->headers_out = calloc(p->n_headers, 2514 sizeof(struct header_out_runtime)); 2515 CHECK(t->headers_out, ENOMEM); 2516 2517 t->header_storage = calloc(1, n_bytes); 2518 CHECK(t->header_storage, ENOMEM); 2519 2520 t->header_out_storage = calloc(1, n_bytes); 2521 CHECK(t->header_out_storage, ENOMEM); 2522 2523 TAILQ_FOREACH(h, &p->headers, node) { 2524 uint8_t *header_storage; 2525 2526 header_storage = &t->header_storage[offset]; 2527 offset += h->st->n_bits / 8; 2528 2529 t->headers[h->id].ptr0 = header_storage; 2530 t->structs[h->struct_id] = header_storage; 2531 } 2532 } 2533 2534 return 0; 2535 } 2536 2537 static void 2538 header_build_free(struct rte_swx_pipeline *p) 2539 { 2540 uint32_t i; 2541 2542 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) { 2543 struct thread *t = &p->threads[i]; 2544 2545 free(t->headers_out); 2546 t->headers_out = NULL; 2547 2548 free(t->headers); 2549 t->headers = NULL; 2550 2551 free(t->header_out_storage); 2552 t->header_out_storage = NULL; 2553 2554 free(t->header_storage); 2555 t->header_storage = NULL; 2556 } 2557 } 2558 2559 static void 2560 header_free(struct rte_swx_pipeline *p) 2561 { 2562 header_build_free(p); 2563 2564 for ( ; ; ) { 2565 struct header *elem; 2566 2567 elem = TAILQ_FIRST(&p->headers); 2568 if (!elem) 2569 break; 2570 2571 TAILQ_REMOVE(&p->headers, elem, node); 2572 free(elem); 2573 } 2574 } 2575 2576 /* 2577 * Meta-data. 2578 */ 2579 static struct field * 2580 metadata_field_parse(struct rte_swx_pipeline *p, const char *name) 2581 { 2582 if (!p->metadata_st) 2583 return NULL; 2584 2585 if (name[0] != 'm' || name[1] != '.') 2586 return NULL; 2587 2588 return struct_type_field_find(p->metadata_st, &name[2]); 2589 } 2590 2591 int 2592 rte_swx_pipeline_packet_metadata_register(struct rte_swx_pipeline *p, 2593 const char *struct_type_name) 2594 { 2595 struct struct_type *st = NULL; 2596 2597 CHECK(p, EINVAL); 2598 2599 CHECK_NAME(struct_type_name, EINVAL); 2600 st = struct_type_find(p, struct_type_name); 2601 CHECK(st, EINVAL); 2602 CHECK(!p->metadata_st, EINVAL); 2603 2604 p->metadata_st = st; 2605 p->metadata_struct_id = p->n_structs; 2606 2607 p->n_structs++; 2608 2609 return 0; 2610 } 2611 2612 static int 2613 metadata_build(struct rte_swx_pipeline *p) 2614 { 2615 uint32_t n_bytes = p->metadata_st->n_bits / 8; 2616 uint32_t i; 2617 2618 /* Thread-level initialization. 
*/ 2619 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) { 2620 struct thread *t = &p->threads[i]; 2621 uint8_t *metadata; 2622 2623 metadata = calloc(1, n_bytes); 2624 CHECK(metadata, ENOMEM); 2625 2626 t->metadata = metadata; 2627 t->structs[p->metadata_struct_id] = metadata; 2628 } 2629 2630 return 0; 2631 } 2632 2633 static void 2634 metadata_build_free(struct rte_swx_pipeline *p) 2635 { 2636 uint32_t i; 2637 2638 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) { 2639 struct thread *t = &p->threads[i]; 2640 2641 free(t->metadata); 2642 t->metadata = NULL; 2643 } 2644 } 2645 2646 static void 2647 metadata_free(struct rte_swx_pipeline *p) 2648 { 2649 metadata_build_free(p); 2650 } 2651 2652 /* 2653 * Instruction. 2654 */ 2655 static int 2656 instruction_is_tx(enum instruction_type type) 2657 { 2658 switch (type) { 2659 case INSTR_TX: 2660 case INSTR_TX_I: 2661 return 1; 2662 2663 default: 2664 return 0; 2665 } 2666 } 2667 2668 static int 2669 instruction_is_jmp(struct instruction *instr) 2670 { 2671 switch (instr->type) { 2672 case INSTR_JMP: 2673 case INSTR_JMP_VALID: 2674 case INSTR_JMP_INVALID: 2675 case INSTR_JMP_HIT: 2676 case INSTR_JMP_MISS: 2677 case INSTR_JMP_ACTION_HIT: 2678 case INSTR_JMP_ACTION_MISS: 2679 case INSTR_JMP_EQ: 2680 case INSTR_JMP_EQ_MH: 2681 case INSTR_JMP_EQ_HM: 2682 case INSTR_JMP_EQ_HH: 2683 case INSTR_JMP_EQ_I: 2684 case INSTR_JMP_NEQ: 2685 case INSTR_JMP_NEQ_MH: 2686 case INSTR_JMP_NEQ_HM: 2687 case INSTR_JMP_NEQ_HH: 2688 case INSTR_JMP_NEQ_I: 2689 case INSTR_JMP_LT: 2690 case INSTR_JMP_LT_MH: 2691 case INSTR_JMP_LT_HM: 2692 case INSTR_JMP_LT_HH: 2693 case INSTR_JMP_LT_MI: 2694 case INSTR_JMP_LT_HI: 2695 case INSTR_JMP_GT: 2696 case INSTR_JMP_GT_MH: 2697 case INSTR_JMP_GT_HM: 2698 case INSTR_JMP_GT_HH: 2699 case INSTR_JMP_GT_MI: 2700 case INSTR_JMP_GT_HI: 2701 return 1; 2702 2703 default: 2704 return 0; 2705 } 2706 } 2707 2708 static struct field * 2709 action_field_parse(struct action *action, const char *name); 2710 2711 static struct field * 2712 struct_field_parse(struct rte_swx_pipeline *p, 2713 struct action *action, 2714 const char *name, 2715 uint32_t *struct_id) 2716 { 2717 struct field *f; 2718 2719 switch (name[0]) { 2720 case 'h': 2721 { 2722 struct header *header; 2723 2724 f = header_field_parse(p, name, &header); 2725 if (!f) 2726 return NULL; 2727 2728 *struct_id = header->struct_id; 2729 return f; 2730 } 2731 2732 case 'm': 2733 { 2734 f = metadata_field_parse(p, name); 2735 if (!f) 2736 return NULL; 2737 2738 *struct_id = p->metadata_struct_id; 2739 return f; 2740 } 2741 2742 case 't': 2743 { 2744 if (!action) 2745 return NULL; 2746 2747 f = action_field_parse(action, name); 2748 if (!f) 2749 return NULL; 2750 2751 *struct_id = 0; 2752 return f; 2753 } 2754 2755 case 'e': 2756 { 2757 struct extern_obj *obj; 2758 2759 f = extern_obj_mailbox_field_parse(p, name, &obj); 2760 if (!f) 2761 return NULL; 2762 2763 *struct_id = obj->struct_id; 2764 return f; 2765 } 2766 2767 case 'f': 2768 { 2769 struct extern_func *func; 2770 2771 f = extern_func_mailbox_field_parse(p, name, &func); 2772 if (!f) 2773 return NULL; 2774 2775 *struct_id = func->struct_id; 2776 return f; 2777 } 2778 2779 default: 2780 return NULL; 2781 } 2782 } 2783 2784 static inline void 2785 pipeline_port_inc(struct rte_swx_pipeline *p) 2786 { 2787 p->port_id = (p->port_id + 1) & (p->n_ports_in - 1); 2788 } 2789 2790 static inline void 2791 thread_ip_reset(struct rte_swx_pipeline *p, struct thread *t) 2792 { 2793 t->ip = p->instructions; 2794 } 2795 2796 static inline void 
2797 thread_ip_set(struct thread *t, struct instruction *ip) 2798 { 2799 t->ip = ip; 2800 } 2801 2802 static inline void 2803 thread_ip_action_call(struct rte_swx_pipeline *p, 2804 struct thread *t, 2805 uint32_t action_id) 2806 { 2807 t->ret = t->ip + 1; 2808 t->ip = p->action_instructions[action_id]; 2809 } 2810 2811 static inline void 2812 thread_ip_inc(struct rte_swx_pipeline *p); 2813 2814 static inline void 2815 thread_ip_inc(struct rte_swx_pipeline *p) 2816 { 2817 struct thread *t = &p->threads[p->thread_id]; 2818 2819 t->ip++; 2820 } 2821 2822 static inline void 2823 thread_ip_inc_cond(struct thread *t, int cond) 2824 { 2825 t->ip += cond; 2826 } 2827 2828 static inline void 2829 thread_yield(struct rte_swx_pipeline *p) 2830 { 2831 p->thread_id = (p->thread_id + 1) & (RTE_SWX_PIPELINE_THREADS_MAX - 1); 2832 } 2833 2834 static inline void 2835 thread_yield_cond(struct rte_swx_pipeline *p, int cond) 2836 { 2837 p->thread_id = (p->thread_id + cond) & (RTE_SWX_PIPELINE_THREADS_MAX - 1); 2838 } 2839 2840 /* 2841 * rx. 2842 */ 2843 static int 2844 instr_rx_translate(struct rte_swx_pipeline *p, 2845 struct action *action, 2846 char **tokens, 2847 int n_tokens, 2848 struct instruction *instr, 2849 struct instruction_data *data __rte_unused) 2850 { 2851 struct field *f; 2852 2853 CHECK(!action, EINVAL); 2854 CHECK(n_tokens == 2, EINVAL); 2855 2856 f = metadata_field_parse(p, tokens[1]); 2857 CHECK(f, EINVAL); 2858 2859 instr->type = INSTR_RX; 2860 instr->io.io.offset = f->offset / 8; 2861 instr->io.io.n_bits = f->n_bits; 2862 return 0; 2863 } 2864 2865 static inline void 2866 instr_rx_exec(struct rte_swx_pipeline *p); 2867 2868 static inline void 2869 instr_rx_exec(struct rte_swx_pipeline *p) 2870 { 2871 struct thread *t = &p->threads[p->thread_id]; 2872 struct instruction *ip = t->ip; 2873 struct port_in_runtime *port = &p->in[p->port_id]; 2874 struct rte_swx_pkt *pkt = &t->pkt; 2875 int pkt_received; 2876 2877 /* Packet. */ 2878 pkt_received = port->pkt_rx(port->obj, pkt); 2879 t->ptr = &pkt->pkt[pkt->offset]; 2880 rte_prefetch0(t->ptr); 2881 2882 TRACE("[Thread %2u] rx %s from port %u\n", 2883 p->thread_id, 2884 pkt_received ? "1 pkt" : "0 pkts", 2885 p->port_id); 2886 2887 /* Headers. */ 2888 t->valid_headers = 0; 2889 t->n_headers_out = 0; 2890 2891 /* Meta-data. */ 2892 METADATA_WRITE(t, ip->io.io.offset, ip->io.io.n_bits, p->port_id); 2893 2894 /* Tables. */ 2895 t->table_state = p->table_state; 2896 2897 /* Thread. */ 2898 pipeline_port_inc(p); 2899 thread_ip_inc_cond(t, pkt_received); 2900 thread_yield(p); 2901 } 2902 2903 /* 2904 * tx. 2905 */ 2906 static int 2907 instr_tx_translate(struct rte_swx_pipeline *p, 2908 struct action *action __rte_unused, 2909 char **tokens, 2910 int n_tokens, 2911 struct instruction *instr, 2912 struct instruction_data *data __rte_unused) 2913 { 2914 char *port = tokens[1]; 2915 struct field *f; 2916 uint32_t port_val; 2917 2918 CHECK(n_tokens == 2, EINVAL); 2919 2920 f = metadata_field_parse(p, port); 2921 if (f) { 2922 instr->type = INSTR_TX; 2923 instr->io.io.offset = f->offset / 8; 2924 instr->io.io.n_bits = f->n_bits; 2925 return 0; 2926 } 2927 2928 /* TX_I. 
*/ 2929 port_val = strtoul(port, &port, 0); 2930 CHECK(!port[0], EINVAL); 2931 2932 instr->type = INSTR_TX_I; 2933 instr->io.io.val = port_val; 2934 return 0; 2935 } 2936 2937 static int 2938 instr_drop_translate(struct rte_swx_pipeline *p, 2939 struct action *action __rte_unused, 2940 char **tokens __rte_unused, 2941 int n_tokens, 2942 struct instruction *instr, 2943 struct instruction_data *data __rte_unused) 2944 { 2945 CHECK(n_tokens == 1, EINVAL); 2946 2947 /* TX_I. */ 2948 instr->type = INSTR_TX_I; 2949 instr->io.io.val = p->n_ports_out - 1; 2950 return 0; 2951 } 2952 2953 static inline void 2954 emit_handler(struct thread *t) 2955 { 2956 struct header_out_runtime *h0 = &t->headers_out[0]; 2957 struct header_out_runtime *h1 = &t->headers_out[1]; 2958 uint32_t offset = 0, i; 2959 2960 /* No header change or header decapsulation. */ 2961 if ((t->n_headers_out == 1) && 2962 (h0->ptr + h0->n_bytes == t->ptr)) { 2963 TRACE("Emit handler: no header change or header decap.\n"); 2964 2965 t->pkt.offset -= h0->n_bytes; 2966 t->pkt.length += h0->n_bytes; 2967 2968 return; 2969 } 2970 2971 /* Header encapsulation (optionally, with prior header decasulation). */ 2972 if ((t->n_headers_out == 2) && 2973 (h1->ptr + h1->n_bytes == t->ptr) && 2974 (h0->ptr == h0->ptr0)) { 2975 uint32_t offset; 2976 2977 TRACE("Emit handler: header encapsulation.\n"); 2978 2979 offset = h0->n_bytes + h1->n_bytes; 2980 memcpy(t->ptr - offset, h0->ptr, h0->n_bytes); 2981 t->pkt.offset -= offset; 2982 t->pkt.length += offset; 2983 2984 return; 2985 } 2986 2987 /* Header insertion. */ 2988 /* TBD */ 2989 2990 /* Header extraction. */ 2991 /* TBD */ 2992 2993 /* For any other case. */ 2994 TRACE("Emit handler: complex case.\n"); 2995 2996 for (i = 0; i < t->n_headers_out; i++) { 2997 struct header_out_runtime *h = &t->headers_out[i]; 2998 2999 memcpy(&t->header_out_storage[offset], h->ptr, h->n_bytes); 3000 offset += h->n_bytes; 3001 } 3002 3003 if (offset) { 3004 memcpy(t->ptr - offset, t->header_out_storage, offset); 3005 t->pkt.offset -= offset; 3006 t->pkt.length += offset; 3007 } 3008 } 3009 3010 static inline void 3011 instr_tx_exec(struct rte_swx_pipeline *p); 3012 3013 static inline void 3014 instr_tx_exec(struct rte_swx_pipeline *p) 3015 { 3016 struct thread *t = &p->threads[p->thread_id]; 3017 struct instruction *ip = t->ip; 3018 uint64_t port_id = METADATA_READ(t, ip->io.io.offset, ip->io.io.n_bits); 3019 struct port_out_runtime *port = &p->out[port_id]; 3020 struct rte_swx_pkt *pkt = &t->pkt; 3021 3022 TRACE("[Thread %2u]: tx 1 pkt to port %u\n", 3023 p->thread_id, 3024 (uint32_t)port_id); 3025 3026 /* Headers. */ 3027 emit_handler(t); 3028 3029 /* Packet. */ 3030 port->pkt_tx(port->obj, pkt); 3031 3032 /* Thread. */ 3033 thread_ip_reset(p, t); 3034 instr_rx_exec(p); 3035 } 3036 3037 static inline void 3038 instr_tx_i_exec(struct rte_swx_pipeline *p) 3039 { 3040 struct thread *t = &p->threads[p->thread_id]; 3041 struct instruction *ip = t->ip; 3042 uint64_t port_id = ip->io.io.val; 3043 struct port_out_runtime *port = &p->out[port_id]; 3044 struct rte_swx_pkt *pkt = &t->pkt; 3045 3046 TRACE("[Thread %2u]: tx (i) 1 pkt to port %u\n", 3047 p->thread_id, 3048 (uint32_t)port_id); 3049 3050 /* Headers. */ 3051 emit_handler(t); 3052 3053 /* Packet. */ 3054 port->pkt_tx(port->obj, pkt); 3055 3056 /* Thread. */ 3057 thread_ip_reset(p, t); 3058 instr_rx_exec(p); 3059 } 3060 3061 /* 3062 * extract. 
3063 */ 3064 static int 3065 instr_hdr_extract_translate(struct rte_swx_pipeline *p, 3066 struct action *action, 3067 char **tokens, 3068 int n_tokens, 3069 struct instruction *instr, 3070 struct instruction_data *data __rte_unused) 3071 { 3072 struct header *h; 3073 3074 CHECK(!action, EINVAL); 3075 CHECK(n_tokens == 2, EINVAL); 3076 3077 h = header_parse(p, tokens[1]); 3078 CHECK(h, EINVAL); 3079 3080 instr->type = INSTR_HDR_EXTRACT; 3081 instr->io.hdr.header_id[0] = h->id; 3082 instr->io.hdr.struct_id[0] = h->struct_id; 3083 instr->io.hdr.n_bytes[0] = h->st->n_bits / 8; 3084 return 0; 3085 } 3086 3087 static inline void 3088 __instr_hdr_extract_exec(struct rte_swx_pipeline *p, uint32_t n_extract); 3089 3090 static inline void 3091 __instr_hdr_extract_exec(struct rte_swx_pipeline *p, uint32_t n_extract) 3092 { 3093 struct thread *t = &p->threads[p->thread_id]; 3094 struct instruction *ip = t->ip; 3095 uint64_t valid_headers = t->valid_headers; 3096 uint8_t *ptr = t->ptr; 3097 uint32_t offset = t->pkt.offset; 3098 uint32_t length = t->pkt.length; 3099 uint32_t i; 3100 3101 for (i = 0; i < n_extract; i++) { 3102 uint32_t header_id = ip->io.hdr.header_id[i]; 3103 uint32_t struct_id = ip->io.hdr.struct_id[i]; 3104 uint32_t n_bytes = ip->io.hdr.n_bytes[i]; 3105 3106 TRACE("[Thread %2u]: extract header %u (%u bytes)\n", 3107 p->thread_id, 3108 header_id, 3109 n_bytes); 3110 3111 /* Headers. */ 3112 t->structs[struct_id] = ptr; 3113 valid_headers = MASK64_BIT_SET(valid_headers, header_id); 3114 3115 /* Packet. */ 3116 offset += n_bytes; 3117 length -= n_bytes; 3118 ptr += n_bytes; 3119 } 3120 3121 /* Headers. */ 3122 t->valid_headers = valid_headers; 3123 3124 /* Packet. */ 3125 t->pkt.offset = offset; 3126 t->pkt.length = length; 3127 t->ptr = ptr; 3128 } 3129 3130 static inline void 3131 instr_hdr_extract_exec(struct rte_swx_pipeline *p) 3132 { 3133 __instr_hdr_extract_exec(p, 1); 3134 3135 /* Thread. */ 3136 thread_ip_inc(p); 3137 } 3138 3139 static inline void 3140 instr_hdr_extract2_exec(struct rte_swx_pipeline *p) 3141 { 3142 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n", 3143 p->thread_id); 3144 3145 __instr_hdr_extract_exec(p, 2); 3146 3147 /* Thread. */ 3148 thread_ip_inc(p); 3149 } 3150 3151 static inline void 3152 instr_hdr_extract3_exec(struct rte_swx_pipeline *p) 3153 { 3154 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n", 3155 p->thread_id); 3156 3157 __instr_hdr_extract_exec(p, 3); 3158 3159 /* Thread. */ 3160 thread_ip_inc(p); 3161 } 3162 3163 static inline void 3164 instr_hdr_extract4_exec(struct rte_swx_pipeline *p) 3165 { 3166 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n", 3167 p->thread_id); 3168 3169 __instr_hdr_extract_exec(p, 4); 3170 3171 /* Thread. */ 3172 thread_ip_inc(p); 3173 } 3174 3175 static inline void 3176 instr_hdr_extract5_exec(struct rte_swx_pipeline *p) 3177 { 3178 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n", 3179 p->thread_id); 3180 3181 __instr_hdr_extract_exec(p, 5); 3182 3183 /* Thread. */ 3184 thread_ip_inc(p); 3185 } 3186 3187 static inline void 3188 instr_hdr_extract6_exec(struct rte_swx_pipeline *p) 3189 { 3190 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n", 3191 p->thread_id); 3192 3193 __instr_hdr_extract_exec(p, 6); 3194 3195 /* Thread. */ 3196 thread_ip_inc(p); 3197 } 3198 3199 static inline void 3200 instr_hdr_extract7_exec(struct rte_swx_pipeline *p) 3201 { 3202 TRACE("[Thread %2u] *** The next 7 instructions are fused. 
***\n", 3203 p->thread_id); 3204 3205 __instr_hdr_extract_exec(p, 7); 3206 3207 /* Thread. */ 3208 thread_ip_inc(p); 3209 } 3210 3211 static inline void 3212 instr_hdr_extract8_exec(struct rte_swx_pipeline *p) 3213 { 3214 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n", 3215 p->thread_id); 3216 3217 __instr_hdr_extract_exec(p, 8); 3218 3219 /* Thread. */ 3220 thread_ip_inc(p); 3221 } 3222 3223 /* 3224 * emit. 3225 */ 3226 static int 3227 instr_hdr_emit_translate(struct rte_swx_pipeline *p, 3228 struct action *action __rte_unused, 3229 char **tokens, 3230 int n_tokens, 3231 struct instruction *instr, 3232 struct instruction_data *data __rte_unused) 3233 { 3234 struct header *h; 3235 3236 CHECK(n_tokens == 2, EINVAL); 3237 3238 h = header_parse(p, tokens[1]); 3239 CHECK(h, EINVAL); 3240 3241 instr->type = INSTR_HDR_EMIT; 3242 instr->io.hdr.header_id[0] = h->id; 3243 instr->io.hdr.struct_id[0] = h->struct_id; 3244 instr->io.hdr.n_bytes[0] = h->st->n_bits / 8; 3245 return 0; 3246 } 3247 3248 static inline void 3249 __instr_hdr_emit_exec(struct rte_swx_pipeline *p, uint32_t n_emit); 3250 3251 static inline void 3252 __instr_hdr_emit_exec(struct rte_swx_pipeline *p, uint32_t n_emit) 3253 { 3254 struct thread *t = &p->threads[p->thread_id]; 3255 struct instruction *ip = t->ip; 3256 uint64_t valid_headers = t->valid_headers; 3257 uint32_t n_headers_out = t->n_headers_out; 3258 struct header_out_runtime *ho = &t->headers_out[n_headers_out - 1]; 3259 uint8_t *ho_ptr = NULL; 3260 uint32_t ho_nbytes = 0, first = 1, i; 3261 3262 for (i = 0; i < n_emit; i++) { 3263 uint32_t header_id = ip->io.hdr.header_id[i]; 3264 uint32_t struct_id = ip->io.hdr.struct_id[i]; 3265 uint32_t n_bytes = ip->io.hdr.n_bytes[i]; 3266 3267 struct header_runtime *hi = &t->headers[header_id]; 3268 uint8_t *hi_ptr = t->structs[struct_id]; 3269 3270 if (!MASK64_BIT_GET(valid_headers, header_id)) 3271 continue; 3272 3273 TRACE("[Thread %2u]: emit header %u\n", 3274 p->thread_id, 3275 header_id); 3276 3277 /* Headers. */ 3278 if (first) { 3279 first = 0; 3280 3281 if (!t->n_headers_out) { 3282 ho = &t->headers_out[0]; 3283 3284 ho->ptr0 = hi->ptr0; 3285 ho->ptr = hi_ptr; 3286 3287 ho_ptr = hi_ptr; 3288 ho_nbytes = n_bytes; 3289 3290 n_headers_out = 1; 3291 3292 continue; 3293 } else { 3294 ho_ptr = ho->ptr; 3295 ho_nbytes = ho->n_bytes; 3296 } 3297 } 3298 3299 if (ho_ptr + ho_nbytes == hi_ptr) { 3300 ho_nbytes += n_bytes; 3301 } else { 3302 ho->n_bytes = ho_nbytes; 3303 3304 ho++; 3305 ho->ptr0 = hi->ptr0; 3306 ho->ptr = hi_ptr; 3307 3308 ho_ptr = hi_ptr; 3309 ho_nbytes = n_bytes; 3310 3311 n_headers_out++; 3312 } 3313 } 3314 3315 ho->n_bytes = ho_nbytes; 3316 t->n_headers_out = n_headers_out; 3317 } 3318 3319 static inline void 3320 instr_hdr_emit_exec(struct rte_swx_pipeline *p) 3321 { 3322 __instr_hdr_emit_exec(p, 1); 3323 3324 /* Thread. */ 3325 thread_ip_inc(p); 3326 } 3327 3328 static inline void 3329 instr_hdr_emit_tx_exec(struct rte_swx_pipeline *p) 3330 { 3331 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n", 3332 p->thread_id); 3333 3334 __instr_hdr_emit_exec(p, 1); 3335 instr_tx_exec(p); 3336 } 3337 3338 static inline void 3339 instr_hdr_emit2_tx_exec(struct rte_swx_pipeline *p) 3340 { 3341 TRACE("[Thread %2u] *** The next 3 instructions are fused. 
***\n", 3342 p->thread_id); 3343 3344 __instr_hdr_emit_exec(p, 2); 3345 instr_tx_exec(p); 3346 } 3347 3348 static inline void 3349 instr_hdr_emit3_tx_exec(struct rte_swx_pipeline *p) 3350 { 3351 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n", 3352 p->thread_id); 3353 3354 __instr_hdr_emit_exec(p, 3); 3355 instr_tx_exec(p); 3356 } 3357 3358 static inline void 3359 instr_hdr_emit4_tx_exec(struct rte_swx_pipeline *p) 3360 { 3361 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n", 3362 p->thread_id); 3363 3364 __instr_hdr_emit_exec(p, 4); 3365 instr_tx_exec(p); 3366 } 3367 3368 static inline void 3369 instr_hdr_emit5_tx_exec(struct rte_swx_pipeline *p) 3370 { 3371 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n", 3372 p->thread_id); 3373 3374 __instr_hdr_emit_exec(p, 5); 3375 instr_tx_exec(p); 3376 } 3377 3378 static inline void 3379 instr_hdr_emit6_tx_exec(struct rte_swx_pipeline *p) 3380 { 3381 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n", 3382 p->thread_id); 3383 3384 __instr_hdr_emit_exec(p, 6); 3385 instr_tx_exec(p); 3386 } 3387 3388 static inline void 3389 instr_hdr_emit7_tx_exec(struct rte_swx_pipeline *p) 3390 { 3391 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n", 3392 p->thread_id); 3393 3394 __instr_hdr_emit_exec(p, 7); 3395 instr_tx_exec(p); 3396 } 3397 3398 static inline void 3399 instr_hdr_emit8_tx_exec(struct rte_swx_pipeline *p) 3400 { 3401 TRACE("[Thread %2u] *** The next 9 instructions are fused. ***\n", 3402 p->thread_id); 3403 3404 __instr_hdr_emit_exec(p, 8); 3405 instr_tx_exec(p); 3406 } 3407 3408 /* 3409 * validate. 3410 */ 3411 static int 3412 instr_hdr_validate_translate(struct rte_swx_pipeline *p, 3413 struct action *action __rte_unused, 3414 char **tokens, 3415 int n_tokens, 3416 struct instruction *instr, 3417 struct instruction_data *data __rte_unused) 3418 { 3419 struct header *h; 3420 3421 CHECK(n_tokens == 2, EINVAL); 3422 3423 h = header_parse(p, tokens[1]); 3424 CHECK(h, EINVAL); 3425 3426 instr->type = INSTR_HDR_VALIDATE; 3427 instr->valid.header_id = h->id; 3428 return 0; 3429 } 3430 3431 static inline void 3432 instr_hdr_validate_exec(struct rte_swx_pipeline *p) 3433 { 3434 struct thread *t = &p->threads[p->thread_id]; 3435 struct instruction *ip = t->ip; 3436 uint32_t header_id = ip->valid.header_id; 3437 3438 TRACE("[Thread %2u] validate header %u\n", p->thread_id, header_id); 3439 3440 /* Headers. */ 3441 t->valid_headers = MASK64_BIT_SET(t->valid_headers, header_id); 3442 3443 /* Thread. */ 3444 thread_ip_inc(p); 3445 } 3446 3447 /* 3448 * invalidate. 3449 */ 3450 static int 3451 instr_hdr_invalidate_translate(struct rte_swx_pipeline *p, 3452 struct action *action __rte_unused, 3453 char **tokens, 3454 int n_tokens, 3455 struct instruction *instr, 3456 struct instruction_data *data __rte_unused) 3457 { 3458 struct header *h; 3459 3460 CHECK(n_tokens == 2, EINVAL); 3461 3462 h = header_parse(p, tokens[1]); 3463 CHECK(h, EINVAL); 3464 3465 instr->type = INSTR_HDR_INVALIDATE; 3466 instr->valid.header_id = h->id; 3467 return 0; 3468 } 3469 3470 static inline void 3471 instr_hdr_invalidate_exec(struct rte_swx_pipeline *p) 3472 { 3473 struct thread *t = &p->threads[p->thread_id]; 3474 struct instruction *ip = t->ip; 3475 uint32_t header_id = ip->valid.header_id; 3476 3477 TRACE("[Thread %2u] invalidate header %u\n", p->thread_id, header_id); 3478 3479 /* Headers. */ 3480 t->valid_headers = MASK64_BIT_CLR(t->valid_headers, header_id); 3481 3482 /* Thread. 
*/ 3483 thread_ip_inc(p); 3484 } 3485 3486 /* 3487 * table. 3488 */ 3489 static struct table * 3490 table_find(struct rte_swx_pipeline *p, const char *name); 3491 3492 static struct selector * 3493 selector_find(struct rte_swx_pipeline *p, const char *name); 3494 3495 static int 3496 instr_table_translate(struct rte_swx_pipeline *p, 3497 struct action *action, 3498 char **tokens, 3499 int n_tokens, 3500 struct instruction *instr, 3501 struct instruction_data *data __rte_unused) 3502 { 3503 struct table *t; 3504 struct selector *s; 3505 3506 CHECK(!action, EINVAL); 3507 CHECK(n_tokens == 2, EINVAL); 3508 3509 t = table_find(p, tokens[1]); 3510 if (t) { 3511 instr->type = INSTR_TABLE; 3512 instr->table.table_id = t->id; 3513 return 0; 3514 } 3515 3516 s = selector_find(p, tokens[1]); 3517 if (s) { 3518 instr->type = INSTR_SELECTOR; 3519 instr->table.table_id = s->id; 3520 return 0; 3521 } 3522 3523 CHECK(0, EINVAL); 3524 } 3525 3526 static inline void 3527 instr_table_exec(struct rte_swx_pipeline *p) 3528 { 3529 struct thread *t = &p->threads[p->thread_id]; 3530 struct instruction *ip = t->ip; 3531 uint32_t table_id = ip->table.table_id; 3532 struct rte_swx_table_state *ts = &t->table_state[table_id]; 3533 struct table_runtime *table = &t->tables[table_id]; 3534 struct table_statistics *stats = &p->table_stats[table_id]; 3535 uint64_t action_id, n_pkts_hit, n_pkts_action; 3536 uint8_t *action_data; 3537 int done, hit; 3538 3539 /* Table. */ 3540 done = table->func(ts->obj, 3541 table->mailbox, 3542 table->key, 3543 &action_id, 3544 &action_data, 3545 &hit); 3546 if (!done) { 3547 /* Thread. */ 3548 TRACE("[Thread %2u] table %u (not finalized)\n", 3549 p->thread_id, 3550 table_id); 3551 3552 thread_yield(p); 3553 return; 3554 } 3555 3556 action_id = hit ? action_id : ts->default_action_id; 3557 action_data = hit ? action_data : ts->default_action_data; 3558 n_pkts_hit = stats->n_pkts_hit[hit]; 3559 n_pkts_action = stats->n_pkts_action[action_id]; 3560 3561 TRACE("[Thread %2u] table %u (%s, action %u)\n", 3562 p->thread_id, 3563 table_id, 3564 hit ? "hit" : "miss", 3565 (uint32_t)action_id); 3566 3567 t->action_id = action_id; 3568 t->structs[0] = action_data; 3569 t->hit = hit; 3570 stats->n_pkts_hit[hit] = n_pkts_hit + 1; 3571 stats->n_pkts_action[action_id] = n_pkts_action + 1; 3572 3573 /* Thread. */ 3574 thread_ip_action_call(p, t, action_id); 3575 } 3576 3577 static inline void 3578 instr_selector_exec(struct rte_swx_pipeline *p) 3579 { 3580 struct thread *t = &p->threads[p->thread_id]; 3581 struct instruction *ip = t->ip; 3582 uint32_t selector_id = ip->table.table_id; 3583 struct rte_swx_table_state *ts = &t->table_state[p->n_tables + selector_id]; 3584 struct selector_runtime *selector = &t->selectors[selector_id]; 3585 struct selector_statistics *stats = &p->selector_stats[selector_id]; 3586 uint64_t n_pkts = stats->n_pkts; 3587 int done; 3588 3589 /* Table. */ 3590 done = rte_swx_table_selector_select(ts->obj, 3591 selector->mailbox, 3592 selector->group_id_buffer, 3593 selector->selector_buffer, 3594 selector->member_id_buffer); 3595 if (!done) { 3596 /* Thread. */ 3597 TRACE("[Thread %2u] selector %u (not finalized)\n", 3598 p->thread_id, 3599 selector_id); 3600 3601 thread_yield(p); 3602 return; 3603 } 3604 3605 3606 TRACE("[Thread %2u] selector %u\n", 3607 p->thread_id, 3608 selector_id); 3609 3610 stats->n_pkts = n_pkts + 1; 3611 3612 /* Thread. */ 3613 thread_ip_inc(p); 3614 } 3615 3616 /* 3617 * extern. 
3618 */ 3619 static int 3620 instr_extern_translate(struct rte_swx_pipeline *p, 3621 struct action *action __rte_unused, 3622 char **tokens, 3623 int n_tokens, 3624 struct instruction *instr, 3625 struct instruction_data *data __rte_unused) 3626 { 3627 char *token = tokens[1]; 3628 3629 CHECK(n_tokens == 2, EINVAL); 3630 3631 if (token[0] == 'e') { 3632 struct extern_obj *obj; 3633 struct extern_type_member_func *func; 3634 3635 func = extern_obj_member_func_parse(p, token, &obj); 3636 CHECK(func, EINVAL); 3637 3638 instr->type = INSTR_EXTERN_OBJ; 3639 instr->ext_obj.ext_obj_id = obj->id; 3640 instr->ext_obj.func_id = func->id; 3641 3642 return 0; 3643 } 3644 3645 if (token[0] == 'f') { 3646 struct extern_func *func; 3647 3648 func = extern_func_parse(p, token); 3649 CHECK(func, EINVAL); 3650 3651 instr->type = INSTR_EXTERN_FUNC; 3652 instr->ext_func.ext_func_id = func->id; 3653 3654 return 0; 3655 } 3656 3657 CHECK(0, EINVAL); 3658 } 3659 3660 static inline void 3661 instr_extern_obj_exec(struct rte_swx_pipeline *p) 3662 { 3663 struct thread *t = &p->threads[p->thread_id]; 3664 struct instruction *ip = t->ip; 3665 uint32_t obj_id = ip->ext_obj.ext_obj_id; 3666 uint32_t func_id = ip->ext_obj.func_id; 3667 struct extern_obj_runtime *obj = &t->extern_objs[obj_id]; 3668 rte_swx_extern_type_member_func_t func = obj->funcs[func_id]; 3669 3670 TRACE("[Thread %2u] extern obj %u member func %u\n", 3671 p->thread_id, 3672 obj_id, 3673 func_id); 3674 3675 /* Extern object member function execute. */ 3676 uint32_t done = func(obj->obj, obj->mailbox); 3677 3678 /* Thread. */ 3679 thread_ip_inc_cond(t, done); 3680 thread_yield_cond(p, done ^ 1); 3681 } 3682 3683 static inline void 3684 instr_extern_func_exec(struct rte_swx_pipeline *p) 3685 { 3686 struct thread *t = &p->threads[p->thread_id]; 3687 struct instruction *ip = t->ip; 3688 uint32_t ext_func_id = ip->ext_func.ext_func_id; 3689 struct extern_func_runtime *ext_func = &t->extern_funcs[ext_func_id]; 3690 rte_swx_extern_func_t func = ext_func->func; 3691 3692 TRACE("[Thread %2u] extern func %u\n", 3693 p->thread_id, 3694 ext_func_id); 3695 3696 /* Extern function execute. */ 3697 uint32_t done = func(ext_func->mailbox); 3698 3699 /* Thread. */ 3700 thread_ip_inc_cond(t, done); 3701 thread_yield_cond(p, done ^ 1); 3702 } 3703 3704 /* 3705 * mov. 3706 */ 3707 static int 3708 instr_mov_translate(struct rte_swx_pipeline *p, 3709 struct action *action, 3710 char **tokens, 3711 int n_tokens, 3712 struct instruction *instr, 3713 struct instruction_data *data __rte_unused) 3714 { 3715 char *dst = tokens[1], *src = tokens[2]; 3716 struct field *fdst, *fsrc; 3717 uint64_t src_val; 3718 uint32_t dst_struct_id = 0, src_struct_id = 0; 3719 3720 CHECK(n_tokens == 3, EINVAL); 3721 3722 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id); 3723 CHECK(fdst, EINVAL); 3724 3725 /* MOV, MOV_MH, MOV_HM or MOV_HH. 
*/ 3726 fsrc = struct_field_parse(p, action, src, &src_struct_id); 3727 if (fsrc) { 3728 instr->type = INSTR_MOV; 3729 if (dst[0] != 'h' && src[0] == 'h') 3730 instr->type = INSTR_MOV_MH; 3731 if (dst[0] == 'h' && src[0] != 'h') 3732 instr->type = INSTR_MOV_HM; 3733 if (dst[0] == 'h' && src[0] == 'h') 3734 instr->type = INSTR_MOV_HH; 3735 3736 instr->mov.dst.struct_id = (uint8_t)dst_struct_id; 3737 instr->mov.dst.n_bits = fdst->n_bits; 3738 instr->mov.dst.offset = fdst->offset / 8; 3739 instr->mov.src.struct_id = (uint8_t)src_struct_id; 3740 instr->mov.src.n_bits = fsrc->n_bits; 3741 instr->mov.src.offset = fsrc->offset / 8; 3742 return 0; 3743 } 3744 3745 /* MOV_I. */ 3746 src_val = strtoull(src, &src, 0); 3747 CHECK(!src[0], EINVAL); 3748 3749 if (dst[0] == 'h') 3750 src_val = hton64(src_val) >> (64 - fdst->n_bits); 3751 3752 instr->type = INSTR_MOV_I; 3753 instr->mov.dst.struct_id = (uint8_t)dst_struct_id; 3754 instr->mov.dst.n_bits = fdst->n_bits; 3755 instr->mov.dst.offset = fdst->offset / 8; 3756 instr->mov.src_val = src_val; 3757 return 0; 3758 } 3759 3760 static inline void 3761 instr_mov_exec(struct rte_swx_pipeline *p) 3762 { 3763 struct thread *t = &p->threads[p->thread_id]; 3764 struct instruction *ip = t->ip; 3765 3766 TRACE("[Thread %2u] mov\n", 3767 p->thread_id); 3768 3769 MOV(t, ip); 3770 3771 /* Thread. */ 3772 thread_ip_inc(p); 3773 } 3774 3775 static inline void 3776 instr_mov_mh_exec(struct rte_swx_pipeline *p) 3777 { 3778 struct thread *t = &p->threads[p->thread_id]; 3779 struct instruction *ip = t->ip; 3780 3781 TRACE("[Thread %2u] mov (mh)\n", 3782 p->thread_id); 3783 3784 MOV_MH(t, ip); 3785 3786 /* Thread. */ 3787 thread_ip_inc(p); 3788 } 3789 3790 static inline void 3791 instr_mov_hm_exec(struct rte_swx_pipeline *p) 3792 { 3793 struct thread *t = &p->threads[p->thread_id]; 3794 struct instruction *ip = t->ip; 3795 3796 TRACE("[Thread %2u] mov (hm)\n", 3797 p->thread_id); 3798 3799 MOV_HM(t, ip); 3800 3801 /* Thread. */ 3802 thread_ip_inc(p); 3803 } 3804 3805 static inline void 3806 instr_mov_hh_exec(struct rte_swx_pipeline *p) 3807 { 3808 struct thread *t = &p->threads[p->thread_id]; 3809 struct instruction *ip = t->ip; 3810 3811 TRACE("[Thread %2u] mov (hh)\n", 3812 p->thread_id); 3813 3814 MOV_HH(t, ip); 3815 3816 /* Thread. */ 3817 thread_ip_inc(p); 3818 } 3819 3820 static inline void 3821 instr_mov_i_exec(struct rte_swx_pipeline *p) 3822 { 3823 struct thread *t = &p->threads[p->thread_id]; 3824 struct instruction *ip = t->ip; 3825 3826 TRACE("[Thread %2u] mov m.f %" PRIx64 "\n", 3827 p->thread_id, 3828 ip->mov.src_val); 3829 3830 MOV_I(t, ip); 3831 3832 /* Thread. */ 3833 thread_ip_inc(p); 3834 } 3835 3836 /* 3837 * dma. 3838 */ 3839 static inline void 3840 __instr_dma_ht_exec(struct rte_swx_pipeline *p, uint32_t n_dma); 3841 3842 static inline void 3843 __instr_dma_ht_exec(struct rte_swx_pipeline *p, uint32_t n_dma) 3844 { 3845 struct thread *t = &p->threads[p->thread_id]; 3846 struct instruction *ip = t->ip; 3847 uint8_t *action_data = t->structs[0]; 3848 uint64_t valid_headers = t->valid_headers; 3849 uint32_t i; 3850 3851 for (i = 0; i < n_dma; i++) { 3852 uint32_t header_id = ip->dma.dst.header_id[i]; 3853 uint32_t struct_id = ip->dma.dst.struct_id[i]; 3854 uint32_t offset = ip->dma.src.offset[i]; 3855 uint32_t n_bytes = ip->dma.n_bytes[i]; 3856 3857 struct header_runtime *h = &t->headers[header_id]; 3858 uint8_t *h_ptr0 = h->ptr0; 3859 uint8_t *h_ptr = t->structs[struct_id]; 3860 3861 void *dst = MASK64_BIT_GET(valid_headers, header_id) ? 
3862 h_ptr : h_ptr0; 3863 void *src = &action_data[offset]; 3864 3865 TRACE("[Thread %2u] dma h.s t.f\n", p->thread_id); 3866 3867 /* Headers. */ 3868 memcpy(dst, src, n_bytes); 3869 t->structs[struct_id] = dst; 3870 valid_headers = MASK64_BIT_SET(valid_headers, header_id); 3871 } 3872 3873 t->valid_headers = valid_headers; 3874 } 3875 3876 static inline void 3877 instr_dma_ht_exec(struct rte_swx_pipeline *p) 3878 { 3879 __instr_dma_ht_exec(p, 1); 3880 3881 /* Thread. */ 3882 thread_ip_inc(p); 3883 } 3884 3885 static inline void 3886 instr_dma_ht2_exec(struct rte_swx_pipeline *p) 3887 { 3888 TRACE("[Thread %2u] *** The next 2 instructions are fused. ***\n", 3889 p->thread_id); 3890 3891 __instr_dma_ht_exec(p, 2); 3892 3893 /* Thread. */ 3894 thread_ip_inc(p); 3895 } 3896 3897 static inline void 3898 instr_dma_ht3_exec(struct rte_swx_pipeline *p) 3899 { 3900 TRACE("[Thread %2u] *** The next 3 instructions are fused. ***\n", 3901 p->thread_id); 3902 3903 __instr_dma_ht_exec(p, 3); 3904 3905 /* Thread. */ 3906 thread_ip_inc(p); 3907 } 3908 3909 static inline void 3910 instr_dma_ht4_exec(struct rte_swx_pipeline *p) 3911 { 3912 TRACE("[Thread %2u] *** The next 4 instructions are fused. ***\n", 3913 p->thread_id); 3914 3915 __instr_dma_ht_exec(p, 4); 3916 3917 /* Thread. */ 3918 thread_ip_inc(p); 3919 } 3920 3921 static inline void 3922 instr_dma_ht5_exec(struct rte_swx_pipeline *p) 3923 { 3924 TRACE("[Thread %2u] *** The next 5 instructions are fused. ***\n", 3925 p->thread_id); 3926 3927 __instr_dma_ht_exec(p, 5); 3928 3929 /* Thread. */ 3930 thread_ip_inc(p); 3931 } 3932 3933 static inline void 3934 instr_dma_ht6_exec(struct rte_swx_pipeline *p) 3935 { 3936 TRACE("[Thread %2u] *** The next 6 instructions are fused. ***\n", 3937 p->thread_id); 3938 3939 __instr_dma_ht_exec(p, 6); 3940 3941 /* Thread. */ 3942 thread_ip_inc(p); 3943 } 3944 3945 static inline void 3946 instr_dma_ht7_exec(struct rte_swx_pipeline *p) 3947 { 3948 TRACE("[Thread %2u] *** The next 7 instructions are fused. ***\n", 3949 p->thread_id); 3950 3951 __instr_dma_ht_exec(p, 7); 3952 3953 /* Thread. */ 3954 thread_ip_inc(p); 3955 } 3956 3957 static inline void 3958 instr_dma_ht8_exec(struct rte_swx_pipeline *p) 3959 { 3960 TRACE("[Thread %2u] *** The next 8 instructions are fused. ***\n", 3961 p->thread_id); 3962 3963 __instr_dma_ht_exec(p, 8); 3964 3965 /* Thread. */ 3966 thread_ip_inc(p); 3967 } 3968 3969 /* 3970 * alu. 3971 */ 3972 static int 3973 instr_alu_add_translate(struct rte_swx_pipeline *p, 3974 struct action *action, 3975 char **tokens, 3976 int n_tokens, 3977 struct instruction *instr, 3978 struct instruction_data *data __rte_unused) 3979 { 3980 char *dst = tokens[1], *src = tokens[2]; 3981 struct field *fdst, *fsrc; 3982 uint64_t src_val; 3983 uint32_t dst_struct_id = 0, src_struct_id = 0; 3984 3985 CHECK(n_tokens == 3, EINVAL); 3986 3987 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id); 3988 CHECK(fdst, EINVAL); 3989 3990 /* ADD, ADD_HM, ADD_MH, ADD_HH. 
*/ 3991 fsrc = struct_field_parse(p, action, src, &src_struct_id); 3992 if (fsrc) { 3993 instr->type = INSTR_ALU_ADD; 3994 if (dst[0] == 'h' && src[0] != 'h') 3995 instr->type = INSTR_ALU_ADD_HM; 3996 if (dst[0] != 'h' && src[0] == 'h') 3997 instr->type = INSTR_ALU_ADD_MH; 3998 if (dst[0] == 'h' && src[0] == 'h') 3999 instr->type = INSTR_ALU_ADD_HH; 4000 4001 instr->alu.dst.struct_id = (uint8_t)dst_struct_id; 4002 instr->alu.dst.n_bits = fdst->n_bits; 4003 instr->alu.dst.offset = fdst->offset / 8; 4004 instr->alu.src.struct_id = (uint8_t)src_struct_id; 4005 instr->alu.src.n_bits = fsrc->n_bits; 4006 instr->alu.src.offset = fsrc->offset / 8; 4007 return 0; 4008 } 4009 4010 /* ADD_MI, ADD_HI. */ 4011 src_val = strtoull(src, &src, 0); 4012 CHECK(!src[0], EINVAL); 4013 4014 instr->type = INSTR_ALU_ADD_MI; 4015 if (dst[0] == 'h') 4016 instr->type = INSTR_ALU_ADD_HI; 4017 4018 instr->alu.dst.struct_id = (uint8_t)dst_struct_id; 4019 instr->alu.dst.n_bits = fdst->n_bits; 4020 instr->alu.dst.offset = fdst->offset / 8; 4021 instr->alu.src_val = src_val; 4022 return 0; 4023 } 4024 4025 static int 4026 instr_alu_sub_translate(struct rte_swx_pipeline *p, 4027 struct action *action, 4028 char **tokens, 4029 int n_tokens, 4030 struct instruction *instr, 4031 struct instruction_data *data __rte_unused) 4032 { 4033 char *dst = tokens[1], *src = tokens[2]; 4034 struct field *fdst, *fsrc; 4035 uint64_t src_val; 4036 uint32_t dst_struct_id = 0, src_struct_id = 0; 4037 4038 CHECK(n_tokens == 3, EINVAL); 4039 4040 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id); 4041 CHECK(fdst, EINVAL); 4042 4043 /* SUB, SUB_HM, SUB_MH, SUB_HH. */ 4044 fsrc = struct_field_parse(p, action, src, &src_struct_id); 4045 if (fsrc) { 4046 instr->type = INSTR_ALU_SUB; 4047 if (dst[0] == 'h' && src[0] != 'h') 4048 instr->type = INSTR_ALU_SUB_HM; 4049 if (dst[0] != 'h' && src[0] == 'h') 4050 instr->type = INSTR_ALU_SUB_MH; 4051 if (dst[0] == 'h' && src[0] == 'h') 4052 instr->type = INSTR_ALU_SUB_HH; 4053 4054 instr->alu.dst.struct_id = (uint8_t)dst_struct_id; 4055 instr->alu.dst.n_bits = fdst->n_bits; 4056 instr->alu.dst.offset = fdst->offset / 8; 4057 instr->alu.src.struct_id = (uint8_t)src_struct_id; 4058 instr->alu.src.n_bits = fsrc->n_bits; 4059 instr->alu.src.offset = fsrc->offset / 8; 4060 return 0; 4061 } 4062 4063 /* SUB_MI, SUB_HI. */ 4064 src_val = strtoull(src, &src, 0); 4065 CHECK(!src[0], EINVAL); 4066 4067 instr->type = INSTR_ALU_SUB_MI; 4068 if (dst[0] == 'h') 4069 instr->type = INSTR_ALU_SUB_HI; 4070 4071 instr->alu.dst.struct_id = (uint8_t)dst_struct_id; 4072 instr->alu.dst.n_bits = fdst->n_bits; 4073 instr->alu.dst.offset = fdst->offset / 8; 4074 instr->alu.src_val = src_val; 4075 return 0; 4076 } 4077 4078 static int 4079 instr_alu_ckadd_translate(struct rte_swx_pipeline *p, 4080 struct action *action __rte_unused, 4081 char **tokens, 4082 int n_tokens, 4083 struct instruction *instr, 4084 struct instruction_data *data __rte_unused) 4085 { 4086 char *dst = tokens[1], *src = tokens[2]; 4087 struct header *hdst, *hsrc; 4088 struct field *fdst, *fsrc; 4089 4090 CHECK(n_tokens == 3, EINVAL); 4091 4092 fdst = header_field_parse(p, dst, &hdst); 4093 CHECK(fdst && (fdst->n_bits == 16), EINVAL); 4094 4095 /* CKADD_FIELD. 
*/ 4096 fsrc = header_field_parse(p, src, &hsrc); 4097 if (fsrc) { 4098 instr->type = INSTR_ALU_CKADD_FIELD; 4099 instr->alu.dst.struct_id = (uint8_t)hdst->struct_id; 4100 instr->alu.dst.n_bits = fdst->n_bits; 4101 instr->alu.dst.offset = fdst->offset / 8; 4102 instr->alu.src.struct_id = (uint8_t)hsrc->struct_id; 4103 instr->alu.src.n_bits = fsrc->n_bits; 4104 instr->alu.src.offset = fsrc->offset / 8; 4105 return 0; 4106 } 4107 4108 /* CKADD_STRUCT, CKADD_STRUCT20. */ 4109 hsrc = header_parse(p, src); 4110 CHECK(hsrc, EINVAL); 4111 4112 instr->type = INSTR_ALU_CKADD_STRUCT; 4113 if ((hsrc->st->n_bits / 8) == 20) 4114 instr->type = INSTR_ALU_CKADD_STRUCT20; 4115 4116 instr->alu.dst.struct_id = (uint8_t)hdst->struct_id; 4117 instr->alu.dst.n_bits = fdst->n_bits; 4118 instr->alu.dst.offset = fdst->offset / 8; 4119 instr->alu.src.struct_id = (uint8_t)hsrc->struct_id; 4120 instr->alu.src.n_bits = hsrc->st->n_bits; 4121 instr->alu.src.offset = 0; /* Unused. */ 4122 return 0; 4123 } 4124 4125 static int 4126 instr_alu_cksub_translate(struct rte_swx_pipeline *p, 4127 struct action *action __rte_unused, 4128 char **tokens, 4129 int n_tokens, 4130 struct instruction *instr, 4131 struct instruction_data *data __rte_unused) 4132 { 4133 char *dst = tokens[1], *src = tokens[2]; 4134 struct header *hdst, *hsrc; 4135 struct field *fdst, *fsrc; 4136 4137 CHECK(n_tokens == 3, EINVAL); 4138 4139 fdst = header_field_parse(p, dst, &hdst); 4140 CHECK(fdst && (fdst->n_bits == 16), EINVAL); 4141 4142 fsrc = header_field_parse(p, src, &hsrc); 4143 CHECK(fsrc, EINVAL); 4144 4145 instr->type = INSTR_ALU_CKSUB_FIELD; 4146 instr->alu.dst.struct_id = (uint8_t)hdst->struct_id; 4147 instr->alu.dst.n_bits = fdst->n_bits; 4148 instr->alu.dst.offset = fdst->offset / 8; 4149 instr->alu.src.struct_id = (uint8_t)hsrc->struct_id; 4150 instr->alu.src.n_bits = fsrc->n_bits; 4151 instr->alu.src.offset = fsrc->offset / 8; 4152 return 0; 4153 } 4154 4155 static int 4156 instr_alu_shl_translate(struct rte_swx_pipeline *p, 4157 struct action *action, 4158 char **tokens, 4159 int n_tokens, 4160 struct instruction *instr, 4161 struct instruction_data *data __rte_unused) 4162 { 4163 char *dst = tokens[1], *src = tokens[2]; 4164 struct field *fdst, *fsrc; 4165 uint64_t src_val; 4166 uint32_t dst_struct_id = 0, src_struct_id = 0; 4167 4168 CHECK(n_tokens == 3, EINVAL); 4169 4170 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id); 4171 CHECK(fdst, EINVAL); 4172 4173 /* SHL, SHL_HM, SHL_MH, SHL_HH. */ 4174 fsrc = struct_field_parse(p, action, src, &src_struct_id); 4175 if (fsrc) { 4176 instr->type = INSTR_ALU_SHL; 4177 if (dst[0] == 'h' && src[0] != 'h') 4178 instr->type = INSTR_ALU_SHL_HM; 4179 if (dst[0] != 'h' && src[0] == 'h') 4180 instr->type = INSTR_ALU_SHL_MH; 4181 if (dst[0] == 'h' && src[0] == 'h') 4182 instr->type = INSTR_ALU_SHL_HH; 4183 4184 instr->alu.dst.struct_id = (uint8_t)dst_struct_id; 4185 instr->alu.dst.n_bits = fdst->n_bits; 4186 instr->alu.dst.offset = fdst->offset / 8; 4187 instr->alu.src.struct_id = (uint8_t)src_struct_id; 4188 instr->alu.src.n_bits = fsrc->n_bits; 4189 instr->alu.src.offset = fsrc->offset / 8; 4190 return 0; 4191 } 4192 4193 /* SHL_MI, SHL_HI. 
*/ 4194 src_val = strtoull(src, &src, 0); 4195 CHECK(!src[0], EINVAL); 4196 4197 instr->type = INSTR_ALU_SHL_MI; 4198 if (dst[0] == 'h') 4199 instr->type = INSTR_ALU_SHL_HI; 4200 4201 instr->alu.dst.struct_id = (uint8_t)dst_struct_id; 4202 instr->alu.dst.n_bits = fdst->n_bits; 4203 instr->alu.dst.offset = fdst->offset / 8; 4204 instr->alu.src_val = src_val; 4205 return 0; 4206 } 4207 4208 static int 4209 instr_alu_shr_translate(struct rte_swx_pipeline *p, 4210 struct action *action, 4211 char **tokens, 4212 int n_tokens, 4213 struct instruction *instr, 4214 struct instruction_data *data __rte_unused) 4215 { 4216 char *dst = tokens[1], *src = tokens[2]; 4217 struct field *fdst, *fsrc; 4218 uint64_t src_val; 4219 uint32_t dst_struct_id = 0, src_struct_id = 0; 4220 4221 CHECK(n_tokens == 3, EINVAL); 4222 4223 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id); 4224 CHECK(fdst, EINVAL); 4225 4226 /* SHR, SHR_HM, SHR_MH, SHR_HH. */ 4227 fsrc = struct_field_parse(p, action, src, &src_struct_id); 4228 if (fsrc) { 4229 instr->type = INSTR_ALU_SHR; 4230 if (dst[0] == 'h' && src[0] != 'h') 4231 instr->type = INSTR_ALU_SHR_HM; 4232 if (dst[0] != 'h' && src[0] == 'h') 4233 instr->type = INSTR_ALU_SHR_MH; 4234 if (dst[0] == 'h' && src[0] == 'h') 4235 instr->type = INSTR_ALU_SHR_HH; 4236 4237 instr->alu.dst.struct_id = (uint8_t)dst_struct_id; 4238 instr->alu.dst.n_bits = fdst->n_bits; 4239 instr->alu.dst.offset = fdst->offset / 8; 4240 instr->alu.src.struct_id = (uint8_t)src_struct_id; 4241 instr->alu.src.n_bits = fsrc->n_bits; 4242 instr->alu.src.offset = fsrc->offset / 8; 4243 return 0; 4244 } 4245 4246 /* SHR_MI, SHR_HI. */ 4247 src_val = strtoull(src, &src, 0); 4248 CHECK(!src[0], EINVAL); 4249 4250 instr->type = INSTR_ALU_SHR_MI; 4251 if (dst[0] == 'h') 4252 instr->type = INSTR_ALU_SHR_HI; 4253 4254 instr->alu.dst.struct_id = (uint8_t)dst_struct_id; 4255 instr->alu.dst.n_bits = fdst->n_bits; 4256 instr->alu.dst.offset = fdst->offset / 8; 4257 instr->alu.src_val = src_val; 4258 return 0; 4259 } 4260 4261 static int 4262 instr_alu_and_translate(struct rte_swx_pipeline *p, 4263 struct action *action, 4264 char **tokens, 4265 int n_tokens, 4266 struct instruction *instr, 4267 struct instruction_data *data __rte_unused) 4268 { 4269 char *dst = tokens[1], *src = tokens[2]; 4270 struct field *fdst, *fsrc; 4271 uint64_t src_val; 4272 uint32_t dst_struct_id = 0, src_struct_id = 0; 4273 4274 CHECK(n_tokens == 3, EINVAL); 4275 4276 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id); 4277 CHECK(fdst, EINVAL); 4278 4279 /* AND, AND_MH, AND_HM, AND_HH. */ 4280 fsrc = struct_field_parse(p, action, src, &src_struct_id); 4281 if (fsrc) { 4282 instr->type = INSTR_ALU_AND; 4283 if (dst[0] != 'h' && src[0] == 'h') 4284 instr->type = INSTR_ALU_AND_MH; 4285 if (dst[0] == 'h' && src[0] != 'h') 4286 instr->type = INSTR_ALU_AND_HM; 4287 if (dst[0] == 'h' && src[0] == 'h') 4288 instr->type = INSTR_ALU_AND_HH; 4289 4290 instr->alu.dst.struct_id = (uint8_t)dst_struct_id; 4291 instr->alu.dst.n_bits = fdst->n_bits; 4292 instr->alu.dst.offset = fdst->offset / 8; 4293 instr->alu.src.struct_id = (uint8_t)src_struct_id; 4294 instr->alu.src.n_bits = fsrc->n_bits; 4295 instr->alu.src.offset = fsrc->offset / 8; 4296 return 0; 4297 } 4298 4299 /* AND_I. 
*/ 4300 src_val = strtoull(src, &src, 0); 4301 CHECK(!src[0], EINVAL); 4302 4303 if (dst[0] == 'h') 4304 src_val = hton64(src_val) >> (64 - fdst->n_bits); 4305 4306 instr->type = INSTR_ALU_AND_I; 4307 instr->alu.dst.struct_id = (uint8_t)dst_struct_id; 4308 instr->alu.dst.n_bits = fdst->n_bits; 4309 instr->alu.dst.offset = fdst->offset / 8; 4310 instr->alu.src_val = src_val; 4311 return 0; 4312 } 4313 4314 static int 4315 instr_alu_or_translate(struct rte_swx_pipeline *p, 4316 struct action *action, 4317 char **tokens, 4318 int n_tokens, 4319 struct instruction *instr, 4320 struct instruction_data *data __rte_unused) 4321 { 4322 char *dst = tokens[1], *src = tokens[2]; 4323 struct field *fdst, *fsrc; 4324 uint64_t src_val; 4325 uint32_t dst_struct_id = 0, src_struct_id = 0; 4326 4327 CHECK(n_tokens == 3, EINVAL); 4328 4329 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id); 4330 CHECK(fdst, EINVAL); 4331 4332 /* OR, OR_MH, OR_HM, OR_HH. */ 4333 fsrc = struct_field_parse(p, action, src, &src_struct_id); 4334 if (fsrc) { 4335 instr->type = INSTR_ALU_OR; 4336 if (dst[0] != 'h' && src[0] == 'h') 4337 instr->type = INSTR_ALU_OR_MH; 4338 if (dst[0] == 'h' && src[0] != 'h') 4339 instr->type = INSTR_ALU_OR_HM; 4340 if (dst[0] == 'h' && src[0] == 'h') 4341 instr->type = INSTR_ALU_OR_HH; 4342 4343 instr->alu.dst.struct_id = (uint8_t)dst_struct_id; 4344 instr->alu.dst.n_bits = fdst->n_bits; 4345 instr->alu.dst.offset = fdst->offset / 8; 4346 instr->alu.src.struct_id = (uint8_t)src_struct_id; 4347 instr->alu.src.n_bits = fsrc->n_bits; 4348 instr->alu.src.offset = fsrc->offset / 8; 4349 return 0; 4350 } 4351 4352 /* OR_I. */ 4353 src_val = strtoull(src, &src, 0); 4354 CHECK(!src[0], EINVAL); 4355 4356 if (dst[0] == 'h') 4357 src_val = hton64(src_val) >> (64 - fdst->n_bits); 4358 4359 instr->type = INSTR_ALU_OR_I; 4360 instr->alu.dst.struct_id = (uint8_t)dst_struct_id; 4361 instr->alu.dst.n_bits = fdst->n_bits; 4362 instr->alu.dst.offset = fdst->offset / 8; 4363 instr->alu.src_val = src_val; 4364 return 0; 4365 } 4366 4367 static int 4368 instr_alu_xor_translate(struct rte_swx_pipeline *p, 4369 struct action *action, 4370 char **tokens, 4371 int n_tokens, 4372 struct instruction *instr, 4373 struct instruction_data *data __rte_unused) 4374 { 4375 char *dst = tokens[1], *src = tokens[2]; 4376 struct field *fdst, *fsrc; 4377 uint64_t src_val; 4378 uint32_t dst_struct_id = 0, src_struct_id = 0; 4379 4380 CHECK(n_tokens == 3, EINVAL); 4381 4382 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id); 4383 CHECK(fdst, EINVAL); 4384 4385 /* XOR, XOR_MH, XOR_HM, XOR_HH. */ 4386 fsrc = struct_field_parse(p, action, src, &src_struct_id); 4387 if (fsrc) { 4388 instr->type = INSTR_ALU_XOR; 4389 if (dst[0] != 'h' && src[0] == 'h') 4390 instr->type = INSTR_ALU_XOR_MH; 4391 if (dst[0] == 'h' && src[0] != 'h') 4392 instr->type = INSTR_ALU_XOR_HM; 4393 if (dst[0] == 'h' && src[0] == 'h') 4394 instr->type = INSTR_ALU_XOR_HH; 4395 4396 instr->alu.dst.struct_id = (uint8_t)dst_struct_id; 4397 instr->alu.dst.n_bits = fdst->n_bits; 4398 instr->alu.dst.offset = fdst->offset / 8; 4399 instr->alu.src.struct_id = (uint8_t)src_struct_id; 4400 instr->alu.src.n_bits = fsrc->n_bits; 4401 instr->alu.src.offset = fsrc->offset / 8; 4402 return 0; 4403 } 4404 4405 /* XOR_I. 
*/ 4406 src_val = strtoull(src, &src, 0); 4407 CHECK(!src[0], EINVAL); 4408 4409 if (dst[0] == 'h') 4410 src_val = hton64(src_val) >> (64 - fdst->n_bits); 4411 4412 instr->type = INSTR_ALU_XOR_I; 4413 instr->alu.dst.struct_id = (uint8_t)dst_struct_id; 4414 instr->alu.dst.n_bits = fdst->n_bits; 4415 instr->alu.dst.offset = fdst->offset / 8; 4416 instr->alu.src_val = src_val; 4417 return 0; 4418 } 4419 4420 static inline void 4421 instr_alu_add_exec(struct rte_swx_pipeline *p) 4422 { 4423 struct thread *t = &p->threads[p->thread_id]; 4424 struct instruction *ip = t->ip; 4425 4426 TRACE("[Thread %2u] add\n", p->thread_id); 4427 4428 /* Structs. */ 4429 ALU(t, ip, +); 4430 4431 /* Thread. */ 4432 thread_ip_inc(p); 4433 } 4434 4435 static inline void 4436 instr_alu_add_mh_exec(struct rte_swx_pipeline *p) 4437 { 4438 struct thread *t = &p->threads[p->thread_id]; 4439 struct instruction *ip = t->ip; 4440 4441 TRACE("[Thread %2u] add (mh)\n", p->thread_id); 4442 4443 /* Structs. */ 4444 ALU_MH(t, ip, +); 4445 4446 /* Thread. */ 4447 thread_ip_inc(p); 4448 } 4449 4450 static inline void 4451 instr_alu_add_hm_exec(struct rte_swx_pipeline *p) 4452 { 4453 struct thread *t = &p->threads[p->thread_id]; 4454 struct instruction *ip = t->ip; 4455 4456 TRACE("[Thread %2u] add (hm)\n", p->thread_id); 4457 4458 /* Structs. */ 4459 ALU_HM(t, ip, +); 4460 4461 /* Thread. */ 4462 thread_ip_inc(p); 4463 } 4464 4465 static inline void 4466 instr_alu_add_hh_exec(struct rte_swx_pipeline *p) 4467 { 4468 struct thread *t = &p->threads[p->thread_id]; 4469 struct instruction *ip = t->ip; 4470 4471 TRACE("[Thread %2u] add (hh)\n", p->thread_id); 4472 4473 /* Structs. */ 4474 ALU_HH(t, ip, +); 4475 4476 /* Thread. */ 4477 thread_ip_inc(p); 4478 } 4479 4480 static inline void 4481 instr_alu_add_mi_exec(struct rte_swx_pipeline *p) 4482 { 4483 struct thread *t = &p->threads[p->thread_id]; 4484 struct instruction *ip = t->ip; 4485 4486 TRACE("[Thread %2u] add (mi)\n", p->thread_id); 4487 4488 /* Structs. */ 4489 ALU_MI(t, ip, +); 4490 4491 /* Thread. */ 4492 thread_ip_inc(p); 4493 } 4494 4495 static inline void 4496 instr_alu_add_hi_exec(struct rte_swx_pipeline *p) 4497 { 4498 struct thread *t = &p->threads[p->thread_id]; 4499 struct instruction *ip = t->ip; 4500 4501 TRACE("[Thread %2u] add (hi)\n", p->thread_id); 4502 4503 /* Structs. */ 4504 ALU_HI(t, ip, +); 4505 4506 /* Thread. */ 4507 thread_ip_inc(p); 4508 } 4509 4510 static inline void 4511 instr_alu_sub_exec(struct rte_swx_pipeline *p) 4512 { 4513 struct thread *t = &p->threads[p->thread_id]; 4514 struct instruction *ip = t->ip; 4515 4516 TRACE("[Thread %2u] sub\n", p->thread_id); 4517 4518 /* Structs. */ 4519 ALU(t, ip, -); 4520 4521 /* Thread. */ 4522 thread_ip_inc(p); 4523 } 4524 4525 static inline void 4526 instr_alu_sub_mh_exec(struct rte_swx_pipeline *p) 4527 { 4528 struct thread *t = &p->threads[p->thread_id]; 4529 struct instruction *ip = t->ip; 4530 4531 TRACE("[Thread %2u] sub (mh)\n", p->thread_id); 4532 4533 /* Structs. */ 4534 ALU_MH(t, ip, -); 4535 4536 /* Thread. */ 4537 thread_ip_inc(p); 4538 } 4539 4540 static inline void 4541 instr_alu_sub_hm_exec(struct rte_swx_pipeline *p) 4542 { 4543 struct thread *t = &p->threads[p->thread_id]; 4544 struct instruction *ip = t->ip; 4545 4546 TRACE("[Thread %2u] sub (hm)\n", p->thread_id); 4547 4548 /* Structs. */ 4549 ALU_HM(t, ip, -); 4550 4551 /* Thread. 
*/ 4552 thread_ip_inc(p); 4553 } 4554 4555 static inline void 4556 instr_alu_sub_hh_exec(struct rte_swx_pipeline *p) 4557 { 4558 struct thread *t = &p->threads[p->thread_id]; 4559 struct instruction *ip = t->ip; 4560 4561 TRACE("[Thread %2u] sub (hh)\n", p->thread_id); 4562 4563 /* Structs. */ 4564 ALU_HH(t, ip, -); 4565 4566 /* Thread. */ 4567 thread_ip_inc(p); 4568 } 4569 4570 static inline void 4571 instr_alu_sub_mi_exec(struct rte_swx_pipeline *p) 4572 { 4573 struct thread *t = &p->threads[p->thread_id]; 4574 struct instruction *ip = t->ip; 4575 4576 TRACE("[Thread %2u] sub (mi)\n", p->thread_id); 4577 4578 /* Structs. */ 4579 ALU_MI(t, ip, -); 4580 4581 /* Thread. */ 4582 thread_ip_inc(p); 4583 } 4584 4585 static inline void 4586 instr_alu_sub_hi_exec(struct rte_swx_pipeline *p) 4587 { 4588 struct thread *t = &p->threads[p->thread_id]; 4589 struct instruction *ip = t->ip; 4590 4591 TRACE("[Thread %2u] sub (hi)\n", p->thread_id); 4592 4593 /* Structs. */ 4594 ALU_HI(t, ip, -); 4595 4596 /* Thread. */ 4597 thread_ip_inc(p); 4598 } 4599 4600 static inline void 4601 instr_alu_shl_exec(struct rte_swx_pipeline *p) 4602 { 4603 struct thread *t = &p->threads[p->thread_id]; 4604 struct instruction *ip = t->ip; 4605 4606 TRACE("[Thread %2u] shl\n", p->thread_id); 4607 4608 /* Structs. */ 4609 ALU(t, ip, <<); 4610 4611 /* Thread. */ 4612 thread_ip_inc(p); 4613 } 4614 4615 static inline void 4616 instr_alu_shl_mh_exec(struct rte_swx_pipeline *p) 4617 { 4618 struct thread *t = &p->threads[p->thread_id]; 4619 struct instruction *ip = t->ip; 4620 4621 TRACE("[Thread %2u] shl (mh)\n", p->thread_id); 4622 4623 /* Structs. */ 4624 ALU_MH(t, ip, <<); 4625 4626 /* Thread. */ 4627 thread_ip_inc(p); 4628 } 4629 4630 static inline void 4631 instr_alu_shl_hm_exec(struct rte_swx_pipeline *p) 4632 { 4633 struct thread *t = &p->threads[p->thread_id]; 4634 struct instruction *ip = t->ip; 4635 4636 TRACE("[Thread %2u] shl (hm)\n", p->thread_id); 4637 4638 /* Structs. */ 4639 ALU_HM(t, ip, <<); 4640 4641 /* Thread. */ 4642 thread_ip_inc(p); 4643 } 4644 4645 static inline void 4646 instr_alu_shl_hh_exec(struct rte_swx_pipeline *p) 4647 { 4648 struct thread *t = &p->threads[p->thread_id]; 4649 struct instruction *ip = t->ip; 4650 4651 TRACE("[Thread %2u] shl (hh)\n", p->thread_id); 4652 4653 /* Structs. */ 4654 ALU_HH(t, ip, <<); 4655 4656 /* Thread. */ 4657 thread_ip_inc(p); 4658 } 4659 4660 static inline void 4661 instr_alu_shl_mi_exec(struct rte_swx_pipeline *p) 4662 { 4663 struct thread *t = &p->threads[p->thread_id]; 4664 struct instruction *ip = t->ip; 4665 4666 TRACE("[Thread %2u] shl (mi)\n", p->thread_id); 4667 4668 /* Structs. */ 4669 ALU_MI(t, ip, <<); 4670 4671 /* Thread. */ 4672 thread_ip_inc(p); 4673 } 4674 4675 static inline void 4676 instr_alu_shl_hi_exec(struct rte_swx_pipeline *p) 4677 { 4678 struct thread *t = &p->threads[p->thread_id]; 4679 struct instruction *ip = t->ip; 4680 4681 TRACE("[Thread %2u] shl (hi)\n", p->thread_id); 4682 4683 /* Structs. */ 4684 ALU_HI(t, ip, <<); 4685 4686 /* Thread. */ 4687 thread_ip_inc(p); 4688 } 4689 4690 static inline void 4691 instr_alu_shr_exec(struct rte_swx_pipeline *p) 4692 { 4693 struct thread *t = &p->threads[p->thread_id]; 4694 struct instruction *ip = t->ip; 4695 4696 TRACE("[Thread %2u] shr\n", p->thread_id); 4697 4698 /* Structs. */ 4699 ALU(t, ip, >>); 4700 4701 /* Thread. 
*/ 4702 thread_ip_inc(p); 4703 } 4704 4705 static inline void 4706 instr_alu_shr_mh_exec(struct rte_swx_pipeline *p) 4707 { 4708 struct thread *t = &p->threads[p->thread_id]; 4709 struct instruction *ip = t->ip; 4710 4711 TRACE("[Thread %2u] shr (mh)\n", p->thread_id); 4712 4713 /* Structs. */ 4714 ALU_MH(t, ip, >>); 4715 4716 /* Thread. */ 4717 thread_ip_inc(p); 4718 } 4719 4720 static inline void 4721 instr_alu_shr_hm_exec(struct rte_swx_pipeline *p) 4722 { 4723 struct thread *t = &p->threads[p->thread_id]; 4724 struct instruction *ip = t->ip; 4725 4726 TRACE("[Thread %2u] shr (hm)\n", p->thread_id); 4727 4728 /* Structs. */ 4729 ALU_HM(t, ip, >>); 4730 4731 /* Thread. */ 4732 thread_ip_inc(p); 4733 } 4734 4735 static inline void 4736 instr_alu_shr_hh_exec(struct rte_swx_pipeline *p) 4737 { 4738 struct thread *t = &p->threads[p->thread_id]; 4739 struct instruction *ip = t->ip; 4740 4741 TRACE("[Thread %2u] shr (hh)\n", p->thread_id); 4742 4743 /* Structs. */ 4744 ALU_HH(t, ip, >>); 4745 4746 /* Thread. */ 4747 thread_ip_inc(p); 4748 } 4749 4750 static inline void 4751 instr_alu_shr_mi_exec(struct rte_swx_pipeline *p) 4752 { 4753 struct thread *t = &p->threads[p->thread_id]; 4754 struct instruction *ip = t->ip; 4755 4756 TRACE("[Thread %2u] shr (mi)\n", p->thread_id); 4757 4758 /* Structs. */ 4759 ALU_MI(t, ip, >>); 4760 4761 /* Thread. */ 4762 thread_ip_inc(p); 4763 } 4764 4765 static inline void 4766 instr_alu_shr_hi_exec(struct rte_swx_pipeline *p) 4767 { 4768 struct thread *t = &p->threads[p->thread_id]; 4769 struct instruction *ip = t->ip; 4770 4771 TRACE("[Thread %2u] shr (hi)\n", p->thread_id); 4772 4773 /* Structs. */ 4774 ALU_HI(t, ip, >>); 4775 4776 /* Thread. */ 4777 thread_ip_inc(p); 4778 } 4779 4780 static inline void 4781 instr_alu_and_exec(struct rte_swx_pipeline *p) 4782 { 4783 struct thread *t = &p->threads[p->thread_id]; 4784 struct instruction *ip = t->ip; 4785 4786 TRACE("[Thread %2u] and\n", p->thread_id); 4787 4788 /* Structs. */ 4789 ALU(t, ip, &); 4790 4791 /* Thread. */ 4792 thread_ip_inc(p); 4793 } 4794 4795 static inline void 4796 instr_alu_and_mh_exec(struct rte_swx_pipeline *p) 4797 { 4798 struct thread *t = &p->threads[p->thread_id]; 4799 struct instruction *ip = t->ip; 4800 4801 TRACE("[Thread %2u] and (mh)\n", p->thread_id); 4802 4803 /* Structs. */ 4804 ALU_MH(t, ip, &); 4805 4806 /* Thread. */ 4807 thread_ip_inc(p); 4808 } 4809 4810 static inline void 4811 instr_alu_and_hm_exec(struct rte_swx_pipeline *p) 4812 { 4813 struct thread *t = &p->threads[p->thread_id]; 4814 struct instruction *ip = t->ip; 4815 4816 TRACE("[Thread %2u] and (hm)\n", p->thread_id); 4817 4818 /* Structs. */ 4819 ALU_HM_FAST(t, ip, &); 4820 4821 /* Thread. */ 4822 thread_ip_inc(p); 4823 } 4824 4825 static inline void 4826 instr_alu_and_hh_exec(struct rte_swx_pipeline *p) 4827 { 4828 struct thread *t = &p->threads[p->thread_id]; 4829 struct instruction *ip = t->ip; 4830 4831 TRACE("[Thread %2u] and (hh)\n", p->thread_id); 4832 4833 /* Structs. */ 4834 ALU_HH_FAST(t, ip, &); 4835 4836 /* Thread. */ 4837 thread_ip_inc(p); 4838 } 4839 4840 static inline void 4841 instr_alu_and_i_exec(struct rte_swx_pipeline *p) 4842 { 4843 struct thread *t = &p->threads[p->thread_id]; 4844 struct instruction *ip = t->ip; 4845 4846 TRACE("[Thread %2u] and (i)\n", p->thread_id); 4847 4848 /* Structs. */ 4849 ALU_I(t, ip, &); 4850 4851 /* Thread. 
*/ 4852 thread_ip_inc(p); 4853 } 4854 4855 static inline void 4856 instr_alu_or_exec(struct rte_swx_pipeline *p) 4857 { 4858 struct thread *t = &p->threads[p->thread_id]; 4859 struct instruction *ip = t->ip; 4860 4861 TRACE("[Thread %2u] or\n", p->thread_id); 4862 4863 /* Structs. */ 4864 ALU(t, ip, |); 4865 4866 /* Thread. */ 4867 thread_ip_inc(p); 4868 } 4869 4870 static inline void 4871 instr_alu_or_mh_exec(struct rte_swx_pipeline *p) 4872 { 4873 struct thread *t = &p->threads[p->thread_id]; 4874 struct instruction *ip = t->ip; 4875 4876 TRACE("[Thread %2u] or (mh)\n", p->thread_id); 4877 4878 /* Structs. */ 4879 ALU_MH(t, ip, |); 4880 4881 /* Thread. */ 4882 thread_ip_inc(p); 4883 } 4884 4885 static inline void 4886 instr_alu_or_hm_exec(struct rte_swx_pipeline *p) 4887 { 4888 struct thread *t = &p->threads[p->thread_id]; 4889 struct instruction *ip = t->ip; 4890 4891 TRACE("[Thread %2u] or (hm)\n", p->thread_id); 4892 4893 /* Structs. */ 4894 ALU_HM_FAST(t, ip, |); 4895 4896 /* Thread. */ 4897 thread_ip_inc(p); 4898 } 4899 4900 static inline void 4901 instr_alu_or_hh_exec(struct rte_swx_pipeline *p) 4902 { 4903 struct thread *t = &p->threads[p->thread_id]; 4904 struct instruction *ip = t->ip; 4905 4906 TRACE("[Thread %2u] or (hh)\n", p->thread_id); 4907 4908 /* Structs. */ 4909 ALU_HH_FAST(t, ip, |); 4910 4911 /* Thread. */ 4912 thread_ip_inc(p); 4913 } 4914 4915 static inline void 4916 instr_alu_or_i_exec(struct rte_swx_pipeline *p) 4917 { 4918 struct thread *t = &p->threads[p->thread_id]; 4919 struct instruction *ip = t->ip; 4920 4921 TRACE("[Thread %2u] or (i)\n", p->thread_id); 4922 4923 /* Structs. */ 4924 ALU_I(t, ip, |); 4925 4926 /* Thread. */ 4927 thread_ip_inc(p); 4928 } 4929 4930 static inline void 4931 instr_alu_xor_exec(struct rte_swx_pipeline *p) 4932 { 4933 struct thread *t = &p->threads[p->thread_id]; 4934 struct instruction *ip = t->ip; 4935 4936 TRACE("[Thread %2u] xor\n", p->thread_id); 4937 4938 /* Structs. */ 4939 ALU(t, ip, ^); 4940 4941 /* Thread. */ 4942 thread_ip_inc(p); 4943 } 4944 4945 static inline void 4946 instr_alu_xor_mh_exec(struct rte_swx_pipeline *p) 4947 { 4948 struct thread *t = &p->threads[p->thread_id]; 4949 struct instruction *ip = t->ip; 4950 4951 TRACE("[Thread %2u] xor (mh)\n", p->thread_id); 4952 4953 /* Structs. */ 4954 ALU_MH(t, ip, ^); 4955 4956 /* Thread. */ 4957 thread_ip_inc(p); 4958 } 4959 4960 static inline void 4961 instr_alu_xor_hm_exec(struct rte_swx_pipeline *p) 4962 { 4963 struct thread *t = &p->threads[p->thread_id]; 4964 struct instruction *ip = t->ip; 4965 4966 TRACE("[Thread %2u] xor (hm)\n", p->thread_id); 4967 4968 /* Structs. */ 4969 ALU_HM_FAST(t, ip, ^); 4970 4971 /* Thread. */ 4972 thread_ip_inc(p); 4973 } 4974 4975 static inline void 4976 instr_alu_xor_hh_exec(struct rte_swx_pipeline *p) 4977 { 4978 struct thread *t = &p->threads[p->thread_id]; 4979 struct instruction *ip = t->ip; 4980 4981 TRACE("[Thread %2u] xor (hh)\n", p->thread_id); 4982 4983 /* Structs. */ 4984 ALU_HH_FAST(t, ip, ^); 4985 4986 /* Thread. */ 4987 thread_ip_inc(p); 4988 } 4989 4990 static inline void 4991 instr_alu_xor_i_exec(struct rte_swx_pipeline *p) 4992 { 4993 struct thread *t = &p->threads[p->thread_id]; 4994 struct instruction *ip = t->ip; 4995 4996 TRACE("[Thread %2u] xor (i)\n", p->thread_id); 4997 4998 /* Structs. */ 4999 ALU_I(t, ip, ^); 5000 5001 /* Thread. 
*/ 5002 thread_ip_inc(p); 5003 } 5004 5005 static inline void 5006 instr_alu_ckadd_field_exec(struct rte_swx_pipeline *p) 5007 { 5008 struct thread *t = &p->threads[p->thread_id]; 5009 struct instruction *ip = t->ip; 5010 uint8_t *dst_struct, *src_struct; 5011 uint16_t *dst16_ptr, dst; 5012 uint64_t *src64_ptr, src64, src64_mask, src; 5013 uint64_t r; 5014 5015 TRACE("[Thread %2u] ckadd (field)\n", p->thread_id); 5016 5017 /* Structs. */ 5018 dst_struct = t->structs[ip->alu.dst.struct_id]; 5019 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset]; 5020 dst = *dst16_ptr; 5021 5022 src_struct = t->structs[ip->alu.src.struct_id]; 5023 src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset]; 5024 src64 = *src64_ptr; 5025 src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits); 5026 src = src64 & src64_mask; 5027 5028 r = dst; 5029 r = ~r & 0xFFFF; 5030 5031 /* The first input (r) is a 16-bit number. The second and the third 5032 * inputs are 32-bit numbers. In the worst case scenario, the sum of the 5033 * three numbers (output r) is a 34-bit number. 5034 */ 5035 r += (src >> 32) + (src & 0xFFFFFFFF); 5036 5037 /* The first input is a 16-bit number. The second input is an 18-bit 5038 * number. In the worst case scenario, the sum of the two numbers is a 5039 * 19-bit number. 5040 */ 5041 r = (r & 0xFFFF) + (r >> 16); 5042 5043 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is 5044 * a 3-bit number (0 .. 7). Their sum is a 17-bit number (0 .. 0x10006). 5045 */ 5046 r = (r & 0xFFFF) + (r >> 16); 5047 5048 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input 5049 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 .. 5050 * 0x10006), the output r is (0 .. 7). So no carry bit can be generated, 5051 * therefore the output r is always a 16-bit number. 5052 */ 5053 r = (r & 0xFFFF) + (r >> 16); 5054 5055 r = ~r & 0xFFFF; 5056 r = r ? r : 0xFFFF; 5057 5058 *dst16_ptr = (uint16_t)r; 5059 5060 /* Thread. */ 5061 thread_ip_inc(p); 5062 } 5063 5064 static inline void 5065 instr_alu_cksub_field_exec(struct rte_swx_pipeline *p) 5066 { 5067 struct thread *t = &p->threads[p->thread_id]; 5068 struct instruction *ip = t->ip; 5069 uint8_t *dst_struct, *src_struct; 5070 uint16_t *dst16_ptr, dst; 5071 uint64_t *src64_ptr, src64, src64_mask, src; 5072 uint64_t r; 5073 5074 TRACE("[Thread %2u] cksub (field)\n", p->thread_id); 5075 5076 /* Structs. */ 5077 dst_struct = t->structs[ip->alu.dst.struct_id]; 5078 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset]; 5079 dst = *dst16_ptr; 5080 5081 src_struct = t->structs[ip->alu.src.struct_id]; 5082 src64_ptr = (uint64_t *)&src_struct[ip->alu.src.offset]; 5083 src64 = *src64_ptr; 5084 src64_mask = UINT64_MAX >> (64 - ip->alu.src.n_bits); 5085 src = src64 & src64_mask; 5086 5087 r = dst; 5088 r = ~r & 0xFFFF; 5089 5090 /* Subtraction in 1's complement arithmetic (i.e. a '- b) is the same as 5091 * the following sequence of operations in 2's complement arithmetic: 5092 * a '- b = (a - b) % 0xFFFF. 5093 * 5094 * In order to prevent an underflow for the below subtraction, in which 5095 * a 33-bit number (the subtrahend) is taken out of a 16-bit number (the 5096 * minuend), we first add a multiple of the 0xFFFF modulus to the 5097 * minuend. The number we add to the minuend needs to be a 34-bit number 5098 * or higher, so for readability reasons we picked the 36-bit multiple. 5099 * We are effectively turning the 16-bit minuend into a 36-bit number: 5100 * (a - b) % 0xFFFF = (a + 0xFFFF00000 - b) % 0xFFFF. 
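 *
 * Illustrative example (arbitrary values, not from any real packet): for
 * a = 0x0001 and b = 0x0003, a + 0xFFFF00000 - b = 0xFFFEFFFFE. Folding
 * with r = (r & 0xFFFF) + (r >> 16) gives 0xFFFE + 0xFFFEF = 0x10FFED,
 * then 0xFFED + 0x10 = 0xFFFD, which is the expected 1's complement
 * difference (1 - 3) % 0xFFFF = 0xFFFD.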
5101 */ 5102 r += 0xFFFF00000ULL; /* The output r is a 36-bit number. */ 5103 5104 /* A 33-bit number is subtracted from a 36-bit number (the input r). The 5105 * result (the output r) is a 36-bit number. 5106 */ 5107 r -= (src >> 32) + (src & 0xFFFFFFFF); 5108 5109 /* The first input is a 16-bit number. The second input is a 20-bit 5110 * number. Their sum is a 21-bit number. 5111 */ 5112 r = (r & 0xFFFF) + (r >> 16); 5113 5114 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is 5115 * a 5-bit number (0 .. 31). The sum is a 17-bit number (0 .. 0x1001E). 5116 */ 5117 r = (r & 0xFFFF) + (r >> 16); 5118 5119 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input 5120 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 .. 5121 * 0x1001E), the output r is (0 .. 31). So no carry bit can be 5122 * generated, therefore the output r is always a 16-bit number. 5123 */ 5124 r = (r & 0xFFFF) + (r >> 16); 5125 5126 r = ~r & 0xFFFF; 5127 r = r ? r : 0xFFFF; 5128 5129 *dst16_ptr = (uint16_t)r; 5130 5131 /* Thread. */ 5132 thread_ip_inc(p); 5133 } 5134 5135 static inline void 5136 instr_alu_ckadd_struct20_exec(struct rte_swx_pipeline *p) 5137 { 5138 struct thread *t = &p->threads[p->thread_id]; 5139 struct instruction *ip = t->ip; 5140 uint8_t *dst_struct, *src_struct; 5141 uint16_t *dst16_ptr; 5142 uint32_t *src32_ptr; 5143 uint64_t r0, r1; 5144 5145 TRACE("[Thread %2u] ckadd (struct of 20 bytes)\n", p->thread_id); 5146 5147 /* Structs. */ 5148 dst_struct = t->structs[ip->alu.dst.struct_id]; 5149 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset]; 5150 5151 src_struct = t->structs[ip->alu.src.struct_id]; 5152 src32_ptr = (uint32_t *)&src_struct[0]; 5153 5154 r0 = src32_ptr[0]; /* r0 is a 32-bit number. */ 5155 r1 = src32_ptr[1]; /* r1 is a 32-bit number. */ 5156 r0 += src32_ptr[2]; /* The output r0 is a 33-bit number. */ 5157 r1 += src32_ptr[3]; /* The output r1 is a 33-bit number. */ 5158 r0 += r1 + src32_ptr[4]; /* The output r0 is a 35-bit number. */ 5159 5160 /* The first input is a 16-bit number. The second input is a 19-bit 5161 * number. Their sum is a 20-bit number. 5162 */ 5163 r0 = (r0 & 0xFFFF) + (r0 >> 16); 5164 5165 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is 5166 * a 4-bit number (0 .. 15). The sum is a 17-bit number (0 .. 0x1000E). 5167 */ 5168 r0 = (r0 & 0xFFFF) + (r0 >> 16); 5169 5170 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input 5171 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 .. 5172 * 0x1000E), the output r is (0 .. 15). So no carry bit can be 5173 * generated, therefore the output r is always a 16-bit number. 5174 */ 5175 r0 = (r0 & 0xFFFF) + (r0 >> 16); 5176 5177 r0 = ~r0 & 0xFFFF; 5178 r0 = r0 ? r0 : 0xFFFF; 5179 5180 *dst16_ptr = (uint16_t)r0; 5181 5182 /* Thread. */ 5183 thread_ip_inc(p); 5184 } 5185 5186 static inline void 5187 instr_alu_ckadd_struct_exec(struct rte_swx_pipeline *p) 5188 { 5189 struct thread *t = &p->threads[p->thread_id]; 5190 struct instruction *ip = t->ip; 5191 uint8_t *dst_struct, *src_struct; 5192 uint16_t *dst16_ptr; 5193 uint32_t *src32_ptr; 5194 uint64_t r = 0; 5195 uint32_t i; 5196 5197 TRACE("[Thread %2u] ckadd (struct)\n", p->thread_id); 5198 5199 /* Structs. 
*/ 5200 dst_struct = t->structs[ip->alu.dst.struct_id]; 5201 dst16_ptr = (uint16_t *)&dst_struct[ip->alu.dst.offset]; 5202 5203 src_struct = t->structs[ip->alu.src.struct_id]; 5204 src32_ptr = (uint32_t *)&src_struct[0]; 5205 5206 /* The max number of 32-bit words in a 256-byte header is 8 = 2^3. 5207 * Therefore, in the worst case scenario, a 35-bit number is added to a 5208 * 16-bit number (the input r), so the output r is 36-bit number. 5209 */ 5210 for (i = 0; i < ip->alu.src.n_bits / 32; i++, src32_ptr++) 5211 r += *src32_ptr; 5212 5213 /* The first input is a 16-bit number. The second input is a 20-bit 5214 * number. Their sum is a 21-bit number. 5215 */ 5216 r = (r & 0xFFFF) + (r >> 16); 5217 5218 /* The first input is a 16-bit number (0 .. 0xFFFF). The second input is 5219 * a 5-bit number (0 .. 31). The sum is a 17-bit number (0 .. 0x1000E). 5220 */ 5221 r = (r & 0xFFFF) + (r >> 16); 5222 5223 /* When the input r is (0 .. 0xFFFF), the output r is equal to the input 5224 * r, so the output is (0 .. 0xFFFF). When the input r is (0x10000 .. 5225 * 0x1001E), the output r is (0 .. 31). So no carry bit can be 5226 * generated, therefore the output r is always a 16-bit number. 5227 */ 5228 r = (r & 0xFFFF) + (r >> 16); 5229 5230 r = ~r & 0xFFFF; 5231 r = r ? r : 0xFFFF; 5232 5233 *dst16_ptr = (uint16_t)r; 5234 5235 /* Thread. */ 5236 thread_ip_inc(p); 5237 } 5238 5239 /* 5240 * Register array. 5241 */ 5242 static struct regarray * 5243 regarray_find(struct rte_swx_pipeline *p, const char *name); 5244 5245 static int 5246 instr_regprefetch_translate(struct rte_swx_pipeline *p, 5247 struct action *action, 5248 char **tokens, 5249 int n_tokens, 5250 struct instruction *instr, 5251 struct instruction_data *data __rte_unused) 5252 { 5253 char *regarray = tokens[1], *idx = tokens[2]; 5254 struct regarray *r; 5255 struct field *fidx; 5256 uint32_t idx_struct_id, idx_val; 5257 5258 CHECK(n_tokens == 3, EINVAL); 5259 5260 r = regarray_find(p, regarray); 5261 CHECK(r, EINVAL); 5262 5263 /* REGPREFETCH_RH, REGPREFETCH_RM. */ 5264 fidx = struct_field_parse(p, action, idx, &idx_struct_id); 5265 if (fidx) { 5266 instr->type = INSTR_REGPREFETCH_RM; 5267 if (idx[0] == 'h') 5268 instr->type = INSTR_REGPREFETCH_RH; 5269 5270 instr->regarray.regarray_id = r->id; 5271 instr->regarray.idx.struct_id = (uint8_t)idx_struct_id; 5272 instr->regarray.idx.n_bits = fidx->n_bits; 5273 instr->regarray.idx.offset = fidx->offset / 8; 5274 instr->regarray.dstsrc_val = 0; /* Unused. */ 5275 return 0; 5276 } 5277 5278 /* REGPREFETCH_RI. */ 5279 idx_val = strtoul(idx, &idx, 0); 5280 CHECK(!idx[0], EINVAL); 5281 5282 instr->type = INSTR_REGPREFETCH_RI; 5283 instr->regarray.regarray_id = r->id; 5284 instr->regarray.idx_val = idx_val; 5285 instr->regarray.dstsrc_val = 0; /* Unused. */ 5286 return 0; 5287 } 5288 5289 static int 5290 instr_regrd_translate(struct rte_swx_pipeline *p, 5291 struct action *action, 5292 char **tokens, 5293 int n_tokens, 5294 struct instruction *instr, 5295 struct instruction_data *data __rte_unused) 5296 { 5297 char *dst = tokens[1], *regarray = tokens[2], *idx = tokens[3]; 5298 struct regarray *r; 5299 struct field *fdst, *fidx; 5300 uint32_t dst_struct_id, idx_struct_id, idx_val; 5301 5302 CHECK(n_tokens == 4, EINVAL); 5303 5304 r = regarray_find(p, regarray); 5305 CHECK(r, EINVAL); 5306 5307 fdst = struct_field_parse(p, NULL, dst, &dst_struct_id); 5308 CHECK(fdst, EINVAL); 5309 5310 /* REGRD_HRH, REGRD_HRM, REGRD_MRH, REGRD_MRM. 
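 *
 * Naming sketch (hypothetical field and register array names):
 * "regrd m.counter myregs h.ipv4.dst_addr" is mapped below to
 * INSTR_REGRD_MRH, since the destination is a meta-data field and the
 * index is a header field.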
*/ 5311 fidx = struct_field_parse(p, action, idx, &idx_struct_id); 5312 if (fidx) { 5313 instr->type = INSTR_REGRD_MRM; 5314 if (dst[0] == 'h' && idx[0] != 'h') 5315 instr->type = INSTR_REGRD_HRM; 5316 if (dst[0] != 'h' && idx[0] == 'h') 5317 instr->type = INSTR_REGRD_MRH; 5318 if (dst[0] == 'h' && idx[0] == 'h') 5319 instr->type = INSTR_REGRD_HRH; 5320 5321 instr->regarray.regarray_id = r->id; 5322 instr->regarray.idx.struct_id = (uint8_t)idx_struct_id; 5323 instr->regarray.idx.n_bits = fidx->n_bits; 5324 instr->regarray.idx.offset = fidx->offset / 8; 5325 instr->regarray.dstsrc.struct_id = (uint8_t)dst_struct_id; 5326 instr->regarray.dstsrc.n_bits = fdst->n_bits; 5327 instr->regarray.dstsrc.offset = fdst->offset / 8; 5328 return 0; 5329 } 5330 5331 /* REGRD_MRI, REGRD_HRI. */ 5332 idx_val = strtoul(idx, &idx, 0); 5333 CHECK(!idx[0], EINVAL); 5334 5335 instr->type = INSTR_REGRD_MRI; 5336 if (dst[0] == 'h') 5337 instr->type = INSTR_REGRD_HRI; 5338 5339 instr->regarray.regarray_id = r->id; 5340 instr->regarray.idx_val = idx_val; 5341 instr->regarray.dstsrc.struct_id = (uint8_t)dst_struct_id; 5342 instr->regarray.dstsrc.n_bits = fdst->n_bits; 5343 instr->regarray.dstsrc.offset = fdst->offset / 8; 5344 return 0; 5345 } 5346 5347 static int 5348 instr_regwr_translate(struct rte_swx_pipeline *p, 5349 struct action *action, 5350 char **tokens, 5351 int n_tokens, 5352 struct instruction *instr, 5353 struct instruction_data *data __rte_unused) 5354 { 5355 char *regarray = tokens[1], *idx = tokens[2], *src = tokens[3]; 5356 struct regarray *r; 5357 struct field *fidx, *fsrc; 5358 uint64_t src_val; 5359 uint32_t idx_struct_id, idx_val, src_struct_id; 5360 5361 CHECK(n_tokens == 4, EINVAL); 5362 5363 r = regarray_find(p, regarray); 5364 CHECK(r, EINVAL); 5365 5366 /* REGWR_RHH, REGWR_RHM, REGWR_RMH, REGWR_RMM. */ 5367 fidx = struct_field_parse(p, action, idx, &idx_struct_id); 5368 fsrc = struct_field_parse(p, action, src, &src_struct_id); 5369 if (fidx && fsrc) { 5370 instr->type = INSTR_REGWR_RMM; 5371 if (idx[0] == 'h' && src[0] != 'h') 5372 instr->type = INSTR_REGWR_RHM; 5373 if (idx[0] != 'h' && src[0] == 'h') 5374 instr->type = INSTR_REGWR_RMH; 5375 if (idx[0] == 'h' && src[0] == 'h') 5376 instr->type = INSTR_REGWR_RHH; 5377 5378 instr->regarray.regarray_id = r->id; 5379 instr->regarray.idx.struct_id = (uint8_t)idx_struct_id; 5380 instr->regarray.idx.n_bits = fidx->n_bits; 5381 instr->regarray.idx.offset = fidx->offset / 8; 5382 instr->regarray.dstsrc.struct_id = (uint8_t)src_struct_id; 5383 instr->regarray.dstsrc.n_bits = fsrc->n_bits; 5384 instr->regarray.dstsrc.offset = fsrc->offset / 8; 5385 return 0; 5386 } 5387 5388 /* REGWR_RHI, REGWR_RMI. */ 5389 if (fidx && !fsrc) { 5390 src_val = strtoull(src, &src, 0); 5391 CHECK(!src[0], EINVAL); 5392 5393 instr->type = INSTR_REGWR_RMI; 5394 if (idx[0] == 'h') 5395 instr->type = INSTR_REGWR_RHI; 5396 5397 instr->regarray.regarray_id = r->id; 5398 instr->regarray.idx.struct_id = (uint8_t)idx_struct_id; 5399 instr->regarray.idx.n_bits = fidx->n_bits; 5400 instr->regarray.idx.offset = fidx->offset / 8; 5401 instr->regarray.dstsrc_val = src_val; 5402 return 0; 5403 } 5404 5405 /* REGWR_RIH, REGWR_RIM. 
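 *
 * E.g. (hypothetical names) "regwr myregs 7 h.ipv4.ttl" lands in this
 * case: the index is the immediate 7 and the source is a header field,
 * so the instruction becomes INSTR_REGWR_RIH.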
*/ 5406 if (!fidx && fsrc) { 5407 idx_val = strtoul(idx, &idx, 0); 5408 CHECK(!idx[0], EINVAL); 5409 5410 instr->type = INSTR_REGWR_RIM; 5411 if (src[0] == 'h') 5412 instr->type = INSTR_REGWR_RIH; 5413 5414 instr->regarray.regarray_id = r->id; 5415 instr->regarray.idx_val = idx_val; 5416 instr->regarray.dstsrc.struct_id = (uint8_t)src_struct_id; 5417 instr->regarray.dstsrc.n_bits = fsrc->n_bits; 5418 instr->regarray.dstsrc.offset = fsrc->offset / 8; 5419 return 0; 5420 } 5421 5422 /* REGWR_RII. */ 5423 src_val = strtoull(src, &src, 0); 5424 CHECK(!src[0], EINVAL); 5425 5426 idx_val = strtoul(idx, &idx, 0); 5427 CHECK(!idx[0], EINVAL); 5428 5429 instr->type = INSTR_REGWR_RII; 5430 instr->regarray.idx_val = idx_val; 5431 instr->regarray.dstsrc_val = src_val; 5432 5433 return 0; 5434 } 5435 5436 static int 5437 instr_regadd_translate(struct rte_swx_pipeline *p, 5438 struct action *action, 5439 char **tokens, 5440 int n_tokens, 5441 struct instruction *instr, 5442 struct instruction_data *data __rte_unused) 5443 { 5444 char *regarray = tokens[1], *idx = tokens[2], *src = tokens[3]; 5445 struct regarray *r; 5446 struct field *fidx, *fsrc; 5447 uint64_t src_val; 5448 uint32_t idx_struct_id, idx_val, src_struct_id; 5449 5450 CHECK(n_tokens == 4, EINVAL); 5451 5452 r = regarray_find(p, regarray); 5453 CHECK(r, EINVAL); 5454 5455 /* REGADD_RHH, REGADD_RHM, REGADD_RMH, REGADD_RMM. */ 5456 fidx = struct_field_parse(p, action, idx, &idx_struct_id); 5457 fsrc = struct_field_parse(p, action, src, &src_struct_id); 5458 if (fidx && fsrc) { 5459 instr->type = INSTR_REGADD_RMM; 5460 if (idx[0] == 'h' && src[0] != 'h') 5461 instr->type = INSTR_REGADD_RHM; 5462 if (idx[0] != 'h' && src[0] == 'h') 5463 instr->type = INSTR_REGADD_RMH; 5464 if (idx[0] == 'h' && src[0] == 'h') 5465 instr->type = INSTR_REGADD_RHH; 5466 5467 instr->regarray.regarray_id = r->id; 5468 instr->regarray.idx.struct_id = (uint8_t)idx_struct_id; 5469 instr->regarray.idx.n_bits = fidx->n_bits; 5470 instr->regarray.idx.offset = fidx->offset / 8; 5471 instr->regarray.dstsrc.struct_id = (uint8_t)src_struct_id; 5472 instr->regarray.dstsrc.n_bits = fsrc->n_bits; 5473 instr->regarray.dstsrc.offset = fsrc->offset / 8; 5474 return 0; 5475 } 5476 5477 /* REGADD_RHI, REGADD_RMI. */ 5478 if (fidx && !fsrc) { 5479 src_val = strtoull(src, &src, 0); 5480 CHECK(!src[0], EINVAL); 5481 5482 instr->type = INSTR_REGADD_RMI; 5483 if (idx[0] == 'h') 5484 instr->type = INSTR_REGADD_RHI; 5485 5486 instr->regarray.regarray_id = r->id; 5487 instr->regarray.idx.struct_id = (uint8_t)idx_struct_id; 5488 instr->regarray.idx.n_bits = fidx->n_bits; 5489 instr->regarray.idx.offset = fidx->offset / 8; 5490 instr->regarray.dstsrc_val = src_val; 5491 return 0; 5492 } 5493 5494 /* REGADD_RIH, REGADD_RIM. */ 5495 if (!fidx && fsrc) { 5496 idx_val = strtoul(idx, &idx, 0); 5497 CHECK(!idx[0], EINVAL); 5498 5499 instr->type = INSTR_REGADD_RIM; 5500 if (src[0] == 'h') 5501 instr->type = INSTR_REGADD_RIH; 5502 5503 instr->regarray.regarray_id = r->id; 5504 instr->regarray.idx_val = idx_val; 5505 instr->regarray.dstsrc.struct_id = (uint8_t)src_struct_id; 5506 instr->regarray.dstsrc.n_bits = fsrc->n_bits; 5507 instr->regarray.dstsrc.offset = fsrc->offset / 8; 5508 return 0; 5509 } 5510 5511 /* REGADD_RII. 
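 *
 * E.g. "regadd myregs 3 1" (hypothetical register array name) falls
 * through to this case: both the index (3) and the value to add (1) are
 * immediates, so register 3 is incremented by 1 at run-time.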
*/ 5512 src_val = strtoull(src, &src, 0); 5513 CHECK(!src[0], EINVAL); 5514 5515 idx_val = strtoul(idx, &idx, 0); 5516 CHECK(!idx[0], EINVAL); 5517 5518 instr->type = INSTR_REGADD_RII; 5519 instr->regarray.idx_val = idx_val; 5520 instr->regarray.dstsrc_val = src_val; 5521 return 0; 5522 } 5523 5524 static inline uint64_t * 5525 instr_regarray_regarray(struct rte_swx_pipeline *p, struct instruction *ip) 5526 { 5527 struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id]; 5528 return r->regarray; 5529 } 5530 5531 static inline uint64_t 5532 instr_regarray_idx_hbo(struct rte_swx_pipeline *p, struct thread *t, struct instruction *ip) 5533 { 5534 struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id]; 5535 5536 uint8_t *idx_struct = t->structs[ip->regarray.idx.struct_id]; 5537 uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->regarray.idx.offset]; 5538 uint64_t idx64 = *idx64_ptr; 5539 uint64_t idx64_mask = UINT64_MAX >> (64 - ip->regarray.idx.n_bits); 5540 uint64_t idx = idx64 & idx64_mask & r->size_mask; 5541 5542 return idx; 5543 } 5544 5545 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN 5546 5547 static inline uint64_t 5548 instr_regarray_idx_nbo(struct rte_swx_pipeline *p, struct thread *t, struct instruction *ip) 5549 { 5550 struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id]; 5551 5552 uint8_t *idx_struct = t->structs[ip->regarray.idx.struct_id]; 5553 uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->regarray.idx.offset]; 5554 uint64_t idx64 = *idx64_ptr; 5555 uint64_t idx = (ntoh64(idx64) >> (64 - ip->regarray.idx.n_bits)) & r->size_mask; 5556 5557 return idx; 5558 } 5559 5560 #else 5561 5562 #define instr_regarray_idx_nbo instr_regarray_idx_hbo 5563 5564 #endif 5565 5566 static inline uint64_t 5567 instr_regarray_idx_imm(struct rte_swx_pipeline *p, struct instruction *ip) 5568 { 5569 struct regarray_runtime *r = &p->regarray_runtime[ip->regarray.regarray_id]; 5570 5571 uint64_t idx = ip->regarray.idx_val & r->size_mask; 5572 5573 return idx; 5574 } 5575 5576 static inline uint64_t 5577 instr_regarray_src_hbo(struct thread *t, struct instruction *ip) 5578 { 5579 uint8_t *src_struct = t->structs[ip->regarray.dstsrc.struct_id]; 5580 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->regarray.dstsrc.offset]; 5581 uint64_t src64 = *src64_ptr; 5582 uint64_t src64_mask = UINT64_MAX >> (64 - ip->regarray.dstsrc.n_bits); 5583 uint64_t src = src64 & src64_mask; 5584 5585 return src; 5586 } 5587 5588 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN 5589 5590 static inline uint64_t 5591 instr_regarray_src_nbo(struct thread *t, struct instruction *ip) 5592 { 5593 uint8_t *src_struct = t->structs[ip->regarray.dstsrc.struct_id]; 5594 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->regarray.dstsrc.offset]; 5595 uint64_t src64 = *src64_ptr; 5596 uint64_t src = ntoh64(src64) >> (64 - ip->regarray.dstsrc.n_bits); 5597 5598 return src; 5599 } 5600 5601 #else 5602 5603 #define instr_regarray_src_nbo instr_regarray_src_hbo 5604 5605 #endif 5606 5607 static inline void 5608 instr_regarray_dst_hbo_src_hbo_set(struct thread *t, struct instruction *ip, uint64_t src) 5609 { 5610 uint8_t *dst_struct = t->structs[ip->regarray.dstsrc.struct_id]; 5611 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[ip->regarray.dstsrc.offset]; 5612 uint64_t dst64 = *dst64_ptr; 5613 uint64_t dst64_mask = UINT64_MAX >> (64 - ip->regarray.dstsrc.n_bits); 5614 5615 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); 5616 5617 } 5618 5619 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN 5620 
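/* Little-endian host only: the destination is a header field stored in
 * network byte order, so the host-order source value is first run through
 * hton64() and shifted right by (64 - n_bits) before the same masked merge
 * as in the HBO variant above. On big-endian hosts the HBO variant is
 * reused directly (see the #else branch).
 */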
5621 static inline void 5622 instr_regarray_dst_nbo_src_hbo_set(struct thread *t, struct instruction *ip, uint64_t src) 5623 { 5624 uint8_t *dst_struct = t->structs[ip->regarray.dstsrc.struct_id]; 5625 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[ip->regarray.dstsrc.offset]; 5626 uint64_t dst64 = *dst64_ptr; 5627 uint64_t dst64_mask = UINT64_MAX >> (64 - ip->regarray.dstsrc.n_bits); 5628 5629 src = hton64(src) >> (64 - ip->regarray.dstsrc.n_bits); 5630 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); 5631 } 5632 5633 #else 5634 5635 #define instr_regarray_dst_nbo_src_hbo_set instr_regarray_dst_hbo_src_hbo_set 5636 5637 #endif 5638 5639 static inline void 5640 instr_regprefetch_rh_exec(struct rte_swx_pipeline *p) 5641 { 5642 struct thread *t = &p->threads[p->thread_id]; 5643 struct instruction *ip = t->ip; 5644 uint64_t *regarray, idx; 5645 5646 TRACE("[Thread %2u] regprefetch (r[h])\n", p->thread_id); 5647 5648 /* Structs. */ 5649 regarray = instr_regarray_regarray(p, ip); 5650 idx = instr_regarray_idx_nbo(p, t, ip); 5651 rte_prefetch0(®array[idx]); 5652 5653 /* Thread. */ 5654 thread_ip_inc(p); 5655 } 5656 5657 static inline void 5658 instr_regprefetch_rm_exec(struct rte_swx_pipeline *p) 5659 { 5660 struct thread *t = &p->threads[p->thread_id]; 5661 struct instruction *ip = t->ip; 5662 uint64_t *regarray, idx; 5663 5664 TRACE("[Thread %2u] regprefetch (r[m])\n", p->thread_id); 5665 5666 /* Structs. */ 5667 regarray = instr_regarray_regarray(p, ip); 5668 idx = instr_regarray_idx_hbo(p, t, ip); 5669 rte_prefetch0(®array[idx]); 5670 5671 /* Thread. */ 5672 thread_ip_inc(p); 5673 } 5674 5675 static inline void 5676 instr_regprefetch_ri_exec(struct rte_swx_pipeline *p) 5677 { 5678 struct thread *t = &p->threads[p->thread_id]; 5679 struct instruction *ip = t->ip; 5680 uint64_t *regarray, idx; 5681 5682 TRACE("[Thread %2u] regprefetch (r[i])\n", p->thread_id); 5683 5684 /* Structs. */ 5685 regarray = instr_regarray_regarray(p, ip); 5686 idx = instr_regarray_idx_imm(p, ip); 5687 rte_prefetch0(®array[idx]); 5688 5689 /* Thread. */ 5690 thread_ip_inc(p); 5691 } 5692 5693 static inline void 5694 instr_regrd_hrh_exec(struct rte_swx_pipeline *p) 5695 { 5696 struct thread *t = &p->threads[p->thread_id]; 5697 struct instruction *ip = t->ip; 5698 uint64_t *regarray, idx; 5699 5700 TRACE("[Thread %2u] regrd (h = r[h])\n", p->thread_id); 5701 5702 /* Structs. */ 5703 regarray = instr_regarray_regarray(p, ip); 5704 idx = instr_regarray_idx_nbo(p, t, ip); 5705 instr_regarray_dst_nbo_src_hbo_set(t, ip, regarray[idx]); 5706 5707 /* Thread. */ 5708 thread_ip_inc(p); 5709 } 5710 5711 static inline void 5712 instr_regrd_hrm_exec(struct rte_swx_pipeline *p) 5713 { 5714 struct thread *t = &p->threads[p->thread_id]; 5715 struct instruction *ip = t->ip; 5716 uint64_t *regarray, idx; 5717 5718 TRACE("[Thread %2u] regrd (h = r[m])\n", p->thread_id); 5719 5720 /* Structs. */ 5721 regarray = instr_regarray_regarray(p, ip); 5722 idx = instr_regarray_idx_hbo(p, t, ip); 5723 instr_regarray_dst_nbo_src_hbo_set(t, ip, regarray[idx]); 5724 5725 /* Thread. */ 5726 thread_ip_inc(p); 5727 } 5728 5729 static inline void 5730 instr_regrd_mrh_exec(struct rte_swx_pipeline *p) 5731 { 5732 struct thread *t = &p->threads[p->thread_id]; 5733 struct instruction *ip = t->ip; 5734 uint64_t *regarray, idx; 5735 5736 TRACE("[Thread %2u] regrd (m = r[h])\n", p->thread_id); 5737 5738 /* Structs. 
*/ 5739 regarray = instr_regarray_regarray(p, ip); 5740 idx = instr_regarray_idx_nbo(p, t, ip); 5741 instr_regarray_dst_hbo_src_hbo_set(t, ip, regarray[idx]); 5742 5743 /* Thread. */ 5744 thread_ip_inc(p); 5745 } 5746 5747 static inline void 5748 instr_regrd_mrm_exec(struct rte_swx_pipeline *p) 5749 { 5750 struct thread *t = &p->threads[p->thread_id]; 5751 struct instruction *ip = t->ip; 5752 uint64_t *regarray, idx; 5753 5754 /* Structs. */ 5755 regarray = instr_regarray_regarray(p, ip); 5756 idx = instr_regarray_idx_hbo(p, t, ip); 5757 instr_regarray_dst_hbo_src_hbo_set(t, ip, regarray[idx]); 5758 5759 /* Thread. */ 5760 thread_ip_inc(p); 5761 } 5762 5763 static inline void 5764 instr_regrd_hri_exec(struct rte_swx_pipeline *p) 5765 { 5766 struct thread *t = &p->threads[p->thread_id]; 5767 struct instruction *ip = t->ip; 5768 uint64_t *regarray, idx; 5769 5770 TRACE("[Thread %2u] regrd (h = r[i])\n", p->thread_id); 5771 5772 /* Structs. */ 5773 regarray = instr_regarray_regarray(p, ip); 5774 idx = instr_regarray_idx_imm(p, ip); 5775 instr_regarray_dst_nbo_src_hbo_set(t, ip, regarray[idx]); 5776 5777 /* Thread. */ 5778 thread_ip_inc(p); 5779 } 5780 5781 static inline void 5782 instr_regrd_mri_exec(struct rte_swx_pipeline *p) 5783 { 5784 struct thread *t = &p->threads[p->thread_id]; 5785 struct instruction *ip = t->ip; 5786 uint64_t *regarray, idx; 5787 5788 TRACE("[Thread %2u] regrd (m = r[i])\n", p->thread_id); 5789 5790 /* Structs. */ 5791 regarray = instr_regarray_regarray(p, ip); 5792 idx = instr_regarray_idx_imm(p, ip); 5793 instr_regarray_dst_hbo_src_hbo_set(t, ip, regarray[idx]); 5794 5795 /* Thread. */ 5796 thread_ip_inc(p); 5797 } 5798 5799 static inline void 5800 instr_regwr_rhh_exec(struct rte_swx_pipeline *p) 5801 { 5802 struct thread *t = &p->threads[p->thread_id]; 5803 struct instruction *ip = t->ip; 5804 uint64_t *regarray, idx, src; 5805 5806 TRACE("[Thread %2u] regwr (r[h] = h)\n", p->thread_id); 5807 5808 /* Structs. */ 5809 regarray = instr_regarray_regarray(p, ip); 5810 idx = instr_regarray_idx_nbo(p, t, ip); 5811 src = instr_regarray_src_nbo(t, ip); 5812 regarray[idx] = src; 5813 5814 /* Thread. */ 5815 thread_ip_inc(p); 5816 } 5817 5818 static inline void 5819 instr_regwr_rhm_exec(struct rte_swx_pipeline *p) 5820 { 5821 struct thread *t = &p->threads[p->thread_id]; 5822 struct instruction *ip = t->ip; 5823 uint64_t *regarray, idx, src; 5824 5825 TRACE("[Thread %2u] regwr (r[h] = m)\n", p->thread_id); 5826 5827 /* Structs. */ 5828 regarray = instr_regarray_regarray(p, ip); 5829 idx = instr_regarray_idx_nbo(p, t, ip); 5830 src = instr_regarray_src_hbo(t, ip); 5831 regarray[idx] = src; 5832 5833 /* Thread. */ 5834 thread_ip_inc(p); 5835 } 5836 5837 static inline void 5838 instr_regwr_rmh_exec(struct rte_swx_pipeline *p) 5839 { 5840 struct thread *t = &p->threads[p->thread_id]; 5841 struct instruction *ip = t->ip; 5842 uint64_t *regarray, idx, src; 5843 5844 TRACE("[Thread %2u] regwr (r[m] = h)\n", p->thread_id); 5845 5846 /* Structs. */ 5847 regarray = instr_regarray_regarray(p, ip); 5848 idx = instr_regarray_idx_hbo(p, t, ip); 5849 src = instr_regarray_src_nbo(t, ip); 5850 regarray[idx] = src; 5851 5852 /* Thread. */ 5853 thread_ip_inc(p); 5854 } 5855 5856 static inline void 5857 instr_regwr_rmm_exec(struct rte_swx_pipeline *p) 5858 { 5859 struct thread *t = &p->threads[p->thread_id]; 5860 struct instruction *ip = t->ip; 5861 uint64_t *regarray, idx, src; 5862 5863 TRACE("[Thread %2u] regwr (r[m] = m)\n", p->thread_id); 5864 5865 /* Structs. 
*/ 5866 regarray = instr_regarray_regarray(p, ip); 5867 idx = instr_regarray_idx_hbo(p, t, ip); 5868 src = instr_regarray_src_hbo(t, ip); 5869 regarray[idx] = src; 5870 5871 /* Thread. */ 5872 thread_ip_inc(p); 5873 } 5874 5875 static inline void 5876 instr_regwr_rhi_exec(struct rte_swx_pipeline *p) 5877 { 5878 struct thread *t = &p->threads[p->thread_id]; 5879 struct instruction *ip = t->ip; 5880 uint64_t *regarray, idx, src; 5881 5882 TRACE("[Thread %2u] regwr (r[h] = i)\n", p->thread_id); 5883 5884 /* Structs. */ 5885 regarray = instr_regarray_regarray(p, ip); 5886 idx = instr_regarray_idx_nbo(p, t, ip); 5887 src = ip->regarray.dstsrc_val; 5888 regarray[idx] = src; 5889 5890 /* Thread. */ 5891 thread_ip_inc(p); 5892 } 5893 5894 static inline void 5895 instr_regwr_rmi_exec(struct rte_swx_pipeline *p) 5896 { 5897 struct thread *t = &p->threads[p->thread_id]; 5898 struct instruction *ip = t->ip; 5899 uint64_t *regarray, idx, src; 5900 5901 TRACE("[Thread %2u] regwr (r[m] = i)\n", p->thread_id); 5902 5903 /* Structs. */ 5904 regarray = instr_regarray_regarray(p, ip); 5905 idx = instr_regarray_idx_hbo(p, t, ip); 5906 src = ip->regarray.dstsrc_val; 5907 regarray[idx] = src; 5908 5909 /* Thread. */ 5910 thread_ip_inc(p); 5911 } 5912 5913 static inline void 5914 instr_regwr_rih_exec(struct rte_swx_pipeline *p) 5915 { 5916 struct thread *t = &p->threads[p->thread_id]; 5917 struct instruction *ip = t->ip; 5918 uint64_t *regarray, idx, src; 5919 5920 TRACE("[Thread %2u] regwr (r[i] = h)\n", p->thread_id); 5921 5922 /* Structs. */ 5923 regarray = instr_regarray_regarray(p, ip); 5924 idx = instr_regarray_idx_imm(p, ip); 5925 src = instr_regarray_src_nbo(t, ip); 5926 regarray[idx] = src; 5927 5928 /* Thread. */ 5929 thread_ip_inc(p); 5930 } 5931 5932 static inline void 5933 instr_regwr_rim_exec(struct rte_swx_pipeline *p) 5934 { 5935 struct thread *t = &p->threads[p->thread_id]; 5936 struct instruction *ip = t->ip; 5937 uint64_t *regarray, idx, src; 5938 5939 TRACE("[Thread %2u] regwr (r[i] = m)\n", p->thread_id); 5940 5941 /* Structs. */ 5942 regarray = instr_regarray_regarray(p, ip); 5943 idx = instr_regarray_idx_imm(p, ip); 5944 src = instr_regarray_src_hbo(t, ip); 5945 regarray[idx] = src; 5946 5947 /* Thread. */ 5948 thread_ip_inc(p); 5949 } 5950 5951 static inline void 5952 instr_regwr_rii_exec(struct rte_swx_pipeline *p) 5953 { 5954 struct thread *t = &p->threads[p->thread_id]; 5955 struct instruction *ip = t->ip; 5956 uint64_t *regarray, idx, src; 5957 5958 TRACE("[Thread %2u] regwr (r[i] = i)\n", p->thread_id); 5959 5960 /* Structs. */ 5961 regarray = instr_regarray_regarray(p, ip); 5962 idx = instr_regarray_idx_imm(p, ip); 5963 src = ip->regarray.dstsrc_val; 5964 regarray[idx] = src; 5965 5966 /* Thread. */ 5967 thread_ip_inc(p); 5968 } 5969 5970 static inline void 5971 instr_regadd_rhh_exec(struct rte_swx_pipeline *p) 5972 { 5973 struct thread *t = &p->threads[p->thread_id]; 5974 struct instruction *ip = t->ip; 5975 uint64_t *regarray, idx, src; 5976 5977 TRACE("[Thread %2u] regadd (r[h] += h)\n", p->thread_id); 5978 5979 /* Structs. */ 5980 regarray = instr_regarray_regarray(p, ip); 5981 idx = instr_regarray_idx_nbo(p, t, ip); 5982 src = instr_regarray_src_nbo(t, ip); 5983 regarray[idx] += src; 5984 5985 /* Thread. 
*/ 5986 thread_ip_inc(p); 5987 } 5988 5989 static inline void 5990 instr_regadd_rhm_exec(struct rte_swx_pipeline *p) 5991 { 5992 struct thread *t = &p->threads[p->thread_id]; 5993 struct instruction *ip = t->ip; 5994 uint64_t *regarray, idx, src; 5995 5996 TRACE("[Thread %2u] regadd (r[h] += m)\n", p->thread_id); 5997 5998 /* Structs. */ 5999 regarray = instr_regarray_regarray(p, ip); 6000 idx = instr_regarray_idx_nbo(p, t, ip); 6001 src = instr_regarray_src_hbo(t, ip); 6002 regarray[idx] += src; 6003 6004 /* Thread. */ 6005 thread_ip_inc(p); 6006 } 6007 6008 static inline void 6009 instr_regadd_rmh_exec(struct rte_swx_pipeline *p) 6010 { 6011 struct thread *t = &p->threads[p->thread_id]; 6012 struct instruction *ip = t->ip; 6013 uint64_t *regarray, idx, src; 6014 6015 TRACE("[Thread %2u] regadd (r[m] += h)\n", p->thread_id); 6016 6017 /* Structs. */ 6018 regarray = instr_regarray_regarray(p, ip); 6019 idx = instr_regarray_idx_hbo(p, t, ip); 6020 src = instr_regarray_src_nbo(t, ip); 6021 regarray[idx] += src; 6022 6023 /* Thread. */ 6024 thread_ip_inc(p); 6025 } 6026 6027 static inline void 6028 instr_regadd_rmm_exec(struct rte_swx_pipeline *p) 6029 { 6030 struct thread *t = &p->threads[p->thread_id]; 6031 struct instruction *ip = t->ip; 6032 uint64_t *regarray, idx, src; 6033 6034 TRACE("[Thread %2u] regadd (r[m] += m)\n", p->thread_id); 6035 6036 /* Structs. */ 6037 regarray = instr_regarray_regarray(p, ip); 6038 idx = instr_regarray_idx_hbo(p, t, ip); 6039 src = instr_regarray_src_hbo(t, ip); 6040 regarray[idx] += src; 6041 6042 /* Thread. */ 6043 thread_ip_inc(p); 6044 } 6045 6046 static inline void 6047 instr_regadd_rhi_exec(struct rte_swx_pipeline *p) 6048 { 6049 struct thread *t = &p->threads[p->thread_id]; 6050 struct instruction *ip = t->ip; 6051 uint64_t *regarray, idx, src; 6052 6053 TRACE("[Thread %2u] regadd (r[h] += i)\n", p->thread_id); 6054 6055 /* Structs. */ 6056 regarray = instr_regarray_regarray(p, ip); 6057 idx = instr_regarray_idx_nbo(p, t, ip); 6058 src = ip->regarray.dstsrc_val; 6059 regarray[idx] += src; 6060 6061 /* Thread. */ 6062 thread_ip_inc(p); 6063 } 6064 6065 static inline void 6066 instr_regadd_rmi_exec(struct rte_swx_pipeline *p) 6067 { 6068 struct thread *t = &p->threads[p->thread_id]; 6069 struct instruction *ip = t->ip; 6070 uint64_t *regarray, idx, src; 6071 6072 TRACE("[Thread %2u] regadd (r[m] += i)\n", p->thread_id); 6073 6074 /* Structs. */ 6075 regarray = instr_regarray_regarray(p, ip); 6076 idx = instr_regarray_idx_hbo(p, t, ip); 6077 src = ip->regarray.dstsrc_val; 6078 regarray[idx] += src; 6079 6080 /* Thread. */ 6081 thread_ip_inc(p); 6082 } 6083 6084 static inline void 6085 instr_regadd_rih_exec(struct rte_swx_pipeline *p) 6086 { 6087 struct thread *t = &p->threads[p->thread_id]; 6088 struct instruction *ip = t->ip; 6089 uint64_t *regarray, idx, src; 6090 6091 TRACE("[Thread %2u] regadd (r[i] += h)\n", p->thread_id); 6092 6093 /* Structs. */ 6094 regarray = instr_regarray_regarray(p, ip); 6095 idx = instr_regarray_idx_imm(p, ip); 6096 src = instr_regarray_src_nbo(t, ip); 6097 regarray[idx] += src; 6098 6099 /* Thread. */ 6100 thread_ip_inc(p); 6101 } 6102 6103 static inline void 6104 instr_regadd_rim_exec(struct rte_swx_pipeline *p) 6105 { 6106 struct thread *t = &p->threads[p->thread_id]; 6107 struct instruction *ip = t->ip; 6108 uint64_t *regarray, idx, src; 6109 6110 TRACE("[Thread %2u] regadd (r[i] += m)\n", p->thread_id); 6111 6112 /* Structs. 
*/ 6113 regarray = instr_regarray_regarray(p, ip); 6114 idx = instr_regarray_idx_imm(p, ip); 6115 src = instr_regarray_src_hbo(t, ip); 6116 regarray[idx] += src; 6117 6118 /* Thread. */ 6119 thread_ip_inc(p); 6120 } 6121 6122 static inline void 6123 instr_regadd_rii_exec(struct rte_swx_pipeline *p) 6124 { 6125 struct thread *t = &p->threads[p->thread_id]; 6126 struct instruction *ip = t->ip; 6127 uint64_t *regarray, idx, src; 6128 6129 TRACE("[Thread %2u] regadd (r[i] += i)\n", p->thread_id); 6130 6131 /* Structs. */ 6132 regarray = instr_regarray_regarray(p, ip); 6133 idx = instr_regarray_idx_imm(p, ip); 6134 src = ip->regarray.dstsrc_val; 6135 regarray[idx] += src; 6136 6137 /* Thread. */ 6138 thread_ip_inc(p); 6139 } 6140 6141 /* 6142 * metarray. 6143 */ 6144 static struct metarray * 6145 metarray_find(struct rte_swx_pipeline *p, const char *name); 6146 6147 static int 6148 instr_metprefetch_translate(struct rte_swx_pipeline *p, 6149 struct action *action, 6150 char **tokens, 6151 int n_tokens, 6152 struct instruction *instr, 6153 struct instruction_data *data __rte_unused) 6154 { 6155 char *metarray = tokens[1], *idx = tokens[2]; 6156 struct metarray *m; 6157 struct field *fidx; 6158 uint32_t idx_struct_id, idx_val; 6159 6160 CHECK(n_tokens == 3, EINVAL); 6161 6162 m = metarray_find(p, metarray); 6163 CHECK(m, EINVAL); 6164 6165 /* METPREFETCH_H, METPREFETCH_M. */ 6166 fidx = struct_field_parse(p, action, idx, &idx_struct_id); 6167 if (fidx) { 6168 instr->type = INSTR_METPREFETCH_M; 6169 if (idx[0] == 'h') 6170 instr->type = INSTR_METPREFETCH_H; 6171 6172 instr->meter.metarray_id = m->id; 6173 instr->meter.idx.struct_id = (uint8_t)idx_struct_id; 6174 instr->meter.idx.n_bits = fidx->n_bits; 6175 instr->meter.idx.offset = fidx->offset / 8; 6176 return 0; 6177 } 6178 6179 /* METPREFETCH_I. */ 6180 idx_val = strtoul(idx, &idx, 0); 6181 CHECK(!idx[0], EINVAL); 6182 6183 instr->type = INSTR_METPREFETCH_I; 6184 instr->meter.metarray_id = m->id; 6185 instr->meter.idx_val = idx_val; 6186 return 0; 6187 } 6188 6189 static int 6190 instr_meter_translate(struct rte_swx_pipeline *p, 6191 struct action *action, 6192 char **tokens, 6193 int n_tokens, 6194 struct instruction *instr, 6195 struct instruction_data *data __rte_unused) 6196 { 6197 char *metarray = tokens[1], *idx = tokens[2], *length = tokens[3]; 6198 char *color_in = tokens[4], *color_out = tokens[5]; 6199 struct metarray *m; 6200 struct field *fidx, *flength, *fcin, *fcout; 6201 uint32_t idx_struct_id, length_struct_id; 6202 uint32_t color_in_struct_id, color_out_struct_id; 6203 6204 CHECK(n_tokens == 6, EINVAL); 6205 6206 m = metarray_find(p, metarray); 6207 CHECK(m, EINVAL); 6208 6209 fidx = struct_field_parse(p, action, idx, &idx_struct_id); 6210 6211 flength = struct_field_parse(p, action, length, &length_struct_id); 6212 CHECK(flength, EINVAL); 6213 6214 fcin = struct_field_parse(p, action, color_in, &color_in_struct_id); 6215 6216 fcout = struct_field_parse(p, NULL, color_out, &color_out_struct_id); 6217 CHECK(fcout, EINVAL); 6218 6219 /* index = HMEFT, length = HMEFT, color_in = MEFT, color_out = MEF. 
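 *
 * Example of the field/field form (hypothetical names): for
 * "meter meters m.meter_idx h.ipv4.total_len m.color_in m.color_out",
 * the index is a meta-data field and the length is a header field, so
 * the code below selects INSTR_METER_MHM.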
*/ 6220 if (fidx && fcin) { 6221 instr->type = INSTR_METER_MMM; 6222 if (idx[0] == 'h' && length[0] == 'h') 6223 instr->type = INSTR_METER_HHM; 6224 if (idx[0] == 'h' && length[0] != 'h') 6225 instr->type = INSTR_METER_HMM; 6226 if (idx[0] != 'h' && length[0] == 'h') 6227 instr->type = INSTR_METER_MHM; 6228 6229 instr->meter.metarray_id = m->id; 6230 6231 instr->meter.idx.struct_id = (uint8_t)idx_struct_id; 6232 instr->meter.idx.n_bits = fidx->n_bits; 6233 instr->meter.idx.offset = fidx->offset / 8; 6234 6235 instr->meter.length.struct_id = (uint8_t)length_struct_id; 6236 instr->meter.length.n_bits = flength->n_bits; 6237 instr->meter.length.offset = flength->offset / 8; 6238 6239 instr->meter.color_in.struct_id = (uint8_t)color_in_struct_id; 6240 instr->meter.color_in.n_bits = fcin->n_bits; 6241 instr->meter.color_in.offset = fcin->offset / 8; 6242 6243 instr->meter.color_out.struct_id = (uint8_t)color_out_struct_id; 6244 instr->meter.color_out.n_bits = fcout->n_bits; 6245 instr->meter.color_out.offset = fcout->offset / 8; 6246 6247 return 0; 6248 } 6249 6250 /* index = HMEFT, length = HMEFT, color_in = I, color_out = MEF. */ 6251 if (fidx && !fcin) { 6252 uint32_t color_in_val = strtoul(color_in, &color_in, 0); 6253 CHECK(!color_in[0], EINVAL); 6254 6255 instr->type = INSTR_METER_MMI; 6256 if (idx[0] == 'h' && length[0] == 'h') 6257 instr->type = INSTR_METER_HHI; 6258 if (idx[0] == 'h' && length[0] != 'h') 6259 instr->type = INSTR_METER_HMI; 6260 if (idx[0] != 'h' && length[0] == 'h') 6261 instr->type = INSTR_METER_MHI; 6262 6263 instr->meter.metarray_id = m->id; 6264 6265 instr->meter.idx.struct_id = (uint8_t)idx_struct_id; 6266 instr->meter.idx.n_bits = fidx->n_bits; 6267 instr->meter.idx.offset = fidx->offset / 8; 6268 6269 instr->meter.length.struct_id = (uint8_t)length_struct_id; 6270 instr->meter.length.n_bits = flength->n_bits; 6271 instr->meter.length.offset = flength->offset / 8; 6272 6273 instr->meter.color_in_val = color_in_val; 6274 6275 instr->meter.color_out.struct_id = (uint8_t)color_out_struct_id; 6276 instr->meter.color_out.n_bits = fcout->n_bits; 6277 instr->meter.color_out.offset = fcout->offset / 8; 6278 6279 return 0; 6280 } 6281 6282 /* index = I, length = HMEFT, color_in = MEFT, color_out = MEF. */ 6283 if (!fidx && fcin) { 6284 uint32_t idx_val; 6285 6286 idx_val = strtoul(idx, &idx, 0); 6287 CHECK(!idx[0], EINVAL); 6288 6289 instr->type = INSTR_METER_IMM; 6290 if (length[0] == 'h') 6291 instr->type = INSTR_METER_IHM; 6292 6293 instr->meter.metarray_id = m->id; 6294 6295 instr->meter.idx_val = idx_val; 6296 6297 instr->meter.length.struct_id = (uint8_t)length_struct_id; 6298 instr->meter.length.n_bits = flength->n_bits; 6299 instr->meter.length.offset = flength->offset / 8; 6300 6301 instr->meter.color_in.struct_id = (uint8_t)color_in_struct_id; 6302 instr->meter.color_in.n_bits = fcin->n_bits; 6303 instr->meter.color_in.offset = fcin->offset / 8; 6304 6305 instr->meter.color_out.struct_id = (uint8_t)color_out_struct_id; 6306 instr->meter.color_out.n_bits = fcout->n_bits; 6307 instr->meter.color_out.offset = fcout->offset / 8; 6308 6309 return 0; 6310 } 6311 6312 /* index = I, length = HMEFT, color_in = I, color_out = MEF. 
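 *
 * E.g. "meter meters 0 h.ipv4.total_len 0 m.color_out" (hypothetical
 * names): both the index and color_in are immediates (color_in 0 is
 * RTE_COLOR_GREEN), so the code below selects INSTR_METER_IHI.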
*/ 6313 if (!fidx && !fcin) { 6314 uint32_t idx_val, color_in_val; 6315 6316 idx_val = strtoul(idx, &idx, 0); 6317 CHECK(!idx[0], EINVAL); 6318 6319 color_in_val = strtoul(color_in, &color_in, 0); 6320 CHECK(!color_in[0], EINVAL); 6321 6322 instr->type = INSTR_METER_IMI; 6323 if (length[0] == 'h') 6324 instr->type = INSTR_METER_IHI; 6325 6326 instr->meter.metarray_id = m->id; 6327 6328 instr->meter.idx_val = idx_val; 6329 6330 instr->meter.length.struct_id = (uint8_t)length_struct_id; 6331 instr->meter.length.n_bits = flength->n_bits; 6332 instr->meter.length.offset = flength->offset / 8; 6333 6334 instr->meter.color_in_val = color_in_val; 6335 6336 instr->meter.color_out.struct_id = (uint8_t)color_out_struct_id; 6337 instr->meter.color_out.n_bits = fcout->n_bits; 6338 instr->meter.color_out.offset = fcout->offset / 8; 6339 6340 return 0; 6341 } 6342 6343 CHECK(0, EINVAL); 6344 } 6345 6346 static inline struct meter * 6347 instr_meter_idx_hbo(struct rte_swx_pipeline *p, struct thread *t, struct instruction *ip) 6348 { 6349 struct metarray_runtime *r = &p->metarray_runtime[ip->meter.metarray_id]; 6350 6351 uint8_t *idx_struct = t->structs[ip->meter.idx.struct_id]; 6352 uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->meter.idx.offset]; 6353 uint64_t idx64 = *idx64_ptr; 6354 uint64_t idx64_mask = UINT64_MAX >> (64 - (ip)->meter.idx.n_bits); 6355 uint64_t idx = idx64 & idx64_mask & r->size_mask; 6356 6357 return &r->metarray[idx]; 6358 } 6359 6360 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN 6361 6362 static inline struct meter * 6363 instr_meter_idx_nbo(struct rte_swx_pipeline *p, struct thread *t, struct instruction *ip) 6364 { 6365 struct metarray_runtime *r = &p->metarray_runtime[ip->meter.metarray_id]; 6366 6367 uint8_t *idx_struct = t->structs[ip->meter.idx.struct_id]; 6368 uint64_t *idx64_ptr = (uint64_t *)&idx_struct[ip->meter.idx.offset]; 6369 uint64_t idx64 = *idx64_ptr; 6370 uint64_t idx = (ntoh64(idx64) >> (64 - ip->meter.idx.n_bits)) & r->size_mask; 6371 6372 return &r->metarray[idx]; 6373 } 6374 6375 #else 6376 6377 #define instr_meter_idx_nbo instr_meter_idx_hbo 6378 6379 #endif 6380 6381 static inline struct meter * 6382 instr_meter_idx_imm(struct rte_swx_pipeline *p, struct instruction *ip) 6383 { 6384 struct metarray_runtime *r = &p->metarray_runtime[ip->meter.metarray_id]; 6385 6386 uint64_t idx = ip->meter.idx_val & r->size_mask; 6387 6388 return &r->metarray[idx]; 6389 } 6390 6391 static inline uint32_t 6392 instr_meter_length_hbo(struct thread *t, struct instruction *ip) 6393 { 6394 uint8_t *src_struct = t->structs[ip->meter.length.struct_id]; 6395 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->meter.length.offset]; 6396 uint64_t src64 = *src64_ptr; 6397 uint64_t src64_mask = UINT64_MAX >> (64 - (ip)->meter.length.n_bits); 6398 uint64_t src = src64 & src64_mask; 6399 6400 return (uint32_t)src; 6401 } 6402 6403 #if RTE_BYTE_ORDER == RTE_LITTLE_ENDIAN 6404 6405 static inline uint32_t 6406 instr_meter_length_nbo(struct thread *t, struct instruction *ip) 6407 { 6408 uint8_t *src_struct = t->structs[ip->meter.length.struct_id]; 6409 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->meter.length.offset]; 6410 uint64_t src64 = *src64_ptr; 6411 uint64_t src = ntoh64(src64) >> (64 - ip->meter.length.n_bits); 6412 6413 return (uint32_t)src; 6414 } 6415 6416 #else 6417 6418 #define instr_meter_length_nbo instr_meter_length_hbo 6419 6420 #endif 6421 6422 static inline enum rte_color 6423 instr_meter_color_in_hbo(struct thread *t, struct instruction *ip) 6424 { 6425 uint8_t 
*src_struct = t->structs[ip->meter.color_in.struct_id]; 6426 uint64_t *src64_ptr = (uint64_t *)&src_struct[ip->meter.color_in.offset]; 6427 uint64_t src64 = *src64_ptr; 6428 uint64_t src64_mask = UINT64_MAX >> (64 - ip->meter.color_in.n_bits); 6429 uint64_t src = src64 & src64_mask; 6430 6431 return (enum rte_color)src; 6432 } 6433 6434 static inline void 6435 instr_meter_color_out_hbo_set(struct thread *t, struct instruction *ip, enum rte_color color_out) 6436 { 6437 uint8_t *dst_struct = t->structs[ip->meter.color_out.struct_id]; 6438 uint64_t *dst64_ptr = (uint64_t *)&dst_struct[ip->meter.color_out.offset]; 6439 uint64_t dst64 = *dst64_ptr; 6440 uint64_t dst64_mask = UINT64_MAX >> (64 - ip->meter.color_out.n_bits); 6441 6442 uint64_t src = (uint64_t)color_out; 6443 6444 *dst64_ptr = (dst64 & ~dst64_mask) | (src & dst64_mask); 6445 } 6446 6447 static inline void 6448 instr_metprefetch_h_exec(struct rte_swx_pipeline *p) 6449 { 6450 struct thread *t = &p->threads[p->thread_id]; 6451 struct instruction *ip = t->ip; 6452 struct meter *m; 6453 6454 TRACE("[Thread %2u] metprefetch (h)\n", p->thread_id); 6455 6456 /* Structs. */ 6457 m = instr_meter_idx_nbo(p, t, ip); 6458 rte_prefetch0(m); 6459 6460 /* Thread. */ 6461 thread_ip_inc(p); 6462 } 6463 6464 static inline void 6465 instr_metprefetch_m_exec(struct rte_swx_pipeline *p) 6466 { 6467 struct thread *t = &p->threads[p->thread_id]; 6468 struct instruction *ip = t->ip; 6469 struct meter *m; 6470 6471 TRACE("[Thread %2u] metprefetch (m)\n", p->thread_id); 6472 6473 /* Structs. */ 6474 m = instr_meter_idx_hbo(p, t, ip); 6475 rte_prefetch0(m); 6476 6477 /* Thread. */ 6478 thread_ip_inc(p); 6479 } 6480 6481 static inline void 6482 instr_metprefetch_i_exec(struct rte_swx_pipeline *p) 6483 { 6484 struct thread *t = &p->threads[p->thread_id]; 6485 struct instruction *ip = t->ip; 6486 struct meter *m; 6487 6488 TRACE("[Thread %2u] metprefetch (i)\n", p->thread_id); 6489 6490 /* Structs. */ 6491 m = instr_meter_idx_imm(p, ip); 6492 rte_prefetch0(m); 6493 6494 /* Thread. */ 6495 thread_ip_inc(p); 6496 } 6497 6498 static inline void 6499 instr_meter_hhm_exec(struct rte_swx_pipeline *p) 6500 { 6501 struct thread *t = &p->threads[p->thread_id]; 6502 struct instruction *ip = t->ip; 6503 struct meter *m; 6504 uint64_t time, n_pkts, n_bytes; 6505 uint32_t length; 6506 enum rte_color color_in, color_out; 6507 6508 TRACE("[Thread %2u] meter (hhm)\n", p->thread_id); 6509 6510 /* Structs. */ 6511 m = instr_meter_idx_nbo(p, t, ip); 6512 rte_prefetch0(m->n_pkts); 6513 time = rte_get_tsc_cycles(); 6514 length = instr_meter_length_nbo(t, ip); 6515 color_in = instr_meter_color_in_hbo(t, ip); 6516 6517 color_out = rte_meter_trtcm_color_aware_check(&m->m, 6518 &m->profile->profile, 6519 time, 6520 length, 6521 color_in); 6522 6523 color_out &= m->color_mask; 6524 6525 n_pkts = m->n_pkts[color_out]; 6526 n_bytes = m->n_bytes[color_out]; 6527 6528 instr_meter_color_out_hbo_set(t, ip, color_out); 6529 6530 m->n_pkts[color_out] = n_pkts + 1; 6531 m->n_bytes[color_out] = n_bytes + length; 6532 6533 /* Thread. */ 6534 thread_ip_inc(p); 6535 } 6536 6537 static inline void 6538 instr_meter_hhi_exec(struct rte_swx_pipeline *p) 6539 { 6540 struct thread *t = &p->threads[p->thread_id]; 6541 struct instruction *ip = t->ip; 6542 struct meter *m; 6543 uint64_t time, n_pkts, n_bytes; 6544 uint32_t length; 6545 enum rte_color color_in, color_out; 6546 6547 TRACE("[Thread %2u] meter (hhi)\n", p->thread_id); 6548 6549 /* Structs. 
*/ 6550 m = instr_meter_idx_nbo(p, t, ip); 6551 rte_prefetch0(m->n_pkts); 6552 time = rte_get_tsc_cycles(); 6553 length = instr_meter_length_nbo(t, ip); 6554 color_in = (enum rte_color)ip->meter.color_in_val; 6555 6556 color_out = rte_meter_trtcm_color_aware_check(&m->m, 6557 &m->profile->profile, 6558 time, 6559 length, 6560 color_in); 6561 6562 color_out &= m->color_mask; 6563 6564 n_pkts = m->n_pkts[color_out]; 6565 n_bytes = m->n_bytes[color_out]; 6566 6567 instr_meter_color_out_hbo_set(t, ip, color_out); 6568 6569 m->n_pkts[color_out] = n_pkts + 1; 6570 m->n_bytes[color_out] = n_bytes + length; 6571 6572 /* Thread. */ 6573 thread_ip_inc(p); 6574 } 6575 6576 static inline void 6577 instr_meter_hmm_exec(struct rte_swx_pipeline *p) 6578 { 6579 struct thread *t = &p->threads[p->thread_id]; 6580 struct instruction *ip = t->ip; 6581 struct meter *m; 6582 uint64_t time, n_pkts, n_bytes; 6583 uint32_t length; 6584 enum rte_color color_in, color_out; 6585 6586 TRACE("[Thread %2u] meter (hmm)\n", p->thread_id); 6587 6588 /* Structs. */ 6589 m = instr_meter_idx_nbo(p, t, ip); 6590 rte_prefetch0(m->n_pkts); 6591 time = rte_get_tsc_cycles(); 6592 length = instr_meter_length_hbo(t, ip); 6593 color_in = instr_meter_color_in_hbo(t, ip); 6594 6595 color_out = rte_meter_trtcm_color_aware_check(&m->m, 6596 &m->profile->profile, 6597 time, 6598 length, 6599 color_in); 6600 6601 color_out &= m->color_mask; 6602 6603 n_pkts = m->n_pkts[color_out]; 6604 n_bytes = m->n_bytes[color_out]; 6605 6606 instr_meter_color_out_hbo_set(t, ip, color_out); 6607 6608 m->n_pkts[color_out] = n_pkts + 1; 6609 m->n_bytes[color_out] = n_bytes + length; 6610 6611 /* Thread. */ 6612 thread_ip_inc(p); 6613 } 6614 static inline void 6615 instr_meter_hmi_exec(struct rte_swx_pipeline *p) 6616 { 6617 struct thread *t = &p->threads[p->thread_id]; 6618 struct instruction *ip = t->ip; 6619 struct meter *m; 6620 uint64_t time, n_pkts, n_bytes; 6621 uint32_t length; 6622 enum rte_color color_in, color_out; 6623 6624 TRACE("[Thread %2u] meter (hmi)\n", p->thread_id); 6625 6626 /* Structs. */ 6627 m = instr_meter_idx_nbo(p, t, ip); 6628 rte_prefetch0(m->n_pkts); 6629 time = rte_get_tsc_cycles(); 6630 length = instr_meter_length_hbo(t, ip); 6631 color_in = (enum rte_color)ip->meter.color_in_val; 6632 6633 color_out = rte_meter_trtcm_color_aware_check(&m->m, 6634 &m->profile->profile, 6635 time, 6636 length, 6637 color_in); 6638 6639 color_out &= m->color_mask; 6640 6641 n_pkts = m->n_pkts[color_out]; 6642 n_bytes = m->n_bytes[color_out]; 6643 6644 instr_meter_color_out_hbo_set(t, ip, color_out); 6645 6646 m->n_pkts[color_out] = n_pkts + 1; 6647 m->n_bytes[color_out] = n_bytes + length; 6648 6649 /* Thread. */ 6650 thread_ip_inc(p); 6651 } 6652 6653 static inline void 6654 instr_meter_mhm_exec(struct rte_swx_pipeline *p) 6655 { 6656 struct thread *t = &p->threads[p->thread_id]; 6657 struct instruction *ip = t->ip; 6658 struct meter *m; 6659 uint64_t time, n_pkts, n_bytes; 6660 uint32_t length; 6661 enum rte_color color_in, color_out; 6662 6663 TRACE("[Thread %2u] meter (mhm)\n", p->thread_id); 6664 6665 /* Structs. 
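	 * All meter variants share the same body: look up the meter entry,
	 * run the color-aware trTCM check at the current TSC time, mask the
	 * result with the meter color mask, write the output color back and
	 * update the per-color packet and byte counters.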
*/ 6666 m = instr_meter_idx_hbo(p, t, ip); 6667 rte_prefetch0(m->n_pkts); 6668 time = rte_get_tsc_cycles(); 6669 length = instr_meter_length_nbo(t, ip); 6670 color_in = instr_meter_color_in_hbo(t, ip); 6671 6672 color_out = rte_meter_trtcm_color_aware_check(&m->m, 6673 &m->profile->profile, 6674 time, 6675 length, 6676 color_in); 6677 6678 color_out &= m->color_mask; 6679 6680 n_pkts = m->n_pkts[color_out]; 6681 n_bytes = m->n_bytes[color_out]; 6682 6683 instr_meter_color_out_hbo_set(t, ip, color_out); 6684 6685 m->n_pkts[color_out] = n_pkts + 1; 6686 m->n_bytes[color_out] = n_bytes + length; 6687 6688 /* Thread. */ 6689 thread_ip_inc(p); 6690 } 6691 6692 static inline void 6693 instr_meter_mhi_exec(struct rte_swx_pipeline *p) 6694 { 6695 struct thread *t = &p->threads[p->thread_id]; 6696 struct instruction *ip = t->ip; 6697 struct meter *m; 6698 uint64_t time, n_pkts, n_bytes; 6699 uint32_t length; 6700 enum rte_color color_in, color_out; 6701 6702 TRACE("[Thread %2u] meter (mhi)\n", p->thread_id); 6703 6704 /* Structs. */ 6705 m = instr_meter_idx_hbo(p, t, ip); 6706 rte_prefetch0(m->n_pkts); 6707 time = rte_get_tsc_cycles(); 6708 length = instr_meter_length_nbo(t, ip); 6709 color_in = (enum rte_color)ip->meter.color_in_val; 6710 6711 color_out = rte_meter_trtcm_color_aware_check(&m->m, 6712 &m->profile->profile, 6713 time, 6714 length, 6715 color_in); 6716 6717 color_out &= m->color_mask; 6718 6719 n_pkts = m->n_pkts[color_out]; 6720 n_bytes = m->n_bytes[color_out]; 6721 6722 instr_meter_color_out_hbo_set(t, ip, color_out); 6723 6724 m->n_pkts[color_out] = n_pkts + 1; 6725 m->n_bytes[color_out] = n_bytes + length; 6726 6727 /* Thread. */ 6728 thread_ip_inc(p); 6729 } 6730 6731 static inline void 6732 instr_meter_mmm_exec(struct rte_swx_pipeline *p) 6733 { 6734 struct thread *t = &p->threads[p->thread_id]; 6735 struct instruction *ip = t->ip; 6736 struct meter *m; 6737 uint64_t time, n_pkts, n_bytes; 6738 uint32_t length; 6739 enum rte_color color_in, color_out; 6740 6741 TRACE("[Thread %2u] meter (mmm)\n", p->thread_id); 6742 6743 /* Structs. */ 6744 m = instr_meter_idx_hbo(p, t, ip); 6745 rte_prefetch0(m->n_pkts); 6746 time = rte_get_tsc_cycles(); 6747 length = instr_meter_length_hbo(t, ip); 6748 color_in = instr_meter_color_in_hbo(t, ip); 6749 6750 color_out = rte_meter_trtcm_color_aware_check(&m->m, 6751 &m->profile->profile, 6752 time, 6753 length, 6754 color_in); 6755 6756 color_out &= m->color_mask; 6757 6758 n_pkts = m->n_pkts[color_out]; 6759 n_bytes = m->n_bytes[color_out]; 6760 6761 instr_meter_color_out_hbo_set(t, ip, color_out); 6762 6763 m->n_pkts[color_out] = n_pkts + 1; 6764 m->n_bytes[color_out] = n_bytes + length; 6765 6766 /* Thread. */ 6767 thread_ip_inc(p); 6768 } 6769 6770 static inline void 6771 instr_meter_mmi_exec(struct rte_swx_pipeline *p) 6772 { 6773 struct thread *t = &p->threads[p->thread_id]; 6774 struct instruction *ip = t->ip; 6775 struct meter *m; 6776 uint64_t time, n_pkts, n_bytes; 6777 uint32_t length; 6778 enum rte_color color_in, color_out; 6779 6780 TRACE("[Thread %2u] meter (mmi)\n", p->thread_id); 6781 6782 /* Structs. 
*/ 6783 m = instr_meter_idx_hbo(p, t, ip); 6784 rte_prefetch0(m->n_pkts); 6785 time = rte_get_tsc_cycles(); 6786 length = instr_meter_length_hbo(t, ip); 6787 color_in = (enum rte_color)ip->meter.color_in_val; 6788 6789 color_out = rte_meter_trtcm_color_aware_check(&m->m, 6790 &m->profile->profile, 6791 time, 6792 length, 6793 color_in); 6794 6795 color_out &= m->color_mask; 6796 6797 n_pkts = m->n_pkts[color_out]; 6798 n_bytes = m->n_bytes[color_out]; 6799 6800 instr_meter_color_out_hbo_set(t, ip, color_out); 6801 6802 m->n_pkts[color_out] = n_pkts + 1; 6803 m->n_bytes[color_out] = n_bytes + length; 6804 6805 /* Thread. */ 6806 thread_ip_inc(p); 6807 } 6808 6809 static inline void 6810 instr_meter_ihm_exec(struct rte_swx_pipeline *p) 6811 { 6812 struct thread *t = &p->threads[p->thread_id]; 6813 struct instruction *ip = t->ip; 6814 struct meter *m; 6815 uint64_t time, n_pkts, n_bytes; 6816 uint32_t length; 6817 enum rte_color color_in, color_out; 6818 6819 TRACE("[Thread %2u] meter (ihm)\n", p->thread_id); 6820 6821 /* Structs. */ 6822 m = instr_meter_idx_imm(p, ip); 6823 rte_prefetch0(m->n_pkts); 6824 time = rte_get_tsc_cycles(); 6825 length = instr_meter_length_nbo(t, ip); 6826 color_in = instr_meter_color_in_hbo(t, ip); 6827 6828 color_out = rte_meter_trtcm_color_aware_check(&m->m, 6829 &m->profile->profile, 6830 time, 6831 length, 6832 color_in); 6833 6834 color_out &= m->color_mask; 6835 6836 n_pkts = m->n_pkts[color_out]; 6837 n_bytes = m->n_bytes[color_out]; 6838 6839 instr_meter_color_out_hbo_set(t, ip, color_out); 6840 6841 m->n_pkts[color_out] = n_pkts + 1; 6842 m->n_bytes[color_out] = n_bytes + length; 6843 6844 /* Thread. */ 6845 thread_ip_inc(p); 6846 } 6847 6848 static inline void 6849 instr_meter_ihi_exec(struct rte_swx_pipeline *p) 6850 { 6851 struct thread *t = &p->threads[p->thread_id]; 6852 struct instruction *ip = t->ip; 6853 struct meter *m; 6854 uint64_t time, n_pkts, n_bytes; 6855 uint32_t length; 6856 enum rte_color color_in, color_out; 6857 6858 TRACE("[Thread %2u] meter (ihi)\n", p->thread_id); 6859 6860 /* Structs. */ 6861 m = instr_meter_idx_imm(p, ip); 6862 rte_prefetch0(m->n_pkts); 6863 time = rte_get_tsc_cycles(); 6864 length = instr_meter_length_nbo(t, ip); 6865 color_in = (enum rte_color)ip->meter.color_in_val; 6866 6867 color_out = rte_meter_trtcm_color_aware_check(&m->m, 6868 &m->profile->profile, 6869 time, 6870 length, 6871 color_in); 6872 6873 color_out &= m->color_mask; 6874 6875 n_pkts = m->n_pkts[color_out]; 6876 n_bytes = m->n_bytes[color_out]; 6877 6878 instr_meter_color_out_hbo_set(t, ip, color_out); 6879 6880 m->n_pkts[color_out] = n_pkts + 1; 6881 m->n_bytes[color_out] = n_bytes + length; 6882 6883 /* Thread. */ 6884 thread_ip_inc(p); 6885 } 6886 6887 static inline void 6888 instr_meter_imm_exec(struct rte_swx_pipeline *p) 6889 { 6890 struct thread *t = &p->threads[p->thread_id]; 6891 struct instruction *ip = t->ip; 6892 struct meter *m; 6893 uint64_t time, n_pkts, n_bytes; 6894 uint32_t length; 6895 enum rte_color color_in, color_out; 6896 6897 TRACE("[Thread %2u] meter (imm)\n", p->thread_id); 6898 6899 /* Structs. 
*/ 6900 m = instr_meter_idx_imm(p, ip); 6901 rte_prefetch0(m->n_pkts); 6902 time = rte_get_tsc_cycles(); 6903 length = instr_meter_length_hbo(t, ip); 6904 color_in = instr_meter_color_in_hbo(t, ip); 6905 6906 color_out = rte_meter_trtcm_color_aware_check(&m->m, 6907 &m->profile->profile, 6908 time, 6909 length, 6910 color_in); 6911 6912 color_out &= m->color_mask; 6913 6914 n_pkts = m->n_pkts[color_out]; 6915 n_bytes = m->n_bytes[color_out]; 6916 6917 instr_meter_color_out_hbo_set(t, ip, color_out); 6918 6919 m->n_pkts[color_out] = n_pkts + 1; 6920 m->n_bytes[color_out] = n_bytes + length; 6921 6922 /* Thread. */ 6923 thread_ip_inc(p); 6924 } 6925 static inline void 6926 instr_meter_imi_exec(struct rte_swx_pipeline *p) 6927 { 6928 struct thread *t = &p->threads[p->thread_id]; 6929 struct instruction *ip = t->ip; 6930 struct meter *m; 6931 uint64_t time, n_pkts, n_bytes; 6932 uint32_t length; 6933 enum rte_color color_in, color_out; 6934 6935 TRACE("[Thread %2u] meter (imi)\n", p->thread_id); 6936 6937 /* Structs. */ 6938 m = instr_meter_idx_imm(p, ip); 6939 rte_prefetch0(m->n_pkts); 6940 time = rte_get_tsc_cycles(); 6941 length = instr_meter_length_hbo(t, ip); 6942 color_in = (enum rte_color)ip->meter.color_in_val; 6943 6944 color_out = rte_meter_trtcm_color_aware_check(&m->m, 6945 &m->profile->profile, 6946 time, 6947 length, 6948 color_in); 6949 6950 color_out &= m->color_mask; 6951 6952 n_pkts = m->n_pkts[color_out]; 6953 n_bytes = m->n_bytes[color_out]; 6954 6955 instr_meter_color_out_hbo_set(t, ip, color_out); 6956 6957 m->n_pkts[color_out] = n_pkts + 1; 6958 m->n_bytes[color_out] = n_bytes + length; 6959 6960 /* Thread. */ 6961 thread_ip_inc(p); 6962 } 6963 6964 /* 6965 * jmp. 6966 */ 6967 static struct action * 6968 action_find(struct rte_swx_pipeline *p, const char *name); 6969 6970 static int 6971 instr_jmp_translate(struct rte_swx_pipeline *p __rte_unused, 6972 struct action *action __rte_unused, 6973 char **tokens, 6974 int n_tokens, 6975 struct instruction *instr, 6976 struct instruction_data *data) 6977 { 6978 CHECK(n_tokens == 2, EINVAL); 6979 6980 strcpy(data->jmp_label, tokens[1]); 6981 6982 instr->type = INSTR_JMP; 6983 instr->jmp.ip = NULL; /* Resolved later. */ 6984 return 0; 6985 } 6986 6987 static int 6988 instr_jmp_valid_translate(struct rte_swx_pipeline *p, 6989 struct action *action __rte_unused, 6990 char **tokens, 6991 int n_tokens, 6992 struct instruction *instr, 6993 struct instruction_data *data) 6994 { 6995 struct header *h; 6996 6997 CHECK(n_tokens == 3, EINVAL); 6998 6999 strcpy(data->jmp_label, tokens[1]); 7000 7001 h = header_parse(p, tokens[2]); 7002 CHECK(h, EINVAL); 7003 7004 instr->type = INSTR_JMP_VALID; 7005 instr->jmp.ip = NULL; /* Resolved later. */ 7006 instr->jmp.header_id = h->id; 7007 return 0; 7008 } 7009 7010 static int 7011 instr_jmp_invalid_translate(struct rte_swx_pipeline *p, 7012 struct action *action __rte_unused, 7013 char **tokens, 7014 int n_tokens, 7015 struct instruction *instr, 7016 struct instruction_data *data) 7017 { 7018 struct header *h; 7019 7020 CHECK(n_tokens == 3, EINVAL); 7021 7022 strcpy(data->jmp_label, tokens[1]); 7023 7024 h = header_parse(p, tokens[2]); 7025 CHECK(h, EINVAL); 7026 7027 instr->type = INSTR_JMP_INVALID; 7028 instr->jmp.ip = NULL; /* Resolved later. 
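	 * All jmp variants only record the target label at translation time;
	 * the actual instruction pointer is filled in by instr_jmp_resolve()
	 * once the complete instruction array is known.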
*/ 7029 instr->jmp.header_id = h->id; 7030 return 0; 7031 } 7032 7033 static int 7034 instr_jmp_hit_translate(struct rte_swx_pipeline *p __rte_unused, 7035 struct action *action, 7036 char **tokens, 7037 int n_tokens, 7038 struct instruction *instr, 7039 struct instruction_data *data) 7040 { 7041 CHECK(!action, EINVAL); 7042 CHECK(n_tokens == 2, EINVAL); 7043 7044 strcpy(data->jmp_label, tokens[1]); 7045 7046 instr->type = INSTR_JMP_HIT; 7047 instr->jmp.ip = NULL; /* Resolved later. */ 7048 return 0; 7049 } 7050 7051 static int 7052 instr_jmp_miss_translate(struct rte_swx_pipeline *p __rte_unused, 7053 struct action *action, 7054 char **tokens, 7055 int n_tokens, 7056 struct instruction *instr, 7057 struct instruction_data *data) 7058 { 7059 CHECK(!action, EINVAL); 7060 CHECK(n_tokens == 2, EINVAL); 7061 7062 strcpy(data->jmp_label, tokens[1]); 7063 7064 instr->type = INSTR_JMP_MISS; 7065 instr->jmp.ip = NULL; /* Resolved later. */ 7066 return 0; 7067 } 7068 7069 static int 7070 instr_jmp_action_hit_translate(struct rte_swx_pipeline *p, 7071 struct action *action, 7072 char **tokens, 7073 int n_tokens, 7074 struct instruction *instr, 7075 struct instruction_data *data) 7076 { 7077 struct action *a; 7078 7079 CHECK(!action, EINVAL); 7080 CHECK(n_tokens == 3, EINVAL); 7081 7082 strcpy(data->jmp_label, tokens[1]); 7083 7084 a = action_find(p, tokens[2]); 7085 CHECK(a, EINVAL); 7086 7087 instr->type = INSTR_JMP_ACTION_HIT; 7088 instr->jmp.ip = NULL; /* Resolved later. */ 7089 instr->jmp.action_id = a->id; 7090 return 0; 7091 } 7092 7093 static int 7094 instr_jmp_action_miss_translate(struct rte_swx_pipeline *p, 7095 struct action *action, 7096 char **tokens, 7097 int n_tokens, 7098 struct instruction *instr, 7099 struct instruction_data *data) 7100 { 7101 struct action *a; 7102 7103 CHECK(!action, EINVAL); 7104 CHECK(n_tokens == 3, EINVAL); 7105 7106 strcpy(data->jmp_label, tokens[1]); 7107 7108 a = action_find(p, tokens[2]); 7109 CHECK(a, EINVAL); 7110 7111 instr->type = INSTR_JMP_ACTION_MISS; 7112 instr->jmp.ip = NULL; /* Resolved later. */ 7113 instr->jmp.action_id = a->id; 7114 return 0; 7115 } 7116 7117 static int 7118 instr_jmp_eq_translate(struct rte_swx_pipeline *p, 7119 struct action *action, 7120 char **tokens, 7121 int n_tokens, 7122 struct instruction *instr, 7123 struct instruction_data *data) 7124 { 7125 char *a = tokens[2], *b = tokens[3]; 7126 struct field *fa, *fb; 7127 uint64_t b_val; 7128 uint32_t a_struct_id, b_struct_id; 7129 7130 CHECK(n_tokens == 4, EINVAL); 7131 7132 strcpy(data->jmp_label, tokens[1]); 7133 7134 fa = struct_field_parse(p, action, a, &a_struct_id); 7135 CHECK(fa, EINVAL); 7136 7137 /* JMP_EQ, JMP_EQ_MH, JMP_EQ_HM, JMP_EQ_HH. */ 7138 fb = struct_field_parse(p, action, b, &b_struct_id); 7139 if (fb) { 7140 instr->type = INSTR_JMP_EQ; 7141 if (a[0] != 'h' && b[0] == 'h') 7142 instr->type = INSTR_JMP_EQ_MH; 7143 if (a[0] == 'h' && b[0] != 'h') 7144 instr->type = INSTR_JMP_EQ_HM; 7145 if (a[0] == 'h' && b[0] == 'h') 7146 instr->type = INSTR_JMP_EQ_HH; 7147 instr->jmp.ip = NULL; /* Resolved later. */ 7148 7149 instr->jmp.a.struct_id = (uint8_t)a_struct_id; 7150 instr->jmp.a.n_bits = fa->n_bits; 7151 instr->jmp.a.offset = fa->offset / 8; 7152 instr->jmp.b.struct_id = (uint8_t)b_struct_id; 7153 instr->jmp.b.n_bits = fb->n_bits; 7154 instr->jmp.b.offset = fb->offset / 8; 7155 return 0; 7156 } 7157 7158 /* JMP_EQ_I. 
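	 * Example with illustrative field and label names: "jmpeq L1
	 * h.ipv4.ttl 0x40". When the first operand is a header field, the
	 * immediate is byte-swapped and shifted to the field width so that it
	 * matches the in-memory representation of the big-endian header field
	 * it is compared against.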
*/ 7159 b_val = strtoull(b, &b, 0); 7160 CHECK(!b[0], EINVAL); 7161 7162 if (a[0] == 'h') 7163 b_val = hton64(b_val) >> (64 - fa->n_bits); 7164 7165 instr->type = INSTR_JMP_EQ_I; 7166 instr->jmp.ip = NULL; /* Resolved later. */ 7167 instr->jmp.a.struct_id = (uint8_t)a_struct_id; 7168 instr->jmp.a.n_bits = fa->n_bits; 7169 instr->jmp.a.offset = fa->offset / 8; 7170 instr->jmp.b_val = b_val; 7171 return 0; 7172 } 7173 7174 static int 7175 instr_jmp_neq_translate(struct rte_swx_pipeline *p, 7176 struct action *action, 7177 char **tokens, 7178 int n_tokens, 7179 struct instruction *instr, 7180 struct instruction_data *data) 7181 { 7182 char *a = tokens[2], *b = tokens[3]; 7183 struct field *fa, *fb; 7184 uint64_t b_val; 7185 uint32_t a_struct_id, b_struct_id; 7186 7187 CHECK(n_tokens == 4, EINVAL); 7188 7189 strcpy(data->jmp_label, tokens[1]); 7190 7191 fa = struct_field_parse(p, action, a, &a_struct_id); 7192 CHECK(fa, EINVAL); 7193 7194 /* JMP_NEQ, JMP_NEQ_MH, JMP_NEQ_HM, JMP_NEQ_HH. */ 7195 fb = struct_field_parse(p, action, b, &b_struct_id); 7196 if (fb) { 7197 instr->type = INSTR_JMP_NEQ; 7198 if (a[0] != 'h' && b[0] == 'h') 7199 instr->type = INSTR_JMP_NEQ_MH; 7200 if (a[0] == 'h' && b[0] != 'h') 7201 instr->type = INSTR_JMP_NEQ_HM; 7202 if (a[0] == 'h' && b[0] == 'h') 7203 instr->type = INSTR_JMP_NEQ_HH; 7204 instr->jmp.ip = NULL; /* Resolved later. */ 7205 7206 instr->jmp.a.struct_id = (uint8_t)a_struct_id; 7207 instr->jmp.a.n_bits = fa->n_bits; 7208 instr->jmp.a.offset = fa->offset / 8; 7209 instr->jmp.b.struct_id = (uint8_t)b_struct_id; 7210 instr->jmp.b.n_bits = fb->n_bits; 7211 instr->jmp.b.offset = fb->offset / 8; 7212 return 0; 7213 } 7214 7215 /* JMP_NEQ_I. */ 7216 b_val = strtoull(b, &b, 0); 7217 CHECK(!b[0], EINVAL); 7218 7219 if (a[0] == 'h') 7220 b_val = hton64(b_val) >> (64 - fa->n_bits); 7221 7222 instr->type = INSTR_JMP_NEQ_I; 7223 instr->jmp.ip = NULL; /* Resolved later. */ 7224 instr->jmp.a.struct_id = (uint8_t)a_struct_id; 7225 instr->jmp.a.n_bits = fa->n_bits; 7226 instr->jmp.a.offset = fa->offset / 8; 7227 instr->jmp.b_val = b_val; 7228 return 0; 7229 } 7230 7231 static int 7232 instr_jmp_lt_translate(struct rte_swx_pipeline *p, 7233 struct action *action, 7234 char **tokens, 7235 int n_tokens, 7236 struct instruction *instr, 7237 struct instruction_data *data) 7238 { 7239 char *a = tokens[2], *b = tokens[3]; 7240 struct field *fa, *fb; 7241 uint64_t b_val; 7242 uint32_t a_struct_id, b_struct_id; 7243 7244 CHECK(n_tokens == 4, EINVAL); 7245 7246 strcpy(data->jmp_label, tokens[1]); 7247 7248 fa = struct_field_parse(p, action, a, &a_struct_id); 7249 CHECK(fa, EINVAL); 7250 7251 /* JMP_LT, JMP_LT_MH, JMP_LT_HM, JMP_LT_HH. */ 7252 fb = struct_field_parse(p, action, b, &b_struct_id); 7253 if (fb) { 7254 instr->type = INSTR_JMP_LT; 7255 if (a[0] == 'h' && b[0] != 'h') 7256 instr->type = INSTR_JMP_LT_HM; 7257 if (a[0] != 'h' && b[0] == 'h') 7258 instr->type = INSTR_JMP_LT_MH; 7259 if (a[0] == 'h' && b[0] == 'h') 7260 instr->type = INSTR_JMP_LT_HH; 7261 instr->jmp.ip = NULL; /* Resolved later. */ 7262 7263 instr->jmp.a.struct_id = (uint8_t)a_struct_id; 7264 instr->jmp.a.n_bits = fa->n_bits; 7265 instr->jmp.a.offset = fa->offset / 8; 7266 instr->jmp.b.struct_id = (uint8_t)b_struct_id; 7267 instr->jmp.b.n_bits = fb->n_bits; 7268 instr->jmp.b.offset = fb->offset / 8; 7269 return 0; 7270 } 7271 7272 /* JMP_LT_MI, JMP_LT_HI. 
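	 * Example with illustrative names: "jmplt LABEL_DROP m.length 64"
	 * selects JMP_LT_MI, while "jmplt LABEL_DROP h.ipv4.ttl 2" selects
	 * JMP_LT_HI. Note that, unlike the equality jumps above, the
	 * immediate is not converted to network byte order at translation
	 * time.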
*/ 7273 b_val = strtoull(b, &b, 0); 7274 CHECK(!b[0], EINVAL); 7275 7276 instr->type = INSTR_JMP_LT_MI; 7277 if (a[0] == 'h') 7278 instr->type = INSTR_JMP_LT_HI; 7279 instr->jmp.ip = NULL; /* Resolved later. */ 7280 7281 instr->jmp.a.struct_id = (uint8_t)a_struct_id; 7282 instr->jmp.a.n_bits = fa->n_bits; 7283 instr->jmp.a.offset = fa->offset / 8; 7284 instr->jmp.b_val = b_val; 7285 return 0; 7286 } 7287 7288 static int 7289 instr_jmp_gt_translate(struct rte_swx_pipeline *p, 7290 struct action *action, 7291 char **tokens, 7292 int n_tokens, 7293 struct instruction *instr, 7294 struct instruction_data *data) 7295 { 7296 char *a = tokens[2], *b = tokens[3]; 7297 struct field *fa, *fb; 7298 uint64_t b_val; 7299 uint32_t a_struct_id, b_struct_id; 7300 7301 CHECK(n_tokens == 4, EINVAL); 7302 7303 strcpy(data->jmp_label, tokens[1]); 7304 7305 fa = struct_field_parse(p, action, a, &a_struct_id); 7306 CHECK(fa, EINVAL); 7307 7308 /* JMP_GT, JMP_GT_MH, JMP_GT_HM, JMP_GT_HH. */ 7309 fb = struct_field_parse(p, action, b, &b_struct_id); 7310 if (fb) { 7311 instr->type = INSTR_JMP_GT; 7312 if (a[0] == 'h' && b[0] != 'h') 7313 instr->type = INSTR_JMP_GT_HM; 7314 if (a[0] != 'h' && b[0] == 'h') 7315 instr->type = INSTR_JMP_GT_MH; 7316 if (a[0] == 'h' && b[0] == 'h') 7317 instr->type = INSTR_JMP_GT_HH; 7318 instr->jmp.ip = NULL; /* Resolved later. */ 7319 7320 instr->jmp.a.struct_id = (uint8_t)a_struct_id; 7321 instr->jmp.a.n_bits = fa->n_bits; 7322 instr->jmp.a.offset = fa->offset / 8; 7323 instr->jmp.b.struct_id = (uint8_t)b_struct_id; 7324 instr->jmp.b.n_bits = fb->n_bits; 7325 instr->jmp.b.offset = fb->offset / 8; 7326 return 0; 7327 } 7328 7329 /* JMP_GT_MI, JMP_GT_HI. */ 7330 b_val = strtoull(b, &b, 0); 7331 CHECK(!b[0], EINVAL); 7332 7333 instr->type = INSTR_JMP_GT_MI; 7334 if (a[0] == 'h') 7335 instr->type = INSTR_JMP_GT_HI; 7336 instr->jmp.ip = NULL; /* Resolved later. */ 7337 7338 instr->jmp.a.struct_id = (uint8_t)a_struct_id; 7339 instr->jmp.a.n_bits = fa->n_bits; 7340 instr->jmp.a.offset = fa->offset / 8; 7341 instr->jmp.b_val = b_val; 7342 return 0; 7343 } 7344 7345 static inline void 7346 instr_jmp_exec(struct rte_swx_pipeline *p) 7347 { 7348 struct thread *t = &p->threads[p->thread_id]; 7349 struct instruction *ip = t->ip; 7350 7351 TRACE("[Thread %2u] jmp\n", p->thread_id); 7352 7353 thread_ip_set(t, ip->jmp.ip); 7354 } 7355 7356 static inline void 7357 instr_jmp_valid_exec(struct rte_swx_pipeline *p) 7358 { 7359 struct thread *t = &p->threads[p->thread_id]; 7360 struct instruction *ip = t->ip; 7361 uint32_t header_id = ip->jmp.header_id; 7362 7363 TRACE("[Thread %2u] jmpv\n", p->thread_id); 7364 7365 t->ip = HEADER_VALID(t, header_id) ? ip->jmp.ip : (t->ip + 1); 7366 } 7367 7368 static inline void 7369 instr_jmp_invalid_exec(struct rte_swx_pipeline *p) 7370 { 7371 struct thread *t = &p->threads[p->thread_id]; 7372 struct instruction *ip = t->ip; 7373 uint32_t header_id = ip->jmp.header_id; 7374 7375 TRACE("[Thread %2u] jmpnv\n", p->thread_id); 7376 7377 t->ip = HEADER_VALID(t, header_id) ? 
(t->ip + 1) : ip->jmp.ip; 7378 } 7379 7380 static inline void 7381 instr_jmp_hit_exec(struct rte_swx_pipeline *p) 7382 { 7383 struct thread *t = &p->threads[p->thread_id]; 7384 struct instruction *ip = t->ip; 7385 struct instruction *ip_next[] = {t->ip + 1, ip->jmp.ip}; 7386 7387 TRACE("[Thread %2u] jmph\n", p->thread_id); 7388 7389 t->ip = ip_next[t->hit]; 7390 } 7391 7392 static inline void 7393 instr_jmp_miss_exec(struct rte_swx_pipeline *p) 7394 { 7395 struct thread *t = &p->threads[p->thread_id]; 7396 struct instruction *ip = t->ip; 7397 struct instruction *ip_next[] = {ip->jmp.ip, t->ip + 1}; 7398 7399 TRACE("[Thread %2u] jmpnh\n", p->thread_id); 7400 7401 t->ip = ip_next[t->hit]; 7402 } 7403 7404 static inline void 7405 instr_jmp_action_hit_exec(struct rte_swx_pipeline *p) 7406 { 7407 struct thread *t = &p->threads[p->thread_id]; 7408 struct instruction *ip = t->ip; 7409 7410 TRACE("[Thread %2u] jmpa\n", p->thread_id); 7411 7412 t->ip = (ip->jmp.action_id == t->action_id) ? ip->jmp.ip : (t->ip + 1); 7413 } 7414 7415 static inline void 7416 instr_jmp_action_miss_exec(struct rte_swx_pipeline *p) 7417 { 7418 struct thread *t = &p->threads[p->thread_id]; 7419 struct instruction *ip = t->ip; 7420 7421 TRACE("[Thread %2u] jmpna\n", p->thread_id); 7422 7423 t->ip = (ip->jmp.action_id == t->action_id) ? (t->ip + 1) : ip->jmp.ip; 7424 } 7425 7426 static inline void 7427 instr_jmp_eq_exec(struct rte_swx_pipeline *p) 7428 { 7429 struct thread *t = &p->threads[p->thread_id]; 7430 struct instruction *ip = t->ip; 7431 7432 TRACE("[Thread %2u] jmpeq\n", p->thread_id); 7433 7434 JMP_CMP(t, ip, ==); 7435 } 7436 7437 static inline void 7438 instr_jmp_eq_mh_exec(struct rte_swx_pipeline *p) 7439 { 7440 struct thread *t = &p->threads[p->thread_id]; 7441 struct instruction *ip = t->ip; 7442 7443 TRACE("[Thread %2u] jmpeq (mh)\n", p->thread_id); 7444 7445 JMP_CMP_MH(t, ip, ==); 7446 } 7447 7448 static inline void 7449 instr_jmp_eq_hm_exec(struct rte_swx_pipeline *p) 7450 { 7451 struct thread *t = &p->threads[p->thread_id]; 7452 struct instruction *ip = t->ip; 7453 7454 TRACE("[Thread %2u] jmpeq (hm)\n", p->thread_id); 7455 7456 JMP_CMP_HM(t, ip, ==); 7457 } 7458 7459 static inline void 7460 instr_jmp_eq_hh_exec(struct rte_swx_pipeline *p) 7461 { 7462 struct thread *t = &p->threads[p->thread_id]; 7463 struct instruction *ip = t->ip; 7464 7465 TRACE("[Thread %2u] jmpeq (hh)\n", p->thread_id); 7466 7467 JMP_CMP_HH_FAST(t, ip, ==); 7468 } 7469 7470 static inline void 7471 instr_jmp_eq_i_exec(struct rte_swx_pipeline *p) 7472 { 7473 struct thread *t = &p->threads[p->thread_id]; 7474 struct instruction *ip = t->ip; 7475 7476 TRACE("[Thread %2u] jmpeq (i)\n", p->thread_id); 7477 7478 JMP_CMP_I(t, ip, ==); 7479 } 7480 7481 static inline void 7482 instr_jmp_neq_exec(struct rte_swx_pipeline *p) 7483 { 7484 struct thread *t = &p->threads[p->thread_id]; 7485 struct instruction *ip = t->ip; 7486 7487 TRACE("[Thread %2u] jmpneq\n", p->thread_id); 7488 7489 JMP_CMP(t, ip, !=); 7490 } 7491 7492 static inline void 7493 instr_jmp_neq_mh_exec(struct rte_swx_pipeline *p) 7494 { 7495 struct thread *t = &p->threads[p->thread_id]; 7496 struct instruction *ip = t->ip; 7497 7498 TRACE("[Thread %2u] jmpneq (mh)\n", p->thread_id); 7499 7500 JMP_CMP_MH(t, ip, !=); 7501 } 7502 7503 static inline void 7504 instr_jmp_neq_hm_exec(struct rte_swx_pipeline *p) 7505 { 7506 struct thread *t = &p->threads[p->thread_id]; 7507 struct instruction *ip = t->ip; 7508 7509 TRACE("[Thread %2u] jmpneq (hm)\n", p->thread_id); 7510 7511 JMP_CMP_HM(t, ip, 
!=); 7512 } 7513 7514 static inline void 7515 instr_jmp_neq_hh_exec(struct rte_swx_pipeline *p) 7516 { 7517 struct thread *t = &p->threads[p->thread_id]; 7518 struct instruction *ip = t->ip; 7519 7520 TRACE("[Thread %2u] jmpneq (hh)\n", p->thread_id); 7521 7522 JMP_CMP_HH_FAST(t, ip, !=); 7523 } 7524 7525 static inline void 7526 instr_jmp_neq_i_exec(struct rte_swx_pipeline *p) 7527 { 7528 struct thread *t = &p->threads[p->thread_id]; 7529 struct instruction *ip = t->ip; 7530 7531 TRACE("[Thread %2u] jmpneq (i)\n", p->thread_id); 7532 7533 JMP_CMP_I(t, ip, !=); 7534 } 7535 7536 static inline void 7537 instr_jmp_lt_exec(struct rte_swx_pipeline *p) 7538 { 7539 struct thread *t = &p->threads[p->thread_id]; 7540 struct instruction *ip = t->ip; 7541 7542 TRACE("[Thread %2u] jmplt\n", p->thread_id); 7543 7544 JMP_CMP(t, ip, <); 7545 } 7546 7547 static inline void 7548 instr_jmp_lt_mh_exec(struct rte_swx_pipeline *p) 7549 { 7550 struct thread *t = &p->threads[p->thread_id]; 7551 struct instruction *ip = t->ip; 7552 7553 TRACE("[Thread %2u] jmplt (mh)\n", p->thread_id); 7554 7555 JMP_CMP_MH(t, ip, <); 7556 } 7557 7558 static inline void 7559 instr_jmp_lt_hm_exec(struct rte_swx_pipeline *p) 7560 { 7561 struct thread *t = &p->threads[p->thread_id]; 7562 struct instruction *ip = t->ip; 7563 7564 TRACE("[Thread %2u] jmplt (hm)\n", p->thread_id); 7565 7566 JMP_CMP_HM(t, ip, <); 7567 } 7568 7569 static inline void 7570 instr_jmp_lt_hh_exec(struct rte_swx_pipeline *p) 7571 { 7572 struct thread *t = &p->threads[p->thread_id]; 7573 struct instruction *ip = t->ip; 7574 7575 TRACE("[Thread %2u] jmplt (hh)\n", p->thread_id); 7576 7577 JMP_CMP_HH(t, ip, <); 7578 } 7579 7580 static inline void 7581 instr_jmp_lt_mi_exec(struct rte_swx_pipeline *p) 7582 { 7583 struct thread *t = &p->threads[p->thread_id]; 7584 struct instruction *ip = t->ip; 7585 7586 TRACE("[Thread %2u] jmplt (mi)\n", p->thread_id); 7587 7588 JMP_CMP_MI(t, ip, <); 7589 } 7590 7591 static inline void 7592 instr_jmp_lt_hi_exec(struct rte_swx_pipeline *p) 7593 { 7594 struct thread *t = &p->threads[p->thread_id]; 7595 struct instruction *ip = t->ip; 7596 7597 TRACE("[Thread %2u] jmplt (hi)\n", p->thread_id); 7598 7599 JMP_CMP_HI(t, ip, <); 7600 } 7601 7602 static inline void 7603 instr_jmp_gt_exec(struct rte_swx_pipeline *p) 7604 { 7605 struct thread *t = &p->threads[p->thread_id]; 7606 struct instruction *ip = t->ip; 7607 7608 TRACE("[Thread %2u] jmpgt\n", p->thread_id); 7609 7610 JMP_CMP(t, ip, >); 7611 } 7612 7613 static inline void 7614 instr_jmp_gt_mh_exec(struct rte_swx_pipeline *p) 7615 { 7616 struct thread *t = &p->threads[p->thread_id]; 7617 struct instruction *ip = t->ip; 7618 7619 TRACE("[Thread %2u] jmpgt (mh)\n", p->thread_id); 7620 7621 JMP_CMP_MH(t, ip, >); 7622 } 7623 7624 static inline void 7625 instr_jmp_gt_hm_exec(struct rte_swx_pipeline *p) 7626 { 7627 struct thread *t = &p->threads[p->thread_id]; 7628 struct instruction *ip = t->ip; 7629 7630 TRACE("[Thread %2u] jmpgt (hm)\n", p->thread_id); 7631 7632 JMP_CMP_HM(t, ip, >); 7633 } 7634 7635 static inline void 7636 instr_jmp_gt_hh_exec(struct rte_swx_pipeline *p) 7637 { 7638 struct thread *t = &p->threads[p->thread_id]; 7639 struct instruction *ip = t->ip; 7640 7641 TRACE("[Thread %2u] jmpgt (hh)\n", p->thread_id); 7642 7643 JMP_CMP_HH(t, ip, >); 7644 } 7645 7646 static inline void 7647 instr_jmp_gt_mi_exec(struct rte_swx_pipeline *p) 7648 { 7649 struct thread *t = &p->threads[p->thread_id]; 7650 struct instruction *ip = t->ip; 7651 7652 TRACE("[Thread %2u] jmpgt (mi)\n", 
p->thread_id); 7653 7654 JMP_CMP_MI(t, ip, >); 7655 } 7656 7657 static inline void 7658 instr_jmp_gt_hi_exec(struct rte_swx_pipeline *p) 7659 { 7660 struct thread *t = &p->threads[p->thread_id]; 7661 struct instruction *ip = t->ip; 7662 7663 TRACE("[Thread %2u] jmpgt (hi)\n", p->thread_id); 7664 7665 JMP_CMP_HI(t, ip, >); 7666 } 7667 7668 /* 7669 * return. 7670 */ 7671 static int 7672 instr_return_translate(struct rte_swx_pipeline *p __rte_unused, 7673 struct action *action, 7674 char **tokens __rte_unused, 7675 int n_tokens, 7676 struct instruction *instr, 7677 struct instruction_data *data __rte_unused) 7678 { 7679 CHECK(action, EINVAL); 7680 CHECK(n_tokens == 1, EINVAL); 7681 7682 instr->type = INSTR_RETURN; 7683 return 0; 7684 } 7685 7686 static inline void 7687 instr_return_exec(struct rte_swx_pipeline *p) 7688 { 7689 struct thread *t = &p->threads[p->thread_id]; 7690 7691 TRACE("[Thread %2u] return\n", p->thread_id); 7692 7693 t->ip = t->ret; 7694 } 7695 7696 static int 7697 instr_translate(struct rte_swx_pipeline *p, 7698 struct action *action, 7699 char *string, 7700 struct instruction *instr, 7701 struct instruction_data *data) 7702 { 7703 char *tokens[RTE_SWX_INSTRUCTION_TOKENS_MAX]; 7704 int n_tokens = 0, tpos = 0; 7705 7706 /* Parse the instruction string into tokens. */ 7707 for ( ; ; ) { 7708 char *token; 7709 7710 token = strtok_r(string, " \t\v", &string); 7711 if (!token) 7712 break; 7713 7714 CHECK(n_tokens < RTE_SWX_INSTRUCTION_TOKENS_MAX, EINVAL); 7715 CHECK_NAME(token, EINVAL); 7716 7717 tokens[n_tokens] = token; 7718 n_tokens++; 7719 } 7720 7721 CHECK(n_tokens, EINVAL); 7722 7723 /* Handle the optional instruction label. */ 7724 if ((n_tokens >= 2) && !strcmp(tokens[1], ":")) { 7725 strcpy(data->label, tokens[0]); 7726 7727 tpos += 2; 7728 CHECK(n_tokens - tpos, EINVAL); 7729 } 7730 7731 /* Identify the instruction type. 
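	 * The first token after the optional label selects one of the
	 * translate callbacks below; each callback validates its own token
	 * count and operand types, and any unrecognized opcode falls through
	 * to the final CHECK(0, EINVAL). A label is written as its own token
	 * pair, e.g. (illustrative names) "LABEL_0 : mov m.color_in 0".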
*/ 7732 if (!strcmp(tokens[tpos], "rx")) 7733 return instr_rx_translate(p, 7734 action, 7735 &tokens[tpos], 7736 n_tokens - tpos, 7737 instr, 7738 data); 7739 7740 if (!strcmp(tokens[tpos], "tx")) 7741 return instr_tx_translate(p, 7742 action, 7743 &tokens[tpos], 7744 n_tokens - tpos, 7745 instr, 7746 data); 7747 7748 if (!strcmp(tokens[tpos], "drop")) 7749 return instr_drop_translate(p, 7750 action, 7751 &tokens[tpos], 7752 n_tokens - tpos, 7753 instr, 7754 data); 7755 7756 if (!strcmp(tokens[tpos], "extract")) 7757 return instr_hdr_extract_translate(p, 7758 action, 7759 &tokens[tpos], 7760 n_tokens - tpos, 7761 instr, 7762 data); 7763 7764 if (!strcmp(tokens[tpos], "emit")) 7765 return instr_hdr_emit_translate(p, 7766 action, 7767 &tokens[tpos], 7768 n_tokens - tpos, 7769 instr, 7770 data); 7771 7772 if (!strcmp(tokens[tpos], "validate")) 7773 return instr_hdr_validate_translate(p, 7774 action, 7775 &tokens[tpos], 7776 n_tokens - tpos, 7777 instr, 7778 data); 7779 7780 if (!strcmp(tokens[tpos], "invalidate")) 7781 return instr_hdr_invalidate_translate(p, 7782 action, 7783 &tokens[tpos], 7784 n_tokens - tpos, 7785 instr, 7786 data); 7787 7788 if (!strcmp(tokens[tpos], "mov")) 7789 return instr_mov_translate(p, 7790 action, 7791 &tokens[tpos], 7792 n_tokens - tpos, 7793 instr, 7794 data); 7795 7796 if (!strcmp(tokens[tpos], "add")) 7797 return instr_alu_add_translate(p, 7798 action, 7799 &tokens[tpos], 7800 n_tokens - tpos, 7801 instr, 7802 data); 7803 7804 if (!strcmp(tokens[tpos], "sub")) 7805 return instr_alu_sub_translate(p, 7806 action, 7807 &tokens[tpos], 7808 n_tokens - tpos, 7809 instr, 7810 data); 7811 7812 if (!strcmp(tokens[tpos], "ckadd")) 7813 return instr_alu_ckadd_translate(p, 7814 action, 7815 &tokens[tpos], 7816 n_tokens - tpos, 7817 instr, 7818 data); 7819 7820 if (!strcmp(tokens[tpos], "cksub")) 7821 return instr_alu_cksub_translate(p, 7822 action, 7823 &tokens[tpos], 7824 n_tokens - tpos, 7825 instr, 7826 data); 7827 7828 if (!strcmp(tokens[tpos], "and")) 7829 return instr_alu_and_translate(p, 7830 action, 7831 &tokens[tpos], 7832 n_tokens - tpos, 7833 instr, 7834 data); 7835 7836 if (!strcmp(tokens[tpos], "or")) 7837 return instr_alu_or_translate(p, 7838 action, 7839 &tokens[tpos], 7840 n_tokens - tpos, 7841 instr, 7842 data); 7843 7844 if (!strcmp(tokens[tpos], "xor")) 7845 return instr_alu_xor_translate(p, 7846 action, 7847 &tokens[tpos], 7848 n_tokens - tpos, 7849 instr, 7850 data); 7851 7852 if (!strcmp(tokens[tpos], "shl")) 7853 return instr_alu_shl_translate(p, 7854 action, 7855 &tokens[tpos], 7856 n_tokens - tpos, 7857 instr, 7858 data); 7859 7860 if (!strcmp(tokens[tpos], "shr")) 7861 return instr_alu_shr_translate(p, 7862 action, 7863 &tokens[tpos], 7864 n_tokens - tpos, 7865 instr, 7866 data); 7867 7868 if (!strcmp(tokens[tpos], "regprefetch")) 7869 return instr_regprefetch_translate(p, 7870 action, 7871 &tokens[tpos], 7872 n_tokens - tpos, 7873 instr, 7874 data); 7875 7876 if (!strcmp(tokens[tpos], "regrd")) 7877 return instr_regrd_translate(p, 7878 action, 7879 &tokens[tpos], 7880 n_tokens - tpos, 7881 instr, 7882 data); 7883 7884 if (!strcmp(tokens[tpos], "regwr")) 7885 return instr_regwr_translate(p, 7886 action, 7887 &tokens[tpos], 7888 n_tokens - tpos, 7889 instr, 7890 data); 7891 7892 if (!strcmp(tokens[tpos], "regadd")) 7893 return instr_regadd_translate(p, 7894 action, 7895 &tokens[tpos], 7896 n_tokens - tpos, 7897 instr, 7898 data); 7899 7900 if (!strcmp(tokens[tpos], "metprefetch")) 7901 return instr_metprefetch_translate(p, 7902 action, 7903 
&tokens[tpos], 7904 n_tokens - tpos, 7905 instr, 7906 data); 7907 7908 if (!strcmp(tokens[tpos], "meter")) 7909 return instr_meter_translate(p, 7910 action, 7911 &tokens[tpos], 7912 n_tokens - tpos, 7913 instr, 7914 data); 7915 7916 if (!strcmp(tokens[tpos], "table")) 7917 return instr_table_translate(p, 7918 action, 7919 &tokens[tpos], 7920 n_tokens - tpos, 7921 instr, 7922 data); 7923 7924 if (!strcmp(tokens[tpos], "extern")) 7925 return instr_extern_translate(p, 7926 action, 7927 &tokens[tpos], 7928 n_tokens - tpos, 7929 instr, 7930 data); 7931 7932 if (!strcmp(tokens[tpos], "jmp")) 7933 return instr_jmp_translate(p, 7934 action, 7935 &tokens[tpos], 7936 n_tokens - tpos, 7937 instr, 7938 data); 7939 7940 if (!strcmp(tokens[tpos], "jmpv")) 7941 return instr_jmp_valid_translate(p, 7942 action, 7943 &tokens[tpos], 7944 n_tokens - tpos, 7945 instr, 7946 data); 7947 7948 if (!strcmp(tokens[tpos], "jmpnv")) 7949 return instr_jmp_invalid_translate(p, 7950 action, 7951 &tokens[tpos], 7952 n_tokens - tpos, 7953 instr, 7954 data); 7955 7956 if (!strcmp(tokens[tpos], "jmph")) 7957 return instr_jmp_hit_translate(p, 7958 action, 7959 &tokens[tpos], 7960 n_tokens - tpos, 7961 instr, 7962 data); 7963 7964 if (!strcmp(tokens[tpos], "jmpnh")) 7965 return instr_jmp_miss_translate(p, 7966 action, 7967 &tokens[tpos], 7968 n_tokens - tpos, 7969 instr, 7970 data); 7971 7972 if (!strcmp(tokens[tpos], "jmpa")) 7973 return instr_jmp_action_hit_translate(p, 7974 action, 7975 &tokens[tpos], 7976 n_tokens - tpos, 7977 instr, 7978 data); 7979 7980 if (!strcmp(tokens[tpos], "jmpna")) 7981 return instr_jmp_action_miss_translate(p, 7982 action, 7983 &tokens[tpos], 7984 n_tokens - tpos, 7985 instr, 7986 data); 7987 7988 if (!strcmp(tokens[tpos], "jmpeq")) 7989 return instr_jmp_eq_translate(p, 7990 action, 7991 &tokens[tpos], 7992 n_tokens - tpos, 7993 instr, 7994 data); 7995 7996 if (!strcmp(tokens[tpos], "jmpneq")) 7997 return instr_jmp_neq_translate(p, 7998 action, 7999 &tokens[tpos], 8000 n_tokens - tpos, 8001 instr, 8002 data); 8003 8004 if (!strcmp(tokens[tpos], "jmplt")) 8005 return instr_jmp_lt_translate(p, 8006 action, 8007 &tokens[tpos], 8008 n_tokens - tpos, 8009 instr, 8010 data); 8011 8012 if (!strcmp(tokens[tpos], "jmpgt")) 8013 return instr_jmp_gt_translate(p, 8014 action, 8015 &tokens[tpos], 8016 n_tokens - tpos, 8017 instr, 8018 data); 8019 8020 if (!strcmp(tokens[tpos], "return")) 8021 return instr_return_translate(p, 8022 action, 8023 &tokens[tpos], 8024 n_tokens - tpos, 8025 instr, 8026 data); 8027 8028 CHECK(0, EINVAL); 8029 } 8030 8031 static struct instruction_data * 8032 label_find(struct instruction_data *data, uint32_t n, const char *label) 8033 { 8034 uint32_t i; 8035 8036 for (i = 0; i < n; i++) 8037 if (!strcmp(label, data[i].label)) 8038 return &data[i]; 8039 8040 return NULL; 8041 } 8042 8043 static uint32_t 8044 label_is_used(struct instruction_data *data, uint32_t n, const char *label) 8045 { 8046 uint32_t count = 0, i; 8047 8048 if (!label[0]) 8049 return 0; 8050 8051 for (i = 0; i < n; i++) 8052 if (!strcmp(label, data[i].jmp_label)) 8053 count++; 8054 8055 return count; 8056 } 8057 8058 static int 8059 instr_label_check(struct instruction_data *instruction_data, 8060 uint32_t n_instructions) 8061 { 8062 uint32_t i; 8063 8064 /* Check that all instruction labels are unique. 
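	 * Duplicate labels would make the jump target resolution below
	 * ambiguous, so they are rejected at configuration time.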
	 */
	for (i = 0; i < n_instructions; i++) {
		struct instruction_data *data = &instruction_data[i];
		char *label = data->label;
		uint32_t j;

		if (!label[0])
			continue;

		for (j = i + 1; j < n_instructions; j++)
			CHECK(strcmp(label, instruction_data[j].label), EINVAL);
	}

	/* Get users for each instruction label. */
	for (i = 0; i < n_instructions; i++) {
		struct instruction_data *data = &instruction_data[i];
		char *label = data->label;

		data->n_users = label_is_used(instruction_data,
					      n_instructions,
					      label);
	}

	return 0;
}

static int
instr_jmp_resolve(struct instruction *instructions,
		  struct instruction_data *instruction_data,
		  uint32_t n_instructions)
{
	uint32_t i;

	for (i = 0; i < n_instructions; i++) {
		struct instruction *instr = &instructions[i];
		struct instruction_data *data = &instruction_data[i];
		struct instruction_data *found;

		if (!instruction_is_jmp(instr))
			continue;

		found = label_find(instruction_data,
				   n_instructions,
				   data->jmp_label);
		CHECK(found, EINVAL);

		instr->jmp.ip = &instructions[found - instruction_data];
	}

	return 0;
}

static int
instr_verify(struct rte_swx_pipeline *p __rte_unused,
	     struct action *a,
	     struct instruction *instr,
	     struct instruction_data *data __rte_unused,
	     uint32_t n_instructions)
{
	if (!a) {
		enum instruction_type type;
		uint32_t i;

		/* Check that the first instruction is rx. */
		CHECK(instr[0].type == INSTR_RX, EINVAL);

		/* Check that there is at least one tx instruction. */
		for (i = 0; i < n_instructions; i++) {
			type = instr[i].type;

			if (instruction_is_tx(type))
				break;
		}
		CHECK(i < n_instructions, EINVAL);

		/* Check that the last instruction is either tx or an
		 * unconditional jump.
		 */
		type = instr[n_instructions - 1].type;
		CHECK(instruction_is_tx(type) || (type == INSTR_JMP), EINVAL);
	}

	if (a) {
		enum instruction_type type;
		uint32_t i;

		/* Check that there is at least one return or tx instruction. */
		for (i = 0; i < n_instructions; i++) {
			type = instr[i].type;

			if ((type == INSTR_RETURN) || instruction_is_tx(type))
				break;
		}
		CHECK(i < n_instructions, EINVAL);
	}

	return 0;
}

static uint32_t
instr_compact(struct instruction *instructions,
	      struct instruction_data *instruction_data,
	      uint32_t n_instructions)
{
	uint32_t i, pos = 0;

	/* Eliminate the invalid instructions that have been optimized out.
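	 * Compaction shifts the surviving instructions and their metadata
	 * down in place, preserving their relative order, and returns the
	 * new instruction count.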
*/ 8171 for (i = 0; i < n_instructions; i++) { 8172 struct instruction *instr = &instructions[i]; 8173 struct instruction_data *data = &instruction_data[i]; 8174 8175 if (data->invalid) 8176 continue; 8177 8178 if (i != pos) { 8179 memcpy(&instructions[pos], instr, sizeof(*instr)); 8180 memcpy(&instruction_data[pos], data, sizeof(*data)); 8181 } 8182 8183 pos++; 8184 } 8185 8186 return pos; 8187 } 8188 8189 static int 8190 instr_pattern_extract_many_search(struct instruction *instr, 8191 struct instruction_data *data, 8192 uint32_t n_instr, 8193 uint32_t *n_pattern_instr) 8194 { 8195 uint32_t i; 8196 8197 for (i = 0; i < n_instr; i++) { 8198 if (data[i].invalid) 8199 break; 8200 8201 if (instr[i].type != INSTR_HDR_EXTRACT) 8202 break; 8203 8204 if (i == RTE_DIM(instr->io.hdr.header_id)) 8205 break; 8206 8207 if (i && data[i].n_users) 8208 break; 8209 } 8210 8211 if (i < 2) 8212 return 0; 8213 8214 *n_pattern_instr = i; 8215 return 1; 8216 } 8217 8218 static void 8219 instr_pattern_extract_many_replace(struct instruction *instr, 8220 struct instruction_data *data, 8221 uint32_t n_instr) 8222 { 8223 uint32_t i; 8224 8225 for (i = 1; i < n_instr; i++) { 8226 instr[0].type++; 8227 instr[0].io.hdr.header_id[i] = instr[i].io.hdr.header_id[0]; 8228 instr[0].io.hdr.struct_id[i] = instr[i].io.hdr.struct_id[0]; 8229 instr[0].io.hdr.n_bytes[i] = instr[i].io.hdr.n_bytes[0]; 8230 8231 data[i].invalid = 1; 8232 } 8233 } 8234 8235 static uint32_t 8236 instr_pattern_extract_many_optimize(struct instruction *instructions, 8237 struct instruction_data *instruction_data, 8238 uint32_t n_instructions) 8239 { 8240 uint32_t i; 8241 8242 for (i = 0; i < n_instructions; ) { 8243 struct instruction *instr = &instructions[i]; 8244 struct instruction_data *data = &instruction_data[i]; 8245 uint32_t n_instr = 0; 8246 int detected; 8247 8248 /* Extract many. */ 8249 detected = instr_pattern_extract_many_search(instr, 8250 data, 8251 n_instructions - i, 8252 &n_instr); 8253 if (detected) { 8254 instr_pattern_extract_many_replace(instr, 8255 data, 8256 n_instr); 8257 i += n_instr; 8258 continue; 8259 } 8260 8261 /* No pattern starting at the current instruction. */ 8262 i++; 8263 } 8264 8265 /* Eliminate the invalid instructions that have been optimized out. */ 8266 n_instructions = instr_compact(instructions, 8267 instruction_data, 8268 n_instructions); 8269 8270 return n_instructions; 8271 } 8272 8273 static int 8274 instr_pattern_emit_many_tx_search(struct instruction *instr, 8275 struct instruction_data *data, 8276 uint32_t n_instr, 8277 uint32_t *n_pattern_instr) 8278 { 8279 uint32_t i; 8280 8281 for (i = 0; i < n_instr; i++) { 8282 if (data[i].invalid) 8283 break; 8284 8285 if (instr[i].type != INSTR_HDR_EMIT) 8286 break; 8287 8288 if (i == RTE_DIM(instr->io.hdr.header_id)) 8289 break; 8290 8291 if (i && data[i].n_users) 8292 break; 8293 } 8294 8295 if (!i) 8296 return 0; 8297 8298 if (!instruction_is_tx(instr[i].type)) 8299 return 0; 8300 8301 if (data[i].n_users) 8302 return 0; 8303 8304 i++; 8305 8306 *n_pattern_instr = i; 8307 return 1; 8308 } 8309 8310 static void 8311 instr_pattern_emit_many_tx_replace(struct instruction *instr, 8312 struct instruction_data *data, 8313 uint32_t n_instr) 8314 { 8315 uint32_t i; 8316 8317 /* Any emit instruction in addition to the first one. 
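	 * Each such emit instruction bumps the opcode of the first
	 * instruction by one step (INSTR_HDR_EMIT_TX, INSTR_HDR_EMIT2_TX,
	 * and so on) and appends its header to the first instruction's
	 * header list; the trailing TX instruction is absorbed by the final
	 * opcode increment below.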
*/ 8318 for (i = 1; i < n_instr - 1; i++) { 8319 instr[0].type++; 8320 instr[0].io.hdr.header_id[i] = instr[i].io.hdr.header_id[0]; 8321 instr[0].io.hdr.struct_id[i] = instr[i].io.hdr.struct_id[0]; 8322 instr[0].io.hdr.n_bytes[i] = instr[i].io.hdr.n_bytes[0]; 8323 8324 data[i].invalid = 1; 8325 } 8326 8327 /* The TX instruction is the last one in the pattern. */ 8328 instr[0].type++; 8329 instr[0].io.io.offset = instr[i].io.io.offset; 8330 instr[0].io.io.n_bits = instr[i].io.io.n_bits; 8331 data[i].invalid = 1; 8332 } 8333 8334 static uint32_t 8335 instr_pattern_emit_many_tx_optimize(struct instruction *instructions, 8336 struct instruction_data *instruction_data, 8337 uint32_t n_instructions) 8338 { 8339 uint32_t i; 8340 8341 for (i = 0; i < n_instructions; ) { 8342 struct instruction *instr = &instructions[i]; 8343 struct instruction_data *data = &instruction_data[i]; 8344 uint32_t n_instr = 0; 8345 int detected; 8346 8347 /* Emit many + TX. */ 8348 detected = instr_pattern_emit_many_tx_search(instr, 8349 data, 8350 n_instructions - i, 8351 &n_instr); 8352 if (detected) { 8353 instr_pattern_emit_many_tx_replace(instr, 8354 data, 8355 n_instr); 8356 i += n_instr; 8357 continue; 8358 } 8359 8360 /* No pattern starting at the current instruction. */ 8361 i++; 8362 } 8363 8364 /* Eliminate the invalid instructions that have been optimized out. */ 8365 n_instructions = instr_compact(instructions, 8366 instruction_data, 8367 n_instructions); 8368 8369 return n_instructions; 8370 } 8371 8372 static uint32_t 8373 action_arg_src_mov_count(struct action *a, 8374 uint32_t arg_id, 8375 struct instruction *instructions, 8376 struct instruction_data *instruction_data, 8377 uint32_t n_instructions); 8378 8379 static int 8380 instr_pattern_mov_all_validate_search(struct rte_swx_pipeline *p, 8381 struct action *a, 8382 struct instruction *instr, 8383 struct instruction_data *data, 8384 uint32_t n_instr, 8385 struct instruction *instructions, 8386 struct instruction_data *instruction_data, 8387 uint32_t n_instructions, 8388 uint32_t *n_pattern_instr) 8389 { 8390 struct header *h; 8391 uint32_t src_field_id, i, j; 8392 8393 /* Prerequisites. */ 8394 if (!a || !a->st) 8395 return 0; 8396 8397 /* First instruction: MOV_HM. */ 8398 if (data[0].invalid || (instr[0].type != INSTR_MOV_HM)) 8399 return 0; 8400 8401 h = header_find_by_struct_id(p, instr[0].mov.dst.struct_id); 8402 if (!h) 8403 return 0; 8404 8405 for (src_field_id = 0; src_field_id < a->st->n_fields; src_field_id++) 8406 if (instr[0].mov.src.offset == a->st->fields[src_field_id].offset / 8) 8407 break; 8408 8409 if (src_field_id == a->st->n_fields) 8410 return 0; 8411 8412 if (instr[0].mov.dst.offset || 8413 (instr[0].mov.dst.n_bits != h->st->fields[0].n_bits) || 8414 instr[0].mov.src.struct_id || 8415 (instr[0].mov.src.n_bits != a->st->fields[src_field_id].n_bits) || 8416 (instr[0].mov.dst.n_bits != instr[0].mov.src.n_bits)) 8417 return 0; 8418 8419 if ((n_instr < h->st->n_fields + 1) || 8420 (a->st->n_fields < src_field_id + h->st->n_fields + 1)) 8421 return 0; 8422 8423 /* Subsequent instructions: MOV_HM. 
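	 * Each of them must copy the next action argument into the next
	 * header field, with matching offsets and widths, so that the whole
	 * sequence (plus the trailing validate) can be replaced by a single
	 * DMA_HT transfer.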
*/ 8424 for (i = 1; i < h->st->n_fields; i++) 8425 if (data[i].invalid || 8426 data[i].n_users || 8427 (instr[i].type != INSTR_MOV_HM) || 8428 (instr[i].mov.dst.struct_id != h->struct_id) || 8429 (instr[i].mov.dst.offset != h->st->fields[i].offset / 8) || 8430 (instr[i].mov.dst.n_bits != h->st->fields[i].n_bits) || 8431 instr[i].mov.src.struct_id || 8432 (instr[i].mov.src.offset != a->st->fields[src_field_id + i].offset / 8) || 8433 (instr[i].mov.src.n_bits != a->st->fields[src_field_id + i].n_bits) || 8434 (instr[i].mov.dst.n_bits != instr[i].mov.src.n_bits)) 8435 return 0; 8436 8437 /* Last instruction: HDR_VALIDATE. */ 8438 if ((instr[i].type != INSTR_HDR_VALIDATE) || 8439 (instr[i].valid.header_id != h->id)) 8440 return 0; 8441 8442 /* Check that none of the action args that are used as source for this 8443 * DMA transfer are not used as source in any other mov instruction. 8444 */ 8445 for (j = src_field_id; j < src_field_id + h->st->n_fields; j++) { 8446 uint32_t n_users; 8447 8448 n_users = action_arg_src_mov_count(a, 8449 j, 8450 instructions, 8451 instruction_data, 8452 n_instructions); 8453 if (n_users > 1) 8454 return 0; 8455 } 8456 8457 *n_pattern_instr = 1 + i; 8458 return 1; 8459 } 8460 8461 static void 8462 instr_pattern_mov_all_validate_replace(struct rte_swx_pipeline *p, 8463 struct action *a, 8464 struct instruction *instr, 8465 struct instruction_data *data, 8466 uint32_t n_instr) 8467 { 8468 struct header *h; 8469 uint32_t src_field_id, src_offset, i; 8470 8471 /* Read from the instructions before they are modified. */ 8472 h = header_find_by_struct_id(p, instr[0].mov.dst.struct_id); 8473 if (!h) 8474 return; 8475 8476 for (src_field_id = 0; src_field_id < a->st->n_fields; src_field_id++) 8477 if (instr[0].mov.src.offset == a->st->fields[src_field_id].offset / 8) 8478 break; 8479 8480 if (src_field_id == a->st->n_fields) 8481 return; 8482 8483 src_offset = instr[0].mov.src.offset; 8484 8485 /* Modify the instructions. */ 8486 instr[0].type = INSTR_DMA_HT; 8487 instr[0].dma.dst.header_id[0] = h->id; 8488 instr[0].dma.dst.struct_id[0] = h->struct_id; 8489 instr[0].dma.src.offset[0] = (uint8_t)src_offset; 8490 instr[0].dma.n_bytes[0] = h->st->n_bits / 8; 8491 8492 for (i = 1; i < n_instr; i++) 8493 data[i].invalid = 1; 8494 8495 /* Update the endianness of the action arguments to header endianness. */ 8496 for (i = 0; i < h->st->n_fields; i++) 8497 a->args_endianness[src_field_id + i] = 1; 8498 } 8499 8500 static uint32_t 8501 instr_pattern_mov_all_validate_optimize(struct rte_swx_pipeline *p, 8502 struct action *a, 8503 struct instruction *instructions, 8504 struct instruction_data *instruction_data, 8505 uint32_t n_instructions) 8506 { 8507 uint32_t i; 8508 8509 if (!a || !a->st) 8510 return n_instructions; 8511 8512 for (i = 0; i < n_instructions; ) { 8513 struct instruction *instr = &instructions[i]; 8514 struct instruction_data *data = &instruction_data[i]; 8515 uint32_t n_instr = 0; 8516 int detected; 8517 8518 /* Mov all + validate. */ 8519 detected = instr_pattern_mov_all_validate_search(p, 8520 a, 8521 instr, 8522 data, 8523 n_instructions - i, 8524 instructions, 8525 instruction_data, 8526 n_instructions, 8527 &n_instr); 8528 if (detected) { 8529 instr_pattern_mov_all_validate_replace(p, a, instr, data, n_instr); 8530 i += n_instr; 8531 continue; 8532 } 8533 8534 /* No pattern starting at the current instruction. */ 8535 i++; 8536 } 8537 8538 /* Eliminate the invalid instructions that have been optimized out. 
*/ 8539 n_instructions = instr_compact(instructions, 8540 instruction_data, 8541 n_instructions); 8542 8543 return n_instructions; 8544 } 8545 8546 static int 8547 instr_pattern_dma_many_search(struct instruction *instr, 8548 struct instruction_data *data, 8549 uint32_t n_instr, 8550 uint32_t *n_pattern_instr) 8551 { 8552 uint32_t i; 8553 8554 for (i = 0; i < n_instr; i++) { 8555 if (data[i].invalid) 8556 break; 8557 8558 if (instr[i].type != INSTR_DMA_HT) 8559 break; 8560 8561 if (i == RTE_DIM(instr->dma.dst.header_id)) 8562 break; 8563 8564 if (i && data[i].n_users) 8565 break; 8566 } 8567 8568 if (i < 2) 8569 return 0; 8570 8571 *n_pattern_instr = i; 8572 return 1; 8573 } 8574 8575 static void 8576 instr_pattern_dma_many_replace(struct instruction *instr, 8577 struct instruction_data *data, 8578 uint32_t n_instr) 8579 { 8580 uint32_t i; 8581 8582 for (i = 1; i < n_instr; i++) { 8583 instr[0].type++; 8584 instr[0].dma.dst.header_id[i] = instr[i].dma.dst.header_id[0]; 8585 instr[0].dma.dst.struct_id[i] = instr[i].dma.dst.struct_id[0]; 8586 instr[0].dma.src.offset[i] = instr[i].dma.src.offset[0]; 8587 instr[0].dma.n_bytes[i] = instr[i].dma.n_bytes[0]; 8588 8589 data[i].invalid = 1; 8590 } 8591 } 8592 8593 static uint32_t 8594 instr_pattern_dma_many_optimize(struct instruction *instructions, 8595 struct instruction_data *instruction_data, 8596 uint32_t n_instructions) 8597 { 8598 uint32_t i; 8599 8600 for (i = 0; i < n_instructions; ) { 8601 struct instruction *instr = &instructions[i]; 8602 struct instruction_data *data = &instruction_data[i]; 8603 uint32_t n_instr = 0; 8604 int detected; 8605 8606 /* DMA many. */ 8607 detected = instr_pattern_dma_many_search(instr, 8608 data, 8609 n_instructions - i, 8610 &n_instr); 8611 if (detected) { 8612 instr_pattern_dma_many_replace(instr, data, n_instr); 8613 i += n_instr; 8614 continue; 8615 } 8616 8617 /* No pattern starting at the current instruction. */ 8618 i++; 8619 } 8620 8621 /* Eliminate the invalid instructions that have been optimized out. */ 8622 n_instructions = instr_compact(instructions, 8623 instruction_data, 8624 n_instructions); 8625 8626 return n_instructions; 8627 } 8628 8629 static uint32_t 8630 instr_optimize(struct rte_swx_pipeline *p, 8631 struct action *a, 8632 struct instruction *instructions, 8633 struct instruction_data *instruction_data, 8634 uint32_t n_instructions) 8635 { 8636 /* Extract many. */ 8637 n_instructions = instr_pattern_extract_many_optimize(instructions, 8638 instruction_data, 8639 n_instructions); 8640 8641 /* Emit many + TX. */ 8642 n_instructions = instr_pattern_emit_many_tx_optimize(instructions, 8643 instruction_data, 8644 n_instructions); 8645 8646 /* Mov all + validate. */ 8647 n_instructions = instr_pattern_mov_all_validate_optimize(p, 8648 a, 8649 instructions, 8650 instruction_data, 8651 n_instructions); 8652 8653 /* DMA many. */ 8654 n_instructions = instr_pattern_dma_many_optimize(instructions, 8655 instruction_data, 8656 n_instructions); 8657 8658 return n_instructions; 8659 } 8660 8661 static int 8662 instruction_config(struct rte_swx_pipeline *p, 8663 struct action *a, 8664 const char **instructions, 8665 uint32_t n_instructions) 8666 { 8667 struct instruction *instr = NULL; 8668 struct instruction_data *data = NULL; 8669 int err = 0; 8670 uint32_t i; 8671 8672 CHECK(n_instructions, EINVAL); 8673 CHECK(instructions, EINVAL); 8674 for (i = 0; i < n_instructions; i++) 8675 CHECK_INSTRUCTION(instructions[i], EINVAL); 8676 8677 /* Memory allocation. 
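	 * Two parallel arrays are allocated: instr[] holds the translated
	 * instructions that are executed at run time, while data[] holds
	 * per-instruction metadata (label, jump label, user count, invalid
	 * flag) that is only needed during configuration and is freed before
	 * returning.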
*/ 8678 instr = calloc(n_instructions, sizeof(struct instruction)); 8679 if (!instr) { 8680 err = -ENOMEM; 8681 goto error; 8682 } 8683 8684 data = calloc(n_instructions, sizeof(struct instruction_data)); 8685 if (!data) { 8686 err = -ENOMEM; 8687 goto error; 8688 } 8689 8690 for (i = 0; i < n_instructions; i++) { 8691 char *string = strdup(instructions[i]); 8692 if (!string) { 8693 err = -ENOMEM; 8694 goto error; 8695 } 8696 8697 err = instr_translate(p, a, string, &instr[i], &data[i]); 8698 if (err) { 8699 free(string); 8700 goto error; 8701 } 8702 8703 free(string); 8704 } 8705 8706 err = instr_label_check(data, n_instructions); 8707 if (err) 8708 goto error; 8709 8710 err = instr_verify(p, a, instr, data, n_instructions); 8711 if (err) 8712 goto error; 8713 8714 n_instructions = instr_optimize(p, a, instr, data, n_instructions); 8715 8716 err = instr_jmp_resolve(instr, data, n_instructions); 8717 if (err) 8718 goto error; 8719 8720 if (a) { 8721 a->instructions = instr; 8722 a->n_instructions = n_instructions; 8723 } else { 8724 p->instructions = instr; 8725 p->n_instructions = n_instructions; 8726 } 8727 8728 free(data); 8729 return 0; 8730 8731 error: 8732 free(data); 8733 free(instr); 8734 return err; 8735 } 8736 8737 typedef void (*instr_exec_t)(struct rte_swx_pipeline *); 8738 8739 static instr_exec_t instruction_table[] = { 8740 [INSTR_RX] = instr_rx_exec, 8741 [INSTR_TX] = instr_tx_exec, 8742 [INSTR_TX_I] = instr_tx_i_exec, 8743 8744 [INSTR_HDR_EXTRACT] = instr_hdr_extract_exec, 8745 [INSTR_HDR_EXTRACT2] = instr_hdr_extract2_exec, 8746 [INSTR_HDR_EXTRACT3] = instr_hdr_extract3_exec, 8747 [INSTR_HDR_EXTRACT4] = instr_hdr_extract4_exec, 8748 [INSTR_HDR_EXTRACT5] = instr_hdr_extract5_exec, 8749 [INSTR_HDR_EXTRACT6] = instr_hdr_extract6_exec, 8750 [INSTR_HDR_EXTRACT7] = instr_hdr_extract7_exec, 8751 [INSTR_HDR_EXTRACT8] = instr_hdr_extract8_exec, 8752 8753 [INSTR_HDR_EMIT] = instr_hdr_emit_exec, 8754 [INSTR_HDR_EMIT_TX] = instr_hdr_emit_tx_exec, 8755 [INSTR_HDR_EMIT2_TX] = instr_hdr_emit2_tx_exec, 8756 [INSTR_HDR_EMIT3_TX] = instr_hdr_emit3_tx_exec, 8757 [INSTR_HDR_EMIT4_TX] = instr_hdr_emit4_tx_exec, 8758 [INSTR_HDR_EMIT5_TX] = instr_hdr_emit5_tx_exec, 8759 [INSTR_HDR_EMIT6_TX] = instr_hdr_emit6_tx_exec, 8760 [INSTR_HDR_EMIT7_TX] = instr_hdr_emit7_tx_exec, 8761 [INSTR_HDR_EMIT8_TX] = instr_hdr_emit8_tx_exec, 8762 8763 [INSTR_HDR_VALIDATE] = instr_hdr_validate_exec, 8764 [INSTR_HDR_INVALIDATE] = instr_hdr_invalidate_exec, 8765 8766 [INSTR_MOV] = instr_mov_exec, 8767 [INSTR_MOV_MH] = instr_mov_mh_exec, 8768 [INSTR_MOV_HM] = instr_mov_hm_exec, 8769 [INSTR_MOV_HH] = instr_mov_hh_exec, 8770 [INSTR_MOV_I] = instr_mov_i_exec, 8771 8772 [INSTR_DMA_HT] = instr_dma_ht_exec, 8773 [INSTR_DMA_HT2] = instr_dma_ht2_exec, 8774 [INSTR_DMA_HT3] = instr_dma_ht3_exec, 8775 [INSTR_DMA_HT4] = instr_dma_ht4_exec, 8776 [INSTR_DMA_HT5] = instr_dma_ht5_exec, 8777 [INSTR_DMA_HT6] = instr_dma_ht6_exec, 8778 [INSTR_DMA_HT7] = instr_dma_ht7_exec, 8779 [INSTR_DMA_HT8] = instr_dma_ht8_exec, 8780 8781 [INSTR_ALU_ADD] = instr_alu_add_exec, 8782 [INSTR_ALU_ADD_MH] = instr_alu_add_mh_exec, 8783 [INSTR_ALU_ADD_HM] = instr_alu_add_hm_exec, 8784 [INSTR_ALU_ADD_HH] = instr_alu_add_hh_exec, 8785 [INSTR_ALU_ADD_MI] = instr_alu_add_mi_exec, 8786 [INSTR_ALU_ADD_HI] = instr_alu_add_hi_exec, 8787 8788 [INSTR_ALU_SUB] = instr_alu_sub_exec, 8789 [INSTR_ALU_SUB_MH] = instr_alu_sub_mh_exec, 8790 [INSTR_ALU_SUB_HM] = instr_alu_sub_hm_exec, 8791 [INSTR_ALU_SUB_HH] = instr_alu_sub_hh_exec, 8792 [INSTR_ALU_SUB_MI] = 
instr_alu_sub_mi_exec, 8793 [INSTR_ALU_SUB_HI] = instr_alu_sub_hi_exec, 8794 8795 [INSTR_ALU_CKADD_FIELD] = instr_alu_ckadd_field_exec, 8796 [INSTR_ALU_CKADD_STRUCT] = instr_alu_ckadd_struct_exec, 8797 [INSTR_ALU_CKADD_STRUCT20] = instr_alu_ckadd_struct20_exec, 8798 [INSTR_ALU_CKSUB_FIELD] = instr_alu_cksub_field_exec, 8799 8800 [INSTR_ALU_AND] = instr_alu_and_exec, 8801 [INSTR_ALU_AND_MH] = instr_alu_and_mh_exec, 8802 [INSTR_ALU_AND_HM] = instr_alu_and_hm_exec, 8803 [INSTR_ALU_AND_HH] = instr_alu_and_hh_exec, 8804 [INSTR_ALU_AND_I] = instr_alu_and_i_exec, 8805 8806 [INSTR_ALU_OR] = instr_alu_or_exec, 8807 [INSTR_ALU_OR_MH] = instr_alu_or_mh_exec, 8808 [INSTR_ALU_OR_HM] = instr_alu_or_hm_exec, 8809 [INSTR_ALU_OR_HH] = instr_alu_or_hh_exec, 8810 [INSTR_ALU_OR_I] = instr_alu_or_i_exec, 8811 8812 [INSTR_ALU_XOR] = instr_alu_xor_exec, 8813 [INSTR_ALU_XOR_MH] = instr_alu_xor_mh_exec, 8814 [INSTR_ALU_XOR_HM] = instr_alu_xor_hm_exec, 8815 [INSTR_ALU_XOR_HH] = instr_alu_xor_hh_exec, 8816 [INSTR_ALU_XOR_I] = instr_alu_xor_i_exec, 8817 8818 [INSTR_ALU_SHL] = instr_alu_shl_exec, 8819 [INSTR_ALU_SHL_MH] = instr_alu_shl_mh_exec, 8820 [INSTR_ALU_SHL_HM] = instr_alu_shl_hm_exec, 8821 [INSTR_ALU_SHL_HH] = instr_alu_shl_hh_exec, 8822 [INSTR_ALU_SHL_MI] = instr_alu_shl_mi_exec, 8823 [INSTR_ALU_SHL_HI] = instr_alu_shl_hi_exec, 8824 8825 [INSTR_ALU_SHR] = instr_alu_shr_exec, 8826 [INSTR_ALU_SHR_MH] = instr_alu_shr_mh_exec, 8827 [INSTR_ALU_SHR_HM] = instr_alu_shr_hm_exec, 8828 [INSTR_ALU_SHR_HH] = instr_alu_shr_hh_exec, 8829 [INSTR_ALU_SHR_MI] = instr_alu_shr_mi_exec, 8830 [INSTR_ALU_SHR_HI] = instr_alu_shr_hi_exec, 8831 8832 [INSTR_REGPREFETCH_RH] = instr_regprefetch_rh_exec, 8833 [INSTR_REGPREFETCH_RM] = instr_regprefetch_rm_exec, 8834 [INSTR_REGPREFETCH_RI] = instr_regprefetch_ri_exec, 8835 8836 [INSTR_REGRD_HRH] = instr_regrd_hrh_exec, 8837 [INSTR_REGRD_HRM] = instr_regrd_hrm_exec, 8838 [INSTR_REGRD_MRH] = instr_regrd_mrh_exec, 8839 [INSTR_REGRD_MRM] = instr_regrd_mrm_exec, 8840 [INSTR_REGRD_HRI] = instr_regrd_hri_exec, 8841 [INSTR_REGRD_MRI] = instr_regrd_mri_exec, 8842 8843 [INSTR_REGWR_RHH] = instr_regwr_rhh_exec, 8844 [INSTR_REGWR_RHM] = instr_regwr_rhm_exec, 8845 [INSTR_REGWR_RMH] = instr_regwr_rmh_exec, 8846 [INSTR_REGWR_RMM] = instr_regwr_rmm_exec, 8847 [INSTR_REGWR_RHI] = instr_regwr_rhi_exec, 8848 [INSTR_REGWR_RMI] = instr_regwr_rmi_exec, 8849 [INSTR_REGWR_RIH] = instr_regwr_rih_exec, 8850 [INSTR_REGWR_RIM] = instr_regwr_rim_exec, 8851 [INSTR_REGWR_RII] = instr_regwr_rii_exec, 8852 8853 [INSTR_REGADD_RHH] = instr_regadd_rhh_exec, 8854 [INSTR_REGADD_RHM] = instr_regadd_rhm_exec, 8855 [INSTR_REGADD_RMH] = instr_regadd_rmh_exec, 8856 [INSTR_REGADD_RMM] = instr_regadd_rmm_exec, 8857 [INSTR_REGADD_RHI] = instr_regadd_rhi_exec, 8858 [INSTR_REGADD_RMI] = instr_regadd_rmi_exec, 8859 [INSTR_REGADD_RIH] = instr_regadd_rih_exec, 8860 [INSTR_REGADD_RIM] = instr_regadd_rim_exec, 8861 [INSTR_REGADD_RII] = instr_regadd_rii_exec, 8862 8863 [INSTR_METPREFETCH_H] = instr_metprefetch_h_exec, 8864 [INSTR_METPREFETCH_M] = instr_metprefetch_m_exec, 8865 [INSTR_METPREFETCH_I] = instr_metprefetch_i_exec, 8866 8867 [INSTR_METER_HHM] = instr_meter_hhm_exec, 8868 [INSTR_METER_HHI] = instr_meter_hhi_exec, 8869 [INSTR_METER_HMM] = instr_meter_hmm_exec, 8870 [INSTR_METER_HMI] = instr_meter_hmi_exec, 8871 [INSTR_METER_MHM] = instr_meter_mhm_exec, 8872 [INSTR_METER_MHI] = instr_meter_mhi_exec, 8873 [INSTR_METER_MMM] = instr_meter_mmm_exec, 8874 [INSTR_METER_MMI] = instr_meter_mmi_exec, 8875 [INSTR_METER_IHM] = 
instr_meter_ihm_exec, 8876 [INSTR_METER_IHI] = instr_meter_ihi_exec, 8877 [INSTR_METER_IMM] = instr_meter_imm_exec, 8878 [INSTR_METER_IMI] = instr_meter_imi_exec, 8879 8880 [INSTR_TABLE] = instr_table_exec, 8881 [INSTR_SELECTOR] = instr_selector_exec, 8882 [INSTR_EXTERN_OBJ] = instr_extern_obj_exec, 8883 [INSTR_EXTERN_FUNC] = instr_extern_func_exec, 8884 8885 [INSTR_JMP] = instr_jmp_exec, 8886 [INSTR_JMP_VALID] = instr_jmp_valid_exec, 8887 [INSTR_JMP_INVALID] = instr_jmp_invalid_exec, 8888 [INSTR_JMP_HIT] = instr_jmp_hit_exec, 8889 [INSTR_JMP_MISS] = instr_jmp_miss_exec, 8890 [INSTR_JMP_ACTION_HIT] = instr_jmp_action_hit_exec, 8891 [INSTR_JMP_ACTION_MISS] = instr_jmp_action_miss_exec, 8892 8893 [INSTR_JMP_EQ] = instr_jmp_eq_exec, 8894 [INSTR_JMP_EQ_MH] = instr_jmp_eq_mh_exec, 8895 [INSTR_JMP_EQ_HM] = instr_jmp_eq_hm_exec, 8896 [INSTR_JMP_EQ_HH] = instr_jmp_eq_hh_exec, 8897 [INSTR_JMP_EQ_I] = instr_jmp_eq_i_exec, 8898 8899 [INSTR_JMP_NEQ] = instr_jmp_neq_exec, 8900 [INSTR_JMP_NEQ_MH] = instr_jmp_neq_mh_exec, 8901 [INSTR_JMP_NEQ_HM] = instr_jmp_neq_hm_exec, 8902 [INSTR_JMP_NEQ_HH] = instr_jmp_neq_hh_exec, 8903 [INSTR_JMP_NEQ_I] = instr_jmp_neq_i_exec, 8904 8905 [INSTR_JMP_LT] = instr_jmp_lt_exec, 8906 [INSTR_JMP_LT_MH] = instr_jmp_lt_mh_exec, 8907 [INSTR_JMP_LT_HM] = instr_jmp_lt_hm_exec, 8908 [INSTR_JMP_LT_HH] = instr_jmp_lt_hh_exec, 8909 [INSTR_JMP_LT_MI] = instr_jmp_lt_mi_exec, 8910 [INSTR_JMP_LT_HI] = instr_jmp_lt_hi_exec, 8911 8912 [INSTR_JMP_GT] = instr_jmp_gt_exec, 8913 [INSTR_JMP_GT_MH] = instr_jmp_gt_mh_exec, 8914 [INSTR_JMP_GT_HM] = instr_jmp_gt_hm_exec, 8915 [INSTR_JMP_GT_HH] = instr_jmp_gt_hh_exec, 8916 [INSTR_JMP_GT_MI] = instr_jmp_gt_mi_exec, 8917 [INSTR_JMP_GT_HI] = instr_jmp_gt_hi_exec, 8918 8919 [INSTR_RETURN] = instr_return_exec, 8920 }; 8921 8922 static inline void 8923 instr_exec(struct rte_swx_pipeline *p) 8924 { 8925 struct thread *t = &p->threads[p->thread_id]; 8926 struct instruction *ip = t->ip; 8927 instr_exec_t instr = instruction_table[ip->type]; 8928 8929 instr(p); 8930 } 8931 8932 /* 8933 * Action. 8934 */ 8935 static struct action * 8936 action_find(struct rte_swx_pipeline *p, const char *name) 8937 { 8938 struct action *elem; 8939 8940 if (!name) 8941 return NULL; 8942 8943 TAILQ_FOREACH(elem, &p->actions, node) 8944 if (strcmp(elem->name, name) == 0) 8945 return elem; 8946 8947 return NULL; 8948 } 8949 8950 static struct action * 8951 action_find_by_id(struct rte_swx_pipeline *p, uint32_t id) 8952 { 8953 struct action *action = NULL; 8954 8955 TAILQ_FOREACH(action, &p->actions, node) 8956 if (action->id == id) 8957 return action; 8958 8959 return NULL; 8960 } 8961 8962 static struct field * 8963 action_field_find(struct action *a, const char *name) 8964 { 8965 return a->st ? 
struct_type_field_find(a->st, name) : NULL; 8966 } 8967 8968 static struct field * 8969 action_field_parse(struct action *action, const char *name) 8970 { 8971 if (name[0] != 't' || name[1] != '.') 8972 return NULL; 8973 8974 return action_field_find(action, &name[2]); 8975 } 8976 8977 int 8978 rte_swx_pipeline_action_config(struct rte_swx_pipeline *p, 8979 const char *name, 8980 const char *args_struct_type_name, 8981 const char **instructions, 8982 uint32_t n_instructions) 8983 { 8984 struct struct_type *args_struct_type; 8985 struct action *a; 8986 int err; 8987 8988 CHECK(p, EINVAL); 8989 8990 CHECK_NAME(name, EINVAL); 8991 CHECK(!action_find(p, name), EEXIST); 8992 8993 if (args_struct_type_name) { 8994 CHECK_NAME(args_struct_type_name, EINVAL); 8995 args_struct_type = struct_type_find(p, args_struct_type_name); 8996 CHECK(args_struct_type, EINVAL); 8997 } else { 8998 args_struct_type = NULL; 8999 } 9000 9001 /* Node allocation. */ 9002 a = calloc(1, sizeof(struct action)); 9003 CHECK(a, ENOMEM); 9004 if (args_struct_type) { 9005 a->args_endianness = calloc(args_struct_type->n_fields, sizeof(int)); 9006 if (!a->args_endianness) { 9007 free(a); 9008 CHECK(0, ENOMEM); 9009 } 9010 } 9011 9012 /* Node initialization. */ 9013 strcpy(a->name, name); 9014 a->st = args_struct_type; 9015 a->id = p->n_actions; 9016 9017 /* Instruction translation. */ 9018 err = instruction_config(p, a, instructions, n_instructions); 9019 if (err) { 9020 free(a->args_endianness); 9021 free(a); 9022 return err; 9023 } 9024 9025 /* Node add to tailq. */ 9026 TAILQ_INSERT_TAIL(&p->actions, a, node); 9027 p->n_actions++; 9028 9029 return 0; 9030 } 9031 9032 static int 9033 action_build(struct rte_swx_pipeline *p) 9034 { 9035 struct action *action; 9036 9037 p->action_instructions = calloc(p->n_actions, 9038 sizeof(struct instruction *)); 9039 CHECK(p->action_instructions, ENOMEM); 9040 9041 TAILQ_FOREACH(action, &p->actions, node) 9042 p->action_instructions[action->id] = action->instructions; 9043 9044 return 0; 9045 } 9046 9047 static void 9048 action_build_free(struct rte_swx_pipeline *p) 9049 { 9050 free(p->action_instructions); 9051 p->action_instructions = NULL; 9052 } 9053 9054 static void 9055 action_free(struct rte_swx_pipeline *p) 9056 { 9057 action_build_free(p); 9058 9059 for ( ; ; ) { 9060 struct action *action; 9061 9062 action = TAILQ_FIRST(&p->actions); 9063 if (!action) 9064 break; 9065 9066 TAILQ_REMOVE(&p->actions, action, node); 9067 free(action->instructions); 9068 free(action); 9069 } 9070 } 9071 9072 static uint32_t 9073 action_arg_src_mov_count(struct action *a, 9074 uint32_t arg_id, 9075 struct instruction *instructions, 9076 struct instruction_data *instruction_data, 9077 uint32_t n_instructions) 9078 { 9079 uint32_t offset, n_users = 0, i; 9080 9081 if (!a->st || 9082 (arg_id >= a->st->n_fields) || 9083 !instructions || 9084 !instruction_data || 9085 !n_instructions) 9086 return 0; 9087 9088 offset = a->st->fields[arg_id].offset / 8; 9089 9090 for (i = 0; i < n_instructions; i++) { 9091 struct instruction *instr = &instructions[i]; 9092 struct instruction_data *data = &instruction_data[i]; 9093 9094 if (data->invalid || 9095 ((instr->type != INSTR_MOV) && (instr->type != INSTR_MOV_HM)) || 9096 instr->mov.src.struct_id || 9097 (instr->mov.src.offset != offset)) 9098 continue; 9099 9100 n_users++; 9101 } 9102 9103 return n_users; 9104 } 9105 9106 /* 9107 * Table. 
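 *
 * Illustrative configuration sketch (not part of the original sources): one
 * possible way to declare a table through rte_swx_pipeline_table_config().
 * The header field name, table name and action names below are placeholders;
 * the actions are assumed to have been registered earlier with
 * rte_swx_pipeline_action_config().
 *
 *	struct rte_swx_match_field_params fields[] = {
 *		[0] = {
 *			.name = "h.ipv4.dst_addr",
 *			.match_type = RTE_SWX_TABLE_MATCH_EXACT,
 *		},
 *	};
 *	const char *action_names[] = {"fwd", "drop"};
 *	struct rte_swx_pipeline_table_params params = {
 *		.fields = fields,
 *		.n_fields = RTE_DIM(fields),
 *		.action_names = action_names,
 *		.n_actions = RTE_DIM(action_names),
 *		.default_action_name = "drop",
 *		.default_action_data = NULL,
 *		.default_action_is_const = 0,
 *	};
 *
 *	status = rte_swx_pipeline_table_config(p, "routing", &params,
 *					       NULL, NULL, 4096);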
9108 */ 9109 static struct table_type * 9110 table_type_find(struct rte_swx_pipeline *p, const char *name) 9111 { 9112 struct table_type *elem; 9113 9114 TAILQ_FOREACH(elem, &p->table_types, node) 9115 if (strcmp(elem->name, name) == 0) 9116 return elem; 9117 9118 return NULL; 9119 } 9120 9121 static struct table_type * 9122 table_type_resolve(struct rte_swx_pipeline *p, 9123 const char *recommended_type_name, 9124 enum rte_swx_table_match_type match_type) 9125 { 9126 struct table_type *elem; 9127 9128 /* Only consider the recommended type if the match type is correct. */ 9129 if (recommended_type_name) 9130 TAILQ_FOREACH(elem, &p->table_types, node) 9131 if (!strcmp(elem->name, recommended_type_name) && 9132 (elem->match_type == match_type)) 9133 return elem; 9134 9135 /* Ignore the recommended type and get the first element with this match 9136 * type. 9137 */ 9138 TAILQ_FOREACH(elem, &p->table_types, node) 9139 if (elem->match_type == match_type) 9140 return elem; 9141 9142 return NULL; 9143 } 9144 9145 static struct table * 9146 table_find(struct rte_swx_pipeline *p, const char *name) 9147 { 9148 struct table *elem; 9149 9150 TAILQ_FOREACH(elem, &p->tables, node) 9151 if (strcmp(elem->name, name) == 0) 9152 return elem; 9153 9154 return NULL; 9155 } 9156 9157 static struct table * 9158 table_find_by_id(struct rte_swx_pipeline *p, uint32_t id) 9159 { 9160 struct table *table = NULL; 9161 9162 TAILQ_FOREACH(table, &p->tables, node) 9163 if (table->id == id) 9164 return table; 9165 9166 return NULL; 9167 } 9168 9169 int 9170 rte_swx_pipeline_table_type_register(struct rte_swx_pipeline *p, 9171 const char *name, 9172 enum rte_swx_table_match_type match_type, 9173 struct rte_swx_table_ops *ops) 9174 { 9175 struct table_type *elem; 9176 9177 CHECK(p, EINVAL); 9178 9179 CHECK_NAME(name, EINVAL); 9180 CHECK(!table_type_find(p, name), EEXIST); 9181 9182 CHECK(ops, EINVAL); 9183 CHECK(ops->create, EINVAL); 9184 CHECK(ops->lkp, EINVAL); 9185 CHECK(ops->free, EINVAL); 9186 9187 /* Node allocation. */ 9188 elem = calloc(1, sizeof(struct table_type)); 9189 CHECK(elem, ENOMEM); 9190 9191 /* Node initialization. */ 9192 strcpy(elem->name, name); 9193 elem->match_type = match_type; 9194 memcpy(&elem->ops, ops, sizeof(*ops)); 9195 9196 /* Node add to tailq. */ 9197 TAILQ_INSERT_TAIL(&p->table_types, elem, node); 9198 9199 return 0; 9200 } 9201 9202 static int 9203 table_match_type_resolve(struct rte_swx_match_field_params *fields, 9204 uint32_t n_fields, 9205 enum rte_swx_table_match_type *match_type) 9206 { 9207 uint32_t n_fields_em = 0, n_fields_lpm = 0, i; 9208 9209 for (i = 0; i < n_fields; i++) { 9210 struct rte_swx_match_field_params *f = &fields[i]; 9211 9212 if (f->match_type == RTE_SWX_TABLE_MATCH_EXACT) 9213 n_fields_em++; 9214 9215 if (f->match_type == RTE_SWX_TABLE_MATCH_LPM) 9216 n_fields_lpm++; 9217 } 9218 9219 if ((n_fields_lpm > 1) || 9220 (n_fields_lpm && (n_fields_em != n_fields - 1))) 9221 return -EINVAL; 9222 9223 *match_type = (n_fields_em == n_fields) ? 9224 RTE_SWX_TABLE_MATCH_EXACT : 9225 RTE_SWX_TABLE_MATCH_WILDCARD; 9226 9227 return 0; 9228 } 9229 9230 static int 9231 table_match_fields_check(struct rte_swx_pipeline *p, 9232 struct rte_swx_pipeline_table_params *params, 9233 struct header **header) 9234 { 9235 struct header *h0 = NULL; 9236 struct field *hf, *mf; 9237 uint32_t *offset = NULL, i; 9238 int status = 0; 9239 9240 /* Return if no match fields. 
*/
9241 if (!params->n_fields) {
9242 if (params->fields) {
9243 status = -EINVAL;
9244 goto end;
9245 }
9246
9247 return 0;
9248 }
9249
9250 /* Memory allocation. */
9251 offset = calloc(params->n_fields, sizeof(uint32_t));
9252 if (!offset) {
9253 status = -ENOMEM;
9254 goto end;
9255 }
9256
9257 /* Check that all the match fields belong to either the same header or
9258 * to the meta-data.
9259 */
9260 hf = header_field_parse(p, params->fields[0].name, &h0);
9261 mf = metadata_field_parse(p, params->fields[0].name);
9262 if (!hf && !mf) {
9263 status = -EINVAL;
9264 goto end;
9265 }
9266
9267 offset[0] = h0 ? hf->offset : mf->offset;
9268
9269 for (i = 1; i < params->n_fields; i++)
9270 if (h0) {
9271 struct header *h;
9272
9273 hf = header_field_parse(p, params->fields[i].name, &h);
9274 if (!hf || (h->id != h0->id)) {
9275 status = -EINVAL;
9276 goto end;
9277 }
9278
9279 offset[i] = hf->offset;
9280 } else {
9281 mf = metadata_field_parse(p, params->fields[i].name);
9282 if (!mf) {
9283 status = -EINVAL;
9284 goto end;
9285 }
9286
9287 offset[i] = mf->offset;
9288 }
9289
9290 /* Check that there are no duplicated match fields. */
9291 for (i = 0; i < params->n_fields; i++) {
9292 uint32_t j;
9293
9294 for (j = 0; j < i; j++)
9295 if (offset[j] == offset[i]) {
9296 status = -EINVAL;
9297 goto end;
9298 }
9299 }
9300
9301 /* Return. */
9302 if (header)
9303 *header = h0;
9304
9305 end:
9306 free(offset);
9307 return status;
9308 }
9309
9310 int
9311 rte_swx_pipeline_table_config(struct rte_swx_pipeline *p,
9312 const char *name,
9313 struct rte_swx_pipeline_table_params *params,
9314 const char *recommended_table_type_name,
9315 const char *args,
9316 uint32_t size)
9317 {
9318 struct table_type *type;
9319 struct table *t;
9320 struct action *default_action;
9321 struct header *header = NULL;
9322 uint32_t action_data_size_max = 0, i;
9323 int status = 0;
9324
9325 CHECK(p, EINVAL);
9326
9327 CHECK_NAME(name, EINVAL);
9328 CHECK(!table_find(p, name), EEXIST);
9329 CHECK(!selector_find(p, name), EEXIST);
9330
9331 CHECK(params, EINVAL);
9332
9333 /* Match checks. */
9334 status = table_match_fields_check(p, params, &header);
9335 if (status)
9336 return status;
9337
9338 /* Action checks. */
9339 CHECK(params->n_actions, EINVAL);
9340 CHECK(params->action_names, EINVAL);
9341 for (i = 0; i < params->n_actions; i++) {
9342 const char *action_name = params->action_names[i];
9343 struct action *a;
9344 uint32_t action_data_size;
9345
9346 CHECK_NAME(action_name, EINVAL);
9347
9348 a = action_find(p, action_name);
9349 CHECK(a, EINVAL);
9350
9351 action_data_size = a->st ? a->st->n_bits / 8 : 0;
9352 if (action_data_size > action_data_size_max)
9353 action_data_size_max = action_data_size;
9354 }
9355
9356 CHECK_NAME(params->default_action_name, EINVAL);
9357 for (i = 0; i < params->n_actions; i++)
9358 if (!strcmp(params->action_names[i],
9359 params->default_action_name))
9360 break;
9361 CHECK(i < params->n_actions, EINVAL);
9362 default_action = action_find(p, params->default_action_name);
9363 CHECK((default_action->st && params->default_action_data) ||
9364 !params->default_action_data, EINVAL);
9365
9366 /* Table type checks.
*/
9367 if (recommended_table_type_name)
9368 CHECK_NAME(recommended_table_type_name, EINVAL);
9369
9370 if (params->n_fields) {
9371 enum rte_swx_table_match_type match_type;
9372
9373 status = table_match_type_resolve(params->fields, params->n_fields, &match_type);
9374 if (status)
9375 return status;
9376
9377 type = table_type_resolve(p, recommended_table_type_name, match_type);
9378 CHECK(type, EINVAL);
9379 } else {
9380 type = NULL;
9381 }
9382
9383 /* Memory allocation. */
9384 t = calloc(1, sizeof(struct table));
9385 CHECK(t, ENOMEM);
9386
9387 t->fields = calloc(params->n_fields, sizeof(struct match_field));
9388 if (!t->fields) {
9389 free(t);
9390 CHECK(0, ENOMEM);
9391 }
9392
9393 t->actions = calloc(params->n_actions, sizeof(struct action *));
9394 if (!t->actions) {
9395 free(t->fields);
9396 free(t);
9397 CHECK(0, ENOMEM);
9398 }
9399
9400 if (action_data_size_max) {
9401 t->default_action_data = calloc(1, action_data_size_max);
9402 if (!t->default_action_data) {
9403 free(t->actions);
9404 free(t->fields);
9405 free(t);
9406 CHECK(0, ENOMEM);
9407 }
9408 }
9409
9410 /* Node initialization. */
9411 strcpy(t->name, name);
9412 if (args && args[0])
9413 strcpy(t->args, args);
9414 t->type = type;
9415
9416 for (i = 0; i < params->n_fields; i++) {
9417 struct rte_swx_match_field_params *field = &params->fields[i];
9418 struct match_field *f = &t->fields[i];
9419
9420 f->match_type = field->match_type;
9421 f->field = header ?
9422 header_field_parse(p, field->name, NULL) :
9423 metadata_field_parse(p, field->name);
9424 }
9425 t->n_fields = params->n_fields;
9426 t->header = header;
9427
9428 for (i = 0; i < params->n_actions; i++)
9429 t->actions[i] = action_find(p, params->action_names[i]);
9430 t->default_action = default_action;
9431 if (default_action->st)
9432 memcpy(t->default_action_data,
9433 params->default_action_data,
9434 default_action->st->n_bits / 8);
9435 t->n_actions = params->n_actions;
9436 t->default_action_is_const = params->default_action_is_const;
9437 t->action_data_size_max = action_data_size_max;
9438
9439 t->size = size;
9440 t->id = p->n_tables;
9441
9442 /* Node add to tailq. */
9443 TAILQ_INSERT_TAIL(&p->tables, t, node);
9444 p->n_tables++;
9445
9446 return 0;
9447 }
9448
9449 static struct rte_swx_table_params *
9450 table_params_get(struct table *table)
9451 {
9452 struct rte_swx_table_params *params;
9453 struct field *first, *last;
9454 uint8_t *key_mask;
9455 uint32_t key_size, key_offset, action_data_size, i;
9456
9457 /* Memory allocation. */
9458 params = calloc(1, sizeof(struct rte_swx_table_params));
9459 if (!params)
9460 return NULL;
9461
9462 /* Find first (smallest offset) and last (biggest offset) match fields. */
9463 first = table->fields[0].field;
9464 last = table->fields[0].field;
9465
9466 for (i = 0; i < table->n_fields; i++) {
9467 struct field *f = table->fields[i].field;
9468
9469 if (f->offset < first->offset)
9470 first = f;
9471
9472 if (f->offset > last->offset)
9473 last = f;
9474 }
9475
9476 /* Key offset and size. */
9477 key_offset = first->offset / 8;
9478 key_size = (last->offset + last->n_bits - first->offset) / 8;
9479
9480 /* Memory allocation. */
9481 key_mask = calloc(1, key_size);
9482 if (!key_mask) {
9483 free(params);
9484 return NULL;
9485 }
9486
9487 /* Key mask.
*/ 9488 for (i = 0; i < table->n_fields; i++) { 9489 struct field *f = table->fields[i].field; 9490 uint32_t start = (f->offset - first->offset) / 8; 9491 size_t size = f->n_bits / 8; 9492 9493 memset(&key_mask[start], 0xFF, size); 9494 } 9495 9496 /* Action data size. */ 9497 action_data_size = 0; 9498 for (i = 0; i < table->n_actions; i++) { 9499 struct action *action = table->actions[i]; 9500 uint32_t ads = action->st ? action->st->n_bits / 8 : 0; 9501 9502 if (ads > action_data_size) 9503 action_data_size = ads; 9504 } 9505 9506 /* Fill in. */ 9507 params->match_type = table->type->match_type; 9508 params->key_size = key_size; 9509 params->key_offset = key_offset; 9510 params->key_mask0 = key_mask; 9511 params->action_data_size = action_data_size; 9512 params->n_keys_max = table->size; 9513 9514 return params; 9515 } 9516 9517 static void 9518 table_params_free(struct rte_swx_table_params *params) 9519 { 9520 if (!params) 9521 return; 9522 9523 free(params->key_mask0); 9524 free(params); 9525 } 9526 9527 static int 9528 table_stub_lkp(void *table __rte_unused, 9529 void *mailbox __rte_unused, 9530 uint8_t **key __rte_unused, 9531 uint64_t *action_id __rte_unused, 9532 uint8_t **action_data __rte_unused, 9533 int *hit) 9534 { 9535 *hit = 0; 9536 return 1; /* DONE. */ 9537 } 9538 9539 static int 9540 table_build(struct rte_swx_pipeline *p) 9541 { 9542 uint32_t i; 9543 9544 /* Per pipeline: table statistics. */ 9545 p->table_stats = calloc(p->n_tables, sizeof(struct table_statistics)); 9546 CHECK(p->table_stats, ENOMEM); 9547 9548 for (i = 0; i < p->n_tables; i++) { 9549 p->table_stats[i].n_pkts_action = calloc(p->n_actions, sizeof(uint64_t)); 9550 CHECK(p->table_stats[i].n_pkts_action, ENOMEM); 9551 } 9552 9553 /* Per thread: table runt-time. */ 9554 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) { 9555 struct thread *t = &p->threads[i]; 9556 struct table *table; 9557 9558 t->tables = calloc(p->n_tables, sizeof(struct table_runtime)); 9559 CHECK(t->tables, ENOMEM); 9560 9561 TAILQ_FOREACH(table, &p->tables, node) { 9562 struct table_runtime *r = &t->tables[table->id]; 9563 9564 if (table->type) { 9565 uint64_t size; 9566 9567 size = table->type->ops.mailbox_size_get(); 9568 9569 /* r->func. */ 9570 r->func = table->type->ops.lkp; 9571 9572 /* r->mailbox. */ 9573 if (size) { 9574 r->mailbox = calloc(1, size); 9575 CHECK(r->mailbox, ENOMEM); 9576 } 9577 9578 /* r->key. */ 9579 r->key = table->header ? 9580 &t->structs[table->header->struct_id] : 9581 &t->structs[p->metadata_struct_id]; 9582 } else { 9583 r->func = table_stub_lkp; 9584 } 9585 } 9586 } 9587 9588 return 0; 9589 } 9590 9591 static void 9592 table_build_free(struct rte_swx_pipeline *p) 9593 { 9594 uint32_t i; 9595 9596 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) { 9597 struct thread *t = &p->threads[i]; 9598 uint32_t j; 9599 9600 if (!t->tables) 9601 continue; 9602 9603 for (j = 0; j < p->n_tables; j++) { 9604 struct table_runtime *r = &t->tables[j]; 9605 9606 free(r->mailbox); 9607 } 9608 9609 free(t->tables); 9610 t->tables = NULL; 9611 } 9612 9613 if (p->table_stats) { 9614 for (i = 0; i < p->n_tables; i++) 9615 free(p->table_stats[i].n_pkts_action); 9616 9617 free(p->table_stats); 9618 } 9619 } 9620 9621 static void 9622 table_free(struct rte_swx_pipeline *p) 9623 { 9624 table_build_free(p); 9625 9626 /* Tables. 
*/ 9627 for ( ; ; ) { 9628 struct table *elem; 9629 9630 elem = TAILQ_FIRST(&p->tables); 9631 if (!elem) 9632 break; 9633 9634 TAILQ_REMOVE(&p->tables, elem, node); 9635 free(elem->fields); 9636 free(elem->actions); 9637 free(elem->default_action_data); 9638 free(elem); 9639 } 9640 9641 /* Table types. */ 9642 for ( ; ; ) { 9643 struct table_type *elem; 9644 9645 elem = TAILQ_FIRST(&p->table_types); 9646 if (!elem) 9647 break; 9648 9649 TAILQ_REMOVE(&p->table_types, elem, node); 9650 free(elem); 9651 } 9652 } 9653 9654 /* 9655 * Selector. 9656 */ 9657 static struct selector * 9658 selector_find(struct rte_swx_pipeline *p, const char *name) 9659 { 9660 struct selector *s; 9661 9662 TAILQ_FOREACH(s, &p->selectors, node) 9663 if (strcmp(s->name, name) == 0) 9664 return s; 9665 9666 return NULL; 9667 } 9668 9669 static struct selector * 9670 selector_find_by_id(struct rte_swx_pipeline *p, uint32_t id) 9671 { 9672 struct selector *s = NULL; 9673 9674 TAILQ_FOREACH(s, &p->selectors, node) 9675 if (s->id == id) 9676 return s; 9677 9678 return NULL; 9679 } 9680 9681 static int 9682 selector_fields_check(struct rte_swx_pipeline *p, 9683 struct rte_swx_pipeline_selector_params *params, 9684 struct header **header) 9685 { 9686 struct header *h0 = NULL; 9687 struct field *hf, *mf; 9688 uint32_t i; 9689 9690 /* Return if no selector fields. */ 9691 if (!params->n_selector_fields || !params->selector_field_names) 9692 return -EINVAL; 9693 9694 /* Check that all the selector fields either belong to the same header 9695 * or are all meta-data fields. 9696 */ 9697 hf = header_field_parse(p, params->selector_field_names[0], &h0); 9698 mf = metadata_field_parse(p, params->selector_field_names[0]); 9699 if (!hf && !mf) 9700 return -EINVAL; 9701 9702 for (i = 1; i < params->n_selector_fields; i++) 9703 if (h0) { 9704 struct header *h; 9705 9706 hf = header_field_parse(p, params->selector_field_names[i], &h); 9707 if (!hf || (h->id != h0->id)) 9708 return -EINVAL; 9709 } else { 9710 mf = metadata_field_parse(p, params->selector_field_names[i]); 9711 if (!mf) 9712 return -EINVAL; 9713 } 9714 9715 /* Check that there are no duplicated match fields. */ 9716 for (i = 0; i < params->n_selector_fields; i++) { 9717 const char *field_name = params->selector_field_names[i]; 9718 uint32_t j; 9719 9720 for (j = i + 1; j < params->n_selector_fields; j++) 9721 if (!strcmp(params->selector_field_names[j], field_name)) 9722 return -EINVAL; 9723 } 9724 9725 /* Return. 
*/ 9726 if (header) 9727 *header = h0; 9728 9729 return 0; 9730 } 9731 9732 int 9733 rte_swx_pipeline_selector_config(struct rte_swx_pipeline *p, 9734 const char *name, 9735 struct rte_swx_pipeline_selector_params *params) 9736 { 9737 struct selector *s; 9738 struct header *selector_header = NULL; 9739 struct field *group_id_field, *member_id_field; 9740 uint32_t i; 9741 int status = 0; 9742 9743 CHECK(p, EINVAL); 9744 9745 CHECK_NAME(name, EINVAL); 9746 CHECK(!table_find(p, name), EEXIST); 9747 CHECK(!selector_find(p, name), EEXIST); 9748 9749 CHECK(params, EINVAL); 9750 9751 CHECK_NAME(params->group_id_field_name, EINVAL); 9752 group_id_field = metadata_field_parse(p, params->group_id_field_name); 9753 CHECK(group_id_field, EINVAL); 9754 9755 for (i = 0; i < params->n_selector_fields; i++) { 9756 const char *field_name = params->selector_field_names[i]; 9757 9758 CHECK_NAME(field_name, EINVAL); 9759 } 9760 status = selector_fields_check(p, params, &selector_header); 9761 if (status) 9762 return status; 9763 9764 CHECK_NAME(params->member_id_field_name, EINVAL); 9765 member_id_field = metadata_field_parse(p, params->member_id_field_name); 9766 CHECK(member_id_field, EINVAL); 9767 9768 CHECK(params->n_groups_max, EINVAL); 9769 9770 CHECK(params->n_members_per_group_max, EINVAL); 9771 9772 /* Memory allocation. */ 9773 s = calloc(1, sizeof(struct selector)); 9774 if (!s) { 9775 status = -ENOMEM; 9776 goto error; 9777 } 9778 9779 s->selector_fields = calloc(params->n_selector_fields, sizeof(struct field *)); 9780 if (!s->selector_fields) { 9781 status = -ENOMEM; 9782 goto error; 9783 } 9784 9785 /* Node initialization. */ 9786 strcpy(s->name, name); 9787 9788 s->group_id_field = group_id_field; 9789 9790 for (i = 0; i < params->n_selector_fields; i++) { 9791 const char *field_name = params->selector_field_names[i]; 9792 9793 s->selector_fields[i] = selector_header ? 9794 header_field_parse(p, field_name, NULL) : 9795 metadata_field_parse(p, field_name); 9796 } 9797 9798 s->n_selector_fields = params->n_selector_fields; 9799 9800 s->selector_header = selector_header; 9801 9802 s->member_id_field = member_id_field; 9803 9804 s->n_groups_max = params->n_groups_max; 9805 9806 s->n_members_per_group_max = params->n_members_per_group_max; 9807 9808 s->id = p->n_selectors; 9809 9810 /* Node add to tailq. */ 9811 TAILQ_INSERT_TAIL(&p->selectors, s, node); 9812 p->n_selectors++; 9813 9814 return 0; 9815 9816 error: 9817 if (!s) 9818 return status; 9819 9820 free(s->selector_fields); 9821 9822 free(s); 9823 9824 return status; 9825 } 9826 9827 static void 9828 selector_params_free(struct rte_swx_table_selector_params *params) 9829 { 9830 if (!params) 9831 return; 9832 9833 free(params->selector_mask); 9834 9835 free(params); 9836 } 9837 9838 static struct rte_swx_table_selector_params * 9839 selector_table_params_get(struct selector *s) 9840 { 9841 struct rte_swx_table_selector_params *params = NULL; 9842 struct field *first, *last; 9843 uint32_t i; 9844 9845 /* Memory allocation. */ 9846 params = calloc(1, sizeof(struct rte_swx_table_selector_params)); 9847 if (!params) 9848 goto error; 9849 9850 /* Group ID. */ 9851 params->group_id_offset = s->group_id_field->offset / 8; 9852 9853 /* Find first (smallest offset) and last (biggest offset) selector fields. 
*/
9854 first = s->selector_fields[0];
9855 last = s->selector_fields[0];
9856
9857 for (i = 0; i < s->n_selector_fields; i++) {
9858 struct field *f = s->selector_fields[i];
9859
9860 if (f->offset < first->offset)
9861 first = f;
9862
9863 if (f->offset > last->offset)
9864 last = f;
9865 }
9866
9867 /* Selector offset and size. */
9868 params->selector_offset = first->offset / 8;
9869 params->selector_size = (last->offset + last->n_bits - first->offset) / 8;
9870
9871 /* Memory allocation. */
9872 params->selector_mask = calloc(1, params->selector_size);
9873 if (!params->selector_mask)
9874 goto error;
9875
9876 /* Selector mask. */
9877 for (i = 0; i < s->n_selector_fields; i++) {
9878 struct field *f = s->selector_fields[i];
9879 uint32_t start = (f->offset - first->offset) / 8;
9880 size_t size = f->n_bits / 8;
9881
9882 memset(&params->selector_mask[start], 0xFF, size);
9883 }
9884
9885 /* Member ID. */
9886 params->member_id_offset = s->member_id_field->offset / 8;
9887
9888 /* Maximum number of groups. */
9889 params->n_groups_max = s->n_groups_max;
9890
9891 /* Maximum number of members per group. */
9892 params->n_members_per_group_max = s->n_members_per_group_max;
9893
9894 return params;
9895
9896 error:
9897 selector_params_free(params);
9898 return NULL;
9899 }
9900
9901 static void
9902 selector_build_free(struct rte_swx_pipeline *p)
9903 {
9904 uint32_t i;
9905
9906 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
9907 struct thread *t = &p->threads[i];
9908 uint32_t j;
9909
9910 if (!t->selectors)
9911 continue;
9912
9913 for (j = 0; j < p->n_selectors; j++) {
9914 struct selector_runtime *r = &t->selectors[j];
9915
9916 free(r->mailbox);
9917 }
9918
9919 free(t->selectors);
9920 t->selectors = NULL;
9921 }
9922
9923 free(p->selector_stats);
9924 p->selector_stats = NULL;
9925 }
9926
9927 static int
9928 selector_build(struct rte_swx_pipeline *p)
9929 {
9930 uint32_t i;
9931 int status = 0;
9932
9933 /* Per pipeline: selector statistics. */
9934 p->selector_stats = calloc(p->n_selectors, sizeof(struct selector_statistics));
9935 if (!p->selector_stats) {
9936 status = -ENOMEM;
9937 goto error;
9938 }
9939
9940 /* Per thread: selector run-time. */
9941 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) {
9942 struct thread *t = &p->threads[i];
9943 struct selector *s;
9944
9945 t->selectors = calloc(p->n_selectors, sizeof(struct selector_runtime));
9946 if (!t->selectors) {
9947 status = -ENOMEM;
9948 goto error;
9949 }
9950
9951 TAILQ_FOREACH(s, &p->selectors, node) {
9952 struct selector_runtime *r = &t->selectors[s->id];
9953 uint64_t size;
9954
9955 /* r->mailbox. */
9956 size = rte_swx_table_selector_mailbox_size_get();
9957 if (size) {
9958 r->mailbox = calloc(1, size);
9959 if (!r->mailbox) {
9960 status = -ENOMEM;
9961 goto error;
9962 }
9963 }
9964
9965 /* r->group_id_buffer. */
9966 r->group_id_buffer = &t->structs[p->metadata_struct_id];
9967
9968 /* r->selector_buffer. */
9969 r->selector_buffer = s->selector_header ?
9970 &t->structs[s->selector_header->struct_id] :
9971 &t->structs[p->metadata_struct_id];
9972
9973 /* r->member_id_buffer. */
9974 r->member_id_buffer = &t->structs[p->metadata_struct_id];
9975 }
9976 }
9977
9978 return 0;
9979
9980 error:
9981 selector_build_free(p);
9982 return status;
9983 }
9984
9985 static void
9986 selector_free(struct rte_swx_pipeline *p)
9987 {
9988 selector_build_free(p);
9989
9990 /* Selector tables.
*/ 9991 for ( ; ; ) { 9992 struct selector *elem; 9993 9994 elem = TAILQ_FIRST(&p->selectors); 9995 if (!elem) 9996 break; 9997 9998 TAILQ_REMOVE(&p->selectors, elem, node); 9999 free(elem->selector_fields); 10000 free(elem); 10001 } 10002 } 10003 10004 /* 10005 * Table state. 10006 */ 10007 static int 10008 table_state_build(struct rte_swx_pipeline *p) 10009 { 10010 struct table *table; 10011 struct selector *s; 10012 10013 p->table_state = calloc(p->n_tables + p->n_selectors, 10014 sizeof(struct rte_swx_table_state)); 10015 CHECK(p->table_state, ENOMEM); 10016 10017 TAILQ_FOREACH(table, &p->tables, node) { 10018 struct rte_swx_table_state *ts = &p->table_state[table->id]; 10019 10020 if (table->type) { 10021 struct rte_swx_table_params *params; 10022 10023 /* ts->obj. */ 10024 params = table_params_get(table); 10025 CHECK(params, ENOMEM); 10026 10027 ts->obj = table->type->ops.create(params, 10028 NULL, 10029 table->args, 10030 p->numa_node); 10031 10032 table_params_free(params); 10033 CHECK(ts->obj, ENODEV); 10034 } 10035 10036 /* ts->default_action_data. */ 10037 if (table->action_data_size_max) { 10038 ts->default_action_data = 10039 malloc(table->action_data_size_max); 10040 CHECK(ts->default_action_data, ENOMEM); 10041 10042 memcpy(ts->default_action_data, 10043 table->default_action_data, 10044 table->action_data_size_max); 10045 } 10046 10047 /* ts->default_action_id. */ 10048 ts->default_action_id = table->default_action->id; 10049 } 10050 10051 TAILQ_FOREACH(s, &p->selectors, node) { 10052 struct rte_swx_table_state *ts = &p->table_state[p->n_tables + s->id]; 10053 struct rte_swx_table_selector_params *params; 10054 10055 /* ts->obj. */ 10056 params = selector_table_params_get(s); 10057 CHECK(params, ENOMEM); 10058 10059 ts->obj = rte_swx_table_selector_create(params, NULL, p->numa_node); 10060 10061 selector_params_free(params); 10062 CHECK(ts->obj, ENODEV); 10063 } 10064 10065 return 0; 10066 } 10067 10068 static void 10069 table_state_build_free(struct rte_swx_pipeline *p) 10070 { 10071 uint32_t i; 10072 10073 if (!p->table_state) 10074 return; 10075 10076 for (i = 0; i < p->n_tables; i++) { 10077 struct rte_swx_table_state *ts = &p->table_state[i]; 10078 struct table *table = table_find_by_id(p, i); 10079 10080 /* ts->obj. */ 10081 if (table->type && ts->obj) 10082 table->type->ops.free(ts->obj); 10083 10084 /* ts->default_action_data. */ 10085 free(ts->default_action_data); 10086 } 10087 10088 for (i = 0; i < p->n_selectors; i++) { 10089 struct rte_swx_table_state *ts = &p->table_state[p->n_tables + i]; 10090 10091 /* ts->obj. */ 10092 if (ts->obj) 10093 rte_swx_table_selector_free(ts->obj); 10094 } 10095 10096 free(p->table_state); 10097 p->table_state = NULL; 10098 } 10099 10100 static void 10101 table_state_free(struct rte_swx_pipeline *p) 10102 { 10103 table_state_build_free(p); 10104 } 10105 10106 /* 10107 * Register array. 
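 *
 * Illustrative usage sketch (not part of the original sources): a register
 * array is declared before rte_swx_pipeline_build() and accessed through the
 * control API once the pipeline is built. The array name is a placeholder;
 * the size is rounded up internally to the next power of two.
 *
 *	uint64_t value;
 *
 *	status = rte_swx_pipeline_regarray_config(p, "counters", 1024, 0);
 *
 *	... pipeline build and run ...
 *
 *	status = rte_swx_ctl_pipeline_regarray_write(p, "counters", 10, 1);
 *	status = rte_swx_ctl_pipeline_regarray_read(p, "counters", 10, &value);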
10108 */ 10109 static struct regarray * 10110 regarray_find(struct rte_swx_pipeline *p, const char *name) 10111 { 10112 struct regarray *elem; 10113 10114 TAILQ_FOREACH(elem, &p->regarrays, node) 10115 if (!strcmp(elem->name, name)) 10116 return elem; 10117 10118 return NULL; 10119 } 10120 10121 static struct regarray * 10122 regarray_find_by_id(struct rte_swx_pipeline *p, uint32_t id) 10123 { 10124 struct regarray *elem = NULL; 10125 10126 TAILQ_FOREACH(elem, &p->regarrays, node) 10127 if (elem->id == id) 10128 return elem; 10129 10130 return NULL; 10131 } 10132 10133 int 10134 rte_swx_pipeline_regarray_config(struct rte_swx_pipeline *p, 10135 const char *name, 10136 uint32_t size, 10137 uint64_t init_val) 10138 { 10139 struct regarray *r; 10140 10141 CHECK(p, EINVAL); 10142 10143 CHECK_NAME(name, EINVAL); 10144 CHECK(!regarray_find(p, name), EEXIST); 10145 10146 CHECK(size, EINVAL); 10147 size = rte_align32pow2(size); 10148 10149 /* Memory allocation. */ 10150 r = calloc(1, sizeof(struct regarray)); 10151 CHECK(r, ENOMEM); 10152 10153 /* Node initialization. */ 10154 strcpy(r->name, name); 10155 r->init_val = init_val; 10156 r->size = size; 10157 r->id = p->n_regarrays; 10158 10159 /* Node add to tailq. */ 10160 TAILQ_INSERT_TAIL(&p->regarrays, r, node); 10161 p->n_regarrays++; 10162 10163 return 0; 10164 } 10165 10166 static int 10167 regarray_build(struct rte_swx_pipeline *p) 10168 { 10169 struct regarray *regarray; 10170 10171 if (!p->n_regarrays) 10172 return 0; 10173 10174 p->regarray_runtime = calloc(p->n_regarrays, sizeof(struct regarray_runtime)); 10175 CHECK(p->regarray_runtime, ENOMEM); 10176 10177 TAILQ_FOREACH(regarray, &p->regarrays, node) { 10178 struct regarray_runtime *r = &p->regarray_runtime[regarray->id]; 10179 uint32_t i; 10180 10181 r->regarray = env_malloc(regarray->size * sizeof(uint64_t), 10182 RTE_CACHE_LINE_SIZE, 10183 p->numa_node); 10184 CHECK(r->regarray, ENOMEM); 10185 10186 if (regarray->init_val) 10187 for (i = 0; i < regarray->size; i++) 10188 r->regarray[i] = regarray->init_val; 10189 10190 r->size_mask = regarray->size - 1; 10191 } 10192 10193 return 0; 10194 } 10195 10196 static void 10197 regarray_build_free(struct rte_swx_pipeline *p) 10198 { 10199 uint32_t i; 10200 10201 if (!p->regarray_runtime) 10202 return; 10203 10204 for (i = 0; i < p->n_regarrays; i++) { 10205 struct regarray *regarray = regarray_find_by_id(p, i); 10206 struct regarray_runtime *r = &p->regarray_runtime[i]; 10207 10208 env_free(r->regarray, regarray->size * sizeof(uint64_t)); 10209 } 10210 10211 free(p->regarray_runtime); 10212 p->regarray_runtime = NULL; 10213 } 10214 10215 static void 10216 regarray_free(struct rte_swx_pipeline *p) 10217 { 10218 regarray_build_free(p); 10219 10220 for ( ; ; ) { 10221 struct regarray *elem; 10222 10223 elem = TAILQ_FIRST(&p->regarrays); 10224 if (!elem) 10225 break; 10226 10227 TAILQ_REMOVE(&p->regarrays, elem, node); 10228 free(elem); 10229 } 10230 } 10231 10232 /* 10233 * Meter array. 
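 *
 * Illustrative usage sketch (not part of the original sources): a meter array
 * is declared before rte_swx_pipeline_build(); once the pipeline is built, a
 * trTCM profile is registered and attached to one of the meters, whose
 * per-color statistics can then be read. Names and rates are placeholders.
 *
 *	struct rte_meter_trtcm_params profile_params = {
 *		.cir = 1000000, .pir = 2000000, .cbs = 2048, .pbs = 2048,
 *	};
 *	struct rte_swx_ctl_meter_stats stats;
 *
 *	status = rte_swx_pipeline_metarray_config(p, "meters", 256);
 *
 *	... pipeline build and run ...
 *
 *	status = rte_swx_ctl_meter_profile_add(p, "profile0", &profile_params);
 *	status = rte_swx_ctl_meter_set(p, "meters", 7, "profile0");
 *	status = rte_swx_ctl_meter_stats_read(p, "meters", 7, &stats);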
10234 */ 10235 static struct meter_profile * 10236 meter_profile_find(struct rte_swx_pipeline *p, const char *name) 10237 { 10238 struct meter_profile *elem; 10239 10240 TAILQ_FOREACH(elem, &p->meter_profiles, node) 10241 if (!strcmp(elem->name, name)) 10242 return elem; 10243 10244 return NULL; 10245 } 10246 10247 static struct metarray * 10248 metarray_find(struct rte_swx_pipeline *p, const char *name) 10249 { 10250 struct metarray *elem; 10251 10252 TAILQ_FOREACH(elem, &p->metarrays, node) 10253 if (!strcmp(elem->name, name)) 10254 return elem; 10255 10256 return NULL; 10257 } 10258 10259 static struct metarray * 10260 metarray_find_by_id(struct rte_swx_pipeline *p, uint32_t id) 10261 { 10262 struct metarray *elem = NULL; 10263 10264 TAILQ_FOREACH(elem, &p->metarrays, node) 10265 if (elem->id == id) 10266 return elem; 10267 10268 return NULL; 10269 } 10270 10271 int 10272 rte_swx_pipeline_metarray_config(struct rte_swx_pipeline *p, 10273 const char *name, 10274 uint32_t size) 10275 { 10276 struct metarray *m; 10277 10278 CHECK(p, EINVAL); 10279 10280 CHECK_NAME(name, EINVAL); 10281 CHECK(!metarray_find(p, name), EEXIST); 10282 10283 CHECK(size, EINVAL); 10284 size = rte_align32pow2(size); 10285 10286 /* Memory allocation. */ 10287 m = calloc(1, sizeof(struct metarray)); 10288 CHECK(m, ENOMEM); 10289 10290 /* Node initialization. */ 10291 strcpy(m->name, name); 10292 m->size = size; 10293 m->id = p->n_metarrays; 10294 10295 /* Node add to tailq. */ 10296 TAILQ_INSERT_TAIL(&p->metarrays, m, node); 10297 p->n_metarrays++; 10298 10299 return 0; 10300 } 10301 10302 struct meter_profile meter_profile_default = { 10303 .node = {0}, 10304 .name = "", 10305 .params = {0}, 10306 10307 .profile = { 10308 .cbs = 10000, 10309 .pbs = 10000, 10310 .cir_period = 1, 10311 .cir_bytes_per_period = 1, 10312 .pir_period = 1, 10313 .pir_bytes_per_period = 1, 10314 }, 10315 10316 .n_users = 0, 10317 }; 10318 10319 static void 10320 meter_init(struct meter *m) 10321 { 10322 memset(m, 0, sizeof(struct meter)); 10323 rte_meter_trtcm_config(&m->m, &meter_profile_default.profile); 10324 m->profile = &meter_profile_default; 10325 m->color_mask = RTE_COLOR_GREEN; 10326 10327 meter_profile_default.n_users++; 10328 } 10329 10330 static int 10331 metarray_build(struct rte_swx_pipeline *p) 10332 { 10333 struct metarray *m; 10334 10335 if (!p->n_metarrays) 10336 return 0; 10337 10338 p->metarray_runtime = calloc(p->n_metarrays, sizeof(struct metarray_runtime)); 10339 CHECK(p->metarray_runtime, ENOMEM); 10340 10341 TAILQ_FOREACH(m, &p->metarrays, node) { 10342 struct metarray_runtime *r = &p->metarray_runtime[m->id]; 10343 uint32_t i; 10344 10345 r->metarray = env_malloc(m->size * sizeof(struct meter), 10346 RTE_CACHE_LINE_SIZE, 10347 p->numa_node); 10348 CHECK(r->metarray, ENOMEM); 10349 10350 for (i = 0; i < m->size; i++) 10351 meter_init(&r->metarray[i]); 10352 10353 r->size_mask = m->size - 1; 10354 } 10355 10356 return 0; 10357 } 10358 10359 static void 10360 metarray_build_free(struct rte_swx_pipeline *p) 10361 { 10362 uint32_t i; 10363 10364 if (!p->metarray_runtime) 10365 return; 10366 10367 for (i = 0; i < p->n_metarrays; i++) { 10368 struct metarray *m = metarray_find_by_id(p, i); 10369 struct metarray_runtime *r = &p->metarray_runtime[i]; 10370 10371 env_free(r->metarray, m->size * sizeof(struct meter)); 10372 } 10373 10374 free(p->metarray_runtime); 10375 p->metarray_runtime = NULL; 10376 } 10377 10378 static void 10379 metarray_free(struct rte_swx_pipeline *p) 10380 { 10381 metarray_build_free(p); 10382 10383 
/* Meter arrays. */ 10384 for ( ; ; ) { 10385 struct metarray *elem; 10386 10387 elem = TAILQ_FIRST(&p->metarrays); 10388 if (!elem) 10389 break; 10390 10391 TAILQ_REMOVE(&p->metarrays, elem, node); 10392 free(elem); 10393 } 10394 10395 /* Meter profiles. */ 10396 for ( ; ; ) { 10397 struct meter_profile *elem; 10398 10399 elem = TAILQ_FIRST(&p->meter_profiles); 10400 if (!elem) 10401 break; 10402 10403 TAILQ_REMOVE(&p->meter_profiles, elem, node); 10404 free(elem); 10405 } 10406 } 10407 10408 /* 10409 * Pipeline. 10410 */ 10411 int 10412 rte_swx_pipeline_config(struct rte_swx_pipeline **p, int numa_node) 10413 { 10414 struct rte_swx_pipeline *pipeline; 10415 10416 /* Check input parameters. */ 10417 CHECK(p, EINVAL); 10418 10419 /* Memory allocation. */ 10420 pipeline = calloc(1, sizeof(struct rte_swx_pipeline)); 10421 CHECK(pipeline, ENOMEM); 10422 10423 /* Initialization. */ 10424 TAILQ_INIT(&pipeline->struct_types); 10425 TAILQ_INIT(&pipeline->port_in_types); 10426 TAILQ_INIT(&pipeline->ports_in); 10427 TAILQ_INIT(&pipeline->port_out_types); 10428 TAILQ_INIT(&pipeline->ports_out); 10429 TAILQ_INIT(&pipeline->extern_types); 10430 TAILQ_INIT(&pipeline->extern_objs); 10431 TAILQ_INIT(&pipeline->extern_funcs); 10432 TAILQ_INIT(&pipeline->headers); 10433 TAILQ_INIT(&pipeline->actions); 10434 TAILQ_INIT(&pipeline->table_types); 10435 TAILQ_INIT(&pipeline->tables); 10436 TAILQ_INIT(&pipeline->selectors); 10437 TAILQ_INIT(&pipeline->regarrays); 10438 TAILQ_INIT(&pipeline->meter_profiles); 10439 TAILQ_INIT(&pipeline->metarrays); 10440 10441 pipeline->n_structs = 1; /* Struct 0 is reserved for action_data. */ 10442 pipeline->numa_node = numa_node; 10443 10444 *p = pipeline; 10445 return 0; 10446 } 10447 10448 void 10449 rte_swx_pipeline_free(struct rte_swx_pipeline *p) 10450 { 10451 if (!p) 10452 return; 10453 10454 free(p->instructions); 10455 10456 metarray_free(p); 10457 regarray_free(p); 10458 table_state_free(p); 10459 selector_free(p); 10460 table_free(p); 10461 action_free(p); 10462 metadata_free(p); 10463 header_free(p); 10464 extern_func_free(p); 10465 extern_obj_free(p); 10466 port_out_free(p); 10467 port_in_free(p); 10468 struct_free(p); 10469 10470 free(p); 10471 } 10472 10473 int 10474 rte_swx_pipeline_instructions_config(struct rte_swx_pipeline *p, 10475 const char **instructions, 10476 uint32_t n_instructions) 10477 { 10478 int err; 10479 uint32_t i; 10480 10481 err = instruction_config(p, NULL, instructions, n_instructions); 10482 if (err) 10483 return err; 10484 10485 /* Thread instruction pointer reset. 
*/ 10486 for (i = 0; i < RTE_SWX_PIPELINE_THREADS_MAX; i++) { 10487 struct thread *t = &p->threads[i]; 10488 10489 thread_ip_reset(p, t); 10490 } 10491 10492 return 0; 10493 } 10494 10495 int 10496 rte_swx_pipeline_build(struct rte_swx_pipeline *p) 10497 { 10498 int status; 10499 10500 CHECK(p, EINVAL); 10501 CHECK(p->build_done == 0, EEXIST); 10502 10503 status = port_in_build(p); 10504 if (status) 10505 goto error; 10506 10507 status = port_out_build(p); 10508 if (status) 10509 goto error; 10510 10511 status = struct_build(p); 10512 if (status) 10513 goto error; 10514 10515 status = extern_obj_build(p); 10516 if (status) 10517 goto error; 10518 10519 status = extern_func_build(p); 10520 if (status) 10521 goto error; 10522 10523 status = header_build(p); 10524 if (status) 10525 goto error; 10526 10527 status = metadata_build(p); 10528 if (status) 10529 goto error; 10530 10531 status = action_build(p); 10532 if (status) 10533 goto error; 10534 10535 status = table_build(p); 10536 if (status) 10537 goto error; 10538 10539 status = selector_build(p); 10540 if (status) 10541 goto error; 10542 10543 status = table_state_build(p); 10544 if (status) 10545 goto error; 10546 10547 status = regarray_build(p); 10548 if (status) 10549 goto error; 10550 10551 status = metarray_build(p); 10552 if (status) 10553 goto error; 10554 10555 p->build_done = 1; 10556 return 0; 10557 10558 error: 10559 metarray_build_free(p); 10560 regarray_build_free(p); 10561 table_state_build_free(p); 10562 selector_build_free(p); 10563 table_build_free(p); 10564 action_build_free(p); 10565 metadata_build_free(p); 10566 header_build_free(p); 10567 extern_func_build_free(p); 10568 extern_obj_build_free(p); 10569 port_out_build_free(p); 10570 port_in_build_free(p); 10571 struct_build_free(p); 10572 10573 return status; 10574 } 10575 10576 void 10577 rte_swx_pipeline_run(struct rte_swx_pipeline *p, uint32_t n_instructions) 10578 { 10579 uint32_t i; 10580 10581 for (i = 0; i < n_instructions; i++) 10582 instr_exec(p); 10583 } 10584 10585 void 10586 rte_swx_pipeline_flush(struct rte_swx_pipeline *p) 10587 { 10588 uint32_t i; 10589 10590 for (i = 0; i < p->n_ports_out; i++) { 10591 struct port_out_runtime *port = &p->out[i]; 10592 10593 if (port->flush) 10594 port->flush(port->obj); 10595 } 10596 } 10597 10598 /* 10599 * Control. 
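 *
 * Illustrative usage sketch (not part of the original sources): once the
 * pipeline is built, the control API can be used to query its dimensions and
 * to read per-table statistics. The table name is a placeholder; the
 * per-action counter array is allocated by the caller with one entry per
 * pipeline action.
 *
 *	struct rte_swx_ctl_pipeline_info info;
 *	struct rte_swx_table_stats stats;
 *
 *	status = rte_swx_ctl_pipeline_info_get(p, &info);
 *
 *	stats.n_pkts_action = calloc(info.n_actions, sizeof(uint64_t));
 *	status = rte_swx_ctl_pipeline_table_stats_read(p, "routing", &stats);
 *	printf("hit %" PRIu64 " miss %" PRIu64 "\n",
 *	       stats.n_pkts_hit, stats.n_pkts_miss);
 *	free(stats.n_pkts_action);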
10600 */ 10601 int 10602 rte_swx_ctl_pipeline_info_get(struct rte_swx_pipeline *p, 10603 struct rte_swx_ctl_pipeline_info *pipeline) 10604 { 10605 struct action *action; 10606 struct table *table; 10607 uint32_t n_actions = 0, n_tables = 0; 10608 10609 if (!p || !pipeline) 10610 return -EINVAL; 10611 10612 TAILQ_FOREACH(action, &p->actions, node) 10613 n_actions++; 10614 10615 TAILQ_FOREACH(table, &p->tables, node) 10616 n_tables++; 10617 10618 pipeline->n_ports_in = p->n_ports_in; 10619 pipeline->n_ports_out = p->n_ports_out; 10620 pipeline->n_actions = n_actions; 10621 pipeline->n_tables = n_tables; 10622 pipeline->n_selectors = p->n_selectors; 10623 pipeline->n_regarrays = p->n_regarrays; 10624 pipeline->n_metarrays = p->n_metarrays; 10625 10626 return 0; 10627 } 10628 10629 int 10630 rte_swx_ctl_pipeline_numa_node_get(struct rte_swx_pipeline *p, int *numa_node) 10631 { 10632 if (!p || !numa_node) 10633 return -EINVAL; 10634 10635 *numa_node = p->numa_node; 10636 return 0; 10637 } 10638 10639 int 10640 rte_swx_ctl_action_info_get(struct rte_swx_pipeline *p, 10641 uint32_t action_id, 10642 struct rte_swx_ctl_action_info *action) 10643 { 10644 struct action *a = NULL; 10645 10646 if (!p || (action_id >= p->n_actions) || !action) 10647 return -EINVAL; 10648 10649 a = action_find_by_id(p, action_id); 10650 if (!a) 10651 return -EINVAL; 10652 10653 strcpy(action->name, a->name); 10654 action->n_args = a->st ? a->st->n_fields : 0; 10655 return 0; 10656 } 10657 10658 int 10659 rte_swx_ctl_action_arg_info_get(struct rte_swx_pipeline *p, 10660 uint32_t action_id, 10661 uint32_t action_arg_id, 10662 struct rte_swx_ctl_action_arg_info *action_arg) 10663 { 10664 struct action *a = NULL; 10665 struct field *arg = NULL; 10666 10667 if (!p || (action_id >= p->n_actions) || !action_arg) 10668 return -EINVAL; 10669 10670 a = action_find_by_id(p, action_id); 10671 if (!a || !a->st || (action_arg_id >= a->st->n_fields)) 10672 return -EINVAL; 10673 10674 arg = &a->st->fields[action_arg_id]; 10675 strcpy(action_arg->name, arg->name); 10676 action_arg->n_bits = arg->n_bits; 10677 action_arg->is_network_byte_order = a->args_endianness[action_arg_id]; 10678 10679 return 0; 10680 } 10681 10682 int 10683 rte_swx_ctl_table_info_get(struct rte_swx_pipeline *p, 10684 uint32_t table_id, 10685 struct rte_swx_ctl_table_info *table) 10686 { 10687 struct table *t = NULL; 10688 10689 if (!p || !table) 10690 return -EINVAL; 10691 10692 t = table_find_by_id(p, table_id); 10693 if (!t) 10694 return -EINVAL; 10695 10696 strcpy(table->name, t->name); 10697 strcpy(table->args, t->args); 10698 table->n_match_fields = t->n_fields; 10699 table->n_actions = t->n_actions; 10700 table->default_action_is_const = t->default_action_is_const; 10701 table->size = t->size; 10702 return 0; 10703 } 10704 10705 int 10706 rte_swx_ctl_table_match_field_info_get(struct rte_swx_pipeline *p, 10707 uint32_t table_id, 10708 uint32_t match_field_id, 10709 struct rte_swx_ctl_table_match_field_info *match_field) 10710 { 10711 struct table *t; 10712 struct match_field *f; 10713 10714 if (!p || (table_id >= p->n_tables) || !match_field) 10715 return -EINVAL; 10716 10717 t = table_find_by_id(p, table_id); 10718 if (!t || (match_field_id >= t->n_fields)) 10719 return -EINVAL; 10720 10721 f = &t->fields[match_field_id]; 10722 match_field->match_type = f->match_type; 10723 match_field->is_header = t->header ? 
1 : 0; 10724 match_field->n_bits = f->field->n_bits; 10725 match_field->offset = f->field->offset; 10726 10727 return 0; 10728 } 10729 10730 int 10731 rte_swx_ctl_table_action_info_get(struct rte_swx_pipeline *p, 10732 uint32_t table_id, 10733 uint32_t table_action_id, 10734 struct rte_swx_ctl_table_action_info *table_action) 10735 { 10736 struct table *t; 10737 10738 if (!p || (table_id >= p->n_tables) || !table_action) 10739 return -EINVAL; 10740 10741 t = table_find_by_id(p, table_id); 10742 if (!t || (table_action_id >= t->n_actions)) 10743 return -EINVAL; 10744 10745 table_action->action_id = t->actions[table_action_id]->id; 10746 10747 return 0; 10748 } 10749 10750 int 10751 rte_swx_ctl_table_ops_get(struct rte_swx_pipeline *p, 10752 uint32_t table_id, 10753 struct rte_swx_table_ops *table_ops, 10754 int *is_stub) 10755 { 10756 struct table *t; 10757 10758 if (!p || (table_id >= p->n_tables)) 10759 return -EINVAL; 10760 10761 t = table_find_by_id(p, table_id); 10762 if (!t) 10763 return -EINVAL; 10764 10765 if (t->type) { 10766 if (table_ops) 10767 memcpy(table_ops, &t->type->ops, sizeof(*table_ops)); 10768 *is_stub = 0; 10769 } else { 10770 *is_stub = 1; 10771 } 10772 10773 return 0; 10774 } 10775 10776 int 10777 rte_swx_ctl_selector_info_get(struct rte_swx_pipeline *p, 10778 uint32_t selector_id, 10779 struct rte_swx_ctl_selector_info *selector) 10780 { 10781 struct selector *s = NULL; 10782 10783 if (!p || !selector) 10784 return -EINVAL; 10785 10786 s = selector_find_by_id(p, selector_id); 10787 if (!s) 10788 return -EINVAL; 10789 10790 strcpy(selector->name, s->name); 10791 10792 selector->n_selector_fields = s->n_selector_fields; 10793 selector->n_groups_max = s->n_groups_max; 10794 selector->n_members_per_group_max = s->n_members_per_group_max; 10795 10796 return 0; 10797 } 10798 10799 int 10800 rte_swx_ctl_selector_group_id_field_info_get(struct rte_swx_pipeline *p, 10801 uint32_t selector_id, 10802 struct rte_swx_ctl_table_match_field_info *field) 10803 { 10804 struct selector *s; 10805 10806 if (!p || (selector_id >= p->n_selectors) || !field) 10807 return -EINVAL; 10808 10809 s = selector_find_by_id(p, selector_id); 10810 if (!s) 10811 return -EINVAL; 10812 10813 field->match_type = RTE_SWX_TABLE_MATCH_EXACT; 10814 field->is_header = 0; 10815 field->n_bits = s->group_id_field->n_bits; 10816 field->offset = s->group_id_field->offset; 10817 10818 return 0; 10819 } 10820 10821 int 10822 rte_swx_ctl_selector_field_info_get(struct rte_swx_pipeline *p, 10823 uint32_t selector_id, 10824 uint32_t selector_field_id, 10825 struct rte_swx_ctl_table_match_field_info *field) 10826 { 10827 struct selector *s; 10828 struct field *f; 10829 10830 if (!p || (selector_id >= p->n_selectors) || !field) 10831 return -EINVAL; 10832 10833 s = selector_find_by_id(p, selector_id); 10834 if (!s || (selector_field_id >= s->n_selector_fields)) 10835 return -EINVAL; 10836 10837 f = s->selector_fields[selector_field_id]; 10838 field->match_type = RTE_SWX_TABLE_MATCH_EXACT; 10839 field->is_header = s->selector_header ? 
1 : 0; 10840 field->n_bits = f->n_bits; 10841 field->offset = f->offset; 10842 10843 return 0; 10844 } 10845 10846 int 10847 rte_swx_ctl_selector_member_id_field_info_get(struct rte_swx_pipeline *p, 10848 uint32_t selector_id, 10849 struct rte_swx_ctl_table_match_field_info *field) 10850 { 10851 struct selector *s; 10852 10853 if (!p || (selector_id >= p->n_selectors) || !field) 10854 return -EINVAL; 10855 10856 s = selector_find_by_id(p, selector_id); 10857 if (!s) 10858 return -EINVAL; 10859 10860 field->match_type = RTE_SWX_TABLE_MATCH_EXACT; 10861 field->is_header = 0; 10862 field->n_bits = s->member_id_field->n_bits; 10863 field->offset = s->member_id_field->offset; 10864 10865 return 0; 10866 } 10867 10868 int 10869 rte_swx_pipeline_table_state_get(struct rte_swx_pipeline *p, 10870 struct rte_swx_table_state **table_state) 10871 { 10872 if (!p || !table_state || !p->build_done) 10873 return -EINVAL; 10874 10875 *table_state = p->table_state; 10876 return 0; 10877 } 10878 10879 int 10880 rte_swx_pipeline_table_state_set(struct rte_swx_pipeline *p, 10881 struct rte_swx_table_state *table_state) 10882 { 10883 if (!p || !table_state || !p->build_done) 10884 return -EINVAL; 10885 10886 p->table_state = table_state; 10887 return 0; 10888 } 10889 10890 int 10891 rte_swx_ctl_pipeline_port_in_stats_read(struct rte_swx_pipeline *p, 10892 uint32_t port_id, 10893 struct rte_swx_port_in_stats *stats) 10894 { 10895 struct port_in *port; 10896 10897 if (!p || !stats) 10898 return -EINVAL; 10899 10900 port = port_in_find(p, port_id); 10901 if (!port) 10902 return -EINVAL; 10903 10904 port->type->ops.stats_read(port->obj, stats); 10905 return 0; 10906 } 10907 10908 int 10909 rte_swx_ctl_pipeline_port_out_stats_read(struct rte_swx_pipeline *p, 10910 uint32_t port_id, 10911 struct rte_swx_port_out_stats *stats) 10912 { 10913 struct port_out *port; 10914 10915 if (!p || !stats) 10916 return -EINVAL; 10917 10918 port = port_out_find(p, port_id); 10919 if (!port) 10920 return -EINVAL; 10921 10922 port->type->ops.stats_read(port->obj, stats); 10923 return 0; 10924 } 10925 10926 int 10927 rte_swx_ctl_pipeline_table_stats_read(struct rte_swx_pipeline *p, 10928 const char *table_name, 10929 struct rte_swx_table_stats *stats) 10930 { 10931 struct table *table; 10932 struct table_statistics *table_stats; 10933 10934 if (!p || !table_name || !table_name[0] || !stats || !stats->n_pkts_action) 10935 return -EINVAL; 10936 10937 table = table_find(p, table_name); 10938 if (!table) 10939 return -EINVAL; 10940 10941 table_stats = &p->table_stats[table->id]; 10942 10943 memcpy(stats->n_pkts_action, 10944 table_stats->n_pkts_action, 10945 p->n_actions * sizeof(uint64_t)); 10946 10947 stats->n_pkts_hit = table_stats->n_pkts_hit[1]; 10948 stats->n_pkts_miss = table_stats->n_pkts_hit[0]; 10949 10950 return 0; 10951 } 10952 10953 int 10954 rte_swx_ctl_pipeline_selector_stats_read(struct rte_swx_pipeline *p, 10955 const char *selector_name, 10956 struct rte_swx_pipeline_selector_stats *stats) 10957 { 10958 struct selector *s; 10959 10960 if (!p || !selector_name || !selector_name[0] || !stats) 10961 return -EINVAL; 10962 10963 s = selector_find(p, selector_name); 10964 if (!s) 10965 return -EINVAL; 10966 10967 stats->n_pkts = p->selector_stats[s->id].n_pkts; 10968 10969 return 0; 10970 } 10971 10972 int 10973 rte_swx_ctl_regarray_info_get(struct rte_swx_pipeline *p, 10974 uint32_t regarray_id, 10975 struct rte_swx_ctl_regarray_info *regarray) 10976 { 10977 struct regarray *r; 10978 10979 if (!p || !regarray) 10980 return 
-EINVAL; 10981 10982 r = regarray_find_by_id(p, regarray_id); 10983 if (!r) 10984 return -EINVAL; 10985 10986 strcpy(regarray->name, r->name); 10987 regarray->size = r->size; 10988 return 0; 10989 } 10990 10991 int 10992 rte_swx_ctl_pipeline_regarray_read(struct rte_swx_pipeline *p, 10993 const char *regarray_name, 10994 uint32_t regarray_index, 10995 uint64_t *value) 10996 { 10997 struct regarray *regarray; 10998 struct regarray_runtime *r; 10999 11000 if (!p || !regarray_name || !value) 11001 return -EINVAL; 11002 11003 regarray = regarray_find(p, regarray_name); 11004 if (!regarray || (regarray_index >= regarray->size)) 11005 return -EINVAL; 11006 11007 r = &p->regarray_runtime[regarray->id]; 11008 *value = r->regarray[regarray_index]; 11009 return 0; 11010 } 11011 11012 int 11013 rte_swx_ctl_pipeline_regarray_write(struct rte_swx_pipeline *p, 11014 const char *regarray_name, 11015 uint32_t regarray_index, 11016 uint64_t value) 11017 { 11018 struct regarray *regarray; 11019 struct regarray_runtime *r; 11020 11021 if (!p || !regarray_name) 11022 return -EINVAL; 11023 11024 regarray = regarray_find(p, regarray_name); 11025 if (!regarray || (regarray_index >= regarray->size)) 11026 return -EINVAL; 11027 11028 r = &p->regarray_runtime[regarray->id]; 11029 r->regarray[regarray_index] = value; 11030 return 0; 11031 } 11032 11033 int 11034 rte_swx_ctl_metarray_info_get(struct rte_swx_pipeline *p, 11035 uint32_t metarray_id, 11036 struct rte_swx_ctl_metarray_info *metarray) 11037 { 11038 struct metarray *m; 11039 11040 if (!p || !metarray) 11041 return -EINVAL; 11042 11043 m = metarray_find_by_id(p, metarray_id); 11044 if (!m) 11045 return -EINVAL; 11046 11047 strcpy(metarray->name, m->name); 11048 metarray->size = m->size; 11049 return 0; 11050 } 11051 11052 int 11053 rte_swx_ctl_meter_profile_add(struct rte_swx_pipeline *p, 11054 const char *name, 11055 struct rte_meter_trtcm_params *params) 11056 { 11057 struct meter_profile *mp; 11058 int status; 11059 11060 CHECK(p, EINVAL); 11061 CHECK_NAME(name, EINVAL); 11062 CHECK(params, EINVAL); 11063 CHECK(!meter_profile_find(p, name), EEXIST); 11064 11065 /* Node allocation. */ 11066 mp = calloc(1, sizeof(struct meter_profile)); 11067 CHECK(mp, ENOMEM); 11068 11069 /* Node initialization. */ 11070 strcpy(mp->name, name); 11071 memcpy(&mp->params, params, sizeof(struct rte_meter_trtcm_params)); 11072 status = rte_meter_trtcm_profile_config(&mp->profile, params); 11073 if (status) { 11074 free(mp); 11075 CHECK(0, EINVAL); 11076 } 11077 11078 /* Node add to tailq. */ 11079 TAILQ_INSERT_TAIL(&p->meter_profiles, mp, node); 11080 11081 return 0; 11082 } 11083 11084 int 11085 rte_swx_ctl_meter_profile_delete(struct rte_swx_pipeline *p, 11086 const char *name) 11087 { 11088 struct meter_profile *mp; 11089 11090 CHECK(p, EINVAL); 11091 CHECK_NAME(name, EINVAL); 11092 11093 mp = meter_profile_find(p, name); 11094 CHECK(mp, EINVAL); 11095 CHECK(!mp->n_users, EBUSY); 11096 11097 /* Remove node from tailq. 
*/ 11098 TAILQ_REMOVE(&p->meter_profiles, mp, node); 11099 free(mp); 11100 11101 return 0; 11102 } 11103 11104 int 11105 rte_swx_ctl_meter_reset(struct rte_swx_pipeline *p, 11106 const char *metarray_name, 11107 uint32_t metarray_index) 11108 { 11109 struct meter_profile *mp_old; 11110 struct metarray *metarray; 11111 struct metarray_runtime *metarray_runtime; 11112 struct meter *m; 11113 11114 CHECK(p, EINVAL); 11115 CHECK_NAME(metarray_name, EINVAL); 11116 11117 metarray = metarray_find(p, metarray_name); 11118 CHECK(metarray, EINVAL); 11119 CHECK(metarray_index < metarray->size, EINVAL); 11120 11121 metarray_runtime = &p->metarray_runtime[metarray->id]; 11122 m = &metarray_runtime->metarray[metarray_index]; 11123 mp_old = m->profile; 11124 11125 meter_init(m); 11126 11127 mp_old->n_users--; 11128 11129 return 0; 11130 } 11131 11132 int 11133 rte_swx_ctl_meter_set(struct rte_swx_pipeline *p, 11134 const char *metarray_name, 11135 uint32_t metarray_index, 11136 const char *profile_name) 11137 { 11138 struct meter_profile *mp, *mp_old; 11139 struct metarray *metarray; 11140 struct metarray_runtime *metarray_runtime; 11141 struct meter *m; 11142 11143 CHECK(p, EINVAL); 11144 CHECK_NAME(metarray_name, EINVAL); 11145 11146 metarray = metarray_find(p, metarray_name); 11147 CHECK(metarray, EINVAL); 11148 CHECK(metarray_index < metarray->size, EINVAL); 11149 11150 mp = meter_profile_find(p, profile_name); 11151 CHECK(mp, EINVAL); 11152 11153 metarray_runtime = &p->metarray_runtime[metarray->id]; 11154 m = &metarray_runtime->metarray[metarray_index]; 11155 mp_old = m->profile; 11156 11157 memset(m, 0, sizeof(struct meter)); 11158 rte_meter_trtcm_config(&m->m, &mp->profile); 11159 m->profile = mp; 11160 m->color_mask = RTE_COLORS; 11161 11162 mp->n_users++; 11163 mp_old->n_users--; 11164 11165 return 0; 11166 } 11167 11168 int 11169 rte_swx_ctl_meter_stats_read(struct rte_swx_pipeline *p, 11170 const char *metarray_name, 11171 uint32_t metarray_index, 11172 struct rte_swx_ctl_meter_stats *stats) 11173 { 11174 struct metarray *metarray; 11175 struct metarray_runtime *metarray_runtime; 11176 struct meter *m; 11177 11178 CHECK(p, EINVAL); 11179 CHECK_NAME(metarray_name, EINVAL); 11180 11181 metarray = metarray_find(p, metarray_name); 11182 CHECK(metarray, EINVAL); 11183 CHECK(metarray_index < metarray->size, EINVAL); 11184 11185 CHECK(stats, EINVAL); 11186 11187 metarray_runtime = &p->metarray_runtime[metarray->id]; 11188 m = &metarray_runtime->metarray[metarray_index]; 11189 11190 memcpy(stats->n_pkts, m->n_pkts, sizeof(m->n_pkts)); 11191 memcpy(stats->n_bytes, m->n_bytes, sizeof(m->n_bytes)); 11192 11193 return 0; 11194 } 11195
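/*
 * Illustrative sketch (not part of the original sources, kept under #if 0):
 * one possible way to declare a selector table before the pipeline build. The
 * selector name and the meta-data field names are placeholders and must exist
 * in the pipeline meta-data structure.
 */
#if 0
static int
example_selector_config(struct rte_swx_pipeline *p)
{
	static const char *selector_field_names[] = {"m.hash"};
	struct rte_swx_pipeline_selector_params params = {
		.group_id_field_name = "m.group_id",
		.selector_field_names = selector_field_names,
		.n_selector_fields = RTE_DIM(selector_field_names),
		.member_id_field_name = "m.member_id",
		.n_groups_max = 64,
		.n_members_per_group_max = 16,
	};

	return rte_swx_pipeline_selector_config(p, "sel0", &params);
}
#endif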
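/*
 * Illustrative end-to-end sketch (not part of the original sources, kept
 * under #if 0): the typical call sequence around this library. Port, struct,
 * header, meta-data, action and table configuration is elided; the
 * instruction strings are placeholders that must follow the SWX pipeline
 * instruction syntax.
 */
#if 0
static int
example_pipeline_lifecycle(int numa_node)
{
	static const char *instructions[] = {
		"rx m.port_in",
		"tx m.port_out",
	};
	struct rte_swx_pipeline *p = NULL;
	int status;

	status = rte_swx_pipeline_config(&p, numa_node);
	if (status)
		return status;

	/* Input/output ports, headers, meta-data, actions and tables are
	 * configured here (omitted).
	 */

	status = rte_swx_pipeline_instructions_config(p, instructions,
						      RTE_DIM(instructions));
	if (status)
		goto error;

	status = rte_swx_pipeline_build(p);
	if (status)
		goto error;

	/* Data plane: each call executes the given number of instructions. */
	rte_swx_pipeline_run(p, 1000000);
	rte_swx_pipeline_flush(p);

	rte_swx_pipeline_free(p);
	return 0;

error:
	rte_swx_pipeline_free(p);
	return status;
}
#endif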