1 /* SPDX-License-Identifier: BSD-3-Clause 2 * Copyright(c) 2018 Intel Corporation 3 */ 4 5 #include <stdio.h> 6 #include <string.h> 7 #include <stdint.h> 8 #include <inttypes.h> 9 10 #include <rte_memory.h> 11 #include <rte_debug.h> 12 #include <rte_hexdump.h> 13 #include <rte_malloc.h> 14 #include <rte_random.h> 15 #include <rte_byteorder.h> 16 #include <rte_errno.h> 17 #include "test.h" 18 19 #if !defined(RTE_LIB_BPF) 20 21 static int 22 test_bpf(void) 23 { 24 printf("BPF not supported, skipping test\n"); 25 return TEST_SKIPPED; 26 } 27 28 #else 29 30 #include <rte_bpf.h> 31 #include <rte_ether.h> 32 #include <rte_ip.h> 33 34 35 /* 36 * Basic functional tests for librte_bpf. 37 * The main procedure - load eBPF program, execute it and 38 * compare results with expected values. 39 */ 40 41 struct dummy_offset { 42 RTE_ATOMIC(uint64_t) u64; 43 RTE_ATOMIC(uint32_t) u32; 44 uint16_t u16; 45 uint8_t u8; 46 }; 47 48 struct dummy_vect8 { 49 struct dummy_offset in[8]; 50 struct dummy_offset out[8]; 51 }; 52 53 struct dummy_net { 54 struct rte_ether_hdr eth_hdr; 55 struct rte_vlan_hdr vlan_hdr; 56 struct rte_ipv4_hdr ip_hdr; 57 }; 58 59 #define DUMMY_MBUF_NUM 2 60 61 /* first mbuf in the packet, should always be at offset 0 */ 62 struct dummy_mbuf { 63 struct rte_mbuf mb[DUMMY_MBUF_NUM]; 64 uint8_t buf[DUMMY_MBUF_NUM][RTE_MBUF_DEFAULT_BUF_SIZE]; 65 }; 66 67 #define TEST_FILL_1 0xDEADBEEF 68 69 #define TEST_MUL_1 21 70 #define TEST_MUL_2 -100 71 72 #define TEST_SHIFT_1 15 73 #define TEST_SHIFT_2 33 74 75 #define TEST_SHIFT32_MASK (CHAR_BIT * sizeof(uint32_t) - 1) 76 #define TEST_SHIFT64_MASK (CHAR_BIT * sizeof(uint64_t) - 1) 77 78 #define TEST_JCC_1 0 79 #define TEST_JCC_2 -123 80 #define TEST_JCC_3 5678 81 #define TEST_JCC_4 TEST_FILL_1 82 83 #define TEST_IMM_1 UINT64_MAX 84 #define TEST_IMM_2 ((uint64_t)INT64_MIN) 85 #define TEST_IMM_3 ((uint64_t)INT64_MAX + INT32_MAX) 86 #define TEST_IMM_4 ((uint64_t)UINT32_MAX) 87 #define TEST_IMM_5 ((uint64_t)UINT32_MAX + 1) 88 89 #define TEST_MEMFROB 0x2a2a2a2a 90 91 #define STRING_GEEK 0x6B656567 92 #define STRING_WEEK 0x6B656577 93 94 #define TEST_NETMASK 0xffffff00 95 #define TEST_SUBNET 0xaca80200 96 97 uint8_t src_mac[] = { 0x00, 0xFF, 0xAA, 0xFF, 0xAA, 0xFF }; 98 uint8_t dst_mac[] = { 0x00, 0xAA, 0xFF, 0xAA, 0xFF, 0xAA }; 99 100 uint32_t ip_src_addr = (172U << 24) | (168U << 16) | (2 << 8) | 1; 101 uint32_t ip_dst_addr = (172U << 24) | (168U << 16) | (2 << 8) | 2; 102 103 struct bpf_test { 104 const char *name; 105 size_t arg_sz; 106 struct rte_bpf_prm prm; 107 void (*prepare)(void *); 108 int (*check_result)(uint64_t, const void *); 109 uint32_t allow_fail; 110 }; 111 112 /* 113 * Compare return value and result data with expected ones. 114 * Report a failure if they don't match. 115 */ 116 static int 117 cmp_res(const char *func, uint64_t exp_rc, uint64_t ret_rc, 118 const void *exp_res, const void *ret_res, size_t res_sz) 119 { 120 int32_t ret; 121 122 ret = 0; 123 if (exp_rc != ret_rc) { 124 printf("%s@%d: invalid return value, expected: 0x%" PRIx64 125 ",result: 0x%" PRIx64 "\n", 126 func, __LINE__, exp_rc, ret_rc); 127 ret |= -1; 128 } 129 130 if (memcmp(exp_res, ret_res, res_sz) != 0) { 131 printf("%s: invalid value\n", func); 132 rte_memdump(stdout, "expected", exp_res, res_sz); 133 rte_memdump(stdout, "result", ret_res, res_sz); 134 ret |= -1; 135 } 136 137 return ret; 138 } 139 140 /* store immediate test-cases */ 141 static const struct ebpf_insn test_store1_prog[] = { 142 { 143 .code = (BPF_ST | BPF_MEM | BPF_B), 144 .dst_reg = EBPF_REG_1, 145 .off = offsetof(struct dummy_offset, u8), 146 .imm = TEST_FILL_1, 147 }, 148 { 149 .code = (BPF_ST | BPF_MEM | BPF_H), 150 .dst_reg = EBPF_REG_1, 151 .off = offsetof(struct dummy_offset, u16), 152 .imm = TEST_FILL_1, 153 }, 154 { 155 .code = (BPF_ST | BPF_MEM | BPF_W), 156 .dst_reg = EBPF_REG_1, 157 .off = offsetof(struct dummy_offset, u32), 158 .imm = TEST_FILL_1, 159 }, 160 { 161 .code = (BPF_ST | BPF_MEM | EBPF_DW), 162 .dst_reg = EBPF_REG_1, 163 .off = offsetof(struct dummy_offset, u64), 164 .imm = TEST_FILL_1, 165 }, 166 /* return 1 */ 167 { 168 .code = (BPF_ALU | EBPF_MOV | BPF_K), 169 .dst_reg = EBPF_REG_0, 170 .imm = 1, 171 }, 172 { 173 .code = (BPF_JMP | EBPF_EXIT), 174 }, 175 }; 176 177 static void 178 test_store1_prepare(void *arg) 179 { 180 struct dummy_offset *df; 181 182 df = arg; 183 memset(df, 0, sizeof(*df)); 184 } 185 186 static int 187 test_store1_check(uint64_t rc, const void *arg) 188 { 189 const struct dummy_offset *dft; 190 struct dummy_offset dfe; 191 192 dft = arg; 193 194 memset(&dfe, 0, sizeof(dfe)); 195 dfe.u64 = (int32_t)TEST_FILL_1; 196 dfe.u32 = dfe.u64; 197 dfe.u16 = dfe.u64; 198 dfe.u8 = dfe.u64; 199 200 return cmp_res(__func__, 1, rc, &dfe, dft, sizeof(dfe)); 201 } 202 203 /* store register test-cases */ 204 static const struct ebpf_insn test_store2_prog[] = { 205 206 { 207 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K), 208 .dst_reg = EBPF_REG_2, 209 .imm = TEST_FILL_1, 210 }, 211 { 212 .code = (BPF_STX | BPF_MEM | BPF_B), 213 .dst_reg = EBPF_REG_1, 214 .src_reg = EBPF_REG_2, 215 .off = offsetof(struct dummy_offset, u8), 216 }, 217 { 218 .code = (BPF_STX | BPF_MEM | BPF_H), 219 .dst_reg = EBPF_REG_1, 220 .src_reg = EBPF_REG_2, 221 .off = offsetof(struct dummy_offset, u16), 222 }, 223 { 224 .code = (BPF_STX | BPF_MEM | BPF_W), 225 .dst_reg = EBPF_REG_1, 226 .src_reg = EBPF_REG_2, 227 .off = offsetof(struct dummy_offset, u32), 228 }, 229 { 230 .code = (BPF_STX | BPF_MEM | EBPF_DW), 231 .dst_reg = EBPF_REG_1, 232 .src_reg = EBPF_REG_2, 233 .off = offsetof(struct dummy_offset, u64), 234 }, 235 /* return 1 */ 236 { 237 .code = (BPF_ALU | EBPF_MOV | BPF_K), 238 .dst_reg = EBPF_REG_0, 239 .imm = 1, 240 }, 241 { 242 .code = (BPF_JMP | EBPF_EXIT), 243 }, 244 }; 245 246 /* load test-cases */ 247 static const struct ebpf_insn test_load1_prog[] = { 248 249 { 250 .code = (BPF_LDX | BPF_MEM | BPF_B), 251 .dst_reg = EBPF_REG_2, 252 .src_reg = EBPF_REG_1, 253 .off = offsetof(struct dummy_offset, u8), 254 }, 255 { 256 .code = (BPF_LDX | BPF_MEM | BPF_H), 257 .dst_reg = EBPF_REG_3, 258 .src_reg = EBPF_REG_1, 259 .off = offsetof(struct dummy_offset, u16), 260 }, 261 { 262 .code = (BPF_LDX | BPF_MEM | BPF_W), 263 .dst_reg = EBPF_REG_4, 264 .src_reg = EBPF_REG_1, 265 .off = offsetof(struct dummy_offset, u32), 266 }, 267 { 268 .code = (BPF_LDX | BPF_MEM | EBPF_DW), 269 .dst_reg = EBPF_REG_0, 270 .src_reg = EBPF_REG_1, 271 .off = offsetof(struct dummy_offset, u64), 272 }, 273 /* return sum */ 274 { 275 .code = (EBPF_ALU64 | BPF_ADD | BPF_X), 276 .dst_reg = EBPF_REG_0, 277 .src_reg = EBPF_REG_4, 278 }, 279 { 280 .code = (EBPF_ALU64 | BPF_ADD | BPF_X), 281 .dst_reg = EBPF_REG_0, 282 .src_reg = EBPF_REG_3, 283 }, 284 { 285 .code = (EBPF_ALU64 | BPF_ADD | BPF_X), 286 .dst_reg = EBPF_REG_0, 287 .src_reg = EBPF_REG_2, 288 }, 289 { 290 .code = (BPF_JMP | EBPF_EXIT), 291 }, 292 }; 293 294 static void 295 test_load1_prepare(void *arg) 296 { 297 struct dummy_offset *df; 298 299 df = arg; 300 301 memset(df, 0, sizeof(*df)); 302 df->u64 = (int32_t)TEST_FILL_1; 303 df->u32 = df->u64; 304 df->u16 = df->u64; 305 df->u8 = df->u64; 306 } 307 308 static int 309 test_load1_check(uint64_t rc, const void *arg) 310 { 311 uint64_t v; 312 const struct dummy_offset *dft; 313 314 dft = arg; 315 v = dft->u64; 316 v += dft->u32; 317 v += dft->u16; 318 v += dft->u8; 319 320 return cmp_res(__func__, v, rc, dft, dft, sizeof(*dft)); 321 } 322 323 /* load immediate test-cases */ 324 static const struct ebpf_insn test_ldimm1_prog[] = { 325 326 { 327 .code = (BPF_LD | BPF_IMM | EBPF_DW), 328 .dst_reg = EBPF_REG_0, 329 .imm = (uint32_t)TEST_IMM_1, 330 }, 331 { 332 .imm = TEST_IMM_1 >> 32, 333 }, 334 { 335 .code = (BPF_LD | BPF_IMM | EBPF_DW), 336 .dst_reg = EBPF_REG_3, 337 .imm = (uint32_t)TEST_IMM_2, 338 }, 339 { 340 .imm = TEST_IMM_2 >> 32, 341 }, 342 { 343 .code = (BPF_LD | BPF_IMM | EBPF_DW), 344 .dst_reg = EBPF_REG_5, 345 .imm = (uint32_t)TEST_IMM_3, 346 }, 347 { 348 .imm = TEST_IMM_3 >> 32, 349 }, 350 { 351 .code = (BPF_LD | BPF_IMM | EBPF_DW), 352 .dst_reg = EBPF_REG_7, 353 .imm = (uint32_t)TEST_IMM_4, 354 }, 355 { 356 .imm = TEST_IMM_4 >> 32, 357 }, 358 { 359 .code = (BPF_LD | BPF_IMM | EBPF_DW), 360 .dst_reg = EBPF_REG_9, 361 .imm = (uint32_t)TEST_IMM_5, 362 }, 363 { 364 .imm = TEST_IMM_5 >> 32, 365 }, 366 /* return sum */ 367 { 368 .code = (EBPF_ALU64 | BPF_ADD | BPF_X), 369 .dst_reg = EBPF_REG_0, 370 .src_reg = EBPF_REG_3, 371 }, 372 { 373 .code = (EBPF_ALU64 | BPF_ADD | BPF_X), 374 .dst_reg = EBPF_REG_0, 375 .src_reg = EBPF_REG_5, 376 }, 377 { 378 .code = (EBPF_ALU64 | BPF_ADD | BPF_X), 379 .dst_reg = EBPF_REG_0, 380 .src_reg = EBPF_REG_7, 381 }, 382 { 383 .code = (EBPF_ALU64 | BPF_ADD | BPF_X), 384 .dst_reg = EBPF_REG_0, 385 .src_reg = EBPF_REG_9, 386 }, 387 { 388 .code = (BPF_JMP | EBPF_EXIT), 389 }, 390 }; 391 392 static int 393 test_ldimm1_check(uint64_t rc, const void *arg) 394 { 395 uint64_t v1, v2; 396 397 v1 = TEST_IMM_1; 398 v2 = TEST_IMM_2; 399 v1 += v2; 400 v2 = TEST_IMM_3; 401 v1 += v2; 402 v2 = TEST_IMM_4; 403 v1 += v2; 404 v2 = TEST_IMM_5; 405 v1 += v2; 406 407 return cmp_res(__func__, v1, rc, arg, arg, 0); 408 } 409 410 411 /* alu mul test-cases */ 412 static const struct ebpf_insn test_mul1_prog[] = { 413 414 { 415 .code = (BPF_LDX | BPF_MEM | BPF_W), 416 .dst_reg = EBPF_REG_2, 417 .src_reg = EBPF_REG_1, 418 .off = offsetof(struct dummy_vect8, in[0].u32), 419 }, 420 { 421 .code = (BPF_LDX | BPF_MEM | EBPF_DW), 422 .dst_reg = EBPF_REG_3, 423 .src_reg = EBPF_REG_1, 424 .off = offsetof(struct dummy_vect8, in[1].u64), 425 }, 426 { 427 .code = (BPF_LDX | BPF_MEM | BPF_W), 428 .dst_reg = EBPF_REG_4, 429 .src_reg = EBPF_REG_1, 430 .off = offsetof(struct dummy_vect8, in[2].u32), 431 }, 432 { 433 .code = (BPF_ALU | BPF_MUL | BPF_K), 434 .dst_reg = EBPF_REG_2, 435 .imm = TEST_MUL_1, 436 }, 437 { 438 .code = (EBPF_ALU64 | BPF_MUL | BPF_K), 439 .dst_reg = EBPF_REG_3, 440 .imm = TEST_MUL_2, 441 }, 442 { 443 .code = (BPF_ALU | BPF_MUL | BPF_X), 444 .dst_reg = EBPF_REG_4, 445 .src_reg = EBPF_REG_2, 446 }, 447 { 448 .code = (EBPF_ALU64 | BPF_MUL | BPF_X), 449 .dst_reg = EBPF_REG_4, 450 .src_reg = EBPF_REG_3, 451 }, 452 { 453 .code = (BPF_STX | BPF_MEM | EBPF_DW), 454 .dst_reg = EBPF_REG_1, 455 .src_reg = EBPF_REG_2, 456 .off = offsetof(struct dummy_vect8, out[0].u64), 457 }, 458 { 459 .code = (BPF_STX | BPF_MEM | EBPF_DW), 460 .dst_reg = EBPF_REG_1, 461 .src_reg = EBPF_REG_3, 462 .off = offsetof(struct dummy_vect8, out[1].u64), 463 }, 464 { 465 .code = (BPF_STX | BPF_MEM | EBPF_DW), 466 .dst_reg = EBPF_REG_1, 467 .src_reg = EBPF_REG_4, 468 .off = offsetof(struct dummy_vect8, out[2].u64), 469 }, 470 /* return 1 */ 471 { 472 .code = (BPF_ALU | EBPF_MOV | BPF_K), 473 .dst_reg = EBPF_REG_0, 474 .imm = 1, 475 }, 476 { 477 .code = (BPF_JMP | EBPF_EXIT), 478 }, 479 }; 480 481 static void 482 test_mul1_prepare(void *arg) 483 { 484 struct dummy_vect8 *dv; 485 uint64_t v; 486 487 dv = arg; 488 489 v = rte_rand(); 490 491 memset(dv, 0, sizeof(*dv)); 492 dv->in[0].u32 = v; 493 dv->in[1].u64 = v << 12 | v >> 6; 494 dv->in[2].u32 = -v; 495 } 496 497 static int 498 test_mul1_check(uint64_t rc, const void *arg) 499 { 500 uint64_t r2, r3, r4; 501 const struct dummy_vect8 *dvt; 502 struct dummy_vect8 dve; 503 504 dvt = arg; 505 memset(&dve, 0, sizeof(dve)); 506 507 r2 = dvt->in[0].u32; 508 r3 = dvt->in[1].u64; 509 r4 = dvt->in[2].u32; 510 511 r2 = (uint32_t)r2 * TEST_MUL_1; 512 r3 *= TEST_MUL_2; 513 r4 = (uint32_t)(r4 * r2); 514 r4 *= r3; 515 516 dve.out[0].u64 = r2; 517 dve.out[1].u64 = r3; 518 dve.out[2].u64 = r4; 519 520 return cmp_res(__func__, 1, rc, dve.out, dvt->out, sizeof(dve.out)); 521 } 522 523 /* alu shift test-cases */ 524 static const struct ebpf_insn test_shift1_prog[] = { 525 526 { 527 .code = (BPF_LDX | BPF_MEM | BPF_W), 528 .dst_reg = EBPF_REG_2, 529 .src_reg = EBPF_REG_1, 530 .off = offsetof(struct dummy_vect8, in[0].u32), 531 }, 532 { 533 .code = (BPF_LDX | BPF_MEM | EBPF_DW), 534 .dst_reg = EBPF_REG_3, 535 .src_reg = EBPF_REG_1, 536 .off = offsetof(struct dummy_vect8, in[1].u64), 537 }, 538 { 539 .code = (BPF_LDX | BPF_MEM | BPF_W), 540 .dst_reg = EBPF_REG_4, 541 .src_reg = EBPF_REG_1, 542 .off = offsetof(struct dummy_vect8, in[2].u32), 543 }, 544 { 545 .code = (BPF_ALU | BPF_LSH | BPF_K), 546 .dst_reg = EBPF_REG_2, 547 .imm = TEST_SHIFT_1, 548 }, 549 { 550 .code = (EBPF_ALU64 | EBPF_ARSH | BPF_K), 551 .dst_reg = EBPF_REG_3, 552 .imm = TEST_SHIFT_2, 553 }, 554 { 555 .code = (BPF_STX | BPF_MEM | EBPF_DW), 556 .dst_reg = EBPF_REG_1, 557 .src_reg = EBPF_REG_2, 558 .off = offsetof(struct dummy_vect8, out[0].u64), 559 }, 560 { 561 .code = (BPF_STX | BPF_MEM | EBPF_DW), 562 .dst_reg = EBPF_REG_1, 563 .src_reg = EBPF_REG_3, 564 .off = offsetof(struct dummy_vect8, out[1].u64), 565 }, 566 { 567 .code = (BPF_ALU | BPF_AND | BPF_K), 568 .dst_reg = EBPF_REG_4, 569 .imm = TEST_SHIFT64_MASK, 570 }, 571 { 572 .code = (EBPF_ALU64 | BPF_LSH | BPF_X), 573 .dst_reg = EBPF_REG_3, 574 .src_reg = EBPF_REG_4, 575 }, 576 { 577 .code = (BPF_ALU | BPF_AND | BPF_K), 578 .dst_reg = EBPF_REG_4, 579 .imm = TEST_SHIFT32_MASK, 580 }, 581 { 582 .code = (BPF_ALU | BPF_RSH | BPF_X), 583 .dst_reg = EBPF_REG_2, 584 .src_reg = EBPF_REG_4, 585 }, 586 { 587 .code = (BPF_STX | BPF_MEM | EBPF_DW), 588 .dst_reg = EBPF_REG_1, 589 .src_reg = EBPF_REG_2, 590 .off = offsetof(struct dummy_vect8, out[2].u64), 591 }, 592 { 593 .code = (BPF_STX | BPF_MEM | EBPF_DW), 594 .dst_reg = EBPF_REG_1, 595 .src_reg = EBPF_REG_3, 596 .off = offsetof(struct dummy_vect8, out[3].u64), 597 }, 598 { 599 .code = (BPF_LDX | BPF_MEM | BPF_W), 600 .dst_reg = EBPF_REG_2, 601 .src_reg = EBPF_REG_1, 602 .off = offsetof(struct dummy_vect8, in[0].u32), 603 }, 604 { 605 .code = (BPF_LDX | BPF_MEM | EBPF_DW), 606 .dst_reg = EBPF_REG_3, 607 .src_reg = EBPF_REG_1, 608 .off = offsetof(struct dummy_vect8, in[1].u64), 609 }, 610 { 611 .code = (BPF_LDX | BPF_MEM | BPF_W), 612 .dst_reg = EBPF_REG_4, 613 .src_reg = EBPF_REG_1, 614 .off = offsetof(struct dummy_vect8, in[2].u32), 615 }, 616 { 617 .code = (BPF_ALU | BPF_AND | BPF_K), 618 .dst_reg = EBPF_REG_2, 619 .imm = TEST_SHIFT64_MASK, 620 }, 621 { 622 .code = (EBPF_ALU64 | EBPF_ARSH | BPF_X), 623 .dst_reg = EBPF_REG_3, 624 .src_reg = EBPF_REG_2, 625 }, 626 { 627 .code = (BPF_ALU | BPF_AND | BPF_K), 628 .dst_reg = EBPF_REG_2, 629 .imm = TEST_SHIFT32_MASK, 630 }, 631 { 632 .code = (BPF_ALU | BPF_LSH | BPF_X), 633 .dst_reg = EBPF_REG_4, 634 .src_reg = EBPF_REG_2, 635 }, 636 { 637 .code = (BPF_STX | BPF_MEM | EBPF_DW), 638 .dst_reg = EBPF_REG_1, 639 .src_reg = EBPF_REG_4, 640 .off = offsetof(struct dummy_vect8, out[4].u64), 641 }, 642 { 643 .code = (BPF_STX | BPF_MEM | EBPF_DW), 644 .dst_reg = EBPF_REG_1, 645 .src_reg = EBPF_REG_3, 646 .off = offsetof(struct dummy_vect8, out[5].u64), 647 }, 648 /* return 1 */ 649 { 650 .code = (BPF_ALU | EBPF_MOV | BPF_K), 651 .dst_reg = EBPF_REG_0, 652 .imm = 1, 653 }, 654 { 655 .code = (BPF_JMP | EBPF_EXIT), 656 }, 657 }; 658 659 static void 660 test_shift1_prepare(void *arg) 661 { 662 struct dummy_vect8 *dv; 663 uint64_t v; 664 665 dv = arg; 666 667 v = rte_rand(); 668 669 memset(dv, 0, sizeof(*dv)); 670 dv->in[0].u32 = v; 671 dv->in[1].u64 = v << 12 | v >> 6; 672 dv->in[2].u32 = (-v ^ 5); 673 } 674 675 static int 676 test_shift1_check(uint64_t rc, const void *arg) 677 { 678 uint64_t r2, r3, r4; 679 const struct dummy_vect8 *dvt; 680 struct dummy_vect8 dve; 681 682 dvt = arg; 683 memset(&dve, 0, sizeof(dve)); 684 685 r2 = dvt->in[0].u32; 686 r3 = dvt->in[1].u64; 687 r4 = dvt->in[2].u32; 688 689 r2 = (uint32_t)r2 << TEST_SHIFT_1; 690 r3 = (int64_t)r3 >> TEST_SHIFT_2; 691 692 dve.out[0].u64 = r2; 693 dve.out[1].u64 = r3; 694 695 r4 &= TEST_SHIFT64_MASK; 696 r3 <<= r4; 697 r4 &= TEST_SHIFT32_MASK; 698 r2 = (uint32_t)r2 >> r4; 699 700 dve.out[2].u64 = r2; 701 dve.out[3].u64 = r3; 702 703 r2 = dvt->in[0].u32; 704 r3 = dvt->in[1].u64; 705 r4 = dvt->in[2].u32; 706 707 r2 &= TEST_SHIFT64_MASK; 708 r3 = (int64_t)r3 >> r2; 709 r2 &= TEST_SHIFT32_MASK; 710 r4 = (uint32_t)r4 << r2; 711 712 dve.out[4].u64 = r4; 713 dve.out[5].u64 = r3; 714 715 return cmp_res(__func__, 1, rc, dve.out, dvt->out, sizeof(dve.out)); 716 } 717 718 /* jmp test-cases */ 719 static const struct ebpf_insn test_jump1_prog[] = { 720 721 [0] = { 722 .code = (BPF_ALU | EBPF_MOV | BPF_K), 723 .dst_reg = EBPF_REG_0, 724 .imm = 0, 725 }, 726 [1] = { 727 .code = (BPF_LDX | BPF_MEM | BPF_W), 728 .dst_reg = EBPF_REG_2, 729 .src_reg = EBPF_REG_1, 730 .off = offsetof(struct dummy_vect8, in[0].u32), 731 }, 732 [2] = { 733 .code = (BPF_LDX | BPF_MEM | EBPF_DW), 734 .dst_reg = EBPF_REG_3, 735 .src_reg = EBPF_REG_1, 736 .off = offsetof(struct dummy_vect8, in[0].u64), 737 }, 738 [3] = { 739 .code = (BPF_LDX | BPF_MEM | BPF_W), 740 .dst_reg = EBPF_REG_4, 741 .src_reg = EBPF_REG_1, 742 .off = offsetof(struct dummy_vect8, in[1].u32), 743 }, 744 [4] = { 745 .code = (BPF_LDX | BPF_MEM | EBPF_DW), 746 .dst_reg = EBPF_REG_5, 747 .src_reg = EBPF_REG_1, 748 .off = offsetof(struct dummy_vect8, in[1].u64), 749 }, 750 [5] = { 751 .code = (BPF_JMP | BPF_JEQ | BPF_K), 752 .dst_reg = EBPF_REG_2, 753 .imm = TEST_JCC_1, 754 .off = 8, 755 }, 756 [6] = { 757 .code = (BPF_JMP | EBPF_JSLE | BPF_K), 758 .dst_reg = EBPF_REG_3, 759 .imm = TEST_JCC_2, 760 .off = 9, 761 }, 762 [7] = { 763 .code = (BPF_JMP | BPF_JGT | BPF_K), 764 .dst_reg = EBPF_REG_4, 765 .imm = TEST_JCC_3, 766 .off = 10, 767 }, 768 [8] = { 769 .code = (BPF_JMP | BPF_JSET | BPF_K), 770 .dst_reg = EBPF_REG_5, 771 .imm = TEST_JCC_4, 772 .off = 11, 773 }, 774 [9] = { 775 .code = (BPF_JMP | EBPF_JNE | BPF_X), 776 .dst_reg = EBPF_REG_2, 777 .src_reg = EBPF_REG_3, 778 .off = 12, 779 }, 780 [10] = { 781 .code = (BPF_JMP | EBPF_JSGT | BPF_X), 782 .dst_reg = EBPF_REG_2, 783 .src_reg = EBPF_REG_4, 784 .off = 13, 785 }, 786 [11] = { 787 .code = (BPF_JMP | EBPF_JLE | BPF_X), 788 .dst_reg = EBPF_REG_2, 789 .src_reg = EBPF_REG_5, 790 .off = 14, 791 }, 792 [12] = { 793 .code = (BPF_JMP | BPF_JSET | BPF_X), 794 .dst_reg = EBPF_REG_3, 795 .src_reg = EBPF_REG_5, 796 .off = 15, 797 }, 798 [13] = { 799 .code = (BPF_JMP | EBPF_EXIT), 800 }, 801 [14] = { 802 .code = (EBPF_ALU64 | BPF_OR | BPF_K), 803 .dst_reg = EBPF_REG_0, 804 .imm = 0x1, 805 }, 806 [15] = { 807 .code = (BPF_JMP | BPF_JA), 808 .off = -10, 809 }, 810 [16] = { 811 .code = (EBPF_ALU64 | BPF_OR | BPF_K), 812 .dst_reg = EBPF_REG_0, 813 .imm = 0x2, 814 }, 815 [17] = { 816 .code = (BPF_JMP | BPF_JA), 817 .off = -11, 818 }, 819 [18] = { 820 .code = (EBPF_ALU64 | BPF_OR | BPF_K), 821 .dst_reg = EBPF_REG_0, 822 .imm = 0x4, 823 }, 824 [19] = { 825 .code = (BPF_JMP | BPF_JA), 826 .off = -12, 827 }, 828 [20] = { 829 .code = (EBPF_ALU64 | BPF_OR | BPF_K), 830 .dst_reg = EBPF_REG_0, 831 .imm = 0x8, 832 }, 833 [21] = { 834 .code = (BPF_JMP | BPF_JA), 835 .off = -13, 836 }, 837 [22] = { 838 .code = (EBPF_ALU64 | BPF_OR | BPF_K), 839 .dst_reg = EBPF_REG_0, 840 .imm = 0x10, 841 }, 842 [23] = { 843 .code = (BPF_JMP | BPF_JA), 844 .off = -14, 845 }, 846 [24] = { 847 .code = (EBPF_ALU64 | BPF_OR | BPF_K), 848 .dst_reg = EBPF_REG_0, 849 .imm = 0x20, 850 }, 851 [25] = { 852 .code = (BPF_JMP | BPF_JA), 853 .off = -15, 854 }, 855 [26] = { 856 .code = (EBPF_ALU64 | BPF_OR | BPF_K), 857 .dst_reg = EBPF_REG_0, 858 .imm = 0x40, 859 }, 860 [27] = { 861 .code = (BPF_JMP | BPF_JA), 862 .off = -16, 863 }, 864 [28] = { 865 .code = (EBPF_ALU64 | BPF_OR | BPF_K), 866 .dst_reg = EBPF_REG_0, 867 .imm = 0x80, 868 }, 869 [29] = { 870 .code = (BPF_JMP | BPF_JA), 871 .off = -17, 872 }, 873 }; 874 875 static void 876 test_jump1_prepare(void *arg) 877 { 878 struct dummy_vect8 *dv; 879 uint64_t v1, v2; 880 881 dv = arg; 882 883 v1 = rte_rand(); 884 v2 = rte_rand(); 885 886 memset(dv, 0, sizeof(*dv)); 887 dv->in[0].u64 = v1; 888 dv->in[1].u64 = v2; 889 dv->in[0].u32 = (v1 << 12) + (v2 >> 6); 890 dv->in[1].u32 = (v2 << 12) - (v1 >> 6); 891 } 892 893 static int 894 test_jump1_check(uint64_t rc, const void *arg) 895 { 896 uint64_t r2, r3, r4, r5, rv; 897 const struct dummy_vect8 *dvt; 898 899 dvt = arg; 900 901 rv = 0; 902 r2 = dvt->in[0].u32; 903 r3 = dvt->in[0].u64; 904 r4 = dvt->in[1].u32; 905 r5 = dvt->in[1].u64; 906 907 if (r2 == TEST_JCC_1) 908 rv |= 0x1; 909 if ((int64_t)r3 <= TEST_JCC_2) 910 rv |= 0x2; 911 if (r4 > TEST_JCC_3) 912 rv |= 0x4; 913 if (r5 & TEST_JCC_4) 914 rv |= 0x8; 915 if (r2 != r3) 916 rv |= 0x10; 917 if ((int64_t)r2 > (int64_t)r4) 918 rv |= 0x20; 919 if (r2 <= r5) 920 rv |= 0x40; 921 if (r3 & r5) 922 rv |= 0x80; 923 924 return cmp_res(__func__, rv, rc, &rv, &rc, sizeof(rv)); 925 } 926 927 /* Jump test case - check ip4_dest in particular subnet */ 928 static const struct ebpf_insn test_jump2_prog[] = { 929 930 [0] = { 931 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K), 932 .dst_reg = EBPF_REG_2, 933 .imm = 0xe, 934 }, 935 [1] = { 936 .code = (BPF_LDX | BPF_MEM | BPF_H), 937 .dst_reg = EBPF_REG_3, 938 .src_reg = EBPF_REG_1, 939 .off = 12, 940 }, 941 [2] = { 942 .code = (BPF_JMP | EBPF_JNE | BPF_K), 943 .dst_reg = EBPF_REG_3, 944 .off = 2, 945 .imm = 0x81, 946 }, 947 [3] = { 948 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K), 949 .dst_reg = EBPF_REG_2, 950 .imm = 0x12, 951 }, 952 [4] = { 953 .code = (BPF_LDX | BPF_MEM | BPF_H), 954 .dst_reg = EBPF_REG_3, 955 .src_reg = EBPF_REG_1, 956 .off = 16, 957 }, 958 [5] = { 959 .code = (EBPF_ALU64 | BPF_AND | BPF_K), 960 .dst_reg = EBPF_REG_3, 961 .imm = 0xffff, 962 }, 963 [6] = { 964 .code = (BPF_JMP | EBPF_JNE | BPF_K), 965 .dst_reg = EBPF_REG_3, 966 .off = 9, 967 .imm = 0x8, 968 }, 969 [7] = { 970 .code = (EBPF_ALU64 | BPF_ADD | BPF_X), 971 .dst_reg = EBPF_REG_1, 972 .src_reg = EBPF_REG_2, 973 }, 974 [8] = { 975 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K), 976 .dst_reg = EBPF_REG_0, 977 .imm = 0, 978 }, 979 [9] = { 980 .code = (BPF_LDX | BPF_MEM | BPF_W), 981 .dst_reg = EBPF_REG_1, 982 .src_reg = EBPF_REG_1, 983 .off = 16, 984 }, 985 [10] = { 986 .code = (BPF_ALU | EBPF_MOV | BPF_K), 987 .dst_reg = EBPF_REG_3, 988 .imm = TEST_NETMASK, 989 }, 990 [11] = { 991 .code = (BPF_ALU | EBPF_END | EBPF_TO_BE), 992 .dst_reg = EBPF_REG_3, 993 .imm = sizeof(uint32_t) * CHAR_BIT, 994 }, 995 [12] = { 996 .code = (BPF_ALU | BPF_AND | BPF_X), 997 .dst_reg = EBPF_REG_1, 998 .src_reg = EBPF_REG_3, 999 }, 1000 [13] = { 1001 .code = (BPF_ALU | EBPF_MOV | BPF_K), 1002 .dst_reg = EBPF_REG_3, 1003 .imm = TEST_SUBNET, 1004 }, 1005 [14] = { 1006 .code = (BPF_ALU | EBPF_END | EBPF_TO_BE), 1007 .dst_reg = EBPF_REG_3, 1008 .imm = sizeof(uint32_t) * CHAR_BIT, 1009 }, 1010 [15] = { 1011 .code = (BPF_JMP | BPF_JEQ | BPF_X), 1012 .dst_reg = EBPF_REG_1, 1013 .src_reg = EBPF_REG_3, 1014 .off = 1, 1015 }, 1016 [16] = { 1017 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K), 1018 .dst_reg = EBPF_REG_0, 1019 .imm = -1, 1020 }, 1021 [17] = { 1022 .code = (BPF_JMP | EBPF_EXIT), 1023 }, 1024 }; 1025 1026 /* Preparing a vlan packet */ 1027 static void 1028 test_jump2_prepare(void *arg) 1029 { 1030 struct dummy_net *dn; 1031 1032 dn = arg; 1033 memset(dn, 0, sizeof(*dn)); 1034 1035 /* 1036 * Initialize ether header. 1037 */ 1038 rte_ether_addr_copy((struct rte_ether_addr *)dst_mac, 1039 &dn->eth_hdr.dst_addr); 1040 rte_ether_addr_copy((struct rte_ether_addr *)src_mac, 1041 &dn->eth_hdr.src_addr); 1042 dn->eth_hdr.ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_VLAN); 1043 1044 /* 1045 * Initialize vlan header. 1046 */ 1047 dn->vlan_hdr.eth_proto = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4); 1048 dn->vlan_hdr.vlan_tci = 32; 1049 1050 /* 1051 * Initialize IP header. 1052 */ 1053 dn->ip_hdr.version_ihl = 0x45; /*IP_VERSION | IP_HDRLEN*/ 1054 dn->ip_hdr.time_to_live = 64; /* IP_DEFTTL */ 1055 dn->ip_hdr.next_proto_id = IPPROTO_TCP; 1056 dn->ip_hdr.packet_id = rte_cpu_to_be_16(0x463c); 1057 dn->ip_hdr.total_length = rte_cpu_to_be_16(60); 1058 dn->ip_hdr.src_addr = rte_cpu_to_be_32(ip_src_addr); 1059 dn->ip_hdr.dst_addr = rte_cpu_to_be_32(ip_dst_addr); 1060 } 1061 1062 static int 1063 test_jump2_check(uint64_t rc, const void *arg) 1064 { 1065 const struct rte_ether_hdr *eth_hdr = arg; 1066 const struct rte_ipv4_hdr *ipv4_hdr; 1067 const void *next = eth_hdr; 1068 uint16_t eth_type; 1069 uint64_t v = -1; 1070 1071 if (eth_hdr->ether_type == htons(0x8100)) { 1072 const struct rte_vlan_hdr *vlan_hdr = 1073 (const void *)(eth_hdr + 1); 1074 eth_type = vlan_hdr->eth_proto; 1075 next = vlan_hdr + 1; 1076 } else { 1077 eth_type = eth_hdr->ether_type; 1078 next = eth_hdr + 1; 1079 } 1080 1081 if (eth_type == htons(0x0800)) { 1082 ipv4_hdr = next; 1083 if ((ipv4_hdr->dst_addr & rte_cpu_to_be_32(TEST_NETMASK)) == 1084 rte_cpu_to_be_32(TEST_SUBNET)) { 1085 v = 0; 1086 } 1087 } 1088 1089 return cmp_res(__func__, v, rc, arg, arg, sizeof(arg)); 1090 } 1091 1092 /* alu (add, sub, and, or, xor, neg) test-cases */ 1093 static const struct ebpf_insn test_alu1_prog[] = { 1094 1095 { 1096 .code = (BPF_LDX | BPF_MEM | BPF_W), 1097 .dst_reg = EBPF_REG_2, 1098 .src_reg = EBPF_REG_1, 1099 .off = offsetof(struct dummy_vect8, in[0].u32), 1100 }, 1101 { 1102 .code = (BPF_LDX | BPF_MEM | EBPF_DW), 1103 .dst_reg = EBPF_REG_3, 1104 .src_reg = EBPF_REG_1, 1105 .off = offsetof(struct dummy_vect8, in[0].u64), 1106 }, 1107 { 1108 .code = (BPF_LDX | BPF_MEM | BPF_W), 1109 .dst_reg = EBPF_REG_4, 1110 .src_reg = EBPF_REG_1, 1111 .off = offsetof(struct dummy_vect8, in[1].u32), 1112 }, 1113 { 1114 .code = (BPF_LDX | BPF_MEM | EBPF_DW), 1115 .dst_reg = EBPF_REG_5, 1116 .src_reg = EBPF_REG_1, 1117 .off = offsetof(struct dummy_vect8, in[1].u64), 1118 }, 1119 { 1120 .code = (BPF_ALU | BPF_AND | BPF_K), 1121 .dst_reg = EBPF_REG_2, 1122 .imm = TEST_FILL_1, 1123 }, 1124 { 1125 .code = (EBPF_ALU64 | BPF_OR | BPF_K), 1126 .dst_reg = EBPF_REG_3, 1127 .imm = TEST_FILL_1, 1128 }, 1129 { 1130 .code = (BPF_ALU | BPF_XOR | BPF_K), 1131 .dst_reg = EBPF_REG_4, 1132 .imm = TEST_FILL_1, 1133 }, 1134 { 1135 .code = (EBPF_ALU64 | BPF_ADD | BPF_K), 1136 .dst_reg = EBPF_REG_5, 1137 .imm = TEST_FILL_1, 1138 }, 1139 { 1140 .code = (BPF_STX | BPF_MEM | EBPF_DW), 1141 .dst_reg = EBPF_REG_1, 1142 .src_reg = EBPF_REG_2, 1143 .off = offsetof(struct dummy_vect8, out[0].u64), 1144 }, 1145 { 1146 .code = (BPF_STX | BPF_MEM | EBPF_DW), 1147 .dst_reg = EBPF_REG_1, 1148 .src_reg = EBPF_REG_3, 1149 .off = offsetof(struct dummy_vect8, out[1].u64), 1150 }, 1151 { 1152 .code = (BPF_STX | BPF_MEM | EBPF_DW), 1153 .dst_reg = EBPF_REG_1, 1154 .src_reg = EBPF_REG_4, 1155 .off = offsetof(struct dummy_vect8, out[2].u64), 1156 }, 1157 { 1158 .code = (BPF_STX | BPF_MEM | EBPF_DW), 1159 .dst_reg = EBPF_REG_1, 1160 .src_reg = EBPF_REG_5, 1161 .off = offsetof(struct dummy_vect8, out[3].u64), 1162 }, 1163 { 1164 .code = (BPF_ALU | BPF_OR | BPF_X), 1165 .dst_reg = EBPF_REG_2, 1166 .src_reg = EBPF_REG_3, 1167 }, 1168 { 1169 .code = (EBPF_ALU64 | BPF_XOR | BPF_X), 1170 .dst_reg = EBPF_REG_3, 1171 .src_reg = EBPF_REG_4, 1172 }, 1173 { 1174 .code = (BPF_ALU | BPF_SUB | BPF_X), 1175 .dst_reg = EBPF_REG_4, 1176 .src_reg = EBPF_REG_5, 1177 }, 1178 { 1179 .code = (EBPF_ALU64 | BPF_AND | BPF_X), 1180 .dst_reg = EBPF_REG_5, 1181 .src_reg = EBPF_REG_2, 1182 }, 1183 { 1184 .code = (BPF_STX | BPF_MEM | EBPF_DW), 1185 .dst_reg = EBPF_REG_1, 1186 .src_reg = EBPF_REG_2, 1187 .off = offsetof(struct dummy_vect8, out[4].u64), 1188 }, 1189 { 1190 .code = (BPF_STX | BPF_MEM | EBPF_DW), 1191 .dst_reg = EBPF_REG_1, 1192 .src_reg = EBPF_REG_3, 1193 .off = offsetof(struct dummy_vect8, out[5].u64), 1194 }, 1195 { 1196 .code = (BPF_STX | BPF_MEM | EBPF_DW), 1197 .dst_reg = EBPF_REG_1, 1198 .src_reg = EBPF_REG_4, 1199 .off = offsetof(struct dummy_vect8, out[6].u64), 1200 }, 1201 { 1202 .code = (BPF_STX | BPF_MEM | EBPF_DW), 1203 .dst_reg = EBPF_REG_1, 1204 .src_reg = EBPF_REG_5, 1205 .off = offsetof(struct dummy_vect8, out[7].u64), 1206 }, 1207 /* return (-r2 + (-r3)) */ 1208 { 1209 .code = (BPF_ALU | BPF_NEG), 1210 .dst_reg = EBPF_REG_2, 1211 }, 1212 { 1213 .code = (EBPF_ALU64 | BPF_NEG), 1214 .dst_reg = EBPF_REG_3, 1215 }, 1216 { 1217 .code = (EBPF_ALU64 | BPF_ADD | BPF_X), 1218 .dst_reg = EBPF_REG_2, 1219 .src_reg = EBPF_REG_3, 1220 }, 1221 { 1222 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X), 1223 .dst_reg = EBPF_REG_0, 1224 .src_reg = EBPF_REG_2, 1225 }, 1226 { 1227 .code = (BPF_JMP | EBPF_EXIT), 1228 }, 1229 }; 1230 1231 static int 1232 test_alu1_check(uint64_t rc, const void *arg) 1233 { 1234 uint64_t r2, r3, r4, r5, rv; 1235 const struct dummy_vect8 *dvt; 1236 struct dummy_vect8 dve; 1237 1238 dvt = arg; 1239 memset(&dve, 0, sizeof(dve)); 1240 1241 r2 = dvt->in[0].u32; 1242 r3 = dvt->in[0].u64; 1243 r4 = dvt->in[1].u32; 1244 r5 = dvt->in[1].u64; 1245 1246 r2 = (uint32_t)r2 & TEST_FILL_1; 1247 r3 |= (int32_t) TEST_FILL_1; 1248 r4 = (uint32_t)r4 ^ TEST_FILL_1; 1249 r5 += (int32_t)TEST_FILL_1; 1250 1251 dve.out[0].u64 = r2; 1252 dve.out[1].u64 = r3; 1253 dve.out[2].u64 = r4; 1254 dve.out[3].u64 = r5; 1255 1256 r2 = (uint32_t)r2 | (uint32_t)r3; 1257 r3 ^= r4; 1258 r4 = (uint32_t)r4 - (uint32_t)r5; 1259 r5 &= r2; 1260 1261 dve.out[4].u64 = r2; 1262 dve.out[5].u64 = r3; 1263 dve.out[6].u64 = r4; 1264 dve.out[7].u64 = r5; 1265 1266 r2 = -(int32_t)r2; 1267 rv = (uint32_t)r2; 1268 r3 = -r3; 1269 rv += r3; 1270 1271 return cmp_res(__func__, rv, rc, dve.out, dvt->out, sizeof(dve.out)); 1272 } 1273 1274 /* endianness conversions (BE->LE/LE->BE) test-cases */ 1275 static const struct ebpf_insn test_bele1_prog[] = { 1276 1277 { 1278 .code = (BPF_LDX | BPF_MEM | BPF_H), 1279 .dst_reg = EBPF_REG_2, 1280 .src_reg = EBPF_REG_1, 1281 .off = offsetof(struct dummy_vect8, in[0].u16), 1282 }, 1283 { 1284 .code = (BPF_LDX | BPF_MEM | BPF_W), 1285 .dst_reg = EBPF_REG_3, 1286 .src_reg = EBPF_REG_1, 1287 .off = offsetof(struct dummy_vect8, in[0].u32), 1288 }, 1289 { 1290 .code = (BPF_LDX | BPF_MEM | EBPF_DW), 1291 .dst_reg = EBPF_REG_4, 1292 .src_reg = EBPF_REG_1, 1293 .off = offsetof(struct dummy_vect8, in[0].u64), 1294 }, 1295 { 1296 .code = (BPF_ALU | EBPF_END | EBPF_TO_BE), 1297 .dst_reg = EBPF_REG_2, 1298 .imm = sizeof(uint16_t) * CHAR_BIT, 1299 }, 1300 { 1301 .code = (BPF_ALU | EBPF_END | EBPF_TO_BE), 1302 .dst_reg = EBPF_REG_3, 1303 .imm = sizeof(uint32_t) * CHAR_BIT, 1304 }, 1305 { 1306 .code = (BPF_ALU | EBPF_END | EBPF_TO_BE), 1307 .dst_reg = EBPF_REG_4, 1308 .imm = sizeof(uint64_t) * CHAR_BIT, 1309 }, 1310 { 1311 .code = (BPF_STX | BPF_MEM | EBPF_DW), 1312 .dst_reg = EBPF_REG_1, 1313 .src_reg = EBPF_REG_2, 1314 .off = offsetof(struct dummy_vect8, out[0].u64), 1315 }, 1316 { 1317 .code = (BPF_STX | BPF_MEM | EBPF_DW), 1318 .dst_reg = EBPF_REG_1, 1319 .src_reg = EBPF_REG_3, 1320 .off = offsetof(struct dummy_vect8, out[1].u64), 1321 }, 1322 { 1323 .code = (BPF_STX | BPF_MEM | EBPF_DW), 1324 .dst_reg = EBPF_REG_1, 1325 .src_reg = EBPF_REG_4, 1326 .off = offsetof(struct dummy_vect8, out[2].u64), 1327 }, 1328 { 1329 .code = (BPF_LDX | BPF_MEM | BPF_H), 1330 .dst_reg = EBPF_REG_2, 1331 .src_reg = EBPF_REG_1, 1332 .off = offsetof(struct dummy_vect8, in[0].u16), 1333 }, 1334 { 1335 .code = (BPF_LDX | BPF_MEM | BPF_W), 1336 .dst_reg = EBPF_REG_3, 1337 .src_reg = EBPF_REG_1, 1338 .off = offsetof(struct dummy_vect8, in[0].u32), 1339 }, 1340 { 1341 .code = (BPF_LDX | BPF_MEM | EBPF_DW), 1342 .dst_reg = EBPF_REG_4, 1343 .src_reg = EBPF_REG_1, 1344 .off = offsetof(struct dummy_vect8, in[0].u64), 1345 }, 1346 { 1347 .code = (BPF_ALU | EBPF_END | EBPF_TO_LE), 1348 .dst_reg = EBPF_REG_2, 1349 .imm = sizeof(uint16_t) * CHAR_BIT, 1350 }, 1351 { 1352 .code = (BPF_ALU | EBPF_END | EBPF_TO_LE), 1353 .dst_reg = EBPF_REG_3, 1354 .imm = sizeof(uint32_t) * CHAR_BIT, 1355 }, 1356 { 1357 .code = (BPF_ALU | EBPF_END | EBPF_TO_LE), 1358 .dst_reg = EBPF_REG_4, 1359 .imm = sizeof(uint64_t) * CHAR_BIT, 1360 }, 1361 { 1362 .code = (BPF_STX | BPF_MEM | EBPF_DW), 1363 .dst_reg = EBPF_REG_1, 1364 .src_reg = EBPF_REG_2, 1365 .off = offsetof(struct dummy_vect8, out[3].u64), 1366 }, 1367 { 1368 .code = (BPF_STX | BPF_MEM | EBPF_DW), 1369 .dst_reg = EBPF_REG_1, 1370 .src_reg = EBPF_REG_3, 1371 .off = offsetof(struct dummy_vect8, out[4].u64), 1372 }, 1373 { 1374 .code = (BPF_STX | BPF_MEM | EBPF_DW), 1375 .dst_reg = EBPF_REG_1, 1376 .src_reg = EBPF_REG_4, 1377 .off = offsetof(struct dummy_vect8, out[5].u64), 1378 }, 1379 /* return 1 */ 1380 { 1381 .code = (BPF_ALU | EBPF_MOV | BPF_K), 1382 .dst_reg = EBPF_REG_0, 1383 .imm = 1, 1384 }, 1385 { 1386 .code = (BPF_JMP | EBPF_EXIT), 1387 }, 1388 }; 1389 1390 static void 1391 test_bele1_prepare(void *arg) 1392 { 1393 struct dummy_vect8 *dv; 1394 1395 dv = arg; 1396 1397 memset(dv, 0, sizeof(*dv)); 1398 dv->in[0].u64 = rte_rand(); 1399 dv->in[0].u32 = dv->in[0].u64; 1400 dv->in[0].u16 = dv->in[0].u64; 1401 } 1402 1403 static int 1404 test_bele1_check(uint64_t rc, const void *arg) 1405 { 1406 uint64_t r2, r3, r4; 1407 const struct dummy_vect8 *dvt; 1408 struct dummy_vect8 dve; 1409 1410 dvt = arg; 1411 memset(&dve, 0, sizeof(dve)); 1412 1413 r2 = dvt->in[0].u16; 1414 r3 = dvt->in[0].u32; 1415 r4 = dvt->in[0].u64; 1416 1417 r2 = rte_cpu_to_be_16(r2); 1418 r3 = rte_cpu_to_be_32(r3); 1419 r4 = rte_cpu_to_be_64(r4); 1420 1421 dve.out[0].u64 = r2; 1422 dve.out[1].u64 = r3; 1423 dve.out[2].u64 = r4; 1424 1425 r2 = dvt->in[0].u16; 1426 r3 = dvt->in[0].u32; 1427 r4 = dvt->in[0].u64; 1428 1429 r2 = rte_cpu_to_le_16(r2); 1430 r3 = rte_cpu_to_le_32(r3); 1431 r4 = rte_cpu_to_le_64(r4); 1432 1433 dve.out[3].u64 = r2; 1434 dve.out[4].u64 = r3; 1435 dve.out[5].u64 = r4; 1436 1437 return cmp_res(__func__, 1, rc, dve.out, dvt->out, sizeof(dve.out)); 1438 } 1439 1440 /* atomic add test-cases */ 1441 static const struct ebpf_insn test_xadd1_prog[] = { 1442 1443 { 1444 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K), 1445 .dst_reg = EBPF_REG_2, 1446 .imm = 1, 1447 }, 1448 { 1449 .code = (BPF_STX | EBPF_XADD | BPF_W), 1450 .dst_reg = EBPF_REG_1, 1451 .src_reg = EBPF_REG_2, 1452 .off = offsetof(struct dummy_offset, u32), 1453 }, 1454 { 1455 .code = (BPF_STX | EBPF_XADD | EBPF_DW), 1456 .dst_reg = EBPF_REG_1, 1457 .src_reg = EBPF_REG_2, 1458 .off = offsetof(struct dummy_offset, u64), 1459 }, 1460 { 1461 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K), 1462 .dst_reg = EBPF_REG_3, 1463 .imm = -1, 1464 }, 1465 { 1466 .code = (BPF_STX | EBPF_XADD | BPF_W), 1467 .dst_reg = EBPF_REG_1, 1468 .src_reg = EBPF_REG_3, 1469 .off = offsetof(struct dummy_offset, u32), 1470 }, 1471 { 1472 .code = (BPF_STX | EBPF_XADD | EBPF_DW), 1473 .dst_reg = EBPF_REG_1, 1474 .src_reg = EBPF_REG_3, 1475 .off = offsetof(struct dummy_offset, u64), 1476 }, 1477 { 1478 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K), 1479 .dst_reg = EBPF_REG_4, 1480 .imm = TEST_FILL_1, 1481 }, 1482 { 1483 .code = (BPF_STX | EBPF_XADD | BPF_W), 1484 .dst_reg = EBPF_REG_1, 1485 .src_reg = EBPF_REG_4, 1486 .off = offsetof(struct dummy_offset, u32), 1487 }, 1488 { 1489 .code = (BPF_STX | EBPF_XADD | EBPF_DW), 1490 .dst_reg = EBPF_REG_1, 1491 .src_reg = EBPF_REG_4, 1492 .off = offsetof(struct dummy_offset, u64), 1493 }, 1494 { 1495 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K), 1496 .dst_reg = EBPF_REG_5, 1497 .imm = TEST_MUL_1, 1498 }, 1499 { 1500 .code = (BPF_STX | EBPF_XADD | BPF_W), 1501 .dst_reg = EBPF_REG_1, 1502 .src_reg = EBPF_REG_5, 1503 .off = offsetof(struct dummy_offset, u32), 1504 }, 1505 { 1506 .code = (BPF_STX | EBPF_XADD | EBPF_DW), 1507 .dst_reg = EBPF_REG_1, 1508 .src_reg = EBPF_REG_5, 1509 .off = offsetof(struct dummy_offset, u64), 1510 }, 1511 { 1512 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K), 1513 .dst_reg = EBPF_REG_6, 1514 .imm = TEST_MUL_2, 1515 }, 1516 { 1517 .code = (BPF_STX | EBPF_XADD | BPF_W), 1518 .dst_reg = EBPF_REG_1, 1519 .src_reg = EBPF_REG_6, 1520 .off = offsetof(struct dummy_offset, u32), 1521 }, 1522 { 1523 .code = (BPF_STX | EBPF_XADD | EBPF_DW), 1524 .dst_reg = EBPF_REG_1, 1525 .src_reg = EBPF_REG_6, 1526 .off = offsetof(struct dummy_offset, u64), 1527 }, 1528 { 1529 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K), 1530 .dst_reg = EBPF_REG_7, 1531 .imm = TEST_JCC_2, 1532 }, 1533 { 1534 .code = (BPF_STX | EBPF_XADD | BPF_W), 1535 .dst_reg = EBPF_REG_1, 1536 .src_reg = EBPF_REG_7, 1537 .off = offsetof(struct dummy_offset, u32), 1538 }, 1539 { 1540 .code = (BPF_STX | EBPF_XADD | EBPF_DW), 1541 .dst_reg = EBPF_REG_1, 1542 .src_reg = EBPF_REG_7, 1543 .off = offsetof(struct dummy_offset, u64), 1544 }, 1545 { 1546 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K), 1547 .dst_reg = EBPF_REG_8, 1548 .imm = TEST_JCC_3, 1549 }, 1550 { 1551 .code = (BPF_STX | EBPF_XADD | BPF_W), 1552 .dst_reg = EBPF_REG_1, 1553 .src_reg = EBPF_REG_8, 1554 .off = offsetof(struct dummy_offset, u32), 1555 }, 1556 { 1557 .code = (BPF_STX | EBPF_XADD | EBPF_DW), 1558 .dst_reg = EBPF_REG_1, 1559 .src_reg = EBPF_REG_8, 1560 .off = offsetof(struct dummy_offset, u64), 1561 }, 1562 /* return 1 */ 1563 { 1564 .code = (BPF_ALU | EBPF_MOV | BPF_K), 1565 .dst_reg = EBPF_REG_0, 1566 .imm = 1, 1567 }, 1568 { 1569 .code = (BPF_JMP | EBPF_EXIT), 1570 }, 1571 }; 1572 1573 static int 1574 test_xadd1_check(uint64_t rc, const void *arg) 1575 { 1576 uint64_t rv; 1577 const struct dummy_offset *dft; 1578 struct dummy_offset dfe; 1579 1580 dft = arg; 1581 memset(&dfe, 0, sizeof(dfe)); 1582 1583 rv = 1; 1584 rte_atomic_fetch_add_explicit((uint32_t __rte_atomic *)&dfe.u32, rv, 1585 rte_memory_order_relaxed); 1586 rte_atomic_fetch_add_explicit((uint64_t __rte_atomic *)&dfe.u64, rv, 1587 rte_memory_order_relaxed); 1588 1589 rv = -1; 1590 rte_atomic_fetch_add_explicit((uint32_t __rte_atomic *)&dfe.u32, rv, 1591 rte_memory_order_relaxed); 1592 rte_atomic_fetch_add_explicit((uint64_t __rte_atomic *)&dfe.u64, rv, 1593 rte_memory_order_relaxed); 1594 1595 rv = (int32_t)TEST_FILL_1; 1596 rte_atomic_fetch_add_explicit((uint32_t __rte_atomic *)&dfe.u32, rv, 1597 rte_memory_order_relaxed); 1598 rte_atomic_fetch_add_explicit((uint64_t __rte_atomic *)&dfe.u64, rv, 1599 rte_memory_order_relaxed); 1600 1601 rv = TEST_MUL_1; 1602 rte_atomic_fetch_add_explicit((uint32_t __rte_atomic *)&dfe.u32, rv, 1603 rte_memory_order_relaxed); 1604 rte_atomic_fetch_add_explicit((uint64_t __rte_atomic *)&dfe.u64, rv, 1605 rte_memory_order_relaxed); 1606 1607 rv = TEST_MUL_2; 1608 rte_atomic_fetch_add_explicit((uint32_t __rte_atomic *)&dfe.u32, rv, 1609 rte_memory_order_relaxed); 1610 rte_atomic_fetch_add_explicit((uint64_t __rte_atomic *)&dfe.u64, rv, 1611 rte_memory_order_relaxed); 1612 1613 rv = TEST_JCC_2; 1614 rte_atomic_fetch_add_explicit((uint32_t __rte_atomic *)&dfe.u32, rv, 1615 rte_memory_order_relaxed); 1616 rte_atomic_fetch_add_explicit((uint64_t __rte_atomic *)&dfe.u64, rv, 1617 rte_memory_order_relaxed); 1618 1619 rv = TEST_JCC_3; 1620 rte_atomic_fetch_add_explicit((uint32_t __rte_atomic *)&dfe.u32, rv, 1621 rte_memory_order_relaxed); 1622 rte_atomic_fetch_add_explicit((uint64_t __rte_atomic *)&dfe.u64, rv, 1623 rte_memory_order_relaxed); 1624 1625 return cmp_res(__func__, 1, rc, &dfe, dft, sizeof(dfe)); 1626 } 1627 1628 /* alu div test-cases */ 1629 static const struct ebpf_insn test_div1_prog[] = { 1630 1631 { 1632 .code = (BPF_LDX | BPF_MEM | BPF_W), 1633 .dst_reg = EBPF_REG_2, 1634 .src_reg = EBPF_REG_1, 1635 .off = offsetof(struct dummy_vect8, in[0].u32), 1636 }, 1637 { 1638 .code = (BPF_LDX | BPF_MEM | EBPF_DW), 1639 .dst_reg = EBPF_REG_3, 1640 .src_reg = EBPF_REG_1, 1641 .off = offsetof(struct dummy_vect8, in[1].u64), 1642 }, 1643 { 1644 .code = (BPF_LDX | BPF_MEM | BPF_W), 1645 .dst_reg = EBPF_REG_4, 1646 .src_reg = EBPF_REG_1, 1647 .off = offsetof(struct dummy_vect8, in[2].u32), 1648 }, 1649 { 1650 .code = (BPF_ALU | BPF_DIV | BPF_K), 1651 .dst_reg = EBPF_REG_2, 1652 .imm = TEST_MUL_1, 1653 }, 1654 { 1655 .code = (EBPF_ALU64 | BPF_MOD | BPF_K), 1656 .dst_reg = EBPF_REG_3, 1657 .imm = TEST_MUL_2, 1658 }, 1659 { 1660 .code = (EBPF_ALU64 | BPF_OR | BPF_K), 1661 .dst_reg = EBPF_REG_2, 1662 .imm = 1, 1663 }, 1664 { 1665 .code = (EBPF_ALU64 | BPF_OR | BPF_K), 1666 .dst_reg = EBPF_REG_3, 1667 .imm = 1, 1668 }, 1669 { 1670 .code = (BPF_ALU | BPF_MOD | BPF_X), 1671 .dst_reg = EBPF_REG_4, 1672 .src_reg = EBPF_REG_2, 1673 }, 1674 { 1675 .code = (EBPF_ALU64 | BPF_DIV | BPF_X), 1676 .dst_reg = EBPF_REG_4, 1677 .src_reg = EBPF_REG_3, 1678 }, 1679 { 1680 .code = (BPF_STX | BPF_MEM | EBPF_DW), 1681 .dst_reg = EBPF_REG_1, 1682 .src_reg = EBPF_REG_2, 1683 .off = offsetof(struct dummy_vect8, out[0].u64), 1684 }, 1685 { 1686 .code = (BPF_STX | BPF_MEM | EBPF_DW), 1687 .dst_reg = EBPF_REG_1, 1688 .src_reg = EBPF_REG_3, 1689 .off = offsetof(struct dummy_vect8, out[1].u64), 1690 }, 1691 { 1692 .code = (BPF_STX | BPF_MEM | EBPF_DW), 1693 .dst_reg = EBPF_REG_1, 1694 .src_reg = EBPF_REG_4, 1695 .off = offsetof(struct dummy_vect8, out[2].u64), 1696 }, 1697 /* check that we can handle division by zero gracefully. */ 1698 { 1699 .code = (BPF_LDX | BPF_MEM | BPF_W), 1700 .dst_reg = EBPF_REG_2, 1701 .src_reg = EBPF_REG_1, 1702 .off = offsetof(struct dummy_vect8, in[3].u32), 1703 }, 1704 { 1705 .code = (BPF_ALU | BPF_DIV | BPF_X), 1706 .dst_reg = EBPF_REG_4, 1707 .src_reg = EBPF_REG_2, 1708 }, 1709 /* return 1 */ 1710 { 1711 .code = (BPF_ALU | EBPF_MOV | BPF_K), 1712 .dst_reg = EBPF_REG_0, 1713 .imm = 1, 1714 }, 1715 { 1716 .code = (BPF_JMP | EBPF_EXIT), 1717 }, 1718 }; 1719 1720 static int 1721 test_div1_check(uint64_t rc, const void *arg) 1722 { 1723 uint64_t r2, r3, r4; 1724 const struct dummy_vect8 *dvt; 1725 struct dummy_vect8 dve; 1726 1727 dvt = arg; 1728 memset(&dve, 0, sizeof(dve)); 1729 1730 r2 = dvt->in[0].u32; 1731 r3 = dvt->in[1].u64; 1732 r4 = dvt->in[2].u32; 1733 1734 r2 = (uint32_t)r2 / TEST_MUL_1; 1735 r3 %= TEST_MUL_2; 1736 r2 |= 1; 1737 r3 |= 1; 1738 r4 = (uint32_t)(r4 % r2); 1739 r4 /= r3; 1740 1741 dve.out[0].u64 = r2; 1742 dve.out[1].u64 = r3; 1743 dve.out[2].u64 = r4; 1744 1745 /* 1746 * in the test prog we attempted to divide by zero. 1747 * so return value should return 0. 1748 */ 1749 return cmp_res(__func__, 0, rc, dve.out, dvt->out, sizeof(dve.out)); 1750 } 1751 1752 /* call test-cases */ 1753 static const struct ebpf_insn test_call1_prog[] = { 1754 1755 { 1756 .code = (BPF_LDX | BPF_MEM | BPF_W), 1757 .dst_reg = EBPF_REG_2, 1758 .src_reg = EBPF_REG_1, 1759 .off = offsetof(struct dummy_offset, u32), 1760 }, 1761 { 1762 .code = (BPF_LDX | BPF_MEM | EBPF_DW), 1763 .dst_reg = EBPF_REG_3, 1764 .src_reg = EBPF_REG_1, 1765 .off = offsetof(struct dummy_offset, u64), 1766 }, 1767 { 1768 .code = (BPF_STX | BPF_MEM | BPF_W), 1769 .dst_reg = EBPF_REG_10, 1770 .src_reg = EBPF_REG_2, 1771 .off = -4, 1772 }, 1773 { 1774 .code = (BPF_STX | BPF_MEM | EBPF_DW), 1775 .dst_reg = EBPF_REG_10, 1776 .src_reg = EBPF_REG_3, 1777 .off = -16, 1778 }, 1779 { 1780 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X), 1781 .dst_reg = EBPF_REG_2, 1782 .src_reg = EBPF_REG_10, 1783 }, 1784 { 1785 .code = (EBPF_ALU64 | BPF_SUB | BPF_K), 1786 .dst_reg = EBPF_REG_2, 1787 .imm = 4, 1788 }, 1789 { 1790 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X), 1791 .dst_reg = EBPF_REG_3, 1792 .src_reg = EBPF_REG_10, 1793 }, 1794 { 1795 .code = (EBPF_ALU64 | BPF_SUB | BPF_K), 1796 .dst_reg = EBPF_REG_3, 1797 .imm = 16, 1798 }, 1799 { 1800 .code = (BPF_JMP | EBPF_CALL), 1801 .imm = 0, 1802 }, 1803 { 1804 .code = (BPF_LDX | BPF_MEM | BPF_W), 1805 .dst_reg = EBPF_REG_2, 1806 .src_reg = EBPF_REG_10, 1807 .off = -4, 1808 }, 1809 { 1810 .code = (BPF_LDX | BPF_MEM | EBPF_DW), 1811 .dst_reg = EBPF_REG_0, 1812 .src_reg = EBPF_REG_10, 1813 .off = -16 1814 }, 1815 { 1816 .code = (EBPF_ALU64 | BPF_ADD | BPF_X), 1817 .dst_reg = EBPF_REG_0, 1818 .src_reg = EBPF_REG_2, 1819 }, 1820 { 1821 .code = (BPF_JMP | EBPF_EXIT), 1822 }, 1823 }; 1824 1825 static void 1826 dummy_func1(const void *p, uint32_t *v32, uint64_t *v64) 1827 { 1828 const struct dummy_offset *dv; 1829 1830 dv = p; 1831 1832 v32[0] += dv->u16; 1833 v64[0] += dv->u8; 1834 } 1835 1836 static int 1837 test_call1_check(uint64_t rc, const void *arg) 1838 { 1839 uint32_t v32; 1840 uint64_t v64; 1841 const struct dummy_offset *dv; 1842 1843 dv = arg; 1844 1845 v32 = dv->u32; 1846 v64 = dv->u64; 1847 dummy_func1(arg, &v32, &v64); 1848 v64 += v32; 1849 1850 return cmp_res(__func__, v64, rc, dv, dv, sizeof(*dv)); 1851 } 1852 1853 static const struct rte_bpf_xsym test_call1_xsym[] = { 1854 { 1855 .name = RTE_STR(dummy_func1), 1856 .type = RTE_BPF_XTYPE_FUNC, 1857 .func = { 1858 .val = (void *)dummy_func1, 1859 .nb_args = 3, 1860 .args = { 1861 [0] = { 1862 .type = RTE_BPF_ARG_PTR, 1863 .size = sizeof(struct dummy_offset), 1864 }, 1865 [1] = { 1866 .type = RTE_BPF_ARG_PTR, 1867 .size = sizeof(uint32_t), 1868 }, 1869 [2] = { 1870 .type = RTE_BPF_ARG_PTR, 1871 .size = sizeof(uint64_t), 1872 }, 1873 }, 1874 }, 1875 }, 1876 }; 1877 1878 static const struct ebpf_insn test_call2_prog[] = { 1879 1880 { 1881 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X), 1882 .dst_reg = EBPF_REG_1, 1883 .src_reg = EBPF_REG_10, 1884 }, 1885 { 1886 .code = (EBPF_ALU64 | BPF_ADD | BPF_K), 1887 .dst_reg = EBPF_REG_1, 1888 .imm = -(int32_t)sizeof(struct dummy_offset), 1889 }, 1890 { 1891 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X), 1892 .dst_reg = EBPF_REG_2, 1893 .src_reg = EBPF_REG_10, 1894 }, 1895 { 1896 .code = (EBPF_ALU64 | BPF_ADD | BPF_K), 1897 .dst_reg = EBPF_REG_2, 1898 .imm = -2 * (int32_t)sizeof(struct dummy_offset), 1899 }, 1900 { 1901 .code = (BPF_JMP | EBPF_CALL), 1902 .imm = 0, 1903 }, 1904 { 1905 .code = (BPF_LDX | BPF_MEM | EBPF_DW), 1906 .dst_reg = EBPF_REG_1, 1907 .src_reg = EBPF_REG_10, 1908 .off = -(int32_t)(sizeof(struct dummy_offset) - 1909 offsetof(struct dummy_offset, u64)), 1910 }, 1911 { 1912 .code = (BPF_LDX | BPF_MEM | BPF_W), 1913 .dst_reg = EBPF_REG_0, 1914 .src_reg = EBPF_REG_10, 1915 .off = -(int32_t)(sizeof(struct dummy_offset) - 1916 offsetof(struct dummy_offset, u32)), 1917 }, 1918 { 1919 .code = (EBPF_ALU64 | BPF_ADD | BPF_X), 1920 .dst_reg = EBPF_REG_0, 1921 .src_reg = EBPF_REG_1, 1922 }, 1923 { 1924 .code = (BPF_LDX | BPF_MEM | BPF_H), 1925 .dst_reg = EBPF_REG_1, 1926 .src_reg = EBPF_REG_10, 1927 .off = -(int32_t)(2 * sizeof(struct dummy_offset) - 1928 offsetof(struct dummy_offset, u16)), 1929 }, 1930 { 1931 .code = (EBPF_ALU64 | BPF_ADD | BPF_X), 1932 .dst_reg = EBPF_REG_0, 1933 .src_reg = EBPF_REG_1, 1934 }, 1935 { 1936 .code = (BPF_LDX | BPF_MEM | BPF_B), 1937 .dst_reg = EBPF_REG_1, 1938 .src_reg = EBPF_REG_10, 1939 .off = -(int32_t)(2 * sizeof(struct dummy_offset) - 1940 offsetof(struct dummy_offset, u8)), 1941 }, 1942 { 1943 .code = (EBPF_ALU64 | BPF_ADD | BPF_X), 1944 .dst_reg = EBPF_REG_0, 1945 .src_reg = EBPF_REG_1, 1946 }, 1947 { 1948 .code = (BPF_JMP | EBPF_EXIT), 1949 }, 1950 1951 }; 1952 1953 static void 1954 dummy_func2(struct dummy_offset *a, struct dummy_offset *b) 1955 { 1956 uint64_t v; 1957 1958 v = 0; 1959 a->u64 = v++; 1960 a->u32 = v++; 1961 a->u16 = v++; 1962 a->u8 = v++; 1963 b->u64 = v++; 1964 b->u32 = v++; 1965 b->u16 = v++; 1966 b->u8 = v++; 1967 } 1968 1969 static int 1970 test_call2_check(uint64_t rc, const void *arg) 1971 { 1972 uint64_t v; 1973 struct dummy_offset a, b; 1974 1975 RTE_SET_USED(arg); 1976 1977 dummy_func2(&a, &b); 1978 v = a.u64 + a.u32 + b.u16 + b.u8; 1979 1980 return cmp_res(__func__, v, rc, arg, arg, 0); 1981 } 1982 1983 static const struct rte_bpf_xsym test_call2_xsym[] = { 1984 { 1985 .name = RTE_STR(dummy_func2), 1986 .type = RTE_BPF_XTYPE_FUNC, 1987 .func = { 1988 .val = (void *)dummy_func2, 1989 .nb_args = 2, 1990 .args = { 1991 [0] = { 1992 .type = RTE_BPF_ARG_PTR, 1993 .size = sizeof(struct dummy_offset), 1994 }, 1995 [1] = { 1996 .type = RTE_BPF_ARG_PTR, 1997 .size = sizeof(struct dummy_offset), 1998 }, 1999 }, 2000 }, 2001 }, 2002 }; 2003 2004 static const struct ebpf_insn test_call3_prog[] = { 2005 2006 { 2007 .code = (BPF_JMP | EBPF_CALL), 2008 .imm = 0, 2009 }, 2010 { 2011 .code = (BPF_LDX | BPF_MEM | BPF_B), 2012 .dst_reg = EBPF_REG_2, 2013 .src_reg = EBPF_REG_0, 2014 .off = offsetof(struct dummy_offset, u8), 2015 }, 2016 { 2017 .code = (BPF_LDX | BPF_MEM | BPF_H), 2018 .dst_reg = EBPF_REG_3, 2019 .src_reg = EBPF_REG_0, 2020 .off = offsetof(struct dummy_offset, u16), 2021 }, 2022 { 2023 .code = (BPF_LDX | BPF_MEM | BPF_W), 2024 .dst_reg = EBPF_REG_4, 2025 .src_reg = EBPF_REG_0, 2026 .off = offsetof(struct dummy_offset, u32), 2027 }, 2028 { 2029 .code = (BPF_LDX | BPF_MEM | EBPF_DW), 2030 .dst_reg = EBPF_REG_0, 2031 .src_reg = EBPF_REG_0, 2032 .off = offsetof(struct dummy_offset, u64), 2033 }, 2034 /* return sum */ 2035 { 2036 .code = (EBPF_ALU64 | BPF_ADD | BPF_X), 2037 .dst_reg = EBPF_REG_0, 2038 .src_reg = EBPF_REG_4, 2039 }, 2040 { 2041 .code = (EBPF_ALU64 | BPF_ADD | BPF_X), 2042 .dst_reg = EBPF_REG_0, 2043 .src_reg = EBPF_REG_3, 2044 }, 2045 { 2046 .code = (EBPF_ALU64 | BPF_ADD | BPF_X), 2047 .dst_reg = EBPF_REG_0, 2048 .src_reg = EBPF_REG_2, 2049 }, 2050 { 2051 .code = (BPF_JMP | EBPF_EXIT), 2052 }, 2053 }; 2054 2055 static const struct dummy_offset * 2056 dummy_func3(const struct dummy_vect8 *p) 2057 { 2058 return &p->in[RTE_DIM(p->in) - 1]; 2059 } 2060 2061 static void 2062 test_call3_prepare(void *arg) 2063 { 2064 struct dummy_vect8 *pv; 2065 struct dummy_offset *df; 2066 2067 pv = arg; 2068 df = (struct dummy_offset *)(uintptr_t)dummy_func3(pv); 2069 2070 memset(pv, 0, sizeof(*pv)); 2071 df->u64 = (int32_t)TEST_FILL_1; 2072 df->u32 = df->u64; 2073 df->u16 = df->u64; 2074 df->u8 = df->u64; 2075 } 2076 2077 static int 2078 test_call3_check(uint64_t rc, const void *arg) 2079 { 2080 uint64_t v; 2081 const struct dummy_vect8 *pv; 2082 const struct dummy_offset *dft; 2083 2084 pv = arg; 2085 dft = dummy_func3(pv); 2086 2087 v = dft->u64; 2088 v += dft->u32; 2089 v += dft->u16; 2090 v += dft->u8; 2091 2092 return cmp_res(__func__, v, rc, pv, pv, sizeof(*pv)); 2093 } 2094 2095 static const struct rte_bpf_xsym test_call3_xsym[] = { 2096 { 2097 .name = RTE_STR(dummy_func3), 2098 .type = RTE_BPF_XTYPE_FUNC, 2099 .func = { 2100 .val = (void *)dummy_func3, 2101 .nb_args = 1, 2102 .args = { 2103 [0] = { 2104 .type = RTE_BPF_ARG_PTR, 2105 .size = sizeof(struct dummy_vect8), 2106 }, 2107 }, 2108 .ret = { 2109 .type = RTE_BPF_ARG_PTR, 2110 .size = sizeof(struct dummy_offset), 2111 }, 2112 }, 2113 }, 2114 }; 2115 2116 /* Test for stack corruption in multiple function calls */ 2117 static const struct ebpf_insn test_call4_prog[] = { 2118 { 2119 .code = (BPF_ST | BPF_MEM | BPF_B), 2120 .dst_reg = EBPF_REG_10, 2121 .off = -4, 2122 .imm = 1, 2123 }, 2124 { 2125 .code = (BPF_ST | BPF_MEM | BPF_B), 2126 .dst_reg = EBPF_REG_10, 2127 .off = -3, 2128 .imm = 2, 2129 }, 2130 { 2131 .code = (BPF_ST | BPF_MEM | BPF_B), 2132 .dst_reg = EBPF_REG_10, 2133 .off = -2, 2134 .imm = 3, 2135 }, 2136 { 2137 .code = (BPF_ST | BPF_MEM | BPF_B), 2138 .dst_reg = EBPF_REG_10, 2139 .off = -1, 2140 .imm = 4, 2141 }, 2142 { 2143 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X), 2144 .dst_reg = EBPF_REG_1, 2145 .src_reg = EBPF_REG_10, 2146 }, 2147 { 2148 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K), 2149 .dst_reg = EBPF_REG_2, 2150 .imm = 4, 2151 }, 2152 { 2153 .code = (EBPF_ALU64 | BPF_SUB | BPF_X), 2154 .dst_reg = EBPF_REG_1, 2155 .src_reg = EBPF_REG_2, 2156 }, 2157 { 2158 .code = (BPF_JMP | EBPF_CALL), 2159 .imm = 0, 2160 }, 2161 { 2162 .code = (BPF_LDX | BPF_MEM | BPF_B), 2163 .dst_reg = EBPF_REG_1, 2164 .src_reg = EBPF_REG_10, 2165 .off = -4, 2166 }, 2167 { 2168 .code = (BPF_LDX | BPF_MEM | BPF_B), 2169 .dst_reg = EBPF_REG_2, 2170 .src_reg = EBPF_REG_10, 2171 .off = -3, 2172 }, 2173 { 2174 .code = (BPF_LDX | BPF_MEM | BPF_B), 2175 .dst_reg = EBPF_REG_3, 2176 .src_reg = EBPF_REG_10, 2177 .off = -2, 2178 }, 2179 { 2180 .code = (BPF_LDX | BPF_MEM | BPF_B), 2181 .dst_reg = EBPF_REG_4, 2182 .src_reg = EBPF_REG_10, 2183 .off = -1, 2184 }, 2185 { 2186 .code = (BPF_JMP | EBPF_CALL), 2187 .imm = 1, 2188 }, 2189 { 2190 .code = (EBPF_ALU64 | BPF_XOR | BPF_K), 2191 .dst_reg = EBPF_REG_0, 2192 .imm = TEST_MEMFROB, 2193 }, 2194 { 2195 .code = (BPF_JMP | EBPF_EXIT), 2196 }, 2197 }; 2198 2199 /* Gathering the bytes together */ 2200 static uint32_t 2201 dummy_func4_1(uint8_t a, uint8_t b, uint8_t c, uint8_t d) 2202 { 2203 return (a << 24) | (b << 16) | (c << 8) | (d << 0); 2204 } 2205 2206 /* Implementation of memfrob */ 2207 static uint32_t 2208 dummy_func4_0(uint32_t *s, uint8_t n) 2209 { 2210 char *p = (char *) s; 2211 while (n-- > 0) 2212 *p++ ^= 42; 2213 return *s; 2214 } 2215 2216 2217 static int 2218 test_call4_check(uint64_t rc, const void *arg) 2219 { 2220 uint8_t a[4] = {1, 2, 3, 4}; 2221 uint32_t s, v = 0; 2222 2223 RTE_SET_USED(arg); 2224 2225 s = dummy_func4_0((uint32_t *)a, 4); 2226 2227 s = dummy_func4_1(a[0], a[1], a[2], a[3]); 2228 2229 v = s ^ TEST_MEMFROB; 2230 2231 return cmp_res(__func__, v, rc, &v, &rc, sizeof(v)); 2232 } 2233 2234 static const struct rte_bpf_xsym test_call4_xsym[] = { 2235 [0] = { 2236 .name = RTE_STR(dummy_func4_0), 2237 .type = RTE_BPF_XTYPE_FUNC, 2238 .func = { 2239 .val = (void *)dummy_func4_0, 2240 .nb_args = 2, 2241 .args = { 2242 [0] = { 2243 .type = RTE_BPF_ARG_PTR, 2244 .size = 4 * sizeof(uint8_t), 2245 }, 2246 [1] = { 2247 .type = RTE_BPF_ARG_RAW, 2248 .size = sizeof(uint8_t), 2249 }, 2250 }, 2251 .ret = { 2252 .type = RTE_BPF_ARG_RAW, 2253 .size = sizeof(uint32_t), 2254 }, 2255 }, 2256 }, 2257 [1] = { 2258 .name = RTE_STR(dummy_func4_1), 2259 .type = RTE_BPF_XTYPE_FUNC, 2260 .func = { 2261 .val = (void *)dummy_func4_1, 2262 .nb_args = 4, 2263 .args = { 2264 [0] = { 2265 .type = RTE_BPF_ARG_RAW, 2266 .size = sizeof(uint8_t), 2267 }, 2268 [1] = { 2269 .type = RTE_BPF_ARG_RAW, 2270 .size = sizeof(uint8_t), 2271 }, 2272 [2] = { 2273 .type = RTE_BPF_ARG_RAW, 2274 .size = sizeof(uint8_t), 2275 }, 2276 [3] = { 2277 .type = RTE_BPF_ARG_RAW, 2278 .size = sizeof(uint8_t), 2279 }, 2280 }, 2281 .ret = { 2282 .type = RTE_BPF_ARG_RAW, 2283 .size = sizeof(uint32_t), 2284 }, 2285 }, 2286 }, 2287 }; 2288 2289 /* string compare test case */ 2290 static const struct ebpf_insn test_call5_prog[] = { 2291 2292 [0] = { 2293 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K), 2294 .dst_reg = EBPF_REG_1, 2295 .imm = STRING_GEEK, 2296 }, 2297 [1] = { 2298 .code = (BPF_STX | BPF_MEM | BPF_W), 2299 .dst_reg = EBPF_REG_10, 2300 .src_reg = EBPF_REG_1, 2301 .off = -8, 2302 }, 2303 [2] = { 2304 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K), 2305 .dst_reg = EBPF_REG_6, 2306 .imm = 0, 2307 }, 2308 [3] = { 2309 .code = (BPF_STX | BPF_MEM | BPF_B), 2310 .dst_reg = EBPF_REG_10, 2311 .src_reg = EBPF_REG_6, 2312 .off = -4, 2313 }, 2314 [4] = { 2315 .code = (BPF_STX | BPF_MEM | BPF_W), 2316 .dst_reg = EBPF_REG_10, 2317 .src_reg = EBPF_REG_6, 2318 .off = -12, 2319 }, 2320 [5] = { 2321 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K), 2322 .dst_reg = EBPF_REG_1, 2323 .imm = STRING_WEEK, 2324 }, 2325 [6] = { 2326 .code = (BPF_STX | BPF_MEM | BPF_W), 2327 .dst_reg = EBPF_REG_10, 2328 .src_reg = EBPF_REG_1, 2329 .off = -16, 2330 }, 2331 [7] = { 2332 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X), 2333 .dst_reg = EBPF_REG_1, 2334 .src_reg = EBPF_REG_10, 2335 }, 2336 [8] = { 2337 .code = (EBPF_ALU64 | BPF_ADD | BPF_K), 2338 .dst_reg = EBPF_REG_1, 2339 .imm = -8, 2340 }, 2341 [9] = { 2342 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X), 2343 .dst_reg = EBPF_REG_2, 2344 .src_reg = EBPF_REG_1, 2345 }, 2346 [10] = { 2347 .code = (BPF_JMP | EBPF_CALL), 2348 .imm = 0, 2349 }, 2350 [11] = { 2351 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X), 2352 .dst_reg = EBPF_REG_1, 2353 .src_reg = EBPF_REG_0, 2354 }, 2355 [12] = { 2356 .code = (BPF_ALU | EBPF_MOV | BPF_K), 2357 .dst_reg = EBPF_REG_0, 2358 .imm = -1, 2359 }, 2360 [13] = { 2361 .code = (EBPF_ALU64 | BPF_LSH | BPF_K), 2362 .dst_reg = EBPF_REG_1, 2363 .imm = 0x20, 2364 }, 2365 [14] = { 2366 .code = (EBPF_ALU64 | BPF_RSH | BPF_K), 2367 .dst_reg = EBPF_REG_1, 2368 .imm = 0x20, 2369 }, 2370 [15] = { 2371 .code = (BPF_JMP | EBPF_JNE | BPF_K), 2372 .dst_reg = EBPF_REG_1, 2373 .off = 11, 2374 .imm = 0, 2375 }, 2376 [16] = { 2377 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X), 2378 .dst_reg = EBPF_REG_1, 2379 .src_reg = EBPF_REG_10, 2380 }, 2381 [17] = { 2382 .code = (EBPF_ALU64 | BPF_ADD | BPF_K), 2383 .dst_reg = EBPF_REG_1, 2384 .imm = -8, 2385 }, 2386 [18] = { 2387 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X), 2388 .dst_reg = EBPF_REG_2, 2389 .src_reg = EBPF_REG_10, 2390 }, 2391 [19] = { 2392 .code = (EBPF_ALU64 | BPF_ADD | BPF_K), 2393 .dst_reg = EBPF_REG_2, 2394 .imm = -16, 2395 }, 2396 [20] = { 2397 .code = (BPF_JMP | EBPF_CALL), 2398 .imm = 0, 2399 }, 2400 [21] = { 2401 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X), 2402 .dst_reg = EBPF_REG_1, 2403 .src_reg = EBPF_REG_0, 2404 }, 2405 [22] = { 2406 .code = (EBPF_ALU64 | BPF_LSH | BPF_K), 2407 .dst_reg = EBPF_REG_1, 2408 .imm = 0x20, 2409 }, 2410 [23] = { 2411 .code = (EBPF_ALU64 | BPF_RSH | BPF_K), 2412 .dst_reg = EBPF_REG_1, 2413 .imm = 0x20, 2414 }, 2415 [24] = { 2416 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X), 2417 .dst_reg = EBPF_REG_0, 2418 .src_reg = EBPF_REG_1, 2419 }, 2420 [25] = { 2421 .code = (BPF_JMP | BPF_JEQ | BPF_X), 2422 .dst_reg = EBPF_REG_1, 2423 .src_reg = EBPF_REG_6, 2424 .off = 1, 2425 }, 2426 [26] = { 2427 .code = (EBPF_ALU64 | EBPF_MOV | BPF_K), 2428 .dst_reg = EBPF_REG_0, 2429 .imm = 0, 2430 }, 2431 [27] = { 2432 .code = (BPF_JMP | EBPF_EXIT), 2433 }, 2434 }; 2435 2436 /* String comparison implementation, return 0 if equal else difference */ 2437 static uint32_t 2438 dummy_func5(const char *s1, const char *s2) 2439 { 2440 while (*s1 && (*s1 == *s2)) { 2441 s1++; 2442 s2++; 2443 } 2444 return *(const unsigned char *)s1 - *(const unsigned char *)s2; 2445 } 2446 2447 static int 2448 test_call5_check(uint64_t rc, const void *arg) 2449 { 2450 char a[] = "geek"; 2451 char b[] = "week"; 2452 uint32_t v; 2453 2454 RTE_SET_USED(arg); 2455 2456 v = dummy_func5(a, a); 2457 if (v != 0) { 2458 v = -1; 2459 goto fail; 2460 } 2461 2462 v = dummy_func5(a, b); 2463 if (v == 0) 2464 goto fail; 2465 2466 v = 0; 2467 2468 fail: 2469 return cmp_res(__func__, v, rc, &v, &rc, sizeof(v)); 2470 } 2471 2472 static const struct rte_bpf_xsym test_call5_xsym[] = { 2473 [0] = { 2474 .name = RTE_STR(dummy_func5), 2475 .type = RTE_BPF_XTYPE_FUNC, 2476 .func = { 2477 .val = (void *)dummy_func5, 2478 .nb_args = 2, 2479 .args = { 2480 [0] = { 2481 .type = RTE_BPF_ARG_PTR, 2482 .size = sizeof(char), 2483 }, 2484 [1] = { 2485 .type = RTE_BPF_ARG_PTR, 2486 .size = sizeof(char), 2487 }, 2488 }, 2489 .ret = { 2490 .type = RTE_BPF_ARG_RAW, 2491 .size = sizeof(uint32_t), 2492 }, 2493 }, 2494 }, 2495 }; 2496 2497 /* load mbuf (BPF_ABS/BPF_IND) test-cases */ 2498 static const struct ebpf_insn test_ld_mbuf1_prog[] = { 2499 2500 /* BPF_ABS/BPF_IND implicitly expect mbuf ptr in R6 */ 2501 { 2502 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X), 2503 .dst_reg = EBPF_REG_6, 2504 .src_reg = EBPF_REG_1, 2505 }, 2506 /* load IPv4 version and IHL */ 2507 { 2508 .code = (BPF_LD | BPF_ABS | BPF_B), 2509 .imm = offsetof(struct rte_ipv4_hdr, version_ihl), 2510 }, 2511 /* check IP version */ 2512 { 2513 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X), 2514 .dst_reg = EBPF_REG_2, 2515 .src_reg = EBPF_REG_0, 2516 }, 2517 { 2518 .code = (BPF_ALU | BPF_AND | BPF_K), 2519 .dst_reg = EBPF_REG_2, 2520 .imm = 0xf0, 2521 }, 2522 { 2523 .code = (BPF_JMP | BPF_JEQ | BPF_K), 2524 .dst_reg = EBPF_REG_2, 2525 .imm = IPVERSION << 4, 2526 .off = 2, 2527 }, 2528 /* invalid IP version, return 0 */ 2529 { 2530 .code = (EBPF_ALU64 | BPF_XOR | BPF_X), 2531 .dst_reg = EBPF_REG_0, 2532 .src_reg = EBPF_REG_0, 2533 }, 2534 { 2535 .code = (BPF_JMP | EBPF_EXIT), 2536 }, 2537 /* load 3-rd byte of IP data */ 2538 { 2539 .code = (BPF_ALU | BPF_AND | BPF_K), 2540 .dst_reg = EBPF_REG_0, 2541 .imm = RTE_IPV4_HDR_IHL_MASK, 2542 }, 2543 { 2544 .code = (BPF_ALU | BPF_LSH | BPF_K), 2545 .dst_reg = EBPF_REG_0, 2546 .imm = 2, 2547 }, 2548 { 2549 .code = (BPF_LD | BPF_IND | BPF_B), 2550 .src_reg = EBPF_REG_0, 2551 .imm = 3, 2552 }, 2553 { 2554 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X), 2555 .dst_reg = EBPF_REG_7, 2556 .src_reg = EBPF_REG_0, 2557 }, 2558 /* load IPv4 src addr */ 2559 { 2560 .code = (BPF_LD | BPF_ABS | BPF_W), 2561 .imm = offsetof(struct rte_ipv4_hdr, src_addr), 2562 }, 2563 { 2564 .code = (EBPF_ALU64 | BPF_ADD | BPF_X), 2565 .dst_reg = EBPF_REG_7, 2566 .src_reg = EBPF_REG_0, 2567 }, 2568 /* load IPv4 total length */ 2569 { 2570 .code = (BPF_LD | BPF_ABS | BPF_H), 2571 .imm = offsetof(struct rte_ipv4_hdr, total_length), 2572 }, 2573 { 2574 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X), 2575 .dst_reg = EBPF_REG_8, 2576 .src_reg = EBPF_REG_0, 2577 }, 2578 /* load last 4 bytes of IP data */ 2579 { 2580 .code = (BPF_LD | BPF_IND | BPF_W), 2581 .src_reg = EBPF_REG_8, 2582 .imm = -(int32_t)sizeof(uint32_t), 2583 }, 2584 { 2585 .code = (EBPF_ALU64 | BPF_ADD | BPF_X), 2586 .dst_reg = EBPF_REG_7, 2587 .src_reg = EBPF_REG_0, 2588 }, 2589 /* load 2 bytes from the middle of IP data */ 2590 { 2591 .code = (EBPF_ALU64 | BPF_RSH | BPF_K), 2592 .dst_reg = EBPF_REG_8, 2593 .imm = 1, 2594 }, 2595 { 2596 .code = (BPF_LD | BPF_IND | BPF_H), 2597 .src_reg = EBPF_REG_8, 2598 }, 2599 { 2600 .code = (EBPF_ALU64 | BPF_ADD | BPF_X), 2601 .dst_reg = EBPF_REG_0, 2602 .src_reg = EBPF_REG_7, 2603 }, 2604 { 2605 .code = (BPF_JMP | EBPF_EXIT), 2606 }, 2607 }; 2608 2609 static void 2610 dummy_mbuf_prep(struct rte_mbuf *mb, uint8_t buf[], uint32_t buf_len, 2611 uint32_t data_len) 2612 { 2613 uint32_t i; 2614 uint8_t *db; 2615 2616 mb->buf_addr = buf; 2617 rte_mbuf_iova_set(mb, (uintptr_t)buf); 2618 mb->buf_len = buf_len; 2619 rte_mbuf_refcnt_set(mb, 1); 2620 2621 /* set pool pointer to dummy value, test doesn't use it */ 2622 mb->pool = (void *)buf; 2623 2624 rte_pktmbuf_reset(mb); 2625 db = (uint8_t *)rte_pktmbuf_append(mb, data_len); 2626 2627 for (i = 0; i != data_len; i++) 2628 db[i] = i; 2629 } 2630 2631 static void 2632 test_ld_mbuf1_prepare(void *arg) 2633 { 2634 struct dummy_mbuf *dm; 2635 struct rte_ipv4_hdr *ph; 2636 2637 const uint32_t plen = 400; 2638 const struct rte_ipv4_hdr iph = { 2639 .version_ihl = RTE_IPV4_VHL_DEF, 2640 .total_length = rte_cpu_to_be_16(plen), 2641 .time_to_live = IPDEFTTL, 2642 .next_proto_id = IPPROTO_RAW, 2643 .src_addr = rte_cpu_to_be_32(RTE_IPV4_LOOPBACK), 2644 .dst_addr = rte_cpu_to_be_32(RTE_IPV4_BROADCAST), 2645 }; 2646 2647 dm = arg; 2648 memset(dm, 0, sizeof(*dm)); 2649 2650 dummy_mbuf_prep(&dm->mb[0], dm->buf[0], sizeof(dm->buf[0]), 2651 plen / 2 + 1); 2652 dummy_mbuf_prep(&dm->mb[1], dm->buf[1], sizeof(dm->buf[0]), 2653 plen / 2 - 1); 2654 2655 rte_pktmbuf_chain(&dm->mb[0], &dm->mb[1]); 2656 2657 ph = rte_pktmbuf_mtod(dm->mb, typeof(ph)); 2658 memcpy(ph, &iph, sizeof(iph)); 2659 } 2660 2661 static uint64_t 2662 test_ld_mbuf1(const struct rte_mbuf *pkt) 2663 { 2664 uint64_t n, v; 2665 const uint8_t *p8; 2666 const uint16_t *p16; 2667 const uint32_t *p32; 2668 struct dummy_offset dof; 2669 2670 /* load IPv4 version and IHL */ 2671 p8 = rte_pktmbuf_read(pkt, 2672 offsetof(struct rte_ipv4_hdr, version_ihl), sizeof(*p8), 2673 &dof); 2674 if (p8 == NULL) 2675 return 0; 2676 2677 /* check IP version */ 2678 if ((p8[0] & 0xf0) != IPVERSION << 4) 2679 return 0; 2680 2681 n = (p8[0] & RTE_IPV4_HDR_IHL_MASK) * RTE_IPV4_IHL_MULTIPLIER; 2682 2683 /* load 3-rd byte of IP data */ 2684 p8 = rte_pktmbuf_read(pkt, n + 3, sizeof(*p8), &dof); 2685 if (p8 == NULL) 2686 return 0; 2687 2688 v = p8[0]; 2689 2690 /* load IPv4 src addr */ 2691 p32 = rte_pktmbuf_read(pkt, 2692 offsetof(struct rte_ipv4_hdr, src_addr), sizeof(*p32), 2693 &dof); 2694 if (p32 == NULL) 2695 return 0; 2696 2697 v += rte_be_to_cpu_32(p32[0]); 2698 2699 /* load IPv4 total length */ 2700 p16 = rte_pktmbuf_read(pkt, 2701 offsetof(struct rte_ipv4_hdr, total_length), sizeof(*p16), 2702 &dof); 2703 if (p16 == NULL) 2704 return 0; 2705 2706 n = rte_be_to_cpu_16(p16[0]); 2707 2708 /* load last 4 bytes of IP data */ 2709 p32 = rte_pktmbuf_read(pkt, n - sizeof(*p32), sizeof(*p32), &dof); 2710 if (p32 == NULL) 2711 return 0; 2712 2713 v += rte_be_to_cpu_32(p32[0]); 2714 2715 /* load 2 bytes from the middle of IP data */ 2716 p16 = rte_pktmbuf_read(pkt, n / 2, sizeof(*p16), &dof); 2717 if (p16 == NULL) 2718 return 0; 2719 2720 v += rte_be_to_cpu_16(p16[0]); 2721 return v; 2722 } 2723 2724 static int 2725 test_ld_mbuf1_check(uint64_t rc, const void *arg) 2726 { 2727 const struct dummy_mbuf *dm; 2728 uint64_t v; 2729 2730 dm = arg; 2731 v = test_ld_mbuf1(dm->mb); 2732 return cmp_res(__func__, v, rc, arg, arg, 0); 2733 } 2734 2735 /* 2736 * same as ld_mbuf1, but then truncate the mbuf by 1B, 2737 * so load of last 4B fail. 2738 */ 2739 static void 2740 test_ld_mbuf2_prepare(void *arg) 2741 { 2742 struct dummy_mbuf *dm; 2743 2744 test_ld_mbuf1_prepare(arg); 2745 dm = arg; 2746 rte_pktmbuf_trim(dm->mb, 1); 2747 } 2748 2749 static int 2750 test_ld_mbuf2_check(uint64_t rc, const void *arg) 2751 { 2752 return cmp_res(__func__, 0, rc, arg, arg, 0); 2753 } 2754 2755 /* same as test_ld_mbuf1, but now store intermediate results on the stack */ 2756 static const struct ebpf_insn test_ld_mbuf3_prog[] = { 2757 2758 /* BPF_ABS/BPF_IND implicitly expect mbuf ptr in R6 */ 2759 { 2760 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X), 2761 .dst_reg = EBPF_REG_6, 2762 .src_reg = EBPF_REG_1, 2763 }, 2764 /* load IPv4 version and IHL */ 2765 { 2766 .code = (BPF_LD | BPF_ABS | BPF_B), 2767 .imm = offsetof(struct rte_ipv4_hdr, version_ihl), 2768 }, 2769 /* check IP version */ 2770 { 2771 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X), 2772 .dst_reg = EBPF_REG_2, 2773 .src_reg = EBPF_REG_0, 2774 }, 2775 { 2776 .code = (BPF_ALU | BPF_AND | BPF_K), 2777 .dst_reg = EBPF_REG_2, 2778 .imm = 0xf0, 2779 }, 2780 { 2781 .code = (BPF_JMP | BPF_JEQ | BPF_K), 2782 .dst_reg = EBPF_REG_2, 2783 .imm = IPVERSION << 4, 2784 .off = 2, 2785 }, 2786 /* invalid IP version, return 0 */ 2787 { 2788 .code = (EBPF_ALU64 | BPF_XOR | BPF_X), 2789 .dst_reg = EBPF_REG_0, 2790 .src_reg = EBPF_REG_0, 2791 }, 2792 { 2793 .code = (BPF_JMP | EBPF_EXIT), 2794 }, 2795 /* load 3-rd byte of IP data */ 2796 { 2797 .code = (BPF_ALU | BPF_AND | BPF_K), 2798 .dst_reg = EBPF_REG_0, 2799 .imm = RTE_IPV4_HDR_IHL_MASK, 2800 }, 2801 { 2802 .code = (BPF_ALU | BPF_LSH | BPF_K), 2803 .dst_reg = EBPF_REG_0, 2804 .imm = 2, 2805 }, 2806 { 2807 .code = (BPF_LD | BPF_IND | BPF_B), 2808 .src_reg = EBPF_REG_0, 2809 .imm = 3, 2810 }, 2811 { 2812 .code = (BPF_STX | BPF_MEM | BPF_B), 2813 .dst_reg = EBPF_REG_10, 2814 .src_reg = EBPF_REG_0, 2815 .off = (int16_t)(offsetof(struct dummy_offset, u8) - 2816 sizeof(struct dummy_offset)), 2817 }, 2818 /* load IPv4 src addr */ 2819 { 2820 .code = (BPF_LD | BPF_ABS | BPF_W), 2821 .imm = offsetof(struct rte_ipv4_hdr, src_addr), 2822 }, 2823 { 2824 .code = (BPF_STX | BPF_MEM | BPF_W), 2825 .dst_reg = EBPF_REG_10, 2826 .src_reg = EBPF_REG_0, 2827 .off = (int16_t)(offsetof(struct dummy_offset, u32) - 2828 sizeof(struct dummy_offset)), 2829 }, 2830 /* load IPv4 total length */ 2831 { 2832 .code = (BPF_LD | BPF_ABS | BPF_H), 2833 .imm = offsetof(struct rte_ipv4_hdr, total_length), 2834 }, 2835 { 2836 .code = (EBPF_ALU64 | EBPF_MOV | BPF_X), 2837 .dst_reg = EBPF_REG_8, 2838 .src_reg = EBPF_REG_0, 2839 }, 2840 /* load last 4 bytes of IP data */ 2841 { 2842 .code = (BPF_LD | BPF_IND | BPF_W), 2843 .src_reg = EBPF_REG_8, 2844 .imm = -(int32_t)sizeof(uint32_t), 2845 }, 2846 { 2847 .code = (BPF_STX | BPF_MEM | EBPF_DW), 2848 .dst_reg = EBPF_REG_10, 2849 .src_reg = EBPF_REG_0, 2850 .off = (int16_t)(offsetof(struct dummy_offset, u64) - 2851 sizeof(struct dummy_offset)), 2852 }, 2853 /* load 2 bytes from the middle of IP data */ 2854 { 2855 .code = (EBPF_ALU64 | BPF_RSH | BPF_K), 2856 .dst_reg = EBPF_REG_8, 2857 .imm = 1, 2858 }, 2859 { 2860 .code = (BPF_LD | BPF_IND | BPF_H), 2861 .src_reg = EBPF_REG_8, 2862 }, 2863 { 2864 .code = (BPF_LDX | BPF_MEM | EBPF_DW), 2865 .dst_reg = EBPF_REG_1, 2866 .src_reg = EBPF_REG_10, 2867 .off = (int16_t)(offsetof(struct dummy_offset, u64) - 2868 sizeof(struct dummy_offset)), 2869 }, 2870 { 2871 .code = (EBPF_ALU64 | BPF_ADD | BPF_X), 2872 .dst_reg = EBPF_REG_0, 2873 .src_reg = EBPF_REG_1, 2874 }, 2875 { 2876 .code = (BPF_LDX | BPF_MEM | BPF_W), 2877 .dst_reg = EBPF_REG_1, 2878 .src_reg = EBPF_REG_10, 2879 .off = (int16_t)(offsetof(struct dummy_offset, u32) - 2880 sizeof(struct dummy_offset)), 2881 }, 2882 { 2883 .code = (EBPF_ALU64 | BPF_ADD | BPF_X), 2884 .dst_reg = EBPF_REG_0, 2885 .src_reg = EBPF_REG_1, 2886 }, 2887 { 2888 .code = (BPF_LDX | BPF_MEM | BPF_B), 2889 .dst_reg = EBPF_REG_1, 2890 .src_reg = EBPF_REG_10, 2891 .off = (int16_t)(offsetof(struct dummy_offset, u8) - 2892 sizeof(struct dummy_offset)), 2893 }, 2894 { 2895 .code = (EBPF_ALU64 | BPF_ADD | BPF_X), 2896 .dst_reg = EBPF_REG_0, 2897 .src_reg = EBPF_REG_1, 2898 }, 2899 { 2900 .code = (BPF_JMP | EBPF_EXIT), 2901 }, 2902 }; 2903 2904 /* all bpf test cases */ 2905 static const struct bpf_test tests[] = { 2906 { 2907 .name = "test_store1", 2908 .arg_sz = sizeof(struct dummy_offset), 2909 .prm = { 2910 .ins = test_store1_prog, 2911 .nb_ins = RTE_DIM(test_store1_prog), 2912 .prog_arg = { 2913 .type = RTE_BPF_ARG_PTR, 2914 .size = sizeof(struct dummy_offset), 2915 }, 2916 }, 2917 .prepare = test_store1_prepare, 2918 .check_result = test_store1_check, 2919 }, 2920 { 2921 .name = "test_store2", 2922 .arg_sz = sizeof(struct dummy_offset), 2923 .prm = { 2924 .ins = test_store2_prog, 2925 .nb_ins = RTE_DIM(test_store2_prog), 2926 .prog_arg = { 2927 .type = RTE_BPF_ARG_PTR, 2928 .size = sizeof(struct dummy_offset), 2929 }, 2930 }, 2931 .prepare = test_store1_prepare, 2932 .check_result = test_store1_check, 2933 }, 2934 { 2935 .name = "test_load1", 2936 .arg_sz = sizeof(struct dummy_offset), 2937 .prm = { 2938 .ins = test_load1_prog, 2939 .nb_ins = RTE_DIM(test_load1_prog), 2940 .prog_arg = { 2941 .type = RTE_BPF_ARG_PTR, 2942 .size = sizeof(struct dummy_offset), 2943 }, 2944 }, 2945 .prepare = test_load1_prepare, 2946 .check_result = test_load1_check, 2947 }, 2948 { 2949 .name = "test_ldimm1", 2950 .arg_sz = sizeof(struct dummy_offset), 2951 .prm = { 2952 .ins = test_ldimm1_prog, 2953 .nb_ins = RTE_DIM(test_ldimm1_prog), 2954 .prog_arg = { 2955 .type = RTE_BPF_ARG_PTR, 2956 .size = sizeof(struct dummy_offset), 2957 }, 2958 }, 2959 .prepare = test_store1_prepare, 2960 .check_result = test_ldimm1_check, 2961 }, 2962 { 2963 .name = "test_mul1", 2964 .arg_sz = sizeof(struct dummy_vect8), 2965 .prm = { 2966 .ins = test_mul1_prog, 2967 .nb_ins = RTE_DIM(test_mul1_prog), 2968 .prog_arg = { 2969 .type = RTE_BPF_ARG_PTR, 2970 .size = sizeof(struct dummy_vect8), 2971 }, 2972 }, 2973 .prepare = test_mul1_prepare, 2974 .check_result = test_mul1_check, 2975 }, 2976 { 2977 .name = "test_shift1", 2978 .arg_sz = sizeof(struct dummy_vect8), 2979 .prm = { 2980 .ins = test_shift1_prog, 2981 .nb_ins = RTE_DIM(test_shift1_prog), 2982 .prog_arg = { 2983 .type = RTE_BPF_ARG_PTR, 2984 .size = sizeof(struct dummy_vect8), 2985 }, 2986 }, 2987 .prepare = test_shift1_prepare, 2988 .check_result = test_shift1_check, 2989 }, 2990 { 2991 .name = "test_jump1", 2992 .arg_sz = sizeof(struct dummy_vect8), 2993 .prm = { 2994 .ins = test_jump1_prog, 2995 .nb_ins = RTE_DIM(test_jump1_prog), 2996 .prog_arg = { 2997 .type = RTE_BPF_ARG_PTR, 2998 .size = sizeof(struct dummy_vect8), 2999 }, 3000 }, 3001 .prepare = test_jump1_prepare, 3002 .check_result = test_jump1_check, 3003 }, 3004 { 3005 .name = "test_jump2", 3006 .arg_sz = sizeof(struct dummy_net), 3007 .prm = { 3008 .ins = test_jump2_prog, 3009 .nb_ins = RTE_DIM(test_jump2_prog), 3010 .prog_arg = { 3011 .type = RTE_BPF_ARG_PTR, 3012 .size = sizeof(struct dummy_net), 3013 }, 3014 }, 3015 .prepare = test_jump2_prepare, 3016 .check_result = test_jump2_check, 3017 }, 3018 { 3019 .name = "test_alu1", 3020 .arg_sz = sizeof(struct dummy_vect8), 3021 .prm = { 3022 .ins = test_alu1_prog, 3023 .nb_ins = RTE_DIM(test_alu1_prog), 3024 .prog_arg = { 3025 .type = RTE_BPF_ARG_PTR, 3026 .size = sizeof(struct dummy_vect8), 3027 }, 3028 }, 3029 .prepare = test_jump1_prepare, 3030 .check_result = test_alu1_check, 3031 }, 3032 { 3033 .name = "test_bele1", 3034 .arg_sz = sizeof(struct dummy_vect8), 3035 .prm = { 3036 .ins = test_bele1_prog, 3037 .nb_ins = RTE_DIM(test_bele1_prog), 3038 .prog_arg = { 3039 .type = RTE_BPF_ARG_PTR, 3040 .size = sizeof(struct dummy_vect8), 3041 }, 3042 }, 3043 .prepare = test_bele1_prepare, 3044 .check_result = test_bele1_check, 3045 }, 3046 { 3047 .name = "test_xadd1", 3048 .arg_sz = sizeof(struct dummy_offset), 3049 .prm = { 3050 .ins = test_xadd1_prog, 3051 .nb_ins = RTE_DIM(test_xadd1_prog), 3052 .prog_arg = { 3053 .type = RTE_BPF_ARG_PTR, 3054 .size = sizeof(struct dummy_offset), 3055 }, 3056 }, 3057 .prepare = test_store1_prepare, 3058 .check_result = test_xadd1_check, 3059 }, 3060 { 3061 .name = "test_div1", 3062 .arg_sz = sizeof(struct dummy_vect8), 3063 .prm = { 3064 .ins = test_div1_prog, 3065 .nb_ins = RTE_DIM(test_div1_prog), 3066 .prog_arg = { 3067 .type = RTE_BPF_ARG_PTR, 3068 .size = sizeof(struct dummy_vect8), 3069 }, 3070 }, 3071 .prepare = test_mul1_prepare, 3072 .check_result = test_div1_check, 3073 }, 3074 { 3075 .name = "test_call1", 3076 .arg_sz = sizeof(struct dummy_offset), 3077 .prm = { 3078 .ins = test_call1_prog, 3079 .nb_ins = RTE_DIM(test_call1_prog), 3080 .prog_arg = { 3081 .type = RTE_BPF_ARG_PTR, 3082 .size = sizeof(struct dummy_offset), 3083 }, 3084 .xsym = test_call1_xsym, 3085 .nb_xsym = RTE_DIM(test_call1_xsym), 3086 }, 3087 .prepare = test_load1_prepare, 3088 .check_result = test_call1_check, 3089 /* for now don't support function calls on 32 bit platform */ 3090 .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)), 3091 }, 3092 { 3093 .name = "test_call2", 3094 .arg_sz = sizeof(struct dummy_offset), 3095 .prm = { 3096 .ins = test_call2_prog, 3097 .nb_ins = RTE_DIM(test_call2_prog), 3098 .prog_arg = { 3099 .type = RTE_BPF_ARG_PTR, 3100 .size = sizeof(struct dummy_offset), 3101 }, 3102 .xsym = test_call2_xsym, 3103 .nb_xsym = RTE_DIM(test_call2_xsym), 3104 }, 3105 .prepare = test_store1_prepare, 3106 .check_result = test_call2_check, 3107 /* for now don't support function calls on 32 bit platform */ 3108 .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)), 3109 }, 3110 { 3111 .name = "test_call3", 3112 .arg_sz = sizeof(struct dummy_vect8), 3113 .prm = { 3114 .ins = test_call3_prog, 3115 .nb_ins = RTE_DIM(test_call3_prog), 3116 .prog_arg = { 3117 .type = RTE_BPF_ARG_PTR, 3118 .size = sizeof(struct dummy_vect8), 3119 }, 3120 .xsym = test_call3_xsym, 3121 .nb_xsym = RTE_DIM(test_call3_xsym), 3122 }, 3123 .prepare = test_call3_prepare, 3124 .check_result = test_call3_check, 3125 /* for now don't support function calls on 32 bit platform */ 3126 .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)), 3127 }, 3128 { 3129 .name = "test_call4", 3130 .arg_sz = sizeof(struct dummy_offset), 3131 .prm = { 3132 .ins = test_call4_prog, 3133 .nb_ins = RTE_DIM(test_call4_prog), 3134 .prog_arg = { 3135 .type = RTE_BPF_ARG_PTR, 3136 .size = 2 * sizeof(struct dummy_offset), 3137 }, 3138 .xsym = test_call4_xsym, 3139 .nb_xsym = RTE_DIM(test_call4_xsym), 3140 }, 3141 .prepare = test_store1_prepare, 3142 .check_result = test_call4_check, 3143 /* for now don't support function calls on 32 bit platform */ 3144 .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)), 3145 }, 3146 { 3147 .name = "test_call5", 3148 .arg_sz = sizeof(struct dummy_offset), 3149 .prm = { 3150 .ins = test_call5_prog, 3151 .nb_ins = RTE_DIM(test_call5_prog), 3152 .prog_arg = { 3153 .type = RTE_BPF_ARG_PTR, 3154 .size = sizeof(struct dummy_offset), 3155 }, 3156 .xsym = test_call5_xsym, 3157 .nb_xsym = RTE_DIM(test_call5_xsym), 3158 }, 3159 .prepare = test_store1_prepare, 3160 .check_result = test_call5_check, 3161 /* for now don't support function calls on 32 bit platform */ 3162 .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)), 3163 }, 3164 { 3165 .name = "test_ld_mbuf1", 3166 .arg_sz = sizeof(struct dummy_mbuf), 3167 .prm = { 3168 .ins = test_ld_mbuf1_prog, 3169 .nb_ins = RTE_DIM(test_ld_mbuf1_prog), 3170 .prog_arg = { 3171 .type = RTE_BPF_ARG_PTR_MBUF, 3172 .buf_size = sizeof(struct dummy_mbuf), 3173 }, 3174 }, 3175 .prepare = test_ld_mbuf1_prepare, 3176 .check_result = test_ld_mbuf1_check, 3177 /* mbuf as input argument is not supported on 32 bit platform */ 3178 .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)), 3179 }, 3180 { 3181 .name = "test_ld_mbuf2", 3182 .arg_sz = sizeof(struct dummy_mbuf), 3183 .prm = { 3184 .ins = test_ld_mbuf1_prog, 3185 .nb_ins = RTE_DIM(test_ld_mbuf1_prog), 3186 .prog_arg = { 3187 .type = RTE_BPF_ARG_PTR_MBUF, 3188 .buf_size = sizeof(struct dummy_mbuf), 3189 }, 3190 }, 3191 .prepare = test_ld_mbuf2_prepare, 3192 .check_result = test_ld_mbuf2_check, 3193 /* mbuf as input argument is not supported on 32 bit platform */ 3194 .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)), 3195 }, 3196 { 3197 .name = "test_ld_mbuf3", 3198 .arg_sz = sizeof(struct dummy_mbuf), 3199 .prm = { 3200 .ins = test_ld_mbuf3_prog, 3201 .nb_ins = RTE_DIM(test_ld_mbuf3_prog), 3202 .prog_arg = { 3203 .type = RTE_BPF_ARG_PTR_MBUF, 3204 .buf_size = sizeof(struct dummy_mbuf), 3205 }, 3206 }, 3207 .prepare = test_ld_mbuf1_prepare, 3208 .check_result = test_ld_mbuf1_check, 3209 /* mbuf as input argument is not supported on 32 bit platform */ 3210 .allow_fail = (sizeof(uint64_t) != sizeof(uintptr_t)), 3211 }, 3212 }; 3213 3214 static int 3215 run_test(const struct bpf_test *tst) 3216 { 3217 int32_t ret, rv; 3218 int64_t rc; 3219 struct rte_bpf *bpf; 3220 struct rte_bpf_jit jit; 3221 uint8_t tbuf[tst->arg_sz]; 3222 3223 printf("%s(%s) start\n", __func__, tst->name); 3224 3225 bpf = rte_bpf_load(&tst->prm); 3226 if (bpf == NULL) { 3227 printf("%s@%d: failed to load bpf code, error=%d(%s);\n", 3228 __func__, __LINE__, rte_errno, strerror(rte_errno)); 3229 return -1; 3230 } 3231 3232 tst->prepare(tbuf); 3233 rc = rte_bpf_exec(bpf, tbuf); 3234 ret = tst->check_result(rc, tbuf); 3235 if (ret != 0) { 3236 printf("%s@%d: check_result(%s) failed, error: %d(%s);\n", 3237 __func__, __LINE__, tst->name, ret, strerror(ret)); 3238 } 3239 3240 /* repeat the same test with jit, when possible */ 3241 rte_bpf_get_jit(bpf, &jit); 3242 if (jit.func != NULL) { 3243 3244 tst->prepare(tbuf); 3245 rc = jit.func(tbuf); 3246 rv = tst->check_result(rc, tbuf); 3247 ret |= rv; 3248 if (rv != 0) { 3249 printf("%s@%d: check_result(%s) failed, " 3250 "error: %d(%s);\n", 3251 __func__, __LINE__, tst->name, 3252 rv, strerror(rv)); 3253 } 3254 } 3255 3256 rte_bpf_destroy(bpf); 3257 return ret; 3258 3259 } 3260 3261 static int 3262 test_bpf(void) 3263 { 3264 int32_t rc, rv; 3265 uint32_t i; 3266 3267 rc = 0; 3268 for (i = 0; i != RTE_DIM(tests); i++) { 3269 rv = run_test(tests + i); 3270 if (tests[i].allow_fail == 0) 3271 rc |= rv; 3272 } 3273 3274 return rc; 3275 } 3276 3277 #endif /* !RTE_LIB_BPF */ 3278 3279 REGISTER_FAST_TEST(bpf_autotest, true, true, test_bpf); 3280 3281 #ifndef RTE_HAS_LIBPCAP 3282 3283 static int 3284 test_bpf_convert(void) 3285 { 3286 printf("BPF convert RTE_HAS_LIBPCAP is undefined, skipping test\n"); 3287 return TEST_SKIPPED; 3288 } 3289 3290 #else 3291 #include <pcap/pcap.h> 3292 3293 static void 3294 test_bpf_dump(struct bpf_program *cbf, const struct rte_bpf_prm *prm) 3295 { 3296 printf("cBPF program (%u insns)\n", cbf->bf_len); 3297 bpf_dump(cbf, 1); 3298 3299 if (prm != NULL) { 3300 printf("\neBPF program (%u insns)\n", prm->nb_ins); 3301 rte_bpf_dump(stdout, prm->ins, prm->nb_ins); 3302 } 3303 } 3304 3305 static int 3306 test_bpf_match(pcap_t *pcap, const char *str, 3307 struct rte_mbuf *mb) 3308 { 3309 struct bpf_program fcode; 3310 struct rte_bpf_prm *prm = NULL; 3311 struct rte_bpf *bpf = NULL; 3312 int ret = -1; 3313 uint64_t rc; 3314 3315 if (pcap_compile(pcap, &fcode, str, 1, PCAP_NETMASK_UNKNOWN)) { 3316 printf("%s@%d: pcap_compile(\"%s\") failed: %s;\n", 3317 __func__, __LINE__, str, pcap_geterr(pcap)); 3318 return -1; 3319 } 3320 3321 prm = rte_bpf_convert(&fcode); 3322 if (prm == NULL) { 3323 printf("%s@%d: bpf_convert('%s') failed,, error=%d(%s);\n", 3324 __func__, __LINE__, str, rte_errno, strerror(rte_errno)); 3325 goto error; 3326 } 3327 3328 bpf = rte_bpf_load(prm); 3329 if (bpf == NULL) { 3330 printf("%s@%d: failed to load bpf code, error=%d(%s);\n", 3331 __func__, __LINE__, rte_errno, strerror(rte_errno)); 3332 goto error; 3333 } 3334 3335 rc = rte_bpf_exec(bpf, mb); 3336 /* The return code from bpf capture filter is non-zero if matched */ 3337 ret = (rc == 0); 3338 error: 3339 if (bpf) 3340 rte_bpf_destroy(bpf); 3341 rte_free(prm); 3342 pcap_freecode(&fcode); 3343 return ret; 3344 } 3345 3346 /* Basic sanity test can we match a IP packet */ 3347 static int 3348 test_bpf_filter_sanity(pcap_t *pcap) 3349 { 3350 const uint32_t plen = 100; 3351 struct rte_mbuf mb, *m; 3352 uint8_t tbuf[RTE_MBUF_DEFAULT_BUF_SIZE]; 3353 struct { 3354 struct rte_ether_hdr eth_hdr; 3355 struct rte_ipv4_hdr ip_hdr; 3356 } *hdr; 3357 3358 memset(&mb, 0, sizeof(mb)); 3359 dummy_mbuf_prep(&mb, tbuf, sizeof(tbuf), plen); 3360 m = &mb; 3361 3362 hdr = rte_pktmbuf_mtod(m, typeof(hdr)); 3363 hdr->eth_hdr = (struct rte_ether_hdr) { 3364 .dst_addr.addr_bytes = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff }, 3365 .ether_type = rte_cpu_to_be_16(RTE_ETHER_TYPE_IPV4), 3366 }; 3367 hdr->ip_hdr = (struct rte_ipv4_hdr) { 3368 .version_ihl = RTE_IPV4_VHL_DEF, 3369 .total_length = rte_cpu_to_be_16(plen), 3370 .time_to_live = IPDEFTTL, 3371 .next_proto_id = IPPROTO_RAW, 3372 .src_addr = rte_cpu_to_be_32(RTE_IPV4_LOOPBACK), 3373 .dst_addr = rte_cpu_to_be_32(RTE_IPV4_BROADCAST), 3374 }; 3375 3376 if (test_bpf_match(pcap, "ip", m) != 0) { 3377 printf("%s@%d: filter \"ip\" doesn't match test data\n", 3378 __func__, __LINE__); 3379 return -1; 3380 } 3381 if (test_bpf_match(pcap, "not ip", m) == 0) { 3382 printf("%s@%d: filter \"not ip\" does match test data\n", 3383 __func__, __LINE__); 3384 return -1; 3385 } 3386 3387 return 0; 3388 } 3389 3390 /* 3391 * Some sample pcap filter strings from 3392 * https://wiki.wireshark.org/CaptureFilters 3393 */ 3394 static const char * const sample_filters[] = { 3395 "host 172.18.5.4", 3396 "net 192.168.0.0/24", 3397 "src net 192.168.0.0/24", 3398 "src net 192.168.0.0 mask 255.255.255.0", 3399 "dst net 192.168.0.0/24", 3400 "dst net 192.168.0.0 mask 255.255.255.0", 3401 "port 53", 3402 "host 192.0.2.1 and not (port 80 or port 25)", 3403 "host 2001:4b98:db0::8 and not port 80 and not port 25", 3404 "port not 53 and not arp", 3405 "(tcp[0:2] > 1500 and tcp[0:2] < 1550) or (tcp[2:2] > 1500 and tcp[2:2] < 1550)", 3406 "ether proto 0x888e", 3407 "ether[0] & 1 = 0 and ip[16] >= 224", 3408 "icmp[icmptype] != icmp-echo and icmp[icmptype] != icmp-echoreply", 3409 "tcp[tcpflags] & (tcp-syn|tcp-fin) != 0 and not src and dst net 127.0.0.1", 3410 "not ether dst 01:80:c2:00:00:0e", 3411 "not broadcast and not multicast", 3412 "dst host ff02::1", 3413 "port 80 and tcp[((tcp[12:1] & 0xf0) >> 2):4] = 0x47455420", 3414 /* Worms */ 3415 "dst port 135 and tcp port 135 and ip[2:2]==48", 3416 "icmp[icmptype]==icmp-echo and ip[2:2]==92 and icmp[8:4]==0xAAAAAAAA", 3417 "dst port 135 or dst port 445 or dst port 1433" 3418 " and tcp[tcpflags] & (tcp-syn) != 0" 3419 " and tcp[tcpflags] & (tcp-ack) = 0 and src net 192.168.0.0/24", 3420 "tcp src port 443 and (tcp[((tcp[12] & 0xF0) >> 4 ) * 4] = 0x18)" 3421 " and (tcp[((tcp[12] & 0xF0) >> 4 ) * 4 + 1] = 0x03)" 3422 " and (tcp[((tcp[12] & 0xF0) >> 4 ) * 4 + 2] < 0x04)" 3423 " and ((ip[2:2] - 4 * (ip[0] & 0x0F) - 4 * ((tcp[12] & 0xF0) >> 4) > 69))", 3424 /* Other */ 3425 "len = 128", 3426 "host 1::1 or host 1::1 or host 1::1 or host 1::1 or host 1::1 or host 1::1", 3427 ("host 1::1 or host 1::2 or host 1::3 or host 1::4 or host 1::5 " 3428 "or host 192.0.2.1 or host 192.0.2.100 or host 192.0.2.200"), 3429 }; 3430 3431 static int 3432 test_bpf_filter(pcap_t *pcap, const char *s) 3433 { 3434 struct bpf_program fcode; 3435 struct rte_bpf_prm *prm = NULL; 3436 struct rte_bpf *bpf = NULL; 3437 3438 if (pcap_compile(pcap, &fcode, s, 1, PCAP_NETMASK_UNKNOWN)) { 3439 printf("%s@%d: pcap_compile('%s') failed: %s;\n", 3440 __func__, __LINE__, s, pcap_geterr(pcap)); 3441 return -1; 3442 } 3443 3444 prm = rte_bpf_convert(&fcode); 3445 if (prm == NULL) { 3446 printf("%s@%d: bpf_convert('%s') failed,, error=%d(%s);\n", 3447 __func__, __LINE__, s, rte_errno, strerror(rte_errno)); 3448 goto error; 3449 } 3450 3451 printf("bpf convert for \"%s\" produced:\n", s); 3452 rte_bpf_dump(stdout, prm->ins, prm->nb_ins); 3453 3454 bpf = rte_bpf_load(prm); 3455 if (bpf == NULL) { 3456 printf("%s@%d: failed to load bpf code, error=%d(%s);\n", 3457 __func__, __LINE__, rte_errno, strerror(rte_errno)); 3458 goto error; 3459 } 3460 3461 error: 3462 if (bpf) 3463 rte_bpf_destroy(bpf); 3464 else { 3465 printf("%s \"%s\"\n", __func__, s); 3466 test_bpf_dump(&fcode, prm); 3467 } 3468 3469 rte_free(prm); 3470 pcap_freecode(&fcode); 3471 return (bpf == NULL) ? -1 : 0; 3472 } 3473 3474 static int 3475 test_bpf_convert(void) 3476 { 3477 unsigned int i; 3478 pcap_t *pcap; 3479 int rc; 3480 3481 pcap = pcap_open_dead(DLT_EN10MB, 262144); 3482 if (!pcap) { 3483 printf("pcap_open_dead failed\n"); 3484 return -1; 3485 } 3486 3487 rc = test_bpf_filter_sanity(pcap); 3488 for (i = 0; i < RTE_DIM(sample_filters); i++) 3489 rc |= test_bpf_filter(pcap, sample_filters[i]); 3490 3491 pcap_close(pcap); 3492 return rc; 3493 } 3494 3495 #endif /* RTE_HAS_LIBPCAP */ 3496 3497 REGISTER_FAST_TEST(bpf_convert_autotest, true, true, test_bpf_convert); 3498