/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2021 Microsoft Corporation
 *
 * Based on bpf_convert_filter() in the Linux kernel sources
 * and filter2xdp.
 *
 * Licensed as BSD with permission of the original authors.
 * Copyright (C) 2017 Tobias Klauser
 * Copyright (c) 2011 - 2014 PLUMgrid, http://plumgrid.com
 */

#include <assert.h>
#include <errno.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include <rte_common.h>
#include <rte_bpf.h>
#include <rte_log.h>
#include <rte_malloc.h>
#include <rte_errno.h>

#include <pcap/pcap.h>
#include <pcap/bpf.h>

#include "bpf_impl.h"
#include "bpf_def.h"

#ifndef BPF_MAXINSNS
#define BPF_MAXINSNS 4096
#endif

/*
 * Linux socket filter uses negative absolute offsets to
 * reference ancillary data.
 */
#define SKF_AD_OFF		(-0x1000)
#define SKF_AD_PROTOCOL		0
#define SKF_AD_PKTTYPE		4
#define SKF_AD_IFINDEX		8
#define SKF_AD_NLATTR		12
#define SKF_AD_NLATTR_NEST	16
#define SKF_AD_MARK		20
#define SKF_AD_QUEUE		24
#define SKF_AD_HATYPE		28
#define SKF_AD_RXHASH		32
#define SKF_AD_CPU		36
#define SKF_AD_ALU_XOR_X	40
#define SKF_AD_VLAN_TAG		44
#define SKF_AD_VLAN_TAG_PRESENT	48
#define SKF_AD_PAY_OFFSET	52
#define SKF_AD_RANDOM		56
#define SKF_AD_VLAN_TPID	60
#define SKF_AD_MAX		64

/* ArgX, context and stack frame pointer register positions. Note,
 * Arg1, Arg2, Arg3, etc. are used as argument mappings of function
 * calls in BPF_CALL instruction.
 */
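/* In the converted programs below, the classic BPF accumulator A lives in
 * R0 and the index register X in R7, with R8 used as scratch. The initial
 * context (the mbuf pointer) arrives in Arg1 and is copied by the emitted
 * prologue into the callee-saved CTX register, where BPF_ABS/BPF_IND loads
 * expect it. Classic BPF scratch memory M[] is kept on the stack below the
 * frame pointer.
 */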
#define BPF_REG_ARG1	EBPF_REG_1
#define BPF_REG_ARG2	EBPF_REG_2
#define BPF_REG_ARG3	EBPF_REG_3
#define BPF_REG_ARG4	EBPF_REG_4
#define BPF_REG_ARG5	EBPF_REG_5
#define BPF_REG_CTX	EBPF_REG_6
#define BPF_REG_FP	EBPF_REG_10

/* Additional register mappings for converted user programs. */
#define BPF_REG_A	EBPF_REG_0
#define BPF_REG_X	EBPF_REG_7
#define BPF_REG_TMP	EBPF_REG_8

/* Helper macros for filter block array initializers. */

/* ALU ops on registers, bpf_add|sub|...: dst_reg += src_reg */

#define EBPF_ALU64_REG(OP, DST, SRC)				\
	((struct ebpf_insn) {					\
		.code  = EBPF_ALU64 | BPF_OP(OP) | BPF_X,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

#define BPF_ALU32_REG(OP, DST, SRC)				\
	((struct ebpf_insn) {					\
		.code  = BPF_ALU | BPF_OP(OP) | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */

#define BPF_ALU32_IMM(OP, DST, IMM)				\
	((struct ebpf_insn) {					\
		.code  = BPF_ALU | BPF_OP(OP) | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

/* Short form of mov, dst_reg = src_reg */

#define BPF_MOV64_REG(DST, SRC)					\
	((struct ebpf_insn) {					\
		.code  = EBPF_ALU64 | EBPF_MOV | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

#define BPF_MOV32_REG(DST, SRC)					\
	((struct ebpf_insn) {					\
		.code  = BPF_ALU | EBPF_MOV | BPF_X,		\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = 0 })

/* Short form of mov, dst_reg = imm32 */

#define BPF_MOV32_IMM(DST, IMM)					\
	((struct ebpf_insn) {					\
		.code  = BPF_ALU | EBPF_MOV | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })
/* Short form of mov based on type, BPF_X: dst_reg = src_reg, BPF_K: dst_reg = imm32 */

#define BPF_MOV32_RAW(TYPE, DST, SRC, IMM)			\
	((struct ebpf_insn) {					\
		.code  = BPF_ALU | EBPF_MOV | BPF_SRC(TYPE),	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = 0,					\
		.imm   = IMM })

/* Direct packet access, R0 = *(uint *) (skb->data + imm32) */

#define BPF_LD_ABS(SIZE, IMM)					\
	((struct ebpf_insn) {					\
		.code  = BPF_LD | BPF_SIZE(SIZE) | BPF_ABS,	\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = IMM })

/* Memory load, dst_reg = *(uint *) (src_reg + off16) */

#define BPF_LDX_MEM(SIZE, DST, SRC, OFF)			\
	((struct ebpf_insn) {					\
		.code  = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Memory store, *(uint *) (dst_reg + off16) = src_reg */

#define BPF_STX_MEM(SIZE, DST, SRC, OFF)			\
	((struct ebpf_insn) {					\
		.code  = BPF_STX | BPF_SIZE(SIZE) | BPF_MEM,	\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = 0 })

/* Conditional jumps against immediates, if (dst_reg 'op' imm32) goto pc + off16 */

#define BPF_JMP_IMM(OP, DST, IMM, OFF)				\
	((struct ebpf_insn) {					\
		.code  = BPF_JMP | BPF_OP(OP) | BPF_K,		\
		.dst_reg = DST,					\
		.src_reg = 0,					\
		.off   = OFF,					\
		.imm   = IMM })

/* Raw code statement block */

#define BPF_RAW_INSN(CODE, DST, SRC, OFF, IMM)			\
	((struct ebpf_insn) {					\
		.code  = CODE,					\
		.dst_reg = DST,					\
		.src_reg = SRC,					\
		.off   = OFF,					\
		.imm   = IMM })

/* Program exit */

#define BPF_EXIT_INSN()						\
	((struct ebpf_insn) {					\
		.code  = BPF_JMP | EBPF_EXIT,			\
		.dst_reg = 0,					\
		.src_reg = 0,					\
		.off   = 0,					\
		.imm   = 0 })
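/*
 * For illustration, BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1), which the
 * prologue emitted in bpf_convert_filter() below relies on, expands to the
 * single eBPF instruction "r6 = r1":
 *
 *	(struct ebpf_insn) {
 *		.code = EBPF_ALU64 | EBPF_MOV | BPF_X,
 *		.dst_reg = EBPF_REG_6,
 *		.src_reg = EBPF_REG_1,
 *		.off = 0,
 *		.imm = 0,
 *	}
 */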
/*
 * Placeholder to convert BPF extensions like length and VLAN tag,
 * if and when DPDK BPF supports them.
 */
static bool convert_bpf_load(const struct bpf_insn *fp,
			     struct ebpf_insn **new_insnp __rte_unused)
{
	switch (fp->k) {
	case SKF_AD_OFF + SKF_AD_PROTOCOL:
	case SKF_AD_OFF + SKF_AD_PKTTYPE:
	case SKF_AD_OFF + SKF_AD_IFINDEX:
	case SKF_AD_OFF + SKF_AD_HATYPE:
	case SKF_AD_OFF + SKF_AD_MARK:
	case SKF_AD_OFF + SKF_AD_RXHASH:
	case SKF_AD_OFF + SKF_AD_QUEUE:
	case SKF_AD_OFF + SKF_AD_VLAN_TAG:
	case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
	case SKF_AD_OFF + SKF_AD_VLAN_TPID:
	case SKF_AD_OFF + SKF_AD_PAY_OFFSET:
	case SKF_AD_OFF + SKF_AD_NLATTR:
	case SKF_AD_OFF + SKF_AD_NLATTR_NEST:
	case SKF_AD_OFF + SKF_AD_CPU:
	case SKF_AD_OFF + SKF_AD_RANDOM:
	case SKF_AD_OFF + SKF_AD_ALU_XOR_X:
		/* Linux has special negative offsets to access meta-data. */
		RTE_BPF_LOG_LINE(ERR,
			"rte_bpf_convert: socket offset %d not supported",
			fp->k - SKF_AD_OFF);
		return true;
	default:
		return false;
	}
}

static int bpf_convert_filter(const struct bpf_insn *prog, size_t len,
			      struct ebpf_insn *new_prog, uint32_t *new_len)
{
	unsigned int pass = 0;
	size_t new_flen = 0, target, i;
	struct ebpf_insn *new_insn;
	const struct bpf_insn *fp;
	int *addrs = NULL;
	uint8_t bpf_src;

	if (len > BPF_MAXINSNS) {
		RTE_BPF_LOG_LINE(ERR, "%s: cBPF program too long (%zu insns)",
			__func__, len);
		return -EINVAL;
	}

	/* On second pass, allocate the new program */
	if (new_prog) {
		addrs = calloc(len, sizeof(*addrs));
		if (addrs == NULL)
			return -ENOMEM;
	}

do_pass:
	new_insn = new_prog;
	fp = prog;

	/* Classic BPF related prologue emission. */
	if (new_insn) {
		/* Classic BPF expects A and X to be reset first. These need
		 * to be guaranteed to be the first two instructions.
		 */
		*new_insn++ = EBPF_ALU64_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
		*new_insn++ = EBPF_ALU64_REG(BPF_XOR, BPF_REG_X, BPF_REG_X);

		/* All programs must keep CTX in callee saved BPF_REG_CTX.
		 * In eBPF case it's done by the compiler, here we need to
		 * do this ourselves. Initial CTX is present in BPF_REG_ARG1.
		 */
		*new_insn++ = BPF_MOV64_REG(BPF_REG_CTX, BPF_REG_ARG1);
	} else {
		/* Length-only pass: account for the 3 prologue insns above. */
		new_insn += 3;
	}

	for (i = 0; i < len; fp++, i++) {
		struct ebpf_insn tmp_insns[6] = { };
		struct ebpf_insn *insn = tmp_insns;

		if (addrs)
			addrs[i] = new_insn - new_prog;

		switch (fp->code) {
		/* Absolute loads are how classic BPF accesses skb */
		case BPF_LD | BPF_ABS | BPF_W:
		case BPF_LD | BPF_ABS | BPF_H:
		case BPF_LD | BPF_ABS | BPF_B:
			if (convert_bpf_load(fp, &insn))
				goto err;

			*insn = BPF_RAW_INSN(fp->code, 0, 0, 0, fp->k);
			break;

		case BPF_ALU | BPF_DIV | BPF_X:
		case BPF_ALU | BPF_MOD | BPF_X:
			/* For cBPF, don't cause floating point exception */
			*insn++ = BPF_MOV32_REG(BPF_REG_X, BPF_REG_X);
			*insn++ = BPF_JMP_IMM(EBPF_JNE, BPF_REG_X, 0, 2);
			*insn++ = BPF_ALU32_REG(BPF_XOR, BPF_REG_A, BPF_REG_A);
			*insn++ = BPF_EXIT_INSN();
			/* fallthrough */
		case BPF_ALU | BPF_ADD | BPF_X:
		case BPF_ALU | BPF_ADD | BPF_K:
		case BPF_ALU | BPF_SUB | BPF_X:
		case BPF_ALU | BPF_SUB | BPF_K:
		case BPF_ALU | BPF_AND | BPF_X:
		case BPF_ALU | BPF_AND | BPF_K:
		case BPF_ALU | BPF_OR | BPF_X:
		case BPF_ALU | BPF_OR | BPF_K:
		case BPF_ALU | BPF_LSH | BPF_X:
		case BPF_ALU | BPF_LSH | BPF_K:
		case BPF_ALU | BPF_RSH | BPF_X:
		case BPF_ALU | BPF_RSH | BPF_K:
		case BPF_ALU | BPF_XOR | BPF_X:
		case BPF_ALU | BPF_XOR | BPF_K:
		case BPF_ALU | BPF_MUL | BPF_X:
		case BPF_ALU | BPF_MUL | BPF_K:
		case BPF_ALU | BPF_DIV | BPF_K:
		case BPF_ALU | BPF_MOD | BPF_K:
		case BPF_ALU | BPF_NEG:
		case BPF_LD | BPF_IND | BPF_W:
		case BPF_LD | BPF_IND | BPF_H:
		case BPF_LD | BPF_IND | BPF_B:
			/* All arithmetic insns map as-is. */
			insn->code = fp->code;
			insn->dst_reg = BPF_REG_A;
			bpf_src = BPF_SRC(fp->code);
			insn->src_reg = bpf_src == BPF_X ? BPF_REG_X : 0;
			insn->off = 0;
			insn->imm = fp->k;
			break;

		/* Jump transformation cannot use BPF block macros
		 * everywhere as offset calculation and target updates
		 * require a bit more work than the rest, i.e. jump
		 * opcodes map as-is, but offsets need adjustment.
		 */

#define BPF_EMIT_JMP							\
	do {								\
		if (target >= len)					\
			goto err;					\
		insn->off = addrs ? addrs[target] - addrs[i] - 1 : 0;	\
		/* Adjust pc relative offset for 2nd or 3rd insn. */	\
		insn->off -= insn - tmp_insns;				\
	} while (0)

		case BPF_JMP | BPF_JA:
			target = i + fp->k + 1;
			insn->code = fp->code;
			BPF_EMIT_JMP;
			break;

		case BPF_JMP | BPF_JEQ | BPF_K:
		case BPF_JMP | BPF_JEQ | BPF_X:
		case BPF_JMP | BPF_JSET | BPF_K:
		case BPF_JMP | BPF_JSET | BPF_X:
		case BPF_JMP | BPF_JGT | BPF_K:
		case BPF_JMP | BPF_JGT | BPF_X:
		case BPF_JMP | BPF_JGE | BPF_K:
		case BPF_JMP | BPF_JGE | BPF_X:
			if (BPF_SRC(fp->code) == BPF_K && (int) fp->k < 0) {
				/* BPF immediates are signed, zero extend
				 * immediate into tmp register and use it
				 * in compare insn.
				 */
				*insn++ = BPF_MOV32_IMM(BPF_REG_TMP, fp->k);

				insn->dst_reg = BPF_REG_A;
				insn->src_reg = BPF_REG_TMP;
				bpf_src = BPF_X;
			} else {
				insn->dst_reg = BPF_REG_A;
				insn->imm = fp->k;
				bpf_src = BPF_SRC(fp->code);
				insn->src_reg = bpf_src == BPF_X ? BPF_REG_X : 0;
			}

			/* Common case where 'jump_false' is next insn. */
			if (fp->jf == 0) {
				insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
				target = i + fp->jt + 1;
				BPF_EMIT_JMP;
				break;
			}

			/* Convert JEQ into JNE when 'jump_true' is next insn. */
			if (fp->jt == 0 && BPF_OP(fp->code) == BPF_JEQ) {
				insn->code = BPF_JMP | EBPF_JNE | bpf_src;
				target = i + fp->jf + 1;
				BPF_EMIT_JMP;
				break;
			}

			/* Other jumps are mapped into two insns: Jxx and JA. */
			target = i + fp->jt + 1;
			insn->code = BPF_JMP | BPF_OP(fp->code) | bpf_src;
			BPF_EMIT_JMP;
			insn++;

			insn->code = BPF_JMP | BPF_JA;
			target = i + fp->jf + 1;
			BPF_EMIT_JMP;
			break;

		/* ldxb 4 * ([14] & 0xf) is remapped into 6 insns. */
		case BPF_LDX | BPF_MSH | BPF_B:
			/* tmp = A */
			*insn++ = BPF_MOV64_REG(BPF_REG_TMP, BPF_REG_A);
			/* A = BPF_R0 = *(u8 *) (skb->data + K) */
			*insn++ = BPF_LD_ABS(BPF_B, fp->k);
			/* A &= 0xf */
			*insn++ = BPF_ALU32_IMM(BPF_AND, BPF_REG_A, 0xf);
			/* A <<= 2 */
			*insn++ = BPF_ALU32_IMM(BPF_LSH, BPF_REG_A, 2);
			/* X = A */
			*insn++ = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			/* A = tmp */
			*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_TMP);
			break;

		/* RET_K is remapped into 2 insns. RET_A case doesn't need an
		 * extra mov as EBPF_REG_0 is already mapped into BPF_REG_A.
		 */
		case BPF_RET | BPF_A:
		case BPF_RET | BPF_K:
			if (BPF_RVAL(fp->code) == BPF_K) {
				*insn++ = BPF_MOV32_RAW(BPF_K, EBPF_REG_0,
							0, fp->k);
			}
			*insn = BPF_EXIT_INSN();
			break;

		/* Store to stack. */
		case BPF_ST:
		case BPF_STX:
			*insn = BPF_STX_MEM(BPF_W, BPF_REG_FP, BPF_CLASS(fp->code) ==
					    BPF_ST ? BPF_REG_A : BPF_REG_X,
					    -(BPF_MEMWORDS - fp->k) * 4);
			break;

		/* Load from stack. */
		case BPF_LD | BPF_MEM:
		case BPF_LDX | BPF_MEM:
			*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
					    BPF_REG_A : BPF_REG_X, BPF_REG_FP,
					    -(BPF_MEMWORDS - fp->k) * 4);
			break;

		/* A = K or X = K */
		case BPF_LD | BPF_IMM:
		case BPF_LDX | BPF_IMM:
			*insn = BPF_MOV32_IMM(BPF_CLASS(fp->code) == BPF_LD ?
					      BPF_REG_A : BPF_REG_X, fp->k);
			break;

		/* X = A */
		case BPF_MISC | BPF_TAX:
			*insn = BPF_MOV64_REG(BPF_REG_X, BPF_REG_A);
			break;

		/* A = X */
		case BPF_MISC | BPF_TXA:
			*insn = BPF_MOV64_REG(BPF_REG_A, BPF_REG_X);
			break;

		/* A = mbuf->len or X = mbuf->len */
		case BPF_LD | BPF_W | BPF_LEN:
		case BPF_LDX | BPF_W | BPF_LEN:
			/* BPF_ABS/BPF_IND implicitly expect mbuf ptr in R6 */

			*insn = BPF_LDX_MEM(BPF_W, BPF_CLASS(fp->code) == BPF_LD ?
					    BPF_REG_A : BPF_REG_X, BPF_REG_CTX,
					    offsetof(struct rte_mbuf, pkt_len));
			break;

		/* Unknown instruction. */
		default:
			RTE_BPF_LOG_LINE(ERR, "%s: Unknown instruction!: %#x",
				__func__, fp->code);
			goto err;
		}

		insn++;
		if (new_prog)
			memcpy(new_insn, tmp_insns,
			       sizeof(*insn) * (insn - tmp_insns));
		new_insn += insn - tmp_insns;
	}

	if (!new_prog) {
		/* Only calculating new length. */
		*new_len = new_insn - new_prog;
		return 0;
	}
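	/* Re-emit whenever the produced length changed so that jump offsets
	 * are recomputed against the updated addrs[]; if the length has not
	 * settled after a couple of passes, something is wrong and the
	 * conversion is abandoned.
	 */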
	pass++;
	if ((ptrdiff_t)new_flen != new_insn - new_prog) {
		new_flen = new_insn - new_prog;
		if (pass > 2)
			goto err;
		goto do_pass;
	}

	free(addrs);
	assert(*new_len == new_flen);

	return 0;
err:
	free(addrs);
	return -1;
}

struct rte_bpf_prm *
rte_bpf_convert(const struct bpf_program *prog)
{
	struct rte_bpf_prm *prm = NULL;
	struct ebpf_insn *ebpf = NULL;
	uint32_t ebpf_len = 0;
	int ret;

	if (prog == NULL) {
		RTE_BPF_LOG_LINE(ERR, "%s: NULL program", __func__);
		rte_errno = EINVAL;
		return NULL;
	}

	/* 1st pass: calculate the eBPF program length */
	ret = bpf_convert_filter(prog->bf_insns, prog->bf_len, NULL, &ebpf_len);
	if (ret < 0) {
		RTE_BPF_LOG_LINE(ERR, "%s: cannot get eBPF length", __func__);
		rte_errno = -ret;
		return NULL;
	}

	RTE_BPF_LOG_LINE(DEBUG, "%s: prog len cBPF=%u -> eBPF=%u",
		__func__, prog->bf_len, ebpf_len);

	prm = rte_zmalloc("bpf_filter",
			  sizeof(*prm) + ebpf_len * sizeof(*ebpf), 0);
	if (prm == NULL) {
		rte_errno = ENOMEM;
		return NULL;
	}

	/* The eBPF instructions in this case are right after the header */
	ebpf = (void *)(prm + 1);

	/* 2nd pass: remap cBPF to eBPF instructions */
	ret = bpf_convert_filter(prog->bf_insns, prog->bf_len, ebpf, &ebpf_len);
	if (ret < 0) {
		RTE_BPF_LOG_LINE(ERR, "%s: cannot convert cBPF to eBPF", __func__);
		rte_free(prm);
		rte_errno = -ret;
		return NULL;
	}

	prm->ins = ebpf;
	prm->nb_ins = ebpf_len;

	/* Classic BPF programs use mbufs */
	prm->prog_arg.type = RTE_BPF_ARG_PTR_MBUF;
	prm->prog_arg.size = sizeof(struct rte_mbuf);

	return prm;
}
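/*
 * Usage sketch: how a libpcap filter expression can be compiled to classic
 * BPF and then converted and loaded as an eBPF program. The helper below is
 * illustrative only (its name and error handling are not part of this
 * library) and is compiled out by default.
 */
#ifdef BPF_CONVERT_USAGE_EXAMPLE
static struct rte_bpf *
bpf_from_pcap_filter(const char *filter_str)
{
	struct bpf_program fcode;
	struct rte_bpf_prm *prm;
	struct rte_bpf *bpf;
	pcap_t *pcap;

	/* A "dead" pcap handle is enough to compile a filter expression. */
	pcap = pcap_open_dead(DLT_EN10MB, 65535);
	if (pcap == NULL)
		return NULL;

	if (pcap_compile(pcap, &fcode, filter_str, 1,
			 PCAP_NETMASK_UNKNOWN) != 0) {
		pcap_close(pcap);
		return NULL;
	}

	/* Two-pass cBPF to eBPF conversion implemented above. */
	prm = rte_bpf_convert(&fcode);
	pcap_freecode(&fcode);
	pcap_close(pcap);
	if (prm == NULL)
		return NULL;

	/* rte_bpf_load() keeps its own copy of the instructions, so the
	 * conversion result can be released once the program is loaded.
	 */
	bpf = rte_bpf_load(prm);
	rte_free(prm);
	return bpf;
}
#endif /* BPF_CONVERT_USAGE_EXAMPLE */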