/*-
 * Copyright (c) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)bpf.c	7.5 (Berkeley) 7/15/91
 */

#include <config.h>

#include <pcap/pcap-inttypes.h>
#include "pcap-types.h"
#include "extract.h"
#include "diag-control.h"

#define EXTRACT_SHORT	EXTRACT_BE_U_2
#define EXTRACT_LONG	EXTRACT_BE_U_4

#ifndef _WIN32
#include <sys/param.h>
#include <sys/types.h>
#include <sys/time.h>
#endif /* _WIN32 */

#include <pcap-int.h>

#include <stdlib.h>

#ifdef __linux__
#include <linux/types.h>
#include <linux/if_packet.h>
#include <linux/filter.h>
#endif

enum {
	BPF_S_ANC_NONE,
	BPF_S_ANC_VLAN_TAG,
	BPF_S_ANC_VLAN_TAG_PRESENT,
};

/*
 * Execute the filter program starting at pc on the packet p.
 * wirelen is the length of the original packet;
 * buflen is the amount of data present.
 * aux_data is auxiliary data, currently used only when interpreting
 * filters intended for the Linux kernel in cases where the kernel
 * rejects the filter; it contains VLAN tag information.
 * For the kernel, p is assumed to be a pointer to an mbuf if buflen is 0;
 * in all other cases, p is a pointer to a buffer and buflen is its size.
 *
 * Thanks to Ani Sinha <ani@arista.com> for providing initial implementation
 */
#if defined(SKF_AD_VLAN_TAG_PRESENT)
u_int
pcapint_filter_with_aux_data(const struct bpf_insn *pc, const u_char *p,
    u_int wirelen, u_int buflen, const struct pcap_bpf_aux_data *aux_data)
#else
u_int
pcapint_filter_with_aux_data(const struct bpf_insn *pc, const u_char *p,
    u_int wirelen, u_int buflen, const struct pcap_bpf_aux_data *aux_data _U_)
#endif
{
	register uint32_t A, X;
	register bpf_u_int32 k;
	uint32_t mem[BPF_MEMWORDS];

	if (pc == 0)
		/*
		 * No filter means accept all.
		 */
		return (u_int)-1;
	A = 0;
	X = 0;
	--pc;
	for (;;) {
		++pc;
		switch (pc->code) {

		default:
			abort();
		case BPF_RET|BPF_K:
			return (u_int)pc->k;

		case BPF_RET|BPF_A:
			return (u_int)A;

		case BPF_LD|BPF_W|BPF_ABS:
			k = pc->k;
			if (k > buflen || sizeof(int32_t) > buflen - k) {
				return 0;
			}
			A = EXTRACT_LONG(&p[k]);
			continue;

		case BPF_LD|BPF_H|BPF_ABS:
			k = pc->k;
			if (k > buflen || sizeof(int16_t) > buflen - k) {
				return 0;
			}
			A = EXTRACT_SHORT(&p[k]);
			continue;

		case BPF_LD|BPF_B|BPF_ABS:
			/*
			 * Yes, we know, this switch doesn't do
			 * anything unless we're building for
			 * a Linux kernel with removed VLAN
			 * tags available as meta-data.
			 */
			DIAG_OFF_DEFAULT_ONLY_SWITCH
			switch (pc->k) {

#if defined(SKF_AD_VLAN_TAG_PRESENT)
			case SKF_AD_OFF + SKF_AD_VLAN_TAG:
				if (!aux_data)
					return 0;
				A = aux_data->vlan_tag;
				break;

			case SKF_AD_OFF + SKF_AD_VLAN_TAG_PRESENT:
				if (!aux_data)
					return 0;
				A = aux_data->vlan_tag_present;
				break;
#endif
			default:
				k = pc->k;
				if (k >= buflen) {
					return 0;
				}
				A = p[k];
				break;
			}
			DIAG_ON_DEFAULT_ONLY_SWITCH
			continue;

		case BPF_LD|BPF_W|BPF_LEN:
			A = wirelen;
			continue;

		case BPF_LDX|BPF_W|BPF_LEN:
			X = wirelen;
			continue;

		case BPF_LD|BPF_W|BPF_IND:
			k = X + pc->k;
			if (pc->k > buflen || X > buflen - pc->k ||
			    sizeof(int32_t) > buflen - k) {
				return 0;
			}
			A = EXTRACT_LONG(&p[k]);
			continue;

		case BPF_LD|BPF_H|BPF_IND:
			k = X + pc->k;
			if (X > buflen || pc->k > buflen - X ||
			    sizeof(int16_t) > buflen - k) {
				return 0;
			}
			A = EXTRACT_SHORT(&p[k]);
			continue;

		case BPF_LD|BPF_B|BPF_IND:
			k = X + pc->k;
			if (pc->k >= buflen || X >= buflen - pc->k) {
				return 0;
			}
			A = p[k];
			continue;

		case BPF_LDX|BPF_MSH|BPF_B:
			k = pc->k;
			if (k >= buflen) {
				return 0;
			}
			X = (p[pc->k] & 0xf) << 2;
			continue;

		case BPF_LD|BPF_IMM:
			A = pc->k;
			continue;

		case BPF_LDX|BPF_IMM:
			X = pc->k;
			continue;

		case BPF_LD|BPF_MEM:
			A = mem[pc->k];
			continue;

		case BPF_LDX|BPF_MEM:
			X = mem[pc->k];
			continue;

		case BPF_ST:
			mem[pc->k] = A;
			continue;

		case BPF_STX:
			mem[pc->k] = X;
			continue;

		case BPF_JMP|BPF_JA:
			/*
			 * XXX - we currently implement "ip6 protochain"
			 * with backward jumps, so sign-extend pc->k.
			 */
			pc += (bpf_int32)pc->k;
			continue;

		case BPF_JMP|BPF_JGT|BPF_K:
			pc += (A > pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGE|BPF_K:
			pc += (A >= pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JEQ|BPF_K:
			pc += (A == pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JSET|BPF_K:
			pc += (A & pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGT|BPF_X:
			pc += (A > X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGE|BPF_X:
			pc += (A >= X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JEQ|BPF_X:
			pc += (A == X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JSET|BPF_X:
			pc += (A & X) ? pc->jt : pc->jf;
			continue;

		case BPF_ALU|BPF_ADD|BPF_X:
			A += X;
			continue;

		case BPF_ALU|BPF_SUB|BPF_X:
			A -= X;
			continue;

		case BPF_ALU|BPF_MUL|BPF_X:
			A *= X;
			continue;

		case BPF_ALU|BPF_DIV|BPF_X:
			if (X == 0)
				return 0;
			A /= X;
			continue;

		case BPF_ALU|BPF_MOD|BPF_X:
			if (X == 0)
				return 0;
			A %= X;
			continue;

		case BPF_ALU|BPF_AND|BPF_X:
			A &= X;
			continue;

		case BPF_ALU|BPF_OR|BPF_X:
			A |= X;
			continue;

		case BPF_ALU|BPF_XOR|BPF_X:
			A ^= X;
			continue;

		case BPF_ALU|BPF_LSH|BPF_X:
			if (X < 32)
				A <<= X;
			else
				A = 0;
			continue;

		case BPF_ALU|BPF_RSH|BPF_X:
			if (X < 32)
				A >>= X;
			else
				A = 0;
			continue;

		case BPF_ALU|BPF_ADD|BPF_K:
			A += pc->k;
			continue;

		case BPF_ALU|BPF_SUB|BPF_K:
			A -= pc->k;
			continue;

		case BPF_ALU|BPF_MUL|BPF_K:
			A *= pc->k;
			continue;

		case BPF_ALU|BPF_DIV|BPF_K:
			A /= pc->k;
			continue;

		case BPF_ALU|BPF_MOD|BPF_K:
			A %= pc->k;
			continue;

		case BPF_ALU|BPF_AND|BPF_K:
			A &= pc->k;
			continue;

		case BPF_ALU|BPF_OR|BPF_K:
			A |= pc->k;
			continue;

		case BPF_ALU|BPF_XOR|BPF_K:
			A ^= pc->k;
			continue;

		case BPF_ALU|BPF_LSH|BPF_K:
			A <<= pc->k;
			continue;

		case BPF_ALU|BPF_RSH|BPF_K:
			A >>= pc->k;
			continue;

		case BPF_ALU|BPF_NEG:
			/*
			 * Most BPF arithmetic is unsigned, but negation
			 * can't be unsigned; respecify it as subtracting
			 * the accumulator from 0U, so that 1) we don't
			 * get compiler warnings about negating an unsigned
			 * value and 2) don't get UBSan warnings about
			 * the result of negating 0x80000000 being undefined.
			 */
			A = (0U - A);
			continue;

		case BPF_MISC|BPF_TAX:
			X = A;
			continue;

		case BPF_MISC|BPF_TXA:
			A = X;
			continue;
		}
	}
}

u_int
pcapint_filter(const struct bpf_insn *pc, const u_char *p, u_int wirelen,
    u_int buflen)
{
	return pcapint_filter_with_aux_data(pc, p, wirelen, buflen, NULL);
}
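
/*
 * Illustrative sketch, not part of the upstream file: one way a caller
 * might drive the interpreter above by hand, to make the wirelen/buflen
 * distinction in the comment before pcapint_filter_with_aux_data()
 * concrete.  The guard macro BPF_FILTER_EXAMPLE and the helper name
 * example_run_filter are assumptions made for illustration only; real
 * callers normally obtain programs from pcap_compile() rather than
 * hand-building them.
 */
#ifdef BPF_FILTER_EXAMPLE
static u_int
example_run_filter(const u_char *pkt, u_int caplen, u_int wirelen)
{
	/*
	 * Hand-built program: load the Ethernet type halfword at
	 * offset 12, accept the whole packet if it is IPv4 (0x0800),
	 * otherwise reject it.
	 */
	static const struct bpf_insn prog[] = {
		BPF_STMT(BPF_LD|BPF_H|BPF_ABS, 12),
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, 0x0800, 0, 1),
		BPF_STMT(BPF_RET|BPF_K, 0xFFFFFFFFU),	/* accept all bytes */
		BPF_STMT(BPF_RET|BPF_K, 0),		/* reject */
	};

	/*
	 * wirelen is the original packet length and caplen the number
	 * of bytes actually present in pkt; a non-zero return is the
	 * number of bytes to keep, 0 means the packet was rejected.
	 */
	return pcapint_filter(prog, pkt, wirelen, caplen);
}
#endif /* BPF_FILTER_EXAMPLE */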

/*
 * Return true if the 'fcode' is a valid filter program.
 * The constraints are that each jump be forward and to a valid
 * code, that memory accesses are within valid ranges (to the
 * extent that this can be checked statically; loads of packet
 * data have to be, and are, also checked at run time), and that
 * the code terminates with either an accept or reject.
 *
 * The kernel needs to be able to verify an application's filter code.
 * Otherwise, a bogus program could easily crash the system.
 */
int
pcapint_validate_filter(const struct bpf_insn *f, int len)
{
	u_int i, from;
	const struct bpf_insn *p;

	if (len < 1)
		return 0;

	for (i = 0; i < (u_int)len; ++i) {
		p = &f[i];
		switch (BPF_CLASS(p->code)) {
		/*
		 * Check that memory operations use valid addresses.
		 */
		case BPF_LD:
		case BPF_LDX:
			switch (BPF_MODE(p->code)) {
			case BPF_IMM:
				break;
			case BPF_ABS:
			case BPF_IND:
			case BPF_MSH:
				/*
				 * There's no maximum packet data size
				 * in userland.  The runtime packet length
				 * check suffices.
				 */
				break;
			case BPF_MEM:
				if (p->k >= BPF_MEMWORDS)
					return 0;
				break;
			case BPF_LEN:
				break;
			default:
				return 0;
			}
			break;
		case BPF_ST:
		case BPF_STX:
			if (p->k >= BPF_MEMWORDS)
				return 0;
			break;
		case BPF_ALU:
			switch (BPF_OP(p->code)) {
			case BPF_ADD:
			case BPF_SUB:
			case BPF_MUL:
			case BPF_OR:
			case BPF_AND:
			case BPF_XOR:
			case BPF_LSH:
			case BPF_RSH:
			case BPF_NEG:
				break;
			case BPF_DIV:
			case BPF_MOD:
				/*
				 * Check for constant division or modulus
				 * by 0.
				 */
				if (BPF_SRC(p->code) == BPF_K && p->k == 0)
					return 0;
				break;
			default:
				return 0;
			}
			break;
		case BPF_JMP:
			/*
			 * Check that jumps are within the code block,
			 * and that unconditional branches don't go
			 * backwards as a result of an overflow.
			 * Unconditional branches have a 32-bit offset,
			 * so they could overflow; we check to make
			 * sure they don't.  Conditional branches have
			 * an 8-bit offset, and the from address is <=
			 * BPF_MAXINSNS, and we assume that BPF_MAXINSNS
			 * is sufficiently small that adding 255 to it
			 * won't overflow.
			 *
			 * We know that len is <= BPF_MAXINSNS, and we
			 * assume that BPF_MAXINSNS is < the maximum size
			 * of a u_int, so that i + 1 doesn't overflow.
			 *
			 * For userland, we don't know that the from
			 * or len are <= BPF_MAXINSNS, but we know that
			 * from <= len, and, except on a 64-bit system,
			 * it's unlikely that len, if it truly reflects
			 * the size of the program we've been handed,
			 * will be anywhere near the maximum size of
			 * a u_int.  We also don't check for backward
			 * branches, as we currently support them in
			 * userland for the protochain operation.
			 */
			from = i + 1;
			switch (BPF_OP(p->code)) {
			case BPF_JA:
				if (from + p->k >= (u_int)len)
					return 0;
				break;
			case BPF_JEQ:
			case BPF_JGT:
			case BPF_JGE:
			case BPF_JSET:
				if (from + p->jt >= (u_int)len ||
				    from + p->jf >= (u_int)len)
					return 0;
				break;
			default:
				return 0;
			}
			break;
		case BPF_RET:
			break;
		case BPF_MISC:
			break;
		default:
			return 0;
		}
	}
	return BPF_CLASS(f[len - 1].code) == BPF_RET;
}

/*
 * Exported because older versions of libpcap exported them.
 */
u_int
bpf_filter(const struct bpf_insn *pc, const u_char *p, u_int wirelen,
    u_int buflen)
{
	return pcapint_filter(pc, p, wirelen, buflen);
}

int
bpf_validate(const struct bpf_insn *f, int len)
{
	return pcapint_validate_filter(f, len);
}
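
/*
 * Illustrative sketch, not part of the upstream file: how the exported
 * bpf_validate()/bpf_filter() pair might be used together, and the kind
 * of program the validator rejects.  The guard macro BPF_FILTER_EXAMPLE
 * and the helper name example_validate_then_run are assumptions made
 * for illustration only.
 */
#ifdef BPF_FILTER_EXAMPLE
static int
example_validate_then_run(const u_char *pkt, u_int caplen, u_int wirelen,
    u_int *result)
{
	/*
	 * Ends with a BPF_RET and contains no invalid operations, so
	 * bpf_validate() accepts it: return the full wire length.
	 */
	static const struct bpf_insn ok_prog[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_LEN, 0),
		BPF_STMT(BPF_RET|BPF_A, 0),
	};
	/*
	 * Constant division by zero: bpf_validate() rejects this one
	 * even though it also ends with a BPF_RET.
	 */
	static const struct bpf_insn bad_prog[] = {
		BPF_STMT(BPF_LD|BPF_W|BPF_LEN, 0),
		BPF_STMT(BPF_ALU|BPF_DIV|BPF_K, 0),
		BPF_STMT(BPF_RET|BPF_A, 0),
	};

	if (bpf_validate(bad_prog, 3))
		return -1;	/* should not happen */
	if (!bpf_validate(ok_prog, 2))
		return -1;	/* should not happen */
	*result = bpf_filter(ok_prog, pkt, wirelen, caplen);
	return 0;
}
#endif /* BPF_FILTER_EXAMPLE */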