/*	$NetBSD: bpf_filter.c,v 1.53 2012/08/15 21:31:39 alnsn Exp $	*/

/*-
 * Copyright (c) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)bpf_filter.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bpf_filter.c,v 1.53 2012/08/15 21:31:39 alnsn Exp $");

#if 0
#if !(defined(lint) || defined(KERNEL))
static const char rcsid[] =
    "@(#) Header: bpf_filter.c,v 1.33 97/04/26 13:37:18 leres Exp (LBL)";
#endif
#endif

#include <sys/param.h>
#include <sys/time.h>
#include <sys/kmem.h>
#include <sys/endian.h>

#define EXTRACT_SHORT(p)	be16dec(p)
#define EXTRACT_LONG(p)		be32dec(p)

#ifdef _KERNEL
#include <sys/mbuf.h>
#define MINDEX(len, m, k)		\
{					\
	len = m->m_len;			\
	while (k >= len) {		\
		k -= len;		\
		m = m->m_next;		\
		if (m == 0)		\
			return 0;	\
		len = m->m_len;		\
	}				\
}

static int m_xword (const struct mbuf *, uint32_t, int *);
static int m_xhalf (const struct mbuf *, uint32_t, int *);

static int
m_xword(const struct mbuf *m, uint32_t k, int *err)
{
	int len;
	u_char *cp, *np;
	struct mbuf *m0;

	*err = 1;
	MINDEX(len, m, k);
	cp = mtod(m, u_char *) + k;
	if (len >= k + 4) {
		*err = 0;
		return EXTRACT_LONG(cp);
	}
	m0 = m->m_next;
	if (m0 == 0 || m0->m_len + len - k < 4) {
		*err = 1;
		return 0;
	}
	*err = 0;
	np = mtod(m0, u_char *);

	switch (len - k) {
	case 1:
		return (cp[0] << 24) | (np[0] << 16) | (np[1] << 8) | np[2];
	case 2:
		return (cp[0] << 24) | (cp[1] << 16) | (np[0] << 8) | np[1];
	default:
		return (cp[0] << 24) | (cp[1] << 16) | (cp[2] << 8) | np[0];
	}
}

static int
m_xhalf(const struct mbuf *m, uint32_t k, int *err)
{
	int len;
	u_char *cp;
	struct mbuf *m0;

	*err = 1;
	MINDEX(len, m, k);
	cp = mtod(m, u_char *) + k;
	if (len >= k + 2) {
		*err = 0;
		return EXTRACT_SHORT(cp);
	}
	m0 = m->m_next;
	if (m0 == 0) {
		*err = 1;
		return 0;
	}
	*err = 0;
	return (cp[0] << 8) | mtod(m0, u_char *)[0];
}
#else /* _KERNEL */
#include <stdlib.h>
#include <string.h>
#endif /* !_KERNEL */

#include <net/bpf.h>

/*
 * Execute the filter program starting at pc on the packet p
 * wirelen is the length of the original packet
 * buflen is the amount of data present
 */
u_int
bpf_filter(const struct bpf_insn *pc, const u_char *p, u_int wirelen,
    u_int buflen)
{
	uint32_t A, X, k;
	uint32_t mem[BPF_MEMWORDS];

	if (pc == 0) {
		/*
		 * No filter means accept all.
		 */
		return (u_int)-1;
	}

	/*
	 * Note: safe to leave memwords uninitialised, as the validation
	 * step ensures that it will not be read, if it was not written.
	 */
	A = 0;
	X = 0;
	--pc;

	for (;;) {
		++pc;
		switch (pc->code) {

		default:
#ifdef _KERNEL
			return 0;
#else
			abort();
			/*NOTREACHED*/
#endif
		case BPF_RET|BPF_K:
			return (u_int)pc->k;

		case BPF_RET|BPF_A:
			return (u_int)A;

		case BPF_LD|BPF_W|BPF_ABS:
			k = pc->k;
			if (k > buflen || sizeof(int32_t) > buflen - k) {
#ifdef _KERNEL
				int merr;

				if (buflen != 0)
					return 0;
				A = m_xword((const struct mbuf *)p, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = EXTRACT_LONG(&p[k]);
			continue;

		case BPF_LD|BPF_H|BPF_ABS:
			k = pc->k;
			if (k > buflen || sizeof(int16_t) > buflen - k) {
#ifdef _KERNEL
				int merr;

				if (buflen != 0)
					return 0;
				A = m_xhalf((const struct mbuf *)p, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = EXTRACT_SHORT(&p[k]);
			continue;

		case BPF_LD|BPF_B|BPF_ABS:
			k = pc->k;
			if (k >= buflen) {
#ifdef _KERNEL
				const struct mbuf *m;
				int len;

				if (buflen != 0)
					return 0;
				m = (const struct mbuf *)p;
				MINDEX(len, m, k);
				A = mtod(m, u_char *)[k];
				continue;
#else
				return 0;
#endif
			}
			A = p[k];
			continue;

		case BPF_LD|BPF_W|BPF_LEN:
			A = wirelen;
			continue;

		case BPF_LDX|BPF_W|BPF_LEN:
			X = wirelen;
			continue;

		case BPF_LD|BPF_W|BPF_IND:
			k = X + pc->k;
			if (pc->k > buflen || X > buflen - pc->k ||
			    sizeof(int32_t) > buflen - k) {
#ifdef _KERNEL
				int merr;

				if (buflen != 0)
					return 0;
				A = m_xword((const struct mbuf *)p, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = EXTRACT_LONG(&p[k]);
			continue;

		case BPF_LD|BPF_H|BPF_IND:
			k = X + pc->k;
			if (pc->k > buflen || X > buflen - pc->k ||
			    sizeof(int16_t) > buflen - k) {
#ifdef _KERNEL
				int merr;

				if (buflen != 0)
					return 0;
				A = m_xhalf((const struct mbuf *)p, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = EXTRACT_SHORT(&p[k]);
			continue;

		case BPF_LD|BPF_B|BPF_IND:
			k = X + pc->k;
			if (pc->k >= buflen || X >= buflen - pc->k) {
#ifdef _KERNEL
				const struct mbuf *m;
				int len;

				if (buflen != 0)
					return 0;
				m = (const struct mbuf *)p;
				MINDEX(len, m, k);
				A = mtod(m, u_char *)[k];
				continue;
#else
				return 0;
#endif
			}
			A = p[k];
			continue;

		case BPF_LDX|BPF_MSH|BPF_B:
			k = pc->k;
			if (k >= buflen) {
#ifdef _KERNEL
				const struct mbuf *m;
				int len;

				if (buflen != 0)
					return 0;
				m = (const struct mbuf *)p;
				MINDEX(len, m, k);
				X = (mtod(m, char *)[k] & 0xf) << 2;
				continue;
#else
				return 0;
#endif
			}
			X = (p[pc->k] & 0xf) << 2;
			continue;

		case BPF_LD|BPF_IMM:
			A = pc->k;
			continue;

		case BPF_LDX|BPF_IMM:
			X = pc->k;
			continue;

		case BPF_LD|BPF_MEM:
			A = mem[pc->k];
			continue;

		case BPF_LDX|BPF_MEM:
			X = mem[pc->k];
			continue;

		case BPF_ST:
			mem[pc->k] = A;
			continue;

		case BPF_STX:
			mem[pc->k] = X;
			continue;

		case BPF_JMP|BPF_JA:
			pc += pc->k;
			continue;

		case BPF_JMP|BPF_JGT|BPF_K:
			pc += (A > pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGE|BPF_K:
			pc += (A >= pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JEQ|BPF_K:
			pc += (A == pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JSET|BPF_K:
			pc += (A & pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGT|BPF_X:
			pc += (A > X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGE|BPF_X:
			pc += (A >= X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JEQ|BPF_X:
			pc += (A == X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JSET|BPF_X:
			pc += (A & X) ? pc->jt : pc->jf;
			continue;

		case BPF_ALU|BPF_ADD|BPF_X:
			A += X;
			continue;

		case BPF_ALU|BPF_SUB|BPF_X:
			A -= X;
			continue;

		case BPF_ALU|BPF_MUL|BPF_X:
			A *= X;
			continue;

		case BPF_ALU|BPF_DIV|BPF_X:
			if (X == 0)
				return 0;
			A /= X;
			continue;

		case BPF_ALU|BPF_AND|BPF_X:
			A &= X;
			continue;

		case BPF_ALU|BPF_OR|BPF_X:
			A |= X;
			continue;

		case BPF_ALU|BPF_LSH|BPF_X:
			A <<= X;
			continue;

		case BPF_ALU|BPF_RSH|BPF_X:
			A >>= X;
			continue;

		case BPF_ALU|BPF_ADD|BPF_K:
			A += pc->k;
			continue;

		case BPF_ALU|BPF_SUB|BPF_K:
			A -= pc->k;
			continue;

		case BPF_ALU|BPF_MUL|BPF_K:
			A *= pc->k;
			continue;

		case BPF_ALU|BPF_DIV|BPF_K:
			A /= pc->k;
			continue;

		case BPF_ALU|BPF_AND|BPF_K:
			A &= pc->k;
			continue;

		case BPF_ALU|BPF_OR|BPF_K:
			A |= pc->k;
			continue;

		case BPF_ALU|BPF_LSH|BPF_K:
			A <<= pc->k;
			continue;

		case BPF_ALU|BPF_RSH|BPF_K:
			A >>= pc->k;
			continue;

		case BPF_ALU|BPF_NEG:
			A = -A;
			continue;

		case BPF_MISC|BPF_TAX:
			X = A;
			continue;

		case BPF_MISC|BPF_TXA:
			A = X;
			continue;
		}
	}
}
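
/*
 * Illustrative sketch only, kept under #if 0 so it is never compiled: a
 * minimal program for the interpreter above.  It loads the 16-bit Ethernet
 * type field at byte offset 12 and accepts the whole packet when the field
 * equals 0x0800 (IPv4).  The offset, the type value and the example_* names
 * are assumptions made for the example, not anything defined by this file.
 */
#if 0
static const struct bpf_insn example_prog[] = {
	BPF_STMT(BPF_LD|BPF_H|BPF_ABS, 12),		/* A <- p[12:2] */
	BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, 0x0800, 0, 1),	/* IPv4? */
	BPF_STMT(BPF_RET|BPF_K, (u_int)-1),		/* accept whole packet */
	BPF_STMT(BPF_RET|BPF_K, 0),			/* reject */
};

/*
 * With a contiguous capture buffer, wirelen and buflen are both the
 * buffer length; a non-zero return is the number of bytes to keep.
 */
static u_int
example_filter(const u_char *pkt, u_int pktlen)
{
	return bpf_filter(example_prog, pkt, pktlen, pktlen);
}
#endif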

/*
 * Return true if the 'fcode' is a valid filter program.
 * The constraints are that each jump be forward and to a valid
 * code, that memory accesses are within valid ranges (to the
 * extent that this can be checked statically; loads of packet
 * data have to be, and are, also checked at run time), and that
 * the code terminates with either an accept or reject.
 *
 * The kernel needs to be able to verify an application's filter code.
 * Otherwise, a bogus program could easily crash the system.
 */
__CTASSERT(BPF_MEMWORDS == sizeof(uint16_t) * NBBY);

int
bpf_validate(const struct bpf_insn *f, int signed_len)
{
	u_int i, from, len, ok = 0;
	const struct bpf_insn *p;
#if defined(KERNEL) || defined(_KERNEL)
	uint16_t *mem, invalid;
	size_t size;
#endif

	len = (u_int)signed_len;
	if (len < 1)
		return 0;
#if defined(KERNEL) || defined(_KERNEL)
	if (len > BPF_MAXINSNS)
		return 0;
#endif
	if (BPF_CLASS(f[len - 1].code) != BPF_RET)
		return 0;

#if defined(KERNEL) || defined(_KERNEL)
	mem = kmem_zalloc(size = sizeof(*mem) * len, KM_SLEEP);
	invalid = ~0;	/* All is invalid on startup */
#endif

	for (i = 0; i < len; ++i) {
#if defined(KERNEL) || defined(_KERNEL)
		/* blend in any invalid bits for current pc */
		invalid |= mem[i];
#endif
		p = &f[i];
		switch (BPF_CLASS(p->code)) {
		/*
		 * Check that memory operations use valid addresses.
		 */
		case BPF_LD:
		case BPF_LDX:
			switch (BPF_MODE(p->code)) {
			case BPF_MEM:
				/*
				 * There's no maximum packet data size
				 * in userland.  The runtime packet length
				 * check suffices.
				 */
#if defined(KERNEL) || defined(_KERNEL)
				/*
				 * More strict check with actual packet length
				 * is done runtime.
				 */
				if (p->k >= BPF_MEMWORDS)
					goto out;
				/* check for current memory invalid */
				if (invalid & (1 << p->k))
					goto out;
#endif
				break;
			case BPF_ABS:
			case BPF_IND:
			case BPF_MSH:
			case BPF_IMM:
			case BPF_LEN:
				break;
			default:
				goto out;
			}
			break;
		case BPF_ST:
		case BPF_STX:
			if (p->k >= BPF_MEMWORDS)
				goto out;
#if defined(KERNEL) || defined(_KERNEL)
			/* validate the memory word */
			invalid &= ~(1 << p->k);
#endif
			break;
		case BPF_ALU:
			switch (BPF_OP(p->code)) {
			case BPF_ADD:
			case BPF_SUB:
			case BPF_MUL:
			case BPF_OR:
			case BPF_AND:
			case BPF_LSH:
			case BPF_RSH:
			case BPF_NEG:
				break;
			case BPF_DIV:
				/*
				 * Check for constant division by 0.
				 */
				if (BPF_SRC(p->code) == BPF_K && p->k == 0)
					goto out;
				break;
			default:
				goto out;
			}
			break;
		case BPF_JMP:
			/*
			 * Check that jumps are within the code block,
			 * and that unconditional branches don't go
			 * backwards as a result of an overflow.
			 * Unconditional branches have a 32-bit offset,
			 * so they could overflow; we check to make
			 * sure they don't.  Conditional branches have
			 * an 8-bit offset, and the from address is <=
			 * BPF_MAXINSNS, and we assume that BPF_MAXINSNS
			 * is sufficiently small that adding 255 to it
			 * won't overflow.
			 *
			 * We know that len is <= BPF_MAXINSNS, and we
			 * assume that BPF_MAXINSNS is < the maximum size
			 * of a u_int, so that i + 1 doesn't overflow.
			 *
			 * For userland, we don't know that the from
			 * or len are <= BPF_MAXINSNS, but we know that
			 * from <= len, and, except on a 64-bit system,
			 * it's unlikely that len, if it truly reflects
			 * the size of the program we've been handed,
			 * will be anywhere near the maximum size of
			 * a u_int.  We also don't check for backward
			 * branches, as we currently support them in
			 * userland for the protochain operation.
			 */
			from = i + 1;
			switch (BPF_OP(p->code)) {
			case BPF_JA:
				if (from + p->k >= len)
					goto out;
#if defined(KERNEL) || defined(_KERNEL)
				if (from + p->k < from)
					goto out;
				/*
				 * mark the currently invalid bits for the
				 * destination
				 */
				mem[from + p->k] |= invalid;
				invalid = 0;
#endif
				break;
			case BPF_JEQ:
			case BPF_JGT:
			case BPF_JGE:
			case BPF_JSET:
				if (from + p->jt >= len || from + p->jf >= len)
					goto out;
#if defined(KERNEL) || defined(_KERNEL)
				/*
				 * mark the currently invalid bits for both
				 * possible jump destinations
				 */
				mem[from + p->jt] |= invalid;
				mem[from + p->jf] |= invalid;
				invalid = 0;
#endif
				break;
			default:
				goto out;
			}
			break;
		case BPF_RET:
			break;
		case BPF_MISC:
			break;
		default:
			goto out;
		}
	}
	ok = 1;
out:
#if defined(KERNEL) || defined(_KERNEL)
	kmem_free(mem, size);
#endif
	return ok;
}
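
/*
 * Illustrative sketch only, kept under #if 0 so it is never compiled:
 * what the validator above is expected to decide.  In the kernel build,
 * bad_prog reads scratch word M[3] before any path has stored to it, so
 * the "invalid" bitmap check rejects it; good_prog stores first and is
 * accepted.  The *_prog names are assumptions made for the example.
 */
#if 0
static const struct bpf_insn bad_prog[] = {
	BPF_STMT(BPF_LD|BPF_MEM, 3),	/* A <- M[3], never written */
	BPF_STMT(BPF_RET|BPF_A, 0),
};

static const struct bpf_insn good_prog[] = {
	BPF_STMT(BPF_ST, 3),		/* M[3] <- A */
	BPF_STMT(BPF_LD|BPF_MEM, 3),	/* A <- M[3] */
	BPF_STMT(BPF_RET|BPF_A, 0),
};

/*
 * In the kernel, bpf_validate(bad_prog, 2) returns 0 and
 * bpf_validate(good_prog, 3) returns 1.
 */
#endif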