xref: /netbsd-src/sys/net/bpf_filter.c (revision 975a152cfcdb39ae6e496af647af0c7275ca0b61)
1 /*	$NetBSD: bpf_filter.c,v 1.57 2013/08/30 15:00:08 rmind Exp $	*/
2 
3 /*-
4  * Copyright (c) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * This code is derived from the Stanford/CMU enet packet filter,
8  * (net/enet.c) distributed as part of 4.3BSD, and code contributed
9  * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
10  * Berkeley Laboratory.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  *	@(#)bpf_filter.c	8.1 (Berkeley) 6/10/93
37  */
38 
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: bpf_filter.c,v 1.57 2013/08/30 15:00:08 rmind Exp $");
41 
42 #if 0
43 #if !(defined(lint) || defined(KERNEL))
44 static const char rcsid[] =
45     "@(#) Header: bpf_filter.c,v 1.33 97/04/26 13:37:18 leres Exp  (LBL)";
46 #endif
47 #endif
48 
49 #include <sys/param.h>
50 #include <sys/time.h>
51 #include <sys/kmem.h>
52 #include <sys/endian.h>
53 
54 #include <net/bpf.h>
55 
56 #ifdef _KERNEL
57 
/*
 * BPF context: holds the coprocessor function table consulted by the
 * BPF_COP/BPF_COPX instructions (see bpf_set_cop() below).
 */
struct bpf_ctx {
	const bpf_copfunc_t *	copfuncs;	/* coprocessor call table */
	size_t			nfuncs;		/* number of entries in copfuncs */
};

/* Default BPF context (zeroed). */
static bpf_ctx_t		bpf_def_ctx1;
bpf_ctx_t *			bpf_def_ctx = &bpf_def_ctx1;
66 
67 bpf_ctx_t *
68 bpf_create(void)
69 {
70 	return kmem_zalloc(sizeof(bpf_ctx_t), KM_SLEEP);
71 }
72 
73 void
74 bpf_destroy(bpf_ctx_t *bc)
75 {
76 	kmem_free(bc, sizeof(bpf_ctx_t));
77 }
78 
79 int
80 bpf_set_cop(bpf_ctx_t *bc, const bpf_copfunc_t *funcs, size_t n)
81 {
82 	bc->copfuncs = funcs;
83 	bc->nfuncs = n;
84 	return 0;
85 }
86 
87 #endif
88 
/* Load 16/32-bit big-endian (network byte order) values from packet memory. */
#define EXTRACT_SHORT(p)	be16dec(p)
#define EXTRACT_LONG(p)		be32dec(p)
91 
92 #ifdef _KERNEL
93 #include <sys/mbuf.h>
/*
 * MINDEX: walk the mbuf chain, advancing (m, k) until byte offset k
 * falls inside mbuf m, and set len to that mbuf's m_len.  On exit,
 * 0 <= k < len.  If the chain is too short, this returns 0 from the
 * ENCLOSING function (i.e. load failure / packet rejected).
 *
 * Wrapped in do { } while (0) so the expansion is a single statement:
 * the previous bare { } block miscompiled under an unbraced
 * "if (...) MINDEX(...); else ...".
 */
#define MINDEX(len, m, k) 		\
do {					\
	len = m->m_len; 		\
	while (k >= len) { 		\
		k -= len; 		\
		m = m->m_next; 		\
		if (m == NULL) 		\
			return 0; 	\
		len = m->m_len; 	\
	}				\
} while (/*CONSTCOND*/0)
105 
106 uint32_t m_xword (const struct mbuf *, uint32_t, int *);
107 uint32_t m_xhalf (const struct mbuf *, uint32_t, int *);
108 uint32_t m_xbyte (const struct mbuf *, uint32_t, int *);
109 
/*
 * m_xword: extract a 32-bit word in host order from network byte order
 * at byte offset k of an mbuf chain, handling words that straddle the
 * boundary between two mbufs.
 *
 * On success, *err is set to 0 and the word is returned.  On a short
 * chain, 0 is returned with *err left at 1 (either via MINDEX's early
 * return or the explicit length check below).
 */
uint32_t
m_xword(const struct mbuf *m, uint32_t k, int *err)
{
	int len;
	u_char *cp, *np;
	struct mbuf *m0;

	*err = 1;		/* assume failure until the load succeeds */
	MINDEX(len, m, k);	/* after this, 0 <= k < len within mbuf m */
	cp = mtod(m, u_char *) + k;
	if (len >= k + 4) {
		/* Fast path: the whole word lies within this mbuf. */
		*err = 0;
		return EXTRACT_LONG(cp);
	}
	/* Word straddles into the next mbuf; need 4 bytes in total. */
	m0 = m->m_next;
	if (m0 == 0 || m0->m_len + len - k < 4)
		return 0;
	*err = 0;
	np = mtod(m0, u_char *);

	/*
	 * Reassemble the big-endian word from the (len - k) trailing
	 * bytes of this mbuf plus the leading bytes of the next.
	 */
	switch (len - k) {
	case 1:
		return (cp[0] << 24) | (np[0] << 16) | (np[1] << 8) | np[2];
	case 2:
		return (cp[0] << 24) | (cp[1] << 16) | (np[0] << 8) | np[1];
	default:
		return (cp[0] << 24) | (cp[1] << 16) | (cp[2] << 8) | np[0];
	}
}
139 
/*
 * m_xhalf: extract a 16-bit value in host order from network byte
 * order at byte offset k of an mbuf chain, handling values split
 * across an mbuf boundary.
 *
 * On success, *err is set to 0 and the value is returned; on a short
 * chain, 0 is returned with *err left at 1.
 */
uint32_t
m_xhalf(const struct mbuf *m, uint32_t k, int *err)
{
	int len;
	u_char *cp;
	struct mbuf *m0;

	*err = 1;		/* assume failure until the load succeeds */
	MINDEX(len, m, k);	/* after this, 0 <= k < len within mbuf m */
	cp = mtod(m, u_char *) + k;
	if (len >= k + 2) {
		/* Fast path: both bytes lie within this mbuf. */
		*err = 0;
		return EXTRACT_SHORT(cp);
	}
	m0 = m->m_next;
	if (m0 == 0)
		return 0;
	/*
	 * NOTE(review): unlike m_xword, the next mbuf's length is not
	 * checked here; if m0->m_len == 0 this reads beyond valid data.
	 * Presumably packet chains never carry zero-length mbufs —
	 * verify against the callers.
	 */
	*err = 0;
	return (cp[0] << 8) | mtod(m0, u_char *)[0];
}
160 
161 uint32_t
162 m_xbyte(const struct mbuf *m, uint32_t k, int *err)
163 {
164 	int len;
165 
166 	*err = 0;
167 	MINDEX(len, m, k);
168 	return mtod(m, u_char *)[k];
169 }
170 #else /* _KERNEL */
171 #include <stdlib.h>
172 #endif /* !_KERNEL */
173 
174 #include <net/bpf.h>
175 
/*
 * Execute the filter program starting at pc on the packet p.
 * wirelen is the length of the original packet;
 * buflen is the amount of data present in the buffer p.
 *
 * In the kernel, buflen == 0 signals that p is really a struct mbuf
 * chain rather than a flat buffer; out-of-bounds loads then fall back
 * to the m_xword/m_xhalf/MINDEX chain walkers above.
 *
 * Returns the filter verdict: 0 rejects the packet, any other value
 * is the number of packet bytes to accept.
 */
#ifdef _KERNEL
u_int
bpf_filter(bpf_ctx_t *bc, void *arg, const struct bpf_insn *pc,
    const u_char *p, u_int wirelen, u_int buflen)
#else
u_int
bpf_filter(const struct bpf_insn *pc, const u_char *p, u_int wirelen,
    u_int buflen)
#endif
{
	uint32_t A, X, k;		/* accumulator, index reg, scratch offset */
	uint32_t mem[BPF_MEMWORDS];	/* scratch memory store */

#ifdef _KERNEL
	KASSERT(bc != NULL);
#endif

	if (pc == 0) {
		/*
		 * No filter means accept all.
		 */
		return (u_int)-1;
	}

	/*
	 * Note: safe to leave memwords uninitialised, as the validation
	 * step ensures that it will not be read, if it was not written.
	 */
	A = 0;
	X = 0;
	--pc;		/* pre-decrement; the loop pre-increments */

	for (;;) {
		++pc;
		switch (pc->code) {

		default:
			/* Unknown opcode: should be unreachable after
			 * bpf_validate(); reject (kernel) or abort. */
#ifdef _KERNEL
			return 0;
#else
			abort();
			/*NOTREACHED*/
#endif
		case BPF_RET|BPF_K:
			return (u_int)pc->k;

		case BPF_RET|BPF_A:
			return (u_int)A;

		/*
		 * Absolute loads.  The guards are written to avoid
		 * unsigned overflow in "k + size" comparisons.
		 */
		case BPF_LD|BPF_W|BPF_ABS:
			k = pc->k;
			if (k > buflen || sizeof(int32_t) > buflen - k) {
#ifdef _KERNEL
				int merr;

				if (buflen != 0)
					return 0;
				A = m_xword((const struct mbuf *)p, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = EXTRACT_LONG(&p[k]);
			continue;

		case BPF_LD|BPF_H|BPF_ABS:
			k = pc->k;
			if (k > buflen || sizeof(int16_t) > buflen - k) {
#ifdef _KERNEL
				int merr;

				if (buflen != 0)
					return 0;
				A = m_xhalf((const struct mbuf *)p, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = EXTRACT_SHORT(&p[k]);
			continue;

		case BPF_LD|BPF_B|BPF_ABS:
			k = pc->k;
			if (k >= buflen) {
#ifdef _KERNEL
				const struct mbuf *m;
				int len;

				if (buflen != 0)
					return 0;
				m = (const struct mbuf *)p;
				MINDEX(len, m, k);
				A = mtod(m, u_char *)[k];
				continue;
#else
				return 0;
#endif
			}
			A = p[k];
			continue;

		case BPF_LD|BPF_W|BPF_LEN:
			A = wirelen;
			continue;

		case BPF_LDX|BPF_W|BPF_LEN:
			X = wirelen;
			continue;

		/*
		 * Indexed loads: offset is X + k; the guards also catch
		 * overflow of the X + k addition.
		 */
		case BPF_LD|BPF_W|BPF_IND:
			k = X + pc->k;
			if (pc->k > buflen || X > buflen - pc->k ||
			    sizeof(int32_t) > buflen - k) {
#ifdef _KERNEL
				int merr;

				if (buflen != 0)
					return 0;
				A = m_xword((const struct mbuf *)p, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = EXTRACT_LONG(&p[k]);
			continue;

		case BPF_LD|BPF_H|BPF_IND:
			k = X + pc->k;
			if (pc->k > buflen || X > buflen - pc->k ||
			    sizeof(int16_t) > buflen - k) {
#ifdef _KERNEL
				int merr;

				if (buflen != 0)
					return 0;
				A = m_xhalf((const struct mbuf *)p, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = EXTRACT_SHORT(&p[k]);
			continue;

		case BPF_LD|BPF_B|BPF_IND:
			k = X + pc->k;
			if (pc->k >= buflen || X >= buflen - pc->k) {
#ifdef _KERNEL
				const struct mbuf *m;
				int len;

				if (buflen != 0)
					return 0;
				m = (const struct mbuf *)p;
				MINDEX(len, m, k);
				A = mtod(m, u_char *)[k];
				continue;
#else
				return 0;
#endif
			}
			A = p[k];
			continue;

		case BPF_LDX|BPF_MSH|BPF_B:
			/* X = IP header length: 4 * (low nibble of byte k). */
			k = pc->k;
			if (k >= buflen) {
#ifdef _KERNEL
				const struct mbuf *m;
				int len;

				if (buflen != 0)
					return 0;
				m = (const struct mbuf *)p;
				MINDEX(len, m, k);
				X = (mtod(m, char *)[k] & 0xf) << 2;
				continue;
#else
				return 0;
#endif
			}
			X = (p[pc->k] & 0xf) << 2;
			continue;

		case BPF_LD|BPF_IMM:
			A = pc->k;
			continue;

		case BPF_LDX|BPF_IMM:
			X = pc->k;
			continue;

		/* Scratch memory access; indices validated by bpf_validate(). */
		case BPF_LD|BPF_MEM:
			A = mem[pc->k];
			continue;

		case BPF_LDX|BPF_MEM:
			X = mem[pc->k];
			continue;

		case BPF_ST:
			mem[pc->k] = A;
			continue;

		case BPF_STX:
			mem[pc->k] = X;
			continue;

		/* Jumps: relative to the instruction after this one. */
		case BPF_JMP|BPF_JA:
			pc += pc->k;
			continue;

		case BPF_JMP|BPF_JGT|BPF_K:
			pc += (A > pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGE|BPF_K:
			pc += (A >= pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JEQ|BPF_K:
			pc += (A == pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JSET|BPF_K:
			pc += (A & pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGT|BPF_X:
			pc += (A > X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGE|BPF_X:
			pc += (A >= X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JEQ|BPF_X:
			pc += (A == X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JSET|BPF_X:
			pc += (A & X) ? pc->jt : pc->jf;
			continue;

		/* ALU operations, unsigned 32-bit arithmetic. */
		case BPF_ALU|BPF_ADD|BPF_X:
			A += X;
			continue;

		case BPF_ALU|BPF_SUB|BPF_X:
			A -= X;
			continue;

		case BPF_ALU|BPF_MUL|BPF_X:
			A *= X;
			continue;

		case BPF_ALU|BPF_DIV|BPF_X:
			/* Runtime divide-by-zero check; rejects the packet. */
			if (X == 0)
				return 0;
			A /= X;
			continue;

		case BPF_ALU|BPF_AND|BPF_X:
			A &= X;
			continue;

		case BPF_ALU|BPF_OR|BPF_X:
			A |= X;
			continue;

		case BPF_ALU|BPF_LSH|BPF_X:
			A <<= X;
			continue;

		case BPF_ALU|BPF_RSH|BPF_X:
			A >>= X;
			continue;

		case BPF_ALU|BPF_ADD|BPF_K:
			A += pc->k;
			continue;

		case BPF_ALU|BPF_SUB|BPF_K:
			A -= pc->k;
			continue;

		case BPF_ALU|BPF_MUL|BPF_K:
			A *= pc->k;
			continue;

		case BPF_ALU|BPF_DIV|BPF_K:
			/* k == 0 is rejected statically by bpf_validate(). */
			A /= pc->k;
			continue;

		case BPF_ALU|BPF_AND|BPF_K:
			A &= pc->k;
			continue;

		case BPF_ALU|BPF_OR|BPF_K:
			A |= pc->k;
			continue;

		case BPF_ALU|BPF_LSH|BPF_K:
			A <<= pc->k;
			continue;

		case BPF_ALU|BPF_RSH|BPF_K:
			A >>= pc->k;
			continue;

		case BPF_ALU|BPF_NEG:
			A = -A;
			continue;

		case BPF_MISC|BPF_TAX:
			X = A;
			continue;

		case BPF_MISC|BPF_TXA:
			A = X;
			continue;

		/*
		 * Coprocessor calls (kernel only): dispatch to the table
		 * installed with bpf_set_cop(); out-of-range indices
		 * reject the packet.
		 */
		case BPF_MISC|BPF_COP:
#ifdef _KERNEL
			if (pc->k < bc->nfuncs) {
				const bpf_copfunc_t fn = bc->copfuncs[pc->k];
				A = fn((const struct mbuf *)p, arg, A, mem);
				continue;
			}
#endif
			return 0;

		case BPF_MISC|BPF_COPX:
#ifdef _KERNEL
			if (X < bc->nfuncs) {
				const bpf_copfunc_t fn = bc->copfuncs[X];
				A = fn((const struct mbuf *)p, arg, A, mem);
				continue;
			}
#endif
			return 0;
		}
	}
}
536 
/*
 * Return true if the 'fcode' is a valid filter program.
 * The constraints are that each jump be forward and to a valid
 * code, that memory accesses are within valid ranges (to the
 * extent that this can be checked statically; loads of packet
 * data have to be, and are, also checked at run time), and that
 * the code terminates with either an accept or reject.
 *
 * In the kernel, a dataflow pass additionally rejects programs that
 * read a scratch memory word before writing it: 'invalid' is a bitmask
 * (one bit per BPF_MEMWORDS slot, hence the __CTASSERT below) of the
 * words not yet written on the current path, and mem[i] accumulates
 * the masks carried into instruction i by jumps.
 *
 * The kernel needs to be able to verify an application's filter code.
 * Otherwise, a bogus program could easily crash the system.
 */
__CTASSERT(BPF_MEMWORDS == sizeof(uint16_t) * NBBY);

int
bpf_validate(const struct bpf_insn *f, int signed_len)
{
	u_int i, from, len, ok = 0;
	const struct bpf_insn *p;
#if defined(KERNEL) || defined(_KERNEL)
	uint16_t *mem, invalid;		/* per-insn and current invalid-word masks */
	size_t size;
#endif

	len = (u_int)signed_len;
	if (len < 1)
		return 0;
#if defined(KERNEL) || defined(_KERNEL)
	if (len > BPF_MAXINSNS)
		return 0;
#endif
	/* Program must end with an accept/reject. */
	if (BPF_CLASS(f[len - 1].code) != BPF_RET)
		return 0;

#if defined(KERNEL) || defined(_KERNEL)
	mem = kmem_zalloc(size = sizeof(*mem) * len, KM_SLEEP);
	invalid = ~0;	/* All is invalid on startup */
#endif

	for (i = 0; i < len; ++i) {
#if defined(KERNEL) || defined(_KERNEL)
		/* blend in any invalid bits for current pc */
		invalid |= mem[i];
#endif
		p = &f[i];
		switch (BPF_CLASS(p->code)) {
		/*
		 * Check that memory operations use valid addresses.
		 */
		case BPF_LD:
		case BPF_LDX:
			switch (BPF_MODE(p->code)) {
			case BPF_MEM:
				/*
				 * There's no maximum packet data size
				 * in userland.  The runtime packet length
				 * check suffices.
				 */
#if defined(KERNEL) || defined(_KERNEL)
				/*
				 * More strict check with actual packet length
				 * is done runtime.
				 */
				if (p->k >= BPF_MEMWORDS)
					goto out;
				/* check for current memory invalid */
				if (invalid & (1 << p->k))
					goto out;
#endif
				break;
			case BPF_ABS:
			case BPF_IND:
			case BPF_MSH:
			case BPF_IMM:
			case BPF_LEN:
				break;
			default:
				goto out;
			}
			break;
		case BPF_ST:
		case BPF_STX:
			if (p->k >= BPF_MEMWORDS)
				goto out;
#if defined(KERNEL) || defined(_KERNEL)
			/* validate the memory word */
			invalid &= ~(1 << p->k);
#endif
			break;
		case BPF_ALU:
			switch (BPF_OP(p->code)) {
			case BPF_ADD:
			case BPF_SUB:
			case BPF_MUL:
			case BPF_OR:
			case BPF_AND:
			case BPF_LSH:
			case BPF_RSH:
			case BPF_NEG:
				break;
			case BPF_DIV:
				/*
				 * Check for constant division by 0.
				 */
				if (BPF_SRC(p->code) == BPF_K && p->k == 0)
					goto out;
				break;
			default:
				goto out;
			}
			break;
		case BPF_JMP:
			/*
			 * Check that jumps are within the code block,
			 * and that unconditional branches don't go
			 * backwards as a result of an overflow.
			 * Unconditional branches have a 32-bit offset,
			 * so they could overflow; we check to make
			 * sure they don't.  Conditional branches have
			 * an 8-bit offset, and the from address is <=
			 * BPF_MAXINSNS, and we assume that BPF_MAXINSNS
			 * is sufficiently small that adding 255 to it
			 * won't overflow.
			 *
			 * We know that len is <= BPF_MAXINSNS, and we
			 * assume that BPF_MAXINSNS is < the maximum size
			 * of a u_int, so that i + 1 doesn't overflow.
			 *
			 * For userland, we don't know that the from
			 * or len are <= BPF_MAXINSNS, but we know that
			 * from <= len, and, except on a 64-bit system,
			 * it's unlikely that len, if it truly reflects
			 * the size of the program we've been handed,
			 * will be anywhere near the maximum size of
			 * a u_int.  We also don't check for backward
			 * branches, as we currently support them in
			 * userland for the protochain operation.
			 */
			from = i + 1;
			switch (BPF_OP(p->code)) {
			case BPF_JA:
				if (from + p->k >= len)
					goto out;
#if defined(KERNEL) || defined(_KERNEL)
				if (from + p->k < from)
					goto out;
				/*
				 * mark the currently invalid bits for the
				 * destination
				 */
				mem[from + p->k] |= invalid;
				invalid = 0;
#endif
				break;
			case BPF_JEQ:
			case BPF_JGT:
			case BPF_JGE:
			case BPF_JSET:
				if (from + p->jt >= len || from + p->jf >= len)
					goto out;
#if defined(KERNEL) || defined(_KERNEL)
				/*
				 * mark the currently invalid bits for both
				 * possible jump destinations
				 */
				mem[from + p->jt] |= invalid;
				mem[from + p->jf] |= invalid;
				invalid = 0;
#endif
				break;
			default:
				goto out;
			}
			break;
		case BPF_RET:
			break;
		case BPF_MISC:
			break;
		default:
			goto out;
		}
	}
	ok = 1;
out:
#if defined(KERNEL) || defined(_KERNEL)
	kmem_free(mem, size);
#endif
	return ok;
}
725