xref: /netbsd-src/sys/net/bpf_filter.c (revision 46f5119e40af2e51998f686b2fdcc76b5488f7f3)
/*	$NetBSD: bpf_filter.c,v 1.46 2011/02/19 17:21:48 christos Exp $	*/

/*-
 * Copyright (c) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)bpf_filter.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bpf_filter.c,v 1.46 2011/02/19 17:21:48 christos Exp $");

#if 0
#if !(defined(lint) || defined(KERNEL))
static const char rcsid[] =
    "@(#) Header: bpf_filter.c,v 1.33 97/04/26 13:37:18 leres Exp  (LBL)";
#endif
#endif

#include <sys/param.h>
#include <sys/time.h>
#include <sys/kmem.h>
#include <sys/endian.h>

#define EXTRACT_SHORT(p)	be16dec(p)
#define EXTRACT_LONG(p)		be32dec(p)

#ifdef _KERNEL
#include <sys/mbuf.h>
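/*
 * MINDEX(len, m, k): walk the mbuf chain "m" until byte offset "k" falls
 * within the current mbuf, updating "m", "k" and "len" in place.  Note
 * that if the chain is exhausted first, this returns 0 from the function
 * that expands the macro.
 */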
#define MINDEX(len, m, k) 		\
{ 					\
	len = m->m_len; 		\
	while (k >= len) { 		\
		k -= len; 		\
		m = m->m_next; 		\
		if (m == 0) 		\
			return 0; 	\
		len = m->m_len; 	\
	} 				\
}

static int m_xword (const struct mbuf *, uint32_t, int *);
static int m_xhalf (const struct mbuf *, uint32_t, int *);

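/*
 * Extract a 32-bit quantity in network byte order starting at chain
 * offset "k"; the four bytes may straddle an mbuf boundary.  On success
 * *err is set to 0; on failure *err is left at 1 and 0 is returned.
 */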
static int
m_xword(const struct mbuf *m, uint32_t k, int *err)
{
	int len;
	u_char *cp, *np;
	struct mbuf *m0;

	*err = 1;
	MINDEX(len, m, k);
	cp = mtod(m, u_char *) + k;
	if (len >= k + 4) {
		*err = 0;
		return EXTRACT_LONG(cp);
	}
	m0 = m->m_next;
	if (m0 == 0 || m0->m_len + len - k < 4)
		return 0;
	*err = 0;
	np = mtod(m0, u_char *);
	switch (len - k) {

	case 1:
		return (cp[0] << 24) | (np[0] << 16) | (np[1] << 8) | np[2];

	case 2:
		return (cp[0] << 24) | (cp[1] << 16) | (np[0] << 8) | np[1];

	default:
		return (cp[0] << 24) | (cp[1] << 16) | (cp[2] << 8) | np[0];
	}
}

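/*
 * As above, but extract a 16-bit quantity in network byte order that
 * may straddle an mbuf boundary.
 */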
static int
m_xhalf(const struct mbuf *m, uint32_t k, int *err)
{
	int len;
	u_char *cp;
	struct mbuf *m0;

	*err = 1;
	MINDEX(len, m, k);
	cp = mtod(m, u_char *) + k;
	if (len >= k + 2) {
		*err = 0;
		return EXTRACT_SHORT(cp);
	}
	m0 = m->m_next;
	if (m0 == 0)
		return 0;
	*err = 0;
	return (cp[0] << 8) | mtod(m0, u_char *)[0];
}
#else /* _KERNEL */
#include <stdlib.h>
#endif /* !_KERNEL */

#include <net/bpf.h>

/*
 * Execute the filter program starting at pc on the packet p.
 * wirelen is the length of the original packet; buflen is the amount
 * of data present in the buffer.  In the kernel, a buflen of zero
 * means that p is really a pointer to an mbuf chain.
 */
u_int
bpf_filter(const struct bpf_insn *pc, const u_char *p, u_int wirelen,
    u_int buflen)
{
	uint32_t A, X, k;
	uint32_t mem[BPF_MEMWORDS];

	if (pc == 0)
		/*
		 * No filter means accept all.
		 */
		return (u_int)-1;
	A = 0;
	X = 0;
	--pc;
	for (;;) {
		++pc;
		switch (pc->code) {

		default:
#ifdef _KERNEL
			return 0;
#else
			abort();
			/*NOTREACHED*/
#endif
		case BPF_RET|BPF_K:
			return (u_int)pc->k;

		case BPF_RET|BPF_A:
			return (u_int)A;

		case BPF_LD|BPF_W|BPF_ABS:
			k = pc->k;
			if (k + sizeof(int32_t) > buflen) {
#ifdef _KERNEL
				int merr = 0;	/* XXX: GCC */

				if (buflen != 0)
					return 0;
				A = m_xword((const struct mbuf *)p, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = EXTRACT_LONG(&p[k]);
			continue;

		case BPF_LD|BPF_H|BPF_ABS:
			k = pc->k;
			if (k + sizeof(int16_t) > buflen) {
#ifdef _KERNEL
				int merr;

				if (buflen != 0)
					return 0;
				A = m_xhalf((const struct mbuf *)p, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = EXTRACT_SHORT(&p[k]);
			continue;

		case BPF_LD|BPF_B|BPF_ABS:
			k = pc->k;
			if (k >= buflen) {
#ifdef _KERNEL
				const struct mbuf *m;
				int len;

				if (buflen != 0)
					return 0;
				m = (const struct mbuf *)p;
				MINDEX(len, m, k);
				A = mtod(m, u_char *)[k];
				continue;
#else
				return 0;
#endif
			}
			A = p[k];
			continue;

		case BPF_LD|BPF_W|BPF_LEN:
			A = wirelen;
			continue;

		case BPF_LDX|BPF_W|BPF_LEN:
			X = wirelen;
			continue;

		case BPF_LD|BPF_W|BPF_IND:
			k = X + pc->k;
			if (k + sizeof(int32_t) > buflen) {
#ifdef _KERNEL
				int merr = 0;	/* XXX: GCC */

				if (buflen != 0)
					return 0;
				A = m_xword((const struct mbuf *)p, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = EXTRACT_LONG(&p[k]);
			continue;

		case BPF_LD|BPF_H|BPF_IND:
			k = X + pc->k;
			if (k + sizeof(int16_t) > buflen) {
#ifdef _KERNEL
				int merr = 0;	/* XXX: GCC */

				if (buflen != 0)
					return 0;
				A = m_xhalf((const struct mbuf *)p, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = EXTRACT_SHORT(&p[k]);
			continue;

		case BPF_LD|BPF_B|BPF_IND:
			k = X + pc->k;
			if (k >= buflen) {
#ifdef _KERNEL
				const struct mbuf *m;
				int len;

				if (buflen != 0)
					return 0;
				m = (const struct mbuf *)p;
				MINDEX(len, m, k);
				A = mtod(m, u_char *)[k];
				continue;
#else
				return 0;
#endif
			}
			A = p[k];
			continue;

		case BPF_LDX|BPF_MSH|BPF_B:
			k = pc->k;
			if (k >= buflen) {
#ifdef _KERNEL
				const struct mbuf *m;
				int len;

				if (buflen != 0)
					return 0;
				m = (const struct mbuf *)p;
				MINDEX(len, m, k);
				X = (mtod(m, char *)[k] & 0xf) << 2;
				continue;
#else
				return 0;
#endif
			}
			X = (p[pc->k] & 0xf) << 2;
			continue;

		case BPF_LD|BPF_IMM:
			A = pc->k;
			continue;

		case BPF_LDX|BPF_IMM:
			X = pc->k;
			continue;

		case BPF_LD|BPF_MEM:
			A = mem[pc->k];
			continue;

		case BPF_LDX|BPF_MEM:
			X = mem[pc->k];
			continue;

		case BPF_ST:
			mem[pc->k] = A;
			continue;

		case BPF_STX:
			mem[pc->k] = X;
			continue;

		case BPF_JMP|BPF_JA:
			pc += pc->k;
			continue;

		case BPF_JMP|BPF_JGT|BPF_K:
			pc += (A > pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGE|BPF_K:
			pc += (A >= pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JEQ|BPF_K:
			pc += (A == pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JSET|BPF_K:
			pc += (A & pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGT|BPF_X:
			pc += (A > X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGE|BPF_X:
			pc += (A >= X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JEQ|BPF_X:
			pc += (A == X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JSET|BPF_X:
			pc += (A & X) ? pc->jt : pc->jf;
			continue;

		case BPF_ALU|BPF_ADD|BPF_X:
			A += X;
			continue;

		case BPF_ALU|BPF_SUB|BPF_X:
			A -= X;
			continue;

		case BPF_ALU|BPF_MUL|BPF_X:
			A *= X;
			continue;

		case BPF_ALU|BPF_DIV|BPF_X:
			if (X == 0)
				return 0;
			A /= X;
			continue;

		case BPF_ALU|BPF_AND|BPF_X:
			A &= X;
			continue;

		case BPF_ALU|BPF_OR|BPF_X:
			A |= X;
			continue;

		case BPF_ALU|BPF_LSH|BPF_X:
			A <<= X;
			continue;

		case BPF_ALU|BPF_RSH|BPF_X:
			A >>= X;
			continue;

		case BPF_ALU|BPF_ADD|BPF_K:
			A += pc->k;
			continue;

		case BPF_ALU|BPF_SUB|BPF_K:
			A -= pc->k;
			continue;

		case BPF_ALU|BPF_MUL|BPF_K:
			A *= pc->k;
			continue;

		case BPF_ALU|BPF_DIV|BPF_K:
			A /= pc->k;
			continue;

		case BPF_ALU|BPF_AND|BPF_K:
			A &= pc->k;
			continue;

		case BPF_ALU|BPF_OR|BPF_K:
			A |= pc->k;
			continue;

		case BPF_ALU|BPF_LSH|BPF_K:
			A <<= pc->k;
			continue;

		case BPF_ALU|BPF_RSH|BPF_K:
			A >>= pc->k;
			continue;

		case BPF_ALU|BPF_NEG:
			A = -A;
			continue;

		case BPF_MISC|BPF_TAX:
			X = A;
			continue;

		case BPF_MISC|BPF_TXA:
			A = X;
			continue;
		}
	}
}
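
/*
 * Illustrative sketch only (not part of the original source): one way a
 * caller could run bpf_filter() over a contiguous capture buffer.  The
 * program below is the classic "accept IPv4 over Ethernet" filter; the
 * function name and packet arguments are hypothetical.
 */
#if 0
static u_int
example_match_ipv4(const u_char *pkt, u_int pktlen)
{
	static const struct bpf_insn insns[] = {
		/* A <- Ethernet type field (halfword at byte offset 12) */
		BPF_STMT(BPF_LD|BPF_H|BPF_ABS, 12),
		/* if (A == 0x0800, i.e. IPv4) fall through, else skip 1 */
		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, 0x0800, 0, 1),
		/* accept: snapshot the whole packet */
		BPF_STMT(BPF_RET|BPF_K, (u_int)-1),
		/* reject */
		BPF_STMT(BPF_RET|BPF_K, 0),
	};

	/* With a contiguous buffer, wirelen and buflen are both pktlen. */
	return bpf_filter(insns, pkt, pktlen, pktlen);
}
#endif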

/*
 * Return true if 'f' is a valid filter program.
 * The constraints are that each jump be forward and land on a valid
 * instruction, that memory accesses are within valid ranges (to the
 * extent that this can be checked statically; loads of packet
 * data have to be, and are, also checked at run time), and that
 * the code terminates with either an accept or reject.
 *
 * The kernel needs to be able to verify an application's filter code.
 * Otherwise, a bogus program could easily crash the system.
 */
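
/*
 * The kernel validator below tracks which of the BPF_MEMWORDS scratch
 * memory cells have been stored to, using one uint16_t bitmask per
 * instruction; the assertion guarantees that every scratch cell has a
 * corresponding bit.
 */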
__CTASSERT(BPF_MEMWORDS == sizeof(uint16_t) * NBBY);

int
bpf_validate(const struct bpf_insn *f, int signed_len)
{
	u_int i, from, len, ok = 0;
	const struct bpf_insn *p;
#if defined(KERNEL) || defined(_KERNEL)
	uint16_t *mem, invalid;
	size_t size;
#endif

	len = (u_int)signed_len;
	if (len < 1)
		return 0;
#if defined(KERNEL) || defined(_KERNEL)
	if (len > BPF_MAXINSNS)
		return 0;
#endif
	if (BPF_CLASS(f[len - 1].code) != BPF_RET)
		return 0;

#if defined(KERNEL) || defined(_KERNEL)
	mem = kmem_zalloc(size = sizeof(*mem) * len, KM_SLEEP);
	invalid = ~0;	/* All is invalid on startup */
#endif

	for (i = 0; i < len; ++i) {
#if defined(KERNEL) || defined(_KERNEL)
		/* blend in any invalid bits for current pc */
		invalid |= mem[i];
#endif
		p = &f[i];
		switch (BPF_CLASS(p->code)) {
		/*
		 * Check that memory operations use valid addresses.
		 */
		case BPF_LD:
		case BPF_LDX:
			switch (BPF_MODE(p->code)) {
			case BPF_MEM:
				/*
				 * There's no maximum packet data size
				 * in userland.  The runtime packet length
				 * check suffices.
				 */
#if defined(KERNEL) || defined(_KERNEL)
				/*
				 * A stricter check against the actual
				 * packet length is done at run time.
				 */
				if (p->k >= BPF_MEMWORDS)
					goto out;
				/* check for current memory invalid */
				if (invalid & (1 << p->k))
					goto out;
#endif
				break;
			case BPF_ABS:
			case BPF_IND:
			case BPF_MSH:
			case BPF_IMM:
			case BPF_LEN:
				break;
			default:
				goto out;
			}
			break;
		case BPF_ST:
		case BPF_STX:
			if (p->k >= BPF_MEMWORDS)
				goto out;
#if defined(KERNEL) || defined(_KERNEL)
			/* validate the memory word */
			invalid &= ~(1 << p->k);
#endif
			break;
		case BPF_ALU:
			switch (BPF_OP(p->code)) {
			case BPF_ADD:
			case BPF_SUB:
			case BPF_MUL:
			case BPF_OR:
			case BPF_AND:
			case BPF_LSH:
			case BPF_RSH:
			case BPF_NEG:
				break;
			case BPF_DIV:
				/*
				 * Check for constant division by 0.
				 */
				if (BPF_SRC(p->code) == BPF_K && p->k == 0)
					goto out;
				break;
			default:
				goto out;
			}
			break;
		case BPF_JMP:
			/*
			 * Check that jumps are within the code block,
			 * and that unconditional branches don't go
			 * backwards as a result of an overflow.
			 * Unconditional branches have a 32-bit offset,
			 * so they could overflow; we check to make
			 * sure they don't.  Conditional branches have
			 * an 8-bit offset, and the from address is <=
			 * BPF_MAXINSNS, and we assume that BPF_MAXINSNS
			 * is sufficiently small that adding 255 to it
			 * won't overflow.
			 *
			 * We know that len is <= BPF_MAXINSNS, and we
			 * assume that BPF_MAXINSNS is < the maximum size
			 * of a u_int, so that i + 1 doesn't overflow.
			 *
			 * For userland, we don't know that the from
			 * or len are <= BPF_MAXINSNS, but we know that
			 * from <= len, and, except on a 64-bit system,
			 * it's unlikely that len, if it truly reflects
			 * the size of the program we've been handed,
			 * will be anywhere near the maximum size of
			 * a u_int.  We also don't check for backward
			 * branches, as we currently support them in
			 * userland for the protochain operation.
			 */
			from = i + 1;
			switch (BPF_OP(p->code)) {
			case BPF_JA:
				if (from + p->k >= len)
					goto out;
#if defined(KERNEL) || defined(_KERNEL)
				if (from + p->k < from)
					goto out;
				/*
				 * mark the currently invalid bits for the
				 * destination
				 */
				mem[from + p->k] |= invalid;
				invalid = 0;
#endif
				break;
			case BPF_JEQ:
			case BPF_JGT:
			case BPF_JGE:
			case BPF_JSET:
				if (from + p->jt >= len || from + p->jf >= len)
					goto out;
#if defined(KERNEL) || defined(_KERNEL)
				/*
				 * mark the currently invalid bits for both
				 * possible jump destinations
				 */
				mem[from + p->jt] |= invalid;
				mem[from + p->jf] |= invalid;
				invalid = 0;
#endif
				break;
			default:
				goto out;
			}
			break;
		case BPF_RET:
			break;
		case BPF_MISC:
			break;
		default:
			goto out;
		}
	}
	ok = 1;
out:
#if defined(KERNEL) || defined(_KERNEL)
	kmem_free(mem, size);
#endif
	return ok;
}
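
/*
 * Illustrative sketch only (not part of the original source): a caller
 * is expected to validate a user-supplied program before it is ever run
 * on live packets.  The function name and error handling are hypothetical.
 */
#if 0
static int
example_install(const struct bpf_insn *prog, size_t ninsns)
{
	/* Refuse a program that fails static validation. */
	if (!bpf_validate(prog, (int)ninsns))
		return 0;	/* invalid program, do not install */
	/* ... attach "prog" and later run it with bpf_filter() ... */
	return 1;
}
#endif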