/*	$NetBSD: bpf_filter.c,v 1.50 2011/12/29 23:47:21 alnsn Exp $	*/

/*-
 * Copyright (c) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)bpf_filter.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bpf_filter.c,v 1.50 2011/12/29 23:47:21 alnsn Exp $");

#if 0
#if !(defined(lint) || defined(KERNEL))
static const char rcsid[] =
    "@(#) Header: bpf_filter.c,v 1.33 97/04/26 13:37:18 leres Exp  (LBL)";
#endif
#endif

#include <sys/param.h>
#include <sys/time.h>
#include <sys/kmem.h>
#include <sys/endian.h>

/* Packet data is stored in network (big-endian) byte order. */
#define EXTRACT_SHORT(p)	be16dec(p)
#define EXTRACT_LONG(p)		be32dec(p)

#ifdef _KERNEL
#include <sys/mbuf.h>
/*
 * Walk the mbuf chain m until the byte at offset k is reached: on exit
 * m points at the mbuf holding that byte, k is the offset within that
 * mbuf and len is its length.  If the chain is too short, return 0 from
 * the enclosing function (i.e. fail the load).
 */
#define MINDEX(len, m, k) 		\
{ 					\
	len = m->m_len; 		\
	while (k >= len) { 		\
		k -= len; 		\
		m = m->m_next; 		\
		if (m == 0) 		\
			return 0; 	\
		len = m->m_len; 	\
	} 				\
}

static int m_xword (const struct mbuf *, uint32_t, int *);
static int m_xhalf (const struct mbuf *, uint32_t, int *);

/*
 * Fetch the 32-bit word stored in network byte order at byte offset k
 * of the mbuf chain m and return it in host order.  *err is set to 0
 * on success and to 1 if the bytes cannot be fetched (chain too short,
 * or the word spans more than two mbufs).
 */
static int
m_xword(const struct mbuf *m, uint32_t k, int *err)
{
	int len;
	u_char *cp, *np;
	struct mbuf *m0;

	*err = 1;
	MINDEX(len, m, k);
	cp = mtod(m, u_char *) + k;
	if (len >= k + 4) {
		*err = 0;
		return EXTRACT_LONG(cp);
	}
	m0 = m->m_next;
	if (m0 == 0 || m0->m_len + len - k < 4)
		return 0;
	*err = 0;
	np = mtod(m0, u_char *);
	switch (len - k) {

	case 1:
		return (cp[0] << 24) | (np[0] << 16) | (np[1] << 8) | np[2];

	case 2:
		return (cp[0] << 24) | (cp[1] << 16) | (np[0] << 8) | np[1];

	default:
		return (cp[0] << 24) | (cp[1] << 16) | (cp[2] << 8) | np[0];
	}
}

/*
 * As above, but fetch the 16-bit quantity at byte offset k.
 */
static int
m_xhalf(const struct mbuf *m, uint32_t k, int *err)
{
	int len;
	u_char *cp;
	struct mbuf *m0;

	*err = 1;
	MINDEX(len, m, k);
	cp = mtod(m, u_char *) + k;
	if (len >= k + 2) {
		*err = 0;
		return EXTRACT_SHORT(cp);
	}
	m0 = m->m_next;
	if (m0 == 0)
		return 0;
	*err = 0;
	return (cp[0] << 8) | mtod(m0, u_char *)[0];
}
#else /* _KERNEL */
#include <stdlib.h>
#endif /* !_KERNEL */

#include <net/bpf.h>

/*
 * Execute the filter program starting at pc on the packet p.
 * wirelen is the length of the original packet; buflen is the amount
 * of data present in the buffer at p.  In the kernel, a buflen of 0
 * means that p is really an mbuf chain (see the illustrative program
 * after this function).
 */
u_int
bpf_filter(const struct bpf_insn *pc, const u_char *p, u_int wirelen,
    u_int buflen)
{
	uint32_t A, X, k;
	uint32_t mem[BPF_MEMWORDS];

	if (pc == 0)
		/*
		 * No filter means accept all.
		 */
		return (u_int)-1;
	A = 0;
	X = 0;
	--pc;
	for (;;) {
		++pc;
		switch (pc->code) {

		default:
#ifdef _KERNEL
			return 0;
#else
			abort();
			/*NOTREACHED*/
#endif
		case BPF_RET|BPF_K:
			return (u_int)pc->k;

		case BPF_RET|BPF_A:
			return (u_int)A;

		case BPF_LD|BPF_W|BPF_ABS:
			k = pc->k;
			if (k > buflen || sizeof(int32_t) > buflen - k) {
#ifdef _KERNEL
				int merr = 0;	/* XXX: GCC */

				if (buflen != 0)
					return 0;
				A = m_xword((const struct mbuf *)p, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = EXTRACT_LONG(&p[k]);
			continue;

		case BPF_LD|BPF_H|BPF_ABS:
			k = pc->k;
			if (k > buflen || sizeof(int16_t) > buflen - k) {
#ifdef _KERNEL
				int merr;

				if (buflen != 0)
					return 0;
				A = m_xhalf((const struct mbuf *)p, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = EXTRACT_SHORT(&p[k]);
			continue;

		case BPF_LD|BPF_B|BPF_ABS:
			k = pc->k;
			if (k >= buflen) {
#ifdef _KERNEL
				const struct mbuf *m;
				int len;

				if (buflen != 0)
					return 0;
				m = (const struct mbuf *)p;
				MINDEX(len, m, k);
				A = mtod(m, u_char *)[k];
				continue;
#else
				return 0;
#endif
			}
			A = p[k];
			continue;

		case BPF_LD|BPF_W|BPF_LEN:
			A = wirelen;
			continue;

		case BPF_LDX|BPF_W|BPF_LEN:
			X = wirelen;
			continue;

		case BPF_LD|BPF_W|BPF_IND:
			k = X + pc->k;
			if (pc->k > buflen || X > buflen - pc->k ||
			    sizeof(int32_t) > buflen - k) {
#ifdef _KERNEL
				int merr = 0;	/* XXX: GCC */

				if (buflen != 0)
					return 0;
				A = m_xword((const struct mbuf *)p, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = EXTRACT_LONG(&p[k]);
			continue;

		case BPF_LD|BPF_H|BPF_IND:
			k = X + pc->k;
			if (pc->k > buflen || X > buflen - pc->k ||
			    sizeof(int16_t) > buflen - k) {
#ifdef _KERNEL
				int merr = 0;	/* XXX: GCC */

				if (buflen != 0)
					return 0;
				A = m_xhalf((const struct mbuf *)p, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = EXTRACT_SHORT(&p[k]);
			continue;

		case BPF_LD|BPF_B|BPF_IND:
			k = X + pc->k;
			if (pc->k >= buflen || X >= buflen - pc->k) {
#ifdef _KERNEL
				const struct mbuf *m;
				int len;

				if (buflen != 0)
					return 0;
				m = (const struct mbuf *)p;
				MINDEX(len, m, k);
				A = mtod(m, u_char *)[k];
				continue;
#else
				return 0;
#endif
			}
			A = p[k];
			continue;

		case BPF_LDX|BPF_MSH|BPF_B:
			k = pc->k;
			if (k >= buflen) {
#ifdef _KERNEL
				const struct mbuf *m;
				int len;

				if (buflen != 0)
					return 0;
				m = (const struct mbuf *)p;
				MINDEX(len, m, k);
				X = (mtod(m, char *)[k] & 0xf) << 2;
				continue;
#else
				return 0;
#endif
			}
			X = (p[pc->k] & 0xf) << 2;
			continue;

		case BPF_LD|BPF_IMM:
			A = pc->k;
			continue;

		case BPF_LDX|BPF_IMM:
			X = pc->k;
			continue;

		case BPF_LD|BPF_MEM:
			A = mem[pc->k];
			continue;

		case BPF_LDX|BPF_MEM:
			X = mem[pc->k];
			continue;

		case BPF_ST:
			mem[pc->k] = A;
			continue;

		case BPF_STX:
			mem[pc->k] = X;
			continue;

		case BPF_JMP|BPF_JA:
			pc += pc->k;
			continue;

		case BPF_JMP|BPF_JGT|BPF_K:
			pc += (A > pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGE|BPF_K:
			pc += (A >= pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JEQ|BPF_K:
			pc += (A == pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JSET|BPF_K:
			pc += (A & pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGT|BPF_X:
			pc += (A > X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGE|BPF_X:
			pc += (A >= X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JEQ|BPF_X:
			pc += (A == X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JSET|BPF_X:
			pc += (A & X) ? pc->jt : pc->jf;
			continue;

		case BPF_ALU|BPF_ADD|BPF_X:
			A += X;
			continue;

		case BPF_ALU|BPF_SUB|BPF_X:
			A -= X;
			continue;

		case BPF_ALU|BPF_MUL|BPF_X:
			A *= X;
			continue;

		case BPF_ALU|BPF_DIV|BPF_X:
			if (X == 0)
				return 0;
			A /= X;
			continue;

		case BPF_ALU|BPF_AND|BPF_X:
			A &= X;
			continue;

		case BPF_ALU|BPF_OR|BPF_X:
			A |= X;
			continue;

		case BPF_ALU|BPF_LSH|BPF_X:
			A <<= X;
			continue;

		case BPF_ALU|BPF_RSH|BPF_X:
			A >>= X;
			continue;

		case BPF_ALU|BPF_ADD|BPF_K:
			A += pc->k;
			continue;

		case BPF_ALU|BPF_SUB|BPF_K:
			A -= pc->k;
			continue;

		case BPF_ALU|BPF_MUL|BPF_K:
			A *= pc->k;
			continue;

		case BPF_ALU|BPF_DIV|BPF_K:
			A /= pc->k;
			continue;

		case BPF_ALU|BPF_AND|BPF_K:
			A &= pc->k;
			continue;

		case BPF_ALU|BPF_OR|BPF_K:
			A |= pc->k;
			continue;

		case BPF_ALU|BPF_LSH|BPF_K:
			A <<= pc->k;
			continue;

		case BPF_ALU|BPF_RSH|BPF_K:
			A >>= pc->k;
			continue;

		case BPF_ALU|BPF_NEG:
			A = -A;
			continue;

		case BPF_MISC|BPF_TAX:
			X = A;
			continue;

		case BPF_MISC|BPF_TXA:
			A = X;
			continue;
		}
	}
}

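#if 0
/*
 * Illustrative sketch only, not compiled into the kernel: a minimal
 * filter program and how bpf_filter() would be applied to a plain
 * buffer.  bpf_filter() returns the number of bytes to keep (here the
 * whole packet) or 0 to drop it.  The names example_ip_prog,
 * example_match_ip, pkt and pktlen are hypothetical and not part of
 * this file.
 */
static const struct bpf_insn example_ip_prog[] = {
	/* A <- 16-bit type field at byte offset 12 of an Ethernet-like frame */
	BPF_STMT(BPF_LD|BPF_H|BPF_ABS, 12),
	/* accept if it is IPv4 (0x0800), otherwise fall through */
	BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, 0x0800, 0, 1),
	BPF_STMT(BPF_RET|BPF_K, (u_int)-1),	/* keep the whole packet */
	BPF_STMT(BPF_RET|BPF_K, 0),		/* drop it */
};

static u_int
example_match_ip(const u_char *pkt, u_int pktlen)
{

	return bpf_filter(example_ip_prog, pkt, pktlen, pktlen);
}
#endif
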
/*
 * Return true if the 'fcode' is a valid filter program.
 * The constraints are that each jump be forward and to a valid
 * instruction, that memory accesses are within valid ranges (to the
 * extent that this can be checked statically; loads of packet
 * data have to be, and are, also checked at run time), and that
 * the program terminates with either an accept or reject.
 *
 * The kernel needs to be able to verify an application's filter code.
 * Otherwise, a bogus program could easily crash the system.
 */

/*
 * The kernel build tracks which scratch memory words have been written
 * with one bit per word in a uint16_t, so BPF_MEMWORDS must fit exactly.
 */
__CTASSERT(BPF_MEMWORDS == sizeof(uint16_t) * NBBY);

int
bpf_validate(const struct bpf_insn *f, int signed_len)
{
	u_int i, from, len, ok = 0;
	const struct bpf_insn *p;
#if defined(KERNEL) || defined(_KERNEL)
	uint16_t *mem, invalid;
	size_t size;
#endif

	len = (u_int)signed_len;
	if (len < 1)
		return 0;
#if defined(KERNEL) || defined(_KERNEL)
	if (len > BPF_MAXINSNS)
		return 0;
#endif
	if (BPF_CLASS(f[len - 1].code) != BPF_RET)
		return 0;

#if defined(KERNEL) || defined(_KERNEL)
	mem = kmem_zalloc(size = sizeof(*mem) * len, KM_SLEEP);
	invalid = ~0;	/* all scratch memory words start out invalid */
#endif

	for (i = 0; i < len; ++i) {
#if defined(KERNEL) || defined(_KERNEL)
		/* blend in any invalid bits for current pc */
		invalid |= mem[i];
#endif
		p = &f[i];
		switch (BPF_CLASS(p->code)) {
		/*
		 * Check that memory operations use valid addresses.
		 */
		case BPF_LD:
		case BPF_LDX:
			switch (BPF_MODE(p->code)) {
			case BPF_MEM:
				/*
				 * There's no maximum packet data size
				 * in userland; the run-time packet
				 * length check suffices.
				 */
#if defined(KERNEL) || defined(_KERNEL)
				/*
				 * Scratch memory loads must be in
				 * range and may only read words that
				 * an earlier BPF_ST/BPF_STX has
				 * written; a stricter check against
				 * the actual packet length is done at
				 * run time.
				 */
				if (p->k >= BPF_MEMWORDS)
					goto out;
				/* check for current memory invalid */
				if (invalid & (1 << p->k))
					goto out;
#endif
				break;
			case BPF_ABS:
			case BPF_IND:
			case BPF_MSH:
			case BPF_IMM:
			case BPF_LEN:
				break;
			default:
				goto out;
			}
			break;
		case BPF_ST:
		case BPF_STX:
			if (p->k >= BPF_MEMWORDS)
				goto out;
#if defined(KERNEL) || defined(_KERNEL)
			/* validate the memory word */
			invalid &= ~(1 << p->k);
#endif
			break;
		case BPF_ALU:
			switch (BPF_OP(p->code)) {
			case BPF_ADD:
			case BPF_SUB:
			case BPF_MUL:
			case BPF_OR:
			case BPF_AND:
			case BPF_LSH:
			case BPF_RSH:
			case BPF_NEG:
				break;
			case BPF_DIV:
				/*
				 * Check for constant division by 0.
				 */
				if (BPF_SRC(p->code) == BPF_K && p->k == 0)
					goto out;
				break;
			default:
				goto out;
			}
			break;
		case BPF_JMP:
			/*
			 * Check that jumps are within the code block,
			 * and that unconditional branches don't go
			 * backwards as a result of an overflow.
			 * Unconditional branches have a 32-bit offset,
			 * so they could overflow; we check to make
			 * sure they don't.  Conditional branches have
			 * an 8-bit offset, and the from address is <=
			 * BPF_MAXINSNS, and we assume that BPF_MAXINSNS
			 * is sufficiently small that adding 255 to it
			 * won't overflow.
			 *
			 * We know that len is <= BPF_MAXINSNS, and we
			 * assume that BPF_MAXINSNS is < the maximum size
			 * of a u_int, so that i + 1 doesn't overflow.
			 *
			 * For userland, we don't know that the from
			 * or len are <= BPF_MAXINSNS, but we know that
			 * from <= len, and, except on a 64-bit system,
			 * it's unlikely that len, if it truly reflects
			 * the size of the program we've been handed,
			 * will be anywhere near the maximum size of
			 * a u_int.  We also don't check for backward
			 * branches, as we currently support them in
			 * userland for the protochain operation.
			 */
			from = i + 1;
			switch (BPF_OP(p->code)) {
			case BPF_JA:
				if (from + p->k >= len)
					goto out;
#if defined(KERNEL) || defined(_KERNEL)
				if (from + p->k < from)
					goto out;
				/*
				 * mark the currently invalid bits for the
				 * destination
				 */
				mem[from + p->k] |= invalid;
				invalid = 0;
#endif
				break;
			case BPF_JEQ:
			case BPF_JGT:
			case BPF_JGE:
			case BPF_JSET:
				if (from + p->jt >= len || from + p->jf >= len)
					goto out;
#if defined(KERNEL) || defined(_KERNEL)
				/*
				 * mark the currently invalid bits for both
				 * possible jump destinations
				 */
				mem[from + p->jt] |= invalid;
				mem[from + p->jf] |= invalid;
				invalid = 0;
#endif
				break;
			default:
				goto out;
			}
			break;
		case BPF_RET:
			break;
		case BPF_MISC:
			break;
		default:
			goto out;
		}
	}
	ok = 1;
out:
#if defined(KERNEL) || defined(_KERNEL)
	kmem_free(mem, size);
#endif
	return ok;
}
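
#if 0
/*
 * Illustrative sketch only, not compiled into the kernel: a caller is
 * expected to run bpf_validate() over a user-supplied program before
 * ever executing it with bpf_filter().  With the kernel-only tracking
 * above, a program such as the one below is rejected because it loads
 * scratch word M[0] before any BPF_ST has written it.  The names
 * example_bad_prog and example_install are hypothetical.
 */
static const struct bpf_insn example_bad_prog[] = {
	BPF_STMT(BPF_LD|BPF_MEM, 0),	/* A <- M[0], never initialised */
	BPF_STMT(BPF_RET|BPF_A, 0),
};

static int
example_install(const struct bpf_insn *prog, int ninsns)
{

	if (!bpf_validate(prog, ninsns))
		return EINVAL;	/* reject the program */
	/* ... attach prog to the descriptor ... */
	return 0;
}
#endif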