xref: /netbsd-src/sys/net/bpf_filter.c (revision e61202360d5611414dd6f6115934a96aa1f50b1a)
1 /*	$NetBSD: bpf_filter.c,v 1.54 2012/09/27 18:28:56 alnsn Exp $	*/
2 
3 /*-
4  * Copyright (c) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * This code is derived from the Stanford/CMU enet packet filter,
8  * (net/enet.c) distributed as part of 4.3BSD, and code contributed
9  * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
10  * Berkeley Laboratory.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  *	@(#)bpf_filter.c	8.1 (Berkeley) 6/10/93
37  */
38 
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: bpf_filter.c,v 1.54 2012/09/27 18:28:56 alnsn Exp $");
41 
42 #if 0
43 #if !(defined(lint) || defined(KERNEL))
44 static const char rcsid[] =
45     "@(#) Header: bpf_filter.c,v 1.33 97/04/26 13:37:18 leres Exp  (LBL)";
46 #endif
47 #endif
48 
49 #include <sys/param.h>
50 #include <sys/time.h>
51 #include <sys/kmem.h>
52 #include <sys/endian.h>
53 
/*
 * Fetch 16-/32-bit quantities in network (big-endian) byte order from
 * an arbitrarily aligned byte pointer.
 */
54 #define EXTRACT_SHORT(p)	be16dec(p)
55 #define EXTRACT_LONG(p)		be32dec(p)
56 
57 #ifdef _KERNEL
58 #include <sys/mbuf.h>
/*
 * Advance along an mbuf chain until offset `k' falls inside the current
 * mbuf.  Updates `len', `m' and `k' IN PLACE (all three arguments are
 * evaluated multiple times — pass only simple lvalues), leaving `k' as
 * an offset within `m' and `len' as m->m_len.
 *
 * WARNING: if the chain is exhausted this does `return 0' from the
 * ENCLOSING function, i.e. the caller's caller sees a reject/error.
 */
59 #define MINDEX(len, m, k) 		\
60 {					\
61 	len = m->m_len; 		\
62 	while (k >= len) { 		\
63 		k -= len; 		\
64 		m = m->m_next; 		\
65 		if (m == 0) 		\
66 			return 0; 	\
67 		len = m->m_len; 	\
68 	}				\
69 }
70 
71 static int m_xword (const struct mbuf *, uint32_t, int *);
72 static int m_xhalf (const struct mbuf *, uint32_t, int *);
73 
74 static int
75 m_xword(const struct mbuf *m, uint32_t k, int *err)
76 {
77 	int len;
78 	u_char *cp, *np;
79 	struct mbuf *m0;
80 
81 	*err = 1;
82 	MINDEX(len, m, k);
83 	cp = mtod(m, u_char *) + k;
84 	if (len >= k + 4) {
85 		*err = 0;
86 		return EXTRACT_LONG(cp);
87 	}
88 	m0 = m->m_next;
89 	if (m0 == 0 || m0->m_len + len - k < 4)
90 		return 0;
91 	*err = 0;
92 	np = mtod(m0, u_char *);
93 
94 	switch (len - k) {
95 	case 1:
96 		return (cp[0] << 24) | (np[0] << 16) | (np[1] << 8) | np[2];
97 	case 2:
98 		return (cp[0] << 24) | (cp[1] << 16) | (np[0] << 8) | np[1];
99 	default:
100 		return (cp[0] << 24) | (cp[1] << 16) | (cp[2] << 8) | np[0];
101 	}
102 }
103 
104 static int
105 m_xhalf(const struct mbuf *m, uint32_t k, int *err)
106 {
107 	int len;
108 	u_char *cp;
109 	struct mbuf *m0;
110 
111 	*err = 1;
112 	MINDEX(len, m, k);
113 	cp = mtod(m, u_char *) + k;
114 	if (len >= k + 2) {
115 		*err = 0;
116 		return EXTRACT_SHORT(cp);
117 	}
118 	m0 = m->m_next;
119 	if (m0 == 0)
120 		return 0;
121 	*err = 0;
122 	return (cp[0] << 8) | mtod(m0, u_char *)[0];
123 }
124 #else /* _KERNEL */
125 #include <stdlib.h>
126 #endif /* !_KERNEL */
127 
128 #include <net/bpf.h>
129 
130 /*
131  * Execute the filter program starting at pc on the packet p
132  * wirelen is the length of the original packet
133  * buflen is the amount of data present
 *
 * Returns 0 to drop the packet, or the number of bytes to accept
 * ((u_int)-1 meaning "the whole packet").
 *
 * Kernel convention: when buflen == 0, p is actually a `const struct
 * mbuf *' heading a chain, and loads that would fall outside a flat
 * buffer are serviced by m_xword()/m_xhalf()/MINDEX() above.
134  */
135 u_int
136 bpf_filter(const struct bpf_insn *pc, const u_char *p, u_int wirelen,
137     u_int buflen)
138 {
139 	uint32_t A, X, k;	/* accumulator, index register, scratch offset */
140 	uint32_t mem[BPF_MEMWORDS];	/* BPF scratch memory store */
141 
142 	if (pc == 0) {
143 		/*
144 		 * No filter means accept all.
145 		 */
146 		return (u_int)-1;
147 	}
148 
149 	/*
150 	 * Note: safe to leave memwords uninitialised, as the validation
151 	 * step ensures that it will not be read, if it was not written.
152 	 */
153 	A = 0;
154 	X = 0;
155 	--pc;
156 
157 	for (;;) {
158 		++pc;
159 		switch (pc->code) {
160 
161 		default:
			/* Unrecognised opcode: drop in-kernel, abort in userland. */
162 #ifdef _KERNEL
163 			return 0;
164 #else
165 			abort();
166 			/*NOTREACHED*/
167 #endif
168 		case BPF_RET|BPF_K:
169 			return (u_int)pc->k;
170 
171 		case BPF_RET|BPF_A:
172 			return (u_int)A;
173 
174 		case BPF_LD|BPF_W|BPF_ABS:
175 			k = pc->k;
			/* Overflow-proof bounds check: k and k+4 must fit in buflen. */
176 			if (k > buflen || sizeof(int32_t) > buflen - k) {
177 #ifdef _KERNEL
178 				int merr;
179 
				/* buflen != 0 means flat buffer: truly out of range. */
180 				if (buflen != 0)
181 					return 0;
182 				A = m_xword((const struct mbuf *)p, k, &merr);
183 				if (merr != 0)
184 					return 0;
185 				continue;
186 #else
187 				return 0;
188 #endif
189 			}
190 			A = EXTRACT_LONG(&p[k]);
191 			continue;
192 
193 		case BPF_LD|BPF_H|BPF_ABS:
194 			k = pc->k;
195 			if (k > buflen || sizeof(int16_t) > buflen - k) {
196 #ifdef _KERNEL
197 				int merr;
198 
199 				if (buflen != 0)
200 					return 0;
201 				A = m_xhalf((const struct mbuf *)p, k, &merr);
202 				if (merr != 0)
203 					return 0;
204 				continue;
205 #else
206 				return 0;
207 #endif
208 			}
209 			A = EXTRACT_SHORT(&p[k]);
210 			continue;
211 
212 		case BPF_LD|BPF_B|BPF_ABS:
213 			k = pc->k;
214 			if (k >= buflen) {
215 #ifdef _KERNEL
216 				const struct mbuf *m;
217 				int len;
218 
219 				if (buflen != 0)
220 					return 0;
221 				m = (const struct mbuf *)p;
222 				MINDEX(len, m, k);
223 				A = mtod(m, u_char *)[k];
224 				continue;
225 #else
226 				return 0;
227 #endif
228 			}
229 			A = p[k];
230 			continue;
231 
232 		case BPF_LD|BPF_W|BPF_LEN:
233 			A = wirelen;
234 			continue;
235 
236 		case BPF_LDX|BPF_W|BPF_LEN:
237 			X = wirelen;
238 			continue;
239 
240 		case BPF_LD|BPF_W|BPF_IND:
241 			k = X + pc->k;
			/*
			 * Three-part test keeps the check overflow-proof even
			 * though k = X + pc->k may have wrapped around.
			 */
242 			if (pc->k > buflen || X > buflen - pc->k ||
243 			    sizeof(int32_t) > buflen - k) {
244 #ifdef _KERNEL
245 				int merr;
246 
247 				if (buflen != 0)
248 					return 0;
249 				A = m_xword((const struct mbuf *)p, k, &merr);
250 				if (merr != 0)
251 					return 0;
252 				continue;
253 #else
254 				return 0;
255 #endif
256 			}
257 			A = EXTRACT_LONG(&p[k]);
258 			continue;
259 
260 		case BPF_LD|BPF_H|BPF_IND:
261 			k = X + pc->k;
262 			if (pc->k > buflen || X > buflen - pc->k ||
263 			    sizeof(int16_t) > buflen - k) {
264 #ifdef _KERNEL
265 				int merr;
266 
267 				if (buflen != 0)
268 					return 0;
269 				A = m_xhalf((const struct mbuf *)p, k, &merr);
270 				if (merr != 0)
271 					return 0;
272 				continue;
273 #else
274 				return 0;
275 #endif
276 			}
277 			A = EXTRACT_SHORT(&p[k]);
278 			continue;
279 
280 		case BPF_LD|BPF_B|BPF_IND:
281 			k = X + pc->k;
282 			if (pc->k >= buflen || X >= buflen - pc->k) {
283 #ifdef _KERNEL
284 				const struct mbuf *m;
285 				int len;
286 
287 				if (buflen != 0)
288 					return 0;
289 				m = (const struct mbuf *)p;
290 				MINDEX(len, m, k);
291 				A = mtod(m, u_char *)[k];
292 				continue;
293 #else
294 				return 0;
295 #endif
296 			}
297 			A = p[k];
298 			continue;
299 
300 		case BPF_LDX|BPF_MSH|BPF_B:
			/* X <- 4 * (p[k] & 0xf): the IPv4 header-length idiom. */
301 			k = pc->k;
302 			if (k >= buflen) {
303 #ifdef _KERNEL
304 				const struct mbuf *m;
305 				int len;
306 
307 				if (buflen != 0)
308 					return 0;
309 				m = (const struct mbuf *)p;
310 				MINDEX(len, m, k);
311 				X = (mtod(m, char *)[k] & 0xf) << 2;
312 				continue;
313 #else
314 				return 0;
315 #endif
316 			}
317 			X = (p[pc->k] & 0xf) << 2;
318 			continue;
319 
320 		case BPF_LD|BPF_IMM:
321 			A = pc->k;
322 			continue;
323 
324 		case BPF_LDX|BPF_IMM:
325 			X = pc->k;
326 			continue;
327 
328 		case BPF_LD|BPF_MEM:
329 			A = mem[pc->k];
330 			continue;
331 
332 		case BPF_LDX|BPF_MEM:
333 			X = mem[pc->k];
334 			continue;
335 
336 		case BPF_ST:
337 			mem[pc->k] = A;
338 			continue;
339 
340 		case BPF_STX:
341 			mem[pc->k] = X;
342 			continue;
343 
344 		case BPF_JMP|BPF_JA:
345 			pc += pc->k;
346 			continue;
347 
348 		case BPF_JMP|BPF_JGT|BPF_K:
349 			pc += (A > pc->k) ? pc->jt : pc->jf;
350 			continue;
351 
352 		case BPF_JMP|BPF_JGE|BPF_K:
353 			pc += (A >= pc->k) ? pc->jt : pc->jf;
354 			continue;
355 
356 		case BPF_JMP|BPF_JEQ|BPF_K:
357 			pc += (A == pc->k) ? pc->jt : pc->jf;
358 			continue;
359 
360 		case BPF_JMP|BPF_JSET|BPF_K:
361 			pc += (A & pc->k) ? pc->jt : pc->jf;
362 			continue;
363 
364 		case BPF_JMP|BPF_JGT|BPF_X:
365 			pc += (A > X) ? pc->jt : pc->jf;
366 			continue;
367 
368 		case BPF_JMP|BPF_JGE|BPF_X:
369 			pc += (A >= X) ? pc->jt : pc->jf;
370 			continue;
371 
372 		case BPF_JMP|BPF_JEQ|BPF_X:
373 			pc += (A == X) ? pc->jt : pc->jf;
374 			continue;
375 
376 		case BPF_JMP|BPF_JSET|BPF_X:
377 			pc += (A & X) ? pc->jt : pc->jf;
378 			continue;
379 
380 		case BPF_ALU|BPF_ADD|BPF_X:
381 			A += X;
382 			continue;
383 
384 		case BPF_ALU|BPF_SUB|BPF_X:
385 			A -= X;
386 			continue;
387 
388 		case BPF_ALU|BPF_MUL|BPF_X:
389 			A *= X;
390 			continue;
391 
392 		case BPF_ALU|BPF_DIV|BPF_X:
			/* Runtime divide-by-zero drops the packet. */
393 			if (X == 0)
394 				return 0;
395 			A /= X;
396 			continue;
397 
398 		case BPF_ALU|BPF_AND|BPF_X:
399 			A &= X;
400 			continue;
401 
402 		case BPF_ALU|BPF_OR|BPF_X:
403 			A |= X;
404 			continue;
405 
406 		case BPF_ALU|BPF_LSH|BPF_X:
407 			A <<= X;
408 			continue;
409 
410 		case BPF_ALU|BPF_RSH|BPF_X:
411 			A >>= X;
412 			continue;
413 
414 		case BPF_ALU|BPF_ADD|BPF_K:
415 			A += pc->k;
416 			continue;
417 
418 		case BPF_ALU|BPF_SUB|BPF_K:
419 			A -= pc->k;
420 			continue;
421 
422 		case BPF_ALU|BPF_MUL|BPF_K:
423 			A *= pc->k;
424 			continue;
425 
426 		case BPF_ALU|BPF_DIV|BPF_K:
			/* pc->k == 0 is rejected by bpf_validate() below. */
427 			A /= pc->k;
428 			continue;
429 
430 		case BPF_ALU|BPF_AND|BPF_K:
431 			A &= pc->k;
432 			continue;
433 
434 		case BPF_ALU|BPF_OR|BPF_K:
435 			A |= pc->k;
436 			continue;
437 
438 		case BPF_ALU|BPF_LSH|BPF_K:
439 			A <<= pc->k;
440 			continue;
441 
442 		case BPF_ALU|BPF_RSH|BPF_K:
443 			A >>= pc->k;
444 			continue;
445 
446 		case BPF_ALU|BPF_NEG:
			/* Two's-complement negate; well-defined on uint32_t. */
447 			A = -A;
448 			continue;
449 
450 		case BPF_MISC|BPF_TAX:
451 			X = A;
452 			continue;
453 
454 		case BPF_MISC|BPF_TXA:
455 			A = X;
456 			continue;
457 		}
458 	}
459 }
460 
461 /*
462  * Return true if the 'fcode' is a valid filter program.
463  * The constraints are that each jump be forward and to a valid
464  * code, that memory accesses are within valid ranges (to the
465  * extent that this can be checked statically; loads of packet
466  * data have to be, and are, also checked at run time), and that
467  * the code terminates with either an accept or reject.
468  *
469  * The kernel needs to be able to verify an application's filter code.
470  * Otherwise, a bogus program could easily crash the system.
471  */
/*
 * The kernel tracks "memory word not yet written" state in a 16-bit
 * mask (`invalid' below), one bit per scratch memory word, so
 * BPF_MEMWORDS must be exactly 16.
 */
472 __CTASSERT(BPF_MEMWORDS == sizeof(uint16_t) * NBBY);
473 
/*
 * f: the filter program; signed_len: its instruction count.
 * Returns 1 if the program is acceptable, 0 otherwise.
 */
474 int
475 bpf_validate(const struct bpf_insn *f, int signed_len)
476 {
477 	u_int i, from, len, ok = 0;
478 	const struct bpf_insn *p;
479 #if defined(KERNEL) || defined(_KERNEL)
	/* mem[i]: invalid-memword bits carried into insn i via jumps. */
480 	uint16_t *mem, invalid;
481 	size_t size;
482 #endif
483 
	/* A negative length wraps to a huge u_int; caught by len < 1 or
	 * (in-kernel) the BPF_MAXINSNS bound. */
484 	len = (u_int)signed_len;
485 	if (len < 1)
486 		return 0;
487 #if defined(KERNEL) || defined(_KERNEL)
488 	if (len > BPF_MAXINSNS)
489 		return 0;
490 #endif
	/* Program must end in a return, so execution cannot run off the end. */
491 	if (BPF_CLASS(f[len - 1].code) != BPF_RET)
492 		return 0;
493 
494 #if defined(KERNEL) || defined(_KERNEL)
	/* KM_SLEEP waits for memory rather than returning NULL. */
495 	mem = kmem_zalloc(size = sizeof(*mem) * len, KM_SLEEP);
496 	invalid = ~0;	/* All is invalid on startup */
497 #endif
498 
499 	for (i = 0; i < len; ++i) {
500 #if defined(KERNEL) || defined(_KERNEL)
501 		/* blend in any invalid bits for current pc */
502 		invalid |= mem[i];
503 #endif
504 		p = &f[i];
505 		switch (BPF_CLASS(p->code)) {
506 		/*
507 		 * Check that memory operations use valid addresses.
508 		 */
509 		case BPF_LD:
510 		case BPF_LDX:
511 			switch (BPF_MODE(p->code)) {
512 			case BPF_MEM:
513 				/*
514 				 * There's no maximum packet data size
515 				 * in userland.  The runtime packet length
516 				 * check suffices.
517 				 */
518 #if defined(KERNEL) || defined(_KERNEL)
519 				/*
520 				 * More strict check with actual packet length
521 				 * is done runtime.
522 				 */
523 				if (p->k >= BPF_MEMWORDS)
524 					goto out;
525 				/* check for current memory invalid */
526 				if (invalid & (1 << p->k))
527 					goto out;
528 #endif
529 				break;
530 			case BPF_ABS:
531 			case BPF_IND:
532 			case BPF_MSH:
533 			case BPF_IMM:
534 			case BPF_LEN:
535 				break;
536 			default:
537 				goto out;
538 			}
539 			break;
540 		case BPF_ST:
541 		case BPF_STX:
542 			if (p->k >= BPF_MEMWORDS)
543 				goto out;
544 #if defined(KERNEL) || defined(_KERNEL)
545 			/* validate the memory word */
546 			invalid &= ~(1 << p->k);
547 #endif
548 			break;
549 		case BPF_ALU:
550 			switch (BPF_OP(p->code)) {
551 			case BPF_ADD:
552 			case BPF_SUB:
553 			case BPF_MUL:
554 			case BPF_OR:
555 			case BPF_AND:
556 			case BPF_LSH:
557 			case BPF_RSH:
558 			case BPF_NEG:
559 				break;
560 			case BPF_DIV:
561 				/*
562 				 * Check for constant division by 0.
563 				 */
564 				if (BPF_SRC(p->code) == BPF_K && p->k == 0)
565 					goto out;
566 				break;
567 			default:
568 				goto out;
569 			}
570 			break;
571 		case BPF_JMP:
572 			/*
573 			 * Check that jumps are within the code block,
574 			 * and that unconditional branches don't go
575 			 * backwards as a result of an overflow.
576 			 * Unconditional branches have a 32-bit offset,
577 			 * so they could overflow; we check to make
578 			 * sure they don't.  Conditional branches have
579 			 * an 8-bit offset, and the from address is <=
580 			 * BPF_MAXINSNS, and we assume that BPF_MAXINSNS
581 			 * is sufficiently small that adding 255 to it
582 			 * won't overflow.
583 			 *
584 			 * We know that len is <= BPF_MAXINSNS, and we
585 			 * assume that BPF_MAXINSNS is < the maximum size
586 			 * of a u_int, so that i + 1 doesn't overflow.
587 			 *
588 			 * For userland, we don't know that the from
589 			 * or len are <= BPF_MAXINSNS, but we know that
590 			 * from <= len, and, except on a 64-bit system,
591 			 * it's unlikely that len, if it truly reflects
592 			 * the size of the program we've been handed,
593 			 * will be anywhere near the maximum size of
594 			 * a u_int.  We also don't check for backward
595 			 * branches, as we currently support them in
596 			 * userland for the protochain operation.
597 			 */
598 			from = i + 1;
599 			switch (BPF_OP(p->code)) {
600 			case BPF_JA:
601 				if (from + p->k >= len)
602 					goto out;
603 #if defined(KERNEL) || defined(_KERNEL)
604 				if (from + p->k < from)
605 					goto out;
606 				/*
607 				 * mark the currently invalid bits for the
608 				 * destination
609 				 */
610 				mem[from + p->k] |= invalid;
611 				invalid = 0;
612 #endif
613 				break;
614 			case BPF_JEQ:
615 			case BPF_JGT:
616 			case BPF_JGE:
617 			case BPF_JSET:
618 				if (from + p->jt >= len || from + p->jf >= len)
619 					goto out;
620 #if defined(KERNEL) || defined(_KERNEL)
621 				/*
622 				 * mark the currently invalid bits for both
623 				 * possible jump destinations
624 				 */
625 				mem[from + p->jt] |= invalid;
626 				mem[from + p->jf] |= invalid;
627 				invalid = 0;
628 #endif
629 				break;
630 			default:
631 				goto out;
632 			}
633 			break;
634 		case BPF_RET:
635 			break;
636 		case BPF_MISC:
637 			break;
638 		default:
639 			goto out;
640 		}
641 	}
642 	ok = 1;
643 out:
644 #if defined(KERNEL) || defined(_KERNEL)
645 	kmem_free(mem, size);
646 #endif
647 	return ok;
648 }
649