/*	$NetBSD: bpf_filter.c,v 1.65 2014/06/25 09:51:34 alnsn Exp $	*/

/*-
 * Copyright (c) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)bpf_filter.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bpf_filter.c,v 1.65 2014/06/25 09:51:34 alnsn Exp $");

#if 0
#if !(defined(lint) || defined(KERNEL))
static const char rcsid[] =
    "@(#) Header: bpf_filter.c,v 1.33 97/04/26 13:37:18 leres Exp  (LBL)";
#endif
#endif

#include <sys/param.h>
#include <sys/time.h>
#include <sys/kmem.h>
#include <sys/endian.h>

#define	__BPF_PRIVATE
#include <net/bpf.h>

#ifdef _KERNEL

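/*
 * bpf_create: allocate a zero-initialised BPF context; bpf_destroy
 * releases it.  The context carries the coprocessor table and the
 * external memory-store settings used by the extended filter
 * interface (bpf_filter_ext / bpf_validate_ext) below.
 */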
bpf_ctx_t *
bpf_create(void)
{
	return kmem_zalloc(sizeof(bpf_ctx_t), KM_SLEEP);
}

void
bpf_destroy(bpf_ctx_t *bc)
{
	kmem_free(bc, sizeof(bpf_ctx_t));
}

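/*
 * bpf_set_cop: attach an array of 'n' coprocessor functions to the
 * context.  BPF_COP and BPF_COPX instructions index into this array.
 */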
int
bpf_set_cop(bpf_ctx_t *bc, const bpf_copfunc_t *funcs, size_t n)
{
	bc->copfuncs = funcs;
	bc->nfuncs = n;
	return 0;
}

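/*
 * bpf_set_extmem: configure an external memory store of 'nwords' words.
 * 'preinited' is a bitmask of the words that the caller initialises
 * before each filter run; bpf_validate_ext() treats those words as
 * readable from the start.
 */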
int
bpf_set_extmem(bpf_ctx_t *bc, size_t nwords, bpf_memword_init_t preinited)
{
	if (nwords > BPF_MAX_MEMWORDS || (preinited >> nwords) != 0) {
		return EINVAL;
	}
	bc->extwords = nwords;
	bc->preinited = preinited;
	return 0;
}

#endif

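/* Extract 16-bit and 32-bit big-endian (network order) values. */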
#define EXTRACT_SHORT(p)	be16dec(p)
#define EXTRACT_LONG(p)		be32dec(p)

#ifdef _KERNEL
#include <sys/mbuf.h>
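/*
 * MINDEX: advance through the mbuf chain until the mbuf that contains
 * offset 'k' is found, rewriting 'k' as an offset within that mbuf.
 * If the chain is exhausted first, return 0 from the enclosing function.
 */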
#define MINDEX(len, m, k) 		\
{					\
	len = m->m_len; 		\
	while (k >= len) { 		\
		k -= len; 		\
		m = m->m_next; 		\
		if (m == 0) 		\
			return 0; 	\
		len = m->m_len; 	\
	}				\
}

uint32_t m_xword(const struct mbuf *, uint32_t, int *);
uint32_t m_xhalf(const struct mbuf *, uint32_t, int *);
uint32_t m_xbyte(const struct mbuf *, uint32_t, int *);

#define xword(p, k, err) m_xword((const struct mbuf *)(p), (k), (err))
#define xhalf(p, k, err) m_xhalf((const struct mbuf *)(p), (k), (err))
#define xbyte(p, k, err) m_xbyte((const struct mbuf *)(p), (k), (err))

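/*
 * m_xword/m_xhalf/m_xbyte: load a 32-bit word, a 16-bit halfword or a
 * byte in network byte order from offset 'k' of an mbuf chain, handling
 * loads that straddle an mbuf boundary.  On failure, *err is set to 1
 * and 0 is returned.  Note that m_xbyte never reports an error: a byte
 * load beyond the end of the chain simply yields 0.
 */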
uint32_t
m_xword(const struct mbuf *m, uint32_t k, int *err)
{
	int len;
	u_char *cp, *np;
	struct mbuf *m0;

	*err = 1;
	MINDEX(len, m, k);
	cp = mtod(m, u_char *) + k;
	if (len >= k + 4) {
		*err = 0;
		return EXTRACT_LONG(cp);
	}
	m0 = m->m_next;
	if (m0 == 0 || m0->m_len + len - k < 4)
		return 0;
	*err = 0;
	np = mtod(m0, u_char *);

	switch (len - k) {
	case 1:
		return (cp[0] << 24) | (np[0] << 16) | (np[1] << 8) | np[2];
	case 2:
		return (cp[0] << 24) | (cp[1] << 16) | (np[0] << 8) | np[1];
	default:
		return (cp[0] << 24) | (cp[1] << 16) | (cp[2] << 8) | np[0];
	}
}

uint32_t
m_xhalf(const struct mbuf *m, uint32_t k, int *err)
{
	int len;
	u_char *cp;
	struct mbuf *m0;

	*err = 1;
	MINDEX(len, m, k);
	cp = mtod(m, u_char *) + k;
	if (len >= k + 2) {
		*err = 0;
		return EXTRACT_SHORT(cp);
	}
	m0 = m->m_next;
	if (m0 == 0)
		return 0;
	*err = 0;
	return (cp[0] << 8) | mtod(m0, u_char *)[0];
}

uint32_t
m_xbyte(const struct mbuf *m, uint32_t k, int *err)
{
	int len;

	*err = 0;
	MINDEX(len, m, k);
	return mtod(m, u_char *)[k];
}
#else /* _KERNEL */
#include <stdlib.h>
#endif /* !_KERNEL */

#include <net/bpf.h>

/*
 * Execute the filter program starting at pc on the packet p.
 * wirelen is the length of the original packet and buflen is the
 * amount of packet data actually present in the buffer.
 */
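/*
 * For illustration only: a minimal filter program that accepts IPv4
 * packets on an Ethernet link and rejects everything else might look
 * like the following (offset 12 is the Ethernet type field and 0x0800
 * is ETHERTYPE_IP):
 *
 *	struct bpf_insn insns[] = {
 *		BPF_STMT(BPF_LD|BPF_H|BPF_ABS, 12),
 *		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, 0x0800, 0, 1),
 *		BPF_STMT(BPF_RET|BPF_K, (u_int)-1),
 *		BPF_STMT(BPF_RET|BPF_K, 0),
 *	};
 *
 * It would be passed to bpf_filter() together with the packet, its
 * on-wire length and the number of bytes actually present; a non-zero
 * return value is the number of bytes to accept, zero means reject.
 */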
#ifdef _KERNEL

u_int
bpf_filter(const struct bpf_insn *pc, const u_char *p, u_int wirelen,
    u_int buflen)
{
	uint32_t mem[BPF_MEMWORDS];
	bpf_args_t args = {
		.pkt = p,
		.wirelen = wirelen,
		.buflen = buflen,
		.mem = mem,
		.arg = NULL
	};

	return bpf_filter_ext(NULL, pc, &args);
}

u_int
bpf_filter_ext(const bpf_ctx_t *bc, const struct bpf_insn *pc, bpf_args_t *args)
#else
u_int
bpf_filter(const struct bpf_insn *pc, const u_char *p, u_int wirelen,
    u_int buflen)
#endif
{
	uint32_t A, X, k;
#ifndef _KERNEL
	uint32_t mem[BPF_MEMWORDS];
	bpf_args_t args_store = {
		.pkt = p,
		.wirelen = wirelen,
		.buflen = buflen,
		.mem = mem,
		.arg = NULL
	};
	bpf_args_t * const args = &args_store;
#else
	const uint8_t * const p = args->pkt;
#endif
	if (pc == 0) {
		/*
		 * No filter means accept all.
		 */
		return (u_int)-1;
	}

	/*
	 * Note: it is safe to leave the memory words uninitialised, as the
	 * validation step ensures that they will not be read unless they
	 * have been written first.
	 */
	A = 0;
	X = 0;
	--pc;

	for (;;) {
		++pc;
		switch (pc->code) {

		default:
#ifdef _KERNEL
			return 0;
#else
			abort();
			/*NOTREACHED*/
#endif
		case BPF_RET|BPF_K:
			return (u_int)pc->k;

		case BPF_RET|BPF_A:
			return (u_int)A;

		case BPF_LD|BPF_W|BPF_ABS:
			k = pc->k;
			if (k > args->buflen ||
			    sizeof(int32_t) > args->buflen - k) {
#ifdef _KERNEL
				int merr;

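				/*
				 * A zero buflen indicates that args->pkt
				 * actually refers to an mbuf chain, so
				 * retry the load with the chain-walking
				 * helpers; otherwise the offset is simply
				 * out of range.
				 */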
				if (args->buflen != 0)
					return 0;
				A = xword(args->pkt, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = EXTRACT_LONG(&p[k]);
			continue;

		case BPF_LD|BPF_H|BPF_ABS:
			k = pc->k;
			if (k > args->buflen ||
			    sizeof(int16_t) > args->buflen - k) {
#ifdef _KERNEL
				int merr;

				if (args->buflen != 0)
					return 0;
				A = xhalf(args->pkt, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = EXTRACT_SHORT(&p[k]);
			continue;

		case BPF_LD|BPF_B|BPF_ABS:
			k = pc->k;
			if (k >= args->buflen) {
#ifdef _KERNEL
				int merr;

				if (args->buflen != 0)
					return 0;
				A = xbyte(args->pkt, k, &merr);
				continue;
#else
				return 0;
#endif
			}
			A = p[k];
			continue;

		case BPF_LD|BPF_W|BPF_LEN:
			A = args->wirelen;
			continue;

		case BPF_LDX|BPF_W|BPF_LEN:
			X = args->wirelen;
			continue;

		case BPF_LD|BPF_W|BPF_IND:
			k = X + pc->k;
			if (pc->k > args->buflen ||
			    X > args->buflen - pc->k ||
			    sizeof(int32_t) > args->buflen - k) {
#ifdef _KERNEL
				int merr;

				if (args->buflen != 0)
					return 0;
				A = xword(args->pkt, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = EXTRACT_LONG(&p[k]);
			continue;

		case BPF_LD|BPF_H|BPF_IND:
			k = X + pc->k;
			if (pc->k > args->buflen ||
			    X > args->buflen - pc->k ||
			    sizeof(int16_t) > args->buflen - k) {
#ifdef _KERNEL
				int merr;

				if (args->buflen != 0)
					return 0;
				A = xhalf(args->pkt, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = EXTRACT_SHORT(&p[k]);
			continue;

		case BPF_LD|BPF_B|BPF_IND:
			k = X + pc->k;
			if (pc->k >= args->buflen ||
			    X >= args->buflen - pc->k) {
#ifdef _KERNEL
				int merr;

				if (args->buflen != 0)
					return 0;
				A = xbyte(args->pkt, k, &merr);
				continue;
#else
				return 0;
#endif
			}
			A = p[k];
			continue;

		case BPF_LDX|BPF_MSH|BPF_B:
			k = pc->k;
			if (k >= args->buflen) {
#ifdef _KERNEL
				int merr;

				if (args->buflen != 0)
					return 0;
				X = (xbyte(args->pkt, k, &merr) & 0xf) << 2;
				continue;
#else
				return 0;
#endif
			}
			X = (p[pc->k] & 0xf) << 2;
			continue;

		case BPF_LD|BPF_IMM:
			A = pc->k;
			continue;

		case BPF_LDX|BPF_IMM:
			X = pc->k;
			continue;

		case BPF_LD|BPF_MEM:
			A = args->mem[pc->k];
			continue;

		case BPF_LDX|BPF_MEM:
			X = args->mem[pc->k];
			continue;

		case BPF_ST:
			args->mem[pc->k] = A;
			continue;

		case BPF_STX:
			args->mem[pc->k] = X;
			continue;

		case BPF_JMP|BPF_JA:
			pc += pc->k;
			continue;

		case BPF_JMP|BPF_JGT|BPF_K:
			pc += (A > pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGE|BPF_K:
			pc += (A >= pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JEQ|BPF_K:
			pc += (A == pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JSET|BPF_K:
			pc += (A & pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGT|BPF_X:
			pc += (A > X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGE|BPF_X:
			pc += (A >= X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JEQ|BPF_X:
			pc += (A == X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JSET|BPF_X:
			pc += (A & X) ? pc->jt : pc->jf;
			continue;

		case BPF_ALU|BPF_ADD|BPF_X:
			A += X;
			continue;

		case BPF_ALU|BPF_SUB|BPF_X:
			A -= X;
			continue;

		case BPF_ALU|BPF_MUL|BPF_X:
			A *= X;
			continue;

		case BPF_ALU|BPF_DIV|BPF_X:
			if (X == 0)
				return 0;
			A /= X;
			continue;

		case BPF_ALU|BPF_AND|BPF_X:
			A &= X;
			continue;

		case BPF_ALU|BPF_OR|BPF_X:
			A |= X;
			continue;

		case BPF_ALU|BPF_LSH|BPF_X:
			A <<= X;
			continue;

		case BPF_ALU|BPF_RSH|BPF_X:
			A >>= X;
			continue;

		case BPF_ALU|BPF_ADD|BPF_K:
			A += pc->k;
			continue;

		case BPF_ALU|BPF_SUB|BPF_K:
			A -= pc->k;
			continue;

		case BPF_ALU|BPF_MUL|BPF_K:
			A *= pc->k;
			continue;

		case BPF_ALU|BPF_DIV|BPF_K:
			A /= pc->k;
			continue;

		case BPF_ALU|BPF_AND|BPF_K:
			A &= pc->k;
			continue;

		case BPF_ALU|BPF_OR|BPF_K:
			A |= pc->k;
			continue;

		case BPF_ALU|BPF_LSH|BPF_K:
			A <<= pc->k;
			continue;

		case BPF_ALU|BPF_RSH|BPF_K:
			A >>= pc->k;
			continue;

		case BPF_ALU|BPF_NEG:
			A = -A;
			continue;

		case BPF_MISC|BPF_TAX:
			X = A;
			continue;

		case BPF_MISC|BPF_TXA:
			A = X;
			continue;

		case BPF_MISC|BPF_COP:
#ifdef _KERNEL
			if (pc->k < bc->nfuncs) {
				const bpf_copfunc_t fn = bc->copfuncs[pc->k];
				A = fn(bc, args, A);
				continue;
			}
#endif
			return 0;

		case BPF_MISC|BPF_COPX:
#ifdef _KERNEL
			if (X < bc->nfuncs) {
				const bpf_copfunc_t fn = bc->copfuncs[X];
				A = fn(bc, args, A);
				continue;
			}
#endif
			return 0;
		}
	}
}

/*
 * Return true if the 'fcode' is a valid filter program.
 * The constraints are that each jump be forward and to a valid
 * instruction, that memory accesses are within valid ranges (to the
 * extent that this can be checked statically; loads of packet
 * data have to be, and are, also checked at run time), and that
 * the code terminates with either an accept or reject.
 *
 * The kernel needs to be able to verify an application's filter code.
 * Otherwise, a bogus program could easily crash the system.
 */
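/*
 * For example, when called through the in-kernel bpf_validate(), the
 * two-instruction program
 *
 *	BPF_STMT(BPF_LD|BPF_MEM, 3),
 *	BPF_STMT(BPF_RET|BPF_A, 0),
 *
 * is rejected: memory word 3 is loaded before any path through the
 * program has stored to it, and it is not pre-initialised.
 */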

#if defined(KERNEL) || defined(_KERNEL)

int
bpf_validate(const struct bpf_insn *f, int signed_len)
{
	return bpf_validate_ext(NULL, f, signed_len);
}

int
bpf_validate_ext(const bpf_ctx_t *bc, const struct bpf_insn *f, int signed_len)
#else
int
bpf_validate(const struct bpf_insn *f, int signed_len)
#endif
{
	u_int i, from, len, ok = 0;
	const struct bpf_insn *p;
#if defined(KERNEL) || defined(_KERNEL)
	bpf_memword_init_t *mem, invalid;
	size_t size;
	const size_t extwords = bc ? bc->extwords : 0;
	const size_t memwords = extwords ? extwords : BPF_MEMWORDS;
	const bpf_memword_init_t preinited = extwords ? bc->preinited : 0;
#else
	const size_t memwords = BPF_MEMWORDS;
#endif

	len = (u_int)signed_len;
	if (len < 1)
		return 0;
#if defined(KERNEL) || defined(_KERNEL)
	if (len > BPF_MAXINSNS)
		return 0;
#endif
	if (BPF_CLASS(f[len - 1].code) != BPF_RET)
		return 0;

#if defined(KERNEL) || defined(_KERNEL)
	/* Note: only the pre-initialised memory words are valid on startup. */
	mem = kmem_zalloc(size = sizeof(*mem) * len, KM_SLEEP);
	invalid = ~preinited;
#endif

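	/*
	 * The 'invalid' bitmask tracks which memory words may still be
	 * uninitialised along the straight-line path currently being
	 * scanned.  At every jump the mask is merged into mem[] for each
	 * possible destination and picked up again when that destination
	 * is reached, so a load is rejected if the word may still be
	 * uninitialised on some path to the load (unless the word is
	 * pre-initialised by the caller).
	 */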
	for (i = 0; i < len; ++i) {
#if defined(KERNEL) || defined(_KERNEL)
		/* blend in any invalid bits for current pc */
		invalid |= mem[i];
#endif
		p = &f[i];
		switch (BPF_CLASS(p->code)) {
		/*
		 * Check that memory operations use valid addresses.
		 */
		case BPF_LD:
		case BPF_LDX:
			switch (BPF_MODE(p->code)) {
			case BPF_MEM:
				/*
				 * There's no maximum packet data size
				 * in userland.  The runtime packet length
				 * check suffices.
				 */
#if defined(KERNEL) || defined(_KERNEL)
				/*
				 * A stricter check against the actual
				 * packet length is done at run time.
				 */
				if (p->k >= memwords)
					goto out;
				/* reject a load of a word that may still be uninitialised */
				if (invalid & BPF_MEMWORD_INIT(p->k))
					goto out;
#endif
				break;
			case BPF_ABS:
			case BPF_IND:
			case BPF_MSH:
			case BPF_IMM:
			case BPF_LEN:
				break;
			default:
				goto out;
			}
			break;
		case BPF_ST:
		case BPF_STX:
			if (p->k >= memwords)
				goto out;
#if defined(KERNEL) || defined(_KERNEL)
			/* the store makes this memory word valid */
			invalid &= ~BPF_MEMWORD_INIT(p->k);
#endif
			break;
		case BPF_ALU:
			switch (BPF_OP(p->code)) {
			case BPF_ADD:
			case BPF_SUB:
			case BPF_MUL:
			case BPF_OR:
			case BPF_AND:
			case BPF_LSH:
			case BPF_RSH:
			case BPF_NEG:
				break;
			case BPF_DIV:
				/*
				 * Check for constant division by 0.
				 */
				if (BPF_SRC(p->code) == BPF_K && p->k == 0)
					goto out;
				break;
			default:
				goto out;
			}
			break;
		case BPF_JMP:
			/*
			 * Check that jumps are within the code block,
			 * and that unconditional branches don't go
			 * backwards as a result of an overflow.
			 * Unconditional branches have a 32-bit offset,
			 * so they could overflow; we check to make
			 * sure they don't.  Conditional branches have
			 * an 8-bit offset, and the from address is <=
			 * BPF_MAXINSNS, and we assume that BPF_MAXINSNS
			 * is sufficiently small that adding 255 to it
			 * won't overflow.
			 *
			 * We know that len is <= BPF_MAXINSNS, and we
			 * assume that BPF_MAXINSNS is < the maximum size
			 * of a u_int, so that i + 1 doesn't overflow.
			 *
			 * For userland, we don't know that the from
			 * or len are <= BPF_MAXINSNS, but we know that
			 * from <= len, and, except on a 64-bit system,
			 * it's unlikely that len, if it truly reflects
			 * the size of the program we've been handed,
			 * will be anywhere near the maximum size of
			 * a u_int.  We also don't check for backward
			 * branches, as we currently support them in
			 * userland for the protochain operation.
			 */
			from = i + 1;
			switch (BPF_OP(p->code)) {
			case BPF_JA:
				if (from + p->k >= len)
					goto out;
#if defined(KERNEL) || defined(_KERNEL)
				if (from + p->k < from)
					goto out;
				/*
				 * mark the currently invalid bits for the
				 * destination
				 */
				mem[from + p->k] |= invalid;
				invalid = 0;
#endif
				break;
			case BPF_JEQ:
			case BPF_JGT:
			case BPF_JGE:
			case BPF_JSET:
				if (from + p->jt >= len || from + p->jf >= len)
					goto out;
#if defined(KERNEL) || defined(_KERNEL)
				/*
				 * mark the currently invalid bits for both
				 * possible jump destinations
				 */
				mem[from + p->jt] |= invalid;
				mem[from + p->jf] |= invalid;
				invalid = 0;
#endif
				break;
			default:
				goto out;
			}
			break;
		case BPF_RET:
			break;
		case BPF_MISC:
			switch (BPF_MISCOP(p->code)) {
			case BPF_COP:
			case BPF_COPX:
				/* In-kernel COP use only. */
#if defined(KERNEL) || defined(_KERNEL)
				if (bc == NULL || bc->copfuncs == NULL)
					goto out;
				if (BPF_MISCOP(p->code) == BPF_COP &&
				    p->k >= bc->nfuncs) {
					goto out;
				}
				break;
#else
				goto out;
#endif
			default:
				break;
			}
			break;
		default:
			goto out;
		}
	}
	ok = 1;
out:
#if defined(KERNEL) || defined(_KERNEL)
	kmem_free(mem, size);
#endif
	return ok;
}