/*	$NetBSD: bpf_filter.c,v 1.67 2014/07/07 19:56:03 alnsn Exp $	*/

/*-
 * Copyright (c) 1990, 1991, 1992, 1993, 1994, 1995, 1996, 1997
 *	The Regents of the University of California.  All rights reserved.
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)bpf_filter.c	8.1 (Berkeley) 6/10/93
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: bpf_filter.c,v 1.67 2014/07/07 19:56:03 alnsn Exp $");

#if 0
#if !(defined(lint) || defined(KERNEL))
static const char rcsid[] =
    "@(#) Header: bpf_filter.c,v 1.33 97/04/26 13:37:18 leres Exp  (LBL)";
#endif
#endif

#include <sys/param.h>
#include <sys/time.h>
#include <sys/kmem.h>
#include <sys/endian.h>

#define	__BPF_PRIVATE
#include <net/bpf.h>

#ifdef _KERNEL

bpf_ctx_t *
bpf_create(void)
{
	return kmem_zalloc(sizeof(bpf_ctx_t), KM_SLEEP);
}

void
bpf_destroy(bpf_ctx_t *bc)
{
	kmem_free(bc, sizeof(bpf_ctx_t));
}

int
bpf_set_cop(bpf_ctx_t *bc, const bpf_copfunc_t *funcs, size_t n)
{
	bc->copfuncs = funcs;
	bc->nfuncs = n;
	return 0;
}

int
bpf_set_extmem(bpf_ctx_t *bc, size_t nwords, bpf_memword_init_t preinited)
{
	if (nwords > BPF_MAX_MEMWORDS || (preinited >> nwords) != 0) {
		return EINVAL;
	}
	bc->extwords = nwords;
	bc->preinited = preinited;
	return 0;
}
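
/*
 * Illustrative sketch (hypothetical consumer, not part of this file): how
 * a kernel subsystem might set up a BPF context with one coprocessor
 * function and two external memory words, with word 0 pre-initialised by
 * the caller.  The my_cop name is invented for the example.
 *
 *	static uint32_t
 *	my_cop(const bpf_ctx_t *bc, bpf_args_t *args, uint32_t A)
 *	{
 *		return A + 1;
 *	}
 *	static const bpf_copfunc_t my_cop_funcs[] = { my_cop };
 *
 *	bpf_ctx_t *bc = bpf_create();
 *	bpf_set_cop(bc, my_cop_funcs, __arraycount(my_cop_funcs));
 *	bpf_set_extmem(bc, 2, BPF_MEMWORD_INIT(0));
 *	...
 *	bpf_destroy(bc);
 */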

#endif

#define EXTRACT_SHORT(p)	be16dec(p)
#define EXTRACT_LONG(p)		be32dec(p)

#ifdef _KERNEL
#include <sys/mbuf.h>
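/*
 * MINDEX() walks the mbuf chain so that, on exit, "m" points at the mbuf
 * containing packet offset "k", "k" has been rewritten as an offset within
 * that mbuf, and "len" holds that mbuf's length.  If the chain runs out
 * first, it returns 0 from the enclosing function, which is why the m_x*()
 * helpers below set *err = 1 before invoking it.
 */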
#define MINDEX(len, m, k) 		\
{					\
	len = m->m_len; 		\
	while (k >= len) { 		\
		k -= len; 		\
		m = m->m_next; 		\
		if (m == 0) 		\
			return 0; 	\
		len = m->m_len; 	\
	}				\
}

uint32_t m_xword(const struct mbuf *, uint32_t, int *);
uint32_t m_xhalf(const struct mbuf *, uint32_t, int *);
uint32_t m_xbyte(const struct mbuf *, uint32_t, int *);

#define xword(p, k, err) m_xword((const struct mbuf *)(p), (k), (err))
#define xhalf(p, k, err) m_xhalf((const struct mbuf *)(p), (k), (err))
#define xbyte(p, k, err) m_xbyte((const struct mbuf *)(p), (k), (err))

uint32_t
m_xword(const struct mbuf *m, uint32_t k, int *err)
{
	int len;
	u_char *cp, *np;
	struct mbuf *m0;

	*err = 1;
	MINDEX(len, m, k);
	cp = mtod(m, u_char *) + k;
	if (len - k >= 4) {
		*err = 0;
		return EXTRACT_LONG(cp);
	}
	m0 = m->m_next;
	if (m0 == 0 || (len - k) + m0->m_len < 4)
		return 0;
	*err = 0;
	np = mtod(m0, u_char *);

	switch (len - k) {
	case 1:
		return (cp[0] << 24) | (np[0] << 16) | (np[1] << 8) | np[2];
	case 2:
		return (cp[0] << 24) | (cp[1] << 16) | (np[0] << 8) | np[1];
	default:
		return (cp[0] << 24) | (cp[1] << 16) | (cp[2] << 8) | np[0];
	}
}
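
/*
 * Worked example (illustrative): if the requested word starts two bytes
 * before the end of the current mbuf, "len - k" is 2 above, so the word
 * is spliced from cp[0..1] (tail of this mbuf) and np[0..1] (head of the
 * next one), preserving network byte order just as EXTRACT_LONG() does on
 * a contiguous buffer.
 */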

uint32_t
m_xhalf(const struct mbuf *m, uint32_t k, int *err)
{
	int len;
	u_char *cp;
	struct mbuf *m0;

	*err = 1;
	MINDEX(len, m, k);
	cp = mtod(m, u_char *) + k;
	if (len - k >= 2) {
		*err = 0;
		return EXTRACT_SHORT(cp);
	}
	m0 = m->m_next;
	if (m0 == 0)
		return 0;
	*err = 0;
	return (cp[0] << 8) | mtod(m0, u_char *)[0];
}

uint32_t
m_xbyte(const struct mbuf *m, uint32_t k, int *err)
{
	int len;

	*err = 1;
	MINDEX(len, m, k);
	*err = 0;
	return mtod(m, u_char *)[k];
}
#else /* _KERNEL */
#include <stdlib.h>
#endif /* !_KERNEL */

#include <net/bpf.h>

/*
 * Execute the filter program, starting at pc, on the packet p.
 * wirelen is the length of the original packet and buflen is the
 * amount of packet data present in the buffer.
 */
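
/*
 * Example (illustrative sketch, not used by the kernel): a filter that
 * accepts IPv4 packets on an Ethernet-like link, returning a snapshot
 * length of 96 bytes, and rejects everything else.  The program and the
 * call below are hypothetical; only bpf_filter()/bpf_validate() are the
 * real entry points of this file.
 *
 *	static const struct bpf_insn ipv4_prog[] = {
 *		BPF_STMT(BPF_LD|BPF_H|BPF_ABS, 12),
 *		BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, 0x0800, 0, 1),
 *		BPF_STMT(BPF_RET|BPF_K, 96),
 *		BPF_STMT(BPF_RET|BPF_K, 0),
 *	};
 *
 *	if (bpf_validate(ipv4_prog, __arraycount(ipv4_prog)))
 *		nbytes = bpf_filter(ipv4_prog, pkt, wirelen, buflen);
 */
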
#ifdef _KERNEL

u_int
bpf_filter(const struct bpf_insn *pc, const u_char *p, u_int wirelen,
    u_int buflen)
{
	uint32_t mem[BPF_MEMWORDS];
	bpf_args_t args = {
		.pkt = p,
		.wirelen = wirelen,
		.buflen = buflen,
		.mem = mem,
		.arg = NULL
	};

	return bpf_filter_ext(NULL, pc, &args);
}

u_int
bpf_filter_ext(const bpf_ctx_t *bc, const struct bpf_insn *pc, bpf_args_t *args)
#else
u_int
bpf_filter(const struct bpf_insn *pc, const u_char *p, u_int wirelen,
    u_int buflen)
#endif
{
	uint32_t A, X, k;
#ifndef _KERNEL
	uint32_t mem[BPF_MEMWORDS];
	bpf_args_t args_store = {
		.pkt = p,
		.wirelen = wirelen,
		.buflen = buflen,
		.mem = mem,
		.arg = NULL
	};
	bpf_args_t * const args = &args_store;
#else
	const uint8_t * const p = args->pkt;
#endif
	if (pc == 0) {
		/*
		 * No filter means accept all.
		 */
		return (u_int)-1;
	}

	/*
	 * Note: it is safe to leave the memory words uninitialised, as the
	 * validation step ensures that a word is never read before it has
	 * been written.
	 */
	A = 0;
	X = 0;
	--pc;
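
	/*
	 * pc was pre-decremented above so that the interpreter loop below
	 * can advance it unconditionally at the top of every iteration;
	 * each case then just executes its instruction and continues.
	 */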

	for (;;) {
		++pc;
		switch (pc->code) {

		default:
#ifdef _KERNEL
			return 0;
#else
			abort();
			/*NOTREACHED*/
#endif
		case BPF_RET|BPF_K:
			return (u_int)pc->k;

		case BPF_RET|BPF_A:
			return (u_int)A;

		case BPF_LD|BPF_W|BPF_ABS:
			k = pc->k;
			if (k > args->buflen ||
			    sizeof(int32_t) > args->buflen - k) {
#ifdef _KERNEL
				int merr;

				if (args->buflen != 0)
					return 0;
				A = xword(args->pkt, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = EXTRACT_LONG(&p[k]);
			continue;

		case BPF_LD|BPF_H|BPF_ABS:
			k = pc->k;
			if (k > args->buflen ||
			    sizeof(int16_t) > args->buflen - k) {
#ifdef _KERNEL
				int merr;

				if (args->buflen != 0)
					return 0;
				A = xhalf(args->pkt, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = EXTRACT_SHORT(&p[k]);
			continue;

		case BPF_LD|BPF_B|BPF_ABS:
			k = pc->k;
			if (k >= args->buflen) {
#ifdef _KERNEL
				int merr;

				if (args->buflen != 0)
					return 0;
				A = xbyte(args->pkt, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = p[k];
			continue;

		case BPF_LD|BPF_W|BPF_LEN:
			A = args->wirelen;
			continue;

		case BPF_LDX|BPF_W|BPF_LEN:
			X = args->wirelen;
			continue;

		case BPF_LD|BPF_W|BPF_IND:
			k = X + pc->k;
			if (k < X || k >= args->buflen ||
			    sizeof(int32_t) > args->buflen - k) {
#ifdef _KERNEL
				int merr;

				if (k < X || args->buflen != 0)
					return 0;
				A = xword(args->pkt, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = EXTRACT_LONG(&p[k]);
			continue;

		case BPF_LD|BPF_H|BPF_IND:
			k = X + pc->k;
			if (k < X || k >= args->buflen ||
			    sizeof(int16_t) > args->buflen - k) {
#ifdef _KERNEL
				int merr;

				if (k < X || args->buflen != 0)
					return 0;
				A = xhalf(args->pkt, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = EXTRACT_SHORT(&p[k]);
			continue;

		case BPF_LD|BPF_B|BPF_IND:
			k = X + pc->k;
			if (k < X || k >= args->buflen) {
#ifdef _KERNEL
				int merr;

				if (k < X || args->buflen != 0)
					return 0;
				A = xbyte(args->pkt, k, &merr);
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			A = p[k];
			continue;

		case BPF_LDX|BPF_MSH|BPF_B:
			k = pc->k;
			if (k >= args->buflen) {
#ifdef _KERNEL
				int merr;

				if (args->buflen != 0)
					return 0;
				X = (xbyte(args->pkt, k, &merr) & 0xf) << 2;
				if (merr != 0)
					return 0;
				continue;
#else
				return 0;
#endif
			}
			X = (p[pc->k] & 0xf) << 2;
			continue;

		case BPF_LD|BPF_IMM:
			A = pc->k;
			continue;

		case BPF_LDX|BPF_IMM:
			X = pc->k;
			continue;

		case BPF_LD|BPF_MEM:
			A = args->mem[pc->k];
			continue;

		case BPF_LDX|BPF_MEM:
			X = args->mem[pc->k];
			continue;

		case BPF_ST:
			args->mem[pc->k] = A;
			continue;

		case BPF_STX:
			args->mem[pc->k] = X;
			continue;

		case BPF_JMP|BPF_JA:
			pc += pc->k;
			continue;

		case BPF_JMP|BPF_JGT|BPF_K:
			pc += (A > pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGE|BPF_K:
			pc += (A >= pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JEQ|BPF_K:
			pc += (A == pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JSET|BPF_K:
			pc += (A & pc->k) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGT|BPF_X:
			pc += (A > X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JGE|BPF_X:
			pc += (A >= X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JEQ|BPF_X:
			pc += (A == X) ? pc->jt : pc->jf;
			continue;

		case BPF_JMP|BPF_JSET|BPF_X:
			pc += (A & X) ? pc->jt : pc->jf;
			continue;

		case BPF_ALU|BPF_ADD|BPF_X:
			A += X;
			continue;

		case BPF_ALU|BPF_SUB|BPF_X:
			A -= X;
			continue;

		case BPF_ALU|BPF_MUL|BPF_X:
			A *= X;
			continue;

		case BPF_ALU|BPF_DIV|BPF_X:
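			/*
			 * Division by a zero-valued register can only be
			 * caught here at run time; division by a zero
			 * constant (BPF_DIV|BPF_K) is rejected up front by
			 * bpf_validate().
			 */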
			if (X == 0)
				return 0;
			A /= X;
			continue;

		case BPF_ALU|BPF_AND|BPF_X:
			A &= X;
			continue;

		case BPF_ALU|BPF_OR|BPF_X:
			A |= X;
			continue;

		case BPF_ALU|BPF_LSH|BPF_X:
			A <<= X;
			continue;

		case BPF_ALU|BPF_RSH|BPF_X:
			A >>= X;
			continue;

		case BPF_ALU|BPF_ADD|BPF_K:
			A += pc->k;
			continue;

		case BPF_ALU|BPF_SUB|BPF_K:
			A -= pc->k;
			continue;

		case BPF_ALU|BPF_MUL|BPF_K:
			A *= pc->k;
			continue;

		case BPF_ALU|BPF_DIV|BPF_K:
			A /= pc->k;
			continue;

		case BPF_ALU|BPF_AND|BPF_K:
			A &= pc->k;
			continue;

		case BPF_ALU|BPF_OR|BPF_K:
			A |= pc->k;
			continue;

		case BPF_ALU|BPF_LSH|BPF_K:
			A <<= pc->k;
			continue;

		case BPF_ALU|BPF_RSH|BPF_K:
			A >>= pc->k;
			continue;

		case BPF_ALU|BPF_NEG:
			A = -A;
			continue;

		case BPF_MISC|BPF_TAX:
			X = A;
			continue;

		case BPF_MISC|BPF_TXA:
			A = X;
			continue;

		case BPF_MISC|BPF_COP:
#ifdef _KERNEL
			if (pc->k < bc->nfuncs) {
				const bpf_copfunc_t fn = bc->copfuncs[pc->k];
				A = fn(bc, args, A);
				continue;
			}
#endif
			return 0;

		case BPF_MISC|BPF_COPX:
#ifdef _KERNEL
			if (X < bc->nfuncs) {
				const bpf_copfunc_t fn = bc->copfuncs[X];
				A = fn(bc, args, A);
				continue;
			}
#endif
			return 0;
		}
	}
}

/*
 * Return true if 'f' is a valid filter program.
 * The constraints are that each jump is forward and lands on a valid
 * instruction, that memory accesses are within valid ranges (to the
 * extent that this can be checked statically; loads of packet
 * data have to be, and are, also checked at run time), and that
 * the code terminates with either an accept or a reject.
 *
 * The kernel needs to be able to verify an application's filter code.
 * Otherwise, a bogus program could easily crash the system.
 */

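/*
 * Illustrative example (hypothetical program): the kernel validator below
 * rejects a filter that reads a scratch memory word before anything has
 * been stored to it, because that word is still marked invalid:
 *
 *	static const struct bpf_insn bad_prog[] = {
 *		BPF_STMT(BPF_LD|BPF_MEM, 3),
 *		BPF_STMT(BPF_RET|BPF_A, 0),
 *	};
 *
 * The userland build does not track memory-word initialisation, so it
 * would accept this program.
 */
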
#if defined(KERNEL) || defined(_KERNEL)

int
bpf_validate(const struct bpf_insn *f, int signed_len)
{
	return bpf_validate_ext(NULL, f, signed_len);
}

int
bpf_validate_ext(const bpf_ctx_t *bc, const struct bpf_insn *f, int signed_len)
#else
int
bpf_validate(const struct bpf_insn *f, int signed_len)
#endif
{
	u_int i, from, len, ok = 0;
	const struct bpf_insn *p;
#if defined(KERNEL) || defined(_KERNEL)
	bpf_memword_init_t *mem, invalid;
	size_t size;
	const size_t extwords = bc ? bc->extwords : 0;
	const size_t memwords = extwords ? extwords : BPF_MEMWORDS;
	const bpf_memword_init_t preinited = extwords ? bc->preinited : 0;
#else
	const size_t memwords = BPF_MEMWORDS;
#endif

	len = (u_int)signed_len;
	if (len < 1)
		return 0;
#if defined(KERNEL) || defined(_KERNEL)
	if (len > BPF_MAXINSNS)
		return 0;
#endif
	if (BPF_CLASS(f[len - 1].code) != BPF_RET)
		return 0;

#if defined(KERNEL) || defined(_KERNEL)
	/* Note: only the pre-initialised memory words are valid on startup. */
	mem = kmem_zalloc(size = sizeof(*mem) * len, KM_SLEEP);
	invalid = ~preinited;
#endif
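
	/*
	 * The "invalid" bitmask has one bit per scratch memory word; a set
	 * bit means the word may still be uninitialised at the instruction
	 * currently being examined.  Stores clear the bit, loads require it
	 * to be clear, and jumps propagate the current mask into mem[] for
	 * each possible destination, so every path is checked.
	 */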

	for (i = 0; i < len; ++i) {
#if defined(KERNEL) || defined(_KERNEL)
		/* blend in any invalid bits for current pc */
		invalid |= mem[i];
#endif
		p = &f[i];
		switch (BPF_CLASS(p->code)) {
		/*
		 * Check that memory operations use valid addresses.
		 */
		case BPF_LD:
		case BPF_LDX:
			switch (BPF_MODE(p->code)) {
			case BPF_MEM:
				/*
				 * Loads from scratch memory are only
				 * bounds- and initialisation-checked in
				 * the kernel build; the userland build
				 * leaves this to the caller.
				 */
#if defined(KERNEL) || defined(_KERNEL)
				if (p->k >= memwords)
					goto out;
				/* check that the memory word is initialised */
				if (invalid & BPF_MEMWORD_INIT(p->k))
					goto out;
#endif
				break;
			case BPF_ABS:
			case BPF_IND:
			case BPF_MSH:
			case BPF_IMM:
			case BPF_LEN:
				break;
			default:
				goto out;
			}
			break;
		case BPF_ST:
		case BPF_STX:
			if (p->k >= memwords)
				goto out;
#if defined(KERNEL) || defined(_KERNEL)
			/* validate the memory word */
			invalid &= ~BPF_MEMWORD_INIT(p->k);
#endif
			break;
		case BPF_ALU:
			switch (BPF_OP(p->code)) {
			case BPF_ADD:
			case BPF_SUB:
			case BPF_MUL:
			case BPF_OR:
			case BPF_AND:
			case BPF_LSH:
			case BPF_RSH:
			case BPF_NEG:
				break;
			case BPF_DIV:
				/*
				 * Check for constant division by 0.
				 */
				if (BPF_SRC(p->code) == BPF_K && p->k == 0)
					goto out;
				break;
			default:
				goto out;
			}
			break;
		case BPF_JMP:
			/*
			 * Check that jumps are within the code block,
			 * and that unconditional branches don't go
			 * backwards as a result of an overflow.
			 * Unconditional branches have a 32-bit offset,
			 * so they could overflow; we check to make
			 * sure they don't.  Conditional branches have
			 * an 8-bit offset, and the from address is <=
			 * BPF_MAXINSNS, and we assume that BPF_MAXINSNS
			 * is sufficiently small that adding 255 to it
			 * won't overflow.
			 *
			 * We know that len is <= BPF_MAXINSNS, and we
			 * assume that BPF_MAXINSNS is < the maximum size
			 * of a u_int, so that i + 1 doesn't overflow.
			 *
			 * For userland, we don't know that the from
			 * or len are <= BPF_MAXINSNS, but we know that
			 * from <= len, and, except on a 64-bit system,
			 * it's unlikely that len, if it truly reflects
			 * the size of the program we've been handed,
			 * will be anywhere near the maximum size of
			 * a u_int.  We also don't check for backward
			 * branches, as we currently support them in
			 * userland for the protochain operation.
			 */
			from = i + 1;
			switch (BPF_OP(p->code)) {
			case BPF_JA:
				if (from + p->k >= len)
					goto out;
#if defined(KERNEL) || defined(_KERNEL)
				if (from + p->k < from)
					goto out;
				/*
				 * mark the currently invalid bits for the
				 * destination
				 */
				mem[from + p->k] |= invalid;
				invalid = 0;
#endif
				break;
			case BPF_JEQ:
			case BPF_JGT:
			case BPF_JGE:
			case BPF_JSET:
				if (from + p->jt >= len || from + p->jf >= len)
					goto out;
#if defined(KERNEL) || defined(_KERNEL)
				/*
				 * mark the currently invalid bits for both
				 * possible jump destinations
				 */
				mem[from + p->jt] |= invalid;
				mem[from + p->jf] |= invalid;
				invalid = 0;
#endif
				break;
			default:
				goto out;
			}
			break;
		case BPF_RET:
			break;
		case BPF_MISC:
			switch (BPF_MISCOP(p->code)) {
			case BPF_COP:
			case BPF_COPX:
				/* In-kernel COP use only. */
#if defined(KERNEL) || defined(_KERNEL)
				if (bc == NULL || bc->copfuncs == NULL)
					goto out;
				if (BPF_MISCOP(p->code) == BPF_COP &&
				    p->k >= bc->nfuncs) {
					goto out;
				}
				break;
#else
				goto out;
#endif
			default:
				break;
			}
			break;
		default:
			goto out;
		}
	}
	ok = 1;
out:
#if defined(KERNEL) || defined(_KERNEL)
	kmem_free(mem, size);
#endif
	return ok;
}