xref: /netbsd-src/sys/net/bpf.c (revision 2c6fc41c810f5088457889d00eba558e8bc74d9e)
1 /*	$NetBSD: bpf.c,v 1.182 2014/03/16 05:20:30 dholland Exp $	*/
2 
3 /*
4  * Copyright (c) 1990, 1991, 1993
5  *	The Regents of the University of California.  All rights reserved.
6  *
7  * This code is derived from the Stanford/CMU enet packet filter,
8  * (net/enet.c) distributed as part of 4.3BSD, and code contributed
9  * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
10  * Berkeley Laboratory.
11  *
12  * Redistribution and use in source and binary forms, with or without
13  * modification, are permitted provided that the following conditions
14  * are met:
15  * 1. Redistributions of source code must retain the above copyright
16  *    notice, this list of conditions and the following disclaimer.
17  * 2. Redistributions in binary form must reproduce the above copyright
18  *    notice, this list of conditions and the following disclaimer in the
19  *    documentation and/or other materials provided with the distribution.
20  * 3. Neither the name of the University nor the names of its contributors
21  *    may be used to endorse or promote products derived from this software
22  *    without specific prior written permission.
23  *
24  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
34  * SUCH DAMAGE.
35  *
36  *	@(#)bpf.c	8.4 (Berkeley) 1/9/95
37  * static char rcsid[] =
38  * "Header: bpf.c,v 1.67 96/09/26 22:00:52 leres Exp ";
39  */
40 
41 #include <sys/cdefs.h>
42 __KERNEL_RCSID(0, "$NetBSD: bpf.c,v 1.182 2014/03/16 05:20:30 dholland Exp $");
43 
44 #if defined(_KERNEL_OPT)
45 #include "opt_bpf.h"
46 #include "sl.h"
47 #include "strip.h"
48 #endif
49 
50 #include <sys/param.h>
51 #include <sys/systm.h>
52 #include <sys/mbuf.h>
53 #include <sys/buf.h>
54 #include <sys/time.h>
55 #include <sys/proc.h>
56 #include <sys/ioctl.h>
57 #include <sys/conf.h>
58 #include <sys/vnode.h>
59 #include <sys/queue.h>
60 #include <sys/stat.h>
61 #include <sys/module.h>
62 #include <sys/once.h>
63 #include <sys/atomic.h>
64 
65 #include <sys/file.h>
66 #include <sys/filedesc.h>
67 #include <sys/tty.h>
68 #include <sys/uio.h>
69 
70 #include <sys/protosw.h>
71 #include <sys/socket.h>
72 #include <sys/errno.h>
73 #include <sys/kernel.h>
74 #include <sys/poll.h>
75 #include <sys/sysctl.h>
76 #include <sys/kauth.h>
77 
78 #include <net/if.h>
79 #include <net/slip.h>
80 
81 #include <net/bpf.h>
82 #include <net/bpfdesc.h>
83 #include <net/bpfjit.h>
84 
85 #include <net/if_arc.h>
86 #include <net/if_ether.h>
87 
88 #include <netinet/in.h>
89 #include <netinet/if_inarp.h>
90 
91 
92 #include <compat/sys/sockio.h>
93 
94 #ifndef BPF_BUFSIZE
95 /*
96  * 4096 is too small for FDDI frames. 8192 is too small for gigabit Ethernet
97  * jumbos (circa 9k), ATM, or Intel gig/10gig ethernet jumbos (16k).
98  */
99 # define BPF_BUFSIZE 32768
100 #endif
101 
102 #define PRINET  26			/* interruptible */
103 
104 /*
105  * The default read buffer size, and limit for BIOCSBLEN, is sysctl'able.
106  * XXX the default values should be computed dynamically based
107  * on available memory size and available mbuf clusters.
108  */
109 int bpf_bufsize = BPF_BUFSIZE;
110 int bpf_maxbufsize = BPF_DFLTBUFSIZE;	/* XXX set dynamically, see above */
111 bool bpf_jit = false;
112 
113 struct bpfjit_ops bpfjit_module_ops = {
114 	.bj_generate_code = NULL,
115 	.bj_free_code = NULL
116 };
117 
118 /*
119  * Global BPF statistics returned by net.bpf.stats sysctl.
120  */
121 struct bpf_stat	bpf_gstats;
122 
123 /*
124  * Use a mutex to avoid a race condition between gathering the stats/peers
125  * and opening/closing the device.
126  */
127 static kmutex_t bpf_mtx;
128 
129 /*
130  *  bpf_iflist is the list of interfaces; each corresponds to an ifnet
131  *  bpf_list is the list of open descriptors
132  */
133 struct bpf_if	*bpf_iflist;
134 LIST_HEAD(, bpf_d) bpf_list;
135 
136 static int	bpf_allocbufs(struct bpf_d *);
137 static void	bpf_deliver(struct bpf_if *,
138 		            void *(*cpfn)(void *, const void *, size_t),
139 		            void *, u_int, u_int, const bool);
140 static void	bpf_freed(struct bpf_d *);
141 static void	bpf_ifname(struct ifnet *, struct ifreq *);
142 static void	*bpf_mcpy(void *, const void *, size_t);
143 static int	bpf_movein(struct uio *, int, uint64_t,
144 			        struct mbuf **, struct sockaddr *);
145 static void	bpf_attachd(struct bpf_d *, struct bpf_if *);
146 static void	bpf_detachd(struct bpf_d *);
147 static int	bpf_setif(struct bpf_d *, struct ifreq *);
148 static void	bpf_timed_out(void *);
149 static inline void
150 		bpf_wakeup(struct bpf_d *);
151 static int	bpf_hdrlen(struct bpf_d *);
152 static void	catchpacket(struct bpf_d *, u_char *, u_int, u_int,
153     void *(*)(void *, const void *, size_t), struct timespec *);
154 static void	reset_d(struct bpf_d *);
155 static int	bpf_getdltlist(struct bpf_d *, struct bpf_dltlist *);
156 static int	bpf_setdlt(struct bpf_d *, u_int);
157 
158 static int	bpf_read(struct file *, off_t *, struct uio *, kauth_cred_t,
159     int);
160 static int	bpf_write(struct file *, off_t *, struct uio *, kauth_cred_t,
161     int);
162 static int	bpf_ioctl(struct file *, u_long, void *);
163 static int	bpf_poll(struct file *, int);
164 static int	bpf_stat(struct file *, struct stat *);
165 static int	bpf_close(struct file *);
166 static int	bpf_kqfilter(struct file *, struct knote *);
167 static void	bpf_softintr(void *);
168 
169 static const struct fileops bpf_fileops = {
170 	.fo_read = bpf_read,
171 	.fo_write = bpf_write,
172 	.fo_ioctl = bpf_ioctl,
173 	.fo_fcntl = fnullop_fcntl,
174 	.fo_poll = bpf_poll,
175 	.fo_stat = bpf_stat,
176 	.fo_close = bpf_close,
177 	.fo_kqfilter = bpf_kqfilter,
178 	.fo_restart = fnullop_restart,
179 };
180 
181 dev_type_open(bpfopen);
182 
183 const struct cdevsw bpf_cdevsw = {
184 	.d_open = bpfopen,
185 	.d_close = noclose,
186 	.d_read = noread,
187 	.d_write = nowrite,
188 	.d_ioctl = noioctl,
189 	.d_stop = nostop,
190 	.d_tty = notty,
191 	.d_poll = nopoll,
192 	.d_mmap = nommap,
193 	.d_kqfilter = nokqfilter,
194 	.d_flag = D_OTHER
195 };
196 
197 bpfjit_func_t
198 bpf_jit_generate(bpf_ctx_t *bc, void *code, size_t size)
199 {
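	/*
	 * Pairs with the membar_sync() in sysctl_net_bpf_jit(); the
	 * bpfjit module is presumed to issue a matching barrier when
	 * it publishes bj_generate_code.
	 */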
200 	membar_consumer();
201 	if (bpfjit_module_ops.bj_generate_code != NULL) {
202 		return bpfjit_module_ops.bj_generate_code(bc, code, size);
203 	}
204 	return NULL;
205 }
206 
207 void
208 bpf_jit_freecode(bpfjit_func_t jcode)
209 {
210 	KASSERT(bpfjit_module_ops.bj_free_code != NULL);
211 	bpfjit_module_ops.bj_free_code(jcode);
212 }
213 
214 static int
215 bpf_movein(struct uio *uio, int linktype, uint64_t mtu, struct mbuf **mp,
216 	   struct sockaddr *sockp)
217 {
218 	struct mbuf *m;
219 	int error;
220 	size_t len;
221 	size_t hlen;
222 	size_t align;
223 
224 	/*
225 	 * Build a sockaddr based on the data link layer type.
226 	 * We do this at this level because the ethernet header
227 	 * is copied directly into the data field of the sockaddr.
228 	 * In the case of SLIP, there is no header and the packet
229 	 * is forwarded as is.
230 	 * Also, we are careful to leave room at the front of the mbuf
231 	 * for the link level header.
232 	 */
233 	switch (linktype) {
234 
235 	case DLT_SLIP:
236 		sockp->sa_family = AF_INET;
237 		hlen = 0;
238 		align = 0;
239 		break;
240 
241 	case DLT_PPP:
242 		sockp->sa_family = AF_UNSPEC;
243 		hlen = 0;
244 		align = 0;
245 		break;
246 
247 	case DLT_EN10MB:
248 		sockp->sa_family = AF_UNSPEC;
249 		/* XXX Would MAXLINKHDR be better? */
250  		/* 6(dst)+6(src)+2(type) */
251 		hlen = sizeof(struct ether_header);
252 		align = 2;
253 		break;
254 
255 	case DLT_ARCNET:
256 		sockp->sa_family = AF_UNSPEC;
257 		hlen = ARC_HDRLEN;
258 		align = 5;
259 		break;
260 
261 	case DLT_FDDI:
262 		sockp->sa_family = AF_LINK;
263 		/* XXX 4(FORMAC)+6(dst)+6(src) */
264 		hlen = 16;
265 		align = 0;
266 		break;
267 
268 	case DLT_ECONET:
269 		sockp->sa_family = AF_UNSPEC;
270 		hlen = 6;
271 		align = 2;
272 		break;
273 
274 	case DLT_NULL:
275 		sockp->sa_family = AF_UNSPEC;
276 		hlen = 0;
277 		align = 0;
278 		break;
279 
280 	default:
281 		return (EIO);
282 	}
283 
284 	len = uio->uio_resid;
285 	/*
286 	 * If there aren't enough bytes for a link level header or the
287 	 * packet length exceeds the interface mtu, return an error.
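	 * (len and hlen are unsigned, so a packet shorter than the
	 * link header makes len - hlen wrap around and fail the test
	 * as well.)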
288 	 */
289 	if (len - hlen > mtu)
290 		return (EMSGSIZE);
291 
292 	/*
293 	 * XXX Avoid complicated buffer chaining ---
294 	 * bail if it won't fit in a single mbuf.
295 	 * (Take into account possible alignment bytes)
296 	 */
297 	if (len + align > MCLBYTES)
298 		return (EIO);
299 
300 	m = m_gethdr(M_WAIT, MT_DATA);
301 	m->m_pkthdr.rcvif = 0;
302 	m->m_pkthdr.len = (int)(len - hlen);
303 	if (len + align > MHLEN) {
304 		m_clget(m, M_WAIT);
305 		if ((m->m_flags & M_EXT) == 0) {
306 			error = ENOBUFS;
307 			goto bad;
308 		}
309 	}
310 
311 	/* Ensure the data is properly aligned */
312 	if (align > 0) {
313 		m->m_data += align;
314 		m->m_len -= (int)align;
315 	}
316 
317 	error = uiomove(mtod(m, void *), len, uio);
318 	if (error)
319 		goto bad;
320 	if (hlen != 0) {
321 		memcpy(sockp->sa_data, mtod(m, void *), hlen);
322 		m->m_data += hlen; /* XXX */
323 		len -= hlen;
324 	}
325 	m->m_len = (int)len;
326 	*mp = m;
327 	return (0);
328 
329 bad:
330 	m_freem(m);
331 	return (error);
332 }
333 
334 /*
335  * Attach file to the bpf interface, i.e. make d listen on bp.
336  * Must be called at splnet.
337  */
338 static void
339 bpf_attachd(struct bpf_d *d, struct bpf_if *bp)
340 {
341 	/*
342 	 * Point d at bp, and add d to the interface's list of listeners.
343 	 * Finally, point the driver's bpf cookie at the interface so
344 	 * it will divert packets to bpf.
345 	 */
346 	d->bd_bif = bp;
347 	d->bd_next = bp->bif_dlist;
348 	bp->bif_dlist = d;
349 
350 	*bp->bif_driverp = bp;
351 }
352 
353 /*
354  * Detach a file from its interface.
355  */
356 static void
357 bpf_detachd(struct bpf_d *d)
358 {
359 	struct bpf_d **p;
360 	struct bpf_if *bp;
361 
362 	bp = d->bd_bif;
363 	/*
364 	 * Check if this descriptor had requested promiscuous mode.
365 	 * If so, turn it off.
366 	 */
367 	if (d->bd_promisc) {
368 		int error __diagused;
369 
370 		d->bd_promisc = 0;
371 		/*
372 		 * Take device out of promiscuous mode.  Since we were
373 		 * able to enter promiscuous mode, we should be able
374 		 * to turn it off.  But we can get an error if
375 		 * the interface was configured down, so just log the
376 		 * failure under DIAGNOSTIC instead of panicking.
377 		 */
378   		error = ifpromisc(bp->bif_ifp, 0);
379 #ifdef DIAGNOSTIC
380 		if (error)
381 			printf("%s: ifpromisc failed: %d\n", __func__, error);
382 #endif
383 	}
384 	/* Remove d from the interface's descriptor list. */
385 	p = &bp->bif_dlist;
386 	while (*p != d) {
387 		p = &(*p)->bd_next;
388 		if (*p == 0)
389 			panic("%s: descriptor not in list", __func__);
390 	}
391 	*p = (*p)->bd_next;
392 	if (bp->bif_dlist == 0)
393 		/*
394 		 * Let the driver know that there are no more listeners.
395 		 */
396 		*d->bd_bif->bif_driverp = 0;
397 	d->bd_bif = 0;
398 }
399 
400 static int
401 doinit(void)
402 {
403 
404 	mutex_init(&bpf_mtx, MUTEX_DEFAULT, IPL_NONE);
405 
406 	LIST_INIT(&bpf_list);
407 
408 	bpf_gstats.bs_recv = 0;
409 	bpf_gstats.bs_drop = 0;
410 	bpf_gstats.bs_capt = 0;
411 
412 	return 0;
413 }
414 
415 /*
416  * bpfilterattach() is called at boot time.
417  */
418 /* ARGSUSED */
419 void
420 bpfilterattach(int n)
421 {
422 	static ONCE_DECL(control);
423 
424 	RUN_ONCE(&control, doinit);
425 }
426 
427 /*
428  * Open the bpf device.  The device is cloned: each open creates a new descriptor.
429  */
430 /* ARGSUSED */
431 int
432 bpfopen(dev_t dev, int flag, int mode, struct lwp *l)
433 {
434 	struct bpf_d *d;
435 	struct file *fp;
436 	int error, fd;
437 
438 	/* fd_allocfile() provides the file and descriptor for us. */
439 	if ((error = fd_allocfile(&fp, &fd)) != 0)
440 		return error;
441 
442 	d = malloc(sizeof(*d), M_DEVBUF, M_WAITOK|M_ZERO);
443 	d->bd_bufsize = bpf_bufsize;
444 	d->bd_seesent = 1;
445 	d->bd_feedback = 0;
446 	d->bd_pid = l->l_proc->p_pid;
447 #ifdef _LP64
448 	if (curproc->p_flag & PK_32)
449 		d->bd_compat32 = 1;
450 #endif
451 	getnanotime(&d->bd_btime);
452 	d->bd_atime = d->bd_mtime = d->bd_btime;
453 	callout_init(&d->bd_callout, 0);
454 	selinit(&d->bd_sel);
455 	d->bd_sih = softint_establish(SOFTINT_CLOCK, bpf_softintr, d);
456 	d->bd_jitcode = NULL;
457 
458 	mutex_enter(&bpf_mtx);
459 	LIST_INSERT_HEAD(&bpf_list, d, bd_list);
460 	mutex_exit(&bpf_mtx);
461 
462 	return fd_clone(fp, fd, flag, &bpf_fileops, d);
463 }
464 
465 /*
466  * Close the descriptor by detaching it from its interface,
467  * deallocating its buffers, and marking it free.
468  */
469 /* ARGSUSED */
470 static int
471 bpf_close(struct file *fp)
472 {
473 	struct bpf_d *d = fp->f_data;
474 	int s;
475 
476 	KERNEL_LOCK(1, NULL);
477 
478 	/*
479 	 * Refresh the PID associated with this bpf file.
480 	 */
481 	d->bd_pid = curproc->p_pid;
482 
483 	s = splnet();
484 	if (d->bd_state == BPF_WAITING)
485 		callout_stop(&d->bd_callout);
486 	d->bd_state = BPF_IDLE;
487 	if (d->bd_bif)
488 		bpf_detachd(d);
489 	splx(s);
490 	bpf_freed(d);
491 	mutex_enter(&bpf_mtx);
492 	LIST_REMOVE(d, bd_list);
493 	mutex_exit(&bpf_mtx);
494 	callout_destroy(&d->bd_callout);
495 	seldestroy(&d->bd_sel);
496 	softint_disestablish(d->bd_sih);
497 	free(d, M_DEVBUF);
498 	fp->f_data = NULL;
499 
500 	KERNEL_UNLOCK_ONE(NULL);
501 
502 	return (0);
503 }
504 
505 /*
506  * Rotate the packet buffers in descriptor d.  Move the store buffer
507  * into the hold slot, and the free buffer into the store slot.
508  * Zero the length of the new store buffer.
509  */
510 #define ROTATE_BUFFERS(d) \
511 	(d)->bd_hbuf = (d)->bd_sbuf; \
512 	(d)->bd_hlen = (d)->bd_slen; \
513 	(d)->bd_sbuf = (d)->bd_fbuf; \
514 	(d)->bd_slen = 0; \
515 	(d)->bd_fbuf = 0;
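/*
 * Callers must know a free buffer is available (bd_fbuf != NULL) before
 * rotating; catchpacket() checks this and drops the packet otherwise.
 */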
516 /*
517  *  bpf_read - read next chunk of packets from buffers
518  */
519 static int
520 bpf_read(struct file *fp, off_t *offp, struct uio *uio,
521     kauth_cred_t cred, int flags)
522 {
523 	struct bpf_d *d = fp->f_data;
524 	int timed_out;
525 	int error;
526 	int s;
527 
528 	getnanotime(&d->bd_atime);
529 	/*
530 	 * Restrict application to use a buffer the same size as
531 	 * the kernel buffers.
532 	 */
533 	if (uio->uio_resid != d->bd_bufsize)
534 		return (EINVAL);
535 
536 	KERNEL_LOCK(1, NULL);
537 	s = splnet();
538 	if (d->bd_state == BPF_WAITING)
539 		callout_stop(&d->bd_callout);
540 	timed_out = (d->bd_state == BPF_TIMED_OUT);
541 	d->bd_state = BPF_IDLE;
542 	/*
543 	 * If the hold buffer is empty, then do a timed sleep, which
544 	 * ends when the timeout expires or when enough packets
545 	 * have arrived to fill the store buffer.
546 	 */
547 	while (d->bd_hbuf == 0) {
548 		if (fp->f_flag & FNONBLOCK) {
549 			if (d->bd_slen == 0) {
550 				splx(s);
551 				KERNEL_UNLOCK_ONE(NULL);
552 				return (EWOULDBLOCK);
553 			}
554 			ROTATE_BUFFERS(d);
555 			break;
556 		}
557 
558 		if ((d->bd_immediate || timed_out) && d->bd_slen != 0) {
559 			/*
560 			 * One or more packets arrived either since the
561 			 * previous read or while we were asleep.
562 			 * Rotate the buffers and return what's here.
563 			 */
564 			ROTATE_BUFFERS(d);
565 			break;
566 		}
567 		error = tsleep(d, PRINET|PCATCH, "bpf",
568 				d->bd_rtout);
569 		if (error == EINTR || error == ERESTART) {
570 			splx(s);
571 			KERNEL_UNLOCK_ONE(NULL);
572 			return (error);
573 		}
574 		if (error == EWOULDBLOCK) {
575 			/*
576 			 * On a timeout, return what's in the buffer,
577 			 * which may be nothing.  If there is something
578 			 * in the store buffer, we can rotate the buffers.
579 			 */
580 			if (d->bd_hbuf)
581 				/*
582 				 * We filled up the buffer in between
583 				 * getting the timeout and arriving
584 				 * here, so we don't need to rotate.
585 				 */
586 				break;
587 
588 			if (d->bd_slen == 0) {
589 				splx(s);
590 				KERNEL_UNLOCK_ONE(NULL);
591 				return (0);
592 			}
593 			ROTATE_BUFFERS(d);
594 			break;
595 		}
596 		if (error != 0)
597 			goto done;
598 	}
599 	/*
600 	 * At this point, we know we have something in the hold slot.
601 	 */
602 	splx(s);
603 
604 	/*
605 	 * Move data from hold buffer into user space.
606 	 * We know the entire buffer is transferred since
607 	 * we checked above that the read buffer is bd_bufsize bytes.
608 	 */
609 	error = uiomove(d->bd_hbuf, d->bd_hlen, uio);
610 
611 	s = splnet();
612 	d->bd_fbuf = d->bd_hbuf;
613 	d->bd_hbuf = 0;
614 	d->bd_hlen = 0;
615 done:
616 	splx(s);
617 	KERNEL_UNLOCK_ONE(NULL);
618 	return (error);
619 }
620 
621 
622 /*
623  * If there are processes sleeping on this descriptor, wake them up.
624  */
625 static inline void
626 bpf_wakeup(struct bpf_d *d)
627 {
628 	wakeup(d);
629 	if (d->bd_async)
630 		softint_schedule(d->bd_sih);
631 	selnotify(&d->bd_sel, 0, 0);
632 }
633 
634 static void
635 bpf_softintr(void *cookie)
636 {
637 	struct bpf_d *d;
638 
639 	d = cookie;
640 	if (d->bd_async)
641 		fownsignal(d->bd_pgid, SIGIO, 0, 0, NULL);
642 }
643 
644 static void
645 bpf_timed_out(void *arg)
646 {
647 	struct bpf_d *d = arg;
648 	int s;
649 
650 	s = splnet();
651 	if (d->bd_state == BPF_WAITING) {
652 		d->bd_state = BPF_TIMED_OUT;
653 		if (d->bd_slen != 0)
654 			bpf_wakeup(d);
655 	}
656 	splx(s);
657 }
658 
659 
660 static int
661 bpf_write(struct file *fp, off_t *offp, struct uio *uio,
662     kauth_cred_t cred, int flags)
663 {
664 	struct bpf_d *d = fp->f_data;
665 	struct ifnet *ifp;
666 	struct mbuf *m, *mc;
667 	int error, s;
668 	static struct sockaddr_storage dst;
669 
670 	m = NULL;	/* XXX gcc */
671 
672 	KERNEL_LOCK(1, NULL);
673 
674 	if (d->bd_bif == 0) {
675 		KERNEL_UNLOCK_ONE(NULL);
676 		return (ENXIO);
677 	}
678 	getnanotime(&d->bd_mtime);
679 
680 	ifp = d->bd_bif->bif_ifp;
681 
682 	if (uio->uio_resid == 0) {
683 		KERNEL_UNLOCK_ONE(NULL);
684 		return (0);
685 	}
686 
687 	error = bpf_movein(uio, (int)d->bd_bif->bif_dlt, ifp->if_mtu, &m,
688 		(struct sockaddr *) &dst);
689 	if (error) {
690 		KERNEL_UNLOCK_ONE(NULL);
691 		return (error);
692 	}
693 
694 	if (m->m_pkthdr.len > ifp->if_mtu) {
695 		KERNEL_UNLOCK_ONE(NULL);
696 		m_freem(m);
697 		return (EMSGSIZE);
698 	}
699 
700 	if (d->bd_hdrcmplt)
701 		dst.ss_family = pseudo_AF_HDRCMPLT;
702 
703 	if (d->bd_feedback) {
704 		mc = m_dup(m, 0, M_COPYALL, M_NOWAIT);
705 		if (mc != NULL)
706 			mc->m_pkthdr.rcvif = ifp;
707 		/* Set M_PROMISC for outgoing packets to be discarded. */
708 		if (1 /*d->bd_direction == BPF_D_INOUT*/)
709 			m->m_flags |= M_PROMISC;
710 	} else
711 		mc = NULL;
712 
713 	s = splsoftnet();
714 	error = (*ifp->if_output)(ifp, m, (struct sockaddr *) &dst, NULL);
715 
716 	if (mc != NULL) {
717 		if (error == 0)
718 			(*ifp->if_input)(ifp, mc);
719 		m_freem(mc);
720 	}
721 	splx(s);
722 	KERNEL_UNLOCK_ONE(NULL);
723 	/*
724 	 * The driver frees the mbuf.
725 	 */
726 	return (error);
727 }
728 
729 /*
730  * Reset a descriptor by flushing its packet buffer and clearing the
731  * receive and drop counts.  Should be called at splnet.
732  */
733 static void
734 reset_d(struct bpf_d *d)
735 {
736 	if (d->bd_hbuf) {
737 		/* Free the hold buffer. */
738 		d->bd_fbuf = d->bd_hbuf;
739 		d->bd_hbuf = 0;
740 	}
741 	d->bd_slen = 0;
742 	d->bd_hlen = 0;
743 	d->bd_rcount = 0;
744 	d->bd_dcount = 0;
745 	d->bd_ccount = 0;
746 }
747 
748 /*
749  *  FIONREAD		Check for read packet available.
750  *  BIOCGBLEN		Get buffer len [for read()].
751  *  BIOCSETF		Set link layer read filter.
752  *  BIOCFLUSH		Flush read packet buffer.
753  *  BIOCPROMISC		Put interface into promiscuous mode.
754  *  BIOCGDLT		Get link layer type.
755  *  BIOCGETIF		Get interface name.
756  *  BIOCSETIF		Set interface.
757  *  BIOCSRTIMEOUT	Set read timeout.
758  *  BIOCGRTIMEOUT	Get read timeout.
759  *  BIOCGSTATS		Get packet stats.
760  *  BIOCIMMEDIATE	Set immediate mode.
761  *  BIOCVERSION		Get filter language version.
762  *  BIOCGHDRCMPLT	Get "header already complete" flag.
763  *  BIOCSHDRCMPLT	Set "header already complete" flag.
764  *  BIOCSFEEDBACK	Set packet feedback mode.
765  *  BIOCGFEEDBACK	Get packet feedback mode.
766  *  BIOCGSEESENT  	Get "see sent packets" mode.
767  *  BIOCSSEESENT  	Set "see sent packets" mode.
768  */
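/*
 * Illustrative userland sketch (not kernel code; error handling omitted
 * and the interface name "wm0" is only an example):
 *
 *	int fd = open("/dev/bpf", O_RDWR);
 *	struct ifreq ifr;
 *	strlcpy(ifr.ifr_name, "wm0", sizeof(ifr.ifr_name));
 *	ioctl(fd, BIOCSETIF, &ifr);	attach to an interface
 *	u_int blen;
 *	ioctl(fd, BIOCGBLEN, &blen);	read(2) buffer must be this size
 */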
769 /* ARGSUSED */
770 static int
771 bpf_ioctl(struct file *fp, u_long cmd, void *addr)
772 {
773 	struct bpf_d *d = fp->f_data;
774 	int s, error = 0;
775 
776 	/*
777 	 * Refresh the PID associated with this bpf file.
778 	 */
779 	KERNEL_LOCK(1, NULL);
780 	d->bd_pid = curproc->p_pid;
781 #ifdef _LP64
782 	if (curproc->p_flag & PK_32)
783 		d->bd_compat32 = 1;
784 	else
785 		d->bd_compat32 = 0;
786 #endif
787 
788 	s = splnet();
789 	if (d->bd_state == BPF_WAITING)
790 		callout_stop(&d->bd_callout);
791 	d->bd_state = BPF_IDLE;
792 	splx(s);
793 
794 	switch (cmd) {
795 
796 	default:
797 		error = EINVAL;
798 		break;
799 
800 	/*
801 	 * Check for read packet available.
802 	 */
803 	case FIONREAD:
804 		{
805 			int n;
806 
807 			s = splnet();
808 			n = d->bd_slen;
809 			if (d->bd_hbuf)
810 				n += d->bd_hlen;
811 			splx(s);
812 
813 			*(int *)addr = n;
814 			break;
815 		}
816 
817 	/*
818 	 * Get buffer len [for read()].
819 	 */
820 	case BIOCGBLEN:
821 		*(u_int *)addr = d->bd_bufsize;
822 		break;
823 
824 	/*
825 	 * Set buffer length.
826 	 */
827 	case BIOCSBLEN:
828 		if (d->bd_bif != 0)
829 			error = EINVAL;
830 		else {
831 			u_int size = *(u_int *)addr;
832 
833 			if (size > bpf_maxbufsize)
834 				*(u_int *)addr = size = bpf_maxbufsize;
835 			else if (size < BPF_MINBUFSIZE)
836 				*(u_int *)addr = size = BPF_MINBUFSIZE;
837 			d->bd_bufsize = size;
838 		}
839 		break;
840 
841 	/*
842 	 * Set link layer read filter.
843 	 */
844 	case BIOCSETF:
845 		error = bpf_setf(d, addr);
846 		break;
847 
848 	/*
849 	 * Flush read packet buffer.
850 	 */
851 	case BIOCFLUSH:
852 		s = splnet();
853 		reset_d(d);
854 		splx(s);
855 		break;
856 
857 	/*
858 	 * Put interface into promiscuous mode.
859 	 */
860 	case BIOCPROMISC:
861 		if (d->bd_bif == 0) {
862 			/*
863 			 * No interface attached yet.
864 			 */
865 			error = EINVAL;
866 			break;
867 		}
868 		s = splnet();
869 		if (d->bd_promisc == 0) {
870 			error = ifpromisc(d->bd_bif->bif_ifp, 1);
871 			if (error == 0)
872 				d->bd_promisc = 1;
873 		}
874 		splx(s);
875 		break;
876 
877 	/*
878 	 * Get device parameters.
879 	 */
880 	case BIOCGDLT:
881 		if (d->bd_bif == 0)
882 			error = EINVAL;
883 		else
884 			*(u_int *)addr = d->bd_bif->bif_dlt;
885 		break;
886 
887 	/*
888 	 * Get a list of supported device parameters.
889 	 */
890 	case BIOCGDLTLIST:
891 		if (d->bd_bif == 0)
892 			error = EINVAL;
893 		else
894 			error = bpf_getdltlist(d, addr);
895 		break;
896 
897 	/*
898 	 * Set device parameters.
899 	 */
900 	case BIOCSDLT:
901 		if (d->bd_bif == 0)
902 			error = EINVAL;
903 		else
904 			error = bpf_setdlt(d, *(u_int *)addr);
905 		break;
906 
907 	/*
908 	 * Set interface name.
909 	 */
910 #ifdef OBIOCGETIF
911 	case OBIOCGETIF:
912 #endif
913 	case BIOCGETIF:
914 		if (d->bd_bif == 0)
915 			error = EINVAL;
916 		else
917 			bpf_ifname(d->bd_bif->bif_ifp, addr);
918 		break;
919 
920 	/*
921 	 * Set interface.
922 	 */
923 #ifdef OBIOCSETIF
924 	case OBIOCSETIF:
925 #endif
926 	case BIOCSETIF:
927 		error = bpf_setif(d, addr);
928 		break;
929 
930 	/*
931 	 * Set read timeout.
932 	 */
933 	case BIOCSRTIMEOUT:
934 		{
935 			struct timeval *tv = addr;
936 
937 			/* Compute number of ticks. */
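			/*
			 * E.g. with hz = 100 (tick = 10000 us), a request
			 * of { 2, 500000 } (2.5 s) yields 2*100 +
			 * 500000/10000 = 250 ticks.
			 */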
938 			d->bd_rtout = tv->tv_sec * hz + tv->tv_usec / tick;
939 			if ((d->bd_rtout == 0) && (tv->tv_usec != 0))
940 				d->bd_rtout = 1;
941 			break;
942 		}
943 
944 #ifdef BIOCGORTIMEOUT
945 	/*
946 	 * Get read timeout.
947 	 */
948 	case BIOCGORTIMEOUT:
949 		{
950 			struct timeval50 *tv = addr;
951 
952 			tv->tv_sec = d->bd_rtout / hz;
953 			tv->tv_usec = (d->bd_rtout % hz) * tick;
954 			break;
955 		}
956 #endif
957 
958 #ifdef BIOCSORTIMEOUT
959 	/*
960 	 * Set read timeout.
961 	 */
962 	case BIOCSORTIMEOUT:
963 		{
964 			struct timeval50 *tv = addr;
965 
966 			/* Compute number of ticks. */
967 			d->bd_rtout = tv->tv_sec * hz + tv->tv_usec / tick;
968 			if ((d->bd_rtout == 0) && (tv->tv_usec != 0))
969 				d->bd_rtout = 1;
970 			break;
971 		}
972 #endif
973 
974 	/*
975 	 * Get read timeout.
976 	 */
977 	case BIOCGRTIMEOUT:
978 		{
979 			struct timeval *tv = addr;
980 
981 			tv->tv_sec = d->bd_rtout / hz;
982 			tv->tv_usec = (d->bd_rtout % hz) * tick;
983 			break;
984 		}
985 	/*
986 	 * Get packet stats.
987 	 */
988 	case BIOCGSTATS:
989 		{
990 			struct bpf_stat *bs = addr;
991 
992 			bs->bs_recv = d->bd_rcount;
993 			bs->bs_drop = d->bd_dcount;
994 			bs->bs_capt = d->bd_ccount;
995 			break;
996 		}
997 
998 	case BIOCGSTATSOLD:
999 		{
1000 			struct bpf_stat_old *bs = addr;
1001 
1002 			bs->bs_recv = d->bd_rcount;
1003 			bs->bs_drop = d->bd_dcount;
1004 			break;
1005 		}
1006 
1007 	/*
1008 	 * Set immediate mode.
1009 	 */
1010 	case BIOCIMMEDIATE:
1011 		d->bd_immediate = *(u_int *)addr;
1012 		break;
1013 
1014 	case BIOCVERSION:
1015 		{
1016 			struct bpf_version *bv = addr;
1017 
1018 			bv->bv_major = BPF_MAJOR_VERSION;
1019 			bv->bv_minor = BPF_MINOR_VERSION;
1020 			break;
1021 		}
1022 
1023 	case BIOCGHDRCMPLT:	/* get "header already complete" flag */
1024 		*(u_int *)addr = d->bd_hdrcmplt;
1025 		break;
1026 
1027 	case BIOCSHDRCMPLT:	/* set "header already complete" flag */
1028 		d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0;
1029 		break;
1030 
1031 	/*
1032 	 * Get "see sent packets" flag
1033 	 */
1034 	case BIOCGSEESENT:
1035 		*(u_int *)addr = d->bd_seesent;
1036 		break;
1037 
1038 	/*
1039 	 * Set "see sent" packets flag
1040 	 */
1041 	case BIOCSSEESENT:
1042 		d->bd_seesent = *(u_int *)addr;
1043 		break;
1044 
1045 	/*
1046 	 * Set "feed packets from bpf back to input" mode
1047 	 */
1048 	case BIOCSFEEDBACK:
1049 		d->bd_feedback = *(u_int *)addr;
1050 		break;
1051 
1052 	/*
1053 	 * Get "feed packets from bpf back to input" mode
1054 	 */
1055 	case BIOCGFEEDBACK:
1056 		*(u_int *)addr = d->bd_feedback;
1057 		break;
1058 
1059 	case FIONBIO:		/* Non-blocking I/O */
1060 		/*
1061 		 * No need to do anything special: bpf_read() checks
1062 		 * FNONBLOCK on the file flags to decide whether or not
1063 		 * to block the read.
1064 		 */
1065 		break;
1066 
1067 	case FIOASYNC:		/* Send signal on receive packets */
1068 		d->bd_async = *(int *)addr;
1069 		break;
1070 
1071 	case TIOCSPGRP:		/* Process or group to send signals to */
1072 	case FIOSETOWN:
1073 		error = fsetown(&d->bd_pgid, cmd, addr);
1074 		break;
1075 
1076 	case TIOCGPGRP:
1077 	case FIOGETOWN:
1078 		error = fgetown(d->bd_pgid, cmd, addr);
1079 		break;
1080 	}
1081 	KERNEL_UNLOCK_ONE(NULL);
1082 	return (error);
1083 }
1084 
1085 /*
1086  * Set d's packet filter program to fp.  If this file already has a filter,
1087  * free it and replace it.  Returns EINVAL for bogus requests.
1088  */
1089 int
1090 bpf_setf(struct bpf_d *d, struct bpf_program *fp)
1091 {
1092 	struct bpf_insn *fcode, *old;
1093 	bpfjit_func_t jcode, oldj;
1094 	size_t flen, size;
1095 	int s;
1096 
1097 	jcode = NULL;
1098 	flen = fp->bf_len;
1099 
1100 	if ((fp->bf_insns == NULL && flen) || flen > BPF_MAXINSNS) {
1101 		return EINVAL;
1102 	}
1103 
1104 	if (flen) {
1105 		/*
1106 		 * Allocate the buffer, copy the byte-code from
1107 		 * userspace and validate it.
1108 		 */
1109 		size = flen * sizeof(*fp->bf_insns);
1110 		fcode = malloc(size, M_DEVBUF, M_WAITOK);
1111 		if (copyin(fp->bf_insns, fcode, size) != 0 ||
1112 		    !bpf_validate(fcode, (int)flen)) {
1113 			free(fcode, M_DEVBUF);
1114 			return EINVAL;
1115 		}
1116 		membar_consumer();
1117 		if (bpf_jit) {
1118 			bpf_ctx_t *bc = bpf_default_ctx();
1119 			jcode = bpf_jit_generate(bc, fcode, flen);
1120 		}
1121 	} else {
1122 		fcode = NULL;
1123 	}
1124 
1125 	s = splnet();
1126 	old = d->bd_filter;
1127 	d->bd_filter = fcode;
1128 	oldj = d->bd_jitcode;
1129 	d->bd_jitcode = jcode;
1130 	reset_d(d);
1131 	splx(s);
1132 
1133 	if (old) {
1134 		free(old, M_DEVBUF);
1135 	}
1136 	if (oldj) {
1137 		bpf_jit_freecode(oldj);
1138 	}
1139 
1140 	return 0;
1141 }
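/*
 * Illustrative userland sketch (not part of this file): the simplest
 * valid program accepts every packet by returning the maximum snapshot
 * length, and would be installed with BIOCSETF:
 *
 *	struct bpf_insn insns[] = {
 *		BPF_STMT(BPF_RET | BPF_K, (u_int)-1),
 *	};
 *	struct bpf_program prog = { 1, insns };
 *	ioctl(fd, BIOCSETF, &prog);
 */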
1142 
1143 /*
1144  * Detach a file from its current interface (if attached at all) and attach
1145  * to the interface indicated by the name stored in ifr.
1146  * Return an errno or 0.
1147  */
1148 static int
1149 bpf_setif(struct bpf_d *d, struct ifreq *ifr)
1150 {
1151 	struct bpf_if *bp;
1152 	char *cp;
1153 	int unit_seen, i, s, error;
1154 
1155 	/*
1156 	 * Make sure the provided name has a unit number, and default
1157 	 * it to '0' if not specified.
1158 	 * XXX This is ugly ... do this differently?
1159 	 */
1160 	unit_seen = 0;
1161 	cp = ifr->ifr_name;
1162 	cp[sizeof(ifr->ifr_name) - 1] = '\0';	/* sanity */
1163 	while (*cp++)
1164 		if (*cp >= '0' && *cp <= '9')
1165 			unit_seen = 1;
1166 	if (!unit_seen) {
1167 		/* Make sure to leave room for the '\0'. */
1168 		for (i = 0; i < (IFNAMSIZ - 1); ++i) {
1169 			if ((ifr->ifr_name[i] >= 'a' &&
1170 			     ifr->ifr_name[i] <= 'z') ||
1171 			    (ifr->ifr_name[i] >= 'A' &&
1172 			     ifr->ifr_name[i] <= 'Z'))
1173 				continue;
1174 			ifr->ifr_name[i] = '0';
1175 		}
1176 	}
1177 
1178 	/*
1179 	 * Look through attached interfaces for the named one.
1180 	 */
1181 	for (bp = bpf_iflist; bp != 0; bp = bp->bif_next) {
1182 		struct ifnet *ifp = bp->bif_ifp;
1183 
1184 		if (ifp == 0 ||
1185 		    strcmp(ifp->if_xname, ifr->ifr_name) != 0)
1186 			continue;
1187 		/* skip additional entry */
1188 		if (bp->bif_driverp != &ifp->if_bpf)
1189 			continue;
1190 		/*
1191 		 * We found the requested interface.
1192 		 * Allocate the packet buffers if we need to.
1193 		 * If we're already attached to the requested interface,
1194 		 * just flush the buffer.
1195 		 */
1196 		if (d->bd_sbuf == 0) {
1197 			error = bpf_allocbufs(d);
1198 			if (error != 0)
1199 				return (error);
1200 		}
1201 		s = splnet();
1202 		if (bp != d->bd_bif) {
1203 			if (d->bd_bif)
1204 				/*
1205 				 * Detach if attached to something else.
1206 				 */
1207 				bpf_detachd(d);
1208 
1209 			bpf_attachd(d, bp);
1210 		}
1211 		reset_d(d);
1212 		splx(s);
1213 		return (0);
1214 	}
1215 	/* Not found. */
1216 	return (ENXIO);
1217 }
1218 
1219 /*
1220  * Copy the interface name to the ifreq.
1221  */
1222 static void
1223 bpf_ifname(struct ifnet *ifp, struct ifreq *ifr)
1224 {
1225 	memcpy(ifr->ifr_name, ifp->if_xname, IFNAMSIZ);
1226 }
1227 
1228 static int
1229 bpf_stat(struct file *fp, struct stat *st)
1230 {
1231 	struct bpf_d *d = fp->f_data;
1232 
1233 	(void)memset(st, 0, sizeof(*st));
1234 	KERNEL_LOCK(1, NULL);
1235 	st->st_dev = makedev(cdevsw_lookup_major(&bpf_cdevsw), d->bd_pid);
1236 	st->st_atimespec = d->bd_atime;
1237 	st->st_mtimespec = d->bd_mtime;
1238 	st->st_ctimespec = st->st_birthtimespec = d->bd_btime;
1239 	st->st_uid = kauth_cred_geteuid(fp->f_cred);
1240 	st->st_gid = kauth_cred_getegid(fp->f_cred);
1241 	st->st_mode = S_IFCHR;
1242 	KERNEL_UNLOCK_ONE(NULL);
1243 	return 0;
1244 }
1245 
1246 /*
1247  * Support for poll() system call
1248  *
1249  * Return true iff the specific operation will not block indefinitely - with
1250  * the assumption that it is safe to positively acknowledge a request for the
1251  * ability to write to the BPF device.
1252  * Otherwise, return false but make a note that a selnotify() must be done.
1253  */
1254 static int
1255 bpf_poll(struct file *fp, int events)
1256 {
1257 	struct bpf_d *d = fp->f_data;
1258 	int s = splnet();
1259 	int revents;
1260 
1261 	/*
1262 	 * Refresh the PID associated with this bpf file.
1263 	 */
1264 	KERNEL_LOCK(1, NULL);
1265 	d->bd_pid = curproc->p_pid;
1266 
1267 	revents = events & (POLLOUT | POLLWRNORM);
1268 	if (events & (POLLIN | POLLRDNORM)) {
1269 		/*
1270 		 * An imitation of the FIONREAD ioctl code.
1271 		 */
1272 		if (d->bd_hlen != 0 ||
1273 		    ((d->bd_immediate || d->bd_state == BPF_TIMED_OUT) &&
1274 		     d->bd_slen != 0)) {
1275 			revents |= events & (POLLIN | POLLRDNORM);
1276 		} else {
1277 			selrecord(curlwp, &d->bd_sel);
1278 			/* Start the read timeout if necessary */
1279 			if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
1280 				callout_reset(&d->bd_callout, d->bd_rtout,
1281 					      bpf_timed_out, d);
1282 				d->bd_state = BPF_WAITING;
1283 			}
1284 		}
1285 	}
1286 
1287 	KERNEL_UNLOCK_ONE(NULL);
1288 	splx(s);
1289 	return (revents);
1290 }
1291 
1292 static void
1293 filt_bpfrdetach(struct knote *kn)
1294 {
1295 	struct bpf_d *d = kn->kn_hook;
1296 	int s;
1297 
1298 	KERNEL_LOCK(1, NULL);
1299 	s = splnet();
1300 	SLIST_REMOVE(&d->bd_sel.sel_klist, kn, knote, kn_selnext);
1301 	splx(s);
1302 	KERNEL_UNLOCK_ONE(NULL);
1303 }
1304 
1305 static int
1306 filt_bpfread(struct knote *kn, long hint)
1307 {
1308 	struct bpf_d *d = kn->kn_hook;
1309 	int rv;
1310 
1311 	KERNEL_LOCK(1, NULL);
1312 	kn->kn_data = d->bd_hlen;
1313 	if (d->bd_immediate)
1314 		kn->kn_data += d->bd_slen;
1315 	rv = (kn->kn_data > 0);
1316 	KERNEL_UNLOCK_ONE(NULL);
1317 	return rv;
1318 }
1319 
1320 static const struct filterops bpfread_filtops =
1321 	{ 1, NULL, filt_bpfrdetach, filt_bpfread };
1322 
1323 static int
1324 bpf_kqfilter(struct file *fp, struct knote *kn)
1325 {
1326 	struct bpf_d *d = fp->f_data;
1327 	struct klist *klist;
1328 	int s;
1329 
1330 	KERNEL_LOCK(1, NULL);
1331 
1332 	switch (kn->kn_filter) {
1333 	case EVFILT_READ:
1334 		klist = &d->bd_sel.sel_klist;
1335 		kn->kn_fop = &bpfread_filtops;
1336 		break;
1337 
1338 	default:
1339 		KERNEL_UNLOCK_ONE(NULL);
1340 		return (EINVAL);
1341 	}
1342 
1343 	kn->kn_hook = d;
1344 
1345 	s = splnet();
1346 	SLIST_INSERT_HEAD(klist, kn, kn_selnext);
1347 	splx(s);
1348 	KERNEL_UNLOCK_ONE(NULL);
1349 
1350 	return (0);
1351 }
1352 
1353 /*
1354  * Copy data from an mbuf chain into a buffer.  This code is derived
1355  * from m_copydata in sys/uipc_mbuf.c.
1356  */
1357 static void *
1358 bpf_mcpy(void *dst_arg, const void *src_arg, size_t len)
1359 {
1360 	const struct mbuf *m;
1361 	u_int count;
1362 	u_char *dst;
1363 
1364 	m = src_arg;
1365 	dst = dst_arg;
1366 	while (len > 0) {
1367 		if (m == NULL)
1368 			panic("bpf_mcpy");
1369 		count = min(m->m_len, len);
1370 		memcpy(dst, mtod(m, const void *), count);
1371 		m = m->m_next;
1372 		dst += count;
1373 		len -= count;
1374 	}
1375 	return dst_arg;
1376 }
1377 
1378 /*
1379  * Dispatch a packet to all the listeners on interface bp.
1380  *
1381  * pkt     pointer to the packet, either a data buffer or an mbuf chain
1382  * buflen  buffer length, if pkt is a data buffer
1383  * cpfn    a function that can copy pkt into the listener's buffer
1384  * pktlen  length of the packet
1385  * rcv     true if packet came in
1386  */
1387 static inline void
1388 bpf_deliver(struct bpf_if *bp, void *(*cpfn)(void *, const void *, size_t),
1389     void *pkt, u_int pktlen, u_int buflen, const bool rcv)
1390 {
1391 	bpf_ctx_t *bc = bpf_default_ctx();
1392 	bpf_args_t args = {
1393 		.pkt = pkt,
1394 		.wirelen = pktlen,
1395 		.buflen = buflen,
1396 		.arg = NULL
1397 	};
1398 	struct bpf_d *d;
1399 	struct timespec ts;
1400 	bool gottime = false;
1401 
1402 	/*
1403 	 * Note that the IPL does not have to be raised at this point.
1404 	 * The only problem that could arise here would be if two different
1405 	 * interfaces shared any data.  This is not the case.
1406 	 */
1407 	for (d = bp->bif_dlist; d != NULL; d = d->bd_next) {
1408 		u_int slen;
1409 
1410 		if (!d->bd_seesent && !rcv) {
1411 			continue;
1412 		}
1413 		d->bd_rcount++;
1414 		bpf_gstats.bs_recv++;
1415 
1416 		if (d->bd_jitcode)
1417 			slen = d->bd_jitcode(pkt, pktlen, buflen);
1418 		else
1419 			slen = bpf_filter_ext(bc, d->bd_filter, &args);
1420 
1421 		if (!slen) {
1422 			continue;
1423 		}
1424 		if (!gottime) {
1425 			gottime = true;
1426 			nanotime(&ts);
1427 		}
1428 		catchpacket(d, pkt, pktlen, slen, cpfn, &ts);
1429 	}
1430 }
1431 
1432 /*
1433  * Incoming linkage from device drivers.  Process the packet pkt, of length
1434  * pktlen, which is stored in a contiguous buffer.  The packet is parsed
1435  * by each process' filter, and if accepted, stashed into the corresponding
1436  * buffer.
1437  */
1438 static void
1439 _bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
1440 {
1441 
1442 	bpf_deliver(bp, memcpy, pkt, pktlen, pktlen, true);
1443 }
1444 
1445 /*
1446  * Incoming linkage from device drivers, when the head of the packet is in
1447  * a buffer, and the tail is in an mbuf chain.
1448  */
1449 static void
1450 _bpf_mtap2(struct bpf_if *bp, void *data, u_int dlen, struct mbuf *m)
1451 {
1452 	u_int pktlen;
1453 	struct mbuf mb;
1454 
1455 	/* Skip outgoing duplicate packets. */
1456 	if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif == NULL) {
1457 		m->m_flags &= ~M_PROMISC;
1458 		return;
1459 	}
1460 
1461 	pktlen = m_length(m) + dlen;
1462 
1463 	/*
1464 	 * Craft on-stack mbuf suitable for passing to bpf_filter.
1465 	 * Note that we cut corners here; we only set up what's
1466 	 * absolutely needed--this mbuf should never go anywhere else.
1467 	 */
1468 	(void)memset(&mb, 0, sizeof(mb));
1469 	mb.m_next = m;
1470 	mb.m_data = data;
1471 	mb.m_len = dlen;
1472 
1473 	bpf_deliver(bp, bpf_mcpy, &mb, pktlen, 0, m->m_pkthdr.rcvif != NULL);
1474 }
1475 
1476 /*
1477  * Incoming linkage from device drivers, when packet is in an mbuf chain.
1478  */
1479 static void
1480 _bpf_mtap(struct bpf_if *bp, struct mbuf *m)
1481 {
1482 	void *(*cpfn)(void *, const void *, size_t);
1483 	u_int pktlen, buflen;
1484 	void *marg;
1485 
1486 	/* Skip outgoing duplicate packets. */
1487 	if ((m->m_flags & M_PROMISC) != 0 && m->m_pkthdr.rcvif == NULL) {
1488 		m->m_flags &= ~M_PROMISC;
1489 		return;
1490 	}
1491 
1492 	pktlen = m_length(m);
1493 
1494 	if (pktlen == m->m_len) {
1495 		cpfn = (void *)memcpy;
1496 		marg = mtod(m, void *);
1497 		buflen = pktlen;
1498 	} else {
1499 		cpfn = bpf_mcpy;
1500 		marg = m;
1501 		buflen = 0;
1502 	}
1503 
1504 	bpf_deliver(bp, cpfn, marg, pktlen, buflen, m->m_pkthdr.rcvif != NULL);
1505 }
1506 
1507 /*
1508  * We need to prepend the address family as
1509  * a four byte field.  Cons up a dummy header
1510  * to pacify bpf.  This is safe because bpf
1511  * will only read from the mbuf (i.e., it won't
1512  * try to free it or keep a pointer to it).
1513  */
1514 static void
1515 _bpf_mtap_af(struct bpf_if *bp, uint32_t af, struct mbuf *m)
1516 {
1517 	struct mbuf m0;
1518 
1519 	m0.m_flags = 0;
1520 	m0.m_next = m;
1521 	m0.m_len = 4;
1522 	m0.m_data = (char *)&af;
1523 
1524 	_bpf_mtap(bp, &m0);
1525 }
1526 
1527 /*
1528  * Put the SLIP pseudo-"link header" in place.
1529  * Note this M_PREPEND() should never fail,
1530  * since we know we always have enough space
1531  * in the input buffer.
1532  */
1533 static void
1534 _bpf_mtap_sl_in(struct bpf_if *bp, u_char *chdr, struct mbuf **m)
1535 {
1536 	int s;
1537 	u_char *hp;
1538 
1539 	M_PREPEND(*m, SLIP_HDRLEN, M_DONTWAIT);
1540 	if (*m == NULL)
1541 		return;
1542 
1543 	hp = mtod(*m, u_char *);
1544 	hp[SLX_DIR] = SLIPDIR_IN;
1545 	(void)memcpy(&hp[SLX_CHDR], chdr, CHDR_LEN);
1546 
1547 	s = splnet();
1548 	_bpf_mtap(bp, *m);
1549 	splx(s);
1550 
1551 	m_adj(*m, SLIP_HDRLEN);
1552 }
1553 
1554 /*
1555  * Put the SLIP pseudo-"link header" in
1556  * place.  The compressed header is now
1557  * at the beginning of the mbuf.
1558  */
1559 static void
1560 _bpf_mtap_sl_out(struct bpf_if *bp, u_char *chdr, struct mbuf *m)
1561 {
1562 	struct mbuf m0;
1563 	u_char *hp;
1564 	int s;
1565 
1566 	m0.m_flags = 0;
1567 	m0.m_next = m;
1568 	m0.m_data = m0.m_dat;
1569 	m0.m_len = SLIP_HDRLEN;
1570 
1571 	hp = mtod(&m0, u_char *);
1572 
1573 	hp[SLX_DIR] = SLIPDIR_OUT;
1574 	(void)memcpy(&hp[SLX_CHDR], chdr, CHDR_LEN);
1575 
1576 	s = splnet();
1577 	_bpf_mtap(bp, &m0);
1578 	splx(s);
1579 	m_freem(m);
1580 }
1581 
1582 static int
1583 bpf_hdrlen(struct bpf_d *d)
1584 {
1585 	int hdrlen = d->bd_bif->bif_hdrlen;
1586 	/*
1587 	 * Compute the length of the bpf header.  This is not necessarily
1588 	 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
1589 	 * that the network layer header begins on a longword boundary (for
1590 	 * performance reasons and to alleviate alignment restrictions).
1591 	 */
1592 #ifdef _LP64
1593 	if (d->bd_compat32)
1594 		return (BPF_WORDALIGN32(hdrlen + SIZEOF_BPF_HDR32) - hdrlen);
1595 	else
1596 #endif
1597 		return (BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen);
1598 }
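/*
 * Illustrative sketch (userland, non-compat32 layout assumed) of how a
 * consumer walks the packets laid down by catchpacket() below; "handle"
 * is a hypothetical callback:
 *
 *	char *p = buf;
 *	while (p < buf + nread) {
 *		struct bpf_hdr *bh = (struct bpf_hdr *)p;
 *		handle(p + bh->bh_hdrlen, bh->bh_caplen);
 *		p += BPF_WORDALIGN(bh->bh_hdrlen + bh->bh_caplen);
 *	}
 */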
1599 
1600 /*
1601  * Move the packet data from interface memory (pkt) into the
1602  * store buffer.  Call the wakeup functions if it's time to wake up
1603  * a listener (buffer full).  "cpfn" is the routine called to do the
1604  * actual data transfer. memcpy is passed in to copy contiguous chunks,
1605  * while bpf_mcpy is passed in to copy mbuf chains.  In the latter case,
1606  * pkt is really an mbuf.
1607  */
1608 static void
1609 catchpacket(struct bpf_d *d, u_char *pkt, u_int pktlen, u_int snaplen,
1610     void *(*cpfn)(void *, const void *, size_t), struct timespec *ts)
1611 {
1612 	char *h;
1613 	int totlen, curlen, caplen;
1614 	int hdrlen = bpf_hdrlen(d);
1615 	int do_wakeup = 0;
1616 
1617 	++d->bd_ccount;
1618 	++bpf_gstats.bs_capt;
1619 	/*
1620 	 * Figure out how many bytes to move.  If the packet is
1621 	 * at least as long as the snapshot length, transfer that
1622 	 * much.  Otherwise, transfer the whole packet (unless
1623 	 * we hit the buffer size limit).
1624 	 */
1625 	totlen = hdrlen + min(snaplen, pktlen);
1626 	if (totlen > d->bd_bufsize)
1627 		totlen = d->bd_bufsize;
1628 	/*
1629 	 * If we adjusted totlen to fit the bufsize, it could be that
1630 	 * totlen is smaller than hdrlen because of the link layer header.
1631 	 */
1632 	caplen = totlen - hdrlen;
1633 	if (caplen < 0)
1634 		caplen = 0;
1635 
1636 	/*
1637 	 * Round up the end of the previous packet to the next longword.
1638 	 */
1639 #ifdef _LP64
1640 	if (d->bd_compat32)
1641 		curlen = BPF_WORDALIGN32(d->bd_slen);
1642 	else
1643 #endif
1644 		curlen = BPF_WORDALIGN(d->bd_slen);
1645 	if (curlen + totlen > d->bd_bufsize) {
1646 		/*
1647 		 * This packet will overflow the storage buffer.
1648 		 * Rotate the buffers if we can, then wakeup any
1649 		 * pending reads.
1650 		 */
1651 		if (d->bd_fbuf == 0) {
1652 			/*
1653 			 * We haven't completed the previous read yet,
1654 			 * so drop the packet.
1655 			 */
1656 			++d->bd_dcount;
1657 			++bpf_gstats.bs_drop;
1658 			return;
1659 		}
1660 		ROTATE_BUFFERS(d);
1661 		do_wakeup = 1;
1662 		curlen = 0;
1663 	} else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT) {
1664 		/*
1665 		 * Immediate mode is set, or the read timeout has
1666 		 * already expired during a select call.  A packet
1667 		 * arrived, so the reader should be woken up.
1668 		 */
1669 		do_wakeup = 1;
1670 	}
1671 
1672 	/*
1673 	 * Append the bpf header.
1674 	 */
1675 	h = (char *)d->bd_sbuf + curlen;
1676 #ifdef _LP64
1677 	if (d->bd_compat32) {
1678 		struct bpf_hdr32 *hp32;
1679 
1680 		hp32 = (struct bpf_hdr32 *)h;
1681 		hp32->bh_tstamp.tv_sec = ts->tv_sec;
1682 		hp32->bh_tstamp.tv_usec = ts->tv_nsec / 1000;
1683 		hp32->bh_datalen = pktlen;
1684 		hp32->bh_hdrlen = hdrlen;
1685 		hp32->bh_caplen = caplen;
1686 	} else
1687 #endif
1688 	{
1689 		struct bpf_hdr *hp;
1690 
1691 		hp = (struct bpf_hdr *)h;
1692 		hp->bh_tstamp.tv_sec = ts->tv_sec;
1693 		hp->bh_tstamp.tv_usec = ts->tv_nsec / 1000;
1694 		hp->bh_datalen = pktlen;
1695 		hp->bh_hdrlen = hdrlen;
1696 		hp->bh_caplen = caplen;
1697 	}
1698 
1699 	/*
1700 	 * Copy the packet data into the store buffer and update its length.
1701 	 */
1702 	(*cpfn)(h + hdrlen, pkt, caplen);
1703 	d->bd_slen = curlen + totlen;
1704 
1705 	/*
1706 	 * Call bpf_wakeup after bd_slen has been updated so that kevent(2)
1707 	 * will cause filt_bpfread() to be called with it adjusted.
1708 	 */
1709 	if (do_wakeup)
1710 		bpf_wakeup(d);
1711 }
1712 
1713 /*
1714  * Allocate the packet buffers for a descriptor and reset its lengths.
1715  */
1716 static int
1717 bpf_allocbufs(struct bpf_d *d)
1718 {
1719 
1720 	d->bd_fbuf = malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK | M_CANFAIL);
1721 	if (!d->bd_fbuf)
1722 		return (ENOBUFS);
1723 	d->bd_sbuf = malloc(d->bd_bufsize, M_DEVBUF, M_WAITOK | M_CANFAIL);
1724 	if (!d->bd_sbuf) {
1725 		free(d->bd_fbuf, M_DEVBUF);
1726 		return (ENOBUFS);
1727 	}
1728 	d->bd_slen = 0;
1729 	d->bd_hlen = 0;
1730 	return (0);
1731 }
1732 
1733 /*
1734  * Free buffers currently in use by a descriptor.
1735  * Called on close.
1736  */
1737 static void
1738 bpf_freed(struct bpf_d *d)
1739 {
1740 	/*
1741 	 * We don't need to lock out interrupts since this descriptor has
1742 	 * been detached from its interface and it hasn't yet been marked
1743 	 * free.
1744 	 */
1745 	if (d->bd_sbuf != NULL) {
1746 		free(d->bd_sbuf, M_DEVBUF);
1747 		if (d->bd_hbuf != NULL)
1748 			free(d->bd_hbuf, M_DEVBUF);
1749 		if (d->bd_fbuf != NULL)
1750 			free(d->bd_fbuf, M_DEVBUF);
1751 	}
1752 	if (d->bd_filter)
1753 		free(d->bd_filter, M_DEVBUF);
1754 
1755 	if (d->bd_jitcode != NULL) {
1756 		bpf_jit_freecode(d->bd_jitcode);
1757 	}
1758 }
1759 
1760 /*
1761  * Attach an interface to bpf.  dlt is the link layer type;
1762  * hdrlen is the fixed size of the link header for the specified dlt
1763  * (variable length headers not yet supported).
1764  */
1765 static void
1766 _bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
1767 {
1768 	struct bpf_if *bp;
1769 	bp = malloc(sizeof(*bp), M_DEVBUF, M_DONTWAIT);
1770 	if (bp == 0)
1771 		panic("bpfattach");
1772 
1773 	bp->bif_dlist = 0;
1774 	bp->bif_driverp = driverp;
1775 	bp->bif_ifp = ifp;
1776 	bp->bif_dlt = dlt;
1777 
1778 	bp->bif_next = bpf_iflist;
1779 	bpf_iflist = bp;
1780 
1781 	*bp->bif_driverp = 0;
1782 
1783 	bp->bif_hdrlen = hdrlen;
1784 #if 0
1785 	printf("bpf: %s attached\n", ifp->if_xname);
1786 #endif
1787 }
1788 
1789 /*
1790  * Remove an interface from bpf.
1791  */
1792 static void
1793 _bpfdetach(struct ifnet *ifp)
1794 {
1795 	struct bpf_if *bp, **pbp;
1796 	struct bpf_d *d;
1797 	int s;
1798 
1799 	/* Detach any descriptors open on this interface. */
1800 	LIST_FOREACH(d, &bpf_list, bd_list) {
1801 		if (d->bd_bif != NULL && d->bd_bif->bif_ifp == ifp) {
1802 			/*
1803 			 * Detach the descriptor from an interface now.
1804 			 * It will be freed later by the close routine.
1805 			 */
1806 			s = splnet();
1807 			d->bd_promisc = 0;	/* we can't touch device. */
1808 			bpf_detachd(d);
1809 			splx(s);
1810 		}
1811 	}
1812 
1813   again:
1814 	for (bp = bpf_iflist, pbp = &bpf_iflist;
1815 	     bp != NULL; pbp = &bp->bif_next, bp = bp->bif_next) {
1816 		if (bp->bif_ifp == ifp) {
1817 			*pbp = bp->bif_next;
1818 			free(bp, M_DEVBUF);
1819 			goto again;
1820 		}
1821 	}
1822 }
1823 
1824 /*
1825  * Change the data link type of an interface.
1826  */
1827 static void
1828 _bpf_change_type(struct ifnet *ifp, u_int dlt, u_int hdrlen)
1829 {
1830 	struct bpf_if *bp;
1831 
1832 	for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
1833 		if (bp->bif_driverp == &ifp->if_bpf)
1834 			break;
1835 	}
1836 	if (bp == NULL)
1837 		panic("bpf_change_type");
1838 
1839 	bp->bif_dlt = dlt;
1840 
1841 	bp->bif_hdrlen = hdrlen;
1842 }
1843 
1844 /*
1845  * Get a list of available data link types of the interface.
1846  */
1847 static int
1848 bpf_getdltlist(struct bpf_d *d, struct bpf_dltlist *bfl)
1849 {
1850 	int n, error;
1851 	struct ifnet *ifp;
1852 	struct bpf_if *bp;
1853 
1854 	ifp = d->bd_bif->bif_ifp;
1855 	n = 0;
1856 	error = 0;
1857 	for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
1858 		if (bp->bif_ifp != ifp)
1859 			continue;
1860 		if (bfl->bfl_list != NULL) {
1861 			if (n >= bfl->bfl_len)
1862 				return ENOMEM;
1863 			error = copyout(&bp->bif_dlt,
1864 			    bfl->bfl_list + n, sizeof(u_int));
1865 		}
1866 		n++;
1867 	}
1868 	bfl->bfl_len = n;
1869 	return error;
1870 }
1871 
1872 /*
1873  * Set the data link type of a BPF instance.
1874  */
1875 static int
1876 bpf_setdlt(struct bpf_d *d, u_int dlt)
1877 {
1878 	int s, error, opromisc;
1879 	struct ifnet *ifp;
1880 	struct bpf_if *bp;
1881 
1882 	if (d->bd_bif->bif_dlt == dlt)
1883 		return 0;
1884 	ifp = d->bd_bif->bif_ifp;
1885 	for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
1886 		if (bp->bif_ifp == ifp && bp->bif_dlt == dlt)
1887 			break;
1888 	}
1889 	if (bp == NULL)
1890 		return EINVAL;
1891 	s = splnet();
1892 	opromisc = d->bd_promisc;
1893 	bpf_detachd(d);
1894 	bpf_attachd(d, bp);
1895 	reset_d(d);
1896 	if (opromisc) {
1897 		error = ifpromisc(bp->bif_ifp, 1);
1898 		if (error)
1899 			printf("%s: bpf_setdlt: ifpromisc failed (%d)\n",
1900 			    bp->bif_ifp->if_xname, error);
1901 		else
1902 			d->bd_promisc = 1;
1903 	}
1904 	splx(s);
1905 	return 0;
1906 }
1907 
1908 static int
1909 sysctl_net_bpf_maxbufsize(SYSCTLFN_ARGS)
1910 {
1911 	int newsize, error;
1912 	struct sysctlnode node;
1913 
1914 	node = *rnode;
1915 	node.sysctl_data = &newsize;
1916 	newsize = bpf_maxbufsize;
1917 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
1918 	if (error || newp == NULL)
1919 		return (error);
1920 
1921 	if (newsize < BPF_MINBUFSIZE || newsize > BPF_MAXBUFSIZE)
1922 		return (EINVAL);
1923 
1924 	bpf_maxbufsize = newsize;
1925 
1926 	return (0);
1927 }
1928 
1929 static int
1930 sysctl_net_bpf_jit(SYSCTLFN_ARGS)
1931 {
1932 	bool newval;
1933 	int error;
1934 	struct sysctlnode node;
1935 
1936 	node = *rnode;
1937 	node.sysctl_data = &newval;
1938 	newval = bpf_jit;
1939 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
1940 	if (error != 0 || newp == NULL)
1941 		return error;
1942 
1943 	bpf_jit = newval;
1944 
1945 	/*
1946 	 * Do a full sync to publish new bpf_jit value and
1947 	 * update bpfjit_module_ops.bj_generate_code variable.
1948 	 */
1949 	membar_sync();
1950 
1951 	if (newval && bpfjit_module_ops.bj_generate_code == NULL) {
1952 		printf("WARNING: jit activation is postponed "
1953 		    "until the bpfjit module is loaded\n");
1954 	}
1955 
1956 	return 0;
1957 }
1958 
1959 static int
1960 sysctl_net_bpf_peers(SYSCTLFN_ARGS)
1961 {
1962 	int    error, elem_count;
1963 	struct bpf_d	 *dp;
1964 	struct bpf_d_ext  dpe;
1965 	size_t len, needed, elem_size, out_size;
1966 	char   *sp;
1967 
1968 	if (namelen == 1 && name[0] == CTL_QUERY)
1969 		return (sysctl_query(SYSCTLFN_CALL(rnode)));
1970 
1971 	if (namelen != 2)
1972 		return (EINVAL);
1973 
1974 	/* BPF peers is privileged information. */
1975 	error = kauth_authorize_network(l->l_cred, KAUTH_NETWORK_INTERFACE,
1976 	    KAUTH_REQ_NETWORK_INTERFACE_GETPRIV, NULL, NULL, NULL);
1977 	if (error)
1978 		return (EPERM);
1979 
1980 	len = (oldp != NULL) ? *oldlenp : 0;
1981 	sp = oldp;
1982 	elem_size = name[0];
1983 	elem_count = name[1];
1984 	out_size = MIN(sizeof(dpe), elem_size);
1985 	needed = 0;
1986 
1987 	if (elem_size < 1 || elem_count < 0)
1988 		return (EINVAL);
1989 
1990 	mutex_enter(&bpf_mtx);
1991 	LIST_FOREACH(dp, &bpf_list, bd_list) {
1992 		if (len >= elem_size && elem_count > 0) {
1993 #define BPF_EXT(field)	dpe.bde_ ## field = dp->bd_ ## field
1994 			BPF_EXT(bufsize);
1995 			BPF_EXT(promisc);
1996 			BPF_EXT(state);
1997 			BPF_EXT(immediate);
1998 			BPF_EXT(hdrcmplt);
1999 			BPF_EXT(seesent);
2000 			BPF_EXT(pid);
2001 			BPF_EXT(rcount);
2002 			BPF_EXT(dcount);
2003 			BPF_EXT(ccount);
2004 #undef BPF_EXT
2005 			if (dp->bd_bif)
2006 				(void)strlcpy(dpe.bde_ifname,
2007 				    dp->bd_bif->bif_ifp->if_xname,
2008 				    IFNAMSIZ - 1);
2009 			else
2010 				dpe.bde_ifname[0] = '\0';
2011 
2012 			error = copyout(&dpe, sp, out_size);
2013 			if (error)
2014 				break;
2015 			sp += elem_size;
2016 			len -= elem_size;
2017 		}
2018 		needed += elem_size;
2019 		if (elem_count > 0 && elem_count != INT_MAX)
2020 			elem_count--;
2021 	}
2022 	mutex_exit(&bpf_mtx);
2023 
2024 	*oldlenp = needed;
2025 
2026 	return (error);
2027 }
2028 
2029 static struct sysctllog *bpf_sysctllog;
2030 static void
2031 sysctl_net_bpf_setup(void)
2032 {
2033 	const struct sysctlnode *node;
2034 
2035 	node = NULL;
2036 	sysctl_createv(&bpf_sysctllog, 0, NULL, &node,
2037 		       CTLFLAG_PERMANENT,
2038 		       CTLTYPE_NODE, "bpf",
2039 		       SYSCTL_DESCR("BPF options"),
2040 		       NULL, 0, NULL, 0,
2041 		       CTL_NET, CTL_CREATE, CTL_EOL);
2042 	if (node != NULL) {
2043 		sysctl_createv(&bpf_sysctllog, 0, NULL, NULL,
2044 			CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
2045 			CTLTYPE_BOOL, "jit",
2046 			SYSCTL_DESCR("Toggle Just-In-Time compilation"),
2047 			sysctl_net_bpf_jit, 0, &bpf_jit, 0,
2048 			CTL_NET, node->sysctl_num, CTL_CREATE, CTL_EOL);
2049 		sysctl_createv(&bpf_sysctllog, 0, NULL, NULL,
2050 			CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
2051 			CTLTYPE_INT, "maxbufsize",
2052 			SYSCTL_DESCR("Maximum size for data capture buffer"),
2053 			sysctl_net_bpf_maxbufsize, 0, &bpf_maxbufsize, 0,
2054 			CTL_NET, node->sysctl_num, CTL_CREATE, CTL_EOL);
2055 		sysctl_createv(&bpf_sysctllog, 0, NULL, NULL,
2056 			CTLFLAG_PERMANENT,
2057 			CTLTYPE_STRUCT, "stats",
2058 			SYSCTL_DESCR("BPF stats"),
2059 			NULL, 0, &bpf_gstats, sizeof(bpf_gstats),
2060 			CTL_NET, node->sysctl_num, CTL_CREATE, CTL_EOL);
2061 		sysctl_createv(&bpf_sysctllog, 0, NULL, NULL,
2062 			CTLFLAG_PERMANENT,
2063 			CTLTYPE_STRUCT, "peers",
2064 			SYSCTL_DESCR("BPF peers"),
2065 			sysctl_net_bpf_peers, 0, NULL, 0,
2066 			CTL_NET, node->sysctl_num, CTL_CREATE, CTL_EOL);
2067 	}
2068 
2069 }
2070 
2071 struct bpf_ops bpf_ops_kernel = {
2072 	.bpf_attach =		_bpfattach,
2073 	.bpf_detach =		_bpfdetach,
2074 	.bpf_change_type =	_bpf_change_type,
2075 
2076 	.bpf_tap =		_bpf_tap,
2077 	.bpf_mtap =		_bpf_mtap,
2078 	.bpf_mtap2 =		_bpf_mtap2,
2079 	.bpf_mtap_af =		_bpf_mtap_af,
2080 	.bpf_mtap_sl_in =	_bpf_mtap_sl_in,
2081 	.bpf_mtap_sl_out =	_bpf_mtap_sl_out,
2082 };
2083 
2084 MODULE(MODULE_CLASS_DRIVER, bpf, NULL);
2085 
2086 static int
2087 bpf_modcmd(modcmd_t cmd, void *arg)
2088 {
2089 	devmajor_t bmajor, cmajor;
2090 	int error;
2091 
2092 	bmajor = cmajor = NODEVMAJOR;
2093 
2094 	switch (cmd) {
2095 	case MODULE_CMD_INIT:
2096 		bpfilterattach(0);
2097 		error = devsw_attach("bpf", NULL, &bmajor,
2098 		    &bpf_cdevsw, &cmajor);
2099 		if (error == EEXIST)
2100 			error = 0; /* maybe built-in ... improve eventually */
2101 		if (error)
2102 			break;
2103 
2104 		bpf_ops_handover_enter(&bpf_ops_kernel);
2105 		atomic_swap_ptr(&bpf_ops, &bpf_ops_kernel);
2106 		bpf_ops_handover_exit();
2107 		sysctl_net_bpf_setup();
2108 		break;
2109 
2110 	case MODULE_CMD_FINI:
2111 		/*
2112 		 * While there is no reference counting for bpf callers,
2113 		 * unload could at least in theory be done similarly to
2114 		 * system call disestablishment.  This should even be
2115 		 * a little simpler:
2116 		 *
2117 		 * 1) replace op vector with stubs
2118 		 * 2) post update to all cpus with xc
2119 		 * 3) check that nobody is in bpf anymore
2120 		 *    (it's doubtful we'd want something like l_sysent,
2121 		 *     but we could do something like *signed* percpu
2122 		 *     counters.  if the sum is 0, we're good).
2123 		 * 4) if fail, unroll changes
2124 		 *
2125 		 * NOTE: the change won't be atomic to the outside.  Some
2126 		 * packets may not be captured even if the unload is
2127 		 * not successful.  I think packet capture not working
2128 		 * is a perfectly logical consequence of trying to
2129 		 * disable packet capture.
2130 		 */
2131 		error = EOPNOTSUPP;
2132 		/* insert sysctl teardown */
2133 		break;
2134 
2135 	default:
2136 		error = ENOTTY;
2137 		break;
2138 	}
2139 
2140 	return error;
2141 }
2142