xref: /dflybsd-src/sys/net/bpf.c (revision cd29885abfb8f68adb0c082e313b891156d66964)
1 /*
2  * Copyright (c) 1990, 1991, 1993
3  *	The Regents of the University of California.  All rights reserved.
4  *
5  * This code is derived from the Stanford/CMU enet packet filter,
6  * (net/enet.c) distributed as part of 4.3BSD, and code contributed
7  * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
8  * Berkeley Laboratory.
9  *
10  * Redistribution and use in source and binary forms, with or without
11  * modification, are permitted provided that the following conditions
12  * are met:
13  * 1. Redistributions of source code must retain the above copyright
14  *    notice, this list of conditions and the following disclaimer.
15  * 2. Redistributions in binary form must reproduce the above copyright
16  *    notice, this list of conditions and the following disclaimer in the
17  *    documentation and/or other materials provided with the distribution.
18  * 3. All advertising materials mentioning features or use of this software
19  *    must display the following acknowledgement:
20  *	This product includes software developed by the University of
21  *	California, Berkeley and its contributors.
22  * 4. Neither the name of the University nor the names of its contributors
23  *    may be used to endorse or promote products derived from this software
24  *    without specific prior written permission.
25  *
26  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
27  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
28  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
29  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
30  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
31  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
32  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
33  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
34  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
35  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
36  * SUCH DAMAGE.
37  *
38  *      @(#)bpf.c	8.2 (Berkeley) 3/28/94
39  *
40  * $FreeBSD: src/sys/net/bpf.c,v 1.59.2.12 2002/04/14 21:41:48 luigi Exp $
41  * $DragonFly: src/sys/net/bpf.c,v 1.50 2008/09/23 11:28:49 sephe Exp $
42  */
43 
44 #include "use_bpf.h"
45 
46 #include <sys/param.h>
47 #include <sys/systm.h>
48 #include <sys/conf.h>
49 #include <sys/device.h>
50 #include <sys/malloc.h>
51 #include <sys/mbuf.h>
52 #include <sys/time.h>
53 #include <sys/proc.h>
54 #include <sys/signalvar.h>
55 #include <sys/filio.h>
56 #include <sys/sockio.h>
57 #include <sys/ttycom.h>
58 #include <sys/filedesc.h>
59 
60 #include <sys/poll.h>
61 
62 #include <sys/socket.h>
63 #include <sys/vnode.h>
64 
65 #include <sys/thread2.h>
66 
67 #include <net/if.h>
68 #include <net/bpf.h>
69 #include <net/bpfdesc.h>
70 #include <net/netmsg2.h>
71 
72 #include <netinet/in.h>
73 #include <netinet/if_ether.h>
74 #include <sys/kernel.h>
75 #include <sys/sysctl.h>
76 
77 #include <vfs/devfs/devfs.h>
78 
79 struct netmsg_bpf_output {
80 	struct netmsg	nm_netmsg;
81 	struct mbuf	*nm_mbuf;
82 	struct ifnet	*nm_ifp;
83 	struct sockaddr	*nm_dst;
84 };
85 
86 MALLOC_DEFINE(M_BPF, "BPF", "BPF data");
87 DEVFS_DECLARE_CLONE_BITMAP(bpf);
88 
89 #if NBPF > 0
90 
91 /*
92  * The default read buffer size is tunable via sysctl.
93  */
94 static int bpf_bufsize = BPF_DEFAULTBUFSIZE;
95 SYSCTL_INT(_debug, OID_AUTO, bpf_bufsize, CTLFLAG_RW,
96 	   &bpf_bufsize, 0, "");
97 int bpf_maxbufsize = BPF_MAXBUFSIZE;
98 SYSCTL_INT(_debug, OID_AUTO, bpf_maxbufsize, CTLFLAG_RW,
99 	   &bpf_maxbufsize, 0, "");
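
/*
 * Both knobs can be changed at runtime with sysctl(8), e.g.
 * "sysctl debug.bpf_maxbufsize=524288" (value purely illustrative).
 * BIOCSBLEN requests below are clamped to the range
 * [BPF_MINBUFSIZE, bpf_maxbufsize].
 */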
100 
101 /*
102  *  bpf_iflist is the list of interfaces; each corresponds to an ifnet
103  */
104 static struct bpf_if	*bpf_iflist;
105 
106 static int	bpf_allocbufs(struct bpf_d *);
107 static void	bpf_attachd(struct bpf_d *d, struct bpf_if *bp);
108 static void	bpf_detachd(struct bpf_d *d);
109 static void	bpf_resetd(struct bpf_d *);
110 static void	bpf_freed(struct bpf_d *);
111 static void	bpf_mcopy(const void *, void *, size_t);
112 static int	bpf_movein(struct uio *, int, struct mbuf **,
113 			   struct sockaddr *, int *, struct bpf_insn *);
114 static int	bpf_setif(struct bpf_d *, struct ifreq *);
115 static void	bpf_timed_out(void *);
116 static void	bpf_wakeup(struct bpf_d *);
117 static void	catchpacket(struct bpf_d *, u_char *, u_int, u_int,
118 			    void (*)(const void *, void *, size_t),
119 			    const struct timeval *);
120 static int	bpf_setf(struct bpf_d *, struct bpf_program *, u_long cmd);
121 static int	bpf_getdltlist(struct bpf_d *, struct bpf_dltlist *);
122 static int	bpf_setdlt(struct bpf_d *, u_int);
123 static void	bpf_drvinit(void *unused);
124 
125 static d_open_t		bpfopen;
126 static d_clone_t	bpfclone;
127 static d_close_t	bpfclose;
128 static d_read_t		bpfread;
129 static d_write_t	bpfwrite;
130 static d_ioctl_t	bpfioctl;
131 static d_poll_t		bpfpoll;
132 
133 #define CDEV_MAJOR 23
134 static struct dev_ops bpf_ops = {
135 	{ "bpf", CDEV_MAJOR, 0 },
136 	.d_open =	bpfopen,
137 	.d_close =	bpfclose,
138 	.d_read =	bpfread,
139 	.d_write =	bpfwrite,
140 	.d_ioctl =	bpfioctl,
141 	.d_poll =	bpfpoll,
142 };
143 
144 
145 static int
146 bpf_movein(struct uio *uio, int linktype, struct mbuf **mp,
147 	   struct sockaddr *sockp, int *datlen, struct bpf_insn *wfilter)
148 {
149 	struct mbuf *m;
150 	int error;
151 	int len;
152 	int hlen;
153 	int slen;
154 
155 	*datlen = 0;
156 	*mp = NULL;
157 
158 	/*
159 	 * Build a sockaddr based on the data link layer type.
160 	 * We do this at this level because the ethernet header
161 	 * is copied directly into the data field of the sockaddr.
162 	 * In the case of SLIP, there is no header and the packet
163 	 * is forwarded as is.
164 	 * Also, we are careful to leave room at the front of the mbuf
165 	 * for the link level header.
166 	 */
167 	switch (linktype) {
168 	case DLT_SLIP:
169 		sockp->sa_family = AF_INET;
170 		hlen = 0;
171 		break;
172 
173 	case DLT_EN10MB:
174 		sockp->sa_family = AF_UNSPEC;
175 		/* XXX Would MAXLINKHDR be better? */
176 		hlen = sizeof(struct ether_header);
177 		break;
178 
179 	case DLT_RAW:
180 	case DLT_NULL:
181 		sockp->sa_family = AF_UNSPEC;
182 		hlen = 0;
183 		break;
184 
185 	case DLT_ATM_RFC1483:
186 		/*
187 		 * The "en" ATM driver requires a 4-byte ATM pseudo header.
188 		 * Though it isn't standard, the VPI:VCI needs to be
189 		 * specified anyway.
190 		 */
191 		sockp->sa_family = AF_UNSPEC;
192 		hlen = 12;	/* XXX 4(ATM_PH) + 3(LLC) + 5(SNAP) */
193 		break;
194 
195 	case DLT_PPP:
196 		sockp->sa_family = AF_UNSPEC;
197 		hlen = 4;	/* This should match PPP_HDRLEN */
198 		break;
199 
200 	default:
201 		return(EIO);
202 	}
203 
204 	len = uio->uio_resid;
205 	*datlen = len - hlen;
206 	if ((unsigned)len > MCLBYTES)
207 		return(EIO);
208 
209 	m = m_getl(len, MB_WAIT, MT_DATA, M_PKTHDR, NULL);
210 	if (m == NULL)
211 		return(ENOBUFS);
212 	m->m_pkthdr.len = m->m_len = len;
213 	m->m_pkthdr.rcvif = NULL;
214 	*mp = m;
215 
216 	if (m->m_len < hlen) {
217 		error = EPERM;
218 		goto bad;
219 	}
220 
221 	error = uiomove(mtod(m, u_char *), len, uio);
222 	if (error)
223 		goto bad;
224 
225 	slen = bpf_filter(wfilter, mtod(m, u_char *), len, len);
226 	if (slen == 0) {
227 		error = EPERM;
228 		goto bad;
229 	}
230 
231 	/*
232 	 * Make room for link header, and copy it to sockaddr.
233 	 */
234 	if (hlen != 0) {
235 		bcopy(m->m_data, sockp->sa_data, hlen);
236 		m->m_pkthdr.len -= hlen;
237 		m->m_len -= hlen;
238 		m->m_data += hlen; /* XXX */
239 	}
240 	return (0);
241 bad:
242 	m_freem(m);
243 	return(error);
244 }
245 
246 /*
247  * Attach file to the bpf interface, i.e. make d listen on bp.
248  * Must be called from within a critical section.
249  */
250 static void
251 bpf_attachd(struct bpf_d *d, struct bpf_if *bp)
252 {
253 	/*
254 	 * Point d at bp, and add d to the interface's list of listeners.
255 	 * Finally, point the driver's bpf cookie at the interface so
256 	 * it will divert packets to bpf.
257 	 */
258 	d->bd_bif = bp;
259 	SLIST_INSERT_HEAD(&bp->bif_dlist, d, bd_next);
260 	*bp->bif_driverp = bp;
261 }
262 
263 /*
264  * Detach a file from its interface.
265  */
266 static void
267 bpf_detachd(struct bpf_d *d)
268 {
269 	int error;
270 	struct bpf_if *bp;
271 	struct ifnet *ifp;
272 
273 	bp = d->bd_bif;
274 	ifp = bp->bif_ifp;
275 
276 	/* Remove d from the interface's descriptor list. */
277 	SLIST_REMOVE(&bp->bif_dlist, d, bpf_d, bd_next);
278 
279 	if (SLIST_EMPTY(&bp->bif_dlist)) {
280 		/*
281 		 * Let the driver know that there are no more listeners.
282 		 */
283 		*bp->bif_driverp = NULL;
284 	}
285 	d->bd_bif = NULL;
286 	/*
287 	 * Check if this descriptor had requested promiscuous mode.
288 	 * If so, turn it off.
289 	 */
290 	if (d->bd_promisc) {
291 		d->bd_promisc = 0;
292 		error = ifpromisc(ifp, 0);
293 		if (error != 0 && error != ENXIO) {
294 			/*
295 			 * ENXIO can happen if a pccard is unplugged.
296 			 * Something is really wrong if we were able to put
297 			 * the driver into promiscuous mode, but can't
298 			 * take it out.
299 			 */
300 			if_printf(ifp, "bpf_detach: ifpromisc failed(%d)\n",
301 				  error);
302 		}
303 	}
304 }
305 
306 /*
307  * Open the bpf device.  Returns EPERM for a jailed process and EBUSY
308  * if the requested minor is already open by another process.
309  */
310 /* ARGSUSED */
311 static int
312 bpfopen(struct dev_open_args *ap)
313 {
314 	cdev_t dev = ap->a_head.a_dev;
315 	struct bpf_d *d;
316 
317 	if (ap->a_cred->cr_prison)
318 		return(EPERM);
319 
320 	d = dev->si_drv1;
321 	/*
322 	 * Each minor can be opened by only one process.  If the requested
323 	 * minor is in use, return EBUSY.
324 	 */
325 	if (d != NULL)
326 		return(EBUSY);
327 
328 	d = kmalloc(sizeof(*d), M_BPF, M_WAITOK | M_ZERO);
329 	dev->si_drv1 = d;
330 	d->bd_bufsize = bpf_bufsize;
331 	d->bd_sig = SIGIO;
332 	d->bd_seesent = 1;
333 	callout_init(&d->bd_callout);
334 	return(0);
335 }
336 
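/*
 * devfs clone handler: allocate the next free unit from the clone
 * bitmap and create the matching bpf%d device node for the caller.
 */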
337 static int
338 bpfclone(struct dev_clone_args *ap)
339 {
340 	int unit;
341 
342 	unit = devfs_clone_bitmap_get(&DEVFS_CLONE_BITMAP(bpf), 0);
343 	ap->a_dev = make_only_dev(&bpf_ops, unit, 0, 0, 0600, "bpf%d", unit);
344 
345 	return 0;
346 }
347 
348 /*
349  * Close the descriptor by detaching it from its interface,
350  * deallocating its buffers, and marking it free.
351  */
352 /* ARGSUSED */
353 static int
354 bpfclose(struct dev_close_args *ap)
355 {
356 	cdev_t dev = ap->a_head.a_dev;
357 	struct bpf_d *d = dev->si_drv1;
358 
359 	funsetown(d->bd_sigio);
360 	crit_enter();
361 	if (d->bd_state == BPF_WAITING)
362 		callout_stop(&d->bd_callout);
363 	d->bd_state = BPF_IDLE;
364 	if (d->bd_bif != NULL)
365 		bpf_detachd(d);
366 	crit_exit();
367 	bpf_freed(d);
368 	dev->si_drv1 = NULL;
369 	devfs_clone_bitmap_put(&DEVFS_CLONE_BITMAP(bpf), dev->si_uminor);
370 	kfree(d, M_BPF);
371 	return(0);
372 }
373 
374 /*
375  * Rotate the packet buffers in descriptor d.  Move the store buffer
376  * into the hold slot, and the free buffer into the store slot.
377  * Zero the length of the new store buffer.
378  */
379 #define ROTATE_BUFFERS(d) \
380 	(d)->bd_hbuf = (d)->bd_sbuf; \
381 	(d)->bd_hlen = (d)->bd_slen; \
382 	(d)->bd_sbuf = (d)->bd_fbuf; \
383 	(d)->bd_slen = 0; \
384 	(d)->bd_fbuf = NULL;
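
/*
 * Each descriptor cycles through three buffers of bd_bufsize bytes:
 * catchpacket() appends records to the store buffer (bd_sbuf/bd_slen);
 * a filled store buffer is rotated into the hold slot (bd_hbuf/bd_hlen)
 * while the free buffer (bd_fbuf) becomes the new store buffer; and
 * bpfread() drains bd_hbuf, returning it to bd_fbuf when done.
 */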
385 /*
386  *  bpfread - read next chunk of packets from buffers
387  */
388 static int
389 bpfread(struct dev_read_args *ap)
390 {
391 	cdev_t dev = ap->a_head.a_dev;
392 	struct bpf_d *d = dev->si_drv1;
393 	int timed_out;
394 	int error;
395 
396 	/*
397 	 * Restrict the application to use a buffer the same size as
398 	 * the kernel buffers.
399 	 */
400 	if (ap->a_uio->uio_resid != d->bd_bufsize)
401 		return(EINVAL);
402 
403 	crit_enter();
404 	if (d->bd_state == BPF_WAITING)
405 		callout_stop(&d->bd_callout);
406 	timed_out = (d->bd_state == BPF_TIMED_OUT);
407 	d->bd_state = BPF_IDLE;
408 	/*
409 	 * If the hold buffer is empty, then do a timed sleep, which
410 	 * ends when the timeout expires or when enough packets
411 	 * have arrived to fill the store buffer.
412 	 */
413 	while (d->bd_hbuf == NULL) {
414 		if ((d->bd_immediate || timed_out) && d->bd_slen != 0) {
415 			/*
416 			 * One or more packets either arrived since the
417 			 * previous read or arrived while we were asleep.
418 			 * Rotate the buffers and return what's here.
419 			 */
420 			ROTATE_BUFFERS(d);
421 			break;
422 		}
423 
424 		/*
425 		 * No data is available, check to see if the bpf device
426 		 * is still pointed at a real interface.  If not, return
427 		 * ENXIO so that the userland process knows to rebind
428 		 * it before using it again.
429 		 */
430 		if (d->bd_bif == NULL) {
431 			crit_exit();
432 			return(ENXIO);
433 		}
434 
435 		if (ap->a_ioflag & IO_NDELAY) {
436 			crit_exit();
437 			return(EWOULDBLOCK);
438 		}
439 		error = tsleep(d, PCATCH, "bpf", d->bd_rtout);
440 		if (error == EINTR || error == ERESTART) {
441 			crit_exit();
442 			return(error);
443 		}
444 		if (error == EWOULDBLOCK) {
445 			/*
446 			 * On a timeout, return what's in the buffer,
447 			 * which may be nothing.  If there is something
448 			 * in the store buffer, we can rotate the buffers.
449 			 */
450 			if (d->bd_hbuf)
451 				/*
452 				 * We filled up the buffer in between
453 				 * getting the timeout and arriving
454 				 * here, so we don't need to rotate.
455 				 */
456 				break;
457 
458 			if (d->bd_slen == 0) {
459 				crit_exit();
460 				return(0);
461 			}
462 			ROTATE_BUFFERS(d);
463 			break;
464 		}
465 	}
466 	/*
467 	 * At this point, we know we have something in the hold slot.
468 	 */
469 	crit_exit();
470 
471 	/*
472 	 * Move data from hold buffer into user space.
473 	 * We know the entire buffer is transferred since
474 	 * we checked above that the read buffer is bpf_bufsize bytes.
475 	 */
476 	error = uiomove(d->bd_hbuf, d->bd_hlen, ap->a_uio);
477 
478 	crit_enter();
479 	d->bd_fbuf = d->bd_hbuf;
480 	d->bd_hbuf = NULL;
481 	d->bd_hlen = 0;
482 	crit_exit();
483 
484 	return(error);
485 }
486 
487 
488 /*
489  * If there are processes sleeping on this descriptor, wake them up.
490  */
491 static void
492 bpf_wakeup(struct bpf_d *d)
493 {
494 	if (d->bd_state == BPF_WAITING) {
495 		callout_stop(&d->bd_callout);
496 		d->bd_state = BPF_IDLE;
497 	}
498 	wakeup(d);
499 	if (d->bd_async && d->bd_sig && d->bd_sigio)
500 		pgsigio(d->bd_sigio, d->bd_sig, 0);
501 
502 	get_mplock();
503 	selwakeup(&d->bd_sel);
504 	rel_mplock();
505 	/* XXX */
506 	d->bd_sel.si_pid = 0;
507 }
508 
509 static void
510 bpf_timed_out(void *arg)
511 {
512 	struct bpf_d *d = (struct bpf_d *)arg;
513 
514 	crit_enter();
515 	if (d->bd_state == BPF_WAITING) {
516 		d->bd_state = BPF_TIMED_OUT;
517 		if (d->bd_slen != 0)
518 			bpf_wakeup(d);
519 	}
520 	crit_exit();
521 }
522 
523 static void
524 bpf_output_dispatch(struct netmsg *nmsg)
525 {
526 	struct netmsg_bpf_output *bmsg = (struct netmsg_bpf_output *)nmsg;
527 	struct ifnet *ifp = bmsg->nm_ifp;
528 	int error;
529 
530 	/*
531 	 * The driver frees the mbuf.
532 	 */
533 	error = ifp->if_output(ifp, bmsg->nm_mbuf, bmsg->nm_dst, NULL);
534 	lwkt_replymsg(&nmsg->nm_lmsg, error);
535 }
536 
537 static int
538 bpfwrite(struct dev_write_args *ap)
539 {
540 	cdev_t dev = ap->a_head.a_dev;
541 	struct bpf_d *d = dev->si_drv1;
542 	struct ifnet *ifp;
543 	struct mbuf *m;
544 	int error;
545 	struct sockaddr dst;
546 	int datlen;
547 	struct netmsg_bpf_output bmsg;
548 
549 	if (d->bd_bif == NULL)
550 		return(ENXIO);
551 
552 	ifp = d->bd_bif->bif_ifp;
553 
554 	if (ap->a_uio->uio_resid == 0)
555 		return(0);
556 
557 	error = bpf_movein(ap->a_uio, (int)d->bd_bif->bif_dlt, &m,
558 			   &dst, &datlen, d->bd_wfilter);
559 	if (error)
560 		return(error);
561 
562 	if (datlen > ifp->if_mtu) {
563 		m_freem(m);
564 		return(EMSGSIZE);
565 	}
566 
567 	if (d->bd_hdrcmplt)
568 		dst.sa_family = pseudo_AF_HDRCMPLT;
569 
570 	netmsg_init(&bmsg.nm_netmsg, &curthread->td_msgport, MSGF_MPSAFE,
571 		    bpf_output_dispatch);
572 	bmsg.nm_mbuf = m;
573 	bmsg.nm_ifp = ifp;
574 	bmsg.nm_dst = &dst;
575 
576 	return lwkt_domsg(cpu_portfn(0), &bmsg.nm_netmsg.nm_lmsg, 0);
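	/*
	 * Hand the packet to the network protocol thread for cpu0 via
	 * its message port (cpu_portfn(0)).  lwkt_domsg() blocks until
	 * bpf_output_dispatch() replies, so the on-stack bmsg and dst
	 * remain valid for the whole transmission.
	 */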
577 }
578 
579 /*
580  * Reset a descriptor by flushing its packet buffer and clearing the
581  * receive and drop counts.  Should be called at splimp.
582  */
583 static void
584 bpf_resetd(struct bpf_d *d)
585 {
586 	if (d->bd_hbuf) {
587 		/* Free the hold buffer. */
588 		d->bd_fbuf = d->bd_hbuf;
589 		d->bd_hbuf = NULL;
590 	}
591 	d->bd_slen = 0;
592 	d->bd_hlen = 0;
593 	d->bd_rcount = 0;
594 	d->bd_dcount = 0;
595 }
596 
597 /*
598  *  FIONREAD		Check for read packet available.
599  *  SIOCGIFADDR		Get interface address - convenient hook to driver.
600  *  BIOCGBLEN		Get buffer len [for read()].
601  *  BIOCSETF		Set link layer read filter.
602  *  BIOCSETWF	Set link layer write filter.
603  *  BIOCFLUSH		Flush read packet buffer.
604  *  BIOCPROMISC		Put interface into promiscuous mode.
605  *  BIOCGDLT		Get link layer type.
606  *  BIOCGETIF		Get interface name.
607  *  BIOCSETIF		Set interface.
608  *  BIOCSRTIMEOUT	Set read timeout.
609  *  BIOCGRTIMEOUT	Get read timeout.
610  *  BIOCGSTATS		Get packet stats.
611  *  BIOCIMMEDIATE	Set immediate mode.
612  *  BIOCVERSION		Get filter language version.
613  *  BIOCGHDRCMPLT	Get "header already complete" flag
614  *  BIOCSHDRCMPLT	Set "header already complete" flag
615  *  BIOCGSEESENT	Get "see packets sent" flag
616  *  BIOCSSEESENT	Set "see packets sent" flag
617  *  BIOCLOCK		Set "locked" flag
618  */
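
/*
 * A minimal userland sketch of the interface above (illustrative only,
 * error handling omitted; "em0" is just an example interface name):
 *
 *	int fd = open("/dev/bpf", O_RDWR);
 *	struct ifreq ifr;
 *	strlcpy(ifr.ifr_name, "em0", sizeof(ifr.ifr_name));
 *	ioctl(fd, BIOCSETIF, &ifr);		// bind to an interface
 *	u_int on = 1;
 *	ioctl(fd, BIOCIMMEDIATE, &on);		// return packets as they arrive
 *	u_int blen;
 *	ioctl(fd, BIOCGBLEN, &blen);		// read() must use exactly this size
 *	char *buf = malloc(blen);
 *	ssize_t n = read(fd, buf, blen);
 *
 * Each record returned by read() starts with a struct bpf_hdr; the next
 * record begins BPF_WORDALIGN(bh_hdrlen + bh_caplen) bytes later (see
 * catchpacket() below).
 */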
619 /* ARGSUSED */
620 static int
621 bpfioctl(struct dev_ioctl_args *ap)
622 {
623 	cdev_t dev = ap->a_head.a_dev;
624 	struct bpf_d *d = dev->si_drv1;
625 	int error = 0;
626 
627 	crit_enter();
628 	if (d->bd_state == BPF_WAITING)
629 		callout_stop(&d->bd_callout);
630 	d->bd_state = BPF_IDLE;
631 	crit_exit();
632 
633 	if (d->bd_locked == 1) {
634 		switch (ap->a_cmd) {
635 		case BIOCGBLEN:
636 		case BIOCFLUSH:
637 		case BIOCGDLT:
638 		case BIOCGDLTLIST:
639 		case BIOCGETIF:
640 		case BIOCGRTIMEOUT:
641 		case BIOCGSTATS:
642 		case BIOCVERSION:
643 		case BIOCGRSIG:
644 		case BIOCGHDRCMPLT:
645 		case FIONREAD:
646 		case BIOCLOCK:
647 		case BIOCSRTIMEOUT:
648 		case BIOCIMMEDIATE:
649 		case TIOCGPGRP:
650 			break;
651 		default:
652 			return (EPERM);
653 		}
654 	}
655 	switch (ap->a_cmd) {
656 	default:
657 		error = EINVAL;
658 		break;
659 
660 	/*
661 	 * Check for read packet available.
662 	 */
663 	case FIONREAD:
664 		{
665 			int n;
666 
667 			crit_enter();
668 			n = d->bd_slen;
669 			if (d->bd_hbuf)
670 				n += d->bd_hlen;
671 			crit_exit();
672 
673 			*(int *)ap->a_data = n;
674 			break;
675 		}
676 
677 	case SIOCGIFADDR:
678 		{
679 			struct ifnet *ifp;
680 
681 			if (d->bd_bif == NULL) {
682 				error = EINVAL;
683 			} else {
684 				ifp = d->bd_bif->bif_ifp;
685 				ifnet_serialize_all(ifp);
686 				error = ifp->if_ioctl(ifp, ap->a_cmd,
687 						      ap->a_data, ap->a_cred);
688 				ifnet_deserialize_all(ifp);
689 			}
690 			break;
691 		}
692 
693 	/*
694 	 * Get buffer len [for read()].
695 	 */
696 	case BIOCGBLEN:
697 		*(u_int *)ap->a_data = d->bd_bufsize;
698 		break;
699 
700 	/*
701 	 * Set buffer length.
702 	 */
703 	case BIOCSBLEN:
704 		if (d->bd_bif != NULL) {
705 			error = EINVAL;
706 		} else {
707 			u_int size = *(u_int *)ap->a_data;
708 
709 			if (size > bpf_maxbufsize)
710 				*(u_int *)ap->a_data = size = bpf_maxbufsize;
711 			else if (size < BPF_MINBUFSIZE)
712 				*(u_int *)ap->a_data = size = BPF_MINBUFSIZE;
713 			d->bd_bufsize = size;
714 		}
715 		break;
716 
717 	/*
718 	 * Set link layer read filter.
719 	 */
720 	case BIOCSETF:
721 	case BIOCSETWF:
722 		error = bpf_setf(d, (struct bpf_program *)ap->a_data,
723 			ap->a_cmd);
724 		break;
725 
726 	/*
727 	 * Flush read packet buffer.
728 	 */
729 	case BIOCFLUSH:
730 		crit_enter();
731 		bpf_resetd(d);
732 		crit_exit();
733 		break;
734 
735 	/*
736 	 * Put interface into promiscuous mode.
737 	 */
738 	case BIOCPROMISC:
739 		if (d->bd_bif == NULL) {
740 			/*
741 			 * No interface attached yet.
742 			 */
743 			error = EINVAL;
744 			break;
745 		}
746 		crit_enter();
747 		if (d->bd_promisc == 0) {
748 			error = ifpromisc(d->bd_bif->bif_ifp, 1);
749 			if (error == 0)
750 				d->bd_promisc = 1;
751 		}
752 		crit_exit();
753 		break;
754 
755 	/*
756 	 * Get device parameters.
757 	 */
758 	case BIOCGDLT:
759 		if (d->bd_bif == NULL)
760 			error = EINVAL;
761 		else
762 			*(u_int *)ap->a_data = d->bd_bif->bif_dlt;
763 		break;
764 
765 	/*
766 	 * Get a list of supported data link types.
767 	 */
768 	case BIOCGDLTLIST:
769 		if (d->bd_bif == NULL) {
770 			error = EINVAL;
771 		} else {
772 			error = bpf_getdltlist(d,
773 				(struct bpf_dltlist *)ap->a_data);
774 		}
775 		break;
776 
777 	/*
778 	 * Set data link type.
779 	 */
780 	case BIOCSDLT:
781 		if (d->bd_bif == NULL)
782 			error = EINVAL;
783 		else
784 			error = bpf_setdlt(d, *(u_int *)ap->a_data);
785 		break;
786 
787 	/*
788 	 * Get interface name.
789 	 */
790 	case BIOCGETIF:
791 		if (d->bd_bif == NULL) {
792 			error = EINVAL;
793 		} else {
794 			struct ifnet *const ifp = d->bd_bif->bif_ifp;
795 			struct ifreq *const ifr = (struct ifreq *)ap->a_data;
796 
797 			strlcpy(ifr->ifr_name, ifp->if_xname,
798 				sizeof ifr->ifr_name);
799 		}
800 		break;
801 
802 	/*
803 	 * Set interface.
804 	 */
805 	case BIOCSETIF:
806 		error = bpf_setif(d, (struct ifreq *)ap->a_data);
807 		break;
808 
809 	/*
810 	 * Set read timeout.
811 	 */
812 	case BIOCSRTIMEOUT:
813 		{
814 			struct timeval *tv = (struct timeval *)ap->a_data;
815 
816 			/*
817 			 * Use tvtohz_low() (which does not round up by an
818 			 * extra tick) since this isn't a one-shot timer.
819 			 */
820 			if ((error = itimerfix(tv)) == 0)
821 				d->bd_rtout = tvtohz_low(tv);
822 			break;
823 		}
824 
825 	/*
826 	 * Get read timeout.
827 	 */
828 	case BIOCGRTIMEOUT:
829 		{
830 			struct timeval *tv = (struct timeval *)ap->a_data;
831 
832 			tv->tv_sec = d->bd_rtout / hz;
833 			tv->tv_usec = (d->bd_rtout % hz) * tick;
834 			break;
835 		}
836 
837 	/*
838 	 * Get packet stats.
839 	 */
840 	case BIOCGSTATS:
841 		{
842 			struct bpf_stat *bs = (struct bpf_stat *)ap->a_data;
843 
844 			bs->bs_recv = d->bd_rcount;
845 			bs->bs_drop = d->bd_dcount;
846 			break;
847 		}
848 
849 	/*
850 	 * Set immediate mode.
851 	 */
852 	case BIOCIMMEDIATE:
853 		d->bd_immediate = *(u_int *)ap->a_data;
854 		break;
855 
856 	case BIOCVERSION:
857 		{
858 			struct bpf_version *bv = (struct bpf_version *)ap->a_data;
859 
860 			bv->bv_major = BPF_MAJOR_VERSION;
861 			bv->bv_minor = BPF_MINOR_VERSION;
862 			break;
863 		}
864 
865 	/*
866 	 * Get "header already complete" flag
867 	 */
868 	case BIOCGHDRCMPLT:
869 		*(u_int *)ap->a_data = d->bd_hdrcmplt;
870 		break;
871 
872 	/*
873 	 * Set "header already complete" flag
874 	 */
875 	case BIOCSHDRCMPLT:
876 		d->bd_hdrcmplt = *(u_int *)ap->a_data ? 1 : 0;
877 		break;
878 
879 	/*
880 	 * Get "see sent packets" flag
881 	 */
882 	case BIOCGSEESENT:
883 		*(u_int *)ap->a_data = d->bd_seesent;
884 		break;
885 
886 	/*
887 	 * Set "see sent packets" flag
888 	 */
889 	case BIOCSSEESENT:
890 		d->bd_seesent = *(u_int *)ap->a_data;
891 		break;
892 
893 	case FIOASYNC:		/* Send signal on receive packets */
894 		d->bd_async = *(int *)ap->a_data;
895 		break;
896 
897 	case FIOSETOWN:
898 		error = fsetown(*(int *)ap->a_data, &d->bd_sigio);
899 		break;
900 
901 	case FIOGETOWN:
902 		*(int *)ap->a_data = fgetown(d->bd_sigio);
903 		break;
904 
905 	/* This is deprecated, FIOSETOWN should be used instead. */
906 	case TIOCSPGRP:
907 		error = fsetown(-(*(int *)ap->a_data), &d->bd_sigio);
908 		break;
909 
910 	/* This is deprecated, FIOGETOWN should be used instead. */
911 	case TIOCGPGRP:
912 		*(int *)ap->a_data = -fgetown(d->bd_sigio);
913 		break;
914 
915 	case BIOCSRSIG:		/* Set receive signal */
916 		{
917 			u_int sig;
918 
919 			sig = *(u_int *)ap->a_data;
920 
921 			if (sig >= NSIG)
922 				error = EINVAL;
923 			else
924 				d->bd_sig = sig;
925 			break;
926 		}
927 	case BIOCGRSIG:
928 		*(u_int *)ap->a_data = d->bd_sig;
929 		break;
930 	case BIOCLOCK:
931 		d->bd_locked = 1;
932 		break;
933 	}
934 	return(error);
935 }
936 
937 /*
938  * Set d's packet filter program to fp.  If this file already has a filter,
939  * free it and replace it.  Returns EINVAL for bogus requests.
940  */
941 static int
942 bpf_setf(struct bpf_d *d, struct bpf_program *fp, u_long cmd)
943 {
944 	struct bpf_insn *fcode, *old;
945 	u_int wfilter, flen, size;
946 
947 	if (cmd == BIOCSETWF) {
948 		old = d->bd_wfilter;
949 		wfilter = 1;
950 	} else {
951 		wfilter = 0;
952 		old = d->bd_rfilter;
953 	}
954 	if (fp->bf_insns == NULL) {
955 		if (fp->bf_len != 0)
956 			return(EINVAL);
957 		crit_enter();
958 		if (wfilter)
959 			d->bd_wfilter = NULL;
960 		else
961 			d->bd_rfilter = NULL;
962 		bpf_resetd(d);
963 		crit_exit();
964 		if (old != NULL)
965 			kfree(old, M_BPF);
966 		return(0);
967 	}
968 	flen = fp->bf_len;
969 	if (flen > BPF_MAXINSNS)
970 		return(EINVAL);
971 
972 	size = flen * sizeof *fp->bf_insns;
973 	fcode = (struct bpf_insn *)kmalloc(size, M_BPF, M_WAITOK);
974 	if (copyin(fp->bf_insns, fcode, size) == 0 &&
975 	    bpf_validate(fcode, (int)flen)) {
976 		crit_enter();
977 		if (wfilter)
978 			d->bd_wfilter = fcode;
979 		else
980 			d->bd_rfilter = fcode;
981 		bpf_resetd(d);
982 		crit_exit();
983 		if (old != NULL)
984 			kfree(old, M_BPF);
985 
986 		return(0);
987 	}
988 	kfree(fcode, M_BPF);
989 	return(EINVAL);
990 }
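
/*
 * Example of a minimal filter program a user could install through
 * BIOCSETF (illustrative only): a single BPF_RET instruction returning
 * (u_int)-1 accepts every packet in full, while a return value of 0
 * rejects it (see the write-filter check in bpf_movein()).
 *
 *	struct bpf_insn insns[] = {
 *		BPF_STMT(BPF_RET + BPF_K, (u_int)-1),
 *	};
 *	struct bpf_program prog = { 1, insns };
 *	ioctl(fd, BIOCSETF, &prog);
 */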
991 
992 /*
993  * Detach a file from its current interface (if attached at all) and attach
994  * to the interface indicated by the name stored in ifr.
995  * Return an errno or 0.
996  */
997 static int
998 bpf_setif(struct bpf_d *d, struct ifreq *ifr)
999 {
1000 	struct bpf_if *bp;
1001 	int error;
1002 	struct ifnet *theywant;
1003 
1004 	theywant = ifunit(ifr->ifr_name);
1005 	if (theywant == NULL)
1006 		return(ENXIO);
1007 
1008 	/*
1009 	 * Look through attached interfaces for the named one.
1010 	 */
1011 	for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
1012 		struct ifnet *ifp = bp->bif_ifp;
1013 
1014 		if (ifp == NULL || ifp != theywant)
1015 			continue;
1016 		/* skip additional entry */
1017 		if (bp->bif_driverp != &ifp->if_bpf)
1018 			continue;
1019 		/*
1020 		 * We found the requested interface.
1021 		 * If it's not up, return an error.
1022 		 * Allocate the packet buffers if we need to.
1023 		 * If we're already attached to requested interface,
1024 		 * just flush the buffer.
1025 		 */
1026 		if (!(ifp->if_flags & IFF_UP))
1027 			return(ENETDOWN);
1028 
1029 		if (d->bd_sbuf == NULL) {
1030 			error = bpf_allocbufs(d);
1031 			if (error != 0)
1032 				return(error);
1033 		}
1034 		crit_enter();
1035 		if (bp != d->bd_bif) {
1036 			if (d->bd_bif != NULL) {
1037 				/*
1038 				 * Detach if attached to something else.
1039 				 */
1040 				bpf_detachd(d);
1041 			}
1042 
1043 			bpf_attachd(d, bp);
1044 		}
1045 		bpf_resetd(d);
1046 		crit_exit();
1047 		return(0);
1048 	}
1049 
1050 	/* Not found. */
1051 	return(ENXIO);
1052 }
1053 
1054 /*
1055  * Support for select() and poll() system calls
1056  *
1057  * Return true iff the specific operation will not block indefinitely.
1058  * Otherwise, return false but make a note that a selwakeup() must be done.
1059  */
1060 static int
1061 bpfpoll(struct dev_poll_args *ap)
1062 {
1063 	cdev_t dev = ap->a_head.a_dev;
1064 	struct bpf_d *d;
1065 	int revents;
1066 
1067 	d = dev->si_drv1;
1068 	if (d->bd_bif == NULL)
1069 		return(ENXIO);
1070 
1071 	revents = ap->a_events & (POLLOUT | POLLWRNORM);
1072 	crit_enter();
1073 	if (ap->a_events & (POLLIN | POLLRDNORM)) {
1074 		/*
1075 		 * An imitation of the FIONREAD ioctl code.
1076 		 * XXX not quite.  An exact imitation:
1077 		 *	if (d->bd_slen != 0 ||
1078 		 *	    (d->bd_hbuf != NULL && d->bd_hlen != 0))
1079 		 */
1080 		if (d->bd_hlen != 0 ||
1081 		    ((d->bd_immediate || d->bd_state == BPF_TIMED_OUT) &&
1082 		    d->bd_slen != 0)) {
1083 			revents |= ap->a_events & (POLLIN | POLLRDNORM);
1084 		} else {
1085 			selrecord(curthread, &d->bd_sel);
1086 			/* Start the read timeout if necessary. */
1087 			if (d->bd_rtout > 0 && d->bd_state == BPF_IDLE) {
1088 				callout_reset(&d->bd_callout, d->bd_rtout,
1089 				    bpf_timed_out, d);
1090 				d->bd_state = BPF_WAITING;
1091 			}
1092 		}
1093 	}
1094 	crit_exit();
1095 	ap->a_events = revents;
1096 	return(0);
1097 }
1098 
1099 /*
1100  * Process the packet pkt of length pktlen.  The packet is parsed
1101  * by each listener's filter, and if accepted, stashed into the
1102  * corresponding buffer.
1103  */
1104 void
1105 bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
1106 {
1107 	struct bpf_d *d;
1108 	struct timeval tv;
1109 	int gottime = 0;
1110 	u_int slen;
1111 
1112 	get_mplock();
1113 
1114 	/* Re-check */
1115 	if (bp == NULL) {
1116 		rel_mplock();
1117 		return;
1118 	}
1119 
1120 	/*
1121 	 * Note that the ipl does not have to be raised at this point.
1122 	 * The only problem that could arise here is that if two different
1123 	 * interfaces shared any data.  This is not the case.
1124 	 */
1125 	SLIST_FOREACH(d, &bp->bif_dlist, bd_next) {
1126 		++d->bd_rcount;
1127 		slen = bpf_filter(d->bd_rfilter, pkt, pktlen, pktlen);
1128 		if (slen != 0) {
1129 			if (!gottime) {
1130 				microtime(&tv);
1131 				gottime = 1;
1132 			}
1133 			catchpacket(d, pkt, pktlen, slen, ovbcopy, &tv);
1134 		}
1135 	}
1136 
1137 	rel_mplock();
1138 }
1139 
1140 /*
1141  * Copy data from an mbuf chain into a buffer.  This code is derived
1142  * from m_copydata in sys/uipc_mbuf.c.
1143  */
1144 static void
1145 bpf_mcopy(const void *src_arg, void *dst_arg, size_t len)
1146 {
1147 	const struct mbuf *m;
1148 	u_int count;
1149 	u_char *dst;
1150 
1151 	m = src_arg;
1152 	dst = dst_arg;
1153 	while (len > 0) {
1154 		if (m == NULL)
1155 			panic("bpf_mcopy");
1156 		count = min(m->m_len, len);
1157 		bcopy(mtod(m, void *), dst, count);
1158 		m = m->m_next;
1159 		dst += count;
1160 		len -= count;
1161 	}
1162 }
1163 
1164 /*
1165  * Process the packet in the mbuf chain m.  The packet is parsed by each
1166  * listener's filter, and if accepted, stashed into the corresponding
1167  * buffer.
1168  */
1169 void
1170 bpf_mtap(struct bpf_if *bp, struct mbuf *m)
1171 {
1172 	struct bpf_d *d;
1173 	u_int pktlen, slen;
1174 	struct timeval tv;
1175 	int gottime = 0;
1176 
1177 	get_mplock();
1178 
1179 	/* Re-check */
1180 	if (bp == NULL) {
1181 		rel_mplock();
1182 		return;
1183 	}
1184 
1185 	/* Don't compute pktlen if no descriptor is attached. */
1186 	if (SLIST_EMPTY(&bp->bif_dlist)) {
1187 		rel_mplock();
1188 		return;
1189 	}
1190 
1191 	pktlen = m_lengthm(m, NULL);
1192 
1193 	SLIST_FOREACH(d, &bp->bif_dlist, bd_next) {
1194 		if (!d->bd_seesent && (m->m_pkthdr.rcvif == NULL))
1195 			continue;
1196 		++d->bd_rcount;
1197 		slen = bpf_filter(d->bd_rfilter, (u_char *)m, pktlen, 0);
1198 		if (slen != 0) {
1199 			if (!gottime) {
1200 				microtime(&tv);
1201 				gottime = 1;
1202 			}
1203 			catchpacket(d, (u_char *)m, pktlen, slen, bpf_mcopy,
1204 				    &tv);
1205 		}
1206 	}
1207 
1208 	rel_mplock();
1209 }
1210 
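/*
 * Tap a packet, prepending the 4-byte address family (as a host-order
 * u_int, the DLT_NULL style pseudo-header) in front of the mbuf chain.
 */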
1211 void
1212 bpf_mtap_family(struct bpf_if *bp, struct mbuf *m, sa_family_t family)
1213 {
1214 	u_int family4;
1215 
1216 	KKASSERT(family != AF_UNSPEC);
1217 
1218 	family4 = (u_int)family;
1219 	bpf_ptap(bp, m, &family4, sizeof(family4));
1220 }
1221 
1222 /*
1223  * Process the packet in the mbuf chain m with the header in m prepended.
1224  * The packet is parsed by each listener's filter, and if accepted,
1225  * stashed into the corresponding buffer.
1226  */
1227 void
1228 bpf_ptap(struct bpf_if *bp, struct mbuf *m, const void *data, u_int dlen)
1229 {
1230 	struct mbuf mb;
1231 
1232 	/*
1233 	 * Craft on-stack mbuf suitable for passing to bpf_mtap.
1234 	 * Note that we cut corners here; we only set up what's
1235 	 * absolutely needed--this mbuf should never go anywhere else.
1236 	 */
1237 	mb.m_next = m;
1238 	mb.m_data = __DECONST(void *, data); /* LINTED */
1239 	mb.m_len = dlen;
1240 	mb.m_pkthdr.rcvif = m->m_pkthdr.rcvif;
1241 
1242 	bpf_mtap(bp, &mb);
1243 }
1244 
1245 /*
1246  * Move the packet data from interface memory (pkt) into the
1247  * store buffer.  Wake up any pending reads when the buffer fills up.
1248  * "cpfn" is the routine called to do the actual data
1249  * transfer.  bcopy is passed in to copy contiguous chunks, while
1250  * bpf_mcopy is passed in to copy mbuf chains.  In the latter case,
1251  * pkt is really an mbuf.
1252  */
1253 static void
1254 catchpacket(struct bpf_d *d, u_char *pkt, u_int pktlen, u_int snaplen,
1255 	    void (*cpfn)(const void *, void *, size_t),
1256 	    const struct timeval *tv)
1257 {
1258 	struct bpf_hdr *hp;
1259 	int totlen, curlen;
1260 	int hdrlen = d->bd_bif->bif_hdrlen;
1261 	/*
1262 	 * Figure out how many bytes to move.  If the packet is
1263 	 * greater or equal to the snapshot length, transfer that
1264 	 * much.  Otherwise, transfer the whole packet (unless
1265 	 * we hit the buffer size limit).
1266 	 */
1267 	totlen = hdrlen + min(snaplen, pktlen);
1268 	if (totlen > d->bd_bufsize)
1269 		totlen = d->bd_bufsize;
1270 
1271 	/*
1272 	 * Round up the end of the previous packet to the next longword.
1273 	 */
1274 	curlen = BPF_WORDALIGN(d->bd_slen);
1275 	if (curlen + totlen > d->bd_bufsize) {
1276 		/*
1277 		 * This packet will overflow the storage buffer.
1278 		 * Rotate the buffers if we can, then wakeup any
1279 		 * pending reads.
1280 		 */
1281 		if (d->bd_fbuf == NULL) {
1282 			/*
1283 			 * We haven't completed the previous read yet,
1284 			 * so drop the packet.
1285 			 */
1286 			++d->bd_dcount;
1287 			return;
1288 		}
1289 		ROTATE_BUFFERS(d);
1290 		bpf_wakeup(d);
1291 		curlen = 0;
1292 	} else if (d->bd_immediate || d->bd_state == BPF_TIMED_OUT) {
1293 		/*
1294 		 * Immediate mode is set, or the read timeout has
1295 		 * already expired during a select call.  A packet
1296 		 * arrived, so the reader should be woken up.
1297 		 */
1298 		bpf_wakeup(d);
1299 	}
1300 
1301 	/*
1302 	 * Append the bpf header.
1303 	 */
1304 	hp = (struct bpf_hdr *)(d->bd_sbuf + curlen);
1305 	hp->bh_tstamp = *tv;
1306 	hp->bh_datalen = pktlen;
1307 	hp->bh_hdrlen = hdrlen;
1308 	/*
1309 	 * Copy the packet data into the store buffer and update its length.
1310 	 */
1311 	(*cpfn)(pkt, (u_char *)hp + hdrlen, (hp->bh_caplen = totlen - hdrlen));
1312 	d->bd_slen = curlen + totlen;
1313 }
1314 
1315 /*
1316  * Allocate the packet buffers for a descriptor.
1317  */
1318 static int
1319 bpf_allocbufs(struct bpf_d *d)
1320 {
1321 	d->bd_fbuf = kmalloc(d->bd_bufsize, M_BPF, M_WAITOK);
1322 	d->bd_sbuf = kmalloc(d->bd_bufsize, M_BPF, M_WAITOK);
1323 	d->bd_slen = 0;
1324 	d->bd_hlen = 0;
1325 	return(0);
1326 }
1327 
1328 /*
1329  * Free buffers and packet filter program currently in use by a descriptor.
1330  * Called on close.
1331  */
1332 static void
1333 bpf_freed(struct bpf_d *d)
1334 {
1335 	/*
1336 	 * We don't need to lock out interrupts since this descriptor has
1337 	 * been detached from its interface and it hasn't yet been marked
1338 	 * free.
1339 	 */
1340 	if (d->bd_sbuf != NULL) {
1341 		kfree(d->bd_sbuf, M_BPF);
1342 		if (d->bd_hbuf != NULL)
1343 			kfree(d->bd_hbuf, M_BPF);
1344 		if (d->bd_fbuf != NULL)
1345 			kfree(d->bd_fbuf, M_BPF);
1346 	}
1347 	if (d->bd_rfilter)
1348 		kfree(d->bd_rfilter, M_BPF);
1349 	if (d->bd_wfilter)
1350 		kfree(d->bd_wfilter, M_BPF);
1351 }
1352 
1353 /*
1354  * Attach an interface to bpf.  ifp is a pointer to the structure
1355  * defining the interface to be attached, dlt is the link layer type,
1356  * and hdrlen is the fixed size of the link header (variable length
1357  * headers are not yet supported).
1358  */
1359 void
1360 bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
1361 {
1362 	bpfattach_dlt(ifp, dlt, hdrlen, &ifp->if_bpf);
1363 }
1364 
1365 void
1366 bpfattach_dlt(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
1367 {
1368 	struct bpf_if *bp;
1369 
1370 	bp = kmalloc(sizeof *bp, M_BPF, M_WAITOK | M_ZERO);
1371 
1372 	SLIST_INIT(&bp->bif_dlist);
1373 	bp->bif_ifp = ifp;
1374 	bp->bif_dlt = dlt;
1375 	bp->bif_driverp = driverp;
1376 	*bp->bif_driverp = NULL;
1377 
1378 	bp->bif_next = bpf_iflist;
1379 	bpf_iflist = bp;
1380 
1381 	/*
1382 	 * Compute the length of the bpf header.  This is not necessarily
1383 	 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
1384 	 * that the network layer header begins on a longword boundary (for
1385 	 * performance reasons and to alleviate alignment restrictions).
1386 	 */
1387 	bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;
1388 
1389 	if (bootverbose)
1390 		if_printf(ifp, "bpf attached\n");
1391 }
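
/*
 * Typical driver usage (illustrative): an Ethernet driver would call
 *
 *	bpfattach(ifp, DLT_EN10MB, sizeof(struct ether_header));
 *
 * from its attach routine, feed frames to listeners via
 * bpf_mtap(ifp->if_bpf, m) whenever ifp->if_bpf is non-NULL, and call
 * bpfdetach(ifp) when the interface is destroyed.
 */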
1392 
1393 /*
1394  * Detach bpf from an interface.  This involves detaching each descriptor
1395  * associated with the interface, and leaving bd_bif NULL.  Notify each
1396  * descriptor as it's detached so that any sleepers wake up and get
1397  * ENXIO.
1398  */
1399 void
1400 bpfdetach(struct ifnet *ifp)
1401 {
1402 	struct bpf_if *bp, *bp_prev;
1403 	struct bpf_d *d;
1404 
1405 	crit_enter();
1406 
1407 	/* Locate BPF interface information */
1408 	bp_prev = NULL;
1409 	for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
1410 		if (ifp == bp->bif_ifp)
1411 			break;
1412 		bp_prev = bp;
1413 	}
1414 
1415 	/* Interface wasn't attached */
1416 	if (bp == NULL || bp->bif_ifp == NULL) {
1417 		crit_exit();
1418 		kprintf("bpfdetach: %s was not attached\n", ifp->if_xname);
1419 		return;
1420 	}
1421 
1422 	while ((d = SLIST_FIRST(&bp->bif_dlist)) != NULL) {
1423 		bpf_detachd(d);
1424 		bpf_wakeup(d);
1425 	}
1426 
1427 	if (bp_prev != NULL)
1428 		bp_prev->bif_next = bp->bif_next;
1429 	else
1430 		bpf_iflist = bp->bif_next;
1431 
1432 	kfree(bp, M_BPF);
1433 
1434 	crit_exit();
1435 }
1436 
1437 /*
1438  * Get a list of the available data link types for the interface.
1439  */
1440 static int
1441 bpf_getdltlist(struct bpf_d *d, struct bpf_dltlist *bfl)
1442 {
1443 	int n, error;
1444 	struct ifnet *ifp;
1445 	struct bpf_if *bp;
1446 
1447 	ifp = d->bd_bif->bif_ifp;
1448 	n = 0;
1449 	error = 0;
1450 	for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
1451 		if (bp->bif_ifp != ifp)
1452 			continue;
1453 		if (bfl->bfl_list != NULL) {
1454 			if (n >= bfl->bfl_len) {
1455 				return (ENOMEM);
1456 			}
1457 			error = copyout(&bp->bif_dlt,
1458 			    bfl->bfl_list + n, sizeof(u_int));
1459 		}
1460 		n++;
1461 	}
1462 	bfl->bfl_len = n;
1463 	return(error);
1464 }
1465 
1466 /*
1467  * Set the data link type of a BPF instance.
1468  */
1469 static int
1470 bpf_setdlt(struct bpf_d *d, u_int dlt)
1471 {
1472 	int error, opromisc;
1473 	struct ifnet *ifp;
1474 	struct bpf_if *bp;
1475 
1476 	if (d->bd_bif->bif_dlt == dlt)
1477 		return (0);
1478 	ifp = d->bd_bif->bif_ifp;
1479 	for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
1480 		if (bp->bif_ifp == ifp && bp->bif_dlt == dlt)
1481 			break;
1482 	}
1483 	if (bp != NULL) {
1484 		opromisc = d->bd_promisc;
1485 		crit_enter();
1486 		bpf_detachd(d);
1487 		bpf_attachd(d, bp);
1488 		bpf_resetd(d);
1489 		if (opromisc) {
1490 			error = ifpromisc(bp->bif_ifp, 1);
1491 			if (error) {
1492 				if_printf(bp->bif_ifp,
1493 					"bpf_setdlt: ifpromisc failed (%d)\n",
1494 					error);
1495 			} else {
1496 				d->bd_promisc = 1;
1497 			}
1498 		}
1499 		crit_exit();
1500 	}
1501 	return(bp == NULL ? EINVAL : 0);
1502 }
1503 
1504 static void
1505 bpf_drvinit(void *unused)
1506 {
1507 	dev_ops_add(&bpf_ops, 0, 0);
1508 	make_dev(&bpf_ops, 0, 0, 0, 0600, "bpf");
1509 	devfs_clone_bitmap_init(&DEVFS_CLONE_BITMAP(bpf));
1510 	devfs_clone_handler_add("bpf", bpfclone);
1511 }
1512 
1513 static void
1514 bpf_drvuninit(void *unused)
1515 {
1516 	dev_ops_remove_all(&bpf_ops);
1517 	devfs_clone_bitmap_uninit(&DEVFS_CLONE_BITMAP(bpf));
1518 }
1519 
1520 SYSINIT(bpfdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE + CDEV_MAJOR, bpf_drvinit, NULL)
1521 SYSUNINIT(bpfdev, SI_SUB_DRIVERS, SI_ORDER_MIDDLE + CDEV_MAJOR, bpf_drvuninit, NULL);
1522 
1523 #else /* !BPF */
1524 /*
1525  * NOP stubs to allow bpf-using drivers to load and function.
1526  *
1527  * A 'better' implementation would allow the core bpf functionality
1528  * to be loaded at runtime.
1529  */
1530 
1531 void
1532 bpf_tap(struct bpf_if *bp, u_char *pkt, u_int pktlen)
1533 {
1534 }
1535 
1536 void
1537 bpf_mtap(struct bpf_if *bp, struct mbuf *m)
1538 {
1539 }
1540 
1541 void
1542 bpf_ptap(struct bpf_if *bp, struct mbuf *m, const void *data, u_int dlen)
1543 {
1544 }
1545 
1546 void
1547 bpfattach(struct ifnet *ifp, u_int dlt, u_int hdrlen)
1548 {
1549 }
1550 
1551 void
1552 bpfattach_dlt(struct ifnet *ifp, u_int dlt, u_int hdrlen, struct bpf_if **driverp)
1553 {
1554 }
1555 
1556 void
1557 bpfdetach(struct ifnet *ifp)
1558 {
1559 }
1560 
1561 u_int
1562 bpf_filter(const struct bpf_insn *pc, u_char *p, u_int wirelen, u_int buflen)
1563 {
1564 	return -1;	/* "no filter" behaviour */
1565 }
1566 
1567 #endif /* !BPF */
1568