/*	$OpenBSD: bpf.c,v 1.199 2020/12/26 16:30:58 cheloha Exp $	*/
/*	$NetBSD: bpf.c,v 1.33 1997/02/21 23:59:35 thorpej Exp $	*/

/*
 * Copyright (c) 1990, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 2010, 2014 Henning Brauer <henning@openbsd.org>
 *
 * This code is derived from the Stanford/CMU enet packet filter,
 * (net/enet.c) distributed as part of 4.3BSD, and code contributed
 * to Berkeley by Steven McCanne and Van Jacobson both of Lawrence
 * Berkeley Laboratory.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)bpf.c	8.2 (Berkeley) 3/28/94
 */

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/proc.h>
#include <sys/signalvar.h>
#include <sys/ioctl.h>
#include <sys/conf.h>
#include <sys/vnode.h>
#include <sys/fcntl.h>
#include <sys/socket.h>
#include <sys/poll.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/rwlock.h>
#include <sys/atomic.h>
#include <sys/smr.h>
#include <sys/specdev.h>
#include <sys/selinfo.h>
#include <sys/sigio.h>
#include <sys/task.h>

#include <net/if.h>
#include <net/bpf.h>
#include <net/bpfdesc.h>

#include <netinet/in.h>
#include <netinet/if_ether.h>

#include "vlan.h"
#if NVLAN > 0
#include <net/if_vlan_var.h>
#endif

#define BPF_BUFSIZE 32768

#define PRINET  26			/* interruptible */

/* from kern/kern_clock.c; incremented each clock tick. */
extern int ticks;

/*
 * The default read buffer size is patchable.
 */
int bpf_bufsize = BPF_BUFSIZE;
int bpf_maxbufsize = BPF_MAXBUFSIZE;
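
/*
 * Both knobs are adjustable at runtime through the net.bpf sysctl tree
 * (NET_BPF_BUFSIZE, NET_BPF_MAXBUFSIZE); see bpf_sysctl() below.
 */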

/*
 *  bpf_iflist is the list of interfaces; each corresponds to an ifnet
 *  bpf_d_list is the list of descriptors
 */
struct bpf_if	*bpf_iflist;
LIST_HEAD(, bpf_d) bpf_d_list;

int	bpf_allocbufs(struct bpf_d *);
void	bpf_ifname(struct bpf_if*, struct ifreq *);
void	bpf_mcopy(const void *, void *, size_t);
int	bpf_movein(struct uio *, struct bpf_d *, struct mbuf **,
	    struct sockaddr *);
int	bpf_setif(struct bpf_d *, struct ifreq *);
int	bpfpoll(dev_t, int, struct proc *);
int	bpfkqfilter(dev_t, struct knote *);
void	bpf_wakeup(struct bpf_d *);
void	bpf_wakeup_cb(void *);
int	_bpf_mtap(caddr_t, const struct mbuf *, const struct mbuf *, u_int);
void	bpf_catchpacket(struct bpf_d *, u_char *, size_t, size_t,
	    const struct bpf_hdr *);
int	bpf_getdltlist(struct bpf_d *, struct bpf_dltlist *);
int	bpf_setdlt(struct bpf_d *, u_int);

void	filt_bpfrdetach(struct knote *);
int	filt_bpfread(struct knote *, long);

int	bpf_sysctl_locked(int *, u_int, void *, size_t *, void *, size_t);

struct bpf_d *bpfilter_lookup(int);

/*
 * Called holding ``bd_mtx''.
 */
void	bpf_attachd(struct bpf_d *, struct bpf_if *);
void	bpf_detachd(struct bpf_d *);
void	bpf_resetd(struct bpf_d *);

void	bpf_prog_smr(void *);
void	bpf_d_smr(void *);

/*
 * Reference count access to descriptor buffers
 */
void	bpf_get(struct bpf_d *);
void	bpf_put(struct bpf_d *);


struct rwlock bpf_sysctl_lk = RWLOCK_INITIALIZER("bpfsz");

int
bpf_movein(struct uio *uio, struct bpf_d *d, struct mbuf **mp,
    struct sockaddr *sockp)
{
	struct bpf_program_smr *bps;
	struct bpf_insn *fcode = NULL;
	struct mbuf *m;
	struct m_tag *mtag;
	int error;
	u_int hlen;
	u_int len;
	u_int linktype;
	u_int slen;

	/*
	 * Build a sockaddr based on the data link layer type.
	 * We do this at this level because the ethernet header
	 * is copied directly into the data field of the sockaddr.
	 * In the case of SLIP, there is no header and the packet
	 * is forwarded as is.
	 * Also, we are careful to leave room at the front of the mbuf
	 * for the link level header.
	 */
	linktype = d->bd_bif->bif_dlt;
	switch (linktype) {

	case DLT_SLIP:
		sockp->sa_family = AF_INET;
		hlen = 0;
		break;

	case DLT_PPP:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		break;

	case DLT_EN10MB:
		sockp->sa_family = AF_UNSPEC;
		/* XXX Would MAXLINKHDR be better? */
		hlen = ETHER_HDR_LEN;
		break;

	case DLT_IEEE802_11:
	case DLT_IEEE802_11_RADIO:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		break;

	case DLT_RAW:
	case DLT_NULL:
		sockp->sa_family = AF_UNSPEC;
		hlen = 0;
		break;

	case DLT_LOOP:
		sockp->sa_family = AF_UNSPEC;
		hlen = sizeof(u_int32_t);
		break;

	default:
		return (EIO);
	}

	if (uio->uio_resid > MAXMCLBYTES)
		return (EIO);
	len = uio->uio_resid;

	MGETHDR(m, M_WAIT, MT_DATA);
	m->m_pkthdr.ph_ifidx = 0;
	m->m_pkthdr.len = len - hlen;

	if (len > MHLEN) {
		MCLGETL(m, M_WAIT, len);
		if ((m->m_flags & M_EXT) == 0) {
			error = ENOBUFS;
			goto bad;
		}
	}
	m->m_len = len;
	*mp = m;

	error = uiomove(mtod(m, caddr_t), len, uio);
	if (error)
		goto bad;

	smr_read_enter();
	bps = SMR_PTR_GET(&d->bd_wfilter);
	if (bps != NULL)
		fcode = bps->bps_bf.bf_insns;
	slen = bpf_filter(fcode, mtod(m, u_char *), len, len);
	smr_read_leave();

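	/*
	 * The write filter acts as a gate: unless it accepts at least
	 * the full packet length (no filter at all accepts everything),
	 * the write is rejected with EPERM below.
	 */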
	if (slen < len) {
		error = EPERM;
		goto bad;
	}

	if (m->m_len < hlen) {
		error = EPERM;
		goto bad;
	}
	/*
	 * Make room for link header, and copy it to sockaddr
	 */
	if (hlen != 0) {
		if (linktype == DLT_LOOP) {
			u_int32_t af;

			/* the link header indicates the address family */
			KASSERT(hlen == sizeof(u_int32_t));
			memcpy(&af, m->m_data, hlen);
			sockp->sa_family = ntohl(af);
		} else
			memcpy(sockp->sa_data, m->m_data, hlen);
		m->m_len -= hlen;
		m->m_data += hlen; /* XXX */
	}

	/*
	 * Prepend the data link type as a mbuf tag
	 */
	mtag = m_tag_get(PACKET_TAG_DLT, sizeof(u_int), M_WAIT);
	*(u_int *)(mtag + 1) = linktype;
	m_tag_prepend(m, mtag);

	return (0);
 bad:
	m_freem(m);
	return (error);
}

/*
 * Attach file to the bpf interface, i.e. make d listen on bp.
 */
void
bpf_attachd(struct bpf_d *d, struct bpf_if *bp)
{
	MUTEX_ASSERT_LOCKED(&d->bd_mtx);

	/*
	 * Point d at bp, and add d to the interface's list of listeners.
	 * Finally, point the driver's bpf cookie at the interface so
	 * it will divert packets to bpf.
	 */

	d->bd_bif = bp;

	KERNEL_ASSERT_LOCKED();
	SMR_SLIST_INSERT_HEAD_LOCKED(&bp->bif_dlist, d, bd_next);

	*bp->bif_driverp = bp;
}

/*
 * Detach a file from its interface.
 */
void
bpf_detachd(struct bpf_d *d)
{
	struct bpf_if *bp;

	MUTEX_ASSERT_LOCKED(&d->bd_mtx);

	bp = d->bd_bif;
	/* Not attached. */
	if (bp == NULL)
		return;

	/* Remove ``d'' from the interface's descriptor list. */
	KERNEL_ASSERT_LOCKED();
	SMR_SLIST_REMOVE_LOCKED(&bp->bif_dlist, d, bpf_d, bd_next);

	if (SMR_SLIST_EMPTY_LOCKED(&bp->bif_dlist)) {
		/*
		 * Let the driver know that there are no more listeners.
		 */
		*bp->bif_driverp = NULL;
	}

	d->bd_bif = NULL;

	/*
	 * Check if this descriptor had requested promiscuous mode.
	 * If so, turn it off.
	 */
	if (d->bd_promisc) {
		int error;

		KASSERT(bp->bif_ifp != NULL);

		d->bd_promisc = 0;

		bpf_get(d);
		mtx_leave(&d->bd_mtx);
		NET_LOCK();
		error = ifpromisc(bp->bif_ifp, 0);
		NET_UNLOCK();
		mtx_enter(&d->bd_mtx);
		bpf_put(d);

		if (error && !(error == EINVAL || error == ENODEV ||
		    error == ENXIO))
			/*
			 * Something is really wrong if we were able to put
			 * the driver into promiscuous mode, but can't
			 * take it out.
			 */
			panic("bpf: ifpromisc failed");
	}
}

void
bpfilterattach(int n)
{
	LIST_INIT(&bpf_d_list);
}

/*
 * Open the bpf device.  Returns ENXIO for an illegal minor device
 * number, EBUSY if a new descriptor cannot be allocated.
 */
int
bpfopen(dev_t dev, int flag, int mode, struct proc *p)
{
	struct bpf_d *bd;
	int unit = minor(dev);

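	/*
	 * bpf is a cloning device: opens are redirected to a fresh unit
	 * by the device cloning machinery, so only minors that are a
	 * multiple of (1 << CLONE_SHIFT) are valid here.
	 */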
	if (unit & ((1 << CLONE_SHIFT) - 1))
		return (ENXIO);

	KASSERT(bpfilter_lookup(unit) == NULL);

	/* create on demand */
	if ((bd = malloc(sizeof(*bd), M_DEVBUF, M_NOWAIT|M_ZERO)) == NULL)
		return (EBUSY);

	/* Mark "free" and do most initialization. */
	bd->bd_unit = unit;
	bd->bd_bufsize = bpf_bufsize;
	bd->bd_sig = SIGIO;
	mtx_init(&bd->bd_mtx, IPL_NET);
	task_set(&bd->bd_wake_task, bpf_wakeup_cb, bd);
	smr_init(&bd->bd_smr);
	sigio_init(&bd->bd_sigio);

	bd->bd_rtout = 0;	/* no timeout by default */
	bd->bd_rnonblock = ISSET(flag, FNONBLOCK);

	bpf_get(bd);
	LIST_INSERT_HEAD(&bpf_d_list, bd, bd_list);

	return (0);
}

/*
 * Close the descriptor by detaching it from its interface,
 * deallocating its buffers, and marking it free.
 */
int
bpfclose(dev_t dev, int flag, int mode, struct proc *p)
{
	struct bpf_d *d;

	d = bpfilter_lookup(minor(dev));
	mtx_enter(&d->bd_mtx);
	bpf_detachd(d);
	bpf_wakeup(d);
	LIST_REMOVE(d, bd_list);
	mtx_leave(&d->bd_mtx);
	bpf_put(d);

	return (0);
}

/*
 * Rotate the packet buffers in descriptor d.  Move the store buffer
 * into the hold slot, and the free buffer into the store slot.
 * Zero the length of the new store buffer.
 */
#define ROTATE_BUFFERS(d) \
	KASSERT(d->bd_in_uiomove == 0); \
	MUTEX_ASSERT_LOCKED(&d->bd_mtx); \
	(d)->bd_hbuf = (d)->bd_sbuf; \
	(d)->bd_hlen = (d)->bd_slen; \
	(d)->bd_sbuf = (d)->bd_fbuf; \
	(d)->bd_slen = 0; \
	(d)->bd_fbuf = NULL;
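
/*
 * The three buffers cycle as follows (always under ``bd_mtx''):
 *
 *	fbuf (free) -> sbuf (store, being filled) -> hbuf (hold, being read)
 *
 * A completed bpfread() hands the drained hold buffer back as the new
 * free buffer.
 */
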
/*
 *  bpfread - read next chunk of packets from buffers
 */
int
bpfread(dev_t dev, struct uio *uio, int ioflag)
{
	struct bpf_d *d;
	caddr_t hbuf;
	int end, error, hlen, nticks;

	KERNEL_ASSERT_LOCKED();

	d = bpfilter_lookup(minor(dev));
	if (d->bd_bif == NULL)
		return (ENXIO);

	bpf_get(d);
	mtx_enter(&d->bd_mtx);

	/*
	 * Restrict the application to use a buffer the same size as
	 * the kernel buffers.
	 */
	if (uio->uio_resid != d->bd_bufsize) {
		error = EINVAL;
		goto out;
	}

	/*
	 * If there's a timeout, mark when the read should end.
	 */
	if (d->bd_rtout)
		end = ticks + (int)d->bd_rtout;

	/*
	 * If the hold buffer is empty, then do a timed sleep, which
	 * ends when the timeout expires or when enough packets
	 * have arrived to fill the store buffer.
	 */
	while (d->bd_hbuf == NULL) {
		if (d->bd_bif == NULL) {
			/* interface is gone */
			if (d->bd_slen == 0) {
				error = EIO;
				goto out;
			}
			ROTATE_BUFFERS(d);
			break;
		}
		if (d->bd_immediate && d->bd_slen != 0) {
			/*
			 * One or more packets arrived either since the
			 * previous read or while we were asleep.
			 * Rotate the buffers and return what's here.
			 */
			ROTATE_BUFFERS(d);
			break;
		}
		if (d->bd_rnonblock) {
			/* User requested non-blocking I/O */
			error = EWOULDBLOCK;
		} else if (d->bd_rtout == 0) {
			/* No read timeout set. */
			d->bd_nreaders++;
			error = msleep_nsec(d, &d->bd_mtx, PRINET|PCATCH,
			    "bpf", INFSLP);
			d->bd_nreaders--;
		} else if ((nticks = end - ticks) > 0) {
			/* Read timeout has not expired yet. */
			d->bd_nreaders++;
			error = msleep(d, &d->bd_mtx, PRINET|PCATCH, "bpf",
			    nticks);
			d->bd_nreaders--;
		} else {
			/* Read timeout has expired. */
			error = EWOULDBLOCK;
		}
		if (error == EINTR || error == ERESTART)
			goto out;
		if (error == EWOULDBLOCK) {
			/*
			 * On a timeout, return what's in the buffer,
			 * which may be nothing.  If there is something
			 * in the store buffer, we can rotate the buffers.
			 */
			if (d->bd_hbuf != NULL)
				/*
				 * We filled up the buffer in between
				 * getting the timeout and arriving
				 * here, so we don't need to rotate.
				 */
				break;

			if (d->bd_slen == 0) {
				error = 0;
				goto out;
			}
			ROTATE_BUFFERS(d);
			break;
		}
	}
	/*
	 * At this point, we know we have something in the hold slot.
	 */
	hbuf = d->bd_hbuf;
	hlen = d->bd_hlen;
	d->bd_hbuf = NULL;
	d->bd_hlen = 0;
	d->bd_fbuf = NULL;
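	/*
	 * Leaving bd_fbuf NULL makes bpf_catchpacket() drop packets
	 * instead of rotating buffers while bd_mtx is released for
	 * uiomove() below.
	 */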
	d->bd_in_uiomove = 1;

	/*
	 * Move data from hold buffer into user space.
	 * We know the entire buffer is transferred since
	 * we checked above that the read buffer is bpf_bufsize bytes.
	 */
	mtx_leave(&d->bd_mtx);
	error = uiomove(hbuf, hlen, uio);
	mtx_enter(&d->bd_mtx);

	/* Ensure that bpf_resetd() or ROTATE_BUFFERS() haven't been called. */
	KASSERT(d->bd_fbuf == NULL);
	KASSERT(d->bd_hbuf == NULL);
	d->bd_fbuf = hbuf;
	d->bd_in_uiomove = 0;
out:
	mtx_leave(&d->bd_mtx);
	bpf_put(d);

	return (error);
}


/*
 * If there are processes sleeping on this descriptor, wake them up.
 */
void
bpf_wakeup(struct bpf_d *d)
{
	MUTEX_ASSERT_LOCKED(&d->bd_mtx);

	/*
	 * As long as pgsigio() and selwakeup() need to be protected
	 * by the KERNEL_LOCK() we have to delay the wakeup to
	 * another context to keep the hot path KERNEL_LOCK()-free.
	 */
	bpf_get(d);
	if (!task_add(systq, &d->bd_wake_task))
		bpf_put(d);
}

void
bpf_wakeup_cb(void *xd)
{
	struct bpf_d *d = xd;

	wakeup(d);
	if (d->bd_async && d->bd_sig)
		pgsigio(&d->bd_sigio, d->bd_sig, 0);

	selwakeup(&d->bd_sel);
	bpf_put(d);
}

int
bpfwrite(dev_t dev, struct uio *uio, int ioflag)
{
	struct bpf_d *d;
	struct ifnet *ifp;
	struct mbuf *m;
	int error;
	struct sockaddr_storage dst;

	KERNEL_ASSERT_LOCKED();

	d = bpfilter_lookup(minor(dev));
	if (d->bd_bif == NULL)
		return (ENXIO);

	bpf_get(d);
	ifp = d->bd_bif->bif_ifp;

	if (ifp == NULL || (ifp->if_flags & IFF_UP) == 0) {
		error = ENETDOWN;
		goto out;
	}

	if (uio->uio_resid == 0) {
		error = 0;
		goto out;
	}

	error = bpf_movein(uio, d, &m, sstosa(&dst));
	if (error)
		goto out;

	if (m->m_pkthdr.len > ifp->if_mtu) {
		m_freem(m);
		error = EMSGSIZE;
		goto out;
	}

	m->m_pkthdr.ph_rtableid = ifp->if_rdomain;
	m->m_pkthdr.pf.prio = ifp->if_llprio;

	if (d->bd_hdrcmplt && dst.ss_family == AF_UNSPEC)
		dst.ss_family = pseudo_AF_HDRCMPLT;

	NET_LOCK();
	error = ifp->if_output(ifp, m, sstosa(&dst), NULL);
	NET_UNLOCK();

out:
	bpf_put(d);
	return (error);
}

/*
 * Reset a descriptor by flushing its packet buffer and clearing the
 * receive and drop counts.
 */
void
bpf_resetd(struct bpf_d *d)
{
	MUTEX_ASSERT_LOCKED(&d->bd_mtx);
	KASSERT(d->bd_in_uiomove == 0);

	if (d->bd_hbuf != NULL) {
		/* Free the hold buffer. */
		d->bd_fbuf = d->bd_hbuf;
		d->bd_hbuf = NULL;
	}
	d->bd_slen = 0;
	d->bd_hlen = 0;
	d->bd_rcount = 0;
	d->bd_dcount = 0;
}

/*
 *  FIONREAD		Check for read packet available.
 *  BIOCGBLEN		Get buffer len [for read()].
 *  BIOCSETF		Set ethernet read filter.
 *  BIOCFLUSH		Flush read packet buffer.
 *  BIOCPROMISC		Put interface into promiscuous mode.
 *  BIOCGDLTLIST	Get supported link layer types.
 *  BIOCGDLT		Get link layer type.
 *  BIOCSDLT		Set link layer type.
 *  BIOCGETIF		Get interface name.
 *  BIOCSETIF		Set interface.
 *  BIOCSRTIMEOUT	Set read timeout.
 *  BIOCGRTIMEOUT	Get read timeout.
 *  BIOCGSTATS		Get packet stats.
 *  BIOCIMMEDIATE	Set immediate mode.
 *  BIOCVERSION		Get filter language version.
 *  BIOCGHDRCMPLT	Get "header already complete" flag.
 *  BIOCSHDRCMPLT	Set "header already complete" flag.
 */
int
bpfioctl(dev_t dev, u_long cmd, caddr_t addr, int flag, struct proc *p)
{
	struct bpf_d *d;
	int error = 0;

	d = bpfilter_lookup(minor(dev));
	if (d->bd_locked && suser(p) != 0) {
		/* list of allowed ioctls when locked and not root */
		switch (cmd) {
		case BIOCGBLEN:
		case BIOCFLUSH:
		case BIOCGDLT:
		case BIOCGDLTLIST:
		case BIOCGETIF:
		case BIOCGRTIMEOUT:
		case BIOCGSTATS:
		case BIOCVERSION:
		case BIOCGRSIG:
		case BIOCGHDRCMPLT:
		case FIONREAD:
		case BIOCLOCK:
		case BIOCSRTIMEOUT:
		case BIOCIMMEDIATE:
		case TIOCGPGRP:
		case BIOCGDIRFILT:
			break;
		default:
			return (EPERM);
		}
	}

	bpf_get(d);

	switch (cmd) {
	default:
		error = EINVAL;
		break;

	/*
	 * Check for read packet available.
	 */
	case FIONREAD:
		{
			int n;

			mtx_enter(&d->bd_mtx);
			n = d->bd_slen;
			if (d->bd_hbuf != NULL)
				n += d->bd_hlen;
			mtx_leave(&d->bd_mtx);

			*(int *)addr = n;
			break;
		}

	/*
	 * Get buffer len [for read()].
	 */
	case BIOCGBLEN:
		*(u_int *)addr = d->bd_bufsize;
		break;

	/*
	 * Set buffer length.
	 */
	case BIOCSBLEN:
		if (d->bd_bif != NULL)
			error = EINVAL;
		else {
			u_int size = *(u_int *)addr;

			if (size > bpf_maxbufsize)
				*(u_int *)addr = size = bpf_maxbufsize;
			else if (size < BPF_MINBUFSIZE)
				*(u_int *)addr = size = BPF_MINBUFSIZE;
			mtx_enter(&d->bd_mtx);
			d->bd_bufsize = size;
			mtx_leave(&d->bd_mtx);
		}
		break;

	/*
	 * Set link layer read filter.
	 */
	case BIOCSETF:
		error = bpf_setf(d, (struct bpf_program *)addr, 0);
		break;

	/*
	 * Set link layer write filter.
	 */
	case BIOCSETWF:
		error = bpf_setf(d, (struct bpf_program *)addr, 1);
		break;

	/*
	 * Flush read packet buffer.
	 */
	case BIOCFLUSH:
		mtx_enter(&d->bd_mtx);
		bpf_resetd(d);
		mtx_leave(&d->bd_mtx);
		break;

	/*
	 * Put interface into promiscuous mode.
	 */
	case BIOCPROMISC:
		if (d->bd_bif == NULL) {
			/*
			 * No interface attached yet.
			 */
			error = EINVAL;
		} else if (d->bd_bif->bif_ifp != NULL) {
			if (d->bd_promisc == 0) {
				MUTEX_ASSERT_UNLOCKED(&d->bd_mtx);
				NET_LOCK();
				error = ifpromisc(d->bd_bif->bif_ifp, 1);
				NET_UNLOCK();
				if (error == 0)
					d->bd_promisc = 1;
			}
		}
		break;

	/*
	 * Get a list of supported link layer types.
	 */
	case BIOCGDLTLIST:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			error = bpf_getdltlist(d, (struct bpf_dltlist *)addr);
		break;

	/*
	 * Get link layer type.
	 */
	case BIOCGDLT:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			*(u_int *)addr = d->bd_bif->bif_dlt;
		break;

	/*
	 * Set link layer type.
	 */
	case BIOCSDLT:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else {
			mtx_enter(&d->bd_mtx);
			error = bpf_setdlt(d, *(u_int *)addr);
			mtx_leave(&d->bd_mtx);
		}
		break;

	/*
	 * Get interface name.
	 */
	case BIOCGETIF:
		if (d->bd_bif == NULL)
			error = EINVAL;
		else
			bpf_ifname(d->bd_bif, (struct ifreq *)addr);
		break;

	/*
	 * Set interface.
	 */
	case BIOCSETIF:
		error = bpf_setif(d, (struct ifreq *)addr);
		break;

	/*
	 * Set read timeout.
	 */
	case BIOCSRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;
			u_long rtout;

			/* Compute number of ticks. */
			if (tv->tv_sec < 0 || !timerisvalid(tv)) {
				error = EINVAL;
				break;
			}
			if (tv->tv_sec > INT_MAX / hz) {
				error = EOVERFLOW;
				break;
			}
			rtout = tv->tv_sec * hz;
			if (tv->tv_usec / tick > INT_MAX - rtout) {
				error = EOVERFLOW;
				break;
			}
			rtout += tv->tv_usec / tick;
			mtx_enter(&d->bd_mtx);
			d->bd_rtout = rtout;
			if (d->bd_rtout == 0 && tv->tv_usec != 0)
				d->bd_rtout = 1;
			mtx_leave(&d->bd_mtx);
			break;
		}

	/*
	 * Get read timeout.
	 */
	case BIOCGRTIMEOUT:
		{
			struct timeval *tv = (struct timeval *)addr;

			mtx_enter(&d->bd_mtx);
			tv->tv_sec = d->bd_rtout / hz;
			tv->tv_usec = (d->bd_rtout % hz) * tick;
			mtx_leave(&d->bd_mtx);
			break;
		}

	/*
	 * Get packet stats.
	 */
	case BIOCGSTATS:
		{
			struct bpf_stat *bs = (struct bpf_stat *)addr;

			bs->bs_recv = d->bd_rcount;
			bs->bs_drop = d->bd_dcount;
			break;
		}

	/*
	 * Set immediate mode.
	 */
	case BIOCIMMEDIATE:
		d->bd_immediate = *(u_int *)addr;
		break;

	case BIOCVERSION:
		{
			struct bpf_version *bv = (struct bpf_version *)addr;

			bv->bv_major = BPF_MAJOR_VERSION;
			bv->bv_minor = BPF_MINOR_VERSION;
			break;
		}

	case BIOCGHDRCMPLT:	/* get "header already complete" flag */
		*(u_int *)addr = d->bd_hdrcmplt;
		break;

	case BIOCSHDRCMPLT:	/* set "header already complete" flag */
		d->bd_hdrcmplt = *(u_int *)addr ? 1 : 0;
		break;

	case BIOCLOCK:		/* set "locked" flag (no reset) */
		d->bd_locked = 1;
		break;

	case BIOCGFILDROP:	/* get "filter-drop" flag */
		*(u_int *)addr = d->bd_fildrop;
		break;

	case BIOCSFILDROP: {	/* set "filter-drop" flag */
		unsigned int fildrop = *(u_int *)addr;
		switch (fildrop) {
		case BPF_FILDROP_PASS:
		case BPF_FILDROP_CAPTURE:
		case BPF_FILDROP_DROP:
			d->bd_fildrop = fildrop;
			break;
		default:
			error = EINVAL;
			break;
		}
		break;
	}

	case BIOCGDIRFILT:	/* get direction filter */
		*(u_int *)addr = d->bd_dirfilt;
		break;

	case BIOCSDIRFILT:	/* set direction filter */
		d->bd_dirfilt = (*(u_int *)addr) &
		    (BPF_DIRECTION_IN|BPF_DIRECTION_OUT);
		break;

	case FIONBIO:		/* Non-blocking I/O */
		if (*(int *)addr)
			d->bd_rnonblock = 1;
		else
			d->bd_rnonblock = 0;
		break;

	case FIOASYNC:		/* Send signal on receive packets */
		d->bd_async = *(int *)addr;
		break;

	case FIOSETOWN:		/* Process or group to send signals to */
	case TIOCSPGRP:
		error = sigio_setown(&d->bd_sigio, cmd, addr);
		break;

	case FIOGETOWN:
	case TIOCGPGRP:
		sigio_getown(&d->bd_sigio, cmd, addr);
		break;

	case BIOCSRSIG:		/* Set receive signal */
		{
			u_int sig;

			sig = *(u_int *)addr;

			if (sig >= NSIG)
				error = EINVAL;
			else
				d->bd_sig = sig;
			break;
		}
	case BIOCGRSIG:
		*(u_int *)addr = d->bd_sig;
		break;
	}

	bpf_put(d);
	return (error);
}
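
/*
 * Sketch of the classic userland sequence driving these ioctls
 * (illustrative only, error handling omitted; the interface name is
 * the caller's choice):
 *
 *	int fd = open("/dev/bpf", O_RDONLY);
 *	struct ifreq ifr;
 *	strlcpy(ifr.ifr_name, "em0", sizeof(ifr.ifr_name));
 *	ioctl(fd, BIOCSETIF, &ifr);	(attach to an interface)
 *	u_int on = 1;
 *	ioctl(fd, BIOCIMMEDIATE, &on);	(deliver packets as they arrive)
 *	u_int blen;
 *	ioctl(fd, BIOCGBLEN, &blen);	(read() must use exactly this size)
 *	char *buf = malloc(blen);
 *	read(fd, buf, blen);		(yields bpf_hdr-framed packets)
 */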

/*
 * Set d's packet filter program to fp.  If this file already has a filter,
 * free it and replace it.  Returns EINVAL for bogus requests.
 */
int
bpf_setf(struct bpf_d *d, struct bpf_program *fp, int wf)
{
	struct bpf_program_smr *bps, *old_bps;
	struct bpf_insn *fcode;
	u_int flen, size;

	KERNEL_ASSERT_LOCKED();

	if (fp->bf_insns == NULL) {
		if (fp->bf_len != 0)
			return (EINVAL);
		bps = NULL;
	} else {
		flen = fp->bf_len;
		if (flen > BPF_MAXINSNS)
			return (EINVAL);

		fcode = mallocarray(flen, sizeof(*fp->bf_insns), M_DEVBUF,
		    M_WAITOK | M_CANFAIL);
		if (fcode == NULL)
			return (ENOMEM);

		size = flen * sizeof(*fp->bf_insns);
		if (copyin(fp->bf_insns, fcode, size) != 0 ||
		    bpf_validate(fcode, (int)flen) == 0) {
			free(fcode, M_DEVBUF, size);
			return (EINVAL);
		}

		bps = malloc(sizeof(*bps), M_DEVBUF, M_WAITOK);
		smr_init(&bps->bps_smr);
		bps->bps_bf.bf_len = flen;
		bps->bps_bf.bf_insns = fcode;
	}

	if (wf == 0) {
		old_bps = SMR_PTR_GET_LOCKED(&d->bd_rfilter);
		SMR_PTR_SET_LOCKED(&d->bd_rfilter, bps);
	} else {
		old_bps = SMR_PTR_GET_LOCKED(&d->bd_wfilter);
		SMR_PTR_SET_LOCKED(&d->bd_wfilter, bps);
	}

	mtx_enter(&d->bd_mtx);
	bpf_resetd(d);
	mtx_leave(&d->bd_mtx);
	if (old_bps != NULL)
		smr_call(&old_bps->bps_smr, bpf_prog_smr, old_bps);

	return (0);
}

/*
 * Detach a file from its current interface (if attached at all) and attach
 * to the interface indicated by the name stored in ifr.
 * Return an errno or 0.
 */
int
bpf_setif(struct bpf_d *d, struct ifreq *ifr)
{
	struct bpf_if *bp, *candidate = NULL;
	int error = 0;

	/*
	 * Look through attached interfaces for the named one.
	 */
	for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
		if (strcmp(bp->bif_name, ifr->ifr_name) != 0)
			continue;

		if (candidate == NULL || candidate->bif_dlt > bp->bif_dlt)
			candidate = bp;
	}

	/* Not found. */
	if (candidate == NULL)
		return (ENXIO);

	/*
	 * Allocate the packet buffers if we need to.
	 * If we're already attached to requested interface,
	 * just flush the buffer.
	 */
	mtx_enter(&d->bd_mtx);
	if (d->bd_sbuf == NULL) {
		if ((error = bpf_allocbufs(d)))
			goto out;
	}
	if (candidate != d->bd_bif) {
		/*
		 * Detach if attached to something else.
		 */
		bpf_detachd(d);
		bpf_attachd(d, candidate);
	}
	bpf_resetd(d);
out:
	mtx_leave(&d->bd_mtx);
	return (error);
}

/*
 * Copy the interface name to the ifreq.
 */
void
bpf_ifname(struct bpf_if *bif, struct ifreq *ifr)
{
	bcopy(bif->bif_name, ifr->ifr_name, sizeof(ifr->ifr_name));
}

/*
 * Support for poll() system call
 */
int
bpfpoll(dev_t dev, int events, struct proc *p)
{
	struct bpf_d *d;
	int revents;

	KERNEL_ASSERT_LOCKED();

	/*
	 * An imitation of the FIONREAD ioctl code.
	 */
	d = bpfilter_lookup(minor(dev));

	/*
	 * XXX The USB stack manages to trigger a race which causes
	 * bpfilter_lookup to return NULL when a USB device gets
	 * detached while it is up and has an open bpf handler (e.g.
	 * dhclient).  We should still track down the root cause of
	 * this issue.
	 */
	if (d == NULL)
		return (POLLERR);

	/* Always ready to write data */
	revents = events & (POLLOUT | POLLWRNORM);

	if (events & (POLLIN | POLLRDNORM)) {
		mtx_enter(&d->bd_mtx);
		if (d->bd_hlen != 0 || (d->bd_immediate && d->bd_slen != 0))
			revents |= events & (POLLIN | POLLRDNORM);
		else
			selrecord(p, &d->bd_sel);
		mtx_leave(&d->bd_mtx);
	}
	return (revents);
}

const struct filterops bpfread_filtops = {
	.f_flags	= FILTEROP_ISFD,
	.f_attach	= NULL,
	.f_detach	= filt_bpfrdetach,
	.f_event	= filt_bpfread,
};

int
bpfkqfilter(dev_t dev, struct knote *kn)
{
	struct bpf_d *d;
	struct klist *klist;

	KERNEL_ASSERT_LOCKED();

	d = bpfilter_lookup(minor(dev));

	switch (kn->kn_filter) {
	case EVFILT_READ:
		klist = &d->bd_sel.si_note;
		kn->kn_fop = &bpfread_filtops;
		break;
	default:
		return (EINVAL);
	}

	bpf_get(d);
	kn->kn_hook = d;
	klist_insert_locked(klist, kn);

	return (0);
}

void
filt_bpfrdetach(struct knote *kn)
{
	struct bpf_d *d = kn->kn_hook;

	KERNEL_ASSERT_LOCKED();

	klist_remove_locked(&d->bd_sel.si_note, kn);
	bpf_put(d);
}

int
filt_bpfread(struct knote *kn, long hint)
{
	struct bpf_d *d = kn->kn_hook;

	KERNEL_ASSERT_LOCKED();

	mtx_enter(&d->bd_mtx);
	kn->kn_data = d->bd_hlen;
	if (d->bd_immediate)
		kn->kn_data += d->bd_slen;
	mtx_leave(&d->bd_mtx);

	return (kn->kn_data > 0);
}

/*
 * Copy data from an mbuf chain into a buffer.  This code is derived
 * from m_copydata in sys/uipc_mbuf.c.
 */
void
bpf_mcopy(const void *src_arg, void *dst_arg, size_t len)
{
	const struct mbuf *m;
	u_int count;
	u_char *dst;

	m = src_arg;
	dst = dst_arg;
	while (len > 0) {
		if (m == NULL)
			panic("bpf_mcopy");
		count = min(m->m_len, len);
		bcopy(mtod(m, caddr_t), (caddr_t)dst, count);
		m = m->m_next;
		dst += count;
		len -= count;
	}
}

int
bpf_mtap(caddr_t arg, const struct mbuf *m, u_int direction)
{
	return _bpf_mtap(arg, m, m, direction);
}

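/*
 * Run every descriptor listening on ``bp'' over the packet.  ``mp''
 * carries the packet header metadata (timestamp, interface, flow id)
 * while ``m'' is the chain the filter runs on and the bytes that get
 * captured; the two differ when a caller has prepended a dummy header.
 * Returns nonzero if a matching descriptor requests that the packet
 * be dropped.
 */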
int
_bpf_mtap(caddr_t arg, const struct mbuf *mp, const struct mbuf *m,
    u_int direction)
{
	struct bpf_if *bp = (struct bpf_if *)arg;
	struct bpf_d *d;
	size_t pktlen, slen;
	const struct mbuf *m0;
	struct bpf_hdr tbh;
	int gothdr = 0;
	int drop = 0;

	if (m == NULL)
		return (0);

	if (bp == NULL)
		return (0);

	pktlen = 0;
	for (m0 = m; m0 != NULL; m0 = m0->m_next)
		pktlen += m0->m_len;

	smr_read_enter();
	SMR_SLIST_FOREACH(d, &bp->bif_dlist, bd_next) {
		struct bpf_program_smr *bps;
		struct bpf_insn *fcode = NULL;

		atomic_inc_long(&d->bd_rcount);

		if (ISSET(d->bd_dirfilt, direction))
			continue;

		bps = SMR_PTR_GET(&d->bd_rfilter);
		if (bps != NULL)
			fcode = bps->bps_bf.bf_insns;
		slen = bpf_mfilter(fcode, m, pktlen);

		if (slen == 0)
			continue;
		if (d->bd_fildrop != BPF_FILDROP_PASS)
			drop = 1;
		if (d->bd_fildrop != BPF_FILDROP_DROP) {
			if (!gothdr) {
				struct timeval tv;
				memset(&tbh, 0, sizeof(tbh));

				if (ISSET(mp->m_flags, M_PKTHDR)) {
					tbh.bh_ifidx = mp->m_pkthdr.ph_ifidx;
					tbh.bh_flowid = mp->m_pkthdr.ph_flowid;
					tbh.bh_flags = mp->m_pkthdr.pf.prio;
					if (ISSET(mp->m_pkthdr.csum_flags,
					    M_FLOWID))
						SET(tbh.bh_flags, BPF_F_FLOWID);

					m_microtime(mp, &tv);
				} else
					microtime(&tv);

				tbh.bh_tstamp.tv_sec = tv.tv_sec;
				tbh.bh_tstamp.tv_usec = tv.tv_usec;
				SET(tbh.bh_flags, direction << BPF_F_DIR_SHIFT);

				gothdr = 1;
			}

			mtx_enter(&d->bd_mtx);
			bpf_catchpacket(d, (u_char *)m, pktlen, slen, &tbh);
			mtx_leave(&d->bd_mtx);
		}
	}
	smr_read_leave();

	return (drop);
}

/*
 * Incoming linkage from device drivers, where a data buffer should be
 * prepended by an arbitrary header.  In this situation we already have a
 * way of representing a chain of memory buffers, i.e. mbufs, so reuse
 * the existing functionality by attaching the buffers to mbufs.
 *
 * Con up a minimal mbuf chain to pacify bpf by allocating (only) a
 * struct m_hdr each for the header and data on the stack.
 */
int
bpf_tap_hdr(caddr_t arg, const void *hdr, unsigned int hdrlen,
    const void *buf, unsigned int buflen, u_int direction)
{
	struct m_hdr mh, md;
	struct mbuf *m0 = NULL;
	struct mbuf **mp = &m0;

	if (hdr != NULL) {
		mh.mh_flags = 0;
		mh.mh_next = NULL;
		mh.mh_len = hdrlen;
		mh.mh_data = (void *)hdr;

		*mp = (struct mbuf *)&mh;
		mp = &mh.mh_next;
	}

	if (buf != NULL) {
		md.mh_flags = 0;
		md.mh_next = NULL;
		md.mh_len = buflen;
		md.mh_data = (void *)buf;

		*mp = (struct mbuf *)&md;
	}

	return bpf_mtap(arg, m0, direction);
}

/*
 * Incoming linkage from device drivers, where we have a mbuf chain
 * but need to prepend some arbitrary header from a linear buffer.
 *
 * Con up a minimal dummy header to pacify bpf.  Allocate (only) a
 * struct m_hdr on the stack.  This is safe as bpf only reads from the
 * fields in this header that we initialize, and will not try to free
 * it or keep a pointer to it.
 */
int
bpf_mtap_hdr(caddr_t arg, const void *data, u_int dlen, const struct mbuf *m,
    u_int direction)
{
	struct m_hdr mh;
	const struct mbuf *m0;

	if (dlen > 0) {
		mh.mh_flags = 0;
		mh.mh_next = (struct mbuf *)m;
		mh.mh_len = dlen;
		mh.mh_data = (void *)data;
		m0 = (struct mbuf *)&mh;
	} else
		m0 = m;

	return _bpf_mtap(arg, m, m0, direction);
}

/*
 * Incoming linkage from device drivers, where we have a mbuf chain
 * but need to prepend the address family.
 *
 * Con up a minimal dummy header to pacify bpf.  We allocate (only) a
 * struct m_hdr on the stack.  This is safe as bpf only reads from the
 * fields in this header that we initialize, and will not try to free
 * it or keep a pointer to it.
 */
int
bpf_mtap_af(caddr_t arg, u_int32_t af, const struct mbuf *m, u_int direction)
{
	u_int32_t    afh;

	afh = htonl(af);

	return bpf_mtap_hdr(arg, &afh, sizeof(afh), m, direction);
}

/*
 * Incoming linkage from device drivers, where we have a mbuf chain
 * but need to prepend a VLAN encapsulation header.
 *
 * Con up a minimal dummy header to pacify bpf.  Allocate (only) a
 * struct m_hdr on the stack.  This is safe as bpf only reads from the
 * fields in this header that we initialize, and will not try to free
 * it or keep a pointer to it.
 */
int
bpf_mtap_ether(caddr_t arg, const struct mbuf *m, u_int direction)
{
#if NVLAN > 0
	struct ether_vlan_header evh;
	struct m_hdr mh, md;
	uint8_t prio;

	if ((m->m_flags & M_VLANTAG) == 0)
#endif
	{
		return _bpf_mtap(arg, m, m, direction);
	}

#if NVLAN > 0
	KASSERT(m->m_len >= ETHER_HDR_LEN);

	prio = m->m_pkthdr.pf.prio;
	if (prio <= 1)
		prio = !prio;

	memcpy(&evh, mtod(m, char *), ETHER_HDR_LEN);
	evh.evl_proto = evh.evl_encap_proto;
	evh.evl_encap_proto = htons(ETHERTYPE_VLAN);
	evh.evl_tag = htons(m->m_pkthdr.ether_vtag |
	    (prio << EVL_PRIO_BITS));

	mh.mh_flags = 0;
	mh.mh_data = (caddr_t)&evh;
	mh.mh_len = sizeof(evh);
	mh.mh_next = (struct mbuf *)&md;

	md.mh_flags = 0;
	md.mh_data = m->m_data + ETHER_HDR_LEN;
	md.mh_len = m->m_len - ETHER_HDR_LEN;
	md.mh_next = m->m_next;

	return _bpf_mtap(arg, m, (struct mbuf *)&mh, direction);
#endif
}

/*
 * Move the packet data from interface memory (pkt) into the
 * store buffer.  Wake up listeners if needed.
 * ``pkt'' is really an mbuf chain here and is copied with bpf_mcopy();
 * ``tbh'' is the prepared header prepended to the capture.
 */
void
bpf_catchpacket(struct bpf_d *d, u_char *pkt, size_t pktlen, size_t snaplen,
    const struct bpf_hdr *tbh)
{
	struct bpf_hdr *bh;
	int totlen, curlen;
	int hdrlen, do_wakeup = 0;

	MUTEX_ASSERT_LOCKED(&d->bd_mtx);
	if (d->bd_bif == NULL)
		return;

	hdrlen = d->bd_bif->bif_hdrlen;

	/*
	 * Figure out how many bytes to move.  If the packet is
	 * greater or equal to the snapshot length, transfer that
	 * much.  Otherwise, transfer the whole packet (unless
	 * we hit the buffer size limit).
	 */
	totlen = hdrlen + min(snaplen, pktlen);
	if (totlen > d->bd_bufsize)
		totlen = d->bd_bufsize;
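	/*
	 * If clamped here (or if snaplen is shorter than the packet),
	 * the capture is truncated: bh_caplen below ends up smaller
	 * than bh_datalen.
	 */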

	/*
	 * Round up the end of the previous packet to the next longword.
	 */
	curlen = BPF_WORDALIGN(d->bd_slen);
	if (curlen + totlen > d->bd_bufsize) {
		/*
		 * This packet will overflow the storage buffer.
		 * Rotate the buffers if we can, then wakeup any
		 * pending reads.
		 */
		if (d->bd_fbuf == NULL) {
			/*
			 * We haven't completed the previous read yet,
			 * so drop the packet.
			 */
			++d->bd_dcount;
			return;
		}
		ROTATE_BUFFERS(d);
		do_wakeup = 1;
		curlen = 0;
	}

	/*
	 * Append the bpf header.
	 */
	bh = (struct bpf_hdr *)(d->bd_sbuf + curlen);
	*bh = *tbh;
	bh->bh_datalen = pktlen;
	bh->bh_hdrlen = hdrlen;
	bh->bh_caplen = totlen - hdrlen;

	/*
	 * Copy the packet data into the store buffer and update its length.
	 */
	bpf_mcopy(pkt, (u_char *)bh + hdrlen, bh->bh_caplen);
	d->bd_slen = curlen + totlen;

	if (d->bd_immediate) {
		/*
		 * Immediate mode is set.  A packet arrived so any
		 * reads should be woken up.
		 */
		do_wakeup = 1;
	}

	if (d->bd_nreaders > 0) {
		/*
		 * We have one or more threads sleeping in bpfread().
		 * We got a packet, so wake up all readers.
		 */
		if (d->bd_fbuf != NULL) {
			ROTATE_BUFFERS(d);
			do_wakeup = 1;
		}
	}

	if (do_wakeup)
		bpf_wakeup(d);
}

/*
 * Allocate the initial packet buffers for a descriptor.
 */
int
bpf_allocbufs(struct bpf_d *d)
{
	MUTEX_ASSERT_LOCKED(&d->bd_mtx);

	d->bd_fbuf = malloc(d->bd_bufsize, M_DEVBUF, M_NOWAIT);
	if (d->bd_fbuf == NULL)
		return (ENOMEM);

	d->bd_sbuf = malloc(d->bd_bufsize, M_DEVBUF, M_NOWAIT);
	if (d->bd_sbuf == NULL) {
		free(d->bd_fbuf, M_DEVBUF, d->bd_bufsize);
		return (ENOMEM);
	}

	d->bd_slen = 0;
	d->bd_hlen = 0;

	return (0);
}

void
bpf_prog_smr(void *bps_arg)
{
	struct bpf_program_smr *bps = bps_arg;

	free(bps->bps_bf.bf_insns, M_DEVBUF,
	    bps->bps_bf.bf_len * sizeof(struct bpf_insn));
	free(bps, M_DEVBUF, sizeof(struct bpf_program_smr));
}

void
bpf_d_smr(void *smr)
{
	struct bpf_d	*bd = smr;

	sigio_free(&bd->bd_sigio);
	free(bd->bd_sbuf, M_DEVBUF, bd->bd_bufsize);
	free(bd->bd_hbuf, M_DEVBUF, bd->bd_bufsize);
	free(bd->bd_fbuf, M_DEVBUF, bd->bd_bufsize);

	if (bd->bd_rfilter != NULL)
		bpf_prog_smr(bd->bd_rfilter);
	if (bd->bd_wfilter != NULL)
		bpf_prog_smr(bd->bd_wfilter);

	free(bd, M_DEVBUF, sizeof(*bd));
}

void
bpf_get(struct bpf_d *bd)
{
	atomic_inc_int(&bd->bd_ref);
}

/*
 * Free buffers currently in use by a descriptor
 * when the reference count drops to zero.
 */
void
bpf_put(struct bpf_d *bd)
{
	if (atomic_dec_int_nv(&bd->bd_ref) > 0)
		return;

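	/* Defer the free until no SMR read section can still see ``bd''. */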
	smr_call(&bd->bd_smr, bpf_d_smr, bd);
}

void *
bpfsattach(caddr_t *bpfp, const char *name, u_int dlt, u_int hdrlen)
{
	struct bpf_if *bp;

	if ((bp = malloc(sizeof(*bp), M_DEVBUF, M_NOWAIT)) == NULL)
		panic("bpfattach");
	SMR_SLIST_INIT(&bp->bif_dlist);
	bp->bif_driverp = (struct bpf_if **)bpfp;
	bp->bif_name = name;
	bp->bif_ifp = NULL;
	bp->bif_dlt = dlt;

	bp->bif_next = bpf_iflist;
	bpf_iflist = bp;

	*bp->bif_driverp = NULL;

	/*
	 * Compute the length of the bpf header.  This is not necessarily
	 * equal to SIZEOF_BPF_HDR because we want to insert spacing such
	 * that the network layer header begins on a longword boundary (for
	 * performance reasons and to alleviate alignment restrictions).
	 */
	bp->bif_hdrlen = BPF_WORDALIGN(hdrlen + SIZEOF_BPF_HDR) - hdrlen;

	return (bp);
}

void
bpfattach(caddr_t *driverp, struct ifnet *ifp, u_int dlt, u_int hdrlen)
{
	struct bpf_if *bp;

	bp = bpfsattach(driverp, ifp->if_xname, dlt, hdrlen);
	bp->bif_ifp = ifp;
}

/* Detach an interface from its attached bpf device.  */
void
bpfdetach(struct ifnet *ifp)
{
	struct bpf_if *bp, *nbp;

	KERNEL_ASSERT_LOCKED();

	for (bp = bpf_iflist; bp; bp = nbp) {
		nbp = bp->bif_next;
		if (bp->bif_ifp == ifp)
			bpfsdetach(bp);
	}
	ifp->if_bpf = NULL;
}

void
bpfsdetach(void *p)
{
	struct bpf_if *bp = p, *tbp;
	struct bpf_d *bd;
	int maj;

	KERNEL_ASSERT_LOCKED();

	/* Locate the major number. */
	for (maj = 0; maj < nchrdev; maj++)
		if (cdevsw[maj].d_open == bpfopen)
			break;

	while ((bd = SMR_SLIST_FIRST_LOCKED(&bp->bif_dlist)))
		vdevgone(maj, bd->bd_unit, bd->bd_unit, VCHR);

	for (tbp = bpf_iflist; tbp; tbp = tbp->bif_next) {
		if (tbp->bif_next == bp) {
			tbp->bif_next = bp->bif_next;
			break;
		}
	}

	if (bpf_iflist == bp)
		bpf_iflist = bp->bif_next;

	free(bp, M_DEVBUF, sizeof(*bp));
}

int
bpf_sysctl_locked(int *name, u_int namelen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	switch (name[0]) {
	case NET_BPF_BUFSIZE:
		return sysctl_int_bounded(oldp, oldlenp, newp, newlen,
		    &bpf_bufsize, BPF_MINBUFSIZE, bpf_maxbufsize);
	case NET_BPF_MAXBUFSIZE:
		return sysctl_int_bounded(oldp, oldlenp, newp, newlen,
		    &bpf_maxbufsize, BPF_MINBUFSIZE, INT_MAX);
	default:
		return (EOPNOTSUPP);
	}
}

int
bpf_sysctl(int *name, u_int namelen, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{
	int flags = RW_INTR;
	int error;

	if (namelen != 1)
		return (ENOTDIR);

	flags |= (newp == NULL) ? RW_READ : RW_WRITE;

	error = rw_enter(&bpf_sysctl_lk, flags);
	if (error != 0)
		return (error);

	error = bpf_sysctl_locked(name, namelen, oldp, oldlenp, newp, newlen);

	rw_exit(&bpf_sysctl_lk);

	return (error);
}

struct bpf_d *
bpfilter_lookup(int unit)
{
	struct bpf_d *bd;

	KERNEL_ASSERT_LOCKED();

	LIST_FOREACH(bd, &bpf_d_list, bd_list)
		if (bd->bd_unit == unit)
			return (bd);
	return (NULL);
}

/*
 * Get the list of data link types available on the interface.
 */
int
bpf_getdltlist(struct bpf_d *d, struct bpf_dltlist *bfl)
{
	int n, error;
	struct bpf_if *bp;
	const char *name;

	name = d->bd_bif->bif_name;
	n = 0;
	error = 0;
	for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
		if (strcmp(name, bp->bif_name) != 0)
			continue;
		if (bfl->bfl_list != NULL) {
			if (n >= bfl->bfl_len)
				return (ENOMEM);
			error = copyout(&bp->bif_dlt,
			    bfl->bfl_list + n, sizeof(u_int));
			if (error)
				break;
		}
		n++;
	}

	bfl->bfl_len = n;
	return (error);
}

/*
 * Set the data link type of a BPF instance.
 */
int
bpf_setdlt(struct bpf_d *d, u_int dlt)
{
	const char *name;
	struct bpf_if *bp;

	MUTEX_ASSERT_LOCKED(&d->bd_mtx);
	if (d->bd_bif->bif_dlt == dlt)
		return (0);
	name = d->bd_bif->bif_name;
	for (bp = bpf_iflist; bp != NULL; bp = bp->bif_next) {
		if (strcmp(name, bp->bif_name) != 0)
			continue;
		if (bp->bif_dlt == dlt)
			break;
	}
	if (bp == NULL)
		return (EINVAL);
	bpf_detachd(d);
	bpf_attachd(d, bp);
	bpf_resetd(d);
	return (0);
}

u_int32_t	bpf_mbuf_ldw(const void *, u_int32_t, int *);
u_int32_t	bpf_mbuf_ldh(const void *, u_int32_t, int *);
u_int32_t	bpf_mbuf_ldb(const void *, u_int32_t, int *);

int		bpf_mbuf_copy(const struct mbuf *, u_int32_t,
		    void *, u_int32_t);

const struct bpf_ops bpf_mbuf_ops = {
	bpf_mbuf_ldw,
	bpf_mbuf_ldh,
	bpf_mbuf_ldb,
};

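/*
 * Copy ``len'' bytes starting at offset ``off'' out of an mbuf chain.
 * Returns 0 on success or -1 if the chain is too short; the load
 * helpers below turn that into *err for _bpf_filter().
 */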
int
bpf_mbuf_copy(const struct mbuf *m, u_int32_t off, void *buf, u_int32_t len)
{
	u_int8_t *cp = buf;
	u_int32_t count;

	while (off >= m->m_len) {
		off -= m->m_len;

		m = m->m_next;
		if (m == NULL)
			return (-1);
	}

	for (;;) {
		count = min(m->m_len - off, len);

		memcpy(cp, m->m_data + off, count);
		len -= count;

		if (len == 0)
			return (0);

		m = m->m_next;
		if (m == NULL)
			break;

		cp += count;
		off = 0;
	}

	return (-1);
}

u_int32_t
bpf_mbuf_ldw(const void *m0, u_int32_t k, int *err)
{
	u_int32_t v;

	if (bpf_mbuf_copy(m0, k, &v, sizeof(v)) != 0) {
		*err = 1;
		return (0);
	}

	*err = 0;
	return ntohl(v);
}

u_int32_t
bpf_mbuf_ldh(const void *m0, u_int32_t k, int *err)
{
	u_int16_t v;

	if (bpf_mbuf_copy(m0, k, &v, sizeof(v)) != 0) {
		*err = 1;
		return (0);
	}

	*err = 0;
	return ntohs(v);
}

u_int32_t
bpf_mbuf_ldb(const void *m0, u_int32_t k, int *err)
{
	const struct mbuf *m = m0;
	u_int8_t v;

	while (k >= m->m_len) {
		k -= m->m_len;

		m = m->m_next;
		if (m == NULL) {
			*err = 1;
			return (0);
		}
	}
	v = m->m_data[k];

	*err = 0;
	return v;
}

u_int
bpf_mfilter(const struct bpf_insn *pc, const struct mbuf *m, u_int wirelen)
{
	return _bpf_filter(pc, &bpf_mbuf_ops, m, wirelen);
}
1928