1 /*	$NetBSD: if_tap.c,v 1.11 2005/12/11 12:24:51 christos Exp $	*/
2 
3 /*
4  *  Copyright (c) 2003, 2004 The NetBSD Foundation.
5  *  All rights reserved.
6  *
7  *  This code is derived from software contributed to the NetBSD Foundation
8  *   by Quentin Garnier.
9  *
10  *  Redistribution and use in source and binary forms, with or without
11  *  modification, are permitted provided that the following conditions
12  *  are met:
13  *  1. Redistributions of source code must retain the above copyright
14  *     notice, this list of conditions and the following disclaimer.
15  *  2. Redistributions in binary form must reproduce the above copyright
16  *     notice, this list of conditions and the following disclaimer in the
17  *     documentation and/or other materials provided with the distribution.
18  *  3. All advertising materials mentioning features or use of this software
19  *     must display the following acknowledgement:
20  *         This product includes software developed by the NetBSD
21  *         Foundation, Inc. and its contributors.
22  *  4. Neither the name of The NetBSD Foundation nor the names of its
23  *     contributors may be used to endorse or promote products derived
24  *     from this software without specific prior written permission.
25  *
26  *  THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27  *  ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28  *  TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29  *  PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30  *  BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31  *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32  *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33  *  INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34  *  CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35  *  ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36  *  POSSIBILITY OF SUCH DAMAGE.
37  */
38 
39 /*
40  * tap(4) is a virtual Ethernet interface.  It appears as a real Ethernet
41  * device to the system, but can also be accessed by userland through a
42  * character device interface, which allows reading and injecting frames.
43  */
44 
45 #include <sys/cdefs.h>
46 __KERNEL_RCSID(0, "$NetBSD: if_tap.c,v 1.11 2005/12/11 12:24:51 christos Exp $");
47 
48 #if defined(_KERNEL_OPT)
49 #include "bpfilter.h"
50 #endif
51 
52 #include <sys/param.h>
53 #include <sys/systm.h>
54 #include <sys/kernel.h>
55 #include <sys/malloc.h>
56 #include <sys/conf.h>
57 #include <sys/device.h>
58 #include <sys/file.h>
59 #include <sys/filedesc.h>
60 #include <sys/ksyms.h>
61 #include <sys/poll.h>
62 #include <sys/select.h>
63 #include <sys/sockio.h>
64 #include <sys/sysctl.h>
65 
66 #include <net/if.h>
67 #include <net/if_dl.h>
68 #include <net/if_ether.h>
69 #include <net/if_media.h>
70 #include <net/if_tap.h>
71 #if NBPFILTER > 0
72 #include <net/bpf.h>
73 #endif
74 
75 /*
76  * sysctl node management
77  *
78  * It's not really possible to use a SYSCTL_SETUP block with
79  * the current LKM implementation, so it is easier to just define
80  * our own function.
81  *
82  * The handler function is a "helper" in Andrew Brown's sysctl
83  * framework terminology.  It is used as a gateway for sysctl
84  * requests over the nodes.
85  *
86  * tap_log allows the module to log creations of nodes and
87  * destroy them all at once using sysctl_teardown.
88  */
89 static int tap_node;
90 static int	tap_sysctl_handler(SYSCTLFN_PROTO);
91 SYSCTL_SETUP_PROTO(sysctl_tap_setup);
92 
93 /*
94  * Since we're an Ethernet device, we need the following three
95  * components: a leading struct device, a struct ethercom,
96  * and also a struct ifmedia since we don't attach a PHY to
97  * ourselves. We could emulate one, but there's no real
98  * point.
99  */
100 
101 struct tap_softc {
102 	struct device	sc_dev;
103 	struct ifmedia	sc_im;
104 	struct ethercom	sc_ec;
105 	int		sc_flags;
106 #define	TAP_INUSE	0x00000001	/* tap device can only be opened once */
107 #define TAP_ASYNCIO	0x00000002	/* user is using async I/O (SIGIO) on the device */
108 #define TAP_NBIO	0x00000004	/* user wants calls to avoid blocking */
109 #define TAP_GOING	0x00000008	/* interface is being destroyed */
110 	struct selinfo	sc_rsel;
111 	pid_t		sc_pgid; /* For async. IO */
112 	struct lock	sc_rdlock;
113 	struct simplelock	sc_kqlock;
114 };
115 
116 /* autoconf(9) glue */
117 
118 void	tapattach(int);
119 
120 static int	tap_match(struct device *, struct cfdata *, void *);
121 static void	tap_attach(struct device *, struct device *, void *);
122 static int	tap_detach(struct device*, int);
123 
124 /* Ethernet address helper functions */
125 
126 static char	*tap_ether_sprintf(char *, const u_char *);
127 static int	tap_ether_aton(u_char *, char *);
128 
129 CFATTACH_DECL(tap, sizeof(struct tap_softc),
130     tap_match, tap_attach, tap_detach, NULL);
131 extern struct cfdriver tap_cd;
132 
133 /* Real device access routines */
134 static int	tap_dev_close(struct tap_softc *);
135 static int	tap_dev_read(int, struct uio *, int);
136 static int	tap_dev_write(int, struct uio *, int);
137 static int	tap_dev_ioctl(int, u_long, caddr_t, struct lwp *);
138 static int	tap_dev_poll(int, int, struct lwp *);
139 static int	tap_dev_kqfilter(int, struct knote *);
140 
141 /* Fileops access routines */
142 static int	tap_fops_close(struct file *, struct lwp *);
143 static int	tap_fops_read(struct file *, off_t *, struct uio *,
144     struct ucred *, int);
145 static int	tap_fops_write(struct file *, off_t *, struct uio *,
146     struct ucred *, int);
147 static int	tap_fops_ioctl(struct file *, u_long, void *,
148     struct lwp *);
149 static int	tap_fops_poll(struct file *, int, struct lwp *);
150 static int	tap_fops_kqfilter(struct file *, struct knote *);
151 
152 static const struct fileops tap_fileops = {
153 	tap_fops_read,
154 	tap_fops_write,
155 	tap_fops_ioctl,
156 	fnullop_fcntl,
157 	tap_fops_poll,
158 	fbadop_stat,
159 	tap_fops_close,
160 	tap_fops_kqfilter,
161 };
162 
163 /* Helper for cloning open() */
164 static int	tap_dev_cloner(struct lwp *);
165 
166 /* Character device routines */
167 static int	tap_cdev_open(dev_t, int, int, struct lwp *);
168 static int	tap_cdev_close(dev_t, int, int, struct lwp *);
169 static int	tap_cdev_read(dev_t, struct uio *, int);
170 static int	tap_cdev_write(dev_t, struct uio *, int);
171 static int	tap_cdev_ioctl(dev_t, u_long, caddr_t, int, struct lwp *);
172 static int	tap_cdev_poll(dev_t, int, struct lwp *);
173 static int	tap_cdev_kqfilter(dev_t, struct knote *);
174 
175 const struct cdevsw tap_cdevsw = {
176 	tap_cdev_open, tap_cdev_close,
177 	tap_cdev_read, tap_cdev_write,
178 	tap_cdev_ioctl, nostop, notty,
179 	tap_cdev_poll, nommap,
180 	tap_cdev_kqfilter,
181 };
182 
183 #define TAP_CLONER	0xfffff		/* Maximal minor value */
184 
185 /* kqueue-related routines */
186 static void	tap_kqdetach(struct knote *);
187 static int	tap_kqread(struct knote *, long);
188 
189 /*
190  * Those are needed by the if_media interface.
191  */
192 
193 static int	tap_mediachange(struct ifnet *);
194 static void	tap_mediastatus(struct ifnet *, struct ifmediareq *);
195 
196 /*
197  * Those are needed by the ifnet interface, and would typically be
198  * there for any network interface driver.
199  * Some other routines are optional: watchdog and drain.
200  */
201 
202 static void	tap_start(struct ifnet *);
203 static void	tap_stop(struct ifnet *, int);
204 static int	tap_init(struct ifnet *);
205 static int	tap_ioctl(struct ifnet *, u_long, caddr_t);
206 
207 /* This is an internal function to keep tap_ioctl readable */
208 static int	tap_lifaddr(struct ifnet *, u_long, struct ifaliasreq *);
209 
210 /*
211  * tap is a clonable interface, although it is highly unrealistic for
212  * an Ethernet device.
213  *
214  * Here are the bits needed for a clonable interface.
215  */
216 static int	tap_clone_create(struct if_clone *, int);
217 static int	tap_clone_destroy(struct ifnet *);
218 
219 struct if_clone tap_cloners = IF_CLONE_INITIALIZER("tap",
220 					tap_clone_create,
221 					tap_clone_destroy);
222 
223 /* Helper functions shared by the two cloning code paths */
224 static struct tap_softc *	tap_clone_creator(int);
225 static int	tap_clone_destroyer(struct device *);
226 
227 void
228 tapattach(int n)
229 {
230 	int error;
231 
232 	error = config_cfattach_attach(tap_cd.cd_name, &tap_ca);
233 	if (error) {
234 		aprint_error("%s: unable to register cfattach\n",
235 		    tap_cd.cd_name);
236 		(void)config_cfdriver_detach(&tap_cd);
237 		return;
238 	}
239 
240 	if_clone_attach(&tap_cloners);
241 }
242 
243 /* Pretty much useless for a pseudo-device */
244 static int
245 tap_match(struct device *self, struct cfdata *cfdata, void *arg)
246 {
247 	return (1);
248 }
249 
250 void
251 tap_attach(struct device *parent, struct device *self, void *aux)
252 {
253 	struct tap_softc *sc = (struct tap_softc *)self;
254 	struct ifnet *ifp;
255 	u_int8_t enaddr[ETHER_ADDR_LEN] =
256 	    { 0xf2, 0x0b, 0xa4, 0xff, 0xff, 0xff };
257 	char enaddrstr[18];
258 	uint32_t ui;
259 	int error;
260 	const struct sysctlnode *node;
261 
262 	aprint_normal("%s: faking Ethernet device\n",
263 	    self->dv_xname);
264 
265 	/*
266 	 * In order to obtain a unique initial Ethernet address on a host,
267 	 * do some randomisation using mono_time.  It's not meant for anything
268 	 * but avoiding hard-coding an address.
269 	 */
270 	ui = (mono_time.tv_sec ^ mono_time.tv_usec) & 0xffffff;
271 	memcpy(enaddr+3, (u_int8_t *)&ui, 3);
272 
273 	aprint_normal("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
274 	    tap_ether_sprintf(enaddrstr, enaddr));
275 
276 	/*
277 	 * Why 1000baseT? Why not? You can add more.
278 	 *
279 	 * Note that there are 3 steps: init, one or several additions to
280 	 * the list of supported media, and in the end, the selection of one
281 	 * of them.
282 	 */
283 	ifmedia_init(&sc->sc_im, 0, tap_mediachange, tap_mediastatus);
284 	ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_1000_T, 0, NULL);
285 	ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_1000_T|IFM_FDX, 0, NULL);
286 	ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_100_TX, 0, NULL);
287 	ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
288 	ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_10_T, 0, NULL);
289 	ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
290 	ifmedia_add(&sc->sc_im, IFM_ETHER|IFM_AUTO, 0, NULL);
291 	ifmedia_set(&sc->sc_im, IFM_ETHER|IFM_AUTO);
292 
293 	/*
294 	 * One should note that an interface must do multicast in order
295 	 * to support IPv6.
296 	 */
297 	ifp = &sc->sc_ec.ec_if;
298 	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
299 	ifp->if_softc	= sc;
300 	ifp->if_flags	= IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
301 	ifp->if_ioctl	= tap_ioctl;
302 	ifp->if_start	= tap_start;
303 	ifp->if_stop	= tap_stop;
304 	ifp->if_init	= tap_init;
305 	IFQ_SET_READY(&ifp->if_snd);
306 
307 	sc->sc_ec.ec_capabilities = ETHERCAP_VLAN_MTU | ETHERCAP_JUMBO_MTU;
308 
309 	/* Those steps are mandatory for an Ethernet driver, the first call
310 	 * being common to all network interface drivers. */
311 	if_attach(ifp);
312 	ether_ifattach(ifp, enaddr);
313 
314 	sc->sc_flags = 0;
315 
316 	/*
317 	 * Add a sysctl node for that interface.
318 	 *
319 	 * The pointer transmitted is not a string, but instead a pointer to
320 	 * the softc structure, which we can use to build the string value on
321 	 * the fly in the helper function of the node.  See the comments for
322 	 * tap_sysctl_handler for details.
323 	 */
324 	if ((error = sysctl_createv(NULL, 0, NULL,
325 	    &node, CTLFLAG_READWRITE,
326 	    CTLTYPE_STRING, sc->sc_dev.dv_xname, NULL,
327 	    tap_sysctl_handler, 0, sc, 18,
328 	    CTL_NET, AF_LINK, tap_node, sc->sc_dev.dv_unit, CTL_EOL)) != 0)
329 		aprint_error("%s: sysctl_createv returned %d, ignoring\n",
330 		    sc->sc_dev.dv_xname, error);
331 
332 	/*
333 	 * Initialize the two locks for the device.
334 	 *
335 	 * We need a lock here because even though the tap device can be
336 	 * opened only once, the file descriptor might be passed to another
337 	 * process, say a fork(2)ed child.
338 	 *
339 	 * The Giant saves us from most of the hassle, but since the read
340 	 * operation can sleep, we don't want two processes to wake up at
341 	 * the same moment and both try and dequeue a single packet.
342 	 *
343 	 * The queue for event listeners (used by kqueue(9), see below) has
344 	 * to be protected, too, but we don't need the same level of
345 	 * complexity for that lock, so a simple spinning lock is fine.
346 	 */
347 	lockinit(&sc->sc_rdlock, PSOCK|PCATCH, "tapl", 0, LK_SLEEPFAIL);
348 	simple_lock_init(&sc->sc_kqlock);
349 }
350 
351 /*
352  * When detaching, we do the inverse of what is done in the attach
353  * routine, in reverse order.
354  */
355 static int
356 tap_detach(struct device* self, int flags)
357 {
358 	struct tap_softc *sc = (struct tap_softc *)self;
359 	struct ifnet *ifp = &sc->sc_ec.ec_if;
360 	int error, s;
361 
362 	/*
363 	 * Some processes might be sleeping on "tap", so we have to make
364 	 * them release their hold on the device.
365 	 *
366 	 * The LK_DRAIN operation will wait for every locked process to
367 	 * release their hold.
368 	 */
369 	sc->sc_flags |= TAP_GOING;
370 	s = splnet();
371 	tap_stop(ifp, 1);
372 	if_down(ifp);
373 	splx(s);
374 	lockmgr(&sc->sc_rdlock, LK_DRAIN, NULL);
375 
376 	/*
377 	 * Destroying a single leaf is a very straightforward operation using
378 	 * sysctl_destroyv.  One should be sure to always end the path with
379 	 * CTL_EOL.
380 	 */
381 	if ((error = sysctl_destroyv(NULL, CTL_NET, AF_LINK, tap_node,
382 	    sc->sc_dev.dv_unit, CTL_EOL)) != 0)
383 		aprint_error("%s: sysctl_destroyv returned %d, ignoring\n",
384 		    sc->sc_dev.dv_xname, error);
385 	ether_ifdetach(ifp);
386 	if_detach(ifp);
387 	ifmedia_delete_instance(&sc->sc_im, IFM_INST_ANY);
388 
389 	return (0);
390 }
391 
392 /*
393  * This function is called by the ifmedia layer to notify the driver
394  * that the user requested a media change.  A real driver would
395  * reconfigure the hardware.
396  */
397 static int
398 tap_mediachange(struct ifnet *ifp)
399 {
400 	return (0);
401 }
402 
403 /*
404  * Here the user asks for the currently used media.
405  */
406 static void
407 tap_mediastatus(struct ifnet *ifp, struct ifmediareq *imr)
408 {
409 	struct tap_softc *sc = (struct tap_softc *)ifp->if_softc;
410 	imr->ifm_active = sc->sc_im.ifm_cur->ifm_media;
411 }
412 
413 /*
414  * This is the function where we SEND packets.
415  *
416  * There is no 'receive' equivalent.  A typical driver will get
417  * interrupts from the hardware, and from there will inject new packets
418  * into the network stack.
419  *
420  * Once handled, a packet must be freed.  A real driver might not be able
421  * to fit all the pending packets into the hardware, and is allowed to
422  * return before having sent all the packets.  It should then use the
423  * if_flags flag IFF_OACTIVE to notify the upper layer.
424  *
425  * There are also other flags one should check, such as IFF_PAUSE.
426  *
427  * It is our duty to make packets available to BPF listeners.
428  *
429  * You should be aware that this function is called by the Ethernet layer
430  * at splnet().
431  *
432  * When the device is opened, we have to pass the packet(s) to the
433  * userland.  For that we stay in OACTIVE mode while the userland gets
434  * the packets, and we send a signal to the processes waiting to read.
435  *
436  * wakeup(sc) is the counterpart to the tsleep call in
437  * tap_dev_read, while selnotify() is used for kevent(2) and
438  * poll(2) (which includes select(2)) listeners.
439  */
440 static void
441 tap_start(struct ifnet *ifp)
442 {
443 	struct tap_softc *sc = (struct tap_softc *)ifp->if_softc;
444 	struct mbuf *m0;
445 
446 	if ((sc->sc_flags & TAP_INUSE) == 0) {
447 		/* Simply drop packets */
448 		for(;;) {
449 			IFQ_DEQUEUE(&ifp->if_snd, m0);
450 			if (m0 == NULL)
451 				return;
452 
453 			ifp->if_opackets++;
454 #if NBPFILTER > 0
455 			if (ifp->if_bpf)
456 				bpf_mtap(ifp->if_bpf, m0);
457 #endif
458 
459 			m_freem(m0);
460 		}
461 	} else if (!IFQ_IS_EMPTY(&ifp->if_snd)) {
462 		ifp->if_flags |= IFF_OACTIVE;
463 		wakeup(sc);
464 		selnotify(&sc->sc_rsel, 1);
465 		if (sc->sc_flags & TAP_ASYNCIO)
466 			fownsignal(sc->sc_pgid, SIGIO, POLL_IN,
467 			    POLLIN|POLLRDNORM, NULL);
468 	}
469 }
470 
471 /*
472  * A typical driver will only contain the following handlers for
473  * ioctl calls, except SIOCSIFPHYADDR.
474  * The latter is a hack I used to set the Ethernet address of the
475  * faked device.
476  *
477  * Note that both ifmedia_ioctl() and ether_ioctl() have to be
478  * called under splnet().
479  */
480 static int
481 tap_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
482 {
483 	struct tap_softc *sc = (struct tap_softc *)ifp->if_softc;
484 	struct ifreq *ifr = (struct ifreq *)data;
485 	int s, error;
486 
487 	s = splnet();
488 
489 	switch (cmd) {
490 	case SIOCSIFMEDIA:
491 	case SIOCGIFMEDIA:
492 		error = ifmedia_ioctl(ifp, ifr, &sc->sc_im, cmd);
493 		break;
494 	case SIOCSIFPHYADDR:
495 		error = tap_lifaddr(ifp, cmd, (struct ifaliasreq *)data);
496 		break;
497 	default:
498 		error = ether_ioctl(ifp, cmd, data);
499 		if (error == ENETRESET)
500 			error = 0;
501 		break;
502 	}
503 
504 	splx(s);
505 
506 	return (error);
507 }
508 
509 /*
510  * Helper function to set the Ethernet address.  This shouldn't be done here,
511  * and should actually be available to all Ethernet drivers, real or not.
512  */
513 static int
514 tap_lifaddr(struct ifnet *ifp, u_long cmd, struct ifaliasreq *ifra)
515 {
516 	struct sockaddr *sa = (struct sockaddr *)&ifra->ifra_addr;
517 
518 	if (sa->sa_family != AF_LINK)
519 		return (EINVAL);
520 
521 	memcpy(LLADDR(ifp->if_sadl), sa->sa_data, ETHER_ADDR_LEN);
522 
523 	return (0);
524 }
525 
526 /*
527  * _init() would typically be called when an interface goes up,
528  * meaning it should configure itself into the state in which it
529  * can send packets.
530  */
531 static int
532 tap_init(struct ifnet *ifp)
533 {
534 	ifp->if_flags |= IFF_RUNNING;
535 
536 	tap_start(ifp);
537 
538 	return (0);
539 }
540 
541 /*
542  * _stop() is called when an interface goes down.  It is our
543  * responsibility to validate that state by clearing the
544  * IFF_RUNNING flag.
545  *
546  * We have to wake up all the sleeping processes to have the pending
547  * read requests cancelled.
548  */
549 static void
550 tap_stop(struct ifnet *ifp, int disable)
551 {
552 	struct tap_softc *sc = (struct tap_softc *)ifp->if_softc;
553 
554 	ifp->if_flags &= ~IFF_RUNNING;
555 	wakeup(sc);
556 	selnotify(&sc->sc_rsel, 1);
557 	if (sc->sc_flags & TAP_ASYNCIO)
558 		fownsignal(sc->sc_pgid, SIGIO, POLL_HUP, 0, NULL);
559 }
560 
561 /*
562  * The 'create' command of ifconfig can be used to create
563  * any numbered instance of a given device.  Thus we have to
564  * make sure we have enough room in cd_devs to create the
565  * user-specified instance.  config_attach_pseudo will do this
566  * for us.
567  */
568 static int
569 tap_clone_create(struct if_clone *ifc, int unit)
570 {
571 	if (tap_clone_creator(unit) == NULL) {
572 		aprint_error("%s%d: unable to attach an instance\n",
573                     tap_cd.cd_name, unit);
574 		return (ENXIO);
575 	}
576 
577 	return (0);
578 }
579 
580 /*
581  * tap(4) can be cloned in two ways:
582  *   using 'ifconfig tap0 create', which will use the network
583  *     interface cloning API, and call tap_clone_create above.
584  *   opening the cloning device node, whose minor number is TAP_CLONER.
585  *     See below for an explanation of how this part works.
586  *
587  * config_attach_pseudo can be called with unit = DVUNIT_ANY to have
588  * autoconf(9) choose a unit number for us.  This is what happens when
589  * the cloner is openend, while the ifcloner interface creates a device
590  * with a specific unit number.
591  */
592 static struct tap_softc *
593 tap_clone_creator(int unit)
594 {
595 	struct cfdata *cf;
596 
597 	cf = malloc(sizeof(*cf), M_DEVBUF, M_WAITOK);
598 	cf->cf_name = tap_cd.cd_name;
599 	cf->cf_atname = tap_ca.ca_name;
600 	cf->cf_unit = unit;
601 	cf->cf_fstate = FSTATE_STAR;
602 
603 	return (struct tap_softc *)config_attach_pseudo(cf);
604 }
605 
606 /*
607  * The clean design of if_clone and autoconf(9) makes that part
608  * really straightforward.  The second argument of config_detach
609  * means neither QUIET nor FORCED.
610  */
611 static int
612 tap_clone_destroy(struct ifnet *ifp)
613 {
614 	return tap_clone_destroyer((struct device *)ifp->if_softc);
615 }
616 
617 static int
618 tap_clone_destroyer(struct device *dev)
619 {
620 	struct cfdata *cf = dev->dv_cfdata;
621 	int error;
622 
623 	if ((error = config_detach(dev, 0)) != 0)
624 		aprint_error("%s: unable to detach instance\n",
625 		    dev->dv_xname);
626 	free(cf, M_DEVBUF);
627 
628 	return (error);
629 }
630 
631 /*
632  * tap(4) is a bit of a hybrid device.  It can be used in two different
633  * ways:
634  *  1. ifconfig tapN create, then use /dev/tapN to read/write off it.
635  *  2. open /dev/tap, get a new interface created and read/write off it.
636  *     That interface is destroyed when the process that had it created exits.
637  *
638  * The first way is managed by the cdevsw structure, and you access interfaces
639  * through a (major, minor) mapping:  tap4 is obtained by the minor number
640  * 4.  The entry points for the cdevsw interface are prefixed by tap_cdev_.
641  *
642  * The second way is the so-called "cloning" device.  It's a special minor
643  * number (chosen as the maximal number, to allow as many tap devices as
644  * possible).  The user first opens the cloner (e.g., /dev/tap), and that
645  * call ends in tap_cdev_open.  The actual place where it is handled is
646  * tap_dev_cloner.
647  *
648  * A tap device cannot be opened more than once at a time, so the cdevsw
649  * part of open() does nothing but note that the interface is being used and
650  * hence ready to actually handle packets.
651  */
652 
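/*
 * Illustration only, not part of the driver: a minimal userland sketch of
 * the two access paths described above.  The device and interface names
 * ("/dev/tap", "/dev/tap0") are examples, and error handling is elided.
 *
 *	#include <sys/types.h>
 *	#include <sys/ioctl.h>
 *	#include <net/if.h>
 *	#include <net/if_tap.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		struct ifreq ifr;
 *		char buf[2048];
 *		int fd;
 *
 *		// 1. Pre-created interface: "ifconfig tap0 create", then
 *		//    open the matching device node:
 *		// fd = open("/dev/tap0", O_RDWR);
 *
 *		// 2. Cloning device: opening /dev/tap creates a fresh
 *		//    interface; TAPGIFNAME (see tap_dev_ioctl below) tells
 *		//    us which one we got.
 *		fd = open("/dev/tap", O_RDWR);
 *		if (fd == -1 || ioctl(fd, TAPGIFNAME, &ifr) == -1)
 *			return (1);
 *		printf("attached to %s\n", ifr.ifr_name);
 *
 *		// One read returns one Ethernet frame queued on the interface.
 *		(void)read(fd, buf, sizeof(buf));
 *		close(fd);
 *		return (0);
 *	}
 */
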
653 static int
654 tap_cdev_open(dev_t dev, int flags, int fmt, struct lwp *l)
655 {
656 	struct tap_softc *sc;
657 
658 	if (minor(dev) == TAP_CLONER)
659 		return tap_dev_cloner(l);
660 
661 	sc = (struct tap_softc *)device_lookup(&tap_cd, minor(dev));
662 	if (sc == NULL)
663 		return (ENXIO);
664 
665 	/* The device can only be opened once */
666 	if (sc->sc_flags & TAP_INUSE)
667 		return (EBUSY);
668 	sc->sc_flags |= TAP_INUSE;
669 	return (0);
670 }
671 
672 /*
673  * There are several kinds of cloning devices, and the simplest is the one
674  * tap(4) uses.  What it does is replace the file descriptor with a new one,
675  * with its own fileops structure (which maps to the various read, write,
676  * ioctl functions).  It starts by allocating a new file descriptor with
677  * falloc, then actually creates the new tap device.
678  *
679  * Once those two steps are successful, we can re-wire the existing file
680  * descriptor to its new self.  This is done with fdclone():  it fills the fp
681  * structure as needed (notably f_data gets filled with the fifth parameter
682  * passed, the unit of the tap device which will allows us identifying the
683  * device later), and returns EMOVEFD.
684  *
685  * That magic value is interpreted by sys_open() which then replaces the
686  * current file descriptor by the new one (through a magic member of struct
687  * proc, p_dupfd).
688  *
689  * The tap device is flagged as being busy since it otherwise could be
690  * externally accessed through the corresponding device node with the cdevsw
691  * interface.
692  */
693 
694 static int
695 tap_dev_cloner(struct lwp *l)
696 {
697 	struct tap_softc *sc;
698 	struct file *fp;
699 	int error, fd;
700 
701 	if ((error = falloc(l->l_proc, &fp, &fd)) != 0)
702 		return (error);
703 
704 	if ((sc = tap_clone_creator(DVUNIT_ANY)) == NULL) {
705 		FILE_UNUSE(fp, l);
706 		ffree(fp);
707 		return (ENXIO);
708 	}
709 
710 	sc->sc_flags |= TAP_INUSE;
711 
712 	return fdclone(l, fp, fd, FREAD|FWRITE, &tap_fileops,
713 	    (void *)(intptr_t)sc->sc_dev.dv_unit);
714 }
715 
716 /*
717  * While all other operations (read, write, ioctl, poll and kqfilter) are
718  * really the same whether we are in cdevsw or fileops mode, the close()
719  * function is slightly different in the two cases.
720  *
721  * As for the others, the core of it is shared in tap_dev_close.  What
722  * it does is sufficient for the cdevsw interface, but the cloning interface
723  * needs another thing:  the interface is destroyed when the process that
724  * created it closes it.
725  */
726 static int
727 tap_cdev_close(dev_t dev, int flags, int fmt, struct lwp *l)
728 {
729 	struct tap_softc *sc =
730 	    (struct tap_softc *)device_lookup(&tap_cd, minor(dev));
731 
732 	if (sc == NULL)
733 		return (ENXIO);
734 
735 	return tap_dev_close(sc);
736 }
737 
738 /*
739  * It might happen that the administrator used ifconfig to externally destroy
740  * the interface.  In that case, tap_fops_close will be called while
741  * tap_detach is already happening.  If we called it again from here, we
742  * would dead lock.  TAP_GOING ensures that this situation doesn't happen.
743  */
744 static int
745 tap_fops_close(struct file *fp, struct lwp *l)
746 {
747 	int unit = (intptr_t)fp->f_data;
748 	struct tap_softc *sc;
749 	int error;
750 
751 	sc = (struct tap_softc *)device_lookup(&tap_cd, unit);
752 	if (sc == NULL)
753 		return (ENXIO);
754 
755 	/* tap_dev_close currently always succeeds, but it might not
756 	 * always be the case. */
757 	if ((error = tap_dev_close(sc)) != 0)
758 		return (error);
759 
760 	/* Destroy the device now that it is no longer useful,
761 	 * unless it's already being destroyed. */
762 	if ((sc->sc_flags & TAP_GOING) != 0)
763 		return (0);
764 
765 	return tap_clone_destroyer((struct device *)sc);
766 }
767 
768 static int
769 tap_dev_close(struct tap_softc *sc)
770 {
771 	struct ifnet *ifp;
772 	int s;
773 
774 	s = splnet();
775 	/* Let tap_start handle packets again */
776 	ifp = &sc->sc_ec.ec_if;
777 	ifp->if_flags &= ~IFF_OACTIVE;
778 
779 	/* Purge output queue */
780 	if (!(IFQ_IS_EMPTY(&ifp->if_snd))) {
781 		struct mbuf *m;
782 
783 		for (;;) {
784 			IFQ_DEQUEUE(&ifp->if_snd, m);
785 			if (m == NULL)
786 				break;
787 
788 			ifp->if_opackets++;
789 #if NBPFILTER > 0
790 			if (ifp->if_bpf)
791 				bpf_mtap(ifp->if_bpf, m);
792 #endif
			m_freem(m);
793 		}
794 	}
795 	splx(s);
796 
797 	sc->sc_flags &= ~(TAP_INUSE | TAP_ASYNCIO);
798 
799 	return (0);
800 }
801 
802 static int
803 tap_cdev_read(dev_t dev, struct uio *uio, int flags)
804 {
805 	return tap_dev_read(minor(dev), uio, flags);
806 }
807 
808 static int
809 tap_fops_read(struct file *fp, off_t *offp, struct uio *uio,
810     struct ucred *cred, int flags)
811 {
812 	return tap_dev_read((intptr_t)fp->f_data, uio, flags);
813 }
814 
815 static int
816 tap_dev_read(int unit, struct uio *uio, int flags)
817 {
818 	struct tap_softc *sc =
819 	    (struct tap_softc *)device_lookup(&tap_cd, unit);
820 	struct ifnet *ifp;
821 	struct mbuf *m, *n;
822 	int error = 0, s;
823 
824 	if (sc == NULL)
825 		return (ENXIO);
826 
827 	ifp = &sc->sc_ec.ec_if;
828 	if ((ifp->if_flags & IFF_UP) == 0)
829 		return (EHOSTDOWN);
830 
831 	/*
832 	 * In the TAP_NBIO case, we have to make sure we won't be sleeping
833 	 */
834 	if ((sc->sc_flags & TAP_NBIO) &&
835 	    lockstatus(&sc->sc_rdlock) == LK_EXCLUSIVE)
836 		return (EWOULDBLOCK);
837 	error = lockmgr(&sc->sc_rdlock, LK_EXCLUSIVE, NULL);
838 	if (error != 0)
839 		return (error);
840 
841 	s = splnet();
842 	if (IFQ_IS_EMPTY(&ifp->if_snd)) {
843 		ifp->if_flags &= ~IFF_OACTIVE;
844 		splx(s);
845 		/*
846 		 * We must release the lock before sleeping, and re-acquire it
847 		 * after.
848 		 */
849 		(void)lockmgr(&sc->sc_rdlock, LK_RELEASE, NULL);
850 		if (sc->sc_flags & TAP_NBIO)
851 			error = EWOULDBLOCK;
852 		else
853 			error = tsleep(sc, PSOCK|PCATCH, "tap", 0);
854 
855 		if (error != 0)
856 			return (error);
857 		/* The device might have been downed */
858 		if ((ifp->if_flags & IFF_UP) == 0)
859 			return (EHOSTDOWN);
860 		if ((sc->sc_flags & TAP_NBIO) &&
861 		    lockstatus(&sc->sc_rdlock) == LK_EXCLUSIVE)
862 			return (EWOULDBLOCK);
863 		error = lockmgr(&sc->sc_rdlock, LK_EXCLUSIVE, NULL);
864 		if (error != 0)
865 			return (error);
866 		s = splnet();
867 	}
868 
869 	IFQ_DEQUEUE(&ifp->if_snd, m);
870 	ifp->if_flags &= ~IFF_OACTIVE;
871 	splx(s);
872 	if (m == NULL) {
873 		error = 0;
874 		goto out;
875 	}
876 
877 	ifp->if_opackets++;
878 #if NBPFILTER > 0
879 	if (ifp->if_bpf)
880 		bpf_mtap(ifp->if_bpf, m);
881 #endif
882 
883 	/*
884 	 * One read is one packet.
885 	 */
886 	do {
887 		error = uiomove(mtod(m, caddr_t),
888 		    min(m->m_len, uio->uio_resid), uio);
889 		MFREE(m, n);
890 		m = n;
891 	} while (m != NULL && uio->uio_resid > 0 && error == 0);
892 
893 	if (m != NULL)
894 		m_freem(m);
895 
896 out:
897 	(void)lockmgr(&sc->sc_rdlock, LK_RELEASE, NULL);
898 	return (error);
899 }
900 
901 static int
902 tap_cdev_write(dev_t dev, struct uio *uio, int flags)
903 {
904 	return tap_dev_write(minor(dev), uio, flags);
905 }
906 
907 static int
908 tap_fops_write(struct file *fp, off_t *offp, struct uio *uio,
909     struct ucred *cred, int flags)
910 {
911 	return tap_dev_write((intptr_t)fp->f_data, uio, flags);
912 }
913 
914 static int
915 tap_dev_write(int unit, struct uio *uio, int flags)
916 {
917 	struct tap_softc *sc =
918 	    (struct tap_softc *)device_lookup(&tap_cd, unit);
919 	struct ifnet *ifp;
920 	struct mbuf *m, **mp;
921 	int error = 0;
922 	int s;
923 
924 	if (sc == NULL)
925 		return (ENXIO);
926 
927 	ifp = &sc->sc_ec.ec_if;
928 
929 	/* One write, one packet, that's the rule */
930 	MGETHDR(m, M_DONTWAIT, MT_DATA);
931 	if (m == NULL) {
932 		ifp->if_ierrors++;
933 		return (ENOBUFS);
934 	}
935 	m->m_pkthdr.len = uio->uio_resid;
936 
937 	mp = &m;
938 	while (error == 0 && uio->uio_resid > 0) {
939 		if (*mp != m) {
940 			MGET(*mp, M_DONTWAIT, MT_DATA);
941 			if (*mp == NULL) {
942 				error = ENOBUFS;
943 				break;
944 			}
945 		}
946 		(*mp)->m_len = min(MHLEN, uio->uio_resid);
947 		error = uiomove(mtod(*mp, caddr_t), (*mp)->m_len, uio);
948 		mp = &(*mp)->m_next;
949 	}
950 	if (error) {
951 		ifp->if_ierrors++;
952 		m_freem(m);
953 		return (error);
954 	}
955 
956 	ifp->if_ipackets++;
957 	m->m_pkthdr.rcvif = ifp;
958 
959 #if NBPFILTER > 0
960 	if (ifp->if_bpf)
961 		bpf_mtap(ifp->if_bpf, m);
962 #endif
963 	s = splnet();
964 	(*ifp->if_input)(ifp, m);
965 	splx(s);
966 
967 	return (0);
968 }
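
/*
 * Illustration only, not part of the driver: a hedged userland sketch of
 * the write path above.  One write(2) carries one complete Ethernet frame
 * (header plus payload); the driver turns it into an mbuf chain and hands
 * it to the stack as if it had been received on the wire.  The source
 * address argument and the ethertype value are arbitrary examples.
 *
 *	#include <sys/types.h>
 *	#include <net/if.h>
 *	#include <net/if_ether.h>
 *	#include <arpa/inet.h>
 *	#include <stdint.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	static ssize_t
 *	inject_frame(int fd, const uint8_t *src)
 *	{
 *		uint8_t frame[ETHER_HDR_LEN + 46];
 *		struct ether_header eh;
 *
 *		memset(frame, 0, sizeof(frame));
 *		memset(eh.ether_dhost, 0xff, ETHER_ADDR_LEN);	// broadcast
 *		memcpy(eh.ether_shost, src, ETHER_ADDR_LEN);
 *		eh.ether_type = htons(0x88b5);	// local experimental ethertype
 *		memcpy(frame, &eh, sizeof(eh));
 *
 *		// One write, one packet; the zero payload only pads the
 *		// frame to the 60-byte Ethernet minimum.
 *		return write(fd, frame, sizeof(frame));
 *	}
 */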
969 
970 static int
971 tap_cdev_ioctl(dev_t dev, u_long cmd, caddr_t data, int flags,
972     struct lwp *l)
973 {
974 	return tap_dev_ioctl(minor(dev), cmd, data, l);
975 }
976 
977 static int
978 tap_fops_ioctl(struct file *fp, u_long cmd, void *data, struct lwp *l)
979 {
980 	return tap_dev_ioctl((intptr_t)fp->f_data, cmd, (caddr_t)data, l);
981 }
982 
983 static int
984 tap_dev_ioctl(int unit, u_long cmd, caddr_t data, struct lwp *l)
985 {
986 	struct tap_softc *sc =
987 	    (struct tap_softc *)device_lookup(&tap_cd, unit);
988 	int error = 0;
989 
990 	if (sc == NULL)
991 		return (ENXIO);
992 
993 	switch (cmd) {
994 	case FIONREAD:
995 		{
996 			struct ifnet *ifp = &sc->sc_ec.ec_if;
997 			struct mbuf *m;
998 			int s;
999 
1000 			s = splnet();
1001 			IFQ_POLL(&ifp->if_snd, m);
1002 
1003 			if (m == NULL)
1004 				*(int *)data = 0;
1005 			else
1006 				*(int *)data = m->m_pkthdr.len;
1007 			splx(s);
1008 		} break;
1009 	case TIOCSPGRP:
1010 	case FIOSETOWN:
1011 		error = fsetown(l->l_proc, &sc->sc_pgid, cmd, data);
1012 		break;
1013 	case TIOCGPGRP:
1014 	case FIOGETOWN:
1015 		error = fgetown(l->l_proc, sc->sc_pgid, cmd, data);
1016 		break;
1017 	case FIOASYNC:
1018 		if (*(int *)data)
1019 			sc->sc_flags |= TAP_ASYNCIO;
1020 		else
1021 			sc->sc_flags &= ~TAP_ASYNCIO;
1022 		break;
1023 	case FIONBIO:
1024 		if (*(int *)data)
1025 			sc->sc_flags |= TAP_NBIO;
1026 		else
1027 			sc->sc_flags &= ~TAP_NBIO;
1028 		break;
1029 	case TAPGIFNAME:
1030 		{
1031 			struct ifreq *ifr = (struct ifreq *)data;
1032 			struct ifnet *ifp = &sc->sc_ec.ec_if;
1033 
1034 			strlcpy(ifr->ifr_name, ifp->if_xname, IFNAMSIZ);
1035 		} break;
1036 	default:
1037 		error = ENOTTY;
1038 		break;
1039 	}
1040 
1041 	return (error);
1042 }
1043 
1044 static int
1045 tap_cdev_poll(dev_t dev, int events, struct lwp *l)
1046 {
1047 	return tap_dev_poll(minor(dev), events, l);
1048 }
1049 
1050 static int
1051 tap_fops_poll(struct file *fp, int events, struct lwp *l)
1052 {
1053 	return tap_dev_poll((intptr_t)fp->f_data, events, l);
1054 }
1055 
1056 static int
1057 tap_dev_poll(int unit, int events, struct lwp *l)
1058 {
1059 	struct tap_softc *sc =
1060 	    (struct tap_softc *)device_lookup(&tap_cd, unit);
1061 	int revents = 0;
1062 
1063 	if (sc == NULL)
1064 		return (ENXIO);
1065 
1066 	if (events & (POLLIN|POLLRDNORM)) {
1067 		struct ifnet *ifp = &sc->sc_ec.ec_if;
1068 		struct mbuf *m;
1069 		int s;
1070 
1071 		s = splnet();
1072 		IFQ_POLL(&ifp->if_snd, m);
1073 		splx(s);
1074 
1075 		if (m != NULL)
1076 			revents |= events & (POLLIN|POLLRDNORM);
1077 		else {
1078 			simple_lock(&sc->sc_kqlock);
1079 			selrecord(l, &sc->sc_rsel);
1080 			simple_unlock(&sc->sc_kqlock);
1081 		}
1082 	}
1083 	revents |= events & (POLLOUT|POLLWRNORM);
1084 
1085 	return (revents);
1086 }
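
/*
 * Illustration only, not part of the driver: a hedged userland sketch of
 * waiting for a frame with poll(2), which lands in tap_dev_poll above.
 * The descriptor is assumed to refer to an open tap device.
 *
 *	#include <poll.h>
 *
 *	static int
 *	wait_for_frame(int fd)
 *	{
 *		struct pollfd pfd;
 *
 *		pfd.fd = fd;
 *		pfd.events = POLLIN | POLLRDNORM;
 *		// Blocks until tap_start() calls selnotify() on new output.
 *		return poll(&pfd, 1, INFTIM);
 *	}
 */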
1087 
1088 static struct filterops tap_read_filterops = { 1, NULL, tap_kqdetach,
1089 	tap_kqread };
1090 static struct filterops tap_seltrue_filterops = { 1, NULL, tap_kqdetach,
1091 	filt_seltrue };
1092 
1093 static int
1094 tap_cdev_kqfilter(dev_t dev, struct knote *kn)
1095 {
1096 	return tap_dev_kqfilter(minor(dev), kn);
1097 }
1098 
1099 static int
1100 tap_fops_kqfilter(struct file *fp, struct knote *kn)
1101 {
1102 	return tap_dev_kqfilter((intptr_t)fp->f_data, kn);
1103 }
1104 
1105 static int
1106 tap_dev_kqfilter(int unit, struct knote *kn)
1107 {
1108 	struct tap_softc *sc =
1109 	    (struct tap_softc *)device_lookup(&tap_cd, unit);
1110 
1111 	if (sc == NULL)
1112 		return (ENXIO);
1113 
1114 	switch(kn->kn_filter) {
1115 	case EVFILT_READ:
1116 		kn->kn_fop = &tap_read_filterops;
1117 		break;
1118 	case EVFILT_WRITE:
1119 		kn->kn_fop = &tap_seltrue_filterops;
1120 		break;
1121 	default:
1122 		return (1);
1123 	}
1124 
1125 	kn->kn_hook = sc;
1126 	simple_lock(&sc->sc_kqlock);
1127 	SLIST_INSERT_HEAD(&sc->sc_rsel.sel_klist, kn, kn_selnext);
1128 	simple_unlock(&sc->sc_kqlock);
1129 	return (0);
1130 }
1131 
1132 static void
1133 tap_kqdetach(struct knote *kn)
1134 {
1135 	struct tap_softc *sc = (struct tap_softc *)kn->kn_hook;
1136 
1137 	simple_lock(&sc->sc_kqlock);
1138 	SLIST_REMOVE(&sc->sc_rsel.sel_klist, kn, knote, kn_selnext);
1139 	simple_unlock(&sc->sc_kqlock);
1140 }
1141 
1142 static int
1143 tap_kqread(struct knote *kn, long hint)
1144 {
1145 	struct tap_softc *sc = (struct tap_softc *)kn->kn_hook;
1146 	struct ifnet *ifp = &sc->sc_ec.ec_if;
1147 	struct mbuf *m;
1148 	int s;
1149 
1150 	s = splnet();
1151 	IFQ_POLL(&ifp->if_snd, m);
1152 
1153 	if (m == NULL)
1154 		kn->kn_data = 0;
1155 	else
1156 		kn->kn_data = m->m_pkthdr.len;
1157 	splx(s);
1158 	return (kn->kn_data != 0 ? 1 : 0);
1159 }
1160 
1161 /*
1162  * sysctl management routines
1163  * You can set the address of an interface through:
1164  * net.link.tap.tap<number>
1165  *
1166  * Note the consistent use of tap_log in order to use
1167  * sysctl_teardown at unload time.
1168  *
1169  * In the kernel you will find a lot of SYSCTL_SETUP blocks.  Those
1170  * blocks register a function in a special section of the kernel
1171  * (called a link set) which is used at init_sysctl() time to cycle
1172  * through all those functions to create the kernel's sysctl tree.
1173  *
1174  * It is not (currently) possible to use link sets in an LKM, so the
1175  * easiest approach is to simply call our own setup routine at load time.
1176  *
1177  * In the SYSCTL_SETUP blocks you find in the kernel, nodes have the
1178  * CTLFLAG_PERMANENT flag, meaning they cannot be removed.  Once the
1179  * whole kernel sysctl tree is built, it is not possible to add any
1180  * permanent node.
1181  *
1182  * It should be noted that we're not saving the sysctlnode pointer
1183  * we are returned when creating the "tap" node.  That structure
1184  * cannot be trusted once out of the calling function, as it might
1185  * get reused.  So we just save the MIB number, and always give the
1186  * full path starting from the root for later calls to sysctl_createv
1187  * and sysctl_destroyv.
1188  */
1189 SYSCTL_SETUP(sysctl_tap_setup, "sysctl net.link.tap subtree setup")
1190 {
1191 	const struct sysctlnode *node;
1192 	int error = 0;
1193 
1194 	if ((error = sysctl_createv(clog, 0, NULL, NULL,
1195 	    CTLFLAG_PERMANENT,
1196 	    CTLTYPE_NODE, "net", NULL,
1197 	    NULL, 0, NULL, 0,
1198 	    CTL_NET, CTL_EOL)) != 0)
1199 		return;
1200 
1201 	if ((error = sysctl_createv(clog, 0, NULL, NULL,
1202 	    CTLFLAG_PERMANENT,
1203 	    CTLTYPE_NODE, "link", NULL,
1204 	    NULL, 0, NULL, 0,
1205 	    CTL_NET, AF_LINK, CTL_EOL)) != 0)
1206 		return;
1207 
1208 	/*
1209 	 * The first four parameters of sysctl_createv are for management.
1210 	 *
1211 	 * The four that follow, here starting with a '0' for the flags,
1212 	 * describe the node.
1213 	 *
1214 	 * The next series of four set its value, through various possible
1215 	 * means.
1216 	 *
1217 	 * Last but not least, the path to the node is described.  That path
1218 	 * is relative to the given root (third argument).  Here we're
1219 	 * starting from the root.
1220 	 */
1221 	if ((error = sysctl_createv(clog, 0, NULL, &node,
1222 	    CTLFLAG_PERMANENT,
1223 	    CTLTYPE_NODE, "tap", NULL,
1224 	    NULL, 0, NULL, 0,
1225 	    CTL_NET, AF_LINK, CTL_CREATE, CTL_EOL)) != 0)
1226 		return;
1227 	tap_node = node->sysctl_num;
1228 }
1229 
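/*
 * Illustration only, not part of the driver: a hedged userland sketch of
 * the net.link.tap.tap<number> leaf created above and served by
 * tap_sysctl_handler below.  Reading it returns the current Ethernet
 * address as a string; writing a string such as "f2:0b:a4:00:00:01"
 * changes it.  The interface name "tap0" is an example.
 *
 *	#include <sys/param.h>
 *	#include <sys/sysctl.h>
 *	#include <stdio.h>
 *	#include <string.h>
 *
 *	int
 *	main(void)
 *	{
 *		char newaddr[] = "f2:0b:a4:00:00:01";
 *		char cur[18];
 *		size_t len = sizeof(cur);
 *
 *		if (sysctlbyname("net.link.tap.tap0", cur, &len, NULL, 0) == -1)
 *			return (1);
 *		printf("tap0 is at %s\n", cur);
 *
 *		// The new value is validated by tap_ether_aton below.
 *		return sysctlbyname("net.link.tap.tap0", NULL, NULL,
 *		    newaddr, strlen(newaddr) + 1) == -1;
 *	}
 */
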
1230 /*
1231  * The helper functions make Andrew Brown's interface really
1232  * shine.  They make it possible to create the value on the fly whether
1233  * the sysctl value is read or written.
1234  *
1235  * As shown in the example in the man page, the first step is to
1236  * create a copy of the node to have sysctl_lookup work on it.
1237  *
1238  * Here, we have more work to do than just a copy, since we have
1239  * to create the string.  The first step is to collect the actual
1240  * value of the node, which is a convenient pointer to the softc
1241  * of the interface.  From there we create the string and use it
1242  * as the value, but only for the *copy* of the node.
1243  *
1244  * Then we let sysctl_lookup do the magic, which consists in
1245  * setting oldp and newp as required by the operation.  When the
1246  * value is read, that means that the string will be copied to
1247  * the user, and when it is written, the new value will be copied
1248  * over in the addr array.
1249  *
1250  * If newp is NULL, the user was reading the value, so we don't
1251  * have anything else to do.  If a new value was written, we
1252  * have to check it.
1253  *
1254  * If it is incorrect, we can return an error and leave 'node' as
1255  * it is:  since it is a copy of the actual node, the change will
1256  * be forgotten.
1257  *
1258  * Upon a correct input, we commit the change to the ifnet
1259  * structure of our interface.
1260  */
1261 static int
1262 tap_sysctl_handler(SYSCTLFN_ARGS)
1263 {
1264 	struct sysctlnode node;
1265 	struct tap_softc *sc;
1266 	struct ifnet *ifp;
1267 	int error;
1268 	size_t len;
1269 	char addr[18];
1270 
1271 	node = *rnode;
1272 	sc = node.sysctl_data;
1273 	ifp = &sc->sc_ec.ec_if;
1274 	(void)tap_ether_sprintf(addr, LLADDR(ifp->if_sadl));
1275 	node.sysctl_data = addr;
1276 	error = sysctl_lookup(SYSCTLFN_CALL(&node));
1277 	if (error || newp == NULL)
1278 		return (error);
1279 
1280 	len = strlen(addr);
1281 	if (len < 11 || len > 17)
1282 		return (EINVAL);
1283 
1284 	/* Commit change */
1285 	if (tap_ether_aton(LLADDR(ifp->if_sadl), addr) != 0)
1286 		return (EINVAL);
1287 	return (error);
1288 }
1289 
1290 /*
1291  * ether_aton implementation, not using a static buffer.
1292  */
1293 static int
1294 tap_ether_aton(u_char *dest, char *str)
1295 {
1296 	int i;
1297 	char *cp = str;
1298 	u_char val[6];
1299 
1300 #define	set_value			\
1301 	if (*cp > '9' && *cp < 'a')	\
1302 		*cp -= 'A' - 10;	\
1303 	else if (*cp > '9')		\
1304 		*cp -= 'a' - 10;	\
1305 	else				\
1306 		*cp -= '0'
1307 
1308 	for (i = 0; i < 6; i++, cp++) {
1309 		if (!isxdigit(*cp))
1310 			return (1);
1311 		set_value;
1312 		val[i] = *cp++;
1313 		if (isxdigit(*cp)) {
1314 			set_value;
1315 			val[i] *= 16;
1316 			val[i] += *cp++;
1317 		}
1318 		if (*cp == ':' || i == 5)
1319 			continue;
1320 		else
1321 			return (1);
1322 	}
1323 	memcpy(dest, val, 6);
1324 	return (0);
1325 }
1326 
1327 /*
1328  * ether_sprintf made thread-safer.
1329  *
1330  * Copied over from sys/net/if_ethersubr.c, with a change to avoid the use
1331  * of a static buffer.
1332  */
1333 
1334 /*
1335  * Copyright (c) 1982, 1989, 1993
1336  *      The Regents of the University of California.  All rights reserved.
1337  *
1338  * Redistribution and use in source and binary forms, with or without
1339  * modification, are permitted provided that the following conditions
1340  * are met:
1341  * 1. Redistributions of source code must retain the above copyright
1342  *    notice, this list of conditions and the following disclaimer.
1343  * 2. Redistributions in binary form must reproduce the above copyright
1344  *    notice, this list of conditions and the following disclaimer in the
1345  *    documentation and/or other materials provided with the distribution.
1346  * 3. Neither the name of the University nor the names of its contributors
1347  *    may be used to endorse or promote products derived from this software
1348  *    without specific prior written permission.
1349  *
1350  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
1351  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
1352  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
1353  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
1354  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
1355  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
1356  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
1357  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
1358  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
1359  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
1360  * SUCH DAMAGE.
1361  *
1362  *      @(#)if_ethersubr.c      8.2 (Berkeley) 4/4/96
1363  */
1364 
1365 static char *
1366 tap_ether_sprintf(char *dest, const u_char *ap)
1367 {
1368 	char *cp = dest;
1369 	int i;
1370 
1371 	for (i = 0; i < 6; i++) {
1372 		*cp++ = hexdigits[*ap >> 4];
1373 		*cp++ = hexdigits[*ap++ & 0xf];
1374 		*cp++ = ':';
1375 	}
1376 	*--cp = 0;
1377 	return (dest);
1378 }
1379