xref: /netbsd-src/sys/net/rtsock.c (revision cda4f8f6ee55684e8d311b86c99ea59191e6b74f)
1 /*
2  * Copyright (c) 1988, 1991 Regents of the University of California.
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  * 2. Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in the
12  *    documentation and/or other materials provided with the distribution.
13  * 3. All advertising materials mentioning features or use of this software
14  *    must display the following acknowledgement:
15  *	This product includes software developed by the University of
16  *	California, Berkeley and its contributors.
17  * 4. Neither the name of the University nor the names of its contributors
18  *    may be used to endorse or promote products derived from this software
19  *    without specific prior written permission.
20  *
21  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
22  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
25  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
26  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
27  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
28  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31  * SUCH DAMAGE.
32  *
33  *	from: @(#)rtsock.c	7.18 (Berkeley) 6/27/91
34  *	$Id: rtsock.c,v 1.3 1993/05/22 11:42:21 cgd Exp $
35  */
36 
37 #include "param.h"
38 #include "mbuf.h"
39 #include "proc.h"
40 #include "socket.h"
41 #include "socketvar.h"
42 #include "domain.h"
43 #include "protosw.h"
44 
45 #include "af.h"
46 #include "if.h"
47 #include "route.h"
48 #include "raw_cb.h"
49 
50 #include "machine/mtpr.h"
51 
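/*
 * Dummy source and destination addresses, plus the sockproto whose
 * sp_protocol is set to each message's address family, used when completed
 * messages are handed back to listeners through raw_input().
 */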
52 struct sockaddr route_dst = { 2, PF_ROUTE, };
53 struct sockaddr route_src = { 2, PF_ROUTE, };
54 struct sockproto route_proto = { PF_ROUTE, };
55 
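/*
 * Routing-socket user requests.  PRU_ATTACH and PRU_DETACH are intercepted
 * here to keep the per-family listener counts in route_cb up to date; the
 * real work is done by raw_usrreq().  A freshly attached socket is marked
 * connected to route_src and gets SO_USELOOPBACK, so by default it is sent
 * a copy of its own messages.
 */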
56 /*ARGSUSED*/
57 route_usrreq(so, req, m, nam, control)
58 	register struct socket *so;
59 	int req;
60 	struct mbuf *m, *nam, *control;
61 {
62 	register int error = 0;
63 	register struct rawcb *rp = sotorawcb(so);
64 	int s;
65 	if (req == PRU_ATTACH) {
66 		MALLOC(rp, struct rawcb *, sizeof(*rp), M_PCB, M_WAITOK);
67 		if (so->so_pcb = (caddr_t)rp)
68 			bzero(so->so_pcb, sizeof(*rp));
69 
70 	}
71 	if (req == PRU_DETACH && rp) {
72 		int af = rp->rcb_proto.sp_protocol;
73 		if (af == AF_INET)
74 			route_cb.ip_count--;
75 		else if (af == AF_NS)
76 			route_cb.ns_count--;
77 		else if (af == AF_ISO)
78 			route_cb.iso_count--;
79 		route_cb.any_count--;
80 	}
81 	s = splnet();
82 	error = raw_usrreq(so, req, m, nam, control);
83 	rp = sotorawcb(so);
84 	if (req == PRU_ATTACH && rp) {
85 		int af = rp->rcb_proto.sp_protocol;
86 		if (error) {
87 			free((caddr_t)rp, M_PCB);
88 			splx(s);
89 			return (error);
90 		}
91 		if (af == AF_INET)
92 			route_cb.ip_count++;
93 		else if (af == AF_NS)
94 			route_cb.ns_count++;
95 		else if (af == AF_ISO)
96 			route_cb.iso_count++;
97 		rp->rcb_faddr = &route_src;
98 		route_cb.any_count++;
99 		soisconnected(so);
100 		so->so_options |= SO_USELOOPBACK;
101 	}
102 	splx(s);
103 	return (error);
104 }
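
/*
 * Sockaddrs in a routing message are padded to long-word boundaries:
 * ROUNDUP() yields that padded length and ADVANCE() steps a pointer past
 * a sockaddr using its sa_len.
 */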
105 #define ROUNDUP(a) \
106 	((a) > 0 ? (1 + (((a) - 1) | (sizeof(long) - 1))) : sizeof(long))
107 #define ADVANCE(x, n) (x += ROUNDUP((n)->sa_len))
108 
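/*
 * Process a message written to a routing socket: copy the rt_msghdr and the
 * sockaddrs that follow it (laid out in RTA_* bit order) out of the mbuf
 * chain, carry out the requested operation, and feed the completed message
 * back through raw_input() so that every listener sees the result.
 */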
109 /*ARGSUSED*/
110 route_output(m, so)
111 	register struct mbuf *m;
112 	struct socket *so;
113 {
114 	register struct rt_msghdr *rtm = 0;
115 	register struct rtentry *rt = 0;
116 	struct rtentry *saved_nrt = 0;
117 	struct sockaddr *dst = 0, *gate = 0, *netmask = 0, *genmask = 0;
118 	struct sockaddr *ifpaddr = 0, *ifaaddr = 0;
119 	caddr_t cp, lim;
120 	int len, error = 0;
121 	struct ifnet *ifp = 0;
122 	struct ifaddr *ifa = 0;
123 	struct ifaddr *ifaof_ifpforaddr(), *ifa_ifwithroute();
124 
125 #define senderr(e) { error = e; goto flush;}
126 	if (m == 0 || m->m_len < sizeof(long))
127 		return (ENOBUFS);
128 	if ((m = m_pullup(m, sizeof(long))) == 0)
129 		return (ENOBUFS);
130 	if ((m->m_flags & M_PKTHDR) == 0)
131 		panic("route_output");
132 	len = m->m_pkthdr.len;
133 	if (len < sizeof(*rtm) ||
134 	    len != mtod(m, struct rt_msghdr *)->rtm_msglen)
135 		senderr(EINVAL);
136 	R_Malloc(rtm, struct rt_msghdr *, len);
137 	if (rtm == 0)
138 		senderr(ENOBUFS);
139 	m_copydata(m, 0, len, (caddr_t)rtm);
140 	if (rtm->rtm_version != RTM_VERSION)
141 		senderr(EPROTONOSUPPORT);
142 	rtm->rtm_pid = curproc->p_pid;
143 	lim = len + (caddr_t) rtm;
144 	cp = (caddr_t) (rtm + 1);
145 	if (rtm->rtm_addrs & RTA_DST) {
146 		dst = (struct sockaddr *)cp;
147 		ADVANCE(cp, dst);
148 	} else
149 		senderr(EINVAL);
150 	if ((rtm->rtm_addrs & RTA_GATEWAY) && cp < lim)  {
151 		gate = (struct sockaddr *)cp;
152 		ADVANCE(cp, gate);
153 	}
154 	if ((rtm->rtm_addrs & RTA_NETMASK) && cp < lim)  {
155 		netmask = (struct sockaddr *)cp;
156 		ADVANCE(cp, netmask);
157 	}
158 	if ((rtm->rtm_addrs & RTA_GENMASK) && cp < lim)  {
159 		struct radix_node *t, *rn_addmask();
160 		genmask = (struct sockaddr *)cp;
161 		ADVANCE(cp, genmask);
162 		t = rn_addmask(genmask, 1, 2);
163 		if (t && Bcmp(genmask, t->rn_key, *(u_char *)genmask) == 0)
164 			genmask = (struct sockaddr *)(t->rn_key);
165 		else
166 			senderr(ENOBUFS);
167 	}
168 	if ((rtm->rtm_addrs & RTA_IFP) && cp < lim)  {
169 		ifpaddr = (struct sockaddr *)cp;
170 		ADVANCE(cp, ifpaddr);
171 	}
172 	if ((rtm->rtm_addrs & RTA_IFA) && cp < lim)  {
173 		ifaaddr = (struct sockaddr *)cp;
174 	}
175 	switch (rtm->rtm_type) {
176 	case RTM_ADD:
177 		if (gate == 0)
178 			senderr(EINVAL);
179 		error = rtrequest(RTM_ADD, dst, gate, netmask,
180 					rtm->rtm_flags, &saved_nrt);
181 		if (error == 0 && saved_nrt) {
182 			rt_setmetrics(rtm->rtm_inits,
183 				&rtm->rtm_rmx, &saved_nrt->rt_rmx);
184 			saved_nrt->rt_refcnt--;
185 			saved_nrt->rt_genmask = genmask;
186 		}
187 		break;
188 
189 	case RTM_DELETE:
190 		error = rtrequest(RTM_DELETE, dst, gate, netmask,
191 				rtm->rtm_flags, (struct rtentry **)0);
192 		break;
193 
194 	case RTM_GET:
195 	case RTM_CHANGE:
196 	case RTM_LOCK:
197 		rt = rtalloc1(dst, 0);
198 		if (rt == 0)
199 			senderr(ESRCH);
200 		if (rtm->rtm_type != RTM_GET) {
201 			if (Bcmp(dst, rt_key(rt), dst->sa_len) != 0)
202 				senderr(ESRCH);
203 			if (rt->rt_nodes->rn_dupedkey &&
204 			    (netmask == 0 ||
205 			     Bcmp(netmask, rt_mask(rt), netmask->sa_len)))
206 				senderr(ETOOMANYREFS);
207 		}
208 		switch(rtm->rtm_type) {
209 
210 		case RTM_GET:
211 			dst = rt_key(rt); len = sizeof(*rtm);
212 			ADVANCE(len, dst);
213 			rtm->rtm_addrs |= RTA_DST;
214 			if (gate = rt->rt_gateway) {
215 				ADVANCE(len, gate);
216 				rtm->rtm_addrs |= RTA_GATEWAY;
217 			} else
218 				rtm->rtm_addrs &= ~RTA_GATEWAY;
219 			if (netmask = rt_mask(rt)) {
220 				ADVANCE(len, netmask);
221 				rtm->rtm_addrs |= RTA_NETMASK;
222 			} else
223 				rtm->rtm_addrs &= ~RTA_NETMASK;
224 			if (genmask = rt->rt_genmask) {
225 				ADVANCE(len, genmask);
226 				rtm->rtm_addrs |= RTA_GENMASK;
227 			} else
228 				rtm->rtm_addrs &= ~RTA_GENMASK;
229 			if (rtm->rtm_addrs & (RTA_IFP | RTA_IFA)) {
230 				if (rt->rt_ifp == 0)
231 					goto badif;
232 				for (ifa = rt->rt_ifp->if_addrlist;
233 				    ifa && ifa->ifa_addr->sa_family != AF_LINK;
234 				     ifa = ifa->ifa_next){}
235 				if (ifa && rt->rt_ifa) {
236 					ifpaddr = ifa->ifa_addr;
237 					ADVANCE(len, ifpaddr);
238 					ifaaddr = rt->rt_ifa->ifa_addr;
239 					ADVANCE(len, ifaaddr);
240 					rtm->rtm_addrs |= RTA_IFP | RTA_IFA;
241 				} else {
242 				badif:	ifpaddr = 0;
243 					rtm->rtm_addrs &= ~(RTA_IFP | RTA_IFA);
244 				}
245 			}
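			/* The reply may have outgrown the request; get a bigger buffer. */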
246 			if (len > rtm->rtm_msglen) {
247 				struct rt_msghdr *new_rtm;
248 				R_Malloc(new_rtm, struct rt_msghdr *, len);
249 				if (new_rtm == 0)
250 					senderr(ENOBUFS);
251 				Bcopy(rtm, new_rtm, rtm->rtm_msglen);
252 				Free(rtm); rtm = new_rtm;
253 			}
254 			rtm->rtm_msglen = len;
255 			rtm->rtm_flags = rt->rt_flags;
256 			rtm->rtm_rmx = rt->rt_rmx;
257 			cp = (caddr_t) (rtm + 1);
258 			len = ROUNDUP(dst->sa_len);
259 			Bcopy(dst, cp, len); cp += len;
260 			if (gate) {
261 			    len = ROUNDUP(gate->sa_len);
262 			    Bcopy(gate, cp, len); cp += len;
263 			}
264 			if (netmask) {
265 			    len = ROUNDUP(netmask->sa_len);
266 			    Bcopy(netmask, cp, len); cp += len;
267 			}
268 			if (genmask) {
269 			    len = ROUNDUP(genmask->sa_len);
270 			    Bcopy(genmask, cp, len); cp += len;
271 			}
272 			if (ifpaddr) {
273 			    len = ROUNDUP(ifpaddr->sa_len);
274 			    Bcopy(ifpaddr, cp, len); cp += len;
275 			    len = ROUNDUP(ifaaddr->sa_len);
276 			    Bcopy(ifaaddr, cp, len); cp += len;
277 			}
278 			break;
279 
280 		case RTM_CHANGE:
281 			if (gate &&
282 			    (gate->sa_len > (len = rt->rt_gateway->sa_len)))
283 				senderr(EDQUOT);
284 			/* a new gateway could require a new ifaddr or ifp;
285 			   flags may also be different; the ifp may be given
286 			   by a link-level sockaddr when the protocol address
			   is ambiguous */
287 			if (ifpaddr && (ifa = ifa_ifwithnet(ifpaddr)) &&
288 			    (ifp = ifa->ifa_ifp))
289 				ifa = ifaof_ifpforaddr(ifaaddr ? ifaaddr : gate,
290 							ifp);
291 			else if ((ifaaddr && (ifa = ifa_ifwithaddr(ifaaddr))) ||
292 				 (ifa = ifa_ifwithroute(rt->rt_flags,
293 							rt_key(rt), gate)))
294 				ifp = ifa->ifa_ifp;
295 			if (ifa) {
296 				register struct ifaddr *oifa = rt->rt_ifa;
297 				if (oifa != ifa) {
298 				    if (oifa && oifa->ifa_rtrequest)
299 					oifa->ifa_rtrequest(RTM_DELETE,
300 								rt, gate);
301 				    rt->rt_ifa = ifa;
302 				    rt->rt_ifp = ifp;
303 				}
304 			}
305 			if (gate)
306 				Bcopy(gate, rt->rt_gateway, len);
307 			rt_setmetrics(rtm->rtm_inits, &rtm->rtm_rmx,
308 					&rt->rt_rmx);
309 			if (rt->rt_ifa && rt->rt_ifa->ifa_rtrequest)
310 			       rt->rt_ifa->ifa_rtrequest(RTM_ADD, rt, gate);
311 			if (genmask)
312 				rt->rt_genmask = genmask;
313 			/*
314 			 * FALLTHROUGH into RTM_LOCK.
315 			 */
316 		case RTM_LOCK:
317 			rt->rt_rmx.rmx_locks |=
318 				(rtm->rtm_inits & rtm->rtm_rmx.rmx_locks);
319 			rt->rt_rmx.rmx_locks &= ~(rtm->rtm_inits);
320 			break;
321 		}
322 		goto cleanup;
323 
324 	default:
325 		senderr(EOPNOTSUPP);
326 	}
327 
328 flush:
329 	if (rtm) {
330 		if (error)
331 			rtm->rtm_errno = error;
332 		else
333 			rtm->rtm_flags |= RTF_DONE;
334 	}
335 cleanup:
336 	if (rt)
337 		rtfree(rt);
338     {
339 	register struct rawcb *rp = 0;
340 	/*
341 	 * Check whether the sender wants its own messages looped back;
	 * if not, and nobody else is listening, there is nothing more to do.
342 	 */
343 	if ((so->so_options & SO_USELOOPBACK) == 0) {
344 		if (route_cb.any_count <= 1) {
345 			if (rtm)
346 				Free(rtm);
347 			m_freem(m);
348 			return (error);
349 		}
350 		/* There is another listener, so construct message */
351 		rp = sotorawcb(so);
352 	}
353 	if (rtm) {
354 		m_copyback(m, 0, rtm->rtm_msglen, (caddr_t)rtm);
355 		Free(rtm);
356 	}
357 	if (rp)
358 		rp->rcb_proto.sp_family = 0; /* Avoid us */
359 	if (dst)
360 		route_proto.sp_protocol = dst->sa_family;
361 	raw_input(m, &route_proto, &route_src, &route_dst);
362 	if (rp)
363 		rp->rcb_proto.sp_family = PF_ROUTE;
364     }
365 	return (error);
366 }
367 
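/*
 * Copy into `out' only those metrics whose RTV_* bits are set in `which'.
 */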
368 rt_setmetrics(which, in, out)
369 	u_long which;
370 	register struct rt_metrics *in, *out;
371 {
372 #define metric(f, e) if (which & (f)) out->e = in->e;
373 	metric(RTV_RPIPE, rmx_recvpipe);
374 	metric(RTV_SPIPE, rmx_sendpipe);
375 	metric(RTV_SSTHRESH, rmx_ssthresh);
376 	metric(RTV_RTT, rmx_rtt);
377 	metric(RTV_RTTVAR, rmx_rttvar);
378 	metric(RTV_HOPCOUNT, rmx_hopcount);
379 	metric(RTV_MTU, rmx_mtu);
380 	metric(RTV_EXPIRE, rmx_expire);
381 #undef metric
382 }
383 
384 /*
385  * Copy data from a buffer back into the indicated mbuf chain,
386  * starting "off" bytes from the beginning, extending the mbuf
387  * chain if necessary.
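 * If an mbuf cannot be allocated the copy stops short; the packet header
 * length is raised if the copy extended past it.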
388  */
389 m_copyback(m0, off, len, cp)
390 	struct	mbuf *m0;
391 	register int off;
392 	register int len;
393 	caddr_t cp;
394 
395 {
396 	register int mlen;
397 	register struct mbuf *m = m0, *n;
398 	int totlen = 0;
399 
400 	if (m0 == 0)
401 		return;
402 	while (off > (mlen = m->m_len)) {
403 		off -= mlen;
404 		totlen += mlen;
405 		if (m->m_next == 0) {
406 			n = m_getclr(M_DONTWAIT, m->m_type);
407 			if (n == 0)
408 				goto out;
409 			n->m_len = min(MLEN, len + off);
410 			m->m_next = n;
411 		}
412 		m = m->m_next;
413 	}
414 	while (len > 0) {
415 		mlen = min(m->m_len - off, len);
416 		bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
417 		cp += mlen;
418 		len -= mlen;
419 		mlen += off;
420 		off = 0;
421 		totlen += mlen;
422 		if (len == 0)
423 			break;
424 		if (m->m_next == 0) {
425 			n = m_get(M_DONTWAIT, m->m_type);
426 			if (n == 0)
427 				break;
428 			n->m_len = min(MLEN, len);
429 			m->m_next = n;
430 		}
431 		m = m->m_next;
432 	}
433 out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
434 		m->m_pkthdr.len = totlen;
435 }
436 
437 /*
438  * The miss and losing messages have the same layout, so a single routine
 * builds both (it also serves the old-style RTM_OLDADD and RTM_OLDDEL
 * notifications) and broadcasts the result to every routing-socket listener.
439  */
440 
441 rt_missmsg(type, dst, gate, mask, src, flags, error)
442 	register struct sockaddr *dst;
443 	struct sockaddr *gate, *mask, *src;
	int type, flags, error;
444 {
445 	register struct rt_msghdr *rtm;
446 	register struct mbuf *m;
447 	int dlen = ROUNDUP(dst->sa_len);
448 	int len = dlen + sizeof(*rtm);
449 
450 	if (route_cb.any_count == 0)
451 		return;
452 	m = m_gethdr(M_DONTWAIT, MT_DATA);
453 	if (m == 0)
454 		return;
455 	m->m_pkthdr.len = m->m_len = min(len, MHLEN);
456 	m->m_pkthdr.rcvif = 0;
457 	rtm = mtod(m, struct rt_msghdr *);
458 	bzero((caddr_t)rtm, sizeof(*rtm)); /*XXX assumes sizeof(*rtm) < MHLEN*/
459 	rtm->rtm_flags = RTF_DONE | flags;
460 	rtm->rtm_msglen = len;
461 	rtm->rtm_version = RTM_VERSION;
462 	rtm->rtm_type = type;
463 	rtm->rtm_addrs = RTA_DST;
464 	if (type == RTM_OLDADD || type == RTM_OLDDEL) {
465 		rtm->rtm_pid = curproc->p_pid;
466 	}
467 	m_copyback(m, sizeof (*rtm), dlen, (caddr_t)dst);
468 	if (gate) {
469 		dlen = ROUNDUP(gate->sa_len);
470 		m_copyback(m, len, dlen, (caddr_t)gate);
471 		len += dlen;
472 		rtm->rtm_addrs |= RTA_GATEWAY;
473 	}
474 	if (mask) {
475 		dlen = ROUNDUP(mask->sa_len);
476 		m_copyback(m, len, dlen, (caddr_t)mask);
477 		len += dlen;
478 		rtm->rtm_addrs |= RTA_NETMASK;
479 	}
480 	if (src) {
481 		dlen = ROUNDUP(src->sa_len);
482 		m_copyback(m, len, dlen, (caddr_t)src);
483 		len += dlen;
484 		rtm->rtm_addrs |= RTA_AUTHOR;
485 	}
486 	if (m->m_pkthdr.len != len) {
487 		m_freem(m);
488 		return;
489 	}
490 	rtm->rtm_errno = error;
491 	rtm->rtm_msglen = len;
492 	route_proto.sp_protocol = dst->sa_family;
493 	raw_input(m, &route_proto, &route_src, &route_dst);
494 }
495 
496 #include "kinfo.h"
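/*
 * State carried through a table dump: w_given holds the size of the user's
 * buffer, w_needed starts at -w_given and counts space consumed (entries are
 * copied out only while it is still <= 0), w_where is the user address to
 * copy to, and w_m is a staging buffer used when an entire message fits.
 */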
497 struct walkarg {
498 	int	w_op, w_arg;
499 	int	w_given, w_needed;
500 	caddr_t	w_where;
501 	struct	{
502 		struct rt_msghdr m_rtm;
503 		char	m_sabuf[128];
504 	} w_m;
505 #define w_rtm w_m.m_rtm
506 };
507 /*
508  * Called by rt_walk() for each radix-tree leaf when dumping the kernel
 * table via getkinfo().
509  */
510 rt_dumpentry(rn, w)
511 	struct radix_node *rn;
512 	register struct walkarg *w;
513 {
514 	register struct sockaddr *sa;
515 	int n, error;
516 
517     for (; rn; rn = rn->rn_dupedkey) {
518 	int count = 0, size = sizeof(w->w_rtm);
519 	register struct rtentry *rt = (struct rtentry *)rn;
520 
521 	if (rn->rn_flags & RNF_ROOT)
522 		continue;
523 	if (w->w_op == KINFO_RT_FLAGS && !(rt->rt_flags & w->w_arg))
524 		continue;
525 #define next(a, l) {size += (l); w->w_rtm.rtm_addrs |= (a); }
526 	w->w_rtm.rtm_addrs = 0;
527 	if (sa = rt_key(rt))
528 		next(RTA_DST, ROUNDUP(sa->sa_len));
529 	if (sa = rt->rt_gateway)
530 		next(RTA_GATEWAY, ROUNDUP(sa->sa_len));
531 	if (sa = rt_mask(rt))
532 		next(RTA_NETMASK, ROUNDUP(sa->sa_len));
533 	if (sa = rt->rt_genmask)
534 		next(RTA_GENMASK, ROUNDUP(sa->sa_len));
535 	w->w_needed += size;
536 	if (w->w_where == NULL || w->w_needed > 0)
537 		continue;
538 	w->w_rtm.rtm_msglen = size;
539 	w->w_rtm.rtm_flags = rt->rt_flags;
540 	w->w_rtm.rtm_use = rt->rt_use;
541 	w->w_rtm.rtm_rmx = rt->rt_rmx;
542 	w->w_rtm.rtm_index = rt->rt_ifp->if_index;
543 #undef next
544 #define next(l) {n = (l); Bcopy(sa, cp, n); cp += n;}
545 	if (size <= sizeof(w->w_m)) {
546 		register caddr_t cp = (caddr_t)(w->w_m.m_sabuf);
547 		if (sa = rt_key(rt))
548 			next(ROUNDUP(sa->sa_len));
549 		if (sa = rt->rt_gateway)
550 			next(ROUNDUP(sa->sa_len));
551 		if (sa = rt_mask(rt))
552 			next(ROUNDUP(sa->sa_len));
553 		if (sa = rt->rt_genmask)
554 			next(ROUNDUP(sa->sa_len));
555 #undef next
556 #define next(s, l) {n = (l); \
557     if (error = copyout((caddr_t)(s), w->w_where, n)) return (error); \
558     w->w_where += n;}
559 
560 		next(&w->w_m, size); /* Copy rtmsg and sockaddrs back */
561 		continue;
562 	}
563 	next(&w->w_rtm, sizeof(w->w_rtm));
564 	if (sa = rt_key(rt))
565 		next(sa, ROUNDUP(sa->sa_len));
566 	if (sa = rt->rt_gateway)
567 		next(sa, ROUNDUP(sa->sa_len));
568 	if (sa = rt_mask(rt))
569 		next(sa, ROUNDUP(sa->sa_len));
570 	if (sa = rt->rt_genmask)
571 		next(sa, ROUNDUP(sa->sa_len));
572     }
573 	return (0);
574 #undef next
575 }
576 
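/*
 * getkinfo() handler for the routing tables.  KINFO_RT_DUMP reports every
 * route, KINFO_RT_FLAGS only those whose flags intersect `arg'.  Each
 * address family's radix tree is walked in turn; when sizing only, the
 * estimate is padded by 10% to allow for growth.
 */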
577 kinfo_rtable(op, where, given, arg, needed)
578 	int	op, arg;
579 	caddr_t	where;
580 	int	*given, *needed;
581 {
582 	register struct radix_node_head *rnh;
583 	int	s, error = 0;
584 	u_char  af = ki_af(op);
585 	struct	walkarg w;
586 
587 	op &= 0xffff;
588 	if (op != KINFO_RT_DUMP && op != KINFO_RT_FLAGS)
589 		return (EINVAL);
590 
591 	Bzero(&w, sizeof(w));
592 	if ((w.w_where = where) && given)
593 		w.w_given = *given;
594 	w.w_needed = 0 - w.w_given;
595 	w.w_arg = arg;
596 	w.w_op = op;
597 	w.w_rtm.rtm_version = RTM_VERSION;
598 	w.w_rtm.rtm_type = RTM_GET;
599 
600 	s = splnet();
601 	for (rnh = radix_node_head; rnh; rnh = rnh->rnh_next) {
602 		if (rnh->rnh_af == 0)
603 			continue;
604 		if (af && af != rnh->rnh_af)
605 			continue;
606 		error = rt_walk(rnh->rnh_treetop, rt_dumpentry, &w);
607 		if (error)
608 			break;
609 	}
610 	w.w_needed += w.w_given;
611 	if (where && given)
612 		*given = w.w_where - where;
613 	else
614 		w.w_needed = (11 * w.w_needed) / 10;
615 	*needed = w.w_needed;
616 	splx(s);
617 	return (error);
618 }
619 
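/*
 * Iterative depth-first walk of a radix tree: descend left to a leaf, hand
 * it to `f', then climb while we arrive from the right, taking the next
 * right branch; the walk ends when the climb reaches the tree root.
 */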
620 rt_walk(rn, f, w)
621 	register struct radix_node *rn;
622 	register int (*f)();
623 	struct walkarg *w;
624 {
625 	int error;
626 	for (;;) {
627 		while (rn->rn_b >= 0)
628 			rn = rn->rn_l;	/* First time through node, go left */
629 		if (error = (*f)(rn, w))
630 			return (error);	/* Process Leaf */
631 		while (rn->rn_p->rn_r == rn) {	/* if coming back from right */
632 			rn = rn->rn_p;		/* go back up */
633 			if (rn->rn_flags & RNF_ROOT)
634 				return 0;
635 		}
636 		rn = rn->rn_p->rn_r;		/* otherwise, go right*/
637 	}
638 }
639 
640 /*
641  * Definitions of protocols supported in the ROUTE domain.
642  */
643 
644 int	raw_init(),raw_usrreq(),raw_input(),raw_ctlinput();
645 extern	struct domain routedomain;		/* or at least forward */
646 
647 struct protosw routesw[] = {
648 { SOCK_RAW,	&routedomain,	0,		PR_ATOMIC|PR_ADDR,
649   raw_input,	route_output,	raw_ctlinput,	0,
650   route_usrreq,
651   raw_init,	0,		0,		0,
652 }
653 };
654 
655 int	unp_externalize(), unp_dispose();
656 
657 struct domain routedomain =
658     { PF_ROUTE, "route", 0, 0, 0,
659       routesw, &routesw[sizeof(routesw)/sizeof(routesw[0])] };
660