/*
 * Copyright (c) 1988 Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms are permitted
 * provided that the above copyright notice and this paragraph are
 * duplicated in all such forms and that any documentation,
 * advertising materials, and other materials related to such
 * distribution and use acknowledge that the software was developed
 * by the University of California, Berkeley.  The name of the
 * University may not be used to endorse or promote products derived
 * from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 *	@(#)rtsock.c	7.7 (Berkeley) 03/12/90
 */

#ifndef RTF_UP
#include "param.h"
#include "mbuf.h"
#include "user.h"
#include "proc.h"
#include "socket.h"
#include "socketvar.h"
#include "domain.h"
#include "protosw.h"
#include "errno.h"

#include "af.h"
#include "if.h"
#include "route.h"
#include "raw_cb.h"

#include "machine/mtpr.h"
#endif

struct sockaddr route_dst = { 0, PF_ROUTE, };
struct sockaddr route_src = { 0, PF_ROUTE, };
struct sockproto route_proto = { PF_ROUTE, };

/*
 * Handle a user request on a routing socket.  PRU_ATTACH and PRU_DETACH
 * are handled around the generic raw_usrreq() so that the per-family
 * listener counts in route_cb stay consistent.
 */
/*ARGSUSED*/
route_usrreq(so, req, m, nam, control)
	register struct socket *so;
	int req;
	struct mbuf *m, *nam, *control;
{
	register int error = 0;
	register struct rawcb *rp = sotorawcb(so);

	if (req == PRU_ATTACH) {
		MALLOC(rp, struct rawcb *, sizeof(*rp), M_PCB, M_WAITOK);
		if (so->so_pcb = (caddr_t)rp)
			bzero(so->so_pcb, sizeof(*rp));
	}
	if (req == PRU_DETACH && rp) {
		int af = rp->rcb_proto.sp_protocol;
		if (af == AF_INET)
			route_cb.ip_count--;
		else if (af == AF_NS)
			route_cb.ns_count--;
		else if (af == AF_ISO)
			route_cb.iso_count--;
		route_cb.any_count--;
	}
	error = raw_usrreq(so, req, m, nam, control);
	rp = sotorawcb(so);
	if (req == PRU_ATTACH && rp) {
		int af = rp->rcb_proto.sp_protocol;
		if (error) {
			free((caddr_t)rp, M_PCB);
			return (error);
		}
		if (af == AF_INET)
			route_cb.ip_count++;
		else if (af == AF_NS)
			route_cb.ns_count++;
		else if (af == AF_ISO)
			route_cb.iso_count++;
		rp->rcb_faddr = &route_src;
		route_cb.any_count++;
		soisconnected(so);
	}
	return (error);
}
#define ROUNDUP(a) (1 + (((a) - 1) | (sizeof(long) - 1)))

/*
 * Process a routing message written on a routing socket (RTM_ADD,
 * RTM_DELETE, RTM_GET, RTM_CHANGE, RTM_LOCK) and reflect the result
 * back to all listeners via raw_input().
 */
/*ARGSUSED*/
route_output(m, so)
	register struct mbuf *m;
	struct socket *so;
{
	register struct rt_msghdr *rtm = 0;
	register struct rtentry *rt = 0;
	struct rtentry *saved_nrt = 0;
	struct sockaddr *dst = 0, *gate = 0, *netmask = 0, *author;
	struct rt_metrics *rmm = 0;
	caddr_t cp = 0;
	int len, error = 0;

#define senderr(e) { error = e; goto flush;}
	if (m == 0)
		return (ENOBUFS);
	if (m->m_len < sizeof(long))
		if ((m = m_pullup(m, sizeof(long))) == 0)
			return (ENOBUFS);
	if ((m->m_flags & M_PKTHDR) == 0)
		panic("route_output");
	len = m->m_pkthdr.len;
	rtm = mtod(m, struct rt_msghdr *);
	if (len < rtm->rtm_msglen)
		senderr(EINVAL);
	R_Malloc(rtm, struct rt_msghdr *, len);
	if (rtm == 0)
		senderr(ENOBUFS);
	m_copydata(m, 0, len, (caddr_t)rtm);
	if (rtm->rtm_version != 1)
		senderr(EPROTONOSUPPORT);
	rtm->rtm_pid = u.u_procp->p_pid;
	cp = (caddr_t)(rtm + 1);
#ifdef notyet
	switch (rtm->rtm_type) {

	case RTM_ADD: case RTM_CHANGE: case RTM_GET:
		rmm = (struct rt_metrics *)cp;
		cp = (caddr_t)(rmm + 1);
	}
#endif
	if (rtm->rtm_count > 0) {
		dst = (struct sockaddr *)cp;
		cp += ROUNDUP(dst->sa_len);
	}
	if (rtm->rtm_count > 1) {
		gate = (struct sockaddr *)cp;
		cp += ROUNDUP(gate->sa_len);
	}
	if (rtm->rtm_count > 2) {
		netmask = (struct sockaddr *)cp;
		if (*cp)
			cp += ROUNDUP(netmask->sa_len);
		else
			cp += sizeof(long);
	}
	if (rtm->rtm_count > 3) {
		author = (struct sockaddr *)cp;
	}
	switch (rtm->rtm_type) {
	case RTM_ADD:
		error = rtrequest(RTM_ADD, dst, gate, netmask,
				rtm->rtm_flags, &saved_nrt);
		/* XXX -- add metrics !!! */
		if (error == 0 && saved_nrt)
			saved_nrt->rt_refcnt--;
		break;

	case RTM_DELETE:
		error = rtrequest(RTM_DELETE, dst, gate, netmask,
				rtm->rtm_flags, (struct rtentry **)0);
		break;

	case RTM_GET:
	case RTM_CHANGE:
	case RTM_LOCK:
		rt = rtalloc1(dst, 0);
		if (rt == 0)
			senderr(ESRCH);
		switch (rtm->rtm_type) {
			struct sockaddr *outmask;

		case RTM_GET:
			netmask = rt_mask(rt);
			len = sizeof(*rtm) + ROUNDUP(rt_key(rt)->sa_len)
				+ ROUNDUP(rt->rt_gateway->sa_len);
			if (netmask)
				len += netmask->sa_len;
			if (len > rtm->rtm_msglen) {
				struct rt_msghdr *new_rtm;
				R_Malloc(new_rtm, struct rt_msghdr *, len);
				if (new_rtm == 0)
					senderr(ENOBUFS);
				Bcopy(rtm, new_rtm, rtm->rtm_msglen);
				Free(rtm); rtm = new_rtm;
				gate = (struct sockaddr *)
					(ROUNDUP(rt->rt_gateway->sa_len)
					    + (char *)dst);
				Bcopy(&rt->rt_gateway, gate,
				    rt->rt_gateway->sa_len);
				rtm->rtm_flags = rt->rt_flags;
				rtm->rtm_count = 2;
				if (netmask) {
					outmask = (struct sockaddr *)
					    (ROUNDUP(netmask->sa_len) + (char *)gate);
					Bcopy(netmask, outmask, netmask->sa_len);
					rtm->rtm_count = 3;
				}
			}
			break;

		case RTM_CHANGE:
			if (gate->sa_len > (len = rt->rt_gateway->sa_len))
				senderr(EDQUOT);
			if (rt->rt_ifa && rt->rt_ifa->ifa_rtrequest)
				rt->rt_ifa->ifa_rtrequest(RTM_CHANGE, rt, gate);
			Bcopy(gate, rt->rt_gateway, len);
			rt->rt_gateway->sa_len = len;

#ifdef notdef
#define metric(f, e) if (rtm->rtm_inits & (f)) rt->rt_m.e = rtm->e;
			metric(RTM_RPIPE, rtm_recvpipe);
			metric(RTM_SPIPE, rtm_sendpipe);
			metric(RTM_SSTHRESH, rtm_ssthresh);
			metric(RTM_RTT, rtm_rtt);
			metric(RTM_RTTVAR, rtm_rttvar);
			metric(RTM_HOPCOUNT, rtm_hopcount);
			metric(RTM_MTU, rtm_mtu);
			/*
			 * Fall into
			 */
		case RTM_LOCKS:
			rt->rt_locks |= (rtm->rtm_inits & rtm->rtm_locks);
			rt->rt_locks &= ~(rtm->rtm_inits);
			break;
#endif
		}
		goto cleanup;

	default:
		senderr(EOPNOTSUPP);
	}

flush:
	if (rtm) {
		if (error)
			rtm->rtm_errno = error;
		else
			rtm->rtm_flags |= RTF_DONE;
	}
cleanup:
	if (rt)
		rtfree(rt);
	if (cp = (caddr_t)rtm) {
		m_copyback(m, 0, len, cp);
		Free(rtm);
	}
	route_proto.sp_protocol = dst->sa_family;
	raw_input(m, &route_proto, &route_src, &route_dst);
	return (error);
}

/*
 * Copy data from a buffer back into the indicated mbuf chain,
 * starting "off" bytes from the beginning, extending the mbuf
 * chain if necessary.
 */
m_copyback(m0, off, len, cp)
	struct mbuf *m0;
	register int off;
	register int len;
	caddr_t cp;
{
	register int mlen;
	register struct mbuf *m = m0, *n;
	int totlen = 0;

	if (m0 == 0)
		return;
	while (off >= (mlen = m->m_len)) {
		off -= mlen;
		totlen += mlen;
		if (m->m_next == 0) {
			n = m_getclr(M_DONTWAIT, m->m_type);
			if (n == 0)
				goto out;
			n->m_len = min(MLEN, len + off);
			m->m_next = n;
		}
		m = m->m_next;
	}
	while (len > 0) {
		mlen = min(m->m_len - off, len);
		bcopy(cp, off + mtod(m, caddr_t), (unsigned)mlen);
		cp += mlen;
		len -= mlen;
		mlen += off;
		off = 0;
		totlen += mlen;
		if (len == 0)
			break;
		if (m->m_next == 0) {
			n = m_get(M_DONTWAIT, m->m_type);
			if (n == 0)
				break;
			n->m_len = min(MLEN, len);
			m->m_next = n;
		}
		m = m->m_next;
	}
out:	if (((m = m0)->m_flags & M_PKTHDR) && (m->m_pkthdr.len < totlen))
		m->m_pkthdr.len = totlen;
}

/*
 * The miss message and losing message are very similar.
 */
rt_missmsg(type, dst, gate, mask, src, flags, error)
	register struct sockaddr *dst;
	struct sockaddr *gate, *mask, *src;
{
	register struct rt_msghdr *rtm;
	register struct mbuf *m;
	int dlen = ROUNDUP(dst->sa_len);
	int len = dlen + sizeof(*rtm);

	if (route_cb.any_count == 0)
		return;
	m = m_gethdr(M_DONTWAIT, MT_DATA);
	if (m == 0)
		return;
	m->m_pkthdr.len = m->m_len = min(len, MHLEN);
	m->m_pkthdr.rcvif = 0;
	rtm = mtod(m, struct rt_msghdr *);
	bzero((caddr_t)rtm, sizeof(*rtm));	/*XXX assumes sizeof(*rtm) < MHLEN*/
	rtm->rtm_flags = RTF_DONE | flags;
	rtm->rtm_msglen = len;
	rtm->rtm_version = 1;
	rtm->rtm_type = type;
	rtm->rtm_count = 1;
	if (type == RTM_OLDADD || type == RTM_OLDDEL) {
		rtm->rtm_pid = u.u_procp->p_pid;
	}
	m_copyback(m, sizeof(*rtm), dlen, (caddr_t)dst);
	if (gate) {
		dlen = ROUNDUP(gate->sa_len);
		m_copyback(m, len, dlen, (caddr_t)gate);
		len += dlen;
		rtm->rtm_count++;
	}
	if (mask) {
		if (mask->sa_len)
			dlen = ROUNDUP(mask->sa_len);
		else
			dlen = sizeof(long);
		m_copyback(m, len, dlen, (caddr_t)mask);
		len += dlen;
		rtm->rtm_count++;
	}
	if (src) {
		dlen = ROUNDUP(src->sa_len);
		m_copyback(m, len, dlen, (caddr_t)src);
		len += dlen;
		rtm->rtm_count++;
	}
	if (m->m_pkthdr.len != len) {
		m_freem(m);
		return;
	}
	rtm->rtm_errno = error;
	rtm->rtm_msglen = len;
	route_proto.sp_protocol = dst->sa_family;
	raw_input(m, &route_proto, &route_src, &route_dst);
}

#include "kinfo.h"
static long zero_l = 0;
struct walkarg {
	int	w_op, w_arg;
	int	w_given, w_needed;
	caddr_t	w_where;
	struct {
		struct rt_msghdr m_rtm;
		char	m_sabuf[128];
	} w_m;
#define w_rtm	w_m.m_rtm
};
/*
 * This is used in dumping the kernel table via getkinfo().
 */
rt_dumpentry(rn, w)
	struct radix_node *rn;
	register struct walkarg *w;
{
	register struct sockaddr *sa;
	int n, error;

	for (; rn && !(rn->rn_flags & RNF_ROOT); rn = rn->rn_dupedkey) {
		int count = 0, size = sizeof(w->w_rtm);
		register struct rtentry *rt = (struct rtentry *)rn;

		if (w->w_op == KINFO_RT_FLAGS && !(rt->rt_flags & w->w_arg))
			continue;
		if (sa = rt_key(rt))
			{size += ROUNDUP(sa->sa_len); count++;}
		if (sa = rt->rt_gateway)
			{size += ROUNDUP(sa->sa_len); count++;}
		if (sa = rt_mask(rt)) {
			size += sa->sa_len ? ROUNDUP(sa->sa_len) : sizeof(long);
			count++;
		}
		if (sa = rt->rt_genmask)
			{size += ROUNDUP(sa->sa_len); count++;}
		w->w_needed += size;
		if (w->w_where == NULL || w->w_needed > 0)
			continue;
		w->w_rtm.rtm_msglen = size;
		w->w_rtm.rtm_count = count;
		w->w_rtm.rtm_flags = rt->rt_flags;

		if (size <= sizeof(w->w_m)) {
			register caddr_t cp = (caddr_t)(w->w_m.m_sabuf);
#define next(a, b) {n = (b); Bcopy((a), cp, n); cp += n;}
			if (sa = rt_key(rt))
				next(sa, ROUNDUP(sa->sa_len));
			if (sa = rt->rt_gateway)
				next(sa, ROUNDUP(sa->sa_len));
			if (sa = rt_mask(rt)) {
				w->w_rtm.rtm_flags |= RTF_MASK;
				if (sa->sa_len)
					{next(sa, ROUNDUP(sa->sa_len));}
				else
					next((&zero_l), sizeof(zero_l));
			}
			if (sa = rt->rt_genmask)
				next(sa, ROUNDUP(sa->sa_len));
#undef next
#define next(a, b) {n = (b); \
	if (error = copyout((caddr_t)(a), w->w_where, n)) return (error); \
	w->w_where += n; }

			next(&w->w_m, size);	/* Copy rtmsg and sockaddrs back */
			continue;
		}
		next(&w->w_rtm, sizeof(w->w_rtm));
		if (sa = rt_key(rt))
			next(sa, ROUNDUP(sa->sa_len));
		if (sa = rt->rt_gateway)
			next(sa, ROUNDUP(sa->sa_len));
		if (sa = rt_mask(rt)) {
			if (sa->sa_len)
				{next(sa, ROUNDUP(sa->sa_len));}
			else
				next(&zero_l, sizeof(zero_l));
		}
		if (sa = rt->rt_genmask)
			next(sa, ROUNDUP(sa->sa_len));
	}
	return (0);
#undef next
}

/*
 * getkinfo() entry point: dump the routing tables (optionally restricted
 * to one address family, or to entries matching the given flags) into
 * the user buffer, or just compute the space that would be needed.
 */
kinfo_rtable(op, where, given, arg, needed)
	int op, arg;
	caddr_t where;
	int *given, *needed;
{
	register struct radix_node_head *rnh;
	int s, error = 0;
	u_char af = ki_af(op);
	struct walkarg w;

	op &= 0xffff;
	if (op != KINFO_RT_DUMP && op != KINFO_RT_FLAGS)
		return (EINVAL);

	Bzero(&w, sizeof(w));
	if ((w.w_where = where) && given)
		w.w_given = *given;
	w.w_needed = 0 - w.w_given;
	w.w_arg = arg;
	w.w_op = op;
	w.w_rtm.rtm_version = 1;
	w.w_rtm.rtm_type = RTM_GET;

	s = splnet();
	for (rnh = radix_node_head; rnh; rnh = rnh->rnh_next) {
		if (rnh->rnh_af == 0)
			continue;
		if (af && af != rnh->rnh_af)
			continue;
		error = rt_walk(rnh->rnh_treetop, rt_dumpentry, &w);
		if (error)
			break;
	}
	w.w_needed += w.w_given;
	if (where && given)
		*given = w.w_where - where;
	else
		w.w_needed = (11 * w.w_needed) / 10;
	*needed = w.w_needed;
	splx(s);
	return (error);
}

/*
 * Iterative walk of a radix tree, applying f to each leaf.
 */
rt_walk(rn, f, w)
	register struct radix_node *rn;
	register int (*f)();
	struct walkarg *w;
{
	int error;

	for (;;) {
		while (rn->rn_b >= 0)
			rn = rn->rn_l;	/* First time through node, go left */
		if (error = (*f)(rn, w))
			return (error);	/* Process Leaf */
		while (rn->rn_p->rn_r == rn) {	/* if coming back from right */
			rn = rn->rn_p;		/* go back up */
			if (rn->rn_flags & RNF_ROOT)
				return 0;
		}
		rn = rn->rn_p->rn_r;		/* otherwise, go right */
	}
}

/*
 * Definitions of protocols supported in the ROUTE domain.
 */

int	route_output();
int	raw_init(), raw_usrreq(), raw_input(), raw_ctlinput();
extern	struct domain routedomain;		/* or at least forward */

struct protosw routesw[] = {
{ SOCK_RAW,	&routedomain,	0,		PR_ATOMIC|PR_ADDR,
  raw_input,	route_output,	raw_ctlinput,	0,
  route_usrreq,
  raw_init,	0,		0,		0,
},
{ 0,		0,		0,		0,
  raw_input,	0,		raw_ctlinput,	0,
  raw_usrreq,
  raw_init,	0,		0,		0,
}
};

int	unp_externalize(), unp_dispose();

struct domain routedomain =
    { PF_ROUTE, "route", 0, 0, 0,
      routesw, &routesw[sizeof(routesw)/sizeof(routesw[0])] };
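
/*
 * Illustrative sketch only (not part of the original file): a hedged
 * userland example of the message framing route_output() expects on a
 * PF_ROUTE socket -- an rt_msghdr with rtm_version 1, followed by
 * rtm_count sockaddrs, each padded to a multiple of sizeof(long),
 * exactly as parsed above.  The add_host_route() helper is hypothetical;
 * it assumes the rt_msghdr/RTM_*/RTF_* definitions from this kernel's
 * route.h are visible to user code.  The struct layout is not shown in
 * this file, and later BSDs replaced rtm_count with an rtm_addrs bitmask,
 * so this will not build against modern headers.  Kept under #ifdef notdef
 * so it is never compiled here.
 */
#ifdef notdef
#include <sys/types.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <net/route.h>
#include <string.h>
#include <unistd.h>

#define RT_ROUNDUP(a) (1 + (((a) - 1) | (sizeof(long) - 1)))	/* same rule as ROUNDUP above */

int
add_host_route(dst, gw)
	struct in_addr dst, gw;
{
	char buf[sizeof(struct rt_msghdr) + 2 * sizeof(struct sockaddr_in) + 16];
	register struct rt_msghdr *rtm = (struct rt_msghdr *)buf;
	register char *cp = (char *)(rtm + 1);
	struct sockaddr_in sin;
	int s, n;

	memset(buf, 0, sizeof(buf));
	rtm->rtm_version = 1;			/* route_output() rejects anything else */
	rtm->rtm_type = RTM_ADD;
	rtm->rtm_flags = RTF_UP | RTF_HOST | RTF_GATEWAY;
	rtm->rtm_count = 2;			/* destination, then gateway; no netmask */

	memset(&sin, 0, sizeof(sin));
	sin.sin_len = sizeof(sin);
	sin.sin_family = AF_INET;
	sin.sin_addr = dst;
	memcpy(cp, &sin, sizeof(sin));		/* destination sockaddr */
	cp += RT_ROUNDUP(sin.sin_len);
	sin.sin_addr = gw;
	memcpy(cp, &sin, sizeof(sin));		/* gateway sockaddr */
	cp += RT_ROUNDUP(sin.sin_len);
	rtm->rtm_msglen = cp - buf;

	if ((s = socket(PF_ROUTE, SOCK_RAW, 0)) < 0)
		return (-1);
	n = write(s, buf, rtm->rtm_msglen);	/* route_output() reflects the message back, rtm_errno set on error */
	(void) close(s);
	return (n == rtm->rtm_msglen ? 0 : -1);
}
#endif /* notdef */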