/*
 * Copyright (c) 2004, 2005 The DragonFly Project.  All rights reserved.
 *
 * This code is derived from software contributed to The DragonFly Project
 * by Jeffrey M. Hsu.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of The DragonFly Project nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific, prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE
 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Copyright (c) 1988, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)rtsock.c	8.7 (Berkeley) 10/12/95
 * $FreeBSD: src/sys/net/rtsock.c,v 1.44.2.11 2002/12/04 14:05:41 ru Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/sysctl.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/domain.h>
#include <sys/jail.h>

#include <sys/thread2.h>
#include <sys/socketvar2.h>

#include <net/if.h>
#include <net/if_var.h>
#include <net/route.h>
#include <net/raw_cb.h>
#include <net/netmsg2.h>
#include <net/netisr2.h>

MALLOC_DEFINE(M_RTABLE, "routetbl", "routing tables");

static struct route_cb {
	int	ip_count;
	int	ip6_count;
	int	ns_count;
	int	any_count;
} route_cb;

static const struct sockaddr route_src = { 2, PF_ROUTE, };

struct walkarg {
	int	w_tmemsize;
	int	w_op, w_arg;
	void	*w_tmem;
	struct sysctl_req *w_req;
};

#ifndef RTTABLE_DUMP_MSGCNT_MAX
/* Should be large enough for dupkeys */
#define RTTABLE_DUMP_MSGCNT_MAX		64
#endif

struct rttable_walkarg {
	int	w_op;
	int	w_arg;
	int	w_bufsz;
	void	*w_buf;

	int	w_buflen;

	const char *w_key;
	const char *w_mask;

	struct sockaddr_storage w_key0;
	struct sockaddr_storage w_mask0;
};

struct netmsg_rttable_walk {
	struct netmsg_base	base;
	int			af;
	struct rttable_walkarg	*w;
};

struct routecb {
	struct rawcb		rocb_rcb;
	unsigned int		rocb_msgfilter;
};
#define	sotoroutecb(so)	((struct routecb *)(so)->so_pcb)

static struct mbuf *
		rt_msg_mbuf (int, struct rt_addrinfo *);
static void	rt_msg_buffer (int, struct rt_addrinfo *, void *buf, int len);
static int	rt_msgsize(int type, const struct rt_addrinfo *rtinfo);
static int	rt_xaddrs (char *, char *, struct rt_addrinfo *);
static int	sysctl_rttable(int af, struct sysctl_req *req, int op, int arg);
static int	if_addrflags(const struct ifaddr *ifa);
static int	sysctl_iflist (int af, struct walkarg *w);
static int	route_output(struct mbuf *, struct socket *, ...);
static void	rt_setmetrics (u_long, struct rt_metrics *,
		    struct rt_metrics *);

/*
 * It really doesn't make any sense at all for this code to share much
 * with raw_usrreq.c, since its functionality is so restricted.  XXX
 */
static void
rts_abort(netmsg_t msg)
{
	crit_enter();
	raw_usrreqs.pru_abort(msg);
	/* msg invalid now */
	crit_exit();
}

static int
rts_filter(struct mbuf *m, const struct sockproto *proto,
	   const struct rawcb *rp)
{
	const struct routecb *rop = (const struct routecb *)rp;
	const struct rt_msghdr *rtm;

	KKASSERT(m != NULL);
	KKASSERT(proto != NULL);
	KKASSERT(rp != NULL);

	/* Wrong family for this socket. */
	if (proto->sp_family != PF_ROUTE)
		return ENOPROTOOPT;

	/* If no filter set, just return. */
	if (rop->rocb_msgfilter == 0)
		return 0;

	/* Ensure we can access rtm_type */
	if (m->m_len <
	    offsetof(struct rt_msghdr, rtm_type) + sizeof(rtm->rtm_type))
		return EINVAL;

	rtm = mtod(m, const struct rt_msghdr *);
	/* If the rtm type is filtered out, return a positive. */
	if (!(rop->rocb_msgfilter & ROUTE_FILTER(rtm->rtm_type)))
		return EEXIST;

	/* Passed the filter. */
	return 0;
}

/* pru_accept is EOPNOTSUPP */

static void
rts_attach(netmsg_t msg)
{
	struct socket *so = msg->base.nm_so;
	struct pru_attach_info *ai = msg->attach.nm_ai;
	struct rawcb *rp;
	struct routecb *rop;
	int proto = msg->attach.nm_proto;
	int error;

	crit_enter();
	if (sotorawcb(so) != NULL) {
		error = EISCONN;
		goto done;
	}

	rop = kmalloc(sizeof *rop, M_PCB, M_WAITOK | M_ZERO);
	rp = &rop->rocb_rcb;

	/*
	 * The critical section is necessary to block protocols from sending
	 * error notifications (like RTM_REDIRECT or RTM_LOSING) while
	 * this PCB is extant but incompletely initialized.
	 * Probably we should try to do more of this work beforehand and
	 * eliminate the critical section.
	 */
	so->so_pcb = rp;
	soreference(so);	/* so_pcb assignment */
	error = raw_attach(so, proto, ai->sb_rlimit);
	rp = sotorawcb(so);
	if (error) {
		kfree(rop, M_PCB);
		goto done;
	}
	switch(rp->rcb_proto.sp_protocol) {
	case AF_INET:
		route_cb.ip_count++;
		break;
	case AF_INET6:
		route_cb.ip6_count++;
		break;
	}
	rp->rcb_faddr = &route_src;
	rp->rcb_filter = rts_filter;
	route_cb.any_count++;
	soisconnected(so);
	so->so_options |= SO_USELOOPBACK;
	error = 0;
done:
	crit_exit();
	lwkt_replymsg(&msg->lmsg, error);
}

static void
rts_bind(netmsg_t msg)
{
	crit_enter();
	raw_usrreqs.pru_bind(msg);	/* xxx just EINVAL */
	/* msg invalid now */
	crit_exit();
}

static void
rts_connect(netmsg_t msg)
{
	crit_enter();
	raw_usrreqs.pru_connect(msg);	/* XXX just EINVAL */
	/* msg invalid now */
	crit_exit();
}

/* pru_connect2 is EOPNOTSUPP */
/* pru_control is EOPNOTSUPP */

static void
rts_detach(netmsg_t msg)
{
	struct socket *so = msg->base.nm_so;
	struct rawcb *rp = sotorawcb(so);

	crit_enter();
	if (rp != NULL) {
		switch(rp->rcb_proto.sp_protocol) {
		case AF_INET:
			route_cb.ip_count--;
			break;
		case AF_INET6:
			route_cb.ip6_count--;
			break;
		}
		route_cb.any_count--;
	}
	raw_usrreqs.pru_detach(msg);
	/* msg invalid now */
	crit_exit();
}

static void
rts_disconnect(netmsg_t msg)
{
	crit_enter();
	raw_usrreqs.pru_disconnect(msg);
	/* msg invalid now */
	crit_exit();
}

/* pru_listen is EOPNOTSUPP */

static void
rts_peeraddr(netmsg_t msg)
{
	crit_enter();
	raw_usrreqs.pru_peeraddr(msg);
	/* msg invalid now */
	crit_exit();
}

/* pru_rcvd is EOPNOTSUPP */
/* pru_rcvoob is EOPNOTSUPP */

static void
rts_send(netmsg_t msg)
{
	crit_enter();
	raw_usrreqs.pru_send(msg);
	/* msg invalid now */
	crit_exit();
}

/* pru_sense is null */

static void
rts_shutdown(netmsg_t msg)
{
	crit_enter();
	raw_usrreqs.pru_shutdown(msg);
	/* msg invalid now */
	crit_exit();
}

static void
rts_sockaddr(netmsg_t msg)
{
	crit_enter();
	raw_usrreqs.pru_sockaddr(msg);
	/* msg invalid now */
	crit_exit();
}

static struct pr_usrreqs route_usrreqs = {
	.pru_abort = rts_abort,
	.pru_accept = pr_generic_notsupp,
	.pru_attach = rts_attach,
	.pru_bind = rts_bind,
	.pru_connect = rts_connect,
	.pru_connect2 = pr_generic_notsupp,
	.pru_control = pr_generic_notsupp,
	.pru_detach = rts_detach,
	.pru_disconnect = rts_disconnect,
	.pru_listen = pr_generic_notsupp,
	.pru_peeraddr = rts_peeraddr,
	.pru_rcvd = pr_generic_notsupp,
	.pru_rcvoob = pr_generic_notsupp,
	.pru_send = rts_send,
	.pru_sense = pru_sense_null,
	.pru_shutdown = rts_shutdown,
	.pru_sockaddr = rts_sockaddr,
	.pru_sosend = sosend,
	.pru_soreceive = soreceive
};

static __inline sa_family_t
familyof(struct sockaddr *sa)
{
	return (sa != NULL ? sa->sa_family : 0);
}

/*
 * Routing socket input function.  The packet must be serialized onto cpu 0.
 * We use the cpu0_soport() netisr processing loop to handle it.
 *
 * This looks messy but it means that anyone, including interrupt code,
 * can send a message to the routing socket.
 */
static void
rts_input_handler(netmsg_t msg)
{
	static const struct sockaddr route_dst = { 2, PF_ROUTE, };
	struct sockproto route_proto;
	struct netmsg_packet *pmsg = &msg->packet;
	struct mbuf *m;
	sa_family_t family;
	struct rawcb *skip;

	family = pmsg->base.lmsg.u.ms_result;
	route_proto.sp_family = PF_ROUTE;
	route_proto.sp_protocol = family;

	m = pmsg->nm_packet;
	M_ASSERTPKTHDR(m);

	skip = m->m_pkthdr.header;
	m->m_pkthdr.header = NULL;

	raw_input(m, &route_proto, &route_src, &route_dst, skip);
}

static void
rts_input_skip(struct mbuf *m, sa_family_t family, struct rawcb *skip)
{
	struct netmsg_packet *pmsg;
	lwkt_port_t port;

	M_ASSERTPKTHDR(m);

	port = netisr_cpuport(0);	/* XXX same as for routing socket */
	pmsg = &m->m_hdr.mh_netmsg;
	netmsg_init(&pmsg->base, NULL, &netisr_apanic_rport,
		    0, rts_input_handler);
	pmsg->nm_packet = m;
	pmsg->base.lmsg.u.ms_result = family;
	m->m_pkthdr.header = skip;	/* XXX steal field in pkthdr */
	lwkt_sendmsg(port, &pmsg->base.lmsg);
}

static __inline void
rts_input(struct mbuf *m, sa_family_t family)
{
	rts_input_skip(m, family, NULL);
}

static void
route_ctloutput(netmsg_t msg)
{
	struct socket *so = msg->ctloutput.base.nm_so;
	struct sockopt *sopt = msg->ctloutput.nm_sopt;
	struct routecb *rop = sotoroutecb(so);
	int error;
	unsigned int msgfilter;

	if (sopt->sopt_level != AF_ROUTE) {
		error = EINVAL;
		goto out;
	}

	error = 0;

	switch (sopt->sopt_dir) {
	case SOPT_SET:
		switch (sopt->sopt_name) {
		case ROUTE_MSGFILTER:
			error = soopt_to_kbuf(sopt, &msgfilter,
			    sizeof(msgfilter), sizeof(msgfilter));
			if (error == 0)
				rop->rocb_msgfilter = msgfilter;
			break;
		default:
			error = ENOPROTOOPT;
			break;
		}
		break;
	case SOPT_GET:
		switch (sopt->sopt_name) {
		case ROUTE_MSGFILTER:
			msgfilter = rop->rocb_msgfilter;
			soopt_from_kbuf(sopt, &msgfilter, sizeof(msgfilter));
			break;
		default:
			error = ENOPROTOOPT;
			break;
		}
	}
out:
	lwkt_replymsg(&msg->ctloutput.base.lmsg, error);
}
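
/*
 * Illustrative sketch (userland, not part of this file): a listener that
 * only cares about interface status and arrival/departure events could
 * reduce its wakeups via the ROUTE_MSGFILTER option handled above, e.g.:
 *
 *	int s = socket(PF_ROUTE, SOCK_RAW, 0);
 *	unsigned int msgfilter = ROUTE_FILTER(RTM_IFINFO) |
 *	    ROUTE_FILTER(RTM_IFANNOUNCE);
 *	setsockopt(s, AF_ROUTE, ROUTE_MSGFILTER, &msgfilter,
 *	    sizeof(msgfilter));
 *
 * rts_filter() then rejects every other message type for that socket.
 */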

static void *
reallocbuf_nofree(void *ptr, size_t len, size_t olen)
{
	void *newptr;

	newptr = kmalloc(len, M_RTABLE, M_INTWAIT | M_NULLOK);
	if (newptr == NULL)
		return NULL;
	bcopy(ptr, newptr, olen);
	return (newptr);
}

/*
 * Internal helper routine for route_output().
 */
static int
_fillrtmsg(struct rt_msghdr **prtm, struct rtentry *rt,
	   struct rt_addrinfo *rtinfo)
{
	int msglen;
	struct rt_msghdr *rtm = *prtm;

	/* Fill in rt_addrinfo for call to rt_msg_buffer(). */
	rtinfo->rti_dst = rt_key(rt);
	rtinfo->rti_gateway = rt->rt_gateway;
	rtinfo->rti_netmask = rt_mask(rt);		/* might be NULL */
	rtinfo->rti_genmask = rt->rt_genmask;		/* might be NULL */
	if (rtm->rtm_addrs & (RTA_IFP | RTA_IFA)) {
		if (rt->rt_ifp != NULL) {
			rtinfo->rti_ifpaddr =
			    TAILQ_FIRST(&rt->rt_ifp->if_addrheads[mycpuid])
			    ->ifa->ifa_addr;
			rtinfo->rti_ifaaddr = rt->rt_ifa->ifa_addr;
			if (rt->rt_ifp->if_flags & IFF_POINTOPOINT)
				rtinfo->rti_bcastaddr = rt->rt_ifa->ifa_dstaddr;
			rtm->rtm_index = rt->rt_ifp->if_index;
		} else {
			rtinfo->rti_ifpaddr = NULL;
			rtinfo->rti_ifaaddr = NULL;
		}
	} else if (rt->rt_ifp != NULL) {
		rtm->rtm_index = rt->rt_ifp->if_index;
	}

	msglen = rt_msgsize(rtm->rtm_type, rtinfo);
	if (rtm->rtm_msglen < msglen) {
		/* NOTE: Caller will free the old rtm accordingly */
		rtm = reallocbuf_nofree(rtm, msglen, rtm->rtm_msglen);
		if (rtm == NULL)
			return (ENOBUFS);
		*prtm = rtm;
	}
	rt_msg_buffer(rtm->rtm_type, rtinfo, rtm, msglen);

	rtm->rtm_flags = rt->rt_flags;
	rtm->rtm_rmx = rt->rt_rmx;
	rtm->rtm_addrs = rtinfo->rti_addrs;

	return (0);
}

struct rtm_arg {
	struct rt_msghdr	*bak_rtm;
	struct rt_msghdr	*new_rtm;
};

static int
fillrtmsg(struct rtm_arg *arg, struct rtentry *rt,
	  struct rt_addrinfo *rtinfo)
{
	struct rt_msghdr *rtm = arg->new_rtm;
	int error;

	error = _fillrtmsg(&rtm, rt, rtinfo);
	if (!error) {
		if (arg->new_rtm != rtm) {
			/*
			 * _fillrtmsg() just allocated a new rtm;
			 * if the previously allocated rtm is not
			 * the backing rtm, it should be freed.
			 */
			if (arg->new_rtm != arg->bak_rtm)
				kfree(arg->new_rtm, M_RTABLE);
			arg->new_rtm = rtm;
		}
	}
	return error;
}

static void route_output_add_callback(int, int, struct rt_addrinfo *,
					struct rtentry *, void *);
static void route_output_delete_callback(int, int, struct rt_addrinfo *,
					struct rtentry *, void *);
static int route_output_get_callback(int, struct rt_addrinfo *,
				      struct rtentry *, void *, int);
static int route_output_change_callback(int, struct rt_addrinfo *,
					 struct rtentry *, void *, int);
static int route_output_lock_callback(int, struct rt_addrinfo *,
				       struct rtentry *, void *, int);
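
/*
 * route_output() handles a request written to a routing socket: the mbuf
 * carries one rt_msghdr followed by the RT_ROUNDUP()-aligned sockaddrs
 * indicated by rtm_addrs (parsed by rt_xaddrs()).  The request is
 * dispatched to the per-type callbacks above, and the (possibly
 * reallocated) reply is looped back to listeners via rts_input_skip().
 */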
/*ARGSUSED*/
static int
route_output(struct mbuf *m, struct socket *so, ...)
{
	struct rtm_arg arg;
	struct rt_msghdr *rtm = NULL;
	struct rawcb *rp = NULL;
	struct pr_output_info *oi;
	struct rt_addrinfo rtinfo;
	sa_family_t family;
	int len, error = 0;
	__va_list ap;

	M_ASSERTPKTHDR(m);

	__va_start(ap, so);
	oi = __va_arg(ap, struct pr_output_info *);
	__va_end(ap);

	family = familyof(NULL);

#define gotoerr(e) { error = e; goto flush;}

	if (m == NULL ||
	    (m->m_len < sizeof(long) &&
	     (m = m_pullup(m, sizeof(long))) == NULL))
		return (ENOBUFS);
	len = m->m_pkthdr.len;
	if (len < sizeof(struct rt_msghdr) ||
	    len != mtod(m, struct rt_msghdr *)->rtm_msglen)
		gotoerr(EINVAL);

	rtm = kmalloc(len, M_RTABLE, M_INTWAIT | M_NULLOK);
	if (rtm == NULL)
		gotoerr(ENOBUFS);

	m_copydata(m, 0, len, (caddr_t)rtm);
	if (rtm->rtm_version != RTM_VERSION)
		gotoerr(EPROTONOSUPPORT);

	rtm->rtm_pid = oi->p_pid;
	bzero(&rtinfo, sizeof(struct rt_addrinfo));
	rtinfo.rti_addrs = rtm->rtm_addrs;
	if (rt_xaddrs((char *)(rtm + 1), (char *)rtm + len, &rtinfo) != 0)
		gotoerr(EINVAL);

	rtinfo.rti_flags = rtm->rtm_flags;
	if (rtinfo.rti_dst == NULL || rtinfo.rti_dst->sa_family >= AF_MAX ||
	    (rtinfo.rti_gateway && rtinfo.rti_gateway->sa_family >= AF_MAX))
		gotoerr(EINVAL);

	family = familyof(rtinfo.rti_dst);

	/*
	 * Verify that the caller has the appropriate privilege; RTM_GET
	 * is the only operation the non-superuser is allowed.
	 */
	if (rtm->rtm_type != RTM_GET &&
	    priv_check_cred(so->so_cred, PRIV_ROOT, 0) != 0)
		gotoerr(EPERM);

	if (rtinfo.rti_genmask != NULL) {
		error = rtmask_add_global(rtinfo.rti_genmask,
		    rtm->rtm_type != RTM_GET ?
		    RTREQ_PRIO_HIGH : RTREQ_PRIO_NORM);
		if (error)
			goto flush;
	}

	switch (rtm->rtm_type) {
	case RTM_ADD:
		if (rtinfo.rti_gateway == NULL) {
			error = EINVAL;
		} else {
			error = rtrequest1_global(RTM_ADD, &rtinfo,
			    route_output_add_callback, rtm, RTREQ_PRIO_HIGH);
		}
		break;
	case RTM_DELETE:
		/*
		 * Backing rtm (bak_rtm) could _not_ be freed during
		 * rtrequest1_global or rtsearch_global, even if the
		 * callback reallocates the rtm due to its size changes,
		 * since rtinfo points to the backing rtm's memory area.
		 * After rtrequest1_global or rtsearch_global returns,
		 * it is safe to free the backing rtm, since rtinfo will
		 * not be used anymore.
		 *
		 * new_rtm will be used to save the new rtm allocated
		 * by rtrequest1_global or rtsearch_global.
		 */
		arg.bak_rtm = rtm;
		arg.new_rtm = rtm;
		error = rtrequest1_global(RTM_DELETE, &rtinfo,
		    route_output_delete_callback, &arg, RTREQ_PRIO_HIGH);
		rtm = arg.new_rtm;
		if (rtm != arg.bak_rtm)
			kfree(arg.bak_rtm, M_RTABLE);
		break;
	case RTM_GET:
		/* See the comment in RTM_DELETE */
		arg.bak_rtm = rtm;
		arg.new_rtm = rtm;
		error = rtsearch_global(RTM_GET, &rtinfo,
		    route_output_get_callback, &arg, RTS_NOEXACTMATCH,
		    RTREQ_PRIO_NORM);
		rtm = arg.new_rtm;
		if (rtm != arg.bak_rtm)
			kfree(arg.bak_rtm, M_RTABLE);
		break;
	case RTM_CHANGE:
		error = rtsearch_global(RTM_CHANGE, &rtinfo,
		    route_output_change_callback, rtm, RTS_EXACTMATCH,
		    RTREQ_PRIO_HIGH);
		break;
	case RTM_LOCK:
		error = rtsearch_global(RTM_LOCK, &rtinfo,
		    route_output_lock_callback, rtm, RTS_EXACTMATCH,
		    RTREQ_PRIO_HIGH);
		break;
	default:
		error = EOPNOTSUPP;
		break;
	}
flush:
	if (rtm != NULL) {
		if (error != 0)
			rtm->rtm_errno = error;
		else
			rtm->rtm_flags |= RTF_DONE;
	}

	/*
	 * Check to see if we don't want our own messages.
	 */
	if (!(so->so_options & SO_USELOOPBACK)) {
		if (route_cb.any_count <= 1) {
			if (rtm != NULL)
				kfree(rtm, M_RTABLE);
			m_freem(m);
			return (error);
		}
		/* There is another listener, so construct message */
		rp = sotorawcb(so);
	}
	if (rtm != NULL) {
		m_copyback(m, 0, rtm->rtm_msglen, (caddr_t)rtm);
		if (m->m_pkthdr.len < rtm->rtm_msglen) {
			m_freem(m);
			m = NULL;
		} else if (m->m_pkthdr.len > rtm->rtm_msglen)
			m_adj(m, rtm->rtm_msglen - m->m_pkthdr.len);
		kfree(rtm, M_RTABLE);
	}
	if (m != NULL)
		rts_input_skip(m, family, rp);
	return (error);
}

static void
route_output_add_callback(int cmd, int error, struct rt_addrinfo *rtinfo,
			  struct rtentry *rt, void *arg)
{
	struct rt_msghdr *rtm = arg;

	if (error == 0 && rt != NULL) {
		rt_setmetrics(rtm->rtm_inits, &rtm->rtm_rmx,
		    &rt->rt_rmx);
		rt->rt_rmx.rmx_locks &= ~(rtm->rtm_inits);
		rt->rt_rmx.rmx_locks |=
		    (rtm->rtm_inits & rtm->rtm_rmx.rmx_locks);
		if (rtinfo->rti_genmask != NULL) {
			rt->rt_genmask = rtmask_purelookup(rtinfo->rti_genmask);
			if (rt->rt_genmask == NULL) {
				/*
				 * This should not happen, since we
				 * have already installed genmask
				 * on each CPU before we reach here.
				 */
				panic("genmask is gone!?");
			}
		} else {
			rt->rt_genmask = NULL;
		}
		rtm->rtm_index = rt->rt_ifp->if_index;
	}
}

static void
route_output_delete_callback(int cmd, int error, struct rt_addrinfo *rtinfo,
			     struct rtentry *rt, void *arg)
{
	if (error == 0 && rt) {
		++rt->rt_refcnt;
		if (fillrtmsg(arg, rt, rtinfo) != 0) {
			error = ENOBUFS;
			/* XXX no way to return the error */
		}
		--rt->rt_refcnt;
	}
	if (rt && rt->rt_refcnt == 0) {
		++rt->rt_refcnt;
		rtfree(rt);
	}
}

static int
route_output_get_callback(int cmd, struct rt_addrinfo *rtinfo,
			  struct rtentry *rt, void *arg, int found_cnt)
{
	int error, found = 0;

	if (((rtinfo->rti_flags ^ rt->rt_flags) & RTF_HOST) == 0)
		found = 1;

	error = fillrtmsg(arg, rt, rtinfo);
	if (!error && found) {
		/* Got the exact match, we could return now! */
		error = EJUSTRETURN;
	}
	return error;
}

static int
route_output_change_callback(int cmd, struct rt_addrinfo *rtinfo,
			     struct rtentry *rt, void *arg, int found_cnt)
{
	struct rt_msghdr *rtm = arg;
	struct ifaddr *ifa;
	int error = 0;

	/*
	 * new gateway could require new ifaddr, ifp;
	 * flags may also be different; ifp may be specified
	 * by ll sockaddr when protocol address is ambiguous
	 */
	if (((rt->rt_flags & RTF_GATEWAY) && rtinfo->rti_gateway != NULL) ||
	    rtinfo->rti_ifpaddr != NULL ||
	    (rtinfo->rti_ifaaddr != NULL &&
	     !sa_equal(rtinfo->rti_ifaaddr, rt->rt_ifa->ifa_addr))) {
		error = rt_getifa(rtinfo);
		if (error != 0)
			goto done;
	}
	if (rtinfo->rti_gateway != NULL) {
		/*
		 * We only need to generate rtmsg upon the
		 * first route to be changed.
		 */
		error = rt_setgate(rt, rt_key(rt), rtinfo->rti_gateway);
		if (error != 0)
			goto done;
	}
	if ((ifa = rtinfo->rti_ifa) != NULL) {
		struct ifaddr *oifa = rt->rt_ifa;

		if (oifa != ifa) {
			if (oifa && oifa->ifa_rtrequest)
				oifa->ifa_rtrequest(RTM_DELETE, rt);
			IFAFREE(rt->rt_ifa);
			IFAREF(ifa);
			rt->rt_ifa = ifa;
			rt->rt_ifp = rtinfo->rti_ifp;
		}
	}
	rt_setmetrics(rtm->rtm_inits, &rtm->rtm_rmx, &rt->rt_rmx);
	if (rt->rt_ifa && rt->rt_ifa->ifa_rtrequest)
		rt->rt_ifa->ifa_rtrequest(RTM_ADD, rt);
	if (rtinfo->rti_genmask != NULL) {
		rt->rt_genmask = rtmask_purelookup(rtinfo->rti_genmask);
		if (rt->rt_genmask == NULL) {
			/*
			 * This should not happen, since we
			 * have already installed genmask
			 * on each CPU before we reach here.
			 */
			panic("genmask is gone!?");
		}
	}
	rtm->rtm_index = rt->rt_ifp->if_index;
	if (found_cnt == 1)
		rt_rtmsg(RTM_CHANGE, rt, rt->rt_ifp, 0);
done:
	return error;
}

static int
route_output_lock_callback(int cmd, struct rt_addrinfo *rtinfo,
			   struct rtentry *rt, void *arg,
			   int found_cnt __unused)
{
	struct rt_msghdr *rtm = arg;

	rt->rt_rmx.rmx_locks &= ~(rtm->rtm_inits);
	rt->rt_rmx.rmx_locks |=
	    (rtm->rtm_inits & rtm->rtm_rmx.rmx_locks);
	return 0;
}

static void
rt_setmetrics(u_long which, struct rt_metrics *in, struct rt_metrics *out)
{
#define setmetric(flag, elt) if (which & (flag)) out->elt = in->elt;
	setmetric(RTV_RPIPE, rmx_recvpipe);
	setmetric(RTV_SPIPE, rmx_sendpipe);
	setmetric(RTV_SSTHRESH, rmx_ssthresh);
	setmetric(RTV_RTT, rmx_rtt);
	setmetric(RTV_RTTVAR, rmx_rttvar);
	setmetric(RTV_HOPCOUNT, rmx_hopcount);
	setmetric(RTV_MTU, rmx_mtu);
	setmetric(RTV_EXPIRE, rmx_expire);
	setmetric(RTV_MSL, rmx_msl);
	setmetric(RTV_IWMAXSEGS, rmx_iwmaxsegs);
	setmetric(RTV_IWCAPSEGS, rmx_iwcapsegs);
#undef setmetric
}

/*
 * Extract the addresses of the passed sockaddrs.
 * Do a little sanity checking so as to avoid bad memory references.
 * This data is derived straight from userland.
 */
static int
rt_xaddrs(char *cp, char *cplim, struct rt_addrinfo *rtinfo)
{
	struct sockaddr *sa;
	int i;

	for (i = 0; (i < RTAX_MAX) && (cp < cplim); i++) {
		if ((rtinfo->rti_addrs & (1 << i)) == 0)
			continue;
		sa = (struct sockaddr *)cp;
		/*
		 * It won't fit.
		 */
		if ((cp + sa->sa_len) > cplim) {
			return (EINVAL);
		}

		/*
		 * There are no more... Quit now.
		 * If there are more bits, they are in error.
		 * I've seen this.  route(1) can evidently generate these.
		 * This causes kernel to core dump.
		 * For compatibility, if we see this, point to a safe address.
		 */
		if (sa->sa_len == 0) {
			static struct sockaddr sa_zero = {
				sizeof sa_zero, AF_INET,
			};

			rtinfo->rti_info[i] = &sa_zero;
			kprintf("rtsock: received more addr bits than sockaddrs.\n");
			return (0); /* should be EINVAL but for compat */
		}

		/* Accept the sockaddr. */
		rtinfo->rti_info[i] = sa;
		cp += RT_ROUNDUP(sa->sa_len);
	}
	return (0);
}

static int
rt_msghdrsize(int type)
{
	switch (type) {
	case RTM_DELADDR:
	case RTM_NEWADDR:
		return sizeof(struct ifa_msghdr);
	case RTM_DELMADDR:
	case RTM_NEWMADDR:
		return sizeof(struct ifma_msghdr);
	case RTM_IFINFO:
		return sizeof(struct if_msghdr);
	case RTM_IFANNOUNCE:
	case RTM_IEEE80211:
		return sizeof(struct if_announcemsghdr);
	default:
		return sizeof(struct rt_msghdr);
	}
}

static int
rt_msgsize(int type, const struct rt_addrinfo *rtinfo)
{
	int len, i;

	len = rt_msghdrsize(type);
	for (i = 0; i < RTAX_MAX; i++) {
		if (rtinfo->rti_info[i] != NULL)
			len += RT_ROUNDUP(rtinfo->rti_info[i]->sa_len);
	}
	len = ALIGN(len);
	return len;
}

/*
 * Build a routing message in a buffer.
 * Copy the addresses in the rtinfo->rti_info[] sockaddr array
 * to the end of the buffer after the message header.
 *
 * Set the rtinfo->rti_addrs bitmask of addresses present in rtinfo->rti_info[].
 * This side-effect can be avoided if we reorder the addrs bitmask field in all
 * the route messages to line up so we can set it here instead of back in the
 * calling routine.
 */
static void
rt_msg_buffer(int type, struct rt_addrinfo *rtinfo, void *buf, int msglen)
{
	struct rt_msghdr *rtm;
	char *cp;
	int dlen, i;

	rtm = (struct rt_msghdr *) buf;
	rtm->rtm_version = RTM_VERSION;
	rtm->rtm_type = type;
	rtm->rtm_msglen = msglen;

	cp = (char *)buf + rt_msghdrsize(type);
	rtinfo->rti_addrs = 0;
	for (i = 0; i < RTAX_MAX; i++) {
		struct sockaddr *sa;

		if ((sa = rtinfo->rti_info[i]) == NULL)
			continue;
		rtinfo->rti_addrs |= (1 << i);
		dlen = RT_ROUNDUP(sa->sa_len);
		bcopy(sa, cp, dlen);
		cp += dlen;
	}
}
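
/*
 * Resulting buffer layout (sketch), e.g. for a reply carrying a destination
 * and a netmask:
 *
 *	+------------------+-------------------+--------------------+
 *	| type's msghdr    | RT_ROUNDUP(dst)   | RT_ROUNDUP(mask)   |
 *	+------------------+-------------------+--------------------+
 *	<-- rt_msghdrsize --><---- sockaddrs in RTAX_* index order --->
 *
 * rtm_msglen covers the whole buffer; absent addresses are simply skipped
 * and their bit is left clear in rti_addrs.
 */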
/*
 * Build a routing message in a mbuf chain.
 * Copy the addresses in the rtinfo->rti_info[] sockaddr array
 * to the end of the mbuf after the message header.
 *
 * Set the rtinfo->rti_addrs bitmask of addresses present in rtinfo->rti_info[].
 * This side-effect can be avoided if we reorder the addrs bitmask field in all
 * the route messages to line up so we can set it here instead of back in the
 * calling routine.
 */
static struct mbuf *
rt_msg_mbuf(int type, struct rt_addrinfo *rtinfo)
{
	struct mbuf *m;
	struct rt_msghdr *rtm;
	int hlen, len;
	int i;

	hlen = rt_msghdrsize(type);
	KASSERT(hlen <= MCLBYTES, ("rt_msg_mbuf: hlen %d doesn't fit", hlen));

	m = m_getl(hlen, M_NOWAIT, MT_DATA, M_PKTHDR, NULL);
	if (m == NULL)
		return (NULL);
	mbuftrackid(m, 32);
	m->m_pkthdr.len = m->m_len = hlen;
	m->m_pkthdr.rcvif = NULL;
	rtinfo->rti_addrs = 0;
	len = hlen;
	for (i = 0; i < RTAX_MAX; i++) {
		struct sockaddr *sa;
		int dlen;

		if ((sa = rtinfo->rti_info[i]) == NULL)
			continue;
		rtinfo->rti_addrs |= (1 << i);
		dlen = RT_ROUNDUP(sa->sa_len);
		m_copyback(m, len, dlen, (caddr_t)sa); /* can grow mbuf chain */
		len += dlen;
	}
	if (m->m_pkthdr.len != len) { /* one of the m_copyback() calls failed */
		m_freem(m);
		return (NULL);
	}
	rtm = mtod(m, struct rt_msghdr *);
	bzero(rtm, hlen);
	rtm->rtm_msglen = len;
	rtm->rtm_version = RTM_VERSION;
	rtm->rtm_type = type;
	return (m);
}

/*
 * This routine is called to generate a message from the routing
 * socket indicating that a redirect has occurred, a routing lookup
 * has failed, or that a protocol has detected timeouts to a particular
 * destination.
 */
void
rt_missmsg(int type, struct rt_addrinfo *rtinfo, int flags, int error)
{
	struct sockaddr *dst = rtinfo->rti_info[RTAX_DST];
	struct rt_msghdr *rtm;
	struct mbuf *m;

	if (route_cb.any_count == 0)
		return;
	m = rt_msg_mbuf(type, rtinfo);
	if (m == NULL)
		return;
	rtm = mtod(m, struct rt_msghdr *);
	rtm->rtm_flags = RTF_DONE | flags;
	rtm->rtm_errno = error;
	rtm->rtm_addrs = rtinfo->rti_addrs;
	rts_input(m, familyof(dst));
}

void
rt_dstmsg(int type, struct sockaddr *dst, int error)
{
	struct rt_msghdr *rtm;
	struct rt_addrinfo addrs;
	struct mbuf *m;

	if (route_cb.any_count == 0)
		return;
	bzero(&addrs, sizeof(struct rt_addrinfo));
	addrs.rti_info[RTAX_DST] = dst;
	m = rt_msg_mbuf(type, &addrs);
	if (m == NULL)
		return;
	rtm = mtod(m, struct rt_msghdr *);
	rtm->rtm_flags = RTF_DONE;
	rtm->rtm_errno = error;
	rtm->rtm_addrs = addrs.rti_addrs;
	rts_input(m, familyof(dst));
}

/*
 * This routine is called to generate a message from the routing
 * socket indicating that the status of a network interface has changed.
 */
void
rt_ifmsg(struct ifnet *ifp)
{
	struct if_msghdr *ifm;
	struct mbuf *m;
	struct rt_addrinfo rtinfo;

	if (route_cb.any_count == 0)
		return;
	bzero(&rtinfo, sizeof(struct rt_addrinfo));
	m = rt_msg_mbuf(RTM_IFINFO, &rtinfo);
	if (m == NULL)
		return;
	ifm = mtod(m, struct if_msghdr *);
	ifm->ifm_index = ifp->if_index;
	ifm->ifm_flags = ifp->if_flags;
	ifm->ifm_data = ifp->if_data;
	ifm->ifm_addrs = 0;
	rts_input(m, 0);
}

static void
rt_ifamsg(int cmd, struct ifaddr *ifa)
{
	struct ifa_msghdr *ifam;
	struct rt_addrinfo rtinfo;
	struct mbuf *m;
	struct ifnet *ifp = ifa->ifa_ifp;

	bzero(&rtinfo, sizeof(struct rt_addrinfo));
	rtinfo.rti_ifaaddr = ifa->ifa_addr;
	rtinfo.rti_ifpaddr =
	    TAILQ_FIRST(&ifp->if_addrheads[mycpuid])->ifa->ifa_addr;
	rtinfo.rti_netmask = ifa->ifa_netmask;
	rtinfo.rti_bcastaddr = ifa->ifa_dstaddr;

	m = rt_msg_mbuf(cmd, &rtinfo);
	if (m == NULL)
		return;

	ifam = mtod(m, struct ifa_msghdr *);
	ifam->ifam_index = ifp->if_index;
	ifam->ifam_flags = ifa->ifa_flags;
	ifam->ifam_addrs = rtinfo.rti_addrs;
	ifam->ifam_pid = curproc->p_pid;
	ifam->ifam_addrflags = if_addrflags(ifa);
	ifam->ifam_metric = ifa->ifa_metric;

	rts_input(m, familyof(ifa->ifa_addr));
}

void
rt_rtmsg(int cmd, struct rtentry *rt, struct ifnet *ifp, int error)
{
	struct rt_msghdr *rtm;
	struct rt_addrinfo rtinfo;
	struct mbuf *m;
	struct sockaddr *dst;

	if (rt == NULL)
		return;

	bzero(&rtinfo, sizeof(struct rt_addrinfo));
	rtinfo.rti_dst = dst = rt_key(rt);
	rtinfo.rti_gateway = rt->rt_gateway;
	rtinfo.rti_netmask = rt_mask(rt);
	if (ifp != NULL) {
		rtinfo.rti_ifpaddr =
		    TAILQ_FIRST(&ifp->if_addrheads[mycpuid])->ifa->ifa_addr;
	}
	if (rt->rt_ifa != NULL)
		rtinfo.rti_ifaaddr = rt->rt_ifa->ifa_addr;

	m = rt_msg_mbuf(cmd, &rtinfo);
	if (m == NULL)
		return;

	rtm = mtod(m, struct rt_msghdr *);
	if (ifp != NULL)
		rtm->rtm_index = ifp->if_index;
	rtm->rtm_flags |= rt->rt_flags;
	rtm->rtm_errno = error;
	rtm->rtm_addrs = rtinfo.rti_addrs;

	rts_input(m, familyof(dst));
}

/*
 * This is called to generate messages from the routing socket
 * indicating a network interface has had addresses associated with it.
 * If we ever reverse the logic and replace messages TO the routing
 * socket with messages indicating a request to configure interfaces,
 * then it will be unnecessary as the routing socket will automatically
 * generate copies of it.
 */
void
rt_newaddrmsg(int cmd, struct ifaddr *ifa, int error, struct rtentry *rt)
{
	if (route_cb.any_count == 0)
		return;

	if (cmd == RTM_ADD) {
		rt_ifamsg(RTM_NEWADDR, ifa);
		rt_rtmsg(RTM_ADD, rt, ifa->ifa_ifp, error);
	} else {
		KASSERT((cmd == RTM_DELETE), ("unknown cmd %d", cmd));
		rt_rtmsg(RTM_DELETE, rt, ifa->ifa_ifp, error);
		rt_ifamsg(RTM_DELADDR, ifa);
	}
}

/*
 * This is the analogue to rt_newaddrmsg() which performs the same
 * function but for multicast group memberships.  This is easier since
 * there is no route state to worry about.
 */
void
rt_newmaddrmsg(int cmd, struct ifmultiaddr *ifma)
{
	struct rt_addrinfo rtinfo;
	struct mbuf *m = NULL;
	struct ifnet *ifp = ifma->ifma_ifp;
	struct ifma_msghdr *ifmam;

	if (route_cb.any_count == 0)
		return;

	bzero(&rtinfo, sizeof(struct rt_addrinfo));
	rtinfo.rti_ifaaddr = ifma->ifma_addr;
	if (ifp != NULL && !TAILQ_EMPTY(&ifp->if_addrheads[mycpuid])) {
		rtinfo.rti_ifpaddr =
		    TAILQ_FIRST(&ifp->if_addrheads[mycpuid])->ifa->ifa_addr;
	}
	/*
	 * If a link-layer address is present, present it as a ``gateway''
	 * (similarly to how ARP entries, e.g., are presented).
	 */
	rtinfo.rti_gateway = ifma->ifma_lladdr;

	m = rt_msg_mbuf(cmd, &rtinfo);
	if (m == NULL)
		return;

	ifmam = mtod(m, struct ifma_msghdr *);
	ifmam->ifmam_index = ifp->if_index;
	ifmam->ifmam_addrs = rtinfo.rti_addrs;

	rts_input(m, familyof(ifma->ifma_addr));
}

static struct mbuf *
rt_makeifannouncemsg(struct ifnet *ifp, int type, int what,
		     struct rt_addrinfo *info)
{
	struct if_announcemsghdr *ifan;
	struct mbuf *m;

	if (route_cb.any_count == 0)
		return NULL;

	bzero(info, sizeof(*info));
	m = rt_msg_mbuf(type, info);
	if (m == NULL)
		return NULL;

	ifan = mtod(m, struct if_announcemsghdr *);
	ifan->ifan_index = ifp->if_index;
	strlcpy(ifan->ifan_name, ifp->if_xname, sizeof ifan->ifan_name);
	ifan->ifan_what = what;
	return m;
}

/*
 * This is called to generate routing socket messages indicating
 * IEEE80211 wireless events.
 * XXX we piggyback on the RTM_IFANNOUNCE msg format in a clumsy way.
 */
void
rt_ieee80211msg(struct ifnet *ifp, int what, void *data, size_t data_len)
{
	struct rt_addrinfo info;
	struct mbuf *m;

	m = rt_makeifannouncemsg(ifp, RTM_IEEE80211, what, &info);
	if (m == NULL)
		return;

	/*
	 * Append the ieee80211 data.  Try to stick it in the
	 * mbuf containing the ifannounce msg; otherwise allocate
	 * a new mbuf and append.
	 *
	 * NB: we assume m is a single mbuf.
	 */
	if (data_len > M_TRAILINGSPACE(m)) {
		/* XXX use m_getb(data_len, M_NOWAIT, MT_DATA, 0); */
		struct mbuf *n = m_get(M_NOWAIT, MT_DATA);
		if (n == NULL) {
			m_freem(m);
			return;
		}
		KKASSERT(data_len <= M_TRAILINGSPACE(n));
		bcopy(data, mtod(n, void *), data_len);
		n->m_len = data_len;
		m->m_next = n;
	} else if (data_len > 0) {
		bcopy(data, mtod(m, u_int8_t *) + m->m_len, data_len);
		m->m_len += data_len;
	}
	mbuftrackid(m, 33);
	if (m->m_flags & M_PKTHDR)
		m->m_pkthdr.len += data_len;
	mtod(m, struct if_announcemsghdr *)->ifan_msglen += data_len;
	rts_input(m, 0);
}

/*
 * This is called to generate routing socket messages indicating
 * network interface arrival and departure.
 */
void
rt_ifannouncemsg(struct ifnet *ifp, int what)
{
	struct rt_addrinfo addrinfo;
	struct mbuf *m;

	m = rt_makeifannouncemsg(ifp, RTM_IFANNOUNCE, what, &addrinfo);
	if (m != NULL)
		rts_input(m, 0);
}

static int
resizewalkarg(struct walkarg *w, int len)
{
	void *newptr;

	newptr = kmalloc(len, M_RTABLE, M_INTWAIT | M_NULLOK);
	if (newptr == NULL)
		return (ENOMEM);
	if (w->w_tmem != NULL)
		kfree(w->w_tmem, M_RTABLE);
	w->w_tmem = newptr;
	w->w_tmemsize = len;
	return (0);
}

static void
ifnet_compute_stats(struct ifnet *ifp)
{
	IFNET_STAT_GET(ifp, ipackets, ifp->if_ipackets);
	IFNET_STAT_GET(ifp, ierrors, ifp->if_ierrors);
	IFNET_STAT_GET(ifp, opackets, ifp->if_opackets);
	IFNET_STAT_GET(ifp, collisions, ifp->if_collisions);
	IFNET_STAT_GET(ifp, ibytes, ifp->if_ibytes);
	IFNET_STAT_GET(ifp, obytes, ifp->if_obytes);
	IFNET_STAT_GET(ifp, imcasts, ifp->if_imcasts);
	IFNET_STAT_GET(ifp, omcasts, ifp->if_omcasts);
	IFNET_STAT_GET(ifp, iqdrops, ifp->if_iqdrops);
	IFNET_STAT_GET(ifp, noproto, ifp->if_noproto);
	IFNET_STAT_GET(ifp, oqdrops, ifp->if_oqdrops);
}

static int
if_addrflags(const struct ifaddr *ifa)
{
	switch (ifa->ifa_addr->sa_family) {
#ifdef INET6
	case AF_INET6:
		return ((const struct in6_ifaddr *)ifa)->ia6_flags;
#endif
	default:
		return 0;
	}
}

static int
sysctl_iflist(int af, struct walkarg *w)
{
	struct ifnet *ifp;
	struct rt_addrinfo rtinfo;
	int msglen, error;

	bzero(&rtinfo, sizeof(struct rt_addrinfo));

	ifnet_lock();
	TAILQ_FOREACH(ifp, &ifnetlist, if_link) {
		struct ifaddr_container *ifac, *ifac_mark;
		struct ifaddr_marker mark;
		struct ifaddrhead *head;
		struct ifaddr *ifa;

		if (w->w_arg && w->w_arg != ifp->if_index)
			continue;
		head = &ifp->if_addrheads[mycpuid];
		/*
		 * There is no need to reference the first ifaddr
		 * even if the following resizewalkarg() blocks,
		 * since the first ifaddr will not be destroyed
		 * when the ifnet lock is held.
		 */
		ifac = TAILQ_FIRST(head);
		ifa = ifac->ifa;
		rtinfo.rti_ifpaddr = ifa->ifa_addr;
		msglen = rt_msgsize(RTM_IFINFO, &rtinfo);
		if (w->w_tmemsize < msglen && resizewalkarg(w, msglen) != 0) {
			ifnet_unlock();
			return (ENOMEM);
		}
		rt_msg_buffer(RTM_IFINFO, &rtinfo, w->w_tmem, msglen);
		rtinfo.rti_ifpaddr = NULL;
		if (w->w_req != NULL && w->w_tmem != NULL) {
			struct if_msghdr *ifm = w->w_tmem;

			ifm->ifm_index = ifp->if_index;
			ifm->ifm_flags = ifp->if_flags;
			ifnet_compute_stats(ifp);
			ifm->ifm_data = ifp->if_data;
			ifm->ifm_addrs = rtinfo.rti_addrs;
			error = SYSCTL_OUT(w->w_req, ifm, msglen);
			if (error) {
				ifnet_unlock();
				return (error);
			}
		}
		/*
		 * Add a marker, since SYSCTL_OUT() could block and during
		 * that period the list could be changed.
		 */
		ifa_marker_init(&mark, ifp);
		ifac_mark = &mark.ifac;
		TAILQ_INSERT_AFTER(head, ifac, ifac_mark, ifa_link);
		while ((ifac = TAILQ_NEXT(ifac_mark, ifa_link)) != NULL) {
			TAILQ_REMOVE(head, ifac_mark, ifa_link);
			TAILQ_INSERT_AFTER(head, ifac, ifac_mark, ifa_link);

			ifa = ifac->ifa;

			/* Ignore marker */
			if (ifa->ifa_addr->sa_family == AF_UNSPEC)
				continue;

			if (af && af != ifa->ifa_addr->sa_family)
				continue;
			if (curproc->p_ucred->cr_prison &&
			    prison_if(curproc->p_ucred, ifa->ifa_addr))
				continue;
			rtinfo.rti_ifaaddr = ifa->ifa_addr;
			rtinfo.rti_netmask = ifa->ifa_netmask;
			rtinfo.rti_bcastaddr = ifa->ifa_dstaddr;
			msglen = rt_msgsize(RTM_NEWADDR, &rtinfo);
			/*
			 * Keep a reference on this ifaddr, so that it will
			 * not be destroyed if the following resizewalkarg()
			 * blocks.
			 */
			IFAREF(ifa);
			if (w->w_tmemsize < msglen &&
			    resizewalkarg(w, msglen) != 0) {
				IFAFREE(ifa);
				TAILQ_REMOVE(head, ifac_mark, ifa_link);
				ifnet_unlock();
				return (ENOMEM);
			}
			rt_msg_buffer(RTM_NEWADDR, &rtinfo, w->w_tmem, msglen);
			if (w->w_req != NULL) {
				struct ifa_msghdr *ifam = w->w_tmem;

				ifam->ifam_index = ifa->ifa_ifp->if_index;
				ifam->ifam_flags = ifa->ifa_flags;
				ifam->ifam_addrs = rtinfo.rti_addrs;
				ifam->ifam_pid = 0;
				ifam->ifam_addrflags = if_addrflags(ifa);
				ifam->ifam_metric = ifa->ifa_metric;
				error = SYSCTL_OUT(w->w_req, w->w_tmem, msglen);
				if (error) {
					IFAFREE(ifa);
					TAILQ_REMOVE(head, ifac_mark, ifa_link);
					ifnet_unlock();
					return (error);
				}
			}
			IFAFREE(ifa);
		}
		TAILQ_REMOVE(head, ifac_mark, ifa_link);
		rtinfo.rti_netmask = NULL;
		rtinfo.rti_ifaaddr = NULL;
		rtinfo.rti_bcastaddr = NULL;
	}
	ifnet_unlock();
	return (0);
}

static int
rttable_walkarg_create(struct rttable_walkarg *w, int op, int arg)
{
	struct rt_addrinfo rtinfo;
	struct sockaddr_storage ss;
	int i, msglen;

	memset(w, 0, sizeof(*w));
	w->w_op = op;
	w->w_arg = arg;

	memset(&ss, 0, sizeof(ss));
	ss.ss_len = sizeof(ss);

	memset(&rtinfo, 0, sizeof(rtinfo));
	for (i = 0; i < RTAX_MAX; ++i)
		rtinfo.rti_info[i] = (struct sockaddr *)&ss;
	msglen = rt_msgsize(RTM_GET, &rtinfo);

	w->w_bufsz = msglen * RTTABLE_DUMP_MSGCNT_MAX;
	w->w_buf = kmalloc(w->w_bufsz, M_TEMP, M_WAITOK | M_NULLOK);
	if (w->w_buf == NULL)
		return ENOMEM;
	return 0;
}

static void
rttable_walkarg_destroy(struct rttable_walkarg *w)
{
	kfree(w->w_buf, M_TEMP);
}

static void
rttable_entry_rtinfo(struct rt_addrinfo *rtinfo, struct radix_node *rn)
{
	struct rtentry *rt = (struct rtentry *)rn;

	bzero(rtinfo, sizeof(*rtinfo));
	rtinfo->rti_dst = rt_key(rt);
	rtinfo->rti_gateway = rt->rt_gateway;
	rtinfo->rti_netmask = rt_mask(rt);
	rtinfo->rti_genmask = rt->rt_genmask;
	if (rt->rt_ifp != NULL) {
		rtinfo->rti_ifpaddr =
		    TAILQ_FIRST(&rt->rt_ifp->if_addrheads[mycpuid])->ifa->ifa_addr;
		rtinfo->rti_ifaaddr = rt->rt_ifa->ifa_addr;
		if (rt->rt_ifp->if_flags & IFF_POINTOPOINT)
			rtinfo->rti_bcastaddr = rt->rt_ifa->ifa_dstaddr;
	}
}

static int
rttable_walk_entry(struct radix_node *rn, void *xw)
{
	struct rttable_walkarg *w = xw;
	struct rtentry *rt = (struct rtentry *)rn;
	struct rt_addrinfo rtinfo;
	struct rt_msghdr *rtm;
	boolean_t save = FALSE;
	int msglen, w_bufleft;
	void *ptr;

	rttable_entry_rtinfo(&rtinfo, rn);
	msglen = rt_msgsize(RTM_GET, &rtinfo);

	w_bufleft = w->w_bufsz - w->w_buflen;

	if (rn->rn_dupedkey != NULL) {
		struct radix_node *rn1 = rn;
		int total_msglen = msglen;

		/*
		 * Make sure that we have enough space left for all
		 * dupedkeys, since rn_walktree_at always starts
		 * from the first dupedkey.
		 */
		while ((rn1 = rn1->rn_dupedkey) != NULL) {
			struct rt_addrinfo rtinfo1;
			int msglen1;

			if (rn1->rn_flags & RNF_ROOT)
				continue;

			rttable_entry_rtinfo(&rtinfo1, rn1);
			msglen1 = rt_msgsize(RTM_GET, &rtinfo1);
			total_msglen += msglen1;
		}

		if (total_msglen > w_bufleft) {
			if (total_msglen > w->w_bufsz) {
				static int logged = 0;

				if (!logged) {
					kprintf("buffer is too small for "
					    "all dupedkeys, increase "
					    "RTTABLE_DUMP_MSGCNT_MAX\n");
					logged = 1;
				}
				return ENOMEM;
			}
			save = TRUE;
		}
	} else if (msglen > w_bufleft) {
		save = TRUE;
	}

	if (save) {
		/*
		 * Not enough buffer left; remember the position
		 * to start from upon next round.
		 */
		KASSERT(msglen <= w->w_bufsz, ("msg too long %d", msglen));

		KASSERT(rtinfo.rti_dst->sa_len <= sizeof(w->w_key0),
		    ("key too long %d", rtinfo.rti_dst->sa_len));
		memset(&w->w_key0, 0, sizeof(w->w_key0));
		memcpy(&w->w_key0, rtinfo.rti_dst, rtinfo.rti_dst->sa_len);
		w->w_key = (const char *)&w->w_key0;

		if (rtinfo.rti_netmask != NULL) {
			KASSERT(
			    rtinfo.rti_netmask->sa_len <= sizeof(w->w_mask0),
			    ("mask too long %d", rtinfo.rti_netmask->sa_len));
			memset(&w->w_mask0, 0, sizeof(w->w_mask0));
			memcpy(&w->w_mask0, rtinfo.rti_netmask,
			    rtinfo.rti_netmask->sa_len);
			w->w_mask = (const char *)&w->w_mask0;
		} else {
			w->w_mask = NULL;
		}
		return EJUSTRETURN;
	}

	if (w->w_op == NET_RT_FLAGS && !(rt->rt_flags & w->w_arg))
		return 0;

	ptr = ((uint8_t *)w->w_buf) + w->w_buflen;
	rt_msg_buffer(RTM_GET, &rtinfo, ptr, msglen);

	rtm = (struct rt_msghdr *)ptr;
	rtm->rtm_flags = rt->rt_flags;
	rtm->rtm_use = rt->rt_use;
	rtm->rtm_rmx = rt->rt_rmx;
	rtm->rtm_index = rt->rt_ifp->if_index;
	rtm->rtm_errno = rtm->rtm_pid = rtm->rtm_seq = 0;
	rtm->rtm_addrs = rtinfo.rti_addrs;

	w->w_buflen += msglen;

	return 0;
}

static void
rttable_walk_dispatch(netmsg_t msg)
{
	struct netmsg_rttable_walk *nmsg = (struct netmsg_rttable_walk *)msg;
	struct radix_node_head *rnh = rt_tables[mycpuid][nmsg->af];
	struct rttable_walkarg *w = nmsg->w;
	int error;

	error = rnh->rnh_walktree_at(rnh, w->w_key, w->w_mask,
	    rttable_walk_entry, w);
	lwkt_replymsg(&nmsg->base.lmsg, error);
}

static int
sysctl_rttable(int af, struct sysctl_req *req, int op, int arg)
{
	struct rttable_walkarg w;
	int error, i;

	error = rttable_walkarg_create(&w, op, arg);
	if (error)
		return error;

	error = EINVAL;
	for (i = 1; i <= AF_MAX; i++) {
		if (rt_tables[mycpuid][i] != NULL && (af == 0 || af == i)) {
			w.w_key = NULL;
			w.w_mask = NULL;
			for (;;) {
				struct netmsg_rttable_walk nmsg;

				netmsg_init(&nmsg.base, NULL,
				    &curthread->td_msgport, 0,
				    rttable_walk_dispatch);
				nmsg.af = i;
				nmsg.w = &w;

				w.w_buflen = 0;

				error = lwkt_domsg(netisr_cpuport(mycpuid),
				    &nmsg.base.lmsg, 0);
				if (error && error != EJUSTRETURN)
					goto done;

				if (req != NULL && w.w_buflen > 0) {
					int error1;

					error1 = SYSCTL_OUT(req, w.w_buf,
					    w.w_buflen);
					if (error1) {
						error = error1;
						goto done;
					}
				}
				if (error == 0)	/* done */
					break;
			}
		}
	}
done:
	rttable_walkarg_destroy(&w);
	return error;
}

static int
sysctl_rtsock(SYSCTL_HANDLER_ARGS)
{
	int *name = (int *)arg1;
	u_int namelen = arg2;
	int error = EINVAL;
	int origcpu, cpu;
	u_char af;
	struct walkarg w;

	name++;
	namelen--;
	if (req->newptr)
		return (EPERM);
	if (namelen != 3 && namelen != 4)
		return (EINVAL);
	af = name[0];
	bzero(&w, sizeof w);
	w.w_op = name[1];
	w.w_arg = name[2];
	w.w_req = req;

	/*
	 * Optional third argument specifies cpu, used primarily for
	 * debugging the route table.
	 */
	if (namelen == 4) {
		if (name[3] < 0 || name[3] >= netisr_ncpus)
			return (EINVAL);
		cpu = name[3];
	} else {
		/*
		 * Target cpu is not specified, use cpu0 then, so that
		 * the result set will be relatively stable.
		 */
		cpu = 0;
	}
	origcpu = mycpuid;
	lwkt_migratecpu(cpu);

	switch (w.w_op) {
	case NET_RT_DUMP:
	case NET_RT_FLAGS:
		error = sysctl_rttable(af, w.w_req, w.w_op, w.w_arg);
		break;

	case NET_RT_IFLIST:
		error = sysctl_iflist(af, &w);
		break;
	}
	if (w.w_tmem != NULL)
		kfree(w.w_tmem, M_RTABLE);

	lwkt_migratecpu(origcpu);
	return (error);
}

SYSCTL_NODE(_net, PF_ROUTE, routetable, CTLFLAG_RD, sysctl_rtsock, "");

/*
 * Definitions of protocols supported in the ROUTE domain.
 */

static struct domain routedomain;		/* or at least forward */

static struct protosw routesw[] = {
	{
		.pr_type = SOCK_RAW,
		.pr_domain = &routedomain,
		.pr_protocol = 0,
		.pr_flags = PR_ATOMIC|PR_ADDR,
		.pr_input = NULL,
		.pr_output = route_output,
		.pr_ctlinput = raw_ctlinput,
		.pr_ctloutput = route_ctloutput,
		.pr_ctlport = cpu0_ctlport,

		.pr_init = raw_init,
		.pr_usrreqs = &route_usrreqs
	}
};

static struct domain routedomain = {
	.dom_family = AF_ROUTE,
	.dom_name = "route",
	.dom_init = NULL,
	.dom_externalize = NULL,
	.dom_dispose = NULL,
	.dom_protosw = routesw,
	.dom_protoswNPROTOSW = &routesw[(sizeof routesw)/(sizeof routesw[0])],
	.dom_next = SLIST_ENTRY_INITIALIZER,
	.dom_rtattach = NULL,
	.dom_rtoffset = 0,
	.dom_maxrtkey = 0,
	.dom_ifattach = NULL,
	.dom_ifdetach = NULL
};

DOMAIN_SET(route);
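
/*
 * Usage sketch (userland, illustrative only): the route table and interface
 * list exported by sysctl_rtsock() are normally fetched with a mib of the
 * form {CTL_NET, PF_ROUTE, 0, af, op, arg}, e.g.
 *
 *	int mib[6] = { CTL_NET, PF_ROUTE, 0, AF_INET, NET_RT_DUMP, 0 };
 *	size_t len;
 *
 *	sysctl(mib, 6, NULL, &len, NULL, 0);	(size the buffer)
 *	buf = malloc(len);
 *	sysctl(mib, 6, buf, &len, NULL, 0);	(fetch the RTM_GET records)
 *
 * A seventh mib element may name a cpu whose table should be dumped; see
 * the namelen == 4 case in sysctl_rtsock() above.
 */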