/* $OpenBSD: rde.c,v 1.31 2024/11/21 13:38:15 claudio Exp $ */

/*
 * Copyright (c) 2006 Michele Marchetto <mydecay@openbeer.it>
 * Copyright (c) 2004, 2005 Claudio Jeker <claudio@openbsd.org>
 * Copyright (c) 2004 Esben Norby <norby@openbsd.org>
 * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/socket.h>
#include <sys/queue.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <err.h>
#include <errno.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <pwd.h>
#include <unistd.h>
#include <event.h>

#include "ripd.h"
#include "rip.h"
#include "ripe.h"
#include "log.h"
#include "rde.h"

#define MINIMUM(a, b)   (((a) < (b)) ? (a) : (b))

struct ripd_conf *rdeconf = NULL;
static struct imsgev *iev_ripe;
static struct imsgev *iev_main;

void rde_sig_handler(int, short, void *);
__dead void rde_shutdown(void);
void rde_dispatch_imsg(int, short, void *);
void rde_dispatch_parent(int, short, void *);
int rde_imsg_compose_ripe(int, u_int32_t, pid_t, void *, u_int16_t);
int rde_check_route(struct rip_route *);
void triggered_update(struct rt_node *);

void
rde_sig_handler(int sig, short event, void *arg)
{
    /*
     * signal handler rules don't apply, libevent decouples for us
     */

    switch (sig) {
    case SIGINT:
    case SIGTERM:
        rde_shutdown();
        /* NOTREACHED */
    default:
        fatalx("unexpected signal");
    }
}

/* route decision engine */
pid_t
rde(struct ripd_conf *xconf, int pipe_parent2rde[2], int pipe_ripe2rde[2],
    int pipe_parent2ripe[2])
{
    struct event ev_sigint, ev_sigterm;
    struct passwd *pw;
    struct redistribute *r;
    pid_t pid;

    switch (pid = fork()) {
    case -1:
        fatal("cannot fork");
        /* NOTREACHED */
    case 0:
        break;
    default:
        return (pid);
    }

    rdeconf = xconf;

    if ((pw = getpwnam(RIPD_USER)) == NULL)
        fatal("getpwnam");

    if (chroot(pw->pw_dir) == -1)
        fatal("chroot");
    if (chdir("/") == -1)
        fatal("chdir(\"/\")");

    setproctitle("route decision engine");
    log_procname = "rde";

    if (setgroups(1, &pw->pw_gid) ||
        setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) ||
        setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid))
        fatal("can't drop privileges");

    if (pledge("stdio", NULL) == -1)
        fatal("pledge");

    event_init();

    /* setup signal handler */
    signal_set(&ev_sigint, SIGINT, rde_sig_handler, NULL);
    signal_set(&ev_sigterm, SIGTERM, rde_sig_handler, NULL);
    signal_add(&ev_sigint, NULL);
    signal_add(&ev_sigterm, NULL);
    signal(SIGPIPE, SIG_IGN);
    signal(SIGHUP, SIG_IGN);

    /* setup pipes */
    close(pipe_ripe2rde[0]);
    close(pipe_parent2rde[0]);
    close(pipe_parent2ripe[0]);
    close(pipe_parent2ripe[1]);

    if ((iev_ripe = malloc(sizeof(struct imsgev))) == NULL ||
        (iev_main = malloc(sizeof(struct imsgev))) == NULL)
        fatal(NULL);
    if (imsgbuf_init(&iev_ripe->ibuf, pipe_ripe2rde[1]) == -1)
        fatal(NULL);
    iev_ripe->handler = rde_dispatch_imsg;
    if (imsgbuf_init(&iev_main->ibuf, pipe_parent2rde[1]) == -1)
        fatal(NULL);
    iev_main->handler = rde_dispatch_parent;

    /* setup event handler */
    iev_ripe->events = EV_READ;
    event_set(&iev_ripe->ev, iev_ripe->ibuf.fd, iev_ripe->events,
        iev_ripe->handler, iev_ripe);
    event_add(&iev_ripe->ev, NULL);

    iev_main->events = EV_READ;
    event_set(&iev_main->ev, iev_main->ibuf.fd, iev_main->events,
        iev_main->handler, iev_main);
    event_add(&iev_main->ev, NULL);
    rt_init();

    /* remove unneeded config stuff */
    while ((r = SIMPLEQ_FIRST(&rdeconf->redist_list)) != NULL) {
        SIMPLEQ_REMOVE_HEAD(&rdeconf->redist_list, entry);
        free(r);
    }

    event_dispatch();

    rde_shutdown();
    /* NOTREACHED */

    return (0);
}

__dead void
rde_shutdown(void)
{
    /* close pipes */
    imsgbuf_clear(&iev_ripe->ibuf);
    close(iev_ripe->ibuf.fd);
    imsgbuf_clear(&iev_main->ibuf);
    close(iev_main->ibuf.fd);

    rt_clear();
    free(iev_ripe);
    free(iev_main);
    free(rdeconf);

    log_info("route decision engine exiting");
    _exit(0);
}

int
rde_imsg_compose_ripe(int type, u_int32_t peerid, pid_t pid, void *data,
    u_int16_t datalen)
{
    return (imsg_compose_event(iev_ripe, type, peerid, pid, -1,
        data, datalen));
}

void
rde_dispatch_imsg(int fd, short event, void *bula)
{
    struct imsgev *iev = bula;
    struct imsgbuf *ibuf = &iev->ibuf;
    struct rip_route rr;
    struct imsg imsg;
    ssize_t n;
    int shut = 0, verbose;

    if (event & EV_READ) {
        if ((n = imsgbuf_read(ibuf)) == -1)
            fatal("imsgbuf_read error");
        if (n == 0)     /* connection closed */
            shut = 1;
    }
    if (event & EV_WRITE) {
        if (imsgbuf_write(ibuf) == -1) {
            if (errno == EPIPE)     /* connection closed */
                shut = 1;
            else
                fatal("imsgbuf_write");
        }
    }

    for (;;) {
        if ((n = imsg_get(ibuf, &imsg)) == -1)
            fatal("rde_dispatch_imsg: imsg_get error");
        if (n == 0)
            break;

        switch (imsg.hdr.type) {
        case IMSG_ROUTE_FEED:
            if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(rr))
                fatalx("invalid size of RDE request");

            memcpy(&rr, imsg.data, sizeof(rr));

            if (rde_check_route(&rr) == -1)
                log_debug("rde_dispatch_imsg: "
                    "packet malformed\n");
            break;
        case IMSG_FULL_REQUEST:
            bzero(&rr, sizeof(rr));
            /*
             * AFI == 0 && metric == INFINITY request the
             * whole routing table
             */
            rr.metric = INFINITY;
            rde_imsg_compose_ripe(IMSG_REQUEST_ADD, 0,
                0, &rr, sizeof(rr));
            rde_imsg_compose_ripe(IMSG_SEND_REQUEST, 0,
                0, NULL, 0);
            break;
        case IMSG_FULL_RESPONSE:
            rt_snap(imsg.hdr.peerid);
            rde_imsg_compose_ripe(IMSG_SEND_RESPONSE,
                imsg.hdr.peerid, 0, NULL, 0);
            break;
        case IMSG_ROUTE_REQUEST:
            if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(rr))
                fatalx("invalid size of RDE request");

            memcpy(&rr, imsg.data, sizeof(rr));

            rt_complete(&rr);
            rde_imsg_compose_ripe(IMSG_RESPONSE_ADD,
                imsg.hdr.peerid, 0, &rr, sizeof(rr));

            break;
        case IMSG_ROUTE_REQUEST_END:
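            /* all requested routes have been queued via
             * IMSG_RESPONSE_ADD; tell ripe to transmit the
             * response to this peer */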
            rde_imsg_compose_ripe(IMSG_SEND_RESPONSE,
                imsg.hdr.peerid, 0, NULL, 0);
            break;
        case IMSG_CTL_SHOW_RIB:
            rt_dump(imsg.hdr.pid);

            imsg_compose_event(iev_ripe, IMSG_CTL_END, 0,
                imsg.hdr.pid, -1, NULL, 0);

            break;
        case IMSG_CTL_LOG_VERBOSE:
            /* already checked by ripe */
            memcpy(&verbose, imsg.data, sizeof(verbose));
            log_verbose(verbose);
            break;
        default:
            log_debug("rde_dispatch_imsg: unexpected imsg %d",
                imsg.hdr.type);
            break;
        }
        imsg_free(&imsg);
    }
    if (!shut)
        imsg_event_add(iev);
    else {
        /* this pipe is dead, so remove the event handler */
        event_del(&iev->ev);
        event_loopexit(NULL);
    }
}

void
rde_dispatch_parent(int fd, short event, void *bula)
{
    struct imsg imsg;
    struct rt_node *rt;
    struct kroute kr;
    struct imsgev *iev = bula;
    struct imsgbuf *ibuf = &iev->ibuf;
    ssize_t n;
    int shut = 0;

    if (event & EV_READ) {
        if ((n = imsgbuf_read(ibuf)) == -1)
            fatal("imsgbuf_read error");
        if (n == 0)     /* connection closed */
            shut = 1;
    }
    if (event & EV_WRITE) {
        if (imsgbuf_write(ibuf) == -1) {
            if (errno == EPIPE)     /* connection closed */
                shut = 1;
            else
                fatal("imsgbuf_write");
        }
    }

    for (;;) {
        if ((n = imsg_get(ibuf, &imsg)) == -1)
            fatal("rde_dispatch_parent: imsg_get error");
        if (n == 0)
            break;

        switch (imsg.hdr.type) {
        case IMSG_NETWORK_ADD:
            if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(kr)) {
                log_warnx("rde_dispatch: wrong imsg len");
                break;
            }

            memcpy(&kr, imsg.data, sizeof(kr));

            rt = rt_new_kr(&kr);
            rt_insert(rt);
            break;
        case IMSG_NETWORK_DEL:
            if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(kr)) {
                log_warnx("rde_dispatch: wrong imsg len");
                break;
            }
            memcpy(&kr, imsg.data, sizeof(kr));

            if ((rt = rt_find(kr.prefix.s_addr,
                kr.netmask.s_addr)) != NULL)
                rt_remove(rt);
            break;
        default:
            log_debug("rde_dispatch_parent: unexpected imsg %d",
                imsg.hdr.type);
            break;
        }
        imsg_free(&imsg);
    }
    if (!shut)
        imsg_event_add(iev);
    else {
        /* this pipe is dead, so remove the event handler */
        event_del(&iev->ev);
        event_loopexit(NULL);
    }
}

void
rde_send_change_kroute(struct rt_node *r)
{
    struct kroute kr;

    bzero(&kr, sizeof(kr));
    kr.prefix.s_addr = r->prefix.s_addr;
    kr.nexthop.s_addr = r->nexthop.s_addr;
    kr.netmask.s_addr = r->netmask.s_addr;
    kr.metric = r->metric;
    kr.flags = r->flags;
    kr.ifindex = r->ifindex;

    imsg_compose_event(iev_main, IMSG_KROUTE_CHANGE, 0, 0, -1,
        &kr, sizeof(kr));
}

void
rde_send_delete_kroute(struct rt_node *r)
{
    struct kroute kr;

    bzero(&kr, sizeof(kr));
    kr.prefix.s_addr = r->prefix.s_addr;
    kr.nexthop.s_addr = r->nexthop.s_addr;
    kr.netmask.s_addr = r->netmask.s_addr;
    kr.metric = r->metric;
    kr.flags = r->flags;
    kr.ifindex = r->ifindex;

    imsg_compose_event(iev_main, IMSG_KROUTE_DELETE, 0, 0, -1,
        &kr, sizeof(kr));
}

int
rde_check_route(struct rip_route *e)
{
    struct timeval tv, now;
    struct rt_node *rn;
    struct iface *iface;
    u_int8_t metric;

    if ((e->nexthop.s_addr & htonl(IN_CLASSA_NET)) ==
        htonl(INADDR_LOOPBACK & IN_CLASSA_NET) ||
        e->nexthop.s_addr == INADDR_ANY)
        return (-1);

    if ((iface = if_find_index(e->ifindex)) == NULL)
        return (-1);

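    /* per RFC 2453, advance the metric by the cost of the receiving
     * interface, never beyond INFINITY */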
    metric = MINIMUM(INFINITY, e->metric + iface->cost);

    if ((rn = rt_find(e->address.s_addr, e->mask.s_addr)) == NULL) {
        if (metric >= INFINITY)
            return (0);
        rn = rt_new_rr(e, metric);
        rt_insert(rn);
        rde_send_change_kroute(rn);
        route_start_timeout(rn);
        triggered_update(rn);
    } else {
        /*
         * XXX don't we have to track all incoming routes?
         * what happens if the kernel route is removed later.
         */
        if (rn->flags & F_KERNEL)
            return (0);

        if (metric < rn->metric) {
            rn->metric = metric;
            rn->nexthop.s_addr = e->nexthop.s_addr;
            rn->ifindex = e->ifindex;
            rde_send_change_kroute(rn);
            triggered_update(rn);
        } else if (e->nexthop.s_addr == rn->nexthop.s_addr &&
            metric > rn->metric) {
            rn->metric = metric;
            rde_send_change_kroute(rn);
            triggered_update(rn);
            if (rn->metric == INFINITY)
                route_start_garbage(rn);
        } else if (e->nexthop.s_addr != rn->nexthop.s_addr &&
            metric == rn->metric) {
            /* If the new metric is the same as the old one,
             * examine the timeout for the existing route. If it
             * is at least halfway to the expiration point, switch
             * to the new route.
             */
            timerclear(&tv);
            gettimeofday(&now, NULL);
            evtimer_pending(&rn->timeout_timer, &tv);
            if (tv.tv_sec - now.tv_sec < ROUTE_TIMEOUT / 2) {
                rn->nexthop.s_addr = e->nexthop.s_addr;
                rn->ifindex = e->ifindex;
                rde_send_change_kroute(rn);
            }
        }

        if (e->nexthop.s_addr == rn->nexthop.s_addr &&
            rn->metric < INFINITY)
            route_reset_timers(rn);
    }

    return (0);
}

void
triggered_update(struct rt_node *rn)
{
    struct rip_route rr;

    rr.address.s_addr = rn->prefix.s_addr;
    rr.mask.s_addr = rn->netmask.s_addr;
    rr.nexthop.s_addr = rn->nexthop.s_addr;
    rr.metric = rn->metric;
    rr.ifindex = rn->ifindex;

    rde_imsg_compose_ripe(IMSG_SEND_TRIGGERED_UPDATE, 0, 0, &rr,
        sizeof(struct rip_route));
}