/*	$OpenBSD: rde.c,v 1.9 2007/10/24 20:38:03 claudio Exp $ */

/*
 * Copyright (c) 2006 Michele Marchetto <mydecay@openbeer.it>
 * Copyright (c) 2004, 2005 Claudio Jeker <claudio@openbsd.org>
 * Copyright (c) 2004 Esben Norby <norby@openbsd.org>
 * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/types.h>
#include <sys/socket.h>
#include <sys/queue.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <err.h>
#include <errno.h>
#include <stdlib.h>
#include <signal.h>
#include <string.h>
#include <pwd.h>
#include <unistd.h>
#include <event.h>

#include "ripd.h"
#include "rip.h"
#include "ripe.h"
#include "log.h"
#include "rde.h"

struct ripd_conf	*rdeconf = NULL;
struct imsgbuf		*ibuf_ripe;
struct imsgbuf		*ibuf_main;

void	rde_sig_handler(int, short, void *);
void	rde_shutdown(void);
void	rde_dispatch_imsg(int, short, void *);
void	rde_dispatch_parent(int, short, void *);
int	rde_imsg_compose_ripe(int, u_int32_t, pid_t, void *, u_int16_t);
int	rde_check_route(struct rip_route *);
void	triggered_update(struct rt_node *);

/* ARGSUSED */
void
rde_sig_handler(int sig, short event, void *arg)
{
	/*
	 * signal handler rules don't apply, libevent decouples for us
	 */

	switch (sig) {
	case SIGINT:
	case SIGTERM:
		rde_shutdown();
		/* NOTREACHED */
	default:
		fatalx("unexpected signal");
	}
}

/* route decision engine */
pid_t
rde(struct ripd_conf *xconf, int pipe_parent2rde[2], int pipe_ripe2rde[2],
    int pipe_parent2ripe[2])
{
	struct event		 ev_sigint, ev_sigterm;
	struct passwd		*pw;
	struct redistribute	*r;
	pid_t			 pid;

	switch (pid = fork()) {
	case -1:
		fatal("cannot fork");
		/* NOTREACHED */
	case 0:
		break;
	default:
		return (pid);
	}

	rdeconf = xconf;

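	/* look up the unprivileged user we chroot to and run as */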
	if ((pw = getpwnam(RIPD_USER)) == NULL)
		fatal("getpwnam");

	if (chroot(pw->pw_dir) == -1)
		fatal("chroot");
	if (chdir("/") == -1)
		fatal("chdir(\"/\")");

	setproctitle("route decision engine");
	ripd_process = PROC_RDE_ENGINE;

	if (setgroups(1, &pw->pw_gid) ||
	    setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) ||
	    setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid))
		fatal("can't drop privileges");

	event_init();

	/* setup signal handler */
	signal_set(&ev_sigint, SIGINT, rde_sig_handler, NULL);
	signal_set(&ev_sigterm, SIGTERM, rde_sig_handler, NULL);
	signal_add(&ev_sigint, NULL);
	signal_add(&ev_sigterm, NULL);
	signal(SIGPIPE, SIG_IGN);
	signal(SIGHUP, SIG_IGN);

	/* setup pipes */
	close(pipe_ripe2rde[0]);
	close(pipe_parent2rde[0]);
	close(pipe_parent2ripe[0]);
	close(pipe_parent2ripe[1]);

	if ((ibuf_ripe = malloc(sizeof(struct imsgbuf))) == NULL ||
	    (ibuf_main = malloc(sizeof(struct imsgbuf))) == NULL)
		fatal(NULL);
	imsg_init(ibuf_ripe, pipe_ripe2rde[1], rde_dispatch_imsg);
	imsg_init(ibuf_main, pipe_parent2rde[1], rde_dispatch_parent);

	/* setup event handler */
	ibuf_ripe->events = EV_READ;
	event_set(&ibuf_ripe->ev, ibuf_ripe->fd, ibuf_ripe->events,
	    ibuf_ripe->handler, ibuf_ripe);
	event_add(&ibuf_ripe->ev, NULL);

	ibuf_main->events = EV_READ;
	event_set(&ibuf_main->ev, ibuf_main->fd, ibuf_main->events,
	    ibuf_main->handler, ibuf_main);
	event_add(&ibuf_main->ev, NULL);
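	/* initialize the RDE's route table */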
	rt_init();

	/* remove unneeded config stuff */
	while ((r = SIMPLEQ_FIRST(&rdeconf->redist_list)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&rdeconf->redist_list, entry);
		free(r);
	}

	event_dispatch();

	rde_shutdown();
	/* NOTREACHED */

	return (0);
}

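/* flush the route table, free the imsg buffers and exit */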
void
rde_shutdown(void)
{
	rt_clear();

	msgbuf_clear(&ibuf_ripe->w);
	free(ibuf_ripe);
	msgbuf_clear(&ibuf_main->w);
	free(ibuf_main);
	free(rdeconf);

	log_info("route decision engine exiting");
	_exit(0);
}

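/* wrapper to queue an imsg for the ripe (RIP protocol engine) process */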
int
rde_imsg_compose_ripe(int type, u_int32_t peerid, pid_t pid, void *data,
    u_int16_t datalen)
{
	return (imsg_compose(ibuf_ripe, type, peerid, pid, data, datalen));
}

/* ARGSUSED */
void
rde_dispatch_imsg(int fd, short event, void *bula)
{
	struct imsgbuf		*ibuf = bula;
	struct rip_route	 rr;
	struct imsg		 imsg;
	ssize_t			 n;
	int			 shut = 0;

	switch (event) {
	case EV_READ:
		if ((n = imsg_read(ibuf)) == -1)
			fatal("imsg_read error");
		if (n == 0)	/* connection closed */
			shut = 1;
		break;
	case EV_WRITE:
		if (msgbuf_write(&ibuf->w) == -1)
			fatal("msgbuf_write");
		imsg_event_add(ibuf);
		return;
	default:
		fatalx("unknown event");
	}

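	/* drain all complete imsgs queued on the pipe from the ripe process */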
	for (;;) {
		if ((n = imsg_get(ibuf, &imsg)) == -1)
			fatal("rde_dispatch_imsg: imsg_get error");
		if (n == 0)
			break;

		switch (imsg.hdr.type) {
		case IMSG_ROUTE_FEED:
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(rr))
				fatalx("invalid size of RDE request");

			memcpy(&rr, imsg.data, sizeof(rr));

			if (rde_check_route(&rr) == -1)
				log_debug("rde_dispatch_imsg: "
				    "packet malformed");
			break;
		case IMSG_FULL_REQUEST:
			bzero(&rr, sizeof(rr));
			/*
			 * An entry with AFI == 0 and metric == INFINITY
			 * requests the whole routing table
			 */
			rr.metric = INFINITY;
			rde_imsg_compose_ripe(IMSG_REQUEST_ADD, 0,
			    0, &rr, sizeof(rr));
			rde_imsg_compose_ripe(IMSG_SEND_REQUEST, 0,
			    0, NULL, 0);
			break;
		case IMSG_FULL_RESPONSE:
			rt_snap(imsg.hdr.peerid);
			rde_imsg_compose_ripe(IMSG_SEND_RESPONSE,
			    imsg.hdr.peerid, 0, NULL, 0);
			break;
		case IMSG_ROUTE_REQUEST:
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(rr))
				fatalx("invalid size of RDE request");

			memcpy(&rr, imsg.data, sizeof(rr));

			rt_complete(&rr);
			rde_imsg_compose_ripe(IMSG_RESPONSE_ADD,
			    imsg.hdr.peerid, 0, &rr, sizeof(rr));

			break;
		case IMSG_ROUTE_REQUEST_END:
			rde_imsg_compose_ripe(IMSG_SEND_RESPONSE,
			    imsg.hdr.peerid, 0, NULL, 0);
			break;
		case IMSG_CTL_SHOW_RIB:
			rt_dump(imsg.hdr.pid);

			imsg_compose(ibuf_ripe, IMSG_CTL_END, 0, imsg.hdr.pid,
			    NULL, 0);

			break;
		default:
			log_debug("rde_dispatch_imsg: unexpected imsg %d",
			    imsg.hdr.type);
			break;
		}
		imsg_free(&imsg);
	}
	if (!shut)
		imsg_event_add(ibuf);
	else {
		/* this pipe is dead, so remove the event handler */
		event_del(&ibuf->ev);
		event_loopexit(NULL);
	}
}

/* ARGSUSED */
void
rde_dispatch_parent(int fd, short event, void *bula)
{
	struct imsg		 imsg;
	struct rt_node		*rt;
	struct kroute		 kr;
	struct imsgbuf		*ibuf = bula;
	ssize_t			 n;
	int			 shut = 0;

	switch (event) {
	case EV_READ:
		if ((n = imsg_read(ibuf)) == -1)
			fatal("imsg_read error");
		if (n == 0)	/* connection closed */
			shut = 1;
		break;
	case EV_WRITE:
		if (msgbuf_write(&ibuf->w) == -1)
			fatal("msgbuf_write");
		imsg_event_add(ibuf);
		return;
	default:
		fatalx("unknown event");
	}

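	/* process parent messages: network add/del and kroute lookups */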
	for (;;) {
		if ((n = imsg_get(ibuf, &imsg)) == -1)
			fatal("rde_dispatch_parent: imsg_get error");
		if (n == 0)
			break;

		switch (imsg.hdr.type) {
		case IMSG_NETWORK_ADD:
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(kr)) {
				log_warnx("rde_dispatch: wrong imsg len");
				break;
			}

			memcpy(&kr, imsg.data, sizeof(kr));

			rt = rt_new_kr(&kr);
			rt_insert(rt);
			break;
		case IMSG_NETWORK_DEL:
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(kr)) {
				log_warnx("rde_dispatch: wrong imsg len");
				break;
			}
			memcpy(&kr, imsg.data, sizeof(kr));

			if ((rt = rt_find(kr.prefix.s_addr,
			    kr.netmask.s_addr)) != NULL)
				rt_remove(rt);
			break;
		case IMSG_KROUTE_GET:
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(kr)) {
				log_warnx("rde_dispatch: wrong imsg len");
				break;
			}
			memcpy(&kr, imsg.data, sizeof(kr));

			if ((rt = rt_find(kr.prefix.s_addr,
			    kr.netmask.s_addr)) != NULL)
				rde_send_change_kroute(rt);
			else
				/* should not happen */
				imsg_compose(ibuf_main, IMSG_KROUTE_DELETE, 0,
				    0, &kr, sizeof(kr));

			break;
		default:
			log_debug("rde_dispatch_parent: unexpected imsg %d",
			    imsg.hdr.type);
			break;
		}
		imsg_free(&imsg);
	}
	if (!shut)
		imsg_event_add(ibuf);
	else {
		/* this pipe is dead, so remove the event handler */
		event_del(&ibuf->ev);
		event_loopexit(NULL);
	}
}

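/* ask the parent process to add or update this route in the kernel table */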
void
rde_send_change_kroute(struct rt_node *r)
{
	struct kroute	 kr;

	bzero(&kr, sizeof(kr));
	kr.prefix.s_addr = r->prefix.s_addr;
	kr.nexthop.s_addr = r->nexthop.s_addr;
	kr.netmask.s_addr = r->netmask.s_addr;
	kr.metric = r->metric;
	kr.flags = r->flags;
	kr.ifindex = r->ifindex;

	imsg_compose(ibuf_main, IMSG_KROUTE_CHANGE, 0, 0, &kr, sizeof(kr));
}

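/* ask the parent process to remove this route from the kernel table */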
void
rde_send_delete_kroute(struct rt_node *r)
{
	struct kroute	 kr;

	bzero(&kr, sizeof(kr));
	kr.prefix.s_addr = r->prefix.s_addr;
	kr.nexthop.s_addr = r->nexthop.s_addr;
	kr.netmask.s_addr = r->netmask.s_addr;
	kr.metric = r->metric;
	kr.flags = r->flags;
	kr.ifindex = r->ifindex;

	imsg_compose(ibuf_main, IMSG_KROUTE_DELETE, 0, 0, &kr, sizeof(kr));
}

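/*
 * Validate a received RIP route and merge it into the route table,
 * following the RIP input processing rules (RFC 2453).
 */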
int
rde_check_route(struct rip_route *e)
{
	struct timeval	 tv, now;
	struct rt_node	*rn;
	struct iface	*iface;
	u_int8_t	 metric;

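	/* reject routes with a loopback or unspecified next hop */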
	if ((e->nexthop.s_addr & htonl(IN_CLASSA_NET)) ==
	    htonl(INADDR_LOOPBACK & IN_CLASSA_NET) ||
	    e->nexthop.s_addr == INADDR_ANY)
		return (-1);

	if ((iface = if_find_index(e->ifindex)) == NULL)
		return (-1);

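	/*
	 * The metric we use is the advertised metric plus the cost of the
	 * receiving interface, capped at INFINITY (unreachable).
	 */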
	metric = MIN(INFINITY, e->metric + iface->cost);

	if ((rn = rt_find(e->address.s_addr, e->mask.s_addr)) == NULL) {
		if (metric >= INFINITY)
			return (0);
		rn = rt_new_rr(e, metric);
		rt_insert(rn);
		rde_send_change_kroute(rn);
		route_start_timeout(rn);
		triggered_update(rn);
	} else {
		/*
		 * XXX don't we have to track all incoming routes?
		 * what happens if the kernel route is removed later.
		 */
		if (rn->flags & F_KERNEL)
			return (0);

		if (metric < rn->metric) {
			rn->metric = metric;
			rn->nexthop.s_addr = e->nexthop.s_addr;
			rn->ifindex = e->ifindex;
			rde_send_change_kroute(rn);
			triggered_update(rn);
		} else if (e->nexthop.s_addr == rn->nexthop.s_addr &&
		    metric > rn->metric) {
				rn->metric = metric;
				rde_send_change_kroute(rn);
				triggered_update(rn);
				if (rn->metric == INFINITY)
					route_start_garbage(rn);
		} else if (e->nexthop.s_addr != rn->nexthop.s_addr &&
		    metric == rn->metric) {
			/* If the new metric is the same as the old one,
			 * examine the timeout for the existing route.  If it
			 * is at least halfway to the expiration point, switch
			 * to the new route.
			 */
			timerclear(&tv);
			gettimeofday(&now, NULL);
			evtimer_pending(&rn->timeout_timer, &tv);
			if (tv.tv_sec - now.tv_sec < ROUTE_TIMEOUT / 2) {
				rn->nexthop.s_addr = e->nexthop.s_addr;
				rn->ifindex = e->ifindex;
				rde_send_change_kroute(rn);
			}
		}

		if (e->nexthop.s_addr == rn->nexthop.s_addr &&
		    rn->metric < INFINITY)
			route_reset_timers(rn);
	}

	return (0);
}

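/*
 * Hand the changed route to the ripe process so it is announced right
 * away (triggered update) instead of waiting for the next periodic update.
 */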
void
triggered_update(struct rt_node *rn)
{
	struct rip_route	 rr;

	rr.address.s_addr = rn->prefix.s_addr;
	rr.mask.s_addr = rn->netmask.s_addr;
	rr.nexthop.s_addr = rn->nexthop.s_addr;
	rr.metric = rn->metric;
	rr.ifindex = rn->ifindex;

	rde_imsg_compose_ripe(IMSG_SEND_TRIGGERED_UPDATE, 0, 0, &rr,
	    sizeof(struct rip_route));
}