xref: /openbsd-src/usr.sbin/ripd/rde.c (revision 50b7afb2c2c0993b0894d4e34bf857cb13ed9c80)
1 /*	$OpenBSD: rde.c,v 1.17 2014/07/12 20:16:38 krw Exp $ */
2 
3 /*
4  * Copyright (c) 2006 Michele Marchetto <mydecay@openbeer.it>
5  * Copyright (c) 2004, 2005 Claudio Jeker <claudio@openbsd.org>
6  * Copyright (c) 2004 Esben Norby <norby@openbsd.org>
7  * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
8  *
9  * Permission to use, copy, modify, and distribute this software for any
10  * purpose with or without fee is hereby granted, provided that the above
11  * copyright notice and this permission notice appear in all copies.
12  *
13  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
14  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
15  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
16  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
17  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20  */
21 
22 #include <sys/param.h>		/* for MIN() */
23 #include <sys/socket.h>
24 #include <sys/queue.h>
25 #include <netinet/in.h>
26 #include <arpa/inet.h>
27 #include <err.h>
28 #include <errno.h>
29 #include <stdlib.h>
30 #include <signal.h>
31 #include <string.h>
32 #include <pwd.h>
33 #include <unistd.h>
34 #include <event.h>
35 
36 #include "ripd.h"
37 #include "rip.h"
38 #include "ripe.h"
39 #include "log.h"
40 #include "rde.h"
41 
42 struct ripd_conf	*rdeconf = NULL;
43 struct imsgev		*iev_ripe;
44 struct imsgev		*iev_main;
45 
46 void	rde_sig_handler(int, short, void *);
47 void	rde_shutdown(void);
48 void	rde_dispatch_imsg(int, short, void *);
49 void	rde_dispatch_parent(int, short, void *);
50 int	rde_imsg_compose_ripe(int, u_int32_t, pid_t, void *, u_int16_t);
51 int	rde_check_route(struct rip_route *);
52 void	triggered_update(struct rt_node *);
53 
/* ARGSUSED */
void
rde_sig_handler(int sig, short event, void *arg)
{
	/*
	 * Invoked by libevent, not as a real signal handler, so the
	 * usual async-signal-safety restrictions do not apply here.
	 */
	if (sig == SIGINT || sig == SIGTERM) {
		rde_shutdown();
		/* NOTREACHED */
	}

	/* only SIGINT/SIGTERM are registered for this callback */
	fatalx("unexpected signal");
}
71 
/*
 * route decision engine: fork the RDE child process, drop privileges,
 * wire up the imsg pipes to the parent and to ripe, and run the event
 * loop until shutdown.  Returns the child's pid to the caller (parent).
 */
pid_t
rde(struct ripd_conf *xconf, int pipe_parent2rde[2], int pipe_ripe2rde[2],
    int pipe_parent2ripe[2])
{
	struct event		 ev_sigint, ev_sigterm;
	struct passwd		*pw;
	struct redistribute	*r;
	pid_t			 pid;

	switch (pid = fork()) {
	case -1:
		fatal("cannot fork");
		/* NOTREACHED */
	case 0:
		/* child: fall through and become the RDE */
		break;
	default:
		/* parent: hand the child's pid back */
		return (pid);
	}

	rdeconf = xconf;

	if ((pw = getpwnam(RIPD_USER)) == NULL)
		fatal("getpwnam");

	/* chroot first, while still privileged; then chdir inside it */
	if (chroot(pw->pw_dir) == -1)
		fatal("chroot");
	if (chdir("/") == -1)
		fatal("chdir(\"/\")");

	setproctitle("route decision engine");
	ripd_process = PROC_RDE_ENGINE;

	/* drop group privileges before user privileges */
	if (setgroups(1, &pw->pw_gid) ||
	    setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) ||
	    setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid))
		fatal("can't drop privileges");

	event_init();

	/* setup signal handler */
	signal_set(&ev_sigint, SIGINT, rde_sig_handler, NULL);
	signal_set(&ev_sigterm, SIGTERM, rde_sig_handler, NULL);
	signal_add(&ev_sigint, NULL);
	signal_add(&ev_sigterm, NULL);
	signal(SIGPIPE, SIG_IGN);
	signal(SIGHUP, SIG_IGN);

	/* setup pipes: close the ends this process does not use */
	close(pipe_ripe2rde[0]);
	close(pipe_parent2rde[0]);
	close(pipe_parent2ripe[0]);
	close(pipe_parent2ripe[1]);

	if ((iev_ripe = malloc(sizeof(struct imsgev))) == NULL ||
	    (iev_main = malloc(sizeof(struct imsgev))) == NULL)
		fatal(NULL);
	imsg_init(&iev_ripe->ibuf, pipe_ripe2rde[1]);
	iev_ripe->handler =  rde_dispatch_imsg;
	imsg_init(&iev_main->ibuf, pipe_parent2rde[1]);
	iev_main->handler = rde_dispatch_parent;

	/* setup event handler */
	iev_ripe->events = EV_READ;
	event_set(&iev_ripe->ev, iev_ripe->ibuf.fd, iev_ripe->events,
	    iev_ripe->handler, iev_ripe);
	event_add(&iev_ripe->ev, NULL);

	iev_main->events = EV_READ;
	event_set(&iev_main->ev, iev_main->ibuf.fd, iev_main->events,
	    iev_main->handler, iev_main);
	event_add(&iev_main->ev, NULL);
	rt_init();

	/* remove unneeded config stuff (redistribute list is parent-only) */
	while ((r = SIMPLEQ_FIRST(&rdeconf->redist_list)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&rdeconf->redist_list, entry);
		free(r);
	}

	/* runs until a signal or a dead pipe triggers event_loopexit() */
	event_dispatch();

	rde_shutdown();
	/* NOTREACHED */

	return (0);
}
159 
160 void
161 rde_shutdown(void)
162 {
163 	rt_clear();
164 
165 	msgbuf_clear(&iev_ripe->ibuf.w);
166 	free(iev_ripe);
167 	msgbuf_clear(&iev_main->ibuf.w);
168 	free(iev_main);
169 	free(rdeconf);
170 
171 	log_info("route decision engine exiting");
172 	_exit(0);
173 }
174 
175 int
176 rde_imsg_compose_ripe(int type, u_int32_t peerid, pid_t pid, void *data,
177     u_int16_t datalen)
178 {
179 	return (imsg_compose_event(iev_ripe, type, peerid, pid, -1,
180 		    data, datalen));
181 }
182 
/* ARGSUSED */
/*
 * Event handler for the pipe from the ripe (RIP engine) process:
 * service pending reads/writes, then drain and act on queued imsgs.
 */
void
rde_dispatch_imsg(int fd, short event, void *bula)
{
	struct imsgev		*iev = bula;
	struct imsgbuf		*ibuf = &iev->ibuf;
	struct rip_route	 rr;
	struct imsg		 imsg;
	ssize_t			 n;
	int			 shut = 0, verbose;

	if (event & EV_READ) {
		if ((n = imsg_read(ibuf)) == -1)
			fatal("imsg_read error");
		if (n == 0)	/* connection closed */
			shut = 1;
	}
	if (event & EV_WRITE) {
		if ((n = msgbuf_write(&ibuf->w)) == -1 && errno != EAGAIN)
			fatal("msgbuf_write");
		if (n == 0)	/* connection closed */
			shut = 1;
	}

	/* process every complete message currently buffered */
	for (;;) {
		if ((n = imsg_get(ibuf, &imsg)) == -1)
			fatal("rde_dispatch_imsg: imsg_read error");
		if (n == 0)
			break;

		switch (imsg.hdr.type) {
		case IMSG_ROUTE_FEED:
			/* a received RIP route to run the decision on */
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(rr))
				fatalx("invalid size of RDE request");

			memcpy(&rr, imsg.data, sizeof(rr));

			if (rde_check_route(&rr) == -1)
				log_debug("rde_dispatch_imsg: "
				    "packet malformed\n");
			break;
		case IMSG_FULL_REQUEST:
			bzero(&rr, sizeof(rr));
			/*
			 * AFI == 0 && metric == INFINITY request the
			 * whole routing table
			 */
			rr.metric = INFINITY;
			rde_imsg_compose_ripe(IMSG_REQUEST_ADD, 0,
			    0, &rr, sizeof(rr));
			rde_imsg_compose_ripe(IMSG_SEND_REQUEST, 0,
			    0, NULL, 0);
			break;
		case IMSG_FULL_RESPONSE:
			/* peer asked for the full table: snapshot + send */
			rt_snap(imsg.hdr.peerid);
			rde_imsg_compose_ripe(IMSG_SEND_RESPONSE,
			    imsg.hdr.peerid, 0, NULL, 0);
			break;
		case IMSG_ROUTE_REQUEST:
			/* peer asked about one specific prefix */
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(rr))
				fatalx("invalid size of RDE request");

			memcpy(&rr, imsg.data, sizeof(rr));

			rt_complete(&rr);
			rde_imsg_compose_ripe(IMSG_RESPONSE_ADD,
			    imsg.hdr.peerid, 0, &rr, sizeof(rr));

			break;
		case IMSG_ROUTE_REQUEST_END:
			/* all per-prefix answers queued; flush them out */
			rde_imsg_compose_ripe(IMSG_SEND_RESPONSE,
			    imsg.hdr.peerid, 0, NULL, 0);
			break;
		case IMSG_CTL_SHOW_RIB:
			/* ripctl request: dump the RIB to the client pid */
			rt_dump(imsg.hdr.pid);

			imsg_compose_event(iev_ripe, IMSG_CTL_END, 0,
			    imsg.hdr.pid, -1, NULL, 0);

			break;
		case IMSG_CTL_LOG_VERBOSE:
			/* already checked by ripe */
			memcpy(&verbose, imsg.data, sizeof(verbose));
			log_verbose(verbose);
			break;
		default:
			log_debug("rde_dispatch_msg: unexpected imsg %d",
			    imsg.hdr.type);
			break;
		}
		imsg_free(&imsg);
	}
	if (!shut)
		imsg_event_add(iev);
	else {
		/* this pipe is dead, so remove the event handler */
		event_del(&iev->ev);
		event_loopexit(NULL);
	}
}
283 
/* ARGSUSED */
/*
 * Event handler for the pipe from the parent process: kernel network
 * (interface/connected route) additions and removals arrive here.
 */
void
rde_dispatch_parent(int fd, short event, void *bula)
{
	struct imsg		 imsg;
	struct rt_node		*rt;
	struct kroute		 kr;
	struct imsgev		*iev = bula;
	struct imsgbuf		*ibuf = &iev->ibuf;
	ssize_t			 n;
	int			 shut = 0;

	if (event & EV_READ) {
		if ((n = imsg_read(ibuf)) == -1)
			fatal("imsg_read error");
		if (n == 0)	/* connection closed */
			shut = 1;
	}
	if (event & EV_WRITE) {
		if ((n = msgbuf_write(&ibuf->w)) == -1 && errno != EAGAIN)
			fatal("msgbuf_write");
		if (n == 0)	/* connection closed */
			shut = 1;
	}

	/* process every complete message currently buffered */
	for (;;) {
		if ((n = imsg_get(ibuf, &imsg)) == -1)
			fatal("rde_dispatch_parent: imsg_read error");
		if (n == 0)
			break;

		switch (imsg.hdr.type) {
		case IMSG_NETWORK_ADD:
			/* new kernel/connected route: insert into the RIB */
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(kr)) {
				log_warnx("rde_dispatch: wrong imsg len");
				break;
			}

			memcpy(&kr, imsg.data, sizeof(kr));

			rt = rt_new_kr(&kr);
			rt_insert(rt);
			break;
		case IMSG_NETWORK_DEL:
			/* kernel route gone: drop it from the RIB if known */
			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(kr)) {
				log_warnx("rde_dispatch: wrong imsg len");
				break;
			}
			memcpy(&kr, imsg.data, sizeof(kr));

			if ((rt = rt_find(kr.prefix.s_addr,
			    kr.netmask.s_addr)) != NULL)
				rt_remove(rt);
			break;
		default:
			log_debug("rde_dispatch_parent: unexpected imsg %d",
			    imsg.hdr.type);
			break;
		}
		imsg_free(&imsg);
	}
	if (!shut)
		imsg_event_add(iev);
	else {
		/* this pipe is dead, so remove the event handler */
		event_del(&iev->ev);
		event_loopexit(NULL);
	}
}
353 
354 void
355 rde_send_change_kroute(struct rt_node *r)
356 {
357 	struct kroute	 kr;
358 
359 	bzero(&kr, sizeof(kr));
360 	kr.prefix.s_addr = r->prefix.s_addr;
361 	kr.nexthop.s_addr = r->nexthop.s_addr;
362 	kr.netmask.s_addr = r->netmask.s_addr;
363 	kr.metric = r->metric;
364 	kr.flags = r->flags;
365 	kr.ifindex = r->ifindex;
366 
367 	imsg_compose_event(iev_main, IMSG_KROUTE_CHANGE, 0, 0, -1,
368 	    &kr, sizeof(kr));
369 }
370 
371 void
372 rde_send_delete_kroute(struct rt_node *r)
373 {
374 	struct kroute	 kr;
375 
376 	bzero(&kr, sizeof(kr));
377 	kr.prefix.s_addr = r->prefix.s_addr;
378 	kr.nexthop.s_addr = r->nexthop.s_addr;
379 	kr.netmask.s_addr = r->netmask.s_addr;
380 	kr.metric = r->metric;
381 	kr.flags = r->flags;
382 	kr.ifindex = r->ifindex;
383 
384 	imsg_compose_event(iev_main, IMSG_KROUTE_DELETE, 0, 0, -1,
385 	    &kr, sizeof(kr));
386 }
387 
/*
 * Run the RIP route decision (RFC 2453, section 3.9.2) on a received
 * route entry.  Returns -1 if the entry is malformed, 0 otherwise.
 */
int
rde_check_route(struct rip_route *e)
{
	struct timeval	 tv, now;
	struct rt_node	*rn;
	struct iface	*iface;
	u_int8_t	 metric;

	/* reject loopback (127/8) and unspecified nexthops */
	if ((e->nexthop.s_addr & htonl(IN_CLASSA_NET)) ==
	    htonl(INADDR_LOOPBACK & IN_CLASSA_NET) ||
	    e->nexthop.s_addr == INADDR_ANY)
		return (-1);

	if ((iface = if_find_index(e->ifindex)) == NULL)
		return (-1);

	/* add the cost of the receiving interface, capped at INFINITY */
	metric = MIN(INFINITY, e->metric + iface->cost);

	if ((rn = rt_find(e->address.s_addr, e->mask.s_addr)) == NULL) {
		/* unknown prefix: install it unless it is unreachable */
		if (metric >= INFINITY)
			return (0);
		rn = rt_new_rr(e, metric);
		rt_insert(rn);
		rde_send_change_kroute(rn);
		route_start_timeout(rn);
		triggered_update(rn);
	} else {
		/*
		 * XXX don't we have to track all incoming routes?
		 * what happens if the kernel route is removed later.
		 */
		if (rn->flags & F_KERNEL)
			return (0);

		if (metric < rn->metric) {
			/* strictly better metric: adopt the new route */
			rn->metric = metric;
			rn->nexthop.s_addr = e->nexthop.s_addr;
			rn->ifindex = e->ifindex;
			rde_send_change_kroute(rn);
			triggered_update(rn);
		} else if (e->nexthop.s_addr == rn->nexthop.s_addr &&
		    metric > rn->metric) {
				/* same gateway got worse: must believe it */
				rn->metric = metric;
				rde_send_change_kroute(rn);
				triggered_update(rn);
				if (rn->metric == INFINITY)
					route_start_garbage(rn);
		} else if (e->nexthop.s_addr != rn->nexthop.s_addr &&
		    metric == rn->metric) {
			/* If the new metric is the same as the old one,
			 * examine the timeout for the existing route.  If it
			 * is at least halfway to the expiration point, switch
			 * to the new route.
			 */
			timerclear(&tv);
			gettimeofday(&now, NULL);
			evtimer_pending(&rn->timeout_timer, &tv);
			if (tv.tv_sec - now.tv_sec < ROUTE_TIMEOUT / 2) {
				rn->nexthop.s_addr = e->nexthop.s_addr;
				rn->ifindex = e->ifindex;
				rde_send_change_kroute(rn);
			}
		}

		/* any fresh news from the current gateway keeps it alive */
		if (e->nexthop.s_addr == rn->nexthop.s_addr &&
		    rn->metric < INFINITY)
			route_reset_timers(rn);
	}

	return (0);
}
459 
460 void
461 triggered_update(struct rt_node *rn)
462 {
463 	struct rip_route	 rr;
464 
465 	rr.address.s_addr = rn->prefix.s_addr;
466 	rr.mask.s_addr = rn->netmask.s_addr;
467 	rr.nexthop.s_addr = rn->nexthop.s_addr;
468 	rr.metric = rn->metric;
469 	rr.ifindex = rn->ifindex;
470 
471 	rde_imsg_compose_ripe(IMSG_SEND_TRIGGERED_UPDATE, 0, 0, &rr,
472 	    sizeof(struct rip_route));
473 }
474