xref: /openbsd-src/usr.sbin/ospfd/rde.c (revision a28daedfc357b214be5c701aa8ba8adb29a7f1c2)
1 /*	$OpenBSD: rde.c,v 1.77 2009/01/27 12:45:52 michele Exp $ */
2 
3 /*
4  * Copyright (c) 2004, 2005 Claudio Jeker <claudio@openbsd.org>
5  * Copyright (c) 2004 Esben Norby <norby@openbsd.org>
6  * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 #include <sys/types.h>
22 #include <sys/socket.h>
23 #include <sys/queue.h>
24 #include <netinet/in.h>
25 #include <arpa/inet.h>
26 #include <err.h>
27 #include <errno.h>
28 #include <stdlib.h>
29 #include <signal.h>
30 #include <string.h>
31 #include <pwd.h>
32 #include <unistd.h>
33 #include <event.h>
34 
35 #include "ospf.h"
36 #include "ospfd.h"
37 #include "ospfe.h"
38 #include "log.h"
39 #include "rde.h"
40 
41 void		 rde_sig_handler(int sig, short, void *);
42 void		 rde_shutdown(void);
43 void		 rde_dispatch_imsg(int, short, void *);
44 void		 rde_dispatch_parent(int, short, void *);
45 
46 void		 rde_send_summary(pid_t);
47 void		 rde_send_summary_area(struct area *, pid_t);
48 void		 rde_nbr_init(u_int32_t);
49 void		 rde_nbr_free(void);
50 struct rde_nbr	*rde_nbr_find(u_int32_t);
51 struct rde_nbr	*rde_nbr_new(u_int32_t, struct rde_nbr *);
52 void		 rde_nbr_del(struct rde_nbr *);
53 
54 void		 rde_req_list_add(struct rde_nbr *, struct lsa_hdr *);
55 int		 rde_req_list_exists(struct rde_nbr *, struct lsa_hdr *);
56 void		 rde_req_list_del(struct rde_nbr *, struct lsa_hdr *);
57 void		 rde_req_list_free(struct rde_nbr *);
58 
59 struct lsa	*rde_asext_get(struct rroute *);
60 struct lsa	*rde_asext_put(struct rroute *);
61 
62 struct lsa	*orig_asext_lsa(struct rroute *, u_int16_t);
63 struct lsa	*orig_sum_lsa(struct rt_node *, struct area *, u_int8_t, int);
64 
65 struct ospfd_conf	*rdeconf = NULL, *nconf = NULL;
66 struct imsgbuf		*ibuf_ospfe;
67 struct imsgbuf		*ibuf_main;
68 struct rde_nbr		*nbrself;
69 struct lsa_tree		 asext_tree;
70 
71 /* ARGSUSED */
72 void
73 rde_sig_handler(int sig, short event, void *arg)
74 {
75 	/*
76 	 * signal handler rules don't apply, libevent decouples for us
77 	 */
78 
79 	switch (sig) {
80 	case SIGINT:
81 	case SIGTERM:
82 		rde_shutdown();
83 		/* NOTREACHED */
84 	default:
85 		fatalx("unexpected signal");
86 	}
87 }
88 
89 /* route decision engine */
90 pid_t
91 rde(struct ospfd_conf *xconf, int pipe_parent2rde[2], int pipe_ospfe2rde[2],
92     int pipe_parent2ospfe[2])
93 {
94 	struct event		 ev_sigint, ev_sigterm;
95 	struct timeval		 now;
96 	struct area		*area;
97 	struct iface		*iface;
98 	struct passwd		*pw;
99 	struct redistribute	*r;
100 	pid_t			 pid;
101 
102 	switch (pid = fork()) {
103 	case -1:
104 		fatal("cannot fork");
105 		/* NOTREACHED */
106 	case 0:
107 		break;
108 	default:
109 		return (pid);
110 	}
111 
112 	rdeconf = xconf;
113 
114 	if ((pw = getpwnam(OSPFD_USER)) == NULL)
115 		fatal("getpwnam");
116 
117 	if (chroot(pw->pw_dir) == -1)
118 		fatal("chroot");
119 	if (chdir("/") == -1)
120 		fatal("chdir(\"/\")");
121 
122 	setproctitle("route decision engine");
123 	ospfd_process = PROC_RDE_ENGINE;
124 
125 	if (setgroups(1, &pw->pw_gid) ||
126 	    setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) ||
127 	    setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid))
128 		fatal("can't drop privileges");
129 
130 	event_init();
131 	rde_nbr_init(NBR_HASHSIZE);
132 	lsa_init(&asext_tree);
133 
134 	/* setup signal handler */
135 	signal_set(&ev_sigint, SIGINT, rde_sig_handler, NULL);
136 	signal_set(&ev_sigterm, SIGTERM, rde_sig_handler, NULL);
137 	signal_add(&ev_sigint, NULL);
138 	signal_add(&ev_sigterm, NULL);
139 	signal(SIGPIPE, SIG_IGN);
140 	signal(SIGHUP, SIG_IGN);
141 
142 	/* setup pipes */
143 	close(pipe_ospfe2rde[0]);
144 	close(pipe_parent2rde[0]);
145 	close(pipe_parent2ospfe[0]);
146 	close(pipe_parent2ospfe[1]);
147 
148 	if ((ibuf_ospfe = malloc(sizeof(struct imsgbuf))) == NULL ||
149 	    (ibuf_main = malloc(sizeof(struct imsgbuf))) == NULL)
150 		fatal(NULL);
151 	imsg_init(ibuf_ospfe, pipe_ospfe2rde[1], rde_dispatch_imsg);
152 	imsg_init(ibuf_main, pipe_parent2rde[1], rde_dispatch_parent);
153 
154 	/* setup event handler */
155 	ibuf_ospfe->events = EV_READ;
156 	event_set(&ibuf_ospfe->ev, ibuf_ospfe->fd, ibuf_ospfe->events,
157 	    ibuf_ospfe->handler, ibuf_ospfe);
158 	event_add(&ibuf_ospfe->ev, NULL);
159 
160 	ibuf_main->events = EV_READ;
161 	event_set(&ibuf_main->ev, ibuf_main->fd, ibuf_main->events,
162 	    ibuf_main->handler, ibuf_main);
163 	event_add(&ibuf_main->ev, NULL);
164 
165 	evtimer_set(&rdeconf->ev, spf_timer, rdeconf);
166 	cand_list_init();
167 	rt_init();
168 
169 	/* remove unneded stuff from config */
170 	LIST_FOREACH(area, &rdeconf->area_list, entry)
171 		LIST_FOREACH(iface, &area->iface_list, entry)
172 			md_list_clr(&iface->auth_md_list);
173 
174 	while ((r = SIMPLEQ_FIRST(&rdeconf->redist_list)) != NULL) {
175 		SIMPLEQ_REMOVE_HEAD(&rdeconf->redist_list, entry);
176 		free(r);
177 	}
178 
179 	gettimeofday(&now, NULL);
180 	rdeconf->uptime = now.tv_sec;
181 
182 	event_dispatch();
183 
184 	rde_shutdown();
185 	/* NOTREACHED */
186 
187 	return (0);
188 }
189 
/*
 * Tear down all RDE state and exit the process.  Called from the signal
 * handler and after the event loop terminates; does not return.
 */
void
rde_shutdown(void)
{
	struct area	*a;

	stop_spf_timer(rdeconf);
	cand_list_clr();
	rt_clear();

	/* free every area together with its LSA database and neighbors */
	while ((a = LIST_FIRST(&rdeconf->area_list)) != NULL) {
		LIST_REMOVE(a, entry);
		area_del(a);
	}
	rde_nbr_free();

	/* discard any queued imsgs before freeing the buffers */
	msgbuf_clear(&ibuf_ospfe->w);
	free(ibuf_ospfe);
	msgbuf_clear(&ibuf_main->w);
	free(ibuf_main);
	free(rdeconf);

	log_info("route decision engine exiting");
	_exit(0);
}
214 
/*
 * Convenience wrapper: queue an imsg for the ospfe process.
 * Returns the result of imsg_compose() (-1 on failure).
 */
int
rde_imsg_compose_ospfe(int type, u_int32_t peerid, pid_t pid, void *data,
    u_int16_t datalen)
{
	return (imsg_compose(ibuf_ospfe, type, peerid, pid, data, datalen));
}
221 
222 /* ARGSUSED */
223 void
224 rde_dispatch_imsg(int fd, short event, void *bula)
225 {
226 	struct imsgbuf		*ibuf = bula;
227 	struct imsg		 imsg;
228 	struct in_addr		 aid;
229 	struct ls_req_hdr	 req_hdr;
230 	struct lsa_hdr		 lsa_hdr, *db_hdr;
231 	struct rde_nbr		 rn, *nbr;
232 	struct timespec		 tp;
233 	struct lsa		*lsa;
234 	struct area		*area;
235 	struct vertex		*v;
236 	char			*buf;
237 	ssize_t			 n;
238 	time_t			 now;
239 	int			 r, state, self, error, shut = 0;
240 	u_int16_t		 l;
241 
242 	switch (event) {
243 	case EV_READ:
244 		if ((n = imsg_read(ibuf)) == -1)
245 			fatal("imsg_read error");
246 		if (n == 0)	/* connection closed */
247 			shut = 1;
248 		break;
249 	case EV_WRITE:
250 		if (msgbuf_write(&ibuf->w) == -1)
251 			fatal("msgbuf_write");
252 		imsg_event_add(ibuf);
253 		return;
254 	default:
255 		fatalx("unknown event");
256 	}
257 
258 	clock_gettime(CLOCK_MONOTONIC, &tp);
259 	now = tp.tv_sec;
260 
261 	for (;;) {
262 		if ((n = imsg_get(ibuf, &imsg)) == -1)
263 			fatal("rde_dispatch_imsg: imsg_read error");
264 		if (n == 0)
265 			break;
266 
267 		switch (imsg.hdr.type) {
268 		case IMSG_NEIGHBOR_UP:
269 			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(rn))
270 				fatalx("invalid size of OE request");
271 			memcpy(&rn, imsg.data, sizeof(rn));
272 
273 			if (rde_nbr_new(imsg.hdr.peerid, &rn) == NULL)
274 				fatalx("rde_dispatch_imsg: "
275 				    "neighbor already exists");
276 			break;
277 		case IMSG_NEIGHBOR_DOWN:
278 			rde_nbr_del(rde_nbr_find(imsg.hdr.peerid));
279 			break;
280 		case IMSG_NEIGHBOR_CHANGE:
281 			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(state))
282 				fatalx("invalid size of OE request");
283 			memcpy(&state, imsg.data, sizeof(state));
284 
285 			nbr = rde_nbr_find(imsg.hdr.peerid);
286 			if (nbr == NULL)
287 				break;
288 
289 			if (state != nbr->state && (nbr->state & NBR_STA_FULL ||
290 			    state & NBR_STA_FULL))
291 				area_track(nbr->area, state);
292 
293 			nbr->state = state;
294 			if (nbr->state & NBR_STA_FULL)
295 				rde_req_list_free(nbr);
296 			break;
297 		case IMSG_DB_SNAPSHOT:
298 			nbr = rde_nbr_find(imsg.hdr.peerid);
299 			if (nbr == NULL)
300 				break;
301 
302 			lsa_snap(nbr->area, imsg.hdr.peerid);
303 
304 			imsg_compose(ibuf_ospfe, IMSG_DB_END, imsg.hdr.peerid,
305 			    0, NULL, 0);
306 			break;
307 		case IMSG_DD:
308 			nbr = rde_nbr_find(imsg.hdr.peerid);
309 			if (nbr == NULL)
310 				break;
311 
312 			buf = imsg.data;
313 			error = 0;
314 			for (l = imsg.hdr.len - IMSG_HEADER_SIZE;
315 			    l >= sizeof(lsa_hdr); l -= sizeof(lsa_hdr)) {
316 				memcpy(&lsa_hdr, buf, sizeof(lsa_hdr));
317 				buf += sizeof(lsa_hdr);
318 
319 				if (lsa_hdr.type == LSA_TYPE_EXTERNAL &&
320 				    nbr->area->stub) {
321 					error = 1;
322 					break;
323 				}
324 				v = lsa_find(nbr->area, lsa_hdr.type,
325 				    lsa_hdr.ls_id, lsa_hdr.adv_rtr);
326 				if (v == NULL)
327 					db_hdr = NULL;
328 				else
329 					db_hdr = &v->lsa->hdr;
330 
331 				if (lsa_newer(&lsa_hdr, db_hdr) > 0) {
332 					/*
333 					 * only request LSAs that are
334 					 * newer or missing
335 					 */
336 					rde_req_list_add(nbr, &lsa_hdr);
337 					imsg_compose(ibuf_ospfe, IMSG_DD,
338 					    imsg.hdr.peerid, 0, &lsa_hdr,
339 					    sizeof(lsa_hdr));
340 				}
341 			}
342 			if (l != 0 && !error)
343 				log_warnx("rde_dispatch_imsg: peerid %lu, "
344 				    "trailing garbage in Database Description "
345 				    "packet", imsg.hdr.peerid);
346 
347 			if (!error)
348 				imsg_compose(ibuf_ospfe, IMSG_DD_END,
349 				    imsg.hdr.peerid, 0, NULL, 0);
350 			else
351 				imsg_compose(ibuf_ospfe, IMSG_DD_BADLSA,
352 				    imsg.hdr.peerid, 0, NULL, 0);
353 			break;
354 		case IMSG_LS_REQ:
355 			nbr = rde_nbr_find(imsg.hdr.peerid);
356 			if (nbr == NULL)
357 				break;
358 
359 			buf = imsg.data;
360 			for (l = imsg.hdr.len - IMSG_HEADER_SIZE;
361 			    l >= sizeof(req_hdr); l -= sizeof(req_hdr)) {
362 				memcpy(&req_hdr, buf, sizeof(req_hdr));
363 				buf += sizeof(req_hdr);
364 
365 				if ((v = lsa_find(nbr->area,
366 				    ntohl(req_hdr.type), req_hdr.ls_id,
367 				    req_hdr.adv_rtr)) == NULL) {
368 					imsg_compose(ibuf_ospfe, IMSG_LS_BADREQ,
369 					    imsg.hdr.peerid, 0, NULL, 0);
370 					continue;
371 				}
372 				imsg_compose(ibuf_ospfe, IMSG_LS_UPD,
373 				    imsg.hdr.peerid, 0, v->lsa,
374 				    ntohs(v->lsa->hdr.len));
375 			}
376 			if (l != 0)
377 				log_warnx("rde_dispatch_imsg: peerid %lu, "
378 				    "trailing garbage in LS Request "
379 				    "packet", imsg.hdr.peerid);
380 			break;
381 		case IMSG_LS_UPD:
382 			nbr = rde_nbr_find(imsg.hdr.peerid);
383 			if (nbr == NULL)
384 				break;
385 
386 			lsa = malloc(imsg.hdr.len - IMSG_HEADER_SIZE);
387 			if (lsa == NULL)
388 				fatal(NULL);
389 			memcpy(lsa, imsg.data, imsg.hdr.len - IMSG_HEADER_SIZE);
390 
391 			if (!lsa_check(nbr, lsa,
392 			    imsg.hdr.len - IMSG_HEADER_SIZE)) {
393 				free(lsa);
394 				break;
395 			}
396 
397 			v = lsa_find(nbr->area, lsa->hdr.type, lsa->hdr.ls_id,
398 				    lsa->hdr.adv_rtr);
399 			if (v == NULL)
400 				db_hdr = NULL;
401 			else
402 				db_hdr = &v->lsa->hdr;
403 
404 			if (nbr->self) {
405 				lsa_merge(nbr, lsa, v);
406 				/* lsa_merge frees the right lsa */
407 				break;
408 			}
409 
410 			r = lsa_newer(&lsa->hdr, db_hdr);
411 			if (r > 0) {
412 				/* new LSA newer than DB */
413 				if (v && v->flooded &&
414 				    v->changed + MIN_LS_ARRIVAL >= now) {
415 					free(lsa);
416 					break;
417 				}
418 
419 				rde_req_list_del(nbr, &lsa->hdr);
420 
421 				if (!(self = lsa_self(nbr, lsa, v)))
422 					if (lsa_add(nbr, lsa))
423 						/* delayed lsa */
424 						break;
425 
426 				/* flood and perhaps ack LSA */
427 				imsg_compose(ibuf_ospfe, IMSG_LS_FLOOD,
428 				    imsg.hdr.peerid, 0, lsa,
429 				    ntohs(lsa->hdr.len));
430 
431 				/* reflood self originated LSA */
432 				if (self && v)
433 					imsg_compose(ibuf_ospfe, IMSG_LS_FLOOD,
434 					    v->peerid, 0, v->lsa,
435 					    ntohs(v->lsa->hdr.len));
436 				/* lsa not added so free it */
437 				if (self)
438 					free(lsa);
439 			} else if (r < 0) {
440 				/* lsa no longer needed */
441 				free(lsa);
442 
443 				/*
444 				 * point 6 of "The Flooding Procedure"
445 				 * We are violating the RFC here because
446 				 * it does not make sense to reset a session
447 				 * because an equal LSA is already in the table.
448 				 * Only if the LSA sent is older than the one
449 				 * in the table we should reset the session.
450 				 */
451 				if (rde_req_list_exists(nbr, &lsa->hdr)) {
452 					imsg_compose(ibuf_ospfe, IMSG_LS_BADREQ,
453 					    imsg.hdr.peerid, 0, NULL, 0);
454 					break;
455 				}
456 
457 				/* new LSA older than DB */
458 				if (ntohl(db_hdr->seq_num) == MAX_SEQ_NUM &&
459 				    ntohs(db_hdr->age) == MAX_AGE)
460 					/* seq-num wrap */
461 					break;
462 
463 				if (v->changed + MIN_LS_ARRIVAL >= now)
464 					break;
465 
466 				/* directly send current LSA, no ack */
467 				imsg_compose(ibuf_ospfe, IMSG_LS_UPD,
468 				    imsg.hdr.peerid, 0, v->lsa,
469 				    ntohs(v->lsa->hdr.len));
470 			} else {
471 				/* LSA equal send direct ack */
472 				imsg_compose(ibuf_ospfe, IMSG_LS_ACK,
473 				    imsg.hdr.peerid, 0, &lsa->hdr,
474 				    sizeof(lsa->hdr));
475 				free(lsa);
476 			}
477 			break;
478 		case IMSG_LS_MAXAGE:
479 			nbr = rde_nbr_find(imsg.hdr.peerid);
480 			if (nbr == NULL)
481 				break;
482 
483 			if (imsg.hdr.len != IMSG_HEADER_SIZE +
484 			    sizeof(struct lsa_hdr))
485 				fatalx("invalid size of OE request");
486 			memcpy(&lsa_hdr, imsg.data, sizeof(lsa_hdr));
487 
488 			if (rde_nbr_loading(nbr->area))
489 				break;
490 
491 			v = lsa_find(nbr->area, lsa_hdr.type, lsa_hdr.ls_id,
492 				    lsa_hdr.adv_rtr);
493 			if (v == NULL)
494 				db_hdr = NULL;
495 			else
496 				db_hdr = &v->lsa->hdr;
497 
498 			/*
499 			 * only delete LSA if the one in the db is not newer
500 			 */
501 			if (lsa_newer(db_hdr, &lsa_hdr) <= 0)
502 				lsa_del(nbr, &lsa_hdr);
503 			break;
504 		case IMSG_CTL_SHOW_DATABASE:
505 		case IMSG_CTL_SHOW_DB_EXT:
506 		case IMSG_CTL_SHOW_DB_NET:
507 		case IMSG_CTL_SHOW_DB_RTR:
508 		case IMSG_CTL_SHOW_DB_SELF:
509 		case IMSG_CTL_SHOW_DB_SUM:
510 		case IMSG_CTL_SHOW_DB_ASBR:
511 			if (imsg.hdr.len != IMSG_HEADER_SIZE &&
512 			    imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(aid)) {
513 				log_warnx("rde_dispatch_imsg: wrong imsg len");
514 				break;
515 			}
516 			if (imsg.hdr.len == IMSG_HEADER_SIZE) {
517 				LIST_FOREACH(area, &rdeconf->area_list, entry) {
518 					imsg_compose(ibuf_ospfe, IMSG_CTL_AREA,
519 					    0, imsg.hdr.pid, area,
520 					    sizeof(*area));
521 					lsa_dump(&area->lsa_tree, imsg.hdr.type,
522 					    imsg.hdr.pid);
523 				}
524 				lsa_dump(&asext_tree, imsg.hdr.type,
525 				    imsg.hdr.pid);
526 			} else {
527 				memcpy(&aid, imsg.data, sizeof(aid));
528 				if ((area = area_find(rdeconf, aid)) != NULL) {
529 					imsg_compose(ibuf_ospfe, IMSG_CTL_AREA,
530 					    0, imsg.hdr.pid, area,
531 					    sizeof(*area));
532 					lsa_dump(&area->lsa_tree, imsg.hdr.type,
533 					    imsg.hdr.pid);
534 					if (!area->stub)
535 						lsa_dump(&asext_tree,
536 						    imsg.hdr.type,
537 						    imsg.hdr.pid);
538 				}
539 			}
540 			imsg_compose(ibuf_ospfe, IMSG_CTL_END, 0, imsg.hdr.pid,
541 			    NULL, 0);
542 			break;
543 		case IMSG_CTL_SHOW_RIB:
544 			LIST_FOREACH(area, &rdeconf->area_list, entry) {
545 				imsg_compose(ibuf_ospfe, IMSG_CTL_AREA,
546 				    0, imsg.hdr.pid, area, sizeof(*area));
547 
548 				rt_dump(area->id, imsg.hdr.pid, RIB_RTR);
549 				rt_dump(area->id, imsg.hdr.pid, RIB_NET);
550 			}
551 			aid.s_addr = 0;
552 			rt_dump(aid, imsg.hdr.pid, RIB_EXT);
553 
554 			imsg_compose(ibuf_ospfe, IMSG_CTL_END, 0, imsg.hdr.pid,
555 			    NULL, 0);
556 			break;
557 		case IMSG_CTL_SHOW_SUM:
558 			rde_send_summary(imsg.hdr.pid);
559 			LIST_FOREACH(area, &rdeconf->area_list, entry)
560 				rde_send_summary_area(area, imsg.hdr.pid);
561 			imsg_compose(ibuf_ospfe, IMSG_CTL_END, 0, imsg.hdr.pid,
562 			    NULL, 0);
563 			break;
564 		default:
565 			log_debug("rde_dispatch_imsg: unexpected imsg %d",
566 			    imsg.hdr.type);
567 			break;
568 		}
569 		imsg_free(&imsg);
570 	}
571 	if (!shut)
572 		imsg_event_add(ibuf);
573 	else {
574 		/* this pipe is dead, so remove the event handler */
575 		event_del(&ibuf->ev);
576 		event_loopexit(NULL);
577 	}
578 }
579 
/* ARGSUSED */
/*
 * Dispatch imsgs received from the parent process: kernel route
 * announcements to redistribute (NETWORK_ADD/DEL) and the staged
 * reconfiguration sequence (RECONF_CONF .. RECONF_END).
 *
 * NOTE(review): the reconf messages are order-dependent: RECONF_AREA
 * stores into the static `narea`, which RECONF_REDIST/RECONF_IFACE then
 * use; the parent is trusted to send them in the right order.
 */
void
rde_dispatch_parent(int fd, short event, void *bula)
{
	static struct area	*narea;
	struct iface		*niface;
	struct imsg		 imsg;
	struct rroute		 rr;
	struct imsgbuf		*ibuf = bula;
	struct lsa		*lsa;
	struct vertex		*v;
	struct redistribute	*nred;
	ssize_t			 n;
	int			 shut = 0;

	switch (event) {
	case EV_READ:
		if ((n = imsg_read(ibuf)) == -1)
			fatal("imsg_read error");
		if (n == 0)	/* connection closed */
			shut = 1;
		break;
	case EV_WRITE:
		if (msgbuf_write(&ibuf->w) == -1)
			fatal("msgbuf_write");
		imsg_event_add(ibuf);
		return;
	default:
		fatalx("unknown event");
	}

	for (;;) {
		if ((n = imsg_get(ibuf, &imsg)) == -1)
			fatal("rde_dispatch_parent: imsg_read error");
		if (n == 0)
			break;

		switch (imsg.hdr.type) {
		case IMSG_NETWORK_ADD:
			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(rr)) {
				log_warnx("rde_dispatch_parent: "
				    "wrong imsg len");
				break;
			}
			memcpy(&rr, imsg.data, sizeof(rr));

			/* originate (or refresh) an AS-external LSA */
			if ((lsa = rde_asext_get(&rr)) != NULL) {
				v = lsa_find(NULL, lsa->hdr.type,
				    lsa->hdr.ls_id, lsa->hdr.adv_rtr);

				lsa_merge(nbrself, lsa, v);
			}
			break;
		case IMSG_NETWORK_DEL:
			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(rr)) {
				log_warnx("rde_dispatch_parent: "
				    "wrong imsg len");
				break;
			}
			memcpy(&rr, imsg.data, sizeof(rr));

			/* withdraw by reflooding the LSA with MAX_AGE */
			if ((lsa = rde_asext_put(&rr)) != NULL) {
				v = lsa_find(NULL, lsa->hdr.type,
				    lsa->hdr.ls_id, lsa->hdr.adv_rtr);

				/*
				 * if v == NULL no LSA is in the table and
				 * nothing has to be done.
				 */
				if (v)
					lsa_merge(nbrself, lsa, v);
			}
			break;
		case IMSG_RECONF_CONF:
			if ((nconf = malloc(sizeof(struct ospfd_conf))) ==
			    NULL)
				fatal(NULL);
			memcpy(nconf, imsg.data, sizeof(struct ospfd_conf));

			/* the copied list heads are stale, reset them */
			LIST_INIT(&nconf->area_list);
			LIST_INIT(&nconf->cand_list);
			break;
		case IMSG_RECONF_AREA:
			if ((narea = area_new()) == NULL)
				fatal(NULL);
			memcpy(narea, imsg.data, sizeof(struct area));

			LIST_INIT(&narea->iface_list);
			LIST_INIT(&narea->nbr_list);
			RB_INIT(&narea->lsa_tree);
			SIMPLEQ_INIT(&narea->redist_list);

			LIST_INSERT_HEAD(&nconf->area_list, narea, entry);
			break;
		case IMSG_RECONF_REDIST:
			if ((nred= malloc(sizeof(struct redistribute))) == NULL)
				fatal(NULL);
			memcpy(nred, imsg.data, sizeof(struct redistribute));

			SIMPLEQ_INSERT_TAIL(&narea->redist_list, nred, entry);
			break;
		case IMSG_RECONF_IFACE:
			if ((niface = malloc(sizeof(struct iface))) == NULL)
				fatal(NULL);
			memcpy(niface, imsg.data, sizeof(struct iface));

			LIST_INIT(&niface->nbr_list);
			TAILQ_INIT(&niface->ls_ack_list);
			TAILQ_INIT(&niface->auth_md_list);

			niface->area = narea;
			LIST_INSERT_HEAD(&narea->iface_list, niface, entry);

			break;
		case IMSG_RECONF_END:
			/* switch over to the new config */
			merge_config(rdeconf, nconf);
			nconf = NULL;
			break;
		default:
			log_debug("rde_dispatch_parent: unexpected imsg %d",
			    imsg.hdr.type);
			break;
		}
		imsg_free(&imsg);
	}
	if (!shut)
		imsg_event_add(ibuf);
	else {
		/* this pipe is dead, so remove the event handler */
		event_del(&ibuf->ev);
		event_loopexit(NULL);
	}
}
713 
714 u_int32_t
715 rde_router_id(void)
716 {
717 	return (rdeconf->rtr_id.s_addr);
718 }
719 
720 struct area *
721 rde_backbone_area(void)
722 {
723 	struct in_addr	id;
724 
725 	id.s_addr = INADDR_ANY;
726 
727 	return (area_find(rdeconf, id));
728 }
729 
/*
 * Ask the parent process to install/change a kernel route.  One kroute
 * record is appended to the imsg for every valid nexthop of the node.
 * NOTE(review): if imsg_create() fails the update is silently dropped.
 */
void
rde_send_change_kroute(struct rt_node *r)
{
	int			 krcount = 0;
	struct kroute		 kr;
	struct rt_nexthop	*rn;
	struct buf		*wbuf;

	if ((wbuf = imsg_create(ibuf_main, IMSG_KROUTE_CHANGE, 0, 0,
	    sizeof(kr))) == NULL) {
		return;
	}

	TAILQ_FOREACH(rn, &r->nexthop, entry) {
		if (rn->invalid)
			continue;
		krcount++;

		bzero(&kr, sizeof(kr));
		kr.prefix.s_addr = r->prefix.s_addr;
		kr.nexthop.s_addr = rn->nexthop.s_addr;
		kr.prefixlen = r->prefixlen;
		kr.ext_tag = r->ext_tag;
		imsg_add(wbuf, &kr, sizeof(kr));
	}
	/* callers must never pass a route without at least one valid nexthop */
	if (krcount == 0)
		fatalx("rde_send_change_kroute: no valid nexthop found");
	imsg_close(ibuf_main, wbuf);
}
759 
760 void
761 rde_send_delete_kroute(struct rt_node *r)
762 {
763 	struct kroute	 kr;
764 
765 	bzero(&kr, sizeof(kr));
766 	kr.prefix.s_addr = r->prefix.s_addr;
767 	kr.prefixlen = r->prefixlen;
768 
769 	imsg_compose(ibuf_main, IMSG_KROUTE_DELETE, 0, 0, &kr, sizeof(kr));
770 }
771 
/*
 * Compose the global daemon summary (router ID, SPF timers, area and
 * AS-external LSA counts, uptime) and send it to ospfctl via ospfe.
 */
void
rde_send_summary(pid_t pid)
{
	static struct ctl_sum	 sumctl;
	struct timeval		 now;
	struct area		*area;
	struct vertex		*v;

	bzero(&sumctl, sizeof(struct ctl_sum));

	sumctl.rtr_id.s_addr = rde_router_id();
	sumctl.spf_delay = rdeconf->spf_delay;
	sumctl.spf_hold_time = rdeconf->spf_hold_time;

	LIST_FOREACH(area, &rdeconf->area_list, entry)
		sumctl.num_area++;

	/* count AS-external LSAs in the global tree */
	RB_FOREACH(v, lsa_tree, &asext_tree)
		sumctl.num_ext_lsa++;

	gettimeofday(&now, NULL);
	/* guard against the wall clock having been stepped backwards */
	if (rdeconf->uptime < now.tv_sec)
		sumctl.uptime = now.tv_sec - rdeconf->uptime;
	else
		sumctl.uptime = 0;

	sumctl.rfc1583compat = rdeconf->rfc1583compat;

	rde_imsg_compose_ospfe(IMSG_CTL_SHOW_SUM, 0, pid, &sumctl,
	    sizeof(sumctl));
}
803 
/*
 * Compose the per-area summary (interface, adjacency and LSA counts,
 * number of SPF runs) and send it to ospfctl via ospfe.
 */
void
rde_send_summary_area(struct area *area, pid_t pid)
{
	static struct ctl_sum_area	 sumareactl;
	struct iface			*iface;
	struct rde_nbr			*nbr;
	struct lsa_tree			*tree = &area->lsa_tree;
	struct vertex			*v;

	bzero(&sumareactl, sizeof(struct ctl_sum_area));

	sumareactl.area.s_addr = area->id.s_addr;
	sumareactl.num_spf_calc = area->num_spf_calc;

	LIST_FOREACH(iface, &area->iface_list, entry)
		sumareactl.num_iface++;

	/* count fully adjacent neighbors, excluding ourselves */
	LIST_FOREACH(nbr, &area->nbr_list, entry)
		if (nbr->state == NBR_STA_FULL && !nbr->self)
			sumareactl.num_adj_nbr++;

	RB_FOREACH(v, lsa_tree, tree)
		sumareactl.num_lsa++;

	rde_imsg_compose_ospfe(IMSG_CTL_SHOW_SUM_AREA, 0, pid, &sumareactl,
	    sizeof(sumareactl));
}
831 
/* neighbor table: hash of rde_nbr chains keyed by imsg peerid */
LIST_HEAD(rde_nbr_head, rde_nbr);

struct nbr_table {
	struct rde_nbr_head	*hashtbl;
	u_int32_t		 hashmask;	/* size - 1; size is a power of two */
} rdenbrtable;

/* map a peerid to its hash chain */
#define RDE_NBR_HASH(x)		\
	&rdenbrtable.hashtbl[(x) & rdenbrtable.hashmask]
841 
/*
 * Allocate the neighbor hash table (rounded up to a power of two so the
 * hash can mask instead of modulo) and insert the pseudo-neighbor
 * representing this router itself.
 */
void
rde_nbr_init(u_int32_t hashsize)
{
	struct rde_nbr_head	*head;
	u_int32_t		 hs, i;

	/* round hashsize up to the next power of two */
	for (hs = 1; hs < hashsize; hs <<= 1)
		;
	rdenbrtable.hashtbl = calloc(hs, sizeof(struct rde_nbr_head));
	if (rdenbrtable.hashtbl == NULL)
		fatal("rde_nbr_init");

	for (i = 0; i < hs; i++)
		LIST_INIT(&rdenbrtable.hashtbl[i]);

	rdenbrtable.hashmask = hs - 1;

	/* the self neighbor is used to merge self-originated LSAs */
	if ((nbrself = calloc(1, sizeof(*nbrself))) == NULL)
		fatal("rde_nbr_init");

	nbrself->id.s_addr = rde_router_id();
	nbrself->peerid = NBR_IDSELF;
	nbrself->state = NBR_STA_DOWN;
	nbrself->self = 1;
	head = RDE_NBR_HASH(NBR_IDSELF);
	LIST_INSERT_HEAD(head, nbrself, hash);
}
869 
/*
 * Release the self neighbor and the hash table itself.  Called at
 * shutdown; the regular neighbors were already freed via area_del().
 */
void
rde_nbr_free(void)
{
	free(nbrself);
	free(rdenbrtable.hashtbl);
}
876 
877 struct rde_nbr *
878 rde_nbr_find(u_int32_t peerid)
879 {
880 	struct rde_nbr_head	*head;
881 	struct rde_nbr		*nbr;
882 
883 	head = RDE_NBR_HASH(peerid);
884 
885 	LIST_FOREACH(nbr, head, hash) {
886 		if (nbr->peerid == peerid)
887 			return (nbr);
888 	}
889 
890 	return (NULL);
891 }
892 
/*
 * Create a new neighbor from the template `new` sent by ospfe and link
 * it into the hash table and its area's neighbor list.  Returns NULL
 * if a neighbor with the same peerid already exists; fatal if the
 * template references an unknown area.
 */
struct rde_nbr *
rde_nbr_new(u_int32_t peerid, struct rde_nbr *new)
{
	struct rde_nbr_head	*head;
	struct rde_nbr		*nbr;
	struct area		*area;

	if (rde_nbr_find(peerid))
		return (NULL);
	if ((area = area_find(rdeconf, new->area_id)) == NULL)
		fatalx("rde_nbr_new: unknown area");

	if ((nbr = calloc(1, sizeof(*nbr))) == NULL)
		fatal("rde_nbr_new");

	/* copy the template, then fix up the fields owned by the RDE */
	memcpy(nbr, new, sizeof(*nbr));
	nbr->peerid = peerid;
	nbr->area = area;

	TAILQ_INIT(&nbr->req_list);

	head = RDE_NBR_HASH(peerid);
	LIST_INSERT_HEAD(head, nbr, hash);
	LIST_INSERT_HEAD(&area->nbr_list, nbr, entry);

	return (nbr);
}
920 
921 void
922 rde_nbr_del(struct rde_nbr *nbr)
923 {
924 	if (nbr == NULL)
925 		return;
926 
927 	rde_req_list_free(nbr);
928 
929 	LIST_REMOVE(nbr, entry);
930 	LIST_REMOVE(nbr, hash);
931 
932 	free(nbr);
933 }
934 
935 int
936 rde_nbr_loading(struct area *area)
937 {
938 	struct rde_nbr		*nbr;
939 	int			 checkall = 0;
940 
941 	if (area == NULL) {
942 		area = LIST_FIRST(&rdeconf->area_list);
943 		checkall = 1;
944 	}
945 
946 	while (area != NULL) {
947 		LIST_FOREACH(nbr, &area->nbr_list, entry) {
948 			if (nbr->self)
949 				continue;
950 			if (nbr->state & NBR_STA_XCHNG ||
951 			    nbr->state & NBR_STA_LOAD)
952 				return (1);
953 		}
954 		if (!checkall)
955 			break;
956 		area = LIST_NEXT(area, entry);
957 	}
958 
959 	return (0);
960 }
961 
962 struct rde_nbr *
963 rde_nbr_self(struct area *area)
964 {
965 	struct rde_nbr		*nbr;
966 
967 	LIST_FOREACH(nbr, &area->nbr_list, entry)
968 		if (nbr->self)
969 			return (nbr);
970 
971 	/* this may not happen */
972 	fatalx("rde_nbr_self: area without self");
973 	return (NULL);
974 }
975 
976 /*
977  * LSA req list
978  */
979 void
980 rde_req_list_add(struct rde_nbr *nbr, struct lsa_hdr *lsa)
981 {
982 	struct rde_req_entry	*le;
983 
984 	if ((le = calloc(1, sizeof(*le))) == NULL)
985 		fatal("rde_req_list_add");
986 
987 	TAILQ_INSERT_TAIL(&nbr->req_list, le, entry);
988 	le->type = lsa->type;
989 	le->ls_id = lsa->ls_id;
990 	le->adv_rtr = lsa->adv_rtr;
991 }
992 
993 int
994 rde_req_list_exists(struct rde_nbr *nbr, struct lsa_hdr *lsa_hdr)
995 {
996 	struct rde_req_entry	*le;
997 
998 	TAILQ_FOREACH(le, &nbr->req_list, entry) {
999 		if ((lsa_hdr->type == le->type) &&
1000 		    (lsa_hdr->ls_id == le->ls_id) &&
1001 		    (lsa_hdr->adv_rtr == le->adv_rtr))
1002 			return (1);
1003 	}
1004 	return (0);
1005 }
1006 
1007 void
1008 rde_req_list_del(struct rde_nbr *nbr, struct lsa_hdr *lsa_hdr)
1009 {
1010 	struct rde_req_entry	*le;
1011 
1012 	TAILQ_FOREACH(le, &nbr->req_list, entry) {
1013 		if ((lsa_hdr->type == le->type) &&
1014 		    (lsa_hdr->ls_id == le->ls_id) &&
1015 		    (lsa_hdr->adv_rtr == le->adv_rtr)) {
1016 			TAILQ_REMOVE(&nbr->req_list, le, entry);
1017 			free(le);
1018 			return;
1019 		}
1020 	}
1021 }
1022 
1023 void
1024 rde_req_list_free(struct rde_nbr *nbr)
1025 {
1026 	struct rde_req_entry	*le;
1027 
1028 	while ((le = TAILQ_FIRST(&nbr->req_list)) != NULL) {
1029 		TAILQ_REMOVE(&nbr->req_list, le, entry);
1030 		free(le);
1031 	}
1032 }
1033 
1034 /*
1035  * as-external LSA handling
1036  */
/*
 * Build an AS-external LSA for a redistributed kernel route, unless the
 * prefix is already announced as a (stub) network LSA on one of our
 * interfaces, in which case NULL is returned.
 */
struct lsa *
rde_asext_get(struct rroute *rr)
{
	struct area	*area;
	struct iface	*iface;

	/* check all interfaces for a matching connected network */
	LIST_FOREACH(area, &rdeconf->area_list, entry)
		LIST_FOREACH(iface, &area->iface_list, entry) {
			if ((iface->addr.s_addr & iface->mask.s_addr) ==
			    rr->kr.prefix.s_addr && iface->mask.s_addr ==
			    prefixlen2mask(rr->kr.prefixlen)) {
				/* already announced as (stub) net LSA */
				log_debug("rde_asext_get: %s/%d is net LSA",
				    inet_ntoa(rr->kr.prefix), rr->kr.prefixlen);
				return (NULL);
			}
		}

	/* update of seqnum is done by lsa_merge */
	return (orig_asext_lsa(rr, DEFAULT_AGE));
}
1058 
/*
 * Withdraw the AS-external LSA for a removed kernel route by building
 * a MAX_AGE copy that the caller refloods via lsa_merge().
 */
struct lsa *
rde_asext_put(struct rroute *rr)
{
	/*
	 * just try to remove the LSA. If the prefix is announced as
	 * stub net LSA lsa_find() will fail later and nothing will happen.
	 */

	/* remove by reflooding with MAX_AGE */
	return (orig_asext_lsa(rr, MAX_AGE));
}
1070 
1071 /*
1072  * summary LSA stuff
1073  */
/*
 * Decide whether a summary LSA for route `rte` must be originated into
 * `area` and, if so, create/refresh it via lsa_merge().  Implements the
 * inter-area route summarization rules of RFC 2328 section 12.4.3.
 */
void
rde_summary_update(struct rt_node *rte, struct area *area)
{
	struct rt_nexthop	*rn;
	struct rt_node		*nr;
	struct vertex		*v = NULL;
	struct lsa		*lsa;
	u_int8_t		 type = 0;

	/* first check if we actually need to announce this route */
	if (!(rte->d_type == DT_NET || rte->flags & OSPF_RTR_E))
		return;
	/* never create summaries for as-ext LSA */
	if (rte->p_type == PT_TYPE1_EXT || rte->p_type == PT_TYPE2_EXT)
		return;
	/* no need for summary LSA in the originating area */
	if (rte->area.s_addr == area->id.s_addr)
		return;
	/* no need to originate inter-area routes to the backbone */
	if (rte->p_type == PT_INTER_AREA && area->id.s_addr == INADDR_ANY)
		return;
	/* nexthop check, nexthop part of area -> no summary */
	TAILQ_FOREACH(rn, &rte->nexthop, entry) {
		nr = rt_lookup(DT_NET, rn->nexthop.s_addr);
		if (nr && nr->area.s_addr == area->id.s_addr)
			continue;
		break;
	}
	if (rn == NULL)	/* all nexthops belong to this area */
		return;

	/* unreachable routes are never summarized */
	if (rte->cost >= LS_INFINITY)
		return;
	/* TODO AS border router specific checks */
	/* TODO inter-area network route stuff */
	/* TODO intra-area stuff -- condense LSA ??? */

	if (rte->d_type == DT_NET) {
		type = LSA_TYPE_SUM_NETWORK;
	} else if (rte->d_type == DT_RTR) {
		if (area->stub)
			/* do not redistribute type 4 LSA into stub areas */
			return;
		type = LSA_TYPE_SUM_ROUTER;
	} else
		fatalx("rde_summary_update: unknown route type");

	/* update lsa but only if it was changed */
	v = lsa_find(area, type, rte->prefix.s_addr, rde_router_id());
	lsa = orig_sum_lsa(rte, area, type, rte->invalid);
	lsa_merge(rde_nbr_self(area), lsa, v);

	/* lsa_merge may have inserted a fresh vertex, look it up again */
	if (v == NULL)
		v = lsa_find(area, type, rte->prefix.s_addr, rde_router_id());

	/* suppressed/deleted routes are not found in the second lsa_find */
	if (v)
		v->cost = rte->cost;
}
1133 
1134 
1135 /*
1136  * functions for self-originated LSA
1137  */
/*
 * Build a self-originated AS-external LSA for a redistributed kernel
 * route.  `age` is DEFAULT_AGE for announcements and MAX_AGE for
 * withdrawals.  The caller owns the returned allocation.
 */
struct lsa *
orig_asext_lsa(struct rroute *rr, u_int16_t age)
{
	struct lsa	*lsa;
	u_int16_t	 len;

	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_asext);
	if ((lsa = calloc(1, len)) == NULL)
		fatal("orig_asext_lsa");

	log_debug("orig_asext_lsa: %s/%d age %d",
	    inet_ntoa(rr->kr.prefix), rr->kr.prefixlen, age);

	/* LSA header */
	lsa->hdr.age = htons(age);
	lsa->hdr.opts = area_ospf_options(NULL);
	lsa->hdr.type = LSA_TYPE_EXTERNAL;
	lsa->hdr.adv_rtr = rdeconf->rtr_id.s_addr;
	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
	lsa->hdr.len = htons(len);

	/* prefix and mask */
	/*
	 * TODO ls_id must be unique, for overlapping routes this may
	 * not be true. In this case a hack needs to be done to
	 * make the ls_id unique.
	 */
	lsa->hdr.ls_id = rr->kr.prefix.s_addr;
	lsa->data.asext.mask = prefixlen2mask(rr->kr.prefixlen);

	/*
	 * nexthop -- on connected routes we are the nexthop,
	 * on all other cases we announce the true nexthop.
	 * XXX this is wrong as the true nexthop may be outside
	 * of the ospf cloud and so unreachable. For now we force
	 * all traffic to be directed to us.
	 */
	lsa->data.asext.fw_addr = 0;

	lsa->data.asext.metric = htonl(rr->metric);
	lsa->data.asext.ext_tag = htonl(rr->kr.ext_tag);

	/* checksum is computed over the LSA with the checksum field zeroed */
	lsa->hdr.ls_chksum = 0;
	lsa->hdr.ls_chksum =
	    htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));

	return (lsa);
}
1186 
/*
 * Build a self-originated summary LSA (type 3 network or type 4 router)
 * for route `rte` into `area`.  `invalid` routes are flushed by setting
 * MAX_AGE.  The caller owns the returned allocation.
 */
struct lsa *
orig_sum_lsa(struct rt_node *rte, struct area *area, u_int8_t type, int invalid)
{
	struct lsa	*lsa;
	u_int16_t	 len;

	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_sum);
	if ((lsa = calloc(1, len)) == NULL)
		fatal("orig_sum_lsa");

	/* LSA header */
	lsa->hdr.age = htons(invalid ? MAX_AGE : DEFAULT_AGE);
	lsa->hdr.opts = area_ospf_options(area);
	lsa->hdr.type = type;
	lsa->hdr.adv_rtr = rdeconf->rtr_id.s_addr;
	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
	lsa->hdr.len = htons(len);

	/* prefix and mask */
	/*
	 * TODO ls_id must be unique, for overlapping routes this may
	 * not be true. In this case a hack needs to be done to
	 * make the ls_id unique.
	 */
	lsa->hdr.ls_id = rte->prefix.s_addr;
	if (type == LSA_TYPE_SUM_NETWORK)
		lsa->data.sum.mask = prefixlen2mask(rte->prefixlen);
	else
		lsa->data.sum.mask = 0;	/* must be zero per RFC */

	lsa->data.sum.metric = htonl(rte->cost & LSA_METRIC_MASK);

	/* checksum is computed over the LSA with the checksum field zeroed */
	lsa->hdr.ls_chksum = 0;
	lsa->hdr.ls_chksum =
	    htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));

	return (lsa);
}
1225