1 /*	$OpenBSD: rde.c,v 1.11 2008/02/11 13:48:39 norby Exp $ */
2 
3 /*
4  * Copyright (c) 2004, 2005 Claudio Jeker <claudio@openbsd.org>
5  * Copyright (c) 2004 Esben Norby <norby@openbsd.org>
6  * Copyright (c) 2003, 2004 Henning Brauer <henning@openbsd.org>
7  *
8  * Permission to use, copy, modify, and distribute this software for any
9  * purpose with or without fee is hereby granted, provided that the above
10  * copyright notice and this permission notice appear in all copies.
11  *
12  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
13  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
14  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
15  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
16  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19  */
20 
21 #include <sys/types.h>
22 #include <sys/socket.h>
23 #include <sys/queue.h>
24 #include <netinet/in.h>
25 #include <arpa/inet.h>
26 #include <err.h>
27 #include <errno.h>
28 #include <stdlib.h>
29 #include <signal.h>
30 #include <string.h>
31 #include <pwd.h>
32 #include <unistd.h>
33 #include <event.h>
34 
35 #include "ospf6.h"
36 #include "ospf6d.h"
37 #include "ospfe.h"
38 #include "log.h"
39 #include "rde.h"
40 
41 void		 rde_sig_handler(int, short, void *);
42 void		 rde_shutdown(void);
43 void		 rde_dispatch_imsg(int, short, void *);
44 void		 rde_dispatch_parent(int, short, void *);
45 void		 rde_dump_area(struct area *, int, pid_t);
46 
47 void		 rde_send_summary(pid_t);
48 void		 rde_send_summary_area(struct area *, pid_t);
49 void		 rde_nbr_init(u_int32_t);
50 void		 rde_nbr_free(void);
51 struct rde_nbr	*rde_nbr_find(u_int32_t);
52 struct rde_nbr	*rde_nbr_new(u_int32_t, struct rde_nbr *);
53 void		 rde_nbr_del(struct rde_nbr *);
54 
55 void		 rde_req_list_add(struct rde_nbr *, struct lsa_hdr *);
56 int		 rde_req_list_exists(struct rde_nbr *, struct lsa_hdr *);
57 void		 rde_req_list_del(struct rde_nbr *, struct lsa_hdr *);
58 void		 rde_req_list_free(struct rde_nbr *);
59 
60 struct lsa	*rde_asext_get(struct rroute *);
61 struct lsa	*rde_asext_put(struct rroute *);
62 
63 struct lsa	*orig_asext_lsa(struct rroute *, u_int16_t);
64 struct lsa	*orig_sum_lsa(struct rt_node *, struct area *, u_int8_t, int);
65 
66 struct ospfd_conf	*rdeconf = NULL, *nconf = NULL;
67 struct imsgbuf		*ibuf_ospfe;
68 struct imsgbuf		*ibuf_main;
69 struct rde_nbr		*nbrself;
70 struct lsa_tree		 asext_tree;
71 
72 /* ARGSUSED */
73 void
74 rde_sig_handler(int sig, short event, void *arg)
75 {
76 	/*
77 	 * signal handler rules don't apply, libevent decouples for us
78 	 */
79 
80 	switch (sig) {
81 	case SIGINT:
82 	case SIGTERM:
83 		rde_shutdown();
84 		/* NOTREACHED */
85 	default:
86 		fatalx("unexpected signal");
87 	}
88 }
89 
90 /* route decision engine */
91 pid_t
92 rde(struct ospfd_conf *xconf, int pipe_parent2rde[2], int pipe_ospfe2rde[2],
93     int pipe_parent2ospfe[2])
94 {
95 	struct event		 ev_sigint, ev_sigterm;
96 	struct timeval		 now;
97 	struct passwd		*pw;
98 	struct redistribute	*r;
99 	pid_t			 pid;
100 
101 	switch (pid = fork()) {
102 	case -1:
103 		fatal("cannot fork");
104 		/* NOTREACHED */
105 	case 0:
106 		break;
107 	default:
108 		return (pid);
109 	}
110 
111 	rdeconf = xconf;
112 
113 	if ((pw = getpwnam(OSPF6D_USER)) == NULL)
114 		fatal("getpwnam");
115 
116 	if (chroot(pw->pw_dir) == -1)
117 		fatal("chroot");
118 	if (chdir("/") == -1)
119 		fatal("chdir(\"/\")");
120 
121 	setproctitle("route decision engine");
122 	ospfd_process = PROC_RDE_ENGINE;
123 
124 	if (setgroups(1, &pw->pw_gid) ||
125 	    setresgid(pw->pw_gid, pw->pw_gid, pw->pw_gid) ||
126 	    setresuid(pw->pw_uid, pw->pw_uid, pw->pw_uid))
127 		fatal("can't drop privileges");
128 
129 	event_init();
130 	rde_nbr_init(NBR_HASHSIZE);
131 	lsa_init(&asext_tree);
132 
133 	/* setup signal handler */
134 	signal_set(&ev_sigint, SIGINT, rde_sig_handler, NULL);
135 	signal_set(&ev_sigterm, SIGTERM, rde_sig_handler, NULL);
136 	signal_add(&ev_sigint, NULL);
137 	signal_add(&ev_sigterm, NULL);
138 	signal(SIGPIPE, SIG_IGN);
139 	signal(SIGHUP, SIG_IGN);
140 
141 	/* setup pipes */
142 	close(pipe_ospfe2rde[0]);
143 	close(pipe_parent2rde[0]);
144 	close(pipe_parent2ospfe[0]);
145 	close(pipe_parent2ospfe[1]);
146 
147 	if ((ibuf_ospfe = malloc(sizeof(struct imsgbuf))) == NULL ||
148 	    (ibuf_main = malloc(sizeof(struct imsgbuf))) == NULL)
149 		fatal(NULL);
150 	imsg_init(ibuf_ospfe, pipe_ospfe2rde[1], rde_dispatch_imsg);
151 	imsg_init(ibuf_main, pipe_parent2rde[1], rde_dispatch_parent);
152 
153 	/* setup event handler */
154 	ibuf_ospfe->events = EV_READ;
155 	event_set(&ibuf_ospfe->ev, ibuf_ospfe->fd, ibuf_ospfe->events,
156 	    ibuf_ospfe->handler, ibuf_ospfe);
157 	event_add(&ibuf_ospfe->ev, NULL);
158 
159 	ibuf_main->events = EV_READ;
160 	event_set(&ibuf_main->ev, ibuf_main->fd, ibuf_main->events,
161 	    ibuf_main->handler, ibuf_main);
162 	event_add(&ibuf_main->ev, NULL);
163 
164 	evtimer_set(&rdeconf->ev, spf_timer, rdeconf);
165 	cand_list_init();
166 	rt_init();
167 
168 	while ((r = SIMPLEQ_FIRST(&rdeconf->redist_list)) != NULL) {
169 		SIMPLEQ_REMOVE_HEAD(&rdeconf->redist_list, entry);
170 		free(r);
171 	}
172 
173 	gettimeofday(&now, NULL);
174 	rdeconf->uptime = now.tv_sec;
175 
176 	event_dispatch();
177 
178 	rde_shutdown();
179 	/* NOTREACHED */
180 
181 	return (0);
182 }
183 
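/*
 * Tear down the RDE: stop the SPF timer, clear the candidate list and
 * routing table, free all areas and neighbors, release the imsg buffers
 * and exit.
 */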
184 void
185 rde_shutdown(void)
186 {
187 	struct area	*a;
188 
189 	stop_spf_timer(rdeconf);
190 	cand_list_clr();
191 	rt_clear();
192 
193 	while ((a = LIST_FIRST(&rdeconf->area_list)) != NULL) {
194 		LIST_REMOVE(a, entry);
195 		area_del(a);
196 	}
197 	rde_nbr_free();
198 
199 	msgbuf_clear(&ibuf_ospfe->w);
200 	free(ibuf_ospfe);
201 	msgbuf_clear(&ibuf_main->w);
202 	free(ibuf_main);
203 	free(rdeconf);
204 
205 	log_info("route decision engine exiting");
206 	_exit(0);
207 }
208 
209 int
210 rde_imsg_compose_ospfe(int type, u_int32_t peerid, pid_t pid, void *data,
211     u_int16_t datalen)
212 {
213 	return (imsg_compose(ibuf_ospfe, type, peerid, pid, data, datalen));
214 }
215 
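/*
 * Dispatch imsgs received from the OSPF engine (ospfe): neighbor
 * up/down/state changes, database exchange (DB snapshot, DD, LS request,
 * LS update, LS MaxAge) and the various ospfctl show requests.
 */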
216 /* ARGSUSED */
217 void
218 rde_dispatch_imsg(int fd, short event, void *bula)
219 {
220 	struct imsgbuf		*ibuf = bula;
221 	struct imsg		 imsg;
222 	struct in_addr		 aid;
223 	struct ls_req_hdr	 req_hdr;
224 	struct lsa_hdr		 lsa_hdr, *db_hdr;
225 	struct rde_nbr		 rn, *nbr;
226 	struct timespec		 tp;
227 	struct lsa		*lsa;
228 	struct area		*area;
229 	struct vertex		*v;
230 	char			*buf;
231 	ssize_t			 n;
232 	time_t			 now;
233 	int			 r, state, self, shut = 0;
234 	u_int16_t		 l;
235 
236 	switch (event) {
237 	case EV_READ:
238 		if ((n = imsg_read(ibuf)) == -1)
239 			fatal("imsg_read error");
240 		if (n == 0)	/* connection closed */
241 			shut = 1;
242 		break;
243 	case EV_WRITE:
244 		if (msgbuf_write(&ibuf->w) == -1)
245 			fatal("msgbuf_write");
246 		imsg_event_add(ibuf);
247 		return;
248 	default:
249 		fatalx("unknown event");
250 	}
251 
252 	clock_gettime(CLOCK_MONOTONIC, &tp);
253 	now = tp.tv_sec;
254 
255 	for (;;) {
256 		if ((n = imsg_get(ibuf, &imsg)) == -1)
257 			fatal("rde_dispatch_imsg: imsg_get error");
258 		if (n == 0)
259 			break;
260 
261 		switch (imsg.hdr.type) {
262 		case IMSG_NEIGHBOR_UP:
263 			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(rn))
264 				fatalx("invalid size of OE request");
265 			memcpy(&rn, imsg.data, sizeof(rn));
266 
267 			if (rde_nbr_find(imsg.hdr.peerid))
268 				fatalx("rde_dispatch_imsg: "
269 				    "neighbor already exists");
270 			rde_nbr_new(imsg.hdr.peerid, &rn);
271 			break;
272 		case IMSG_NEIGHBOR_DOWN:
273 			rde_nbr_del(rde_nbr_find(imsg.hdr.peerid));
274 			break;
275 		case IMSG_NEIGHBOR_CHANGE:
276 			if (imsg.hdr.len - IMSG_HEADER_SIZE != sizeof(state))
277 				fatalx("invalid size of OE request");
278 			memcpy(&state, imsg.data, sizeof(state));
279 
280 			nbr = rde_nbr_find(imsg.hdr.peerid);
281 			if (nbr == NULL)
282 				break;
283 
284 			if (state != nbr->state && (nbr->state & NBR_STA_FULL ||
285 			    state & NBR_STA_FULL))
286 				area_track(nbr->area, state);
287 
288 			nbr->state = state;
289 			if (nbr->state & NBR_STA_FULL)
290 				rde_req_list_free(nbr);
291 			break;
292 		case IMSG_DB_SNAPSHOT:
293 			nbr = rde_nbr_find(imsg.hdr.peerid);
294 			if (nbr == NULL)
295 				break;
296 
297 			lsa_snap(nbr->area, imsg.hdr.peerid);
298 
299 			imsg_compose(ibuf_ospfe, IMSG_DB_END, imsg.hdr.peerid,
300 			    0, NULL, 0);
301 			break;
302 		case IMSG_DD:
303 			nbr = rde_nbr_find(imsg.hdr.peerid);
304 			if (nbr == NULL)
305 				break;
306 
307 			buf = imsg.data;
308 			for (l = imsg.hdr.len - IMSG_HEADER_SIZE;
309 			    l >= sizeof(lsa_hdr); l -= sizeof(lsa_hdr)) {
310 				memcpy(&lsa_hdr, buf, sizeof(lsa_hdr));
311 				buf += sizeof(lsa_hdr);
312 
313 				v = lsa_find(nbr->iface, lsa_hdr.type,
314 				    lsa_hdr.ls_id, lsa_hdr.adv_rtr);
315 				if (v == NULL)
316 					db_hdr = NULL;
317 				else
318 					db_hdr = &v->lsa->hdr;
319 
320 				if (lsa_newer(&lsa_hdr, db_hdr) > 0) {
321 					/*
322 					 * only request LSAs that are
323 					 * newer or missing
324 					 */
325 					rde_req_list_add(nbr, &lsa_hdr);
326 					imsg_compose(ibuf_ospfe, IMSG_DD,
327 					    imsg.hdr.peerid, 0, &lsa_hdr,
328 					    sizeof(lsa_hdr));
329 				}
330 			}
331 			if (l != 0)
332 				log_warnx("rde_dispatch_imsg: peerid %u, "
333 				    "trailing garbage in Database Description "
334 				    "packet", imsg.hdr.peerid);
335 
336 			imsg_compose(ibuf_ospfe, IMSG_DD_END, imsg.hdr.peerid,
337 			    0, NULL, 0);
338 			break;
339 		case IMSG_LS_REQ:
340 			nbr = rde_nbr_find(imsg.hdr.peerid);
341 			if (nbr == NULL)
342 				break;
343 
344 			buf = imsg.data;
345 			for (l = imsg.hdr.len - IMSG_HEADER_SIZE;
346 			    l >= sizeof(req_hdr); l -= sizeof(req_hdr)) {
347 				memcpy(&req_hdr, buf, sizeof(req_hdr));
348 				buf += sizeof(req_hdr);
349 
350 				if ((v = lsa_find(nbr->iface,
351 				    ntohl(req_hdr.type), req_hdr.ls_id,
352 				    req_hdr.adv_rtr)) == NULL) {
353 					imsg_compose(ibuf_ospfe, IMSG_LS_BADREQ,
354 					    imsg.hdr.peerid, 0, NULL, 0);
355 					continue;
356 				}
357 				imsg_compose(ibuf_ospfe, IMSG_LS_UPD,
358 				    imsg.hdr.peerid, 0, v->lsa,
359 				    ntohs(v->lsa->hdr.len));
360 			}
361 			if (l != 0)
362 				log_warnx("rde_dispatch_imsg: peerid %u, "
363 				    "trailing garbage in LS Request "
364 				    "packet", imsg.hdr.peerid);
365 			break;
366 		case IMSG_LS_UPD:
367 			nbr = rde_nbr_find(imsg.hdr.peerid);
368 			if (nbr == NULL)
369 				break;
370 
371 			lsa = malloc(imsg.hdr.len - IMSG_HEADER_SIZE);
372 			if (lsa == NULL)
373 				fatal(NULL);
374 			memcpy(lsa, imsg.data, imsg.hdr.len - IMSG_HEADER_SIZE);
375 
376 			if (!lsa_check(nbr, lsa,
377 			    imsg.hdr.len - IMSG_HEADER_SIZE)) {
378 				free(lsa);
379 				break;
380 			}
381 
382 			v = lsa_find(nbr->iface, lsa->hdr.type, lsa->hdr.ls_id,
383 				    lsa->hdr.adv_rtr);
384 			if (v == NULL)
385 				db_hdr = NULL;
386 			else
387 				db_hdr = &v->lsa->hdr;
388 
389 			if (nbr->self) {
390 				lsa_merge(nbr, lsa, v);
391 				/* lsa_merge frees the right lsa */
392 				break;
393 			}
394 
395 			r = lsa_newer(&lsa->hdr, db_hdr);
396 			if (r > 0) {
397 				/* new LSA newer than DB */
398 				if (v && v->flooded &&
399 				    v->changed + MIN_LS_ARRIVAL >= now) {
400 					free(lsa);
401 					break;
402 				}
403 
404 				rde_req_list_del(nbr, &lsa->hdr);
405 
406 				if (!(self = lsa_self(nbr, lsa, v)))
407 					if (lsa_add(nbr, lsa))
408 						/* delayed lsa */
409 						break;
410 
411 				/* flood and perhaps ack LSA */
412 				imsg_compose(ibuf_ospfe, IMSG_LS_FLOOD,
413 				    imsg.hdr.peerid, 0, lsa,
414 				    ntohs(lsa->hdr.len));
415 
416 				/* reflood self originated LSA */
417 				if (self && v)
418 					imsg_compose(ibuf_ospfe, IMSG_LS_FLOOD,
419 					    v->peerid, 0, v->lsa,
420 					    ntohs(v->lsa->hdr.len));
421 				/* lsa not added so free it */
422 				if (self)
423 					free(lsa);
424 			} else if (r < 0) {
425 				/* lsa no longer needed */
426 				free(lsa);
427 
428 				/*
429 				 * point 6 of "The Flooding Procedure"
430 				 * We deviate from the RFC here because it
431 				 * makes no sense to reset a session just
432 				 * because an equal LSA is already in the table.
433 				 * Only if the LSA sent is older than the one
434 				 * in the table should the session be reset.
435 				 */
436 				if (rde_req_list_exists(nbr, &lsa->hdr)) {
437 					imsg_compose(ibuf_ospfe, IMSG_LS_BADREQ,
438 					    imsg.hdr.peerid, 0, NULL, 0);
439 					break;
440 				}
441 
442 				/* new LSA older than DB */
443 				if (ntohl(db_hdr->seq_num) == MAX_SEQ_NUM &&
444 				    ntohs(db_hdr->age) == MAX_AGE)
445 					/* seq-num wrap */
446 					break;
447 
448 				if (v->changed + MIN_LS_ARRIVAL >= now)
449 					break;
450 
451 				/* directly send current LSA, no ack */
452 				imsg_compose(ibuf_ospfe, IMSG_LS_UPD,
453 				    imsg.hdr.peerid, 0, v->lsa,
454 				    ntohs(v->lsa->hdr.len));
455 			} else {
456 				/* LSA equal send direct ack */
457 				/* LSA is equal, send a direct ack */
458 				    imsg.hdr.peerid, 0, &lsa->hdr,
459 				    sizeof(lsa->hdr));
460 				free(lsa);
461 			}
462 			break;
463 		case IMSG_LS_MAXAGE:
464 			nbr = rde_nbr_find(imsg.hdr.peerid);
465 			if (nbr == NULL)
466 				break;
467 
468 			if (imsg.hdr.len != IMSG_HEADER_SIZE +
469 			    sizeof(struct lsa_hdr))
470 				fatalx("invalid size of OE request");
471 			memcpy(&lsa_hdr, imsg.data, sizeof(lsa_hdr));
472 
473 			if (rde_nbr_loading(nbr->area))
474 				break;
475 
476 			v = lsa_find(nbr->iface, lsa_hdr.type, lsa_hdr.ls_id,
477 				    lsa_hdr.adv_rtr);
478 			if (v == NULL)
479 				db_hdr = NULL;
480 			else
481 				db_hdr = &v->lsa->hdr;
482 
483 			/*
484 			 * only delete LSA if the one in the db is not newer
485 			 */
486 			if (lsa_newer(db_hdr, &lsa_hdr) <= 0)
487 				lsa_del(nbr, &lsa_hdr);
488 			break;
489 		case IMSG_CTL_SHOW_DATABASE:
490 		case IMSG_CTL_SHOW_DB_EXT:
491 		case IMSG_CTL_SHOW_DB_NET:
492 		case IMSG_CTL_SHOW_DB_RTR:
493 		case IMSG_CTL_SHOW_DB_SELF:
494 		case IMSG_CTL_SHOW_DB_SUM:
495 		case IMSG_CTL_SHOW_DB_ASBR:
496 			if (imsg.hdr.len != IMSG_HEADER_SIZE &&
497 			    imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(aid)) {
498 				log_warnx("rde_dispatch_imsg: wrong imsg len");
499 				break;
500 			}
501 			if (imsg.hdr.len == IMSG_HEADER_SIZE) {
502 				LIST_FOREACH(area, &rdeconf->area_list, entry) {
503 					rde_dump_area(area, imsg.hdr.type,
504 					    imsg.hdr.pid);
505 				}
506 				lsa_dump(&asext_tree, imsg.hdr.type,
507 				    imsg.hdr.pid);
508 			} else {
509 				memcpy(&aid, imsg.data, sizeof(aid));
510 				if ((area = area_find(rdeconf, aid)) != NULL) {
511 					rde_dump_area(area, imsg.hdr.type,
512 					    imsg.hdr.pid);
513 					if (!area->stub)
514 						lsa_dump(&asext_tree,
515 						    imsg.hdr.type,
516 						    imsg.hdr.pid);
517 				}
518 			}
519 			imsg_compose(ibuf_ospfe, IMSG_CTL_END, 0, imsg.hdr.pid,
520 			    NULL, 0);
521 			break;
522 		case IMSG_CTL_SHOW_RIB:
523 			LIST_FOREACH(area, &rdeconf->area_list, entry) {
524 				imsg_compose(ibuf_ospfe, IMSG_CTL_AREA,
525 				    0, imsg.hdr.pid, area, sizeof(*area));
526 
527 				rt_dump(area->id, imsg.hdr.pid, RIB_RTR);
528 				rt_dump(area->id, imsg.hdr.pid, RIB_NET);
529 			}
530 			aid.s_addr = 0;
531 			rt_dump(aid, imsg.hdr.pid, RIB_EXT);
532 
533 			imsg_compose(ibuf_ospfe, IMSG_CTL_END, 0, imsg.hdr.pid,
534 			    NULL, 0);
535 			break;
536 		case IMSG_CTL_SHOW_SUM:
537 			rde_send_summary(imsg.hdr.pid);
538 			LIST_FOREACH(area, &rdeconf->area_list, entry)
539 				rde_send_summary_area(area, imsg.hdr.pid);
540 			imsg_compose(ibuf_ospfe, IMSG_CTL_END, 0, imsg.hdr.pid,
541 			    NULL, 0);
542 			break;
543 		default:
544 			log_debug("rde_dispatch_imsg: unexpected imsg %d",
545 			    imsg.hdr.type);
546 			break;
547 		}
548 		imsg_free(&imsg);
549 	}
550 	if (!shut)
551 		imsg_event_add(ibuf);
552 	else {
553 		/* this pipe is dead, so remove the event handler */
554 		event_del(&ibuf->ev);
555 		event_loopexit(NULL);
556 	}
557 }
558 
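/*
 * Dispatch imsgs received from the parent process: network add/delete
 * requests for AS-external LSAs, kernel route lookups, interface
 * add/delete and reconfiguration messages.
 */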
559 /* ARGSUSED */
560 void
561 rde_dispatch_parent(int fd, short event, void *bula)
562 {
563 	static struct area	*narea;
564 	struct iface		*niface, *iface;
565 	struct imsg		 imsg;
566 	struct kroute		 kr;
567 	struct rroute		 rr;
568 	struct imsgbuf		*ibuf = bula;
569 	struct lsa		*lsa;
570 	struct vertex		*v;
571 	struct rt_node		*rn;
572 	ssize_t			 n;
573 	int			 shut = 0;
574 	unsigned int		 ifindex;
575 
576 	switch (event) {
577 	case EV_READ:
578 		if ((n = imsg_read(ibuf)) == -1)
579 			fatal("imsg_read error");
580 		if (n == 0)	/* connection closed */
581 			shut = 1;
582 		break;
583 	case EV_WRITE:
584 		if (msgbuf_write(&ibuf->w) == -1)
585 			fatal("msgbuf_write");
586 		imsg_event_add(ibuf);
587 		return;
588 	default:
589 		fatalx("unknown event");
590 	}
591 
592 	for (;;) {
593 		if ((n = imsg_get(ibuf, &imsg)) == -1)
594 			fatal("rde_dispatch_parent: imsg_get error");
595 		if (n == 0)
596 			break;
597 
598 		switch (imsg.hdr.type) {
599 		case IMSG_NETWORK_ADD:
600 			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(rr)) {
601 				log_warnx("rde_dispatch_parent: "
602 				    "wrong imsg len");
603 				break;
604 			}
605 			memcpy(&rr, imsg.data, sizeof(rr));
606 
607 			if ((lsa = rde_asext_get(&rr)) != NULL) {
608 				v = lsa_find(NULL, lsa->hdr.type,
609 				    lsa->hdr.ls_id, lsa->hdr.adv_rtr);
610 
611 				lsa_merge(nbrself, lsa, v);
612 			}
613 			break;
614 		case IMSG_NETWORK_DEL:
615 			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(rr)) {
616 				log_warnx("rde_dispatch_parent: "
617 				    "wrong imsg len");
618 				break;
619 			}
620 			memcpy(&rr, imsg.data, sizeof(rr));
621 
622 			if ((lsa = rde_asext_put(&rr)) != NULL) {
623 				v = lsa_find(NULL, lsa->hdr.type,
624 				    lsa->hdr.ls_id, lsa->hdr.adv_rtr);
625 
626 				/*
627 				 * if v == NULL no LSA is in the table and
628 				 * nothing has to be done.
629 				 */
630 				if (v)
631 					lsa_merge(nbrself, lsa, v);
632 			}
633 			break;
634 		case IMSG_KROUTE_GET:
635 			if (imsg.hdr.len != IMSG_HEADER_SIZE + sizeof(kr)) {
636 				log_warnx("rde_dispatch_parent: "
637 				    "wrong imsg len");
638 				break;
639 			}
640 			memcpy(&kr, imsg.data, sizeof(kr));
641 
642 			if ((rn = rt_find(&kr.prefix, kr.prefixlen,
643 			    DT_NET)) != NULL)
644 				rde_send_change_kroute(rn);
645 			else
646 				/* should not happen */
647 				imsg_compose(ibuf_main, IMSG_KROUTE_DELETE, 0,
648 				    0, &kr, sizeof(kr));
649 			break;
650 		case IMSG_IFADD:
651 			if ((niface = malloc(sizeof(struct iface))) == NULL)
652 				fatal(NULL);
653 			memcpy(niface, imsg.data, sizeof(struct iface));
654 
655 			LIST_INIT(&niface->nbr_list);
656 			TAILQ_INIT(&niface->ls_ack_list);
657 			RB_INIT(&niface->lsa_tree);
658 
659 			narea = area_find(rdeconf, niface->area_id);
660 			LIST_INSERT_HEAD(&narea->iface_list, niface, entry);
661 			break;
662 		case IMSG_IFDELETE:
663 			if (imsg.hdr.len != IMSG_HEADER_SIZE +
664 			    sizeof(ifindex))
665 				fatalx("IFINFO imsg with wrong len");
666 
667 			memcpy(&ifindex, imsg.data, sizeof(ifindex));
668 			iface = if_find(ifindex);
669 			if (iface == NULL)
670 				fatalx("interface lost in ospfe");
671 
672 			LIST_REMOVE(iface, entry);
673 			if_del(iface);
674 			break;
675 		case IMSG_RECONF_CONF:
676 			if ((nconf = malloc(sizeof(struct ospfd_conf))) ==
677 			    NULL)
678 				fatal(NULL);
679 			memcpy(nconf, imsg.data, sizeof(struct ospfd_conf));
680 
681 			LIST_INIT(&nconf->area_list);
682 			LIST_INIT(&nconf->cand_list);
683 			break;
684 		case IMSG_RECONF_AREA:
685 			if ((narea = area_new()) == NULL)
686 				fatal(NULL);
687 			memcpy(narea, imsg.data, sizeof(struct area));
688 
689 			LIST_INIT(&narea->iface_list);
690 			LIST_INIT(&narea->nbr_list);
691 			RB_INIT(&narea->lsa_tree);
692 
693 			LIST_INSERT_HEAD(&nconf->area_list, narea, entry);
694 			break;
695 		case IMSG_RECONF_END:
696 			merge_config(rdeconf, nconf);
697 			nconf = NULL;
698 			break;
699 		default:
700 			log_debug("rde_dispatch_parent: unexpected imsg %d",
701 			    imsg.hdr.type);
702 			break;
703 		}
704 		imsg_free(&imsg);
705 	}
706 	if (!shut)
707 		imsg_event_add(ibuf);
708 	else {
709 		/* this pipe is dead, so remove the event handler */
710 		event_del(&ibuf->ev);
711 		event_loopexit(NULL);
712 	}
713 }
714 
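/*
 * Dump one area for ospfctl: the area header, each interface with its
 * link-local LSAs, and finally the area-scope LSAs.
 */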
715 void
716 rde_dump_area(struct area *area, int imsg_type, pid_t pid)
717 {
718 	struct iface	*iface;
719 
720 	/* dump header */
721 	imsg_compose(ibuf_ospfe, IMSG_CTL_AREA, 0, pid, area, sizeof(*area));
722 
723 	/* dump link local lsa */
724 	LIST_FOREACH(iface, &area->iface_list, entry) {
725 		imsg_compose(ibuf_ospfe, IMSG_CTL_IFACE,
726 		    0, pid, iface, sizeof(*iface));
727 		lsa_dump(&iface->lsa_tree, imsg_type, pid);
728 	}
729 
730 	/* dump area lsa */
731 	lsa_dump(&area->lsa_tree, imsg_type, pid);
732 }
733 
734 u_int32_t
735 rde_router_id(void)
736 {
737 	return (rdeconf->rtr_id.s_addr);
738 }
739 
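/*
 * Ask the parent process to install or update the kernel route for r,
 * using the first valid nexthop on the node's nexthop list.
 */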
740 void
741 rde_send_change_kroute(struct rt_node *r)
742 {
743 	struct kroute		 kr;
744 	struct rt_nexthop	*rn;
745 
746 	TAILQ_FOREACH(rn, &r->nexthop, entry) {
747 		if (!rn->invalid)
748 			break;
749 	}
750 	if (!rn)
751 		fatalx("rde_send_change_kroute: no valid nexthop found");
752 
753 	bzero(&kr, sizeof(kr));
754 	kr.prefix = r->prefix;
755 	kr.nexthop = rn->nexthop;
756 	kr.prefixlen = r->prefixlen;
757 	kr.ext_tag = r->ext_tag;
758 
759 	imsg_compose(ibuf_main, IMSG_KROUTE_CHANGE, 0, 0, &kr, sizeof(kr));
760 }
761 
762 void
763 rde_send_delete_kroute(struct rt_node *r)
764 {
765 	struct kroute	 kr;
766 
767 	bzero(&kr, sizeof(kr));
768 	kr.prefix = r->prefix;
769 	kr.prefixlen = r->prefixlen;
770 
771 	imsg_compose(ibuf_main, IMSG_KROUTE_DELETE, 0, 0, &kr, sizeof(kr));
772 }
773 
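/*
 * Send global statistics (router ID, SPF timing, number of areas and
 * AS-external LSAs, uptime) to ospfctl via the OSPF engine.
 */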
774 void
775 rde_send_summary(pid_t pid)
776 {
777 	static struct ctl_sum	 sumctl;
778 	struct timeval		 now;
779 	struct area		*area;
780 	struct vertex		*v;
781 
782 	bzero(&sumctl, sizeof(struct ctl_sum));
783 
784 	sumctl.rtr_id.s_addr = rde_router_id();
785 	sumctl.spf_delay = rdeconf->spf_delay;
786 	sumctl.spf_hold_time = rdeconf->spf_hold_time;
787 
788 	LIST_FOREACH(area, &rdeconf->area_list, entry)
789 		sumctl.num_area++;
790 
791 	RB_FOREACH(v, lsa_tree, &asext_tree)
792 		sumctl.num_ext_lsa++;
793 
794 	gettimeofday(&now, NULL);
795 	if (rdeconf->uptime < now.tv_sec)
796 		sumctl.uptime = now.tv_sec - rdeconf->uptime;
797 	else
798 		sumctl.uptime = 0;
799 
800 	rde_imsg_compose_ospfe(IMSG_CTL_SHOW_SUM, 0, pid, &sumctl,
801 	    sizeof(sumctl));
802 }
803 
804 void
805 rde_send_summary_area(struct area *area, pid_t pid)
806 {
807 	static struct ctl_sum_area	 sumareactl;
808 	struct iface			*iface;
809 	struct rde_nbr			*nbr;
810 	struct lsa_tree			*tree = &area->lsa_tree;
811 	struct vertex			*v;
812 
813 	bzero(&sumareactl, sizeof(struct ctl_sum_area));
814 
815 	sumareactl.area.s_addr = area->id.s_addr;
816 	sumareactl.num_spf_calc = area->num_spf_calc;
817 
818 	LIST_FOREACH(iface, &area->iface_list, entry)
819 		sumareactl.num_iface++;
820 
821 	LIST_FOREACH(nbr, &area->nbr_list, entry)
822 		if (nbr->state == NBR_STA_FULL && !nbr->self)
823 			sumareactl.num_adj_nbr++;
824 
825 	RB_FOREACH(v, lsa_tree, tree)
826 		sumareactl.num_lsa++;
827 
828 	rde_imsg_compose_ospfe(IMSG_CTL_SHOW_SUM_AREA, 0, pid, &sumareactl,
829 	    sizeof(sumareactl));
830 }
831 
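/*
 * RDE neighbor table: rde_nbr structures hashed by peerid. The table
 * also holds a synthetic "self" neighbor (nbrself) used for
 * self-originated LSAs.
 */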
832 LIST_HEAD(rde_nbr_head, rde_nbr);
833 
834 struct nbr_table {
835 	struct rde_nbr_head	*hashtbl;
836 	u_int32_t		 hashmask;
837 } rdenbrtable;
838 
839 #define RDE_NBR_HASH(x)		\
840 	&rdenbrtable.hashtbl[(x) & rdenbrtable.hashmask]
841 
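/*
 * Allocate the neighbor hash table. The requested size is rounded up to
 * the next power of two so that RDE_NBR_HASH() can select a bucket with
 * a simple mask (peerid & hashmask) instead of a modulo.
 */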
842 void
843 rde_nbr_init(u_int32_t hashsize)
844 {
845 	struct rde_nbr_head	*head;
846 	u_int32_t		 hs, i;
847 
848 	for (hs = 1; hs < hashsize; hs <<= 1)
849 		;
850 	rdenbrtable.hashtbl = calloc(hs, sizeof(struct rde_nbr_head));
851 	if (rdenbrtable.hashtbl == NULL)
852 		fatal("rde_nbr_init");
853 
854 	for (i = 0; i < hs; i++)
855 		LIST_INIT(&rdenbrtable.hashtbl[i]);
856 
857 	rdenbrtable.hashmask = hs - 1;
858 
859 	if ((nbrself = calloc(1, sizeof(*nbrself))) == NULL)
860 		fatal("rde_nbr_init");
861 
862 	nbrself->id.s_addr = rde_router_id();
863 	nbrself->peerid = NBR_IDSELF;
864 	nbrself->state = NBR_STA_DOWN;
865 	nbrself->self = 1;
866 	head = RDE_NBR_HASH(NBR_IDSELF);
867 	LIST_INSERT_HEAD(head, nbrself, hash);
868 }
869 
870 void
871 rde_nbr_free(void)
872 {
873 	free(nbrself);
874 	free(rdenbrtable.hashtbl);
875 }
876 
877 struct rde_nbr *
878 rde_nbr_find(u_int32_t peerid)
879 {
880 	struct rde_nbr_head	*head;
881 	struct rde_nbr		*nbr;
882 
883 	head = RDE_NBR_HASH(peerid);
884 
885 	LIST_FOREACH(nbr, head, hash) {
886 		if (nbr->peerid == peerid)
887 			return (nbr);
888 	}
889 
890 	return (NULL);
891 }
892 
893 struct rde_nbr *
894 rde_nbr_new(u_int32_t peerid, struct rde_nbr *new)
895 {
896 	struct rde_nbr_head	*head;
897 	struct rde_nbr		*nbr;
898 	struct area		*area;
899 	struct iface		*iface;
900 
901 	if (rde_nbr_find(peerid))
902 		return (NULL);
903 	if ((area = area_find(rdeconf, new->area_id)) == NULL)
904 		fatalx("rde_nbr_new: unknown area");
905 
906 	LIST_FOREACH(iface, &area->iface_list, entry) {
907 		if (iface->ifindex == new->ifindex)
908 			break;
909 	}
910 	if (iface == NULL)
911 		fatalx("rde_nbr_new: unknown interface");
912 
913 	if ((nbr = calloc(1, sizeof(*nbr))) == NULL)
914 		fatal("rde_nbr_new");
915 
916 	memcpy(nbr, new, sizeof(*nbr));
917 	nbr->peerid = peerid;
918 	nbr->area = area;
919 	nbr->iface = iface;
920 
921 	TAILQ_INIT(&nbr->req_list);
922 
923 	head = RDE_NBR_HASH(peerid);
924 	LIST_INSERT_HEAD(head, nbr, hash);
925 	LIST_INSERT_HEAD(&area->nbr_list, nbr, entry);
926 
927 	return (nbr);
928 }
929 
930 void
931 rde_nbr_del(struct rde_nbr *nbr)
932 {
933 	if (nbr == NULL)
934 		return;
935 
936 	rde_req_list_free(nbr);
937 
938 	LIST_REMOVE(nbr, entry);
939 	LIST_REMOVE(nbr, hash);
940 
941 	free(nbr);
942 }
943 
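/*
 * Return 1 if any neighbor in the given area is still in Exchange or
 * Loading state; with a NULL area all areas are checked.
 */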
944 int
945 rde_nbr_loading(struct area *area)
946 {
947 	struct rde_nbr		*nbr;
948 	int			 checkall = 0;
949 
950 	if (area == NULL) {
951 		area = LIST_FIRST(&rdeconf->area_list);
952 		checkall = 1;
953 	}
954 
955 	while (area != NULL) {
956 		LIST_FOREACH(nbr, &area->nbr_list, entry) {
957 			if (nbr->self)
958 				continue;
959 			if (nbr->state & NBR_STA_XCHNG ||
960 			    nbr->state & NBR_STA_LOAD)
961 				return (1);
962 		}
963 		if (!checkall)
964 			break;
965 		area = LIST_NEXT(area, entry);
966 	}
967 
968 	return (0);
969 }
970 
971 struct rde_nbr *
972 rde_nbr_self(struct area *area)
973 {
974 	struct rde_nbr		*nbr;
975 
976 	LIST_FOREACH(nbr, &area->nbr_list, entry)
977 		if (nbr->self)
978 			return (nbr);
979 
980 	/* this should never happen */
981 	fatalx("rde_nbr_self: area without self");
982 	return (NULL);
983 }
984 
985 /*
986  * LSA req list
987  */
988 void
989 rde_req_list_add(struct rde_nbr *nbr, struct lsa_hdr *lsa)
990 {
991 	struct rde_req_entry	*le;
992 
993 	if ((le = calloc(1, sizeof(*le))) == NULL)
994 		fatal("rde_req_list_add");
995 
996 	TAILQ_INSERT_TAIL(&nbr->req_list, le, entry);
997 	le->type = lsa->type;
998 	le->ls_id = lsa->ls_id;
999 	le->adv_rtr = lsa->adv_rtr;
1000 }
1001 
1002 int
1003 rde_req_list_exists(struct rde_nbr *nbr, struct lsa_hdr *lsa_hdr)
1004 {
1005 	struct rde_req_entry	*le;
1006 
1007 	TAILQ_FOREACH(le, &nbr->req_list, entry) {
1008 		if ((lsa_hdr->type == le->type) &&
1009 		    (lsa_hdr->ls_id == le->ls_id) &&
1010 		    (lsa_hdr->adv_rtr == le->adv_rtr))
1011 			return (1);
1012 	}
1013 	return (0);
1014 }
1015 
1016 void
1017 rde_req_list_del(struct rde_nbr *nbr, struct lsa_hdr *lsa_hdr)
1018 {
1019 	struct rde_req_entry	*le;
1020 
1021 	TAILQ_FOREACH(le, &nbr->req_list, entry) {
1022 		if ((lsa_hdr->type == le->type) &&
1023 		    (lsa_hdr->ls_id == le->ls_id) &&
1024 		    (lsa_hdr->adv_rtr == le->adv_rtr)) {
1025 			TAILQ_REMOVE(&nbr->req_list, le, entry);
1026 			free(le);
1027 			return;
1028 		}
1029 	}
1030 }
1031 
1032 void
1033 rde_req_list_free(struct rde_nbr *nbr)
1034 {
1035 	struct rde_req_entry	*le;
1036 
1037 	while ((le = TAILQ_FIRST(&nbr->req_list)) != NULL) {
1038 		TAILQ_REMOVE(&nbr->req_list, le, entry);
1039 		free(le);
1040 	}
1041 }
1042 
1043 /*
1044  * as-external LSA handling
1045  */
1046 struct lsa *
1047 rde_asext_get(struct rroute *rr)
1048 {
1049 #if 0
1050 	struct area	*area;
1051 	struct iface	*iface;
1052 XXX
1053 	LIST_FOREACH(area, &rdeconf->area_list, entry)
1054 		LIST_FOREACH(iface, &area->iface_list, entry) {
1055 			if ((iface->addr.s_addr & iface->mask.s_addr) ==
1056 			    rr->kr.prefix.s_addr && iface->mask.s_addr ==
1057 			    prefixlen2mask(rr->kr.prefixlen)) {
1058 				/* already announced as (stub) net LSA */
1059 				log_debug("rde_asext_get: %s/%d is net LSA",
1060 				    inet_ntoa(rr->kr.prefix), rr->kr.prefixlen);
1061 				return (NULL);
1062 			}
1063 		}
1064 #endif
1065 	/* update of seqnum is done by lsa_merge */
1066 	return (orig_asext_lsa(rr, DEFAULT_AGE));
1067 }
1068 
1069 struct lsa *
1070 rde_asext_put(struct rroute *rr)
1071 {
1072 	/*
1073 	 * Just try to remove the LSA. If the prefix is announced as a
1074 	 * stub net LSA, lsa_find() will fail later and nothing will happen.
1075 	 */
1076 
1077 	/* remove by reflooding with MAX_AGE */
1078 	return (orig_asext_lsa(rr, MAX_AGE));
1079 }
1080 
1081 /*
1082  * summary LSA stuff
1083  */
1084 void
1085 rde_summary_update(struct rt_node *rte, struct area *area)
1086 {
1087 	struct vertex		*v = NULL;
1088 //XXX	struct lsa		*lsa;
1089 	u_int16_t		 type = 0;
1090 
1091 	/* first check if we actually need to announce this route */
1092 	if (!(rte->d_type == DT_NET || rte->flags & OSPF_RTR_E))
1093 		return;
1094 	/* never create summaries for as-ext LSA */
1095 	if (rte->p_type == PT_TYPE1_EXT || rte->p_type == PT_TYPE2_EXT)
1096 		return;
1097 	/* no need for summary LSA in the originating area */
1098 	if (rte->area.s_addr == area->id.s_addr)
1099 		return;
1100 	/* no need to originate inter-area routes to the backbone */
1101 	if (rte->p_type == PT_INTER_AREA && area->id.s_addr == INADDR_ANY)
1102 		return;
1103 	/* TODO nexthop check, nexthop part of area -> no summary */
1104 	if (rte->cost >= LS_INFINITY)
1105 		return;
1106 	/* TODO AS border router specific checks */
1107 	/* TODO inter-area network route stuff */
1108 	/* TODO intra-area stuff -- condense LSA ??? */
1109 
1110 	if (rte->d_type == DT_NET) {
1111 		type = LSA_TYPE_INTER_A_PREFIX;
1112 	} else if (rte->d_type == DT_RTR) {
1113 		type = LSA_TYPE_INTER_A_ROUTER;
1114 	} else
1115 		fatalx("rde_summary_update: unknown route type");
1116 #if 0 /* XXX a lot todo */
1117 	/* update lsa but only if it was changed */
1118 	v = lsa_find(area, type, rte->prefix.s_addr, rde_router_id());
1119 	lsa = orig_sum_lsa(rte, area, type, rte->invalid);
1120 	lsa_merge(rde_nbr_self(area), lsa, v);
1121 
1122 	if (v == NULL)
1123 		v = lsa_find(area, type, rte->prefix.s_addr, rde_router_id());
1124 #endif
1125 
1126 	/* suppressed/deleted routes are not found in the second lsa_find */
1127 	if (v)
1128 		v->cost = rte->cost;
1129 }
1130 
1131 
1132 /*
1133  * functions for self-originated LSA
1134  */
1135 struct lsa *
1136 orig_asext_lsa(struct rroute *rr, u_int16_t age)
1137 {
1138 #if 0 /* XXX a lot todo */
1139 	struct lsa	*lsa;
1140 	u_int16_t	 len;
1141 
1142 	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_asext);
1143 	if ((lsa = calloc(1, len)) == NULL)
1144 		fatal("orig_asext_lsa");
1145 
1146 	log_debug("orig_asext_lsa: %s/%d age %d",
1147 	    log_in6addr(&rr->kr.prefix), rr->kr.prefixlen, age);
1148 
1149 	/* LSA header */
1150 	lsa->hdr.age = htons(age);
1151 	lsa->hdr.type = LSA_TYPE_EXTERNAL;
1152 	lsa->hdr.adv_rtr = rdeconf->rtr_id.s_addr;
1153 	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
1154 	lsa->hdr.len = htons(len);
1155 
1156 	/* prefix and mask */
1157 	/*
1158 	 * TODO ls_id must be unique, for overlapping routes this may
1159 	 * not be true. In this case a hack needs to be done to
1160 	 * make the ls_id unique.
1161 	 */
1162 	lsa->hdr.ls_id = rr->kr.prefix.s_addr;
1163 	lsa->data.asext.mask = prefixlen2mask(rr->kr.prefixlen);
1164 
1165 	/*
1166 	 * nexthop -- on connected routes we are the nexthop,
1167 	 * in all other cases we announce the true nexthop.
1168 	 * XXX this is wrong as the true nexthop may be outside
1169 	 * of the ospf cloud and so unreachable. For now we force
1170 	 * all traffic to be directed to us.
1171 	 */
1172 	lsa->data.asext.fw_addr = 0;
1173 
1174 	lsa->data.asext.metric = htonl(rr->metric);
1175 	lsa->data.asext.ext_tag = htonl(rr->kr.ext_tag);
1176 
1177 	lsa->hdr.ls_chksum = 0;
1178 	lsa->hdr.ls_chksum =
1179 	    htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));
1180 
1181 	return (lsa);
1182 #endif
1183 	return (NULL);
1184 }
1185 
1186 struct lsa *
1187 orig_sum_lsa(struct rt_node *rte, struct area *area, u_int8_t type, int invalid)
1188 {
1189 #if 0 /* XXX a lot todo */
1190 	struct lsa	*lsa;
1191 	u_int16_t	 len;
1192 
1193 	len = sizeof(struct lsa_hdr) + sizeof(struct lsa_sum);
1194 	if ((lsa = calloc(1, len)) == NULL)
1195 		fatal("orig_sum_lsa");
1196 
1197 	/* LSA header */
1198 	lsa->hdr.age = htons(invalid ? MAX_AGE : DEFAULT_AGE);
1199 	lsa->hdr.type = type;
1200 	lsa->hdr.adv_rtr = rdeconf->rtr_id.s_addr;
1201 	lsa->hdr.seq_num = htonl(INIT_SEQ_NUM);
1202 	lsa->hdr.len = htons(len);
1203 
1204 	/* prefix and mask */
1205 	/*
1206 	 * TODO ls_id must be unique, for overlapping routes this may
1207 	 * not be true. In this case a hack needs to be done to
1208 	 * make the ls_id unique.
1209 	 */
1210 	lsa->hdr.ls_id = rte->prefix.s_addr;
1211 	if (type == LSA_TYPE_SUM_NETWORK)
1212 		lsa->data.sum.mask = prefixlen2mask(rte->prefixlen);
1213 	else
1214 		lsa->data.sum.mask = 0;	/* must be zero per RFC */
1215 
1216 	lsa->data.sum.metric = htonl(rte->cost & LSA_METRIC_MASK);
1217 
1218 	lsa->hdr.ls_chksum = 0;
1219 	lsa->hdr.ls_chksum =
1220 	    htons(iso_cksum(lsa, len, LS_CKSUM_OFFSET));
1221 
1222 	return (lsa);
1223 #endif
1224 	return (NULL);
1225 }
1226