/*	$OpenBSD: rde_peer.c,v 1.46 2025/01/27 15:22:11 claudio Exp $ */

/*
 * Copyright (c) 2019 Claudio Jeker <claudio@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */
#include <sys/types.h>
#include <sys/queue.h>

#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

#include "bgpd.h"
#include "rde.h"

struct peer_tree	 peertable = RB_INITIALIZER(&peertable);
struct peer_tree	 zombietable = RB_INITIALIZER(&zombietable);
struct rde_peer		*peerself;
static long		 imsg_pending;

CTASSERT(sizeof(peerself->recv_eor) * 8 >= AID_MAX);
CTASSERT(sizeof(peerself->sent_eor) * 8 >= AID_MAX);

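/*
 * Wrapper to queue imsgs per peer until the RDE is ready to process
 * them; see peer_imsg_push() and peer_imsg_pop() below.
 */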
struct iq {
	SIMPLEQ_ENTRY(iq)	entry;
	struct imsg		imsg;
};

int
peer_has_as4byte(struct rde_peer *peer)
{
	return peer->capa.as4byte;
}

/*
 * Check if ADD_PATH is enabled for aid and mode (rx / tx). If aid is
 * AID_UNSPEC then the function returns true if any aid has mode enabled.
 */
int
peer_has_add_path(struct rde_peer *peer, uint8_t aid, int mode)
{
	if (aid >= AID_MAX)
		return 0;
	return peer->capa.add_path[aid] & mode;
}
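
/*
 * The AID_UNSPEC case works because add_path[AID_UNSPEC] holds the
 * aggregate of all per-AID flags (presumably kept up to date during
 * capability negotiation); e.g. peer_has_add_path(peer, AID_UNSPEC,
 * CAPA_AP_RECV) tests whether add-path receive is enabled for any
 * address family.
 */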

int
peer_has_ext_msg(struct rde_peer *peer)
{
	return peer->capa.ext_msg;
}

int
peer_has_ext_nexthop(struct rde_peer *peer, uint8_t aid)
{
	if (aid >= AID_MAX)
		return 0;
	return peer->capa.ext_nh[aid];
}

int
peer_permit_as_set(struct rde_peer *peer)
{
	return peer->flags & PEERFLAG_PERMIT_AS_SET;
}

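/*
 * Create the local pseudo peer (peerself) that owns locally
 * originated prefixes; it stays in state PEER_UP for the lifetime
 * of the RDE.
 */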
void
peer_init(struct filter_head *rules)
{
	struct peer_config pc;

	memset(&pc, 0, sizeof(pc));
	snprintf(pc.descr, sizeof(pc.descr), "LOCAL");
	pc.id = PEER_ID_SELF;

	peerself = peer_add(PEER_ID_SELF, &pc, rules);
	peerself->state = PEER_UP;
}

void
peer_shutdown(void)
{
	struct rde_peer *peer, *np;

	RB_FOREACH_SAFE(peer, peer_tree, &peertable, np)
		peer_delete(peer);

	while (!RB_EMPTY(&zombietable))
		peer_reaper(NULL);

	if (!RB_EMPTY(&peertable))
		log_warnx("%s: free non-free table", __func__);
}

/*
 * Traverse all peers, calling the callback for each one.
 */
void
peer_foreach(void (*callback)(struct rde_peer *, void *), void *arg)
{
	struct rde_peer *peer, *np;

	RB_FOREACH_SAFE(peer, peer_tree, &peertable, np)
		callback(peer, arg);
}

/*
 * Look up a peer by peer_id, return NULL if not found.
 */
struct rde_peer *
peer_get(uint32_t id)
{
	struct rde_peer	needle;

	needle.conf.id = id;
	return RB_FIND(peer_tree, &peertable, &needle);
}

/*
 * Find the next peer that matches the neighbor options in *n.
 * If peerid is set, pick up the lookup after that peer.
 * Returns NULL if no more peers match.
 */
struct rde_peer *
peer_match(struct ctl_neighbor *n, uint32_t peerid)
{
	struct rde_peer		*peer;

	if (peerid != 0) {
		peer = peer_get(peerid);
		if (peer)
			peer = RB_NEXT(peer_tree, &peertable, peer);
	} else
		peer = RB_MIN(peer_tree, &peertable);

	for (; peer != NULL; peer = RB_NEXT(peer_tree, &peertable, peer)) {
		if (rde_match_peer(peer, n))
			return peer;
	}
	return NULL;
}
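
/*
 * Illustrative sketch (not part of the original file): callers can
 * walk every matching peer by feeding the previous match back in:
 *
 *	struct rde_peer *p;
 *
 *	for (p = peer_match(n, 0); p != NULL;
 *	    p = peer_match(n, p->conf.id))
 *		...	process p ...
 */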

struct rde_peer *
peer_add(uint32_t id, struct peer_config *p_conf, struct filter_head *rules)
{
	struct rde_peer		*peer;
	int			 conflict;

	if ((peer = peer_get(id))) {
		memcpy(&peer->conf, p_conf, sizeof(struct peer_config));
		return peer;
	}

	peer = calloc(1, sizeof(struct rde_peer));
	if (peer == NULL)
		fatal("peer_add");

	memcpy(&peer->conf, p_conf, sizeof(struct peer_config));
	peer->remote_bgpid = 0;
	peer->loc_rib_id = rib_find(peer->conf.rib);
	if (peer->loc_rib_id == RIB_NOTFOUND)
		fatalx("King Bula's new peer met an unknown RIB");
	peer->state = PEER_NONE;
	peer->eval = peer->conf.eval;
	peer->role = peer->conf.role;
	peer->export_type = peer->conf.export_type;
	peer->flags = peer->conf.flags;
	SIMPLEQ_INIT(&peer->imsg_queue);

	peer_apply_out_filter(peer, rules);

	/*
	 * Assign an even random unique transmit path id.
	 * Odd path_id_tx numbers are for peers using add-path recv.
	 */
	do {
		struct rde_peer *p;

		conflict = 0;
		peer->path_id_tx = arc4random() << 1;
		RB_FOREACH(p, peer_tree, &peertable) {
			if (p->path_id_tx == peer->path_id_tx) {
				conflict = 1;
				break;
			}
		}
	} while (conflict);

	if (RB_INSERT(peer_tree, &peertable, peer) != NULL)
		fatalx("rde peer table corrupted");

	return peer;
}

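/*
 * Build the peer's outbound filter rules from the global rule set,
 * skipping rules that can never match this peer.  Returns the
 * previous rule set so the caller can free it (e.g. with
 * filterlist_free()).
 */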
struct filter_head *
peer_apply_out_filter(struct rde_peer *peer, struct filter_head *rules)
{
	struct filter_head *old;
	struct filter_rule *fr, *new;

	old = peer->out_rules;
	if ((peer->out_rules = malloc(sizeof(*peer->out_rules))) == NULL)
		fatal(NULL);
	TAILQ_INIT(peer->out_rules);

	TAILQ_FOREACH(fr, rules, entry) {
		if (rde_filter_skip_rule(peer, fr))
			continue;

		if ((new = malloc(sizeof(*new))) == NULL)
			fatal(NULL);
		memcpy(new, fr, sizeof(*new));
		filterset_copy(&fr->set, &new->set);

		TAILQ_INSERT_TAIL(peer->out_rules, new, entry);
	}

	return old;
}

static inline int
peer_cmp(struct rde_peer *a, struct rde_peer *b)
{
	if (a->conf.id > b->conf.id)
		return 1;
	if (a->conf.id < b->conf.id)
		return -1;
	return 0;
}

RB_GENERATE(peer_tree, rde_peer, entry, peer_cmp);

static void
peer_generate_update(struct rde_peer *peer, struct rib_entry *re,
    struct prefix *newpath, struct prefix *oldpath,
    enum eval_mode mode)
{
	uint8_t		 aid;

	aid = re->prefix->aid;

	/* skip ourself */
	if (peer == peerself)
		return;
	/* skip peers that never had a session open */
	if (peer->state == PEER_NONE)
		return;
	/* skip peers using a different rib */
	if (peer->loc_rib_id != re->rib_id)
		return;
	/* check if peer actually supports the address family */
	if (peer->capa.mp[aid] == 0)
		return;
	/* skip peers with special export types */
	if (peer->export_type == EXPORT_NONE ||
	    peer->export_type == EXPORT_DEFAULT_ROUTE)
		return;

	/* if reconf skip peers which don't need to reconfigure */
	if (mode == EVAL_RECONF && peer->reconf_out == 0)
		return;

	/* handle peers with add-path */
	if (peer_has_add_path(peer, aid, CAPA_AP_SEND)) {
		if (peer->eval.mode == ADDPATH_EVAL_ALL)
			up_generate_addpath_all(peer, re, newpath, oldpath);
		else
			up_generate_addpath(peer, re);
		return;
	}

	/* skip regular peers if the best path didn't change */
	if (mode == EVAL_ALL && (peer->flags & PEERFLAG_EVALUATE_ALL) == 0)
		return;
	up_generate_updates(peer, re);
}

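/*
 * Called by the decision process when the set of eligible prefixes
 * of a rib_entry changed; newpath/oldpath describe the changed pair
 * and are only consumed by add-path peers that evaluate all paths.
 */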
void
rde_generate_updates(struct rib_entry *re, struct prefix *newpath,
    struct prefix *oldpath, enum eval_mode mode)
{
	struct rde_peer	*peer;

	RB_FOREACH(peer, peer_tree, &peertable)
		peer_generate_update(peer, re, newpath, oldpath, mode);
}

/*
 * Various RIB walker callbacks.
 */
struct peer_flush {
	struct rde_peer *peer;
	time_t		 staletime;
};

/* Remove this peer's prefixes (older than staletime) from all RIBs. */
static void
peer_flush_upcall(struct rib_entry *re, void *arg)
{
	struct rde_peer *peer = ((struct peer_flush *)arg)->peer;
	struct rde_aspath *asp;
	struct bgpd_addr addr;
	struct prefix *p, *np, *rp;
	time_t staletime = ((struct peer_flush *)arg)->staletime;
	uint32_t i;
	uint8_t prefixlen;

	pt_getaddr(re->prefix, &addr);
	prefixlen = re->prefix->prefixlen;
	TAILQ_FOREACH_SAFE(p, &re->prefix_h, entry.list.rib, np) {
		if (peer != prefix_peer(p))
			continue;
		if (staletime && p->lastchange > staletime)
			continue;

		for (i = RIB_LOC_START; i < rib_size; i++) {
			struct rib *rib = rib_byid(i);
			if (rib == NULL)
				continue;
			rp = prefix_get(rib, peer, p->path_id,
			    &addr, prefixlen);
			if (rp) {
				asp = prefix_aspath(rp);
				if (asp && asp->pftableid)
					rde_pftable_del(asp->pftableid, rp);

				prefix_destroy(rp);
				rde_update_log("flush", i, peer, NULL,
				    &addr, prefixlen);
			}
		}

		prefix_destroy(p);
		peer->stats.prefix_cnt--;
	}
}


/*
 * Session got established, bring the peer up, load the RIBs and do the
 * initial table dump.
 */
void
peer_up(struct rde_peer *peer, struct session_up *sup)
{
	uint8_t	 i;
	int force_sync = 1;

	if (peer->state == PEER_ERR) {
		/*
		 * There is a race condition when doing PEER_ERR -> PEER_DOWN.
		 * So just do a full reset of the peer here.
		 */
		rib_dump_terminate(peer);
		peer_imsg_flush(peer);
		peer_flush(peer, AID_UNSPEC, 0);
		peer->stats.prefix_cnt = 0;
		peer->state = PEER_DOWN;
	}

	/*
	 * Check whether anything changed during the flap to decide if the
	 * RIB is still in sync. The capa check may be too strict but that
	 * should not matter for normal operation.
	 */
	if (memcmp(&peer->remote_addr, &sup->remote_addr,
	    sizeof(sup->remote_addr)) == 0 &&
	    memcmp(&peer->local_v4_addr, &sup->local_v4_addr,
	    sizeof(sup->local_v4_addr)) == 0 &&
	    memcmp(&peer->local_v6_addr, &sup->local_v6_addr,
	    sizeof(sup->local_v6_addr)) == 0 &&
	    memcmp(&peer->capa, &sup->capa, sizeof(sup->capa)) == 0)
		force_sync = 0;

	peer->remote_addr = sup->remote_addr;
	peer->local_v4_addr = sup->local_v4_addr;
	peer->local_v6_addr = sup->local_v6_addr;
	memcpy(&peer->capa, &sup->capa, sizeof(sup->capa));
	/* the Adj-RIB-Out does not depend on those */
	peer->remote_bgpid = sup->remote_bgpid;
	peer->local_if_scope = sup->if_scope;
	peer->short_as = sup->short_as;

	/* clear eor markers depending on GR flags */
	if (peer->capa.grestart.restart) {
		peer->sent_eor = 0;
		peer->recv_eor = 0;
	} else {
		/* no EOR expected */
		peer->sent_eor = ~0;
		peer->recv_eor = ~0;
	}
	peer->state = PEER_UP;

	if (!force_sync) {
		for (i = AID_MIN; i < AID_MAX; i++) {
			if (peer->capa.mp[i])
				peer_blast(peer, i);
		}
	} else {
		for (i = AID_MIN; i < AID_MAX; i++) {
			if (peer->capa.mp[i])
				peer_dump(peer, i);
		}
	}
}
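
/*
 * In other words: after an unchanged flap the Adj-RIB-Out is still
 * valid and peer_blast() simply re-queues it, while any change forces
 * peer_dump() to rebuild the Adj-RIB-Out before blasting it out.
 */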

/*
 * Session dropped and no graceful restart is done. Stop everything for
 * this peer and clean up.
 */
void
peer_down(struct rde_peer *peer)
{
	peer->remote_bgpid = 0;
	peer->state = PEER_DOWN;
	/*
	 * stop all pending dumps which may depend on this peer
	 * and flush all pending imsg from the SE.
	 */
	rib_dump_terminate(peer);
	prefix_adjout_flush_pending(peer);
	peer_imsg_flush(peer);

	/* flush Adj-RIB-In */
	peer_flush(peer, AID_UNSPEC, 0);
	peer->stats.prefix_cnt = 0;
}

void
peer_delete(struct rde_peer *peer)
{
	if (peer->state != PEER_DOWN)
		peer_down(peer);

	/* free filters */
	filterlist_free(peer->out_rules);

	RB_REMOVE(peer_tree, &peertable, peer);
	while (RB_INSERT(peer_tree, &zombietable, peer) != NULL) {
		log_warnx("zombie peer conflict");
		peer->conf.id = arc4random();
	}

	/* start reaping the zombie */
	peer_reaper(peer);
}

/*
 * Flush all routes older than staletime. If staletime is 0 all routes
 * will be flushed.
 */
void
peer_flush(struct rde_peer *peer, uint8_t aid, time_t staletime)
{
	struct peer_flush pf = { peer, staletime };

	/* this dump must run synchronously, too much depends on that right now */
	if (rib_dump_new(RIB_ADJ_IN, aid, 0, &pf, peer_flush_upcall,
	    NULL, NULL) == -1)
		fatal("%s: rib_dump_new", __func__);

	/* every route is gone so reset staletime */
	if (aid == AID_UNSPEC) {
		uint8_t i;
		for (i = AID_MIN; i < AID_MAX; i++)
			peer->staletime[i] = 0;
	} else {
		peer->staletime[aid] = 0;
	}
}
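
/*
 * Graceful-restart flow (sketch): when the session drops peer_stale()
 * records staletime[aid]; once the restart window closes the RDE is
 * asked to call peer_flush(peer, aid, peer->staletime[aid]), which
 * deletes everything learned before the flap while keeping any
 * re-announced routes.
 */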

/*
 * During graceful restart mark a peer as stale if the session goes down.
 * For the specified AID the Adj-RIB-Out is marked stale and the staletime
 * is set to the current timestamp for identifying stale routes in Adj-RIB-In.
 */
void
peer_stale(struct rde_peer *peer, uint8_t aid, int flushall)
{
	time_t now;

	/* flush the now even staler routes out */
	if (peer->staletime[aid])
		peer_flush(peer, aid, peer->staletime[aid]);

	peer->staletime[aid] = now = getmonotime();
	peer->state = PEER_DOWN;

	/*
	 * stop all pending dumps which may depend on this peer
	 * and flush all pending imsg from the SE.
	 */
	rib_dump_terminate(peer);
	prefix_adjout_flush_pending(peer);
	peer_imsg_flush(peer);

	if (flushall)
		peer_flush(peer, aid, 0);

	/* make sure new prefixes start on a higher timestamp */
	while (now >= getmonotime())
		sleep(1);
}

/*
 * RIB walker callback for peer_blast.
 * Enqueue a prefix onto the update queue so it can be sent out.
 */
static void
peer_blast_upcall(struct prefix *p, void *ptr)
{
	if (p->flags & PREFIX_FLAG_DEAD) {
		/* ignore dead prefixes, they will go away soon */
	} else if ((p->flags & PREFIX_FLAG_MASK) == 0) {
		/* put entries on the update queue if not already on a queue */
		p->flags |= PREFIX_FLAG_UPDATE;
		if (RB_INSERT(prefix_tree, &prefix_peer(p)->updates[p->pt->aid],
		    p) != NULL)
			fatalx("%s: RB tree invariant violated", __func__);
	}
}

/*
 * Called after all prefixes are put onto the update queue and we are
 * ready to blast out updates to the peer.
 */
static void
peer_blast_done(void *ptr, uint8_t aid)
{
	struct rde_peer		*peer = ptr;

	/* Adj-RIB-Out ready, unthrottle peer and inject EOR */
	peer->throttled = 0;
	if (peer->capa.grestart.restart)
		prefix_add_eor(peer, aid);
}

/*
 * Send out the full Adj-RIB-Out by putting all prefixes onto the update
 * queue.
 */
void
peer_blast(struct rde_peer *peer, uint8_t aid)
{
	if (peer->capa.enhanced_rr && (peer->sent_eor & (1 << aid)))
		rde_peer_send_rrefresh(peer, aid, ROUTE_REFRESH_BEGIN_RR);

	/* force out all updates from the Adj-RIB-Out */
	if (prefix_dump_new(peer, aid, 0, peer, peer_blast_upcall,
	    peer_blast_done, NULL) == -1)
		fatal("%s: prefix_dump_new", __func__);
}

/* RIB walker callbacks for peer_dump. */
static void
peer_dump_upcall(struct rib_entry *re, void *ptr)
{
	struct rde_peer		*peer = ptr;
	struct prefix		*p;

	if ((p = prefix_best(re)) == NULL)
		/* no eligible prefix, not even for 'evaluate all' */
		return;

	peer_generate_update(peer, re, NULL, NULL, 0);
}

static void
peer_dump_done(void *ptr, uint8_t aid)
{
	struct rde_peer		*peer = ptr;

	/* Adj-RIB-Out is ready, blast it out */
	peer_blast(peer, aid);
}

/*
 * Load the Adj-RIB-Out of a peer; normally called when a session comes up
 * for the first time. Once the Adj-RIB-Out is ready the updates are
 * blasted out.
 */
void
peer_dump(struct rde_peer *peer, uint8_t aid)
{
	/* throttle peer until dump is done */
	peer->throttled = 1;

	if (peer->export_type == EXPORT_NONE) {
		peer_blast(peer, aid);
	} else if (peer->export_type == EXPORT_DEFAULT_ROUTE) {
		up_generate_default(peer, aid);
		peer_blast(peer, aid);
	} else if (aid == AID_FLOWSPECv4 || aid == AID_FLOWSPECv6) {
		prefix_flowspec_dump(aid, peer, peer_dump_upcall,
		    peer_dump_done);
	} else {
		if (rib_dump_new(peer->loc_rib_id, aid, RDE_RUNNER_ROUNDS, peer,
		    peer_dump_upcall, peer_dump_done, NULL) == -1)
			fatal("%s: rib_dump_new", __func__);
	}
}
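
/*
 * Dump pipeline overview: peer_dump() fills the Adj-RIB-Out via
 * rib_dump_new()/peer_dump_upcall(); peer_dump_done() then calls
 * peer_blast(), which walks the Adj-RIB-Out with prefix_dump_new()/
 * peer_blast_upcall() and finally unthrottles the peer in
 * peer_blast_done().
 */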

/*
 * Start of an enhanced route refresh. Mark all routes as stale.
 * Once the route refresh ends an End of Route Refresh message is sent,
 * which triggers peer_flush() to remove all stale routes.
 */
void
peer_begin_rrefresh(struct rde_peer *peer, uint8_t aid)
{
	time_t now;

	/* flush the now even staler routes out */
	if (peer->staletime[aid])
		peer_flush(peer, aid, peer->staletime[aid]);

	peer->staletime[aid] = now = getmonotime();

	/* make sure new prefixes start on a higher timestamp */
	while (now >= getmonotime())
		sleep(1);
}

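/*
 * Incrementally tear down a zombie peer's Adj-RIB-Out; once it is
 * empty the peer itself is freed. Called with NULL to pick the next
 * zombie from the table.
 */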
void
peer_reaper(struct rde_peer *peer)
{
	if (peer == NULL)
		peer = RB_ROOT(&zombietable);
	if (peer == NULL)
		return;

	if (!prefix_adjout_reaper(peer))
		return;

	RB_REMOVE(peer_tree, &zombietable, peer);
	free(peer);
}

/*
 * Check if any imsg are pending or any zombie peers are around.
 * Return 0 if no work is pending.
 */
int
peer_work_pending(void)
{
	if (!RB_EMPTY(&zombietable))
		return 1;
	return imsg_pending != 0;
}

/*
 * move an imsg from src to dst, disconnecting any dynamic memory from src.
 */
static void
imsg_move(struct imsg *dst, struct imsg *src)
{
	*dst = *src;
	memset(src, 0, sizeof(*src));
}

/*
 * push an imsg onto the peer imsg queue.
 */
void
peer_imsg_push(struct rde_peer *peer, struct imsg *imsg)
{
	struct iq *iq;

	if ((iq = calloc(1, sizeof(*iq))) == NULL)
		fatal(NULL);
	imsg_move(&iq->imsg, imsg);
	SIMPLEQ_INSERT_TAIL(&peer->imsg_queue, iq, entry);
	imsg_pending++;
}

/*
 * pop the first imsg from the peer imsg queue and move it into the
 * imsg argument. Returns 1 if an element was returned, else 0.
 */
int
peer_imsg_pop(struct rde_peer *peer, struct imsg *imsg)
{
	struct iq *iq;

	iq = SIMPLEQ_FIRST(&peer->imsg_queue);
	if (iq == NULL)
		return 0;

	imsg_move(imsg, &iq->imsg);

	SIMPLEQ_REMOVE_HEAD(&peer->imsg_queue, entry);
	free(iq);
	imsg_pending--;

	return 1;
}
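
/*
 * Illustrative consumer loop (sketch, not part of the original file):
 *
 *	struct imsg imsg;
 *
 *	while (peer_imsg_pop(peer, &imsg)) {
 *		... handle the imsg ...
 *		imsg_free(&imsg);
 *	}
 */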

/*
 * flush all imsg queued for a peer.
 */
void
peer_imsg_flush(struct rde_peer *peer)
{
	struct iq *iq;

	while ((iq = SIMPLEQ_FIRST(&peer->imsg_queue)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&peer->imsg_queue, entry);
		free(iq);
		imsg_pending--;
	}
}
722097b9208Sclaudio }
723