/*	$OpenBSD: lsupdate.c,v 1.10 2013/03/25 14:29:35 markus Exp $ */

/*
 * Copyright (c) 2005 Claudio Jeker <claudio@openbsd.org>
 * Copyright (c) 2004, 2005, 2007 Esben Norby <norby@openbsd.org>
 *
 * Permission to use, copy, modify, and distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <sys/types.h>
#include <sys/hash.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <netinet/ip6.h>
#include <netinet/ip_ah.h>
#include <arpa/inet.h>

#include <stdlib.h>
#include <string.h>

#include "ospf6.h"
#include "ospf6d.h"
#include "log.h"
#include "ospfe.h"
#include "rde.h"

extern struct ospfd_conf	*oeconf;
extern struct imsgev		*iev_rde;

struct ibuf *prepare_ls_update(struct iface *, int);
int	add_ls_update(struct ibuf *, struct iface *, void *, int, u_int16_t);
int	send_ls_update(struct ibuf *, struct iface *, struct in6_addr, u_int32_t);

void	ls_retrans_list_insert(struct nbr *, struct lsa_entry *);
void	ls_retrans_list_remove(struct nbr *, struct lsa_entry *);

/* link state update packet handling */
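/*
 * Flood an LSA out the given interface: purge conflicting entries from
 * the neighbors' link state request and retransmission lists and queue
 * the LSA for retransmission and for the initial flood.  Returns 1 if
 * the LSA was flooded back out the interface it was received on and
 * therefore must not be acknowledged, 0 otherwise.
 */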
int
lsa_flood(struct iface *iface, struct nbr *originator, struct lsa_hdr *lsa_hdr,
    void *data)
{
	struct nbr		*nbr;
	struct lsa_entry	*le = NULL;
	int			 queued = 0, dont_ack = 0;
	int			 r;

	LIST_FOREACH(nbr, &iface->nbr_list, entry) {
		if (nbr == iface->self)
			continue;
		if (!(nbr->state & NBR_STA_FLOOD))
			continue;

		if (iface->state & IF_STA_DROTHER && !queued)
			while ((le = ls_retrans_list_get(iface->self, lsa_hdr)))
			    ls_retrans_list_free(iface->self, le);

		while ((le = ls_retrans_list_get(nbr, lsa_hdr)))
			ls_retrans_list_free(nbr, le);

		if (!(nbr->state & NBR_STA_FULL) &&
		    (le = ls_req_list_get(nbr, lsa_hdr)) != NULL) {
			r = lsa_newer(lsa_hdr, le->le_lsa);
			if (r > 0) {
				/* the LSA to flood is newer than requested */
				ls_req_list_free(nbr, le);
				/* the new LSA still needs to be flooded */
			} else if (r < 0) {
				/* the LSA to flood is older than requested */
				continue;
			} else {
				/* the LSAs are equal */
				ls_req_list_free(nbr, le);
				continue;
			}
		}

		if (nbr == originator) {
			dont_ack++;
			continue;
		}

		/* non-DR/BDR routers keep all LSAs on one retrans list */
		if (iface->state & IF_STA_DROTHER) {
			if (!queued)
				ls_retrans_list_add(iface->self, data,
				    iface->rxmt_interval, 0);
			queued = 1;
		} else {
			ls_retrans_list_add(nbr, data, iface->rxmt_interval, 0);
			queued = 1;
		}
	}

	if (!queued)
		return (0);

	if (iface == originator->iface && iface->self != originator) {
		if (iface->dr == originator || iface->bdr == originator)
			return (0);
		if (iface->state & IF_STA_BACKUP)
			return (0);
		dont_ack++;
	}

	/*
	 * The initial flood needs to be queued separately: the timeout
	 * is zero and oneshot has to be set because the retransmission
	 * queues are already loaded.
	 */
	switch (iface->type) {
	case IF_TYPE_POINTOPOINT:
	case IF_TYPE_BROADCAST:
		ls_retrans_list_add(iface->self, data, 0, 1);
		break;
	case IF_TYPE_NBMA:
	case IF_TYPE_POINTOMULTIPOINT:
	case IF_TYPE_VIRTUALLINK:
		LIST_FOREACH(nbr, &iface->nbr_list, entry) {
			if (nbr == iface->self)
				continue;
			if (!(nbr->state & NBR_STA_FLOOD))
				continue;
			if (!TAILQ_EMPTY(&nbr->ls_retrans_list)) {
				le = TAILQ_LAST(&nbr->ls_retrans_list,
				    lsa_head);
				if (lsa_hdr->type != le->le_lsa->type ||
				    lsa_hdr->ls_id != le->le_lsa->ls_id ||
				    lsa_hdr->adv_rtr != le->le_lsa->adv_rtr)
					continue;
			}
			ls_retrans_list_add(nbr, data, 0, 1);
		}
		break;
	default:
		fatalx("lsa_flood: unknown interface type");
	}

	return (dont_ack == 2);
}

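/*
 * Allocate and initialize an ibuf for a link state update packet:
 * size it to the interface MTU (or IPV6_MAXPACKET for big packets),
 * never less than IPV6_MMTU, subtract the IPv6 header and worst-case
 * AH/ESP overhead, write the OSPF header and reserve room for the
 * number-of-LSAs field.
 */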
struct ibuf *
prepare_ls_update(struct iface *iface, int bigpkt)
{
	struct ibuf		*buf;
	size_t			 size;

	size = bigpkt ? IPV6_MAXPACKET : iface->mtu;
	if (size < IPV6_MMTU)
		size = IPV6_MMTU;
	size -= sizeof(struct ip6_hdr);
	/*
	 * Reserve space for optional AH or ESP encryption.  The
	 * algorithm is taken from ah_output and esp_output, the
	 * values are the maxima of crypto/xform.c.
	 */
	size -= max(
	    /* base-ah-header replay authsize */
	    AH_FLENGTH + sizeof(u_int32_t) + 32,
	    /* spi sequence ivlen blocksize pad-length next-header authsize */
	    2 * sizeof(u_int32_t) + 16 + 16 + 2 * sizeof(u_int8_t) + 32);

	if ((buf = ibuf_open(size)) == NULL)
		fatal("prepare_ls_update");

	/* OSPF header */
	if (gen_ospf_hdr(buf, iface, PACKET_TYPE_LS_UPDATE))
		goto fail;

	/* reserve space for the number-of-LSAs field */
	if (ibuf_reserve(buf, sizeof(u_int32_t)) == NULL)
		goto fail;

	return (buf);
fail:
	log_warn("prepare_ls_update");
	ibuf_free(buf);
	return (NULL);
}

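/*
 * Append one LSA to the update buffer if it still fits.  The LS age is
 * increased by "older" plus the interface transmit delay and capped at
 * MAX_AGE.  Returns 1 on success, 0 if the LSA does not fit.
 */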
int
add_ls_update(struct ibuf *buf, struct iface *iface, void *data, int len,
    u_int16_t older)
{
	size_t		pos;
	u_int16_t	age;

	if (buf->wpos + len >= buf->max)
		return (0);

	pos = buf->wpos;
	if (ibuf_add(buf, data, len)) {
		log_warn("add_ls_update");
		return (0);
	}

	/* age LSA before sending it out */
	memcpy(&age, data, sizeof(age));
	age = ntohs(age);
	if ((age += older + iface->transmit_delay) >= MAX_AGE)
		age = MAX_AGE;
	age = htons(age);
	memcpy(ibuf_seek(buf, pos, sizeof(age)), &age, sizeof(age));

	return (1);
}

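/*
 * Patch the number of LSAs into the reserved field, finalize the OSPF
 * header via upd_ospf_hdr() and send the packet to the given
 * destination.  The buffer is freed on both the success and failure
 * paths.
 */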
int
send_ls_update(struct ibuf *buf, struct iface *iface, struct in6_addr addr,
    u_int32_t nlsa)
{
	int			 ret;

	nlsa = htonl(nlsa);
	memcpy(ibuf_seek(buf, sizeof(struct ospf_hdr), sizeof(nlsa)),
	    &nlsa, sizeof(nlsa));
	/* calculate checksum */
	if (upd_ospf_hdr(buf, iface))
		goto fail;

	ret = send_packet(iface, buf->buf, buf->wpos, &addr);

	ibuf_free(buf);
	return (ret);
fail:
	log_warn("send_ls_update");
	ibuf_free(buf);
	return (-1);
}

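/*
 * Handle a received link state update packet: check the neighbor state
 * and the advertised LSA count and lengths, then hand each LSA over to
 * the RDE for processing.
 */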
void
recv_ls_update(struct nbr *nbr, char *buf, u_int16_t len)
{
	struct lsa_hdr		 lsa;
	u_int32_t		 nlsa;

	if (len < sizeof(nlsa)) {
		log_warnx("recv_ls_update: bad packet size, neighbor ID %s",
		    inet_ntoa(nbr->id));
		return;
	}
	memcpy(&nlsa, buf, sizeof(nlsa));
	nlsa = ntohl(nlsa);
	buf += sizeof(nlsa);
	len -= sizeof(nlsa);

	switch (nbr->state) {
	case NBR_STA_DOWN:
	case NBR_STA_ATTEMPT:
	case NBR_STA_INIT:
	case NBR_STA_2_WAY:
	case NBR_STA_XSTRT:
	case NBR_STA_SNAP:
		log_debug("recv_ls_update: packet ignored in state %s, "
		    "neighbor ID %s", nbr_state_name(nbr->state),
		    inet_ntoa(nbr->id));
		break;
	case NBR_STA_XCHNG:
	case NBR_STA_LOAD:
	case NBR_STA_FULL:
		for (; nlsa > 0 && len > 0; nlsa--) {
			if (len < sizeof(lsa)) {
				log_warnx("recv_ls_update: bad packet size, "
				    "neighbor ID %s", inet_ntoa(nbr->id));
				return;
			}
			memcpy(&lsa, buf, sizeof(lsa));
			if (len < ntohs(lsa.len)) {
				log_warnx("recv_ls_update: bad packet size, "
				    "neighbor ID %s", inet_ntoa(nbr->id));
				return;
			}
			imsg_compose_event(iev_rde, IMSG_LS_UPD, nbr->peerid, 0,
			    -1, buf, ntohs(lsa.len));
			buf += ntohs(lsa.len);
			len -= ntohs(lsa.len);
		}
		if (nlsa > 0 || len > 0) {
			log_warnx("recv_ls_update: bad packet size, "
			    "neighbor ID %s", inet_ntoa(nbr->id));
			return;
		}
		break;
	default:
		fatalx("recv_ls_update: unknown neighbor state");
	}
}

/* link state retransmit list */
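/*
 * Queue an LSA on the neighbor's retransmission list.  The LSA must
 * already be in the LSA cache; "timeout" is the delay until the first
 * (re)transmission and "oneshot" marks entries that are sent once and
 * then dropped (initial flood).  The retransmission timer is armed if
 * it is not already pending.
 */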
void
ls_retrans_list_add(struct nbr *nbr, struct lsa_hdr *lsa,
    unsigned short timeout, unsigned short oneshot)
{
	struct timeval		 tv;
	struct lsa_entry	*le;
	struct lsa_ref		*ref;

	if ((ref = lsa_cache_get(lsa)) == NULL)
		fatalx("King Bula sez: somebody forgot to lsa_cache_add");

	if ((le = calloc(1, sizeof(*le))) == NULL)
		fatal("ls_retrans_list_add");

	le->le_ref = ref;
	le->le_when = timeout;
	le->le_oneshot = oneshot;

	ls_retrans_list_insert(nbr, le);

	if (!evtimer_pending(&nbr->ls_retrans_timer, NULL)) {
		timerclear(&tv);
		tv.tv_sec = TAILQ_FIRST(&nbr->ls_retrans_list)->le_when;

		if (evtimer_add(&nbr->ls_retrans_timer, &tv) == -1)
			fatal("ls_retrans_list_add");
	}
}

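/*
 * Remove the LSA described by lsa_hdr from the retransmission list if
 * sequence number and checksum match, i.e. the acknowledged instance
 * is the one that is queued.  Returns 0 on success, -1 otherwise.
 */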
int
ls_retrans_list_del(struct nbr *nbr, struct lsa_hdr *lsa_hdr)
{
	struct lsa_entry	*le;

	if ((le = ls_retrans_list_get(nbr, lsa_hdr)) == NULL)
		return (-1);
	if (lsa_hdr->seq_num == le->le_ref->hdr.seq_num &&
	    lsa_hdr->ls_chksum == le->le_ref->hdr.ls_chksum) {
		ls_retrans_list_free(nbr, le);
		return (0);
	}

	return (-1);
}

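/*
 * Look up a retransmission list entry by LSA type, link state ID and
 * advertising router.
 */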
struct lsa_entry *
ls_retrans_list_get(struct nbr *nbr, struct lsa_hdr *lsa_hdr)
{
	struct lsa_entry	*le;

	TAILQ_FOREACH(le, &nbr->ls_retrans_list, entry) {
		if ((lsa_hdr->type == le->le_ref->hdr.type) &&
		    (lsa_hdr->ls_id == le->le_ref->hdr.ls_id) &&
		    (lsa_hdr->adv_rtr == le->le_ref->hdr.adv_rtr))
			return (le);
	}
	return (NULL);
}

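/*
 * Insert an entry into the time-ordered retransmission list.  Each
 * entry's le_when holds the delay relative to its predecessor, so the
 * absolute timeout passed in new->le_when is converted into a delta
 * while walking the list (e.g. absolute timeouts 2, 5 and 9 are stored
 * as deltas 2, 3 and 4).
 */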
void
ls_retrans_list_insert(struct nbr *nbr, struct lsa_entry *new)
{
	struct lsa_entry	*le;
	unsigned short		 when = new->le_when;

	TAILQ_FOREACH(le, &nbr->ls_retrans_list, entry) {
		if (when < le->le_when) {
			new->le_when = when;
			TAILQ_INSERT_BEFORE(le, new, entry);
			nbr->ls_ret_cnt++;
			return;
		}
		when -= le->le_when;
	}
	new->le_when = when;
	TAILQ_INSERT_TAIL(&nbr->ls_retrans_list, new, entry);
	nbr->ls_ret_cnt++;
}

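/*
 * Unlink an entry from the retransmission list, folding its remaining
 * delay into the next entry, and re-arm the retransmission timer if
 * the head of the list was removed.
 */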
void
ls_retrans_list_remove(struct nbr *nbr, struct lsa_entry *le)
{
	struct timeval		 tv;
	struct lsa_entry	*next = TAILQ_NEXT(le, entry);
	int			 reset = 0;

	/* adjust timeout of next entry */
	if (next)
		next->le_when += le->le_when;

	if (TAILQ_FIRST(&nbr->ls_retrans_list) == le &&
	    evtimer_pending(&nbr->ls_retrans_timer, NULL))
		reset = 1;

	TAILQ_REMOVE(&nbr->ls_retrans_list, le, entry);
	nbr->ls_ret_cnt--;

	if (reset && TAILQ_FIRST(&nbr->ls_retrans_list)) {
		if (evtimer_del(&nbr->ls_retrans_timer) == -1)
			fatal("ls_retrans_list_remove");

		timerclear(&tv);
		tv.tv_sec = TAILQ_FIRST(&nbr->ls_retrans_list)->le_when;

		if (evtimer_add(&nbr->ls_retrans_timer, &tv) == -1)
			fatal("ls_retrans_list_remove");
	}
}

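/*
 * Remove an entry from the retransmission list and release its LSA
 * cache reference.
 */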
void
ls_retrans_list_free(struct nbr *nbr, struct lsa_entry *le)
{
	ls_retrans_list_remove(nbr, le);

	lsa_cache_put(le->le_ref, nbr);
	free(le);
}

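/* Flush a neighbor's entire retransmission list. */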
void
ls_retrans_list_clr(struct nbr *nbr)
{
	struct lsa_entry	*le;

	while ((le = TAILQ_FIRST(&nbr->ls_retrans_list)) != NULL)
		ls_retrans_list_free(nbr, le);

	nbr->ls_ret_cnt = 0;
}

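/*
 * Retransmission timer for a neighbor (or for the interface's flood
 * queue on iface->self): choose the destination address based on
 * interface state and type, pack all entries whose timeout expired
 * into one link state update packet, send it and re-arm the timer for
 * the next pending entry.
 */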
/* ARGSUSED */
void
ls_retrans_timer(int fd, short event, void *bula)
{
	struct timeval		 tv;
	struct timespec		 tp;
	struct in6_addr		 addr;
	struct nbr		*nbr = bula;
	struct lsa_entry	*le;
	struct ibuf		*buf;
	time_t			 now;
	int			 bigpkt, d;
	u_int32_t		 nlsa = 0;

	if ((le = TAILQ_FIRST(&nbr->ls_retrans_list)) != NULL)
		le->le_when = 0;	/* timer fired */
	else
		return;			/* queue empty, nothing to do */

	clock_gettime(CLOCK_MONOTONIC, &tp);
	now = tp.tv_sec;

	if (nbr->iface->self == nbr) {
		/*
		 * oneshot is set for LSAs queued for the initial flood.
		 * If oneshot is not set, the LSA needs to be converted
		 * into a flood because the router recently became DR
		 * or BDR.
		 */
		if (le->le_oneshot && nbr->iface->state & IF_STA_DRORBDR)
			inet_pton(AF_INET6, AllSPFRouters, &addr);
		else if (nbr->iface->state & IF_STA_DRORBDR) {
			/*
			 * The old retransmission needs to be converted
			 * into a flood by rerunning lsa_flood().
			 */
			lsa_flood(nbr->iface, nbr, &le->le_ref->hdr,
			    le->le_ref->data);
			ls_retrans_list_free(nbr, le);
			/* ls_retrans_list_free retriggers the timer */
			return;
		} else if (nbr->iface->type == IF_TYPE_POINTOPOINT)
			memcpy(&addr, &nbr->iface->dst, sizeof(addr));
		else
			inet_pton(AF_INET6, AllDRouters, &addr);
	} else
		memcpy(&addr, &nbr->addr, sizeof(addr));

	/*
	 * Allow big IPv6 packets that may get fragmented if a single
	 * LSA is too big for an unfragmented packet.  To avoid
	 * duplicating the exact sizing algorithm here, just make a
	 * good guess: if the first LSA is bigger than 1024 bytes,
	 * reserve a separate big packet for it.  The kernel will
	 * figure out if fragmentation is necessary.  For smaller
	 * LSAs, we avoid big packets and fragmentation.
	 */
	bigpkt = le->le_ref->len > 1024;
	if ((buf = prepare_ls_update(nbr->iface, bigpkt)) == NULL) {
		le->le_when = 1;
		goto done;
	}

	while ((le = TAILQ_FIRST(&nbr->ls_retrans_list)) != NULL &&
	    le->le_when == 0) {
		d = now - le->le_ref->stamp;
		if (d < 0)
			d = 0;
		else if (d > MAX_AGE)
			d = MAX_AGE;

		if (add_ls_update(buf, nbr->iface, le->le_ref->data,
		    le->le_ref->len, d) == 0) {
			if (nlsa)
				break;
			/*
			 * A single LSA is too big to fit into an update
			 * packet.  In this case drop the LSA, otherwise
			 * we would send empty update packets in an
			 * endless loop.
			 */
			log_warnx("ls_retrans_timer: cannot send lsa, dropped");
			log_debug("ls_retrans_timer: type: %04x len: %u",
			    ntohs(le->le_ref->hdr.type), le->le_ref->len);
			ls_retrans_list_free(nbr, le);
			continue;
		}
		nlsa++;
		if (le->le_oneshot)
			ls_retrans_list_free(nbr, le);
		else {
			TAILQ_REMOVE(&nbr->ls_retrans_list, le, entry);
			nbr->ls_ret_cnt--;
			le->le_when = nbr->iface->rxmt_interval;
			ls_retrans_list_insert(nbr, le);
		}
		/* do not put additional LSAs into a fragmented big packet */
		if (bigpkt)
			break;
	}
	send_ls_update(buf, nbr->iface, addr, nlsa);

done:
	if ((le = TAILQ_FIRST(&nbr->ls_retrans_list)) != NULL) {
		timerclear(&tv);
		tv.tv_sec = le->le_when;

		if (evtimer_add(&nbr->ls_retrans_timer, &tv) == -1)
			fatal("ls_retrans_timer");
	}
}

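/*
 * LSA cache: reference-counted copies of LSAs shared by all
 * retransmission list entries that refer to the same LSA, kept in a
 * hash table keyed on the LSA header.
 */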
LIST_HEAD(lsa_cache_head, lsa_ref);

struct lsa_cache {
	struct lsa_cache_head	*hashtbl;
	u_int32_t		 hashmask;
} lsacache;

struct lsa_ref		*lsa_cache_look(struct lsa_hdr *);

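/*
 * Set up the LSA cache hash table; hashsize is rounded up to the next
 * power of two.
 */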
void
lsa_cache_init(u_int32_t hashsize)
{
	u_int32_t        hs, i;

	for (hs = 1; hs < hashsize; hs <<= 1)
		;
	lsacache.hashtbl = calloc(hs, sizeof(struct lsa_cache_head));
	if (lsacache.hashtbl == NULL)
		fatal("lsa_cache_init");

	for (i = 0; i < hs; i++)
		LIST_INIT(&lsacache.hashtbl[i]);

	lsacache.hashmask = hs - 1;
}

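/*
 * Add an LSA to the cache.  If an entry with an identical header
 * already exists its reference count is bumped and it is reused;
 * otherwise the LSA is copied and inserted into the hash table.
 */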
struct lsa_ref *
lsa_cache_add(void *data, u_int16_t len)
{
	struct lsa_cache_head	*head;
	struct lsa_ref		*ref, *old;
	struct timespec		 tp;

	if ((ref = calloc(1, sizeof(*ref))) == NULL)
		fatal("lsa_cache_add");
	memcpy(&ref->hdr, data, sizeof(ref->hdr));

	if ((old = lsa_cache_look(&ref->hdr))) {
		free(ref);
		old->refcnt++;
		return (old);
	}

	if ((ref->data = malloc(len)) == NULL)
		fatal("lsa_cache_add");
	memcpy(ref->data, data, len);

	clock_gettime(CLOCK_MONOTONIC, &tp);
	ref->stamp = tp.tv_sec;
	ref->len = len;
	ref->refcnt = 1;

	head = &lsacache.hashtbl[hash32_buf(&ref->hdr, sizeof(ref->hdr),
	    HASHINIT) & lsacache.hashmask];
	LIST_INSERT_HEAD(head, ref, entry);
	return (ref);
}

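/*
 * Look up a cached LSA by header and take a reference on it if found.
 */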
struct lsa_ref *
lsa_cache_get(struct lsa_hdr *lsa_hdr)
{
	struct lsa_ref		*ref;

	ref = lsa_cache_look(lsa_hdr);
	if (ref)
		ref->refcnt++;

	return (ref);
}

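/*
 * Release a reference to a cached LSA.  When the count drops to zero,
 * LSAs that have reached MAX_AGE are reported to the RDE and the entry
 * is freed.
 */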
void
lsa_cache_put(struct lsa_ref *ref, struct nbr *nbr)
{
	if (--ref->refcnt > 0)
		return;

	if (ntohs(ref->hdr.age) >= MAX_AGE)
		ospfe_imsg_compose_rde(IMSG_LS_MAXAGE, nbr->peerid, 0,
		    ref->data, sizeof(struct lsa_hdr));

	free(ref->data);
	LIST_REMOVE(ref, entry);
	free(ref);
}

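/*
 * Internal lookup: find a cached LSA whose header matches lsa_hdr
 * exactly.  Does not take a reference.
 */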
struct lsa_ref *
lsa_cache_look(struct lsa_hdr *lsa_hdr)
{
	struct lsa_cache_head	*head;
	struct lsa_ref		*ref;

	head = &lsacache.hashtbl[hash32_buf(lsa_hdr, sizeof(*lsa_hdr),
	    HASHINIT) & lsacache.hashmask];

	LIST_FOREACH(ref, head, entry) {
		if (memcmp(&ref->hdr, lsa_hdr, sizeof(*lsa_hdr)) == 0)
			/* found match */
			return (ref);
	}

	return (NULL);
}