/*	$OpenBSD: pf_norm.c,v 1.70 2003/07/17 16:25:52 frantzen Exp $ */

/*
 * Copyright 2001 Niels Provos <provos@citi.umich.edu>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "pflog.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/mbuf.h>
#include <sys/filio.h>
#include <sys/fcntl.h>
#include <sys/socket.h>
#include <sys/kernel.h>
#include <sys/time.h>
#include <sys/pool.h>

#include <dev/rndvar.h>
#include <net/if.h>
#include <net/if_types.h>
#include <net/bpf.h>
#include <net/route.h>
#include <net/if_pflog.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/ip_var.h>
#include <netinet/tcp.h>
#include <netinet/tcp_seq.h>
#include <netinet/udp.h>
#include <netinet/ip_icmp.h>

#ifdef INET6
#include <netinet/ip6.h>
#endif /* INET6 */

#include <net/pfvar.h>

struct pf_frent {
	LIST_ENTRY(pf_frent) fr_next;
	struct ip *fr_ip;
	struct mbuf *fr_m;
};

struct pf_frcache {
	LIST_ENTRY(pf_frcache) fr_next;
	u_int16_t	fr_off;
	u_int16_t	fr_end;
};
#define PFFRAG_SEENLAST	0x0001		/* Seen the last fragment for this packet */
#define PFFRAG_NOBUFFER	0x0002		/* Non-buffering fragment cache */
#define PFFRAG_DROP	0x0004		/* Drop all fragments */
#define BUFFER_FRAGMENTS(fr)	(!((fr)->fr_flags & PFFRAG_NOBUFFER))

struct pf_fragment {
	RB_ENTRY(pf_fragment) fr_entry;
	TAILQ_ENTRY(pf_fragment) frag_next;
	struct in_addr	fr_src;
	struct in_addr	fr_dst;
	u_int8_t	fr_p;		/* protocol of this fragment */
	u_int8_t	fr_flags;	/* status flags */
	u_int16_t	fr_id;		/* fragment id for reassembly */
	u_int16_t	fr_max;		/* fragment data max */
	u_int32_t	fr_timeout;
#define fr_queue	fr_u.fru_queue
#define fr_cache	fr_u.fru_cache
	union {
		LIST_HEAD(pf_fragq, pf_frent) fru_queue;	/* buffering */
		LIST_HEAD(pf_cacheq, pf_frcache) fru_cache;	/* non-buf */
	} fr_u;
};

TAILQ_HEAD(pf_fragqueue, pf_fragment)	pf_fragqueue;
TAILQ_HEAD(pf_cachequeue, pf_fragment)	pf_cachequeue;

static __inline int	 pf_frag_compare(struct pf_fragment *,
			    struct pf_fragment *);
RB_HEAD(pf_frag_tree, pf_fragment)	pf_frag_tree, pf_cache_tree;
RB_PROTOTYPE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);
RB_GENERATE(pf_frag_tree, pf_fragment, fr_entry, pf_frag_compare);

/* Private prototypes */
void			 pf_ip2key(struct pf_fragment *, struct ip *);
void			 pf_remove_fragment(struct pf_fragment *);
void			 pf_flush_fragments(void);
void			 pf_free_fragment(struct pf_fragment *);
struct pf_fragment	*pf_find_fragment(struct ip *, struct pf_frag_tree *);
struct mbuf		*pf_reassemble(struct mbuf **, struct pf_fragment *,
			    struct pf_frent *, int);
struct mbuf		*pf_fragcache(struct mbuf **, struct ip *,
			    struct pf_fragment *, int, int, int *);
u_int16_t		 pf_cksum_fixup(u_int16_t, u_int16_t, u_int16_t);
int			 pf_normalize_tcpopt(struct pf_rule *, struct mbuf *,
			    struct tcphdr *, int);

#define	DPFPRINTF(x)	if (pf_status.debug >= PF_DEBUG_MISC) \
			    { printf("%s: ", __func__); printf x ;}

/* Globals */
struct pool		 pf_frent_pl, pf_frag_pl, pf_cache_pl, pf_cent_pl;
struct pool		 pf_state_scrub_pl;
int			 pf_nfrents, pf_ncache;

void
pf_normalize_init(void)
{
	pool_init(&pf_frent_pl, sizeof(struct pf_frent), 0, 0, 0, "pffrent",
	    NULL);
	pool_init(&pf_frag_pl, sizeof(struct pf_fragment), 0, 0, 0, "pffrag",
	    NULL);
	pool_init(&pf_cache_pl, sizeof(struct pf_fragment), 0, 0, 0,
	    "pffrcache", NULL);
	pool_init(&pf_cent_pl, sizeof(struct pf_frcache), 0, 0, 0, "pffrcent",
	    NULL);
	pool_init(&pf_state_scrub_pl, sizeof(struct pf_state_scrub), 0, 0, 0,
	    "pfstscr", NULL);

	pool_sethiwat(&pf_frag_pl, PFFRAG_FRAG_HIWAT);
	pool_sethardlimit(&pf_frent_pl, PFFRAG_FRENT_HIWAT, NULL, 0);
	pool_sethardlimit(&pf_cache_pl, PFFRAG_FRCACHE_HIWAT, NULL, 0);
	pool_sethardlimit(&pf_cent_pl, PFFRAG_FRCENT_HIWAT, NULL, 0);

	TAILQ_INIT(&pf_fragqueue);
	TAILQ_INIT(&pf_cachequeue);
}

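/*
 * Fragments are keyed in the red-black trees by (id, protocol, source,
 * destination), compared in that order; pf_ip2key() below builds such a
 * key from an incoming header.  The orderings produced by the id and
 * protocol subtractions are arbitrary but consistent, which is all the
 * tree requires.
 */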
static __inline int
pf_frag_compare(struct pf_fragment *a, struct pf_fragment *b)
{
	int	diff;

	if ((diff = a->fr_id - b->fr_id))
		return (diff);
	else if ((diff = a->fr_p - b->fr_p))
		return (diff);
	else if (a->fr_src.s_addr < b->fr_src.s_addr)
		return (-1);
	else if (a->fr_src.s_addr > b->fr_src.s_addr)
		return (1);
	else if (a->fr_dst.s_addr < b->fr_dst.s_addr)
		return (-1);
	else if (a->fr_dst.s_addr > b->fr_dst.s_addr)
		return (1);
	return (0);
}

void
pf_purge_expired_fragments(void)
{
	struct pf_fragment	*frag;
	u_int32_t		 expire = time.tv_sec -
				    pf_default_rule.timeout[PFTM_FRAG];

	while ((frag = TAILQ_LAST(&pf_fragqueue, pf_fragqueue)) != NULL) {
		KASSERT(BUFFER_FRAGMENTS(frag));
		if (frag->fr_timeout > expire)
			break;

		DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag));
		pf_free_fragment(frag);
	}

	while ((frag = TAILQ_LAST(&pf_cachequeue, pf_cachequeue)) != NULL) {
		KASSERT(!BUFFER_FRAGMENTS(frag));
		if (frag->fr_timeout > expire)
			break;

		DPFPRINTF(("expiring %d(%p)\n", frag->fr_id, frag));
		pf_free_fragment(frag);
		KASSERT(TAILQ_EMPTY(&pf_cachequeue) ||
		    TAILQ_LAST(&pf_cachequeue, pf_cachequeue) != frag);
	}
}

/*
 * Try to flush old fragments to make space for new ones
 */

void
pf_flush_fragments(void)
{
	struct pf_fragment	*frag;
	int			 goal;

	goal = pf_nfrents * 9 / 10;
	DPFPRINTF(("trying to free > %d frents\n",
	    pf_nfrents - goal));
	while (goal < pf_nfrents) {
		frag = TAILQ_LAST(&pf_fragqueue, pf_fragqueue);
		if (frag == NULL)
			break;
		pf_free_fragment(frag);
	}

	goal = pf_ncache * 9 / 10;
	DPFPRINTF(("trying to free > %d cache entries\n",
	    pf_ncache - goal));
	while (goal < pf_ncache) {
		frag = TAILQ_LAST(&pf_cachequeue, pf_cachequeue);
		if (frag == NULL)
			break;
		pf_free_fragment(frag);
	}
}

/* Frees the fragments and all associated entries */

void
pf_free_fragment(struct pf_fragment *frag)
{
	struct pf_frent		*frent;
	struct pf_frcache	*frcache;

	/* Free all fragments */
	if (BUFFER_FRAGMENTS(frag)) {
		for (frent = LIST_FIRST(&frag->fr_queue); frent;
		    frent = LIST_FIRST(&frag->fr_queue)) {
			LIST_REMOVE(frent, fr_next);

			m_freem(frent->fr_m);
			pool_put(&pf_frent_pl, frent);
			pf_nfrents--;
		}
	} else {
		for (frcache = LIST_FIRST(&frag->fr_cache); frcache;
		    frcache = LIST_FIRST(&frag->fr_cache)) {
			LIST_REMOVE(frcache, fr_next);

			KASSERT(LIST_EMPTY(&frag->fr_cache) ||
			    LIST_FIRST(&frag->fr_cache)->fr_off >
			    frcache->fr_end);

			pool_put(&pf_cent_pl, frcache);
			pf_ncache--;
		}
	}

	pf_remove_fragment(frag);
}

void
pf_ip2key(struct pf_fragment *key, struct ip *ip)
{
	key->fr_p = ip->ip_p;
	key->fr_id = ip->ip_id;
	key->fr_src.s_addr = ip->ip_src.s_addr;
	key->fr_dst.s_addr = ip->ip_dst.s_addr;
}

struct pf_fragment *
pf_find_fragment(struct ip *ip, struct pf_frag_tree *tree)
{
	struct pf_fragment	 key;
	struct pf_fragment	*frag;

	pf_ip2key(&key, ip);

	frag = RB_FIND(pf_frag_tree, tree, &key);
	if (frag != NULL) {
		/* XXX Are we sure we want to update the timeout? */
		frag->fr_timeout = time.tv_sec;
		if (BUFFER_FRAGMENTS(frag)) {
			TAILQ_REMOVE(&pf_fragqueue, frag, frag_next);
			TAILQ_INSERT_HEAD(&pf_fragqueue, frag, frag_next);
		} else {
			TAILQ_REMOVE(&pf_cachequeue, frag, frag_next);
			TAILQ_INSERT_HEAD(&pf_cachequeue, frag, frag_next);
		}
	}

	return (frag);
}

/* Removes a fragment from the fragment queue and frees the fragment */

void
pf_remove_fragment(struct pf_fragment *frag)
{
	if (BUFFER_FRAGMENTS(frag)) {
		RB_REMOVE(pf_frag_tree, &pf_frag_tree, frag);
		TAILQ_REMOVE(&pf_fragqueue, frag, frag_next);
		pool_put(&pf_frag_pl, frag);
	} else {
		RB_REMOVE(pf_frag_tree, &pf_cache_tree, frag);
		TAILQ_REMOVE(&pf_cachequeue, frag, frag_next);
		pool_put(&pf_cache_pl, frag);
	}
}

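/*
 * ip_off carries the fragment offset in units of 8 bytes, with the flag
 * bits (reserved/DF/MF) in the top three bits; FR_IP_OFF() masks the
 * flags off and shifts to recover the byte offset.  For example, a
 * fragment with ip_off = htons(IP_MF | 185) starts at byte 185 * 8 == 1480.
 */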
#define FR_IP_OFF(fr)	((ntohs((fr)->fr_ip->ip_off) & IP_OFFMASK) << 3)
struct mbuf *
pf_reassemble(struct mbuf **m0, struct pf_fragment *frag,
    struct pf_frent *frent, int mff)
{
	struct mbuf	*m = *m0, *m2;
	struct pf_frent	*frea, *next;
	struct pf_frent	*frep = NULL;
	struct ip	*ip = frent->fr_ip;
	int		 hlen = ip->ip_hl << 2;
	u_int16_t	 off = (ntohs(ip->ip_off) & IP_OFFMASK) << 3;
	u_int16_t	 ip_len = ntohs(ip->ip_len) - ip->ip_hl * 4;
	u_int16_t	 max = ip_len + off;

	KASSERT(frag == NULL || BUFFER_FRAGMENTS(frag));

	/* Strip off ip header */
	m->m_data += hlen;
	m->m_len -= hlen;

	/* Create a new reassembly queue for this packet */
	if (frag == NULL) {
		frag = pool_get(&pf_frag_pl, PR_NOWAIT);
		if (frag == NULL) {
			pf_flush_fragments();
			frag = pool_get(&pf_frag_pl, PR_NOWAIT);
			if (frag == NULL)
				goto drop_fragment;
		}

		frag->fr_flags = 0;
		frag->fr_max = 0;
		frag->fr_src = frent->fr_ip->ip_src;
		frag->fr_dst = frent->fr_ip->ip_dst;
		frag->fr_p = frent->fr_ip->ip_p;
		frag->fr_id = frent->fr_ip->ip_id;
		frag->fr_timeout = time.tv_sec;
		LIST_INIT(&frag->fr_queue);

		RB_INSERT(pf_frag_tree, &pf_frag_tree, frag);
		TAILQ_INSERT_HEAD(&pf_fragqueue, frag, frag_next);

		/* We do not have a previous fragment */
		frep = NULL;
		goto insert;
	}

	/*
	 * Find a fragment after the current one:
	 *  - off contains the real shifted offset.
	 */
	LIST_FOREACH(frea, &frag->fr_queue, fr_next) {
		if (FR_IP_OFF(frea) > off)
			break;
		frep = frea;
	}

	KASSERT(frep != NULL || frea != NULL);

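	/*
	 * "precut" is how many bytes the previous fragment (frep)
	 * overlaps the front of this one.  That many bytes are trimmed
	 * from the head of the new fragment and its offset and length
	 * are adjusted to match; a fragment whose payload is entirely
	 * covered already is dropped.
	 */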
	if (frep != NULL &&
	    FR_IP_OFF(frep) + ntohs(frep->fr_ip->ip_len) - frep->fr_ip->ip_hl *
	        4 > off)
	{
		u_int16_t	precut;

		precut = FR_IP_OFF(frep) + ntohs(frep->fr_ip->ip_len) -
		    frep->fr_ip->ip_hl * 4 - off;
		if (precut >= ip_len)
			goto drop_fragment;
		m_adj(frent->fr_m, precut);
		DPFPRINTF(("overlap -%d\n", precut));
		/* Enforce 8 byte boundaries */
		ip->ip_off = htons(ntohs(ip->ip_off) + (precut >> 3));
		off = (ntohs(ip->ip_off) & IP_OFFMASK) << 3;
		ip_len -= precut;
		ip->ip_len = htons(ip_len);
	}

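	/*
	 * "aftercut" is how many bytes this fragment overlaps the
	 * following entries (frea).  Partially overlapped successors
	 * are trimmed at the front; completely overlapped ones are
	 * unlinked and freed.
	 */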
	for (; frea != NULL && ip_len + off > FR_IP_OFF(frea);
	    frea = next)
	{
		u_int16_t	aftercut;

		aftercut = ip_len + off - FR_IP_OFF(frea);
		DPFPRINTF(("adjust overlap %d\n", aftercut));
		if (aftercut < ntohs(frea->fr_ip->ip_len) - frea->fr_ip->ip_hl
		    * 4)
		{
			frea->fr_ip->ip_len =
			    htons(ntohs(frea->fr_ip->ip_len) - aftercut);
			frea->fr_ip->ip_off = htons(ntohs(frea->fr_ip->ip_off) +
			    (aftercut >> 3));
			m_adj(frea->fr_m, aftercut);
			break;
		}

		/* This fragment is completely overlapped, lose it */
		next = LIST_NEXT(frea, fr_next);
		m_freem(frea->fr_m);
		LIST_REMOVE(frea, fr_next);
		pool_put(&pf_frent_pl, frea);
		pf_nfrents--;
	}

 insert:
	/* Update maximum data size */
	if (frag->fr_max < max)
		frag->fr_max = max;
	/* This is the last segment */
	if (!mff)
		frag->fr_flags |= PFFRAG_SEENLAST;

	if (frep == NULL)
		LIST_INSERT_HEAD(&frag->fr_queue, frent, fr_next);
	else
		LIST_INSERT_AFTER(frep, frent, fr_next);

	/* Check if we are completely reassembled */
	if (!(frag->fr_flags & PFFRAG_SEENLAST))
		return (NULL);

	/* Check if we have all the data */
	off = 0;
	for (frep = LIST_FIRST(&frag->fr_queue); frep; frep = next) {
		next = LIST_NEXT(frep, fr_next);

		off += ntohs(frep->fr_ip->ip_len) - frep->fr_ip->ip_hl * 4;
		if (off < frag->fr_max &&
		    (next == NULL || FR_IP_OFF(next) != off))
		{
			DPFPRINTF(("missing fragment at %d, next %d, max %d\n",
			    off, next == NULL ? -1 : FR_IP_OFF(next),
			    frag->fr_max));
			return (NULL);
		}
	}
	DPFPRINTF(("%d < %d?\n", off, frag->fr_max));
	if (off < frag->fr_max)
		return (NULL);

	/* We have all the data */
	frent = LIST_FIRST(&frag->fr_queue);
	KASSERT(frent != NULL);
	if ((frent->fr_ip->ip_hl << 2) + off > IP_MAXPACKET) {
		DPFPRINTF(("drop: too big: %d\n", off));
		pf_free_fragment(frag);
		return (NULL);
	}
	next = LIST_NEXT(frent, fr_next);

	/* Magic from ip_input */
	ip = frent->fr_ip;
	m = frent->fr_m;
	m2 = m->m_next;
	m->m_next = NULL;
	m_cat(m, m2);
	pool_put(&pf_frent_pl, frent);
	pf_nfrents--;
	for (frent = next; frent != NULL; frent = next) {
		next = LIST_NEXT(frent, fr_next);

		m2 = frent->fr_m;
		pool_put(&pf_frent_pl, frent);
		pf_nfrents--;
		m_cat(m, m2);
	}

	ip->ip_src = frag->fr_src;
	ip->ip_dst = frag->fr_dst;

	/* Remove from fragment queue */
	pf_remove_fragment(frag);

	hlen = ip->ip_hl << 2;
	ip->ip_len = htons(off + hlen);
	m->m_len += hlen;
	m->m_data -= hlen;

	/* some debugging cruft by sklower, below, will go away soon */
	/* XXX this should be done elsewhere */
	if (m->m_flags & M_PKTHDR) {
		int plen = 0;
		for (m2 = m; m2; m2 = m2->m_next)
			plen += m2->m_len;
		m->m_pkthdr.len = plen;
	}

	DPFPRINTF(("complete: %p(%d)\n", m, ntohs(ip->ip_len)));
	return (m);

 drop_fragment:
	/* Oops - fail safe - drop packet */
	pool_put(&pf_frent_pl, frent);
	pf_nfrents--;
	m_freem(m);
	return (NULL);
}

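/*
 * Non-buffering fragment cache, used by the scrub fragment crop/drop
 * modes.  Instead of queueing mbufs for reassembly it only remembers,
 * per datagram, which byte ranges have already been passed, as a sorted
 * list of (fr_off, fr_end) pairs.  Each new fragment is trimmed against
 * those ranges so that overlaps cannot rewrite data a host behind the
 * firewall has already seen, and is then forwarded immediately.
 */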
struct mbuf *
pf_fragcache(struct mbuf **m0, struct ip *h, struct pf_fragment *frag, int mff,
    int drop, int *nomem)
{
	struct mbuf		*m = *m0;
	struct pf_frcache	*frp, *fra, *cur = NULL;
	int			 ip_len = ntohs(h->ip_len) - (h->ip_hl << 2);
	u_int16_t		 off = ntohs(h->ip_off) << 3;
	u_int16_t		 max = ip_len + off;
	int			 hosed = 0;

	KASSERT(frag == NULL || !BUFFER_FRAGMENTS(frag));

	/* Create a new range queue for this packet */
	if (frag == NULL) {
		frag = pool_get(&pf_cache_pl, PR_NOWAIT);
		if (frag == NULL) {
			pf_flush_fragments();
			frag = pool_get(&pf_cache_pl, PR_NOWAIT);
			if (frag == NULL)
				goto no_mem;
		}

		/* Get an entry for the queue */
		cur = pool_get(&pf_cent_pl, PR_NOWAIT);
		if (cur == NULL) {
			pool_put(&pf_cache_pl, frag);
			goto no_mem;
		}
		pf_ncache++;

		frag->fr_flags = PFFRAG_NOBUFFER;
		frag->fr_max = 0;
		frag->fr_src = h->ip_src;
		frag->fr_dst = h->ip_dst;
		frag->fr_p = h->ip_p;
		frag->fr_id = h->ip_id;
		frag->fr_timeout = time.tv_sec;

		cur->fr_off = off;
		cur->fr_end = max;
		LIST_INIT(&frag->fr_cache);
		LIST_INSERT_HEAD(&frag->fr_cache, cur, fr_next);

		RB_INSERT(pf_frag_tree, &pf_cache_tree, frag);
		TAILQ_INSERT_HEAD(&pf_cachequeue, frag, frag_next);

		DPFPRINTF(("fragcache[%d]: new %d-%d\n", h->ip_id, off, max));

		goto pass;
	}

	/*
	 * Find a fragment after the current one:
	 *  - off contains the real shifted offset.
	 */
	frp = NULL;
	LIST_FOREACH(fra, &frag->fr_cache, fr_next) {
		if (fra->fr_off > off)
			break;
		frp = fra;
	}

	KASSERT(frp != NULL || fra != NULL);

	if (frp != NULL) {
		int	precut;

		precut = frp->fr_end - off;
		if (precut >= ip_len) {
			/* Fragment is entirely a duplicate */
			DPFPRINTF(("fragcache[%d]: dead (%d-%d) %d-%d\n",
			    h->ip_id, frp->fr_off, frp->fr_end, off, max));
			goto drop_fragment;
		}
		if (precut == 0) {
			/* They are adjacent.  Fixup cache entry */
			DPFPRINTF(("fragcache[%d]: adjacent (%d-%d) %d-%d\n",
			    h->ip_id, frp->fr_off, frp->fr_end, off, max));
			frp->fr_end = max;
		} else if (precut > 0) {
			/* The first part of this payload overlaps with a
			 * fragment that has already been passed.
			 * Need to trim off the first part of the payload.
			 * But to do so easily, we need to create another
			 * mbuf to throw the original header into.
			 */

			DPFPRINTF(("fragcache[%d]: chop %d (%d-%d) %d-%d\n",
			    h->ip_id, precut, frp->fr_off, frp->fr_end, off,
			    max));

			off += precut;
			max -= precut;
			/* Update the previous frag to encompass this one */
			frp->fr_end = max;

			if (!drop) {
				/* XXX Optimization opportunity
				 * This is a very heavy way to trim the payload.
				 * We could do it much faster by diddling mbuf
				 * internals but that would be even less legible
				 * than this mbuf magic.  For my next trick,
				 * I'll pull a rabbit out of my laptop.
				 */
				*m0 = m_copym2(m, 0, h->ip_hl << 2, M_NOWAIT);
				if (*m0 == NULL)
					goto no_mem;
				KASSERT((*m0)->m_next == NULL);
				m_adj(m, precut + (h->ip_hl << 2));
				m_cat(*m0, m);
				m = *m0;
				if (m->m_flags & M_PKTHDR) {
					int plen = 0;
					struct mbuf *t;
					for (t = m; t; t = t->m_next)
						plen += t->m_len;
					m->m_pkthdr.len = plen;
				}

				h = mtod(m, struct ip *);

				KASSERT((int)m->m_len == ntohs(h->ip_len) - precut);
				h->ip_off = htons(ntohs(h->ip_off) + (precut >> 3));
				h->ip_len = htons(ntohs(h->ip_len) - precut);
			} else {
				hosed++;
			}
		} else {
			/* There is a gap between fragments */

			DPFPRINTF(("fragcache[%d]: gap %d (%d-%d) %d-%d\n",
			    h->ip_id, -precut, frp->fr_off, frp->fr_end, off,
			    max));

			cur = pool_get(&pf_cent_pl, PR_NOWAIT);
			if (cur == NULL)
				goto no_mem;
			pf_ncache++;

			cur->fr_off = off;
			cur->fr_end = max;
			LIST_INSERT_AFTER(frp, cur, fr_next);
		}
	}

	if (fra != NULL) {
		int	aftercut;
		int	merge = 0;

		aftercut = max - fra->fr_off;
		if (aftercut == 0) {
			/* Adjacent fragments */
			DPFPRINTF(("fragcache[%d]: adjacent %d-%d (%d-%d)\n",
			    h->ip_id, off, max, fra->fr_off, fra->fr_end));
			fra->fr_off = off;
			merge = 1;
		} else if (aftercut > 0) {
			/* Need to chop off the tail of this fragment */
			DPFPRINTF(("fragcache[%d]: chop %d %d-%d (%d-%d)\n",
			    h->ip_id, aftercut, off, max, fra->fr_off,
			    fra->fr_end));
			fra->fr_off = off;
			max -= aftercut;

			merge = 1;

			if (!drop) {
				m_adj(m, -aftercut);
				if (m->m_flags & M_PKTHDR) {
					int plen = 0;
					struct mbuf *t;
					for (t = m; t; t = t->m_next)
						plen += t->m_len;
					m->m_pkthdr.len = plen;
				}
				h = mtod(m, struct ip *);
				KASSERT((int)m->m_len == ntohs(h->ip_len) - aftercut);
				h->ip_len = htons(ntohs(h->ip_len) - aftercut);
			} else {
				hosed++;
			}
		} else {
			/* There is a gap between fragments */
			DPFPRINTF(("fragcache[%d]: gap %d %d-%d (%d-%d)\n",
			    h->ip_id, -aftercut, off, max, fra->fr_off,
			    fra->fr_end));

			cur = pool_get(&pf_cent_pl, PR_NOWAIT);
			if (cur == NULL)
				goto no_mem;
			pf_ncache++;

			cur->fr_off = off;
			cur->fr_end = max;
			LIST_INSERT_BEFORE(fra, cur, fr_next);
		}

		/* Need to glue together two separate fragment descriptors */
		if (merge) {
			if (cur && fra->fr_off <= cur->fr_end) {
				/* Need to merge in a previous 'cur' */
				DPFPRINTF(("fragcache[%d]: adjacent(merge "
				    "%d-%d) %d-%d (%d-%d)\n",
				    h->ip_id, cur->fr_off, cur->fr_end, off,
				    max, fra->fr_off, fra->fr_end));
				fra->fr_off = cur->fr_off;
				LIST_REMOVE(cur, fr_next);
				pool_put(&pf_cent_pl, cur);
				pf_ncache--;
				cur = NULL;

			} else if (frp && fra->fr_off <= frp->fr_end) {
				/* Need to merge in a modified 'frp' */
				KASSERT(cur == NULL);
				DPFPRINTF(("fragcache[%d]: adjacent(merge "
				    "%d-%d) %d-%d (%d-%d)\n",
				    h->ip_id, frp->fr_off, frp->fr_end, off,
				    max, fra->fr_off, fra->fr_end));
				fra->fr_off = frp->fr_off;
				LIST_REMOVE(frp, fr_next);
				pool_put(&pf_cent_pl, frp);
				pf_ncache--;
				frp = NULL;

			}
		}
	}

	if (hosed) {
		/*
		 * We must keep tracking the overall fragment even when
		 * we're going to drop it anyway so that we know when to
		 * free the overall descriptor.  Thus we drop the frag late.
		 */
		goto drop_fragment;
	}

 pass:
	/* Update maximum data size */
	if (frag->fr_max < max)
		frag->fr_max = max;

	/* This is the last segment */
	if (!mff)
		frag->fr_flags |= PFFRAG_SEENLAST;

	/* Check if we are completely reassembled */
	if ((frag->fr_flags & PFFRAG_SEENLAST) &&
	    LIST_FIRST(&frag->fr_cache)->fr_off == 0 &&
	    LIST_FIRST(&frag->fr_cache)->fr_end == frag->fr_max) {
		/* Remove from fragment queue */
		DPFPRINTF(("fragcache[%d]: done 0-%d\n", h->ip_id,
		    frag->fr_max));
		pf_free_fragment(frag);
	}

	return (m);

 no_mem:
	*nomem = 1;

	/* Still need to pay attention to !IP_MF */
	if (!mff && frag)
		frag->fr_flags |= PFFRAG_SEENLAST;

	m_freem(m);
	return (NULL);

 drop_fragment:

	/* Still need to pay attention to !IP_MF */
	if (!mff && frag)
		frag->fr_flags |= PFFRAG_SEENLAST;

	if (drop) {
		/* This fragment has been deemed bad.  Don't reassemble */
		if ((frag->fr_flags & PFFRAG_DROP) == 0)
			DPFPRINTF(("fragcache[%d]: dropping overall fragment\n",
			    h->ip_id));
		frag->fr_flags |= PFFRAG_DROP;
	}

	m_freem(m);
	return (NULL);
}

int
pf_normalize_ip(struct mbuf **m0, int dir, struct ifnet *ifp, u_short *reason)
{
	struct mbuf		*m = *m0;
	struct pf_rule		*r;
	struct pf_frent		*frent;
	struct pf_fragment	*frag = NULL;
	struct ip		*h = mtod(m, struct ip *);
	int			 mff = (ntohs(h->ip_off) & IP_MF);
	int			 hlen = h->ip_hl << 2;
	u_int16_t		 fragoff = (ntohs(h->ip_off) & IP_OFFMASK) << 3;
	u_int16_t		 max;
	int			 ip_len;
	int			 ip_off;

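	/*
	 * Evaluate the scrub ruleset.  On each mismatch the skip steps
	 * (r->skip[], precomputed when the ruleset is loaded) jump over
	 * the whole run of subsequent rules that would fail the same
	 * test, rather than stepping rule by rule.
	 */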
	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	while (r != NULL) {
		r->evaluations++;
		if (r->ifp != NULL && r->ifp != ifp)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != dir)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != AF_INET)
			r = r->skip[PF_SKIP_AF].ptr;
		else if (r->proto && r->proto != h->ip_p)
			r = r->skip[PF_SKIP_PROTO].ptr;
		else if (PF_MISMATCHAW(&r->src.addr,
		    (struct pf_addr *)&h->ip_src.s_addr, AF_INET, r->src.not))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr,
		    (struct pf_addr *)&h->ip_dst.s_addr, AF_INET, r->dst.not))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else
			break;
	}

	if (r == NULL)
		return (PF_PASS);
	else
		r->packets++;

	/* Check for illegal packets */
	if (hlen < (int)sizeof(struct ip))
		goto drop;

	if (hlen > ntohs(h->ip_len))
		goto drop;

	/* Clear IP_DF if the rule uses the no-df option */
	if (r->rule_flag & PFRULE_NODF)
		h->ip_off &= htons(~IP_DF);

	/* We will need other tests here */
	if (!fragoff && !mff)
		goto no_fragment;

	/* We're dealing with a fragment now. Don't allow fragments
	 * with IP_DF to enter the cache. If the flag was cleared by
	 * no-df above, fine. Otherwise drop it.
	 */
	if (h->ip_off & htons(IP_DF)) {
		DPFPRINTF(("IP_DF\n"));
		goto bad;
	}

	ip_len = ntohs(h->ip_len) - hlen;
	ip_off = (ntohs(h->ip_off) & IP_OFFMASK) << 3;

	/* All fragments are 8 byte aligned */
	if (mff && (ip_len & 0x7)) {
		DPFPRINTF(("mff and %d\n", ip_len));
		goto bad;
	}

	/* Respect maximum length */
	if (fragoff + ip_len > IP_MAXPACKET) {
		DPFPRINTF(("max packet %d\n", fragoff + ip_len));
		goto bad;
	}
	max = fragoff + ip_len;

	if ((r->rule_flag & (PFRULE_FRAGCROP|PFRULE_FRAGDROP)) == 0) {
		/* Fully buffer all of the fragments */

		frag = pf_find_fragment(h, &pf_frag_tree);

		/* Check if we saw the last fragment already */
		if (frag != NULL && (frag->fr_flags & PFFRAG_SEENLAST) &&
		    max > frag->fr_max)
			goto bad;

		/* Get an entry for the fragment queue */
		frent = pool_get(&pf_frent_pl, PR_NOWAIT);
		if (frent == NULL) {
			REASON_SET(reason, PFRES_MEMORY);
			return (PF_DROP);
		}
		pf_nfrents++;
		frent->fr_ip = h;
		frent->fr_m = m;

		/* Might return a completely reassembled mbuf, or NULL */
		DPFPRINTF(("reass frag %d @ %d-%d\n", h->ip_id, fragoff, max));
		*m0 = m = pf_reassemble(m0, frag, frent, mff);

		if (m == NULL)
			return (PF_DROP);

		if (frag && (frag->fr_flags & PFFRAG_DROP))
			goto drop;

		h = mtod(m, struct ip *);
	} else {
		/* non-buffering fragment cache (drops or masks overlaps) */
		int	nomem = 0;

		if (dir == PF_OUT) {
			if (m_tag_find(m, PACKET_TAG_PF_FRAGCACHE, NULL) !=
			    NULL) {
				/* Already passed the fragment cache in the
				 * input direction.  If we continued, it would
				 * appear to be a dup and would be dropped.
				 */
				goto fragment_pass;
			}
		}

		frag = pf_find_fragment(h, &pf_cache_tree);

		/* Check if we saw the last fragment already */
		if (frag != NULL && (frag->fr_flags & PFFRAG_SEENLAST) &&
		    max > frag->fr_max) {
			if (r->rule_flag & PFRULE_FRAGDROP)
				frag->fr_flags |= PFFRAG_DROP;
			goto bad;
		}

		*m0 = m = pf_fragcache(m0, h, frag, mff,
		    (r->rule_flag & PFRULE_FRAGDROP) ? 1 : 0, &nomem);
		if (m == NULL) {
			if (nomem)
				goto no_mem;
			goto drop;
		}

		if (dir == PF_IN) {
			struct m_tag	*mtag;

			mtag = m_tag_get(PACKET_TAG_PF_FRAGCACHE, 0, M_NOWAIT);
			if (mtag == NULL)
				goto no_mem;
			m_tag_prepend(m, mtag);
		}
		if (frag && (frag->fr_flags & PFFRAG_DROP))
			goto drop;
		goto fragment_pass;
	}

 no_fragment:
	/* At this point, only IP_DF is allowed in ip_off */
	h->ip_off &= htons(IP_DF);

	/* Enforce a minimum ttl, may cause endless packet loops */
	if (r->min_ttl && h->ip_ttl < r->min_ttl)
		h->ip_ttl = r->min_ttl;

	if (r->rule_flag & PFRULE_RANDOMID)
		h->ip_id = ip_randomid();

	return (PF_PASS);

 fragment_pass:
	/* Enforce a minimum ttl, may cause endless packet loops */
	if (r->min_ttl && h->ip_ttl < r->min_ttl)
		h->ip_ttl = r->min_ttl;

	return (PF_PASS);

 no_mem:
	REASON_SET(reason, PFRES_MEMORY);
	if (r != NULL && r->log)
		PFLOG_PACKET(ifp, h, m, AF_INET, dir, *reason, r, NULL, NULL);
	return (PF_DROP);

 drop:
	REASON_SET(reason, PFRES_NORM);
	if (r != NULL && r->log)
		PFLOG_PACKET(ifp, h, m, AF_INET, dir, *reason, r, NULL, NULL);
	return (PF_DROP);

 bad:
	DPFPRINTF(("dropping bad fragment\n"));
	/* Free associated fragments */
	if (frag != NULL)
		pf_free_fragment(frag);

	REASON_SET(reason, PFRES_FRAG);
	if (r != NULL && r->log)
		PFLOG_PACKET(ifp, h, m, AF_INET, dir, *reason, r, NULL, NULL);

	return (PF_DROP);
}

#ifdef INET6
int
pf_normalize_ip6(struct mbuf **m0, int dir, struct ifnet *ifp, u_short *reason)
{
	struct mbuf		*m = *m0;
	struct pf_rule		*r;
	struct ip6_hdr		*h = mtod(m, struct ip6_hdr *);
	int			 off;
	struct ip6_ext		 ext;
	struct ip6_opt		 opt;
	struct ip6_opt_jumbo	 jumbo;
	struct ip6_frag		 frag;
	u_int32_t		 jumbolen = 0, plen;
	u_int16_t		 fragoff = 0;
	int			 optend;
	int			 ooff;
	u_int8_t		 proto;
	int			 terminal;

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	while (r != NULL) {
		r->evaluations++;
		if (r->ifp != NULL && r->ifp != ifp)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != dir)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != AF_INET6)
			r = r->skip[PF_SKIP_AF].ptr;
#if 0 /* header chain! */
		else if (r->proto && r->proto != h->ip6_nxt)
			r = r->skip[PF_SKIP_PROTO].ptr;
#endif
		else if (PF_MISMATCHAW(&r->src.addr,
		    (struct pf_addr *)&h->ip6_src, AF_INET6, r->src.not))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr,
		    (struct pf_addr *)&h->ip6_dst, AF_INET6, r->dst.not))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else
			break;
	}

	if (r == NULL)
		return (PF_PASS);
	else
		r->packets++;

	/* Check for illegal packets */
	if (sizeof(struct ip6_hdr) + IPV6_MAXPACKET < m->m_pkthdr.len)
		goto drop;

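	/*
	 * Walk the IPv6 extension header chain until a terminal
	 * (transport) header is reached.  Note the differing length
	 * units: the AH payload length is in 4-byte words minus two,
	 * while the other extension headers count 8-byte units minus
	 * one, hence the two conversions below.
	 */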
	off = sizeof(struct ip6_hdr);
	proto = h->ip6_nxt;
	terminal = 0;
	do {
		switch (proto) {
		case IPPROTO_FRAGMENT:
			goto fragment;
			break;
		case IPPROTO_AH:
		case IPPROTO_ROUTING:
		case IPPROTO_DSTOPTS:
			if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL,
			    NULL, AF_INET6))
				goto shortpkt;
			if (proto == IPPROTO_AH)
				off += (ext.ip6e_len + 2) * 4;
			else
				off += (ext.ip6e_len + 1) * 8;
			proto = ext.ip6e_nxt;
			break;
		case IPPROTO_HOPOPTS:
			if (!pf_pull_hdr(m, off, &ext, sizeof(ext), NULL,
			    NULL, AF_INET6))
				goto shortpkt;
			optend = off + (ext.ip6e_len + 1) * 8;
			ooff = off + sizeof(ext);
			do {
				if (!pf_pull_hdr(m, ooff, &opt.ip6o_type,
				    sizeof(opt.ip6o_type), NULL, NULL,
				    AF_INET6))
					goto shortpkt;
				if (opt.ip6o_type == IP6OPT_PAD1) {
					ooff++;
					continue;
				}
				if (!pf_pull_hdr(m, ooff, &opt, sizeof(opt),
				    NULL, NULL, AF_INET6))
					goto shortpkt;
				if (ooff + sizeof(opt) + opt.ip6o_len > optend)
					goto drop;
				switch (opt.ip6o_type) {
				case IP6OPT_JUMBO:
					if (h->ip6_plen != 0)
						goto drop;
					if (!pf_pull_hdr(m, ooff, &jumbo,
					    sizeof(jumbo), NULL, NULL,
					    AF_INET6))
						goto shortpkt;
					memcpy(&jumbolen, jumbo.ip6oj_jumbo_len,
					    sizeof(jumbolen));
					jumbolen = ntohl(jumbolen);
					if (jumbolen <= IPV6_MAXPACKET)
						goto drop;
					if (sizeof(struct ip6_hdr) + jumbolen !=
					    m->m_pkthdr.len)
						goto drop;
					break;
				default:
					break;
				}
				ooff += sizeof(opt) + opt.ip6o_len;
			} while (ooff < optend);

			off = optend;
			proto = ext.ip6e_nxt;
			break;
		default:
			terminal = 1;
			break;
		}
	} while (!terminal);

	/* jumbo payload option must be present, or plen > 0 */
	if (ntohs(h->ip6_plen) == 0)
		plen = jumbolen;
	else
		plen = ntohs(h->ip6_plen);
	if (plen == 0)
		goto drop;
	if (sizeof(struct ip6_hdr) + plen > m->m_pkthdr.len)
		goto shortpkt;

	/* Enforce a minimum ttl, may cause endless packet loops */
	if (r->min_ttl && h->ip6_hlim < r->min_ttl)
		h->ip6_hlim = r->min_ttl;

	return (PF_PASS);

 fragment:
	if (ntohs(h->ip6_plen) == 0 || jumbolen)
		goto drop;
	plen = ntohs(h->ip6_plen);

	if (!pf_pull_hdr(m, off, &frag, sizeof(frag), NULL, NULL, AF_INET6))
		goto shortpkt;
	fragoff = ntohs(frag.ip6f_offlg & IP6F_OFF_MASK);
	if (fragoff + (plen - off - sizeof(frag)) > IPV6_MAXPACKET)
		goto badfrag;

	/* do something about it */
	return (PF_PASS);

 shortpkt:
	REASON_SET(reason, PFRES_SHORT);
	if (r != NULL && r->log)
		PFLOG_PACKET(ifp, h, m, AF_INET6, dir, *reason, r, NULL, NULL);
	return (PF_DROP);

 drop:
	REASON_SET(reason, PFRES_NORM);
	if (r != NULL && r->log)
		PFLOG_PACKET(ifp, h, m, AF_INET6, dir, *reason, r, NULL, NULL);
	return (PF_DROP);

 badfrag:
	REASON_SET(reason, PFRES_FRAG);
	if (r != NULL && r->log)
		PFLOG_PACKET(ifp, h, m, AF_INET6, dir, *reason, r, NULL, NULL);
	return (PF_DROP);
}
#endif /* INET6 */

int
pf_normalize_tcp(int dir, struct ifnet *ifp, struct mbuf *m, int ipoff,
    int off, void *h, struct pf_pdesc *pd)
{
	struct pf_rule	*r, *rm = NULL;
	struct tcphdr	*th = pd->hdr.tcp;
	int		 rewrite = 0;
	u_short		 reason;
	u_int8_t	 flags;
	sa_family_t	 af = pd->af;

	r = TAILQ_FIRST(pf_main_ruleset.rules[PF_RULESET_SCRUB].active.ptr);
	while (r != NULL) {
		r->evaluations++;
		if (r->ifp != NULL && r->ifp != ifp)
			r = r->skip[PF_SKIP_IFP].ptr;
		else if (r->direction && r->direction != dir)
			r = r->skip[PF_SKIP_DIR].ptr;
		else if (r->af && r->af != af)
			r = r->skip[PF_SKIP_AF].ptr;
		else if (r->proto && r->proto != pd->proto)
			r = r->skip[PF_SKIP_PROTO].ptr;
		else if (PF_MISMATCHAW(&r->src.addr, pd->src, af, r->src.not))
			r = r->skip[PF_SKIP_SRC_ADDR].ptr;
		else if (r->src.port_op && !pf_match_port(r->src.port_op,
			    r->src.port[0], r->src.port[1], th->th_sport))
			r = r->skip[PF_SKIP_SRC_PORT].ptr;
		else if (PF_MISMATCHAW(&r->dst.addr, pd->dst, af, r->dst.not))
			r = r->skip[PF_SKIP_DST_ADDR].ptr;
		else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
			    r->dst.port[0], r->dst.port[1], th->th_dport))
			r = r->skip[PF_SKIP_DST_PORT].ptr;
		else {
			rm = r;
			break;
		}
	}

	if (rm == NULL)
		return (PF_PASS);
	else
		r->packets++;

	if (rm->rule_flag & PFRULE_REASSEMBLE_TCP)
		pd->flags |= PFDESC_TCP_NORM;

	flags = th->th_flags;
	if (flags & TH_SYN) {
		/* Illegal packet */
		if (flags & TH_RST)
			goto tcp_drop;

		if (flags & TH_FIN)
			flags &= ~TH_FIN;
	} else {
		/* Illegal packet */
		if (!(flags & (TH_ACK|TH_RST)))
			goto tcp_drop;
	}

	if (!(flags & TH_ACK)) {
		/* These flags are only valid if ACK is set */
		if ((flags & TH_FIN) || (flags & TH_PUSH) || (flags & TH_URG))
			goto tcp_drop;
	}

	/* Check for illegal header length */
	if (th->th_off < (sizeof(struct tcphdr) >> 2))
		goto tcp_drop;

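	/*
	 * th_off, th_x2 and th_flags share the 16-bit word that follows
	 * th_ack, so reading that word before and after the rewrite
	 * yields the old and new values needed to patch the checksum
	 * incrementally via pf_cksum_fixup().
	 */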
	/* If flags changed, or reserved data set, then adjust */
	if (flags != th->th_flags || th->th_x2 != 0) {
		u_int16_t	ov, nv;

		ov = *(u_int16_t *)(&th->th_ack + 1);
		th->th_flags = flags;
		th->th_x2 = 0;
		nv = *(u_int16_t *)(&th->th_ack + 1);

		th->th_sum = pf_cksum_fixup(th->th_sum, ov, nv);
		rewrite = 1;
	}

	/* Remove urgent pointer, if TH_URG is not set */
	if (!(flags & TH_URG) && th->th_urp) {
		th->th_sum = pf_cksum_fixup(th->th_sum, th->th_urp, 0);
		th->th_urp = 0;
		rewrite = 1;
	}

	/* Process options */
	if (r->max_mss && pf_normalize_tcpopt(r, m, th, off))
		rewrite = 1;

	/* copy back packet headers if we sanitized */
	if (rewrite)
		m_copyback(m, off, sizeof(*th), (caddr_t)th);

	return (PF_PASS);

 tcp_drop:
	REASON_SET(&reason, PFRES_NORM);
	if (rm != NULL && r->log)
		PFLOG_PACKET(ifp, h, m, AF_INET, dir, reason, r, NULL, NULL);
	return (PF_DROP);
}

int
pf_normalize_tcp_init(struct mbuf *m, int off, struct pf_pdesc *pd,
    struct tcphdr *th, struct pf_state_peer *src, struct pf_state_peer *dst)
{
	u_int8_t hdr[60];
	u_int8_t *opt;

	KASSERT(src->scrub == NULL);

	src->scrub = pool_get(&pf_state_scrub_pl, PR_NOWAIT);
	if (src->scrub == NULL)
		return (1);
	bzero(src->scrub, sizeof(*src->scrub));

	switch (pd->af) {
#ifdef INET
	case AF_INET: {
		struct ip *h = mtod(m, struct ip *);
		src->scrub->pfss_ttl = h->ip_ttl;
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
		src->scrub->pfss_ttl = h->ip6_hlim;
		break;
	}
#endif /* INET6 */
	}

	/*
	 * All normalizations below are only begun if we see the start of
	 * the connection.  They must all set an enabled bit in pfss_flags
	 */
	if ((th->th_flags & TH_SYN) == 0)
		return (0);

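	/*
	 * Timestamp modulation: if the SYN carries a TCP timestamp
	 * option, record a random per-connection offset (pfss_ts_mod).
	 * pf_normalize_tcp_stateful() later adds it to this peer's
	 * timestamps and subtracts it from the echoes coming back,
	 * hiding the host's real timestamp clock.
	 */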
	if (th->th_off > (sizeof(struct tcphdr) >> 2) && src->scrub &&
	    pf_pull_hdr(m, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
		/* Diddle with TCP options */
		int hlen;
		opt = hdr + sizeof(struct tcphdr);
		hlen = (th->th_off << 2) - sizeof(struct tcphdr);
		while (hlen >= TCPOLEN_TIMESTAMP) {
			switch (*opt) {
			case TCPOPT_EOL:	/* FALLTHROUGH */
			case TCPOPT_NOP:
				opt++;
				hlen--;
				break;
			case TCPOPT_TIMESTAMP:
				if (opt[1] >= TCPOLEN_TIMESTAMP) {
					src->scrub->pfss_flags |=
					    PFSS_TIMESTAMP;
					src->scrub->pfss_ts_mod = arc4random();
				}
				/* FALLTHROUGH */
			default:
				hlen -= opt[1];
				opt += opt[1];
				break;
			}
		}
	}

	return (0);
}

void
pf_normalize_tcp_cleanup(struct pf_state *state)
{
	if (state->src.scrub)
		pool_put(&pf_state_scrub_pl, state->src.scrub);
	if (state->dst.scrub)
		pool_put(&pf_state_scrub_pl, state->dst.scrub);

	/* Someday... flush the TCP segment reassembly descriptors. */
}

int
pf_normalize_tcp_stateful(struct mbuf *m, int off, struct pf_pdesc *pd,
    u_short *reason, struct tcphdr *th, struct pf_state_peer *src,
    struct pf_state_peer *dst, int *writeback)
{
	u_int8_t hdr[60];
	u_int8_t *opt;
	int copyback = 0;

	KASSERT(src->scrub || dst->scrub);

	/*
	 * Enforce the minimum TTL seen for this connection.  Negate a common
	 * technique to evade an intrusion detection system and confuse
	 * firewall state code.
	 */
	switch (pd->af) {
#ifdef INET
	case AF_INET: {
		if (src->scrub) {
			struct ip *h = mtod(m, struct ip *);
			if (h->ip_ttl > src->scrub->pfss_ttl)
				src->scrub->pfss_ttl = h->ip_ttl;
			h->ip_ttl = src->scrub->pfss_ttl;
		}
		break;
	}
#endif /* INET */
#ifdef INET6
	case AF_INET6: {
		if (src->scrub) {
			struct ip6_hdr *h = mtod(m, struct ip6_hdr *);
			if (h->ip6_hlim > src->scrub->pfss_ttl)
				src->scrub->pfss_ttl = h->ip6_hlim;
			h->ip6_hlim = src->scrub->pfss_ttl;
		}
		break;
	}
#endif /* INET6 */
	}

	if (th->th_off > (sizeof(struct tcphdr) >> 2) &&
	    ((src->scrub && (src->scrub->pfss_flags & PFSS_TIMESTAMP)) ||
	    (dst->scrub && (dst->scrub->pfss_flags & PFSS_TIMESTAMP))) &&
	    pf_pull_hdr(m, off, hdr, th->th_off << 2, NULL, NULL, pd->af)) {
		/* Diddle with TCP options */
		int hlen;
		opt = hdr + sizeof(struct tcphdr);
		hlen = (th->th_off << 2) - sizeof(struct tcphdr);
		while (hlen >= TCPOLEN_TIMESTAMP) {
			switch (*opt) {
			case TCPOPT_EOL:	/* FALLTHROUGH */
			case TCPOPT_NOP:
				opt++;
				hlen--;
				break;
			case TCPOPT_TIMESTAMP:
				/* Modulate the timestamps.  Can be used for
				 * NAT detection, OS uptime determination or
				 * reboot detection.
				 */
				if (opt[1] >= TCPOLEN_TIMESTAMP) {
					u_int32_t ts_value;
					if (src->scrub &&
					    (src->scrub->pfss_flags &
					    PFSS_TIMESTAMP)) {
						memcpy(&ts_value, &opt[2],
						    sizeof(u_int32_t));
						ts_value = htonl(ntohl(ts_value)
						    + src->scrub->pfss_ts_mod);
						pf_change_a(&opt[2],
						    &th->th_sum, ts_value, 0);
						copyback = 1;
					}
					if (dst->scrub &&
					    (dst->scrub->pfss_flags &
					    PFSS_TIMESTAMP)) {
						memcpy(&ts_value, &opt[6],
						    sizeof(u_int32_t));
						ts_value = htonl(ntohl(ts_value)
						    - dst->scrub->pfss_ts_mod);
						pf_change_a(&opt[6],
						    &th->th_sum, ts_value, 0);
						copyback = 1;
					}
				}
				/* FALLTHROUGH */
			default:
				hlen -= opt[1];
				opt += opt[1];
				break;
			}
		}
		if (copyback) {
			/* Copy back the options; the caller copies back the header */
			*writeback = 1;
			m_copyback(m, off + sizeof(struct tcphdr),
			    (th->th_off << 2) - sizeof(struct tcphdr), hdr +
			    sizeof(struct tcphdr));
		}
	}

	/* I have a dream....  TCP segment reassembly.... */
	return (0);
}
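
/*
 * Walk the TCP options and clamp any MSS that exceeds the rule's max-mss.
 * The checksum is patched incrementally: per RFC 1624, replacing a 16-bit
 * word m by m' updates the checksum HC as HC' = ~(~HC + ~m + m'), which is
 * effectively what pf_cksum_fixup() computes.  E.g. clamping a
 * (hypothetical) MSS of 8960 to max-mss 1440 does
 *
 *	th->th_sum = pf_cksum_fixup(th->th_sum, htons(8960), htons(1440));
 *	*mss = htons(1440);
 */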
int
pf_normalize_tcpopt(struct pf_rule *r, struct mbuf *m, struct tcphdr *th,
    int off)
{
	u_int16_t	*mss;
	int		 thoff;
	int		 opt, cnt, optlen = 0;
	int		 rewrite = 0;
	u_char		*optp;

	thoff = th->th_off << 2;
	cnt = thoff - sizeof(struct tcphdr);
	optp = mtod(m, caddr_t) + off + sizeof(struct tcphdr);

	for (; cnt > 0; cnt -= optlen, optp += optlen) {
		opt = optp[0];
		if (opt == TCPOPT_EOL)
			break;
		if (opt == TCPOPT_NOP)
			optlen = 1;
		else {
			if (cnt < 2)
				break;
			optlen = optp[1];
			if (optlen < 2 || optlen > cnt)
				break;
		}
		switch (opt) {
		case TCPOPT_MAXSEG:
			mss = (u_int16_t *)(optp + 2);
			if ((ntohs(*mss)) > r->max_mss) {
				th->th_sum = pf_cksum_fixup(th->th_sum,
				    *mss, htons(r->max_mss));
				*mss = htons(r->max_mss);
				rewrite = 1;
			}
			break;
		default:
			break;
		}
	}

	return (rewrite);
}