xref: /openbsd-src/sys/net/pf_norm.c (revision 3a3fbb3f2e2521ab7c4a56b7ff7462ebd9095ec5)
1 /*	$OpenBSD: pf_norm.c,v 1.16 2001/12/03 22:25:06 dhartmei Exp $ */
2 
3 /*
4  * Copyright 2001 Niels Provos <provos@citi.umich.edu>
5  * All rights reserved.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
17  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
18  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
19  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
21  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
22  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
23  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
24  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
25  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26  */
27 
28 #include <sys/param.h>
29 #include <sys/systm.h>
30 #include <sys/mbuf.h>
31 #include <sys/filio.h>
32 #include <sys/fcntl.h>
33 #include <sys/socket.h>
34 #include <sys/kernel.h>
35 #include <sys/malloc.h>
36 #include <sys/time.h>
37 #include <sys/pool.h>
38 
39 #include <net/if.h>
40 #include <net/if_types.h>
41 #include <net/bpf.h>
42 #include <net/route.h>
43 #include <net/if_pflog.h>
44 
45 #include <netinet/in.h>
46 #include <netinet/in_var.h>
47 #include <netinet/in_systm.h>
48 #include <netinet/ip.h>
49 #include <netinet/ip_var.h>
50 #include <netinet/tcp.h>
51 #include <netinet/tcp_seq.h>
52 #include <netinet/udp.h>
53 #include <netinet/ip_icmp.h>
54 
55 #include <net/pfvar.h>
56 
57 #include "pflog.h"
58 
/* One received IP fragment, kept on a pf_fragment's ordered queue. */
struct pf_frent {
	LIST_ENTRY(pf_frent) fr_next;	/* next fragment, ascending offset */
	struct ip *fr_ip;		/* fragment's IP header (points into fr_m) */
	struct mbuf *fr_m;		/* mbuf chain holding this fragment */
};
64 
#define PFFRAG_SEENLAST	0x0001		/* Seen the last fragment for this */

/*
 * Reassembly state for one fragmented IP packet, looked up by
 * (proto, src, dst, id) -- see pf_ip2key().
 */
struct pf_fragment {
	TAILQ_ENTRY(pf_fragment) frag_next;	/* LRU position in pf_fragqueue */
	struct in_addr	fr_src;		/* original source address */
	struct in_addr	fr_dst;		/* original destination address */
	u_int8_t	fr_p;		/* protocol of this fragment */
	u_int8_t	fr_flags;	/* status flags (PFFRAG_*) */
	u_int16_t	fr_id;		/* fragment id for reassembly */
	u_int16_t	fr_max;		/* fragment data max (highest byte seen) */
	struct timeval	fr_timeout;	/* refreshed on lookup, pf_find_fragment() */
	LIST_HEAD(pf_fragq, pf_frent) fr_queue;	/* fragments, ascending offset */
};
78 
/* All reassembly queues, most recently used at the head (LRU order). */
TAILQ_HEAD(pf_fragqueue, pf_fragment)	pf_fragqueue;

/* Private prototypes */
void			 pf_ip2key(struct pf_tree_key *, struct ip *);
void			 pf_remove_fragment(struct pf_fragment *);
void			 pf_flush_fragments(void);
void			 pf_free_fragment(struct pf_fragment *);
struct pf_fragment	*pf_find_fragment(struct ip *);
struct mbuf		*pf_reassemble(struct mbuf **, struct pf_fragment *,
			    struct pf_frent *, int);
u_int16_t		 pf_cksum_fixup(u_int16_t, u_int16_t, u_int16_t);
int			 pf_normalize_tcp(int, struct ifnet *, struct mbuf *,
			    int, int, void *, struct pf_pdesc *);

#define PFFRAG_FRENT_HIWAT	5000	/* Number of fragment entries */
#define PFFRAG_FRAG_HIWAT	1000	/* Number of fragmented packets */

/* Debug printf, only active when pf debugging is enabled */
#define DPFPRINTF(x)		if (pf_status.debug) printf x

#if NPFLOG > 0
/*
 * Log a packet via pflog.  For IPv4 the ip_len/ip_off fields are in
 * host order here, so swap them to network order around the call.
 */
#define PFLOG_PACKET(i,x,a,b,c,d,e) \
        do { \
                if (b == AF_INET) { \
                        HTONS(((struct ip *)x)->ip_len); \
                        HTONS(((struct ip *)x)->ip_off); \
                        pflog_packet(i,a,b,c,d,e); \
                        NTOHS(((struct ip *)x)->ip_len); \
                        NTOHS(((struct ip *)x)->ip_off); \
                } else { \
                        pflog_packet(i,a,b,c,d,e); \
                } \
        } while (0)
#else
#define		 PFLOG_PACKET(i,x,a,b,c,d,e)	((void)0)
#endif

/* Globals */
struct pf_tree_node	*tree_fragment;	/* lookup tree, keyed by pf_ip2key() */
struct pool		 pf_frent_pl, pf_frag_pl;	/* backing pools */
int			 pf_nfrents;	/* pf_frent entries currently allocated */
extern int		 pftm_frag;	/* Fragment expire timeout */
120 
121 void
122 pf_normalize_init(void)
123 {
124 	pool_init(&pf_frent_pl, sizeof(struct pf_frent), 0, 0, 0, "pffrent",
125 	    0, NULL, NULL, 0);
126 	pool_init(&pf_frag_pl, sizeof(struct pf_fragment), 0, 0, 0, "pffrag",
127 	    0, NULL, NULL, 0);
128 
129 	pool_sethiwat(&pf_frag_pl, PFFRAG_FRAG_HIWAT);
130 	pool_sethardlimit(&pf_frent_pl, PFFRAG_FRENT_HIWAT, NULL, 0);
131 
132 	TAILQ_INIT(&pf_fragqueue);
133 }
134 
135 void
136 pf_purge_expired_fragments(void)
137 {
138 	struct pf_fragment *frag;
139 	struct timeval now, expire;
140 
141 	microtime(&now);
142 
143 	timerclear(&expire);
144 	expire.tv_sec = pftm_frag;
145 	timersub(&now, &expire, &expire);
146 
147 	while ((frag = TAILQ_LAST(&pf_fragqueue, pf_fragqueue)) != NULL) {
148 		if (timercmp(&frag->fr_timeout, &expire, >))
149 			break;
150 
151 		DPFPRINTF((__FUNCTION__": expiring %p\n", frag));
152 		pf_free_fragment(frag);
153 	}
154 }
155 
156 /*
157  *  Try to flush old fragments to make space for new ones
158  */
159 
160 void
161 pf_flush_fragments(void)
162 {
163 	struct pf_fragment *frag;
164 	int goal = pf_nfrents * 9 / 10;
165 
166 	DPFPRINTF((__FUNCTION__": trying to free > %d frents\n",
167 		   pf_nfrents - goal));
168 
169 	while (goal < pf_nfrents) {
170 		frag = TAILQ_LAST(&pf_fragqueue, pf_fragqueue);
171 		if (frag == NULL)
172 			break;
173 		pf_free_fragment(frag);
174 	}
175 }
176 
177 /* Frees the fragments and all associated entries */
178 
179 void
180 pf_free_fragment(struct pf_fragment *frag)
181 {
182 	struct pf_frent *frent;
183 
184 	/* Free all fragments */
185 	for (frent = LIST_FIRST(&frag->fr_queue); frent;
186 	    frent = LIST_FIRST(&frag->fr_queue)) {
187 		LIST_REMOVE(frent, fr_next);
188 
189 		m_freem(frent->fr_m);
190 		pool_put(&pf_frent_pl, frent);
191 		pf_nfrents--;
192 	}
193 
194 	pf_remove_fragment(frag);
195 }
196 
197 void
198 pf_ip2key(struct pf_tree_key *key, struct ip *ip)
199 {
200 	key->proto = ip->ip_p;
201 	key->af = AF_INET;
202 	key->addr[0].addr32[0] = ip->ip_src.s_addr;
203 	key->addr[1].addr32[0] = ip->ip_dst.s_addr;
204 	key->port[0] = ip->ip_id;
205 	key->port[1] = 0;
206 }
207 
208 struct pf_fragment *
209 pf_find_fragment(struct ip *ip)
210 {
211 	struct pf_tree_key key;
212 	struct pf_fragment *frag;
213 
214 	pf_ip2key(&key, ip);
215 
216 	frag = (struct pf_fragment *)pf_find_state(tree_fragment, &key);
217 
218 	if (frag != NULL) {
219 		microtime(&frag->fr_timeout);
220 		TAILQ_REMOVE(&pf_fragqueue, frag, frag_next);
221 		TAILQ_INSERT_HEAD(&pf_fragqueue, frag, frag_next);
222 	}
223 
224 	return (frag);
225 }
226 
227 /* Removes a fragment from the fragment queue and frees the fragment */
228 
229 void
230 pf_remove_fragment(struct pf_fragment *frag)
231 {
232 	struct pf_tree_key key;
233 
234 	/* XXX keep in sync with pf_ip2key */
235 	key.proto = frag->fr_p;
236 	key.af = AF_INET;
237 	key.addr[0].addr32[0] = frag->fr_src.s_addr;
238 	key.addr[1].addr32[0] = frag->fr_dst.s_addr;
239 	key.port[0] = frag->fr_id;
240 	key.port[1] = 0;
241 
242 	pf_tree_remove(&tree_fragment, NULL, &key);
243 	TAILQ_REMOVE(&pf_fragqueue, frag, frag_next);
244 
245 	pool_put(&pf_frag_pl, frag);
246 }
247 
248 struct mbuf *
249 pf_reassemble(struct mbuf **m0, struct pf_fragment *frag,
250     struct pf_frent *frent, int mff)
251 {
252 	struct mbuf *m = *m0, *m2;
253 	struct pf_frent *frea, *next;
254 	struct pf_frent *frep = NULL;
255 	struct ip *ip = frent->fr_ip;
256 	int hlen = ip->ip_hl << 2;
257 	u_int16_t off = ip->ip_off;
258 	u_int16_t max = ip->ip_len + off;
259 
260 	/* Strip off ip header */
261 	m->m_data += hlen;
262 	m->m_len -= hlen;
263 
264 	/* Create a new reassembly queue for this packet */
265 	if (frag == NULL) {
266 		struct pf_tree_key key;
267 
268 		frag = pool_get(&pf_frag_pl, M_NOWAIT);
269 		if (frag == NULL) {
270 			pf_flush_fragments();
271 			frag = pool_get(&pf_frag_pl, M_NOWAIT);
272 			if (frag == NULL)
273 				goto drop_fragment;
274 		}
275 
276 		frag->fr_flags = 0;
277 		frag->fr_max = 0;
278 		frag->fr_src = frent->fr_ip->ip_src;
279 		frag->fr_dst = frent->fr_ip->ip_dst;
280 		frag->fr_p = frent->fr_ip->ip_p;
281 		frag->fr_id = frent->fr_ip->ip_id;
282 		LIST_INIT(&frag->fr_queue);
283 
284 		pf_ip2key(&key, frent->fr_ip);
285 
286 		pf_tree_insert(&tree_fragment, NULL, &key,
287 		    (struct pf_state *)frag);
288 		TAILQ_INSERT_HEAD(&pf_fragqueue, frag, frag_next);
289 
290 		/* We do not have a previous fragment */
291 		frep = NULL;
292 		goto insert;
293 	}
294 
295 	/*
296 	 * Find a fragment after the current one:
297 	 *  - off contains the real shifted offset.
298 	 */
299 	LIST_FOREACH(frea, &frag->fr_queue, fr_next) {
300 		if (frea->fr_ip->ip_off > off)
301 			break;
302 		frep = frea;
303 	}
304 
305 	KASSERT(frep != NULL || frea != NULL);
306 
307 	if (frep != NULL) {
308 		u_int16_t precut;
309 
310 		precut = frep->fr_ip->ip_off + frep->fr_ip->ip_len - off;
311 		if (precut >= ip->ip_len)
312 			goto drop_fragment;
313 		if (precut) {
314 			m_adj(frent->fr_m, precut);
315 
316 			DPFPRINTF((__FUNCTION__": overlap -%d\n", precut));
317 			/* Enforce 8 byte boundaries */
318 			off = ip->ip_off += precut;
319 			ip->ip_len -= precut;
320 		}
321 	}
322 
323 	for (; frea != NULL && ip->ip_len + off > frea->fr_ip->ip_off;
324 	    frea = next) {
325 		u_int16_t aftercut;
326 
327 		aftercut = (ip->ip_len + off) - frea->fr_ip->ip_off;
328 		DPFPRINTF((__FUNCTION__": adjust overlap %d\n", aftercut));
329 		if (aftercut < frea->fr_ip->ip_len) {
330 			frea->fr_ip->ip_len -= aftercut;
331 			frea->fr_ip->ip_off += aftercut;
332 			m_adj(frea->fr_m, aftercut);
333 			break;
334 		}
335 
336 		/* This fragment is completely overlapped, loose it */
337 		next = LIST_NEXT(frea, fr_next);
338 		m_freem(frea->fr_m);
339 		LIST_REMOVE(frea, fr_next);
340 		pool_put(&pf_frent_pl, frea);
341 		pf_nfrents--;
342 	}
343 
344  insert:
345 	/* Update maxmimum data size */
346 	if (frag->fr_max < max)
347 		frag->fr_max = max;
348 	/* This is the last segment */
349 	if (!mff)
350 		frag->fr_flags |= PFFRAG_SEENLAST;
351 
352 	if (frep == NULL)
353 		LIST_INSERT_HEAD(&frag->fr_queue, frent, fr_next);
354 	else
355 		LIST_INSERT_AFTER(frep, frent, fr_next);
356 
357 	/* Check if we are completely reassembled */
358 	if (!(frag->fr_flags & PFFRAG_SEENLAST))
359 		return (NULL);
360 
361 	/* Check if we have all the data */
362 	off = 0;
363 	for (frep = LIST_FIRST(&frag->fr_queue); frep; frep = next) {
364 		next = LIST_NEXT(frep, fr_next);
365 
366 		off += frep->fr_ip->ip_len;
367 		if (off < frag->fr_max &&
368 		    (next == NULL || next->fr_ip->ip_off != off)) {
369 			DPFPRINTF((__FUNCTION__
370 			    ": missing fragment at %d, next %d, max %d\n",
371 			    off, next == NULL ? -1 : next->fr_ip->ip_off,
372 			    frag->fr_max));
373 			return (NULL);
374 		}
375 	}
376 	DPFPRINTF((__FUNCTION__": %d < %d?\n", off, frag->fr_max));
377 	if (off < frag->fr_max)
378 		return (NULL);
379 
380 	/* We have all the data */
381 	frent = LIST_FIRST(&frag->fr_queue);
382 	KASSERT(frent != NULL);
383 	if ((frent->fr_ip->ip_hl << 2) + off > IP_MAXPACKET) {
384 		DPFPRINTF((__FUNCTION__": drop: too big: %d\n", off));
385 		pf_free_fragment(frag);
386 		return (NULL);
387 	}
388 	next = LIST_NEXT(frent, fr_next);
389 
390 	/* Magic from ip_input */
391 	ip = frent->fr_ip;
392 	m = frent->fr_m;
393 	m2 = m->m_next;
394 	m->m_next = NULL;
395 	m_cat(m, m2);
396 	pool_put(&pf_frent_pl, frent);
397 	pf_nfrents--;
398 	for (frent = next; frent != NULL; frent = next) {
399 		next = LIST_NEXT(frent, fr_next);
400 
401 		m2 = frent->fr_m;
402 		pool_put(&pf_frent_pl, frent);
403 		pf_nfrents--;
404 		m_cat(m, m2);
405 	}
406 
407 	ip->ip_src = frag->fr_src;
408 	ip->ip_dst = frag->fr_dst;
409 
410 	/* Remove from fragment queue */
411 	pf_remove_fragment(frag);
412 
413 	hlen = ip->ip_hl << 2;
414 	ip->ip_len = off + hlen;
415 	m->m_len += hlen;
416 	m->m_data -= hlen;
417 
418 	/* some debugging cruft by sklower, below, will go away soon */
419 	/* XXX this should be done elsewhere */
420 	if (m->m_flags & M_PKTHDR) {
421 		int plen = 0;
422 		for (m2 = m; m2; m2 = m2->m_next)
423 			plen += m2->m_len;
424 		m->m_pkthdr.len = plen;
425 	}
426 
427 	DPFPRINTF((__FUNCTION__": complete: %p(%d)\n", m, ip->ip_len));
428 	return (m);
429 
430  drop_fragment:
431 	/* Oops - fail safe - drop packet */
432 	m_freem(m);
433 	return (NULL);
434 }
435 
/*
 * Normalize an IPv4 packet when a scrub rule matches: validate the
 * header, collect fragments until the packet is complete, and on
 * outbound packets sanitize ip_off and enforce a minimum TTL.
 *
 * Returns PF_PASS (with *m0 possibly replaced by the reassembled
 * packet) or PF_DROP with *reason set.
 */
int
pf_normalize_ip(struct mbuf **m0, int dir, struct ifnet *ifp, u_short *reason)
{
	struct mbuf *m = *m0;
	struct pf_rule *r;
	struct pf_frent *frent;
	struct pf_fragment *frag;
	struct ip *h = mtod(m, struct ip *);
	int mff = (h->ip_off & IP_MF), hlen = h->ip_hl << 2;
	u_int16_t fragoff = (h->ip_off & IP_OFFMASK) << 3;
	u_int16_t max;

	/* Find the first matching scrub rule, if any */
	TAILQ_FOREACH(r, pf_rules_active, entries) {
		if ((r->action == PF_SCRUB) &&
		    MATCH_TUPLE(h, r, dir, ifp, AF_INET))
			break;
	}

	if (r == NULL)
		return (PF_PASS);

	/* Check for illegal packets */
	if (hlen < sizeof(struct ip))
		goto drop;

	if (hlen > h->ip_len)
		goto drop;

	/* We will need other tests here */
	if (!fragoff && !mff)
		goto no_fragment;

	/* Now we are dealing with a fragmented packet */
	frag = pf_find_fragment(h);

	/* This can not happen: a fragment with "don't fragment" set */
	if (h->ip_off & IP_DF) {
		DPFPRINTF((__FUNCTION__": IP_DF\n"));
		goto bad;
	}

	/* Switch ip_len to payload length and ip_off to the byte
	 * offset; the shift also pushes the flag bits out of the
	 * 16-bit field. */
	h->ip_len -= hlen;
	h->ip_off <<= 3;

	/* All fragments are 8 byte aligned */
	if (mff && (h->ip_len & 0x7)) {
		DPFPRINTF((__FUNCTION__": mff and %d\n", h->ip_len));
		goto bad;
	}

	max = fragoff + h->ip_len;
	/* Respect maximum length */
	if (max > IP_MAXPACKET) {
		DPFPRINTF((__FUNCTION__": max packet %d\n", max));
		goto bad;
	}
	/* Check if we saw the last fragment already */
	if (frag != NULL && (frag->fr_flags & PFFRAG_SEENLAST) &&
	    max > frag->fr_max)
		goto bad;

	/* Get an entry for the fragment queue */
	frent = pool_get(&pf_frent_pl, PR_NOWAIT);
	if (frent == NULL) {
		/* Try to clean up old fragments */
		pf_flush_fragments();
		frent = pool_get(&pf_frent_pl, PR_NOWAIT);
		if (frent == NULL) {
			REASON_SET(reason, PFRES_MEMORY);
			return (PF_DROP);
		}
	}
	pf_nfrents++;
	frent->fr_ip = h;
	frent->fr_m = m;

	/* Might return a completely reassembled mbuf, or NULL */
	DPFPRINTF((__FUNCTION__": reass frag %d @ %d\n", h->ip_id, fragoff));
	*m0 = m = pf_reassemble(m0, frag, frent, mff);

	if (m == NULL)
		return (PF_DROP);

	h = mtod(m, struct ip *);

 no_fragment:
	if (dir != PF_OUT)
		return (PF_PASS);

	/* At this point, only IP_DF is allowed in ip_off */
	if (r->rule_flag & PFRULE_NODF)
		h->ip_off = 0;
	else
		h->ip_off &= IP_DF;

	/* Enforce a minimum ttl, may cause endless packet loops */
	if (r->min_ttl && h->ip_ttl < r->min_ttl)
		h->ip_ttl = r->min_ttl;

	return (PF_PASS);

 drop:
	REASON_SET(reason, PFRES_NORM);
	if (r != NULL && r->log)
		PFLOG_PACKET(ifp, h, m, AF_INET, dir, *reason, r);
	return (PF_DROP);

 bad:
	DPFPRINTF((__FUNCTION__": dropping bad fragment\n"));

	/* Free associated fragments */
	if (frag != NULL)
		pf_free_fragment(frag);

	REASON_SET(reason, PFRES_FRAG);
	if (r != NULL && r->log)
		PFLOG_PACKET(ifp, h, m, AF_INET, dir, *reason, r);

	return (PF_DROP);
}
556 
/*
 * Normalize a TCP segment when a scrub rule matches: drop illegal
 * flag combinations, clear the reserved header bits and a stale
 * urgent pointer, and fix up the checksum for every change made.
 *
 * Returns PF_PASS (segment possibly rewritten in place) or PF_DROP.
 */
int
pf_normalize_tcp(int dir, struct ifnet *ifp, struct mbuf *m, int ipoff,
    int off, void *h, struct pf_pdesc *pd)
{
	struct pf_rule *r, *rm = NULL;
	struct tcphdr *th = pd->hdr.tcp;
	int rewrite = 0;	/* set when the header was modified */
	u_short reason;
	u_int8_t flags, af = pd->af;

	/* Walk the active rules using the skip steps, looking for the
	 * first matching scrub rule. */
	r = TAILQ_FIRST(pf_rules_active);
	while (r != NULL) {
		if (r->action != PF_SCRUB) {
			r = TAILQ_NEXT(r, entries);
			continue;
		}
		if (r->ifp != NULL && r->ifp != ifp)
			r = r->skip[PF_SKIP_IFP];
		else if (r->af && r->af != af)
			r = r->skip[PF_SKIP_AF];
		else if (r->proto && r->proto != pd->proto)
			r = r->skip[PF_SKIP_PROTO];
		else if (!PF_AZERO(&r->src.mask, af) &&
		    !PF_MATCHA(r->src.not, &r->src.addr, &r->src.mask,
			    pd->src, af))
			r = r->skip[PF_SKIP_SRC_ADDR];
		else if (r->src.port_op && !pf_match_port(r->src.port_op,
			    r->src.port[0], r->src.port[1], th->th_sport))
			r = r->skip[PF_SKIP_SRC_PORT];
		else if (!PF_AZERO(&r->dst.mask, af) &&
			    !PF_MATCHA(r->dst.not,
			    &r->dst.addr, &r->dst.mask,
			    pd->dst, af))
			r = r->skip[PF_SKIP_DST_ADDR];
		else if (r->dst.port_op && !pf_match_port(r->dst.port_op,
			    r->dst.port[0], r->dst.port[1], th->th_dport))
			r = r->skip[PF_SKIP_DST_PORT];
		else if (r->direction != dir)
			r = TAILQ_NEXT(r, entries);
		else if (r->ifp != NULL && r->ifp != ifp)
			r = TAILQ_NEXT(r, entries);
		else {
			rm = r;
			break;
		}
	}

	if (rm == NULL)
		return (PF_PASS);

	flags = th->th_flags;
	if (flags & TH_SYN) {
		/* Illegal packet */
		if (flags & TH_RST)
			goto tcp_drop;

		if (flags & TH_FIN)
			flags &= ~TH_FIN;
	} else {
		/* Illegal packet */
		if (!(flags & (TH_ACK|TH_RST)))
			goto tcp_drop;
	}

	if (!(flags & TH_ACK)) {
		/* These flags are only valid if ACK is set */
		if ((flags & TH_FIN) || (flags & TH_PUSH) || (flags & TH_URG))
			goto tcp_drop;
	}

	/* Check for illegal header length */
	if (th->th_off < (sizeof(struct tcphdr) >> 2))
		goto tcp_drop;

	/* If flags changed, or reserved data set, then adjust */
	if (flags != th->th_flags || th->th_x2 != 0) {
		u_int16_t ov, nv;

		/* Checksum fixup works on the 16-bit word holding
		 * th_off/th_x2/th_flags, just past th_ack. */
		ov = *(u_int16_t *)(&th->th_ack + 1);
		th->th_flags = flags;
		th->th_x2 = 0;
		nv = *(u_int16_t *)(&th->th_ack + 1);

		th->th_sum = pf_cksum_fixup(th->th_sum, ov, nv);
		rewrite = 1;
	}

	/* Remove urgent pointer, if TH_URG is not set */
	if (!(flags & TH_URG) && th->th_urp) {
		th->th_sum = pf_cksum_fixup(th->th_sum, th->th_urp, 0);
		th->th_urp = 0;
		rewrite = 1;
	}

	/* copy back packet headers if we sanitized */
	if (rewrite)
		m_copyback(m, off, sizeof(*th), (caddr_t)th);

	return (PF_PASS);

 tcp_drop:
	REASON_SET(&reason, PFRES_NORM);
	if (rm != NULL && rm->log)
		PFLOG_PACKET(ifp, h, m, AF_INET, dir, reason, rm);
	return (PF_DROP);
}
663