xref: /netbsd-src/sys/net/npf/npf_inet.c (revision bdc22b2e01993381dcefeff2bc9b56ca75a4235c)
/*	$NetBSD: npf_inet.c,v 1.50 2018/04/08 05:51:45 maxv Exp $	*/

/*-
 * Copyright (c) 2009-2014 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This material is based upon work partially supported by The
 * NetBSD Foundation under a contract with Mindaugas Rasiukevicius.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Various protocol-related helper routines.
 *
 * This layer manipulates the npf_cache_t structure, i.e. it caches the
 * requested headers and records which information was cached in the info
 * bit field.  It is also the responsibility of this layer to update or
 * invalidate the cache on rewrites (e.g. by the translation routines).
 */

#ifdef _KERNEL
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: npf_inet.c,v 1.50 2018/04/08 05:51:45 maxv Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <net/pfil.h>
#include <net/if.h>
#include <net/ethertypes.h>
#include <net/if_ether.h>

#include <netinet/in_systm.h>
#include <netinet/in.h>
#include <netinet6/in6_var.h>
#include <netinet/ip.h>
#include <netinet/ip6.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#include <netinet/ip_icmp.h>
#endif

#include "npf_impl.h"

/*
 * npf_fixup{16,32}_cksum: incremental update of the Internet checksum.
 */

uint16_t
npf_fixup16_cksum(uint16_t cksum, uint16_t odatum, uint16_t ndatum)
{
	uint32_t sum;

	/*
	 * RFC 1624:
	 *	HC' = ~(~HC + ~m + m')
	 *
	 * Note: 1's complement sum is endian-independent (RFC 1071, page 2).
	 */
	sum = ~cksum & 0xffff;
	sum += (~odatum & 0xffff) + ndatum;
	sum = (sum >> 16) + (sum & 0xffff);
	sum += (sum >> 16);

	return ~sum & 0xffff;
}
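
/*
 * Editor's note: a small worked example of the RFC 1624 update above,
 * with illustrative values (not from the original sources).  For an old
 * checksum HC = 0x1234 and a 16-bit field changing from m = 0x00ff to
 * m' = 0x0f00:
 *
 *	~HC = 0xedcb, ~m = 0xff00
 *	0xedcb + 0xff00 + 0x0f00 = 0x1fbcb -> fold the carry -> 0xfbcc
 *	HC' = ~0xfbcc = 0x0433
 *
 * Hence npf_fixup16_cksum(0x1234, 0x00ff, 0x0f00) should yield 0x0433,
 * which matches a checksum recomputed from scratch over the new data.
 */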

uint16_t
npf_fixup32_cksum(uint16_t cksum, uint32_t odatum, uint32_t ndatum)
{
	uint32_t sum;

	/*
	 * Checksum the 32-bit datum as two 16-bit halves.  Note: the first
	 * 32->16 bit reduction is not necessary.
	 */
	sum = ~cksum & 0xffff;
	sum += (~odatum & 0xffff) + (ndatum & 0xffff);

	sum += (~odatum >> 16) + (ndatum >> 16);
	sum = (sum >> 16) + (sum & 0xffff);
	sum += (sum >> 16);
	return ~sum & 0xffff;
}

/*
 * npf_addr_cksum: update the checksum for an IPv4 or IPv6 address change.
 */
uint16_t
npf_addr_cksum(uint16_t cksum, int sz, const npf_addr_t *oaddr,
    const npf_addr_t *naddr)
{
	const uint32_t *oip32 = (const uint32_t *)oaddr;
	const uint32_t *nip32 = (const uint32_t *)naddr;

	KASSERT(sz % sizeof(uint32_t) == 0);
	do {
		cksum = npf_fixup32_cksum(cksum, *oip32++, *nip32++);
		sz -= sizeof(uint32_t);
	} while (sz);

	return cksum;
}

/*
 * npf_addr_mix: mix the IP addresses into a single 32-bit integer by XOR.
 * Note: used as a hash function.
 */
uint32_t
npf_addr_mix(const int sz, const npf_addr_t *a1, const npf_addr_t *a2)
{
	uint32_t mix = 0;

	KASSERT(sz > 0 && a1 != NULL && a2 != NULL);

	for (int i = 0; i < (sz >> 2); i++) {
		mix ^= a1->word32[i];
		mix ^= a2->word32[i];
	}
	return mix;
}

/*
 * npf_addr_mask: apply the mask to a given address and store the result.
 */
void
npf_addr_mask(const npf_addr_t *addr, const npf_netmask_t mask,
    const int alen, npf_addr_t *out)
{
	const int nwords = alen >> 2;
	uint_fast8_t length = mask;

	/* Note: maximum length is 32 for IPv4 and 128 for IPv6. */
	KASSERT(length <= NPF_MAX_NETMASK);

	for (int i = 0; i < nwords; i++) {
		uint32_t wordmask;

		if (length >= 32) {
			wordmask = htonl(0xffffffff);
			length -= 32;
		} else if (length) {
			wordmask = htonl(0xffffffff << (32 - length));
			length = 0;
		} else {
			wordmask = 0;
		}
		out->word32[i] = addr->word32[i] & wordmask;
	}
}
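
/*
 * Editor's note: an illustrative example (values are hypothetical).
 * Masking the IPv4 address 192.168.1.201 with a /25 prefix computes
 *
 *	wordmask = htonl(0xffffffff << (32 - 25)) = htonl(0xffffff80)
 *
 * so "out" becomes 192.168.1.128 (0xc9 & 0x80 == 0x80).  For IPv6, the
 * same loop walks four 32-bit words, consuming up to 32 bits of the
 * prefix length per iteration.
 */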

/*
 * npf_addr_cmp: compare two addresses, either IPv4 or IPv6.
 *
 * => Return 0 if equal and negative/positive if less/greater accordingly.
 * => Ignore the mask if NPF_NO_NETMASK is specified.
 */
int
npf_addr_cmp(const npf_addr_t *addr1, const npf_netmask_t mask1,
    const npf_addr_t *addr2, const npf_netmask_t mask2, const int alen)
{
	npf_addr_t realaddr1, realaddr2;

	if (mask1 != NPF_NO_NETMASK) {
		npf_addr_mask(addr1, mask1, alen, &realaddr1);
		addr1 = &realaddr1;
	}
	if (mask2 != NPF_NO_NETMASK) {
		npf_addr_mask(addr2, mask2, alen, &realaddr2);
		addr2 = &realaddr2;
	}
	return memcmp(addr1, addr2, alen);
}

/*
 * npf_tcpsaw: helper to fetch SEQ, ACK, WIN and return TCP data length.
 *
 * => Returns all values in host byte-order.
 */
int
npf_tcpsaw(const npf_cache_t *npc, tcp_seq *seq, tcp_seq *ack, uint32_t *win)
{
	const struct tcphdr *th = npc->npc_l4.tcp;
	u_int thlen;

	KASSERT(npf_iscached(npc, NPC_TCP));

	*seq = ntohl(th->th_seq);
	*ack = ntohl(th->th_ack);
	*win = (uint32_t)ntohs(th->th_win);
	thlen = th->th_off << 2;

	if (npf_iscached(npc, NPC_IP4)) {
		const struct ip *ip = npc->npc_ip.v4;
		return ntohs(ip->ip_len) - npc->npc_hlen - thlen;
	} else if (npf_iscached(npc, NPC_IP6)) {
		const struct ip6_hdr *ip6 = npc->npc_ip.v6;
		return ntohs(ip6->ip6_plen) -
		    (npc->npc_hlen - sizeof(*ip6)) - thlen;
	}
	return 0;
}
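
/*
 * Editor's note: an illustrative example (values are hypothetical).
 * For an IPv4 packet with ip_len = 60, an IP header of npc_hlen = 20
 * and a TCP header with th_off = 8 (i.e. 32 bytes including options),
 * npf_tcpsaw() returns 60 - 20 - 32 = 8 bytes of TCP payload.
 */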

/*
 * npf_fetch_tcpopts: parse the TCP options and return the MSS and window
 * scale; if a non-zero MSS is passed in, rewrite the MSS option in place.
 */
bool
npf_fetch_tcpopts(npf_cache_t *npc, uint16_t *mss, int *wscale)
{
	nbuf_t *nbuf = npc->npc_nbuf;
	const struct tcphdr *th = npc->npc_l4.tcp;
	int cnt, optlen = 0;
	bool setmss = false;
	uint8_t *cp, opt;
	uint8_t val;
	bool ok;

	KASSERT(npf_iscached(npc, NPC_IP46));
	KASSERT(npf_iscached(npc, NPC_TCP));

	/* Determine if there are any TCP options, get their length. */
	cnt = (th->th_off << 2) - sizeof(struct tcphdr);
	if (cnt <= 0) {
		/* No options. */
		return false;
	}
	KASSERT(cnt <= MAX_TCPOPTLEN);

	/* Determine if we want to set or get the mss. */
	if (mss) {
		setmss = (*mss != 0);
	}

	/* Fetch all the options at once. */
	nbuf_reset(nbuf);
	const int step = npc->npc_hlen + sizeof(struct tcphdr);
	if ((cp = nbuf_advance(nbuf, step, cnt)) == NULL) {
		ok = false;
		goto done;
	}

	/* Scan the options. */
	for (; cnt > 0; cnt -= optlen, cp += optlen) {
		opt = cp[0];
		if (opt == TCPOPT_EOL)
			break;
		if (opt == TCPOPT_NOP)
			optlen = 1;
		else {
			if (cnt < 2)
				break;
			optlen = cp[1];
			if (optlen < 2 || optlen > cnt)
				break;
		}

		switch (opt) {
		case TCPOPT_MAXSEG:
			if (optlen != TCPOLEN_MAXSEG)
				continue;
			if (mss) {
				if (setmss) {
					memcpy(cp + 2, mss, sizeof(uint16_t));
				} else {
					memcpy(mss, cp + 2, sizeof(uint16_t));
				}
			}
			break;
		case TCPOPT_WINDOW:
			if (optlen != TCPOLEN_WINDOW)
				continue;
			val = *(cp + 2);
			*wscale = (val > TCP_MAX_WINSHIFT) ? TCP_MAX_WINSHIFT : val;
			break;
		default:
			break;
		}
	}

	ok = true;
done:
	if (nbuf_flag_p(nbuf, NBUF_DATAREF_RESET)) {
		npf_recache(npc);
	}
	return ok;
}
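
/*
 * Editor's note: a usage sketch with a hypothetical caller, based on the
 * "setmss" logic above.  Pass a zeroed MSS to read the options, or a
 * non-zero value (in network byte order, as the bytes are copied as-is)
 * to overwrite the MSS option in place.  "wscale" must point to valid
 * storage, since it is written whenever a window-scale option is seen:
 *
 *	uint16_t mss = 0;
 *	int wscale = 0;
 *
 *	if (npf_fetch_tcpopts(npc, &mss, &wscale)) {
 *		... mss and wscale now hold the values found, if any ...
 *	}
 */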

static int
npf_cache_ip(npf_cache_t *npc, nbuf_t *nbuf)
{
	const void *nptr = nbuf_dataptr(nbuf);
	const uint8_t ver = *(const uint8_t *)nptr;
	int flags = 0;

	/*
	 * We intentionally don't read the L4 payload after IPPROTO_AH.
	 */

	switch (ver >> 4) {
	case IPVERSION: {
		struct ip *ip;

		ip = nbuf_ensure_contig(nbuf, sizeof(struct ip));
		if (ip == NULL) {
			return NPC_FMTERR;
		}

		/* Retrieve the complete header. */
		if ((u_int)(ip->ip_hl << 2) < sizeof(struct ip)) {
			return NPC_FMTERR;
		}
		ip = nbuf_ensure_contig(nbuf, (u_int)(ip->ip_hl << 2));
		if (ip == NULL) {
			return NPC_FMTERR;
		}

		if (ip->ip_off & ~htons(IP_DF | IP_RF)) {
			/* Note fragmentation. */
			flags |= NPC_IPFRAG;
		}

		/* Cache: layer 3 - IPv4. */
		npc->npc_alen = sizeof(struct in_addr);
		npc->npc_ips[NPF_SRC] = (npf_addr_t *)&ip->ip_src;
		npc->npc_ips[NPF_DST] = (npf_addr_t *)&ip->ip_dst;
		npc->npc_hlen = ip->ip_hl << 2;
		npc->npc_proto = ip->ip_p;

		npc->npc_ip.v4 = ip;
		flags |= NPC_IP4;
		break;
	}

	case (IPV6_VERSION >> 4): {
		struct ip6_hdr *ip6;
		struct ip6_ext *ip6e;
		struct ip6_frag *ip6f;
		size_t off, hlen;
		int frag_present;

		ip6 = nbuf_ensure_contig(nbuf, sizeof(struct ip6_hdr));
		if (ip6 == NULL) {
			return NPC_FMTERR;
		}

		/*
		 * XXX: We don't handle IPv6 Jumbograms.
		 */

		/* Set initial next-protocol value. */
		hlen = sizeof(struct ip6_hdr);
		npc->npc_proto = ip6->ip6_nxt;
		npc->npc_hlen = hlen;

		frag_present = 0;

		/*
		 * Advance by the length of the current header.
		 */
		off = nbuf_offset(nbuf);
		while ((ip6e = nbuf_advance(nbuf, hlen, sizeof(*ip6e))) != NULL) {
			/*
			 * Determine whether we are going to continue.
			 */
			switch (npc->npc_proto) {
			case IPPROTO_HOPOPTS:
			case IPPROTO_DSTOPTS:
			case IPPROTO_ROUTING:
				hlen = (ip6e->ip6e_len + 1) << 3;
				break;
			case IPPROTO_FRAGMENT:
				if (frag_present++)
					return NPC_FMTERR;
				ip6f = nbuf_ensure_contig(nbuf, sizeof(*ip6f));
				if (ip6f == NULL)
					return NPC_FMTERR;

				/* RFC 6946: skip dummy fragments. */
				if (!ntohs(ip6f->ip6f_offlg & IP6F_OFF_MASK) &&
				    !(ip6f->ip6f_offlg & IP6F_MORE_FRAG)) {
					hlen = sizeof(struct ip6_frag);
					break;
				}

				hlen = 0;
				flags |= NPC_IPFRAG;

				break;
			default:
				hlen = 0;
				break;
			}

			if (!hlen) {
				break;
			}
			npc->npc_proto = ip6e->ip6e_nxt;
			npc->npc_hlen += hlen;
		}

		if (ip6e == NULL) {
			return NPC_FMTERR;
		}

		/*
		 * Re-fetch the header pointers (nbufs might have been
		 * reallocated).  Restore the original offset (if any).
		 */
		nbuf_reset(nbuf);
		ip6 = nbuf_dataptr(nbuf);
		if (off) {
			nbuf_advance(nbuf, off, 0);
		}

		/* Cache: layer 3 - IPv6. */
		npc->npc_alen = sizeof(struct in6_addr);
		npc->npc_ips[NPF_SRC] = (npf_addr_t *)&ip6->ip6_src;
		npc->npc_ips[NPF_DST] = (npf_addr_t *)&ip6->ip6_dst;

		npc->npc_ip.v6 = ip6;
		flags |= NPC_IP6;
		break;
	}
	default:
		break;
	}
	return flags;
}

/*
 * npf_cache_all: general routine to cache all relevant IP (v4 or v6)
 * and TCP, UDP or ICMP headers.
 *
 * => The nbuf offset shall be set accordingly.
 */
int
npf_cache_all(npf_cache_t *npc)
{
	nbuf_t *nbuf = npc->npc_nbuf;
	int flags, l4flags;
	u_int hlen;

	/*
	 * This routine is the main point where the references are cached;
	 * therefore, clear the flag as we reset.
	 */
again:
	nbuf_unset_flag(nbuf, NBUF_DATAREF_RESET);

	/*
	 * First, cache the L3 header (IPv4 or IPv6).  If the IP packet is
	 * fragmented, then we cannot look into L4.
	 */
	flags = npf_cache_ip(npc, nbuf);
	if ((flags & NPC_IP46) == 0 || (flags & NPC_IPFRAG) != 0 ||
	    (flags & NPC_FMTERR) != 0) {
		goto out;
	}
	hlen = npc->npc_hlen;

	/*
	 * Note: we guarantee that the potential "Query Id" field of the
	 * ICMPv4/ICMPv6 packets is in the nbuf.  This field is used in the
	 * ICMP ALG.
	 */
	switch (npc->npc_proto) {
	case IPPROTO_TCP:
		/* Cache: layer 4 - TCP. */
		npc->npc_l4.tcp = nbuf_advance(nbuf, hlen,
		    sizeof(struct tcphdr));
		l4flags = NPC_LAYER4 | NPC_TCP;
		break;
	case IPPROTO_UDP:
		/* Cache: layer 4 - UDP. */
		npc->npc_l4.udp = nbuf_advance(nbuf, hlen,
		    sizeof(struct udphdr));
		l4flags = NPC_LAYER4 | NPC_UDP;
		break;
	case IPPROTO_ICMP:
		/* Cache: layer 4 - ICMPv4. */
		npc->npc_l4.icmp = nbuf_advance(nbuf, hlen,
		    ICMP_MINLEN);
		l4flags = NPC_LAYER4 | NPC_ICMP;
		break;
	case IPPROTO_ICMPV6:
		/* Cache: layer 4 - ICMPv6. */
		npc->npc_l4.icmp6 = nbuf_advance(nbuf, hlen,
		    sizeof(struct icmp6_hdr));
		l4flags = NPC_LAYER4 | NPC_ICMP;
		break;
	default:
		l4flags = 0;
		break;
	}

	/* Error out if nbuf_advance failed. */
	if (l4flags && npc->npc_l4.hdr == NULL) {
		goto err;
	}

	if (nbuf_flag_p(nbuf, NBUF_DATAREF_RESET)) {
		goto again;
	}

	flags |= l4flags;
	npc->npc_info |= flags;
	return flags;

err:
	flags = NPC_FMTERR;
out:
	nbuf_unset_flag(nbuf, NBUF_DATAREF_RESET);
	npc->npc_info |= flags;
	return flags;
}
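
/*
 * Editor's note: a usage sketch with a hypothetical caller.  The returned
 * flags (also stored in npc_info) describe what is now valid in the cache,
 * for example:
 *
 *	const int flags = npf_cache_all(npc);
 *
 *	if (flags & (NPC_IPFRAG | NPC_FMTERR)) {
 *		... fragment or malformed packet: L4 was not cached ...
 *	} else if (npf_iscached(npc, NPC_TCP)) {
 *		const struct tcphdr *th = npc->npc_l4.tcp;
 *		...
 *	}
 */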

void
npf_recache(npf_cache_t *npc)
{
	nbuf_t *nbuf = npc->npc_nbuf;
	const int mflags __diagused = npc->npc_info & (NPC_IP46 | NPC_LAYER4);
	int flags __diagused;

	nbuf_reset(nbuf);
	npc->npc_info = 0;
	flags = npf_cache_all(npc);

	KASSERT((flags & mflags) == mflags);
	KASSERT(nbuf_flag_p(nbuf, NBUF_DATAREF_RESET) == 0);
}

/*
 * npf_rwrip: rewrite the required IP address.
 */
bool
npf_rwrip(const npf_cache_t *npc, u_int which, const npf_addr_t *addr)
{
	KASSERT(npf_iscached(npc, NPC_IP46));
	KASSERT(which == NPF_SRC || which == NPF_DST);

	memcpy(npc->npc_ips[which], addr, npc->npc_alen);
	return true;
}

/*
 * npf_rwrport: rewrite the required TCP/UDP port.
 */
bool
npf_rwrport(const npf_cache_t *npc, u_int which, const in_port_t port)
{
	const int proto = npc->npc_proto;
	in_port_t *oport;

	KASSERT(npf_iscached(npc, NPC_TCP) || npf_iscached(npc, NPC_UDP));
	KASSERT(proto == IPPROTO_TCP || proto == IPPROTO_UDP);
	KASSERT(which == NPF_SRC || which == NPF_DST);

	/* Get the offset and store the port in it. */
	if (proto == IPPROTO_TCP) {
		struct tcphdr *th = npc->npc_l4.tcp;
		oport = (which == NPF_SRC) ? &th->th_sport : &th->th_dport;
	} else {
		struct udphdr *uh = npc->npc_l4.udp;
		oport = (which == NPF_SRC) ? &uh->uh_sport : &uh->uh_dport;
	}
	memcpy(oport, &port, sizeof(in_port_t));
	return true;
}

/*
 * npf_rwrcksum: rewrite the IPv4 and/or TCP/UDP checksum.
 */
bool
npf_rwrcksum(const npf_cache_t *npc, u_int which,
    const npf_addr_t *addr, const in_port_t port)
{
	const npf_addr_t *oaddr = npc->npc_ips[which];
	const int proto = npc->npc_proto;
	const int alen = npc->npc_alen;
	uint16_t *ocksum;
	in_port_t oport;

	KASSERT(npf_iscached(npc, NPC_LAYER4));
	KASSERT(which == NPF_SRC || which == NPF_DST);

	if (npf_iscached(npc, NPC_IP4)) {
		struct ip *ip = npc->npc_ip.v4;
		uint16_t ipsum = ip->ip_sum;

		/* Update the IPv4 checksum for the address change. */
		ip->ip_sum = npf_addr_cksum(ipsum, alen, oaddr, addr);
	} else {
		/* No checksum for IPv6. */
		KASSERT(npf_iscached(npc, NPC_IP6));
	}

	/* Nothing else to do for ICMP. */
	if (proto == IPPROTO_ICMP || proto == IPPROTO_ICMPV6) {
		return true;
	}
	KASSERT(npf_iscached(npc, NPC_TCP) || npf_iscached(npc, NPC_UDP));

	/*
	 * Calculate the TCP/UDP checksum:
	 * - Skip if UDP and the current checksum is zero.
	 * - Fixup the IP address change.
	 * - Fixup the port change, if required (non-zero).
	 */
	if (proto == IPPROTO_TCP) {
		struct tcphdr *th = npc->npc_l4.tcp;

		ocksum = &th->th_sum;
		oport = (which == NPF_SRC) ? th->th_sport : th->th_dport;
	} else {
		struct udphdr *uh = npc->npc_l4.udp;

		KASSERT(proto == IPPROTO_UDP);
		ocksum = &uh->uh_sum;
		if (*ocksum == 0) {
			/* No need to update. */
			return true;
		}
		oport = (which == NPF_SRC) ? uh->uh_sport : uh->uh_dport;
	}

	uint16_t cksum = npf_addr_cksum(*ocksum, alen, oaddr, addr);
	if (port) {
		cksum = npf_fixup16_cksum(cksum, oport, port);
	}

	/* Rewrite the TCP/UDP checksum. */
	memcpy(ocksum, &cksum, sizeof(uint16_t));
	return true;
}

/*
 * npf_napt_rwr: perform the address and/or port translation.
 */
int
npf_napt_rwr(const npf_cache_t *npc, u_int which,
    const npf_addr_t *addr, const in_addr_t port)
{
	const unsigned proto = npc->npc_proto;

	/*
	 * Rewrite the IP and/or TCP/UDP checksums first, since we need
	 * the current (old) address/port for the calculations.  Then
	 * perform the address translation, i.e. rewrite the source or
	 * destination address.
	 */
	if (!npf_rwrcksum(npc, which, addr, port)) {
		return EINVAL;
	}
	if (!npf_rwrip(npc, which, addr)) {
		return EINVAL;
	}
	if (port == 0) {
		/* Done. */
		return 0;
	}

	switch (proto) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
		/* Rewrite the source/destination port. */
		if (!npf_rwrport(npc, which, port)) {
			return EINVAL;
		}
		break;
	case IPPROTO_ICMP:
	case IPPROTO_ICMPV6:
		KASSERT(npf_iscached(npc, NPC_ICMP));
		/* Nothing. */
		break;
	default:
		return ENOTSUP;
	}
	return 0;
}

/*
 * IPv6-to-IPv6 Network Prefix Translation (NPTv6), as per RFC 6296.
 */

int
npf_npt66_rwr(const npf_cache_t *npc, u_int which, const npf_addr_t *pref,
    npf_netmask_t len, uint16_t adj)
{
	npf_addr_t *addr = npc->npc_ips[which];
	unsigned remnant, word, preflen = len >> 4;
	uint32_t sum;

	KASSERT(which == NPF_SRC || which == NPF_DST);

	if (!npf_iscached(npc, NPC_IP6)) {
		return EINVAL;
	}
	if (len <= 48) {
		/*
		 * The word to adjust.  Cannot translate the 0xffff
		 * subnet if the prefix is /48 or shorter.
		 */
		word = 3;
		if (addr->word16[word] == 0xffff) {
			return EINVAL;
		}
	} else {
		/*
		 * Also, all 0s or 1s in the host part are disallowed for
		 * prefixes longer than /48.
		 */
		if ((addr->word32[2] == 0 && addr->word32[3] == 0) ||
		    (addr->word32[2] == ~0U && addr->word32[3] == ~0U))
			return EINVAL;

		/* Determine the 16-bit word to adjust. */
		for (word = 4; word < 8; word++)
			if (addr->word16[word] != 0xffff)
				break;
	}

	/* Rewrite the prefix. */
	for (unsigned i = 0; i < preflen; i++) {
		addr->word16[i] = pref->word16[i];
	}

	/*
	 * If the prefix length is not divisible by 16, i.e. it ends within
	 * a 16-bit word, then prepare a mask, determine the word and
	 * adjust it.
	 */
	if ((remnant = len - (preflen << 4)) != 0) {
		const uint16_t wordmask = (1U << remnant) - 1;
		const unsigned i = preflen;

		addr->word16[i] = (pref->word16[i] & wordmask) |
		    (addr->word16[i] & ~wordmask);
	}

	/*
	 * Perform the 1's complement sum/difference.
	 */
	sum = addr->word16[word] + adj;
	while (sum >> 16) {
		sum = (sum >> 16) + (sum & 0xffff);
	}
	if (sum == 0xffff) {
		/* RFC 1071. */
		sum = 0x0000;
	}
	addr->word16[word] = sum;
	return 0;
}
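
/*
 * Editor's note: an illustrative example of the final adjustment above
 * (values are hypothetical).  If the selected 16-bit word holds 0x1234
 * and the configured adjustment is adj = 0xf000, then
 *
 *	0x1234 + 0xf000 = 0x10234 -> fold the carry -> 0x0235
 *
 * and 0x0235 is written back; a folded result of 0xffff is replaced
 * with 0x0000 (RFC 1071).  The adjustment is what keeps the translation
 * checksum-neutral per RFC 6296, so the upper-layer checksums do not
 * need to be rewritten.
 */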

#if defined(DDB) || defined(_NPF_TESTING)

const char *
npf_addr_dump(const npf_addr_t *addr, int alen)
{
	if (alen == sizeof(struct in_addr)) {
		struct in_addr ip;
		memcpy(&ip, addr, alen);
		return inet_ntoa(ip);
	}
	return "[IPv6]";
}

#endif