/*	$NetBSD: frag6.c,v 1.24 2003/05/14 06:47:39 itojun Exp $	*/
/*	$KAME: frag6.c,v 1.40 2002/05/27 21:40:31 itojun Exp $	*/

/*
 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: frag6.c,v 1.24 2003/05/14 06:47:39 itojun Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/syslog.h>

#include <net/if.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/ip6.h>
#include <netinet6/in6_pcb.h>
#include <netinet6/ip6_var.h>
#include <netinet/icmp6.h>

#include <net/net_osdep.h>

/*
 * Define this to get correct behavior for per-interface statistics.
 * It requires an extra routing table lookup per fragment,
 * which may or may not be a performance hit.
 */
#define IN6_IFSTAT_STRICT

static void frag6_enq __P((struct ip6asfrag *, struct ip6asfrag *));
static void frag6_deq __P((struct ip6asfrag *));
static void frag6_insque __P((struct ip6q *, struct ip6q *));
static void frag6_remque __P((struct ip6q *));
static void frag6_freef __P((struct ip6q *));

static int ip6q_locked;
u_int frag6_nfragpackets;
u_int frag6_nfrags;
struct ip6q ip6q;	/* ip6 reassembly queue */

static __inline int ip6q_lock_try __P((void));
static __inline void ip6q_unlock __P((void));
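/*
 * The reassembly queue is protected by a simple flag-based lock:
 * ip6q_lock_try() raises the SPL while it tests and sets ip6q_locked,
 * so the test-and-set cannot be interrupted by other code that also
 * wants the queue (the slow timeout or the drain routine).
 */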
static __inline int
ip6q_lock_try()
{
	int s;

	/*
	 * Use splvm() -- we're blocking things that would cause
	 * mbuf allocation.
	 */
	s = splvm();
	if (ip6q_locked) {
		splx(s);
		return (0);
	}
	ip6q_locked = 1;
	splx(s);
	return (1);
}

static __inline void
ip6q_unlock()
{
	int s;

	s = splvm();
	ip6q_locked = 0;
	splx(s);
}

#ifdef DIAGNOSTIC
#define	IP6Q_LOCK()							\
do {									\
	if (ip6q_lock_try() == 0) {					\
		printf("%s:%d: ip6q already locked\n", __FILE__, __LINE__); \
		panic("ip6q_lock");					\
	}								\
} while (/*CONSTCOND*/ 0)
#define	IP6Q_LOCK_CHECK()						\
do {									\
	if (ip6q_locked == 0) {						\
		printf("%s:%d: ip6q lock not held\n", __FILE__, __LINE__); \
		panic("ip6q lock check");				\
	}								\
} while (/*CONSTCOND*/ 0)
#else
#define	IP6Q_LOCK()		(void) ip6q_lock_try()
#define	IP6Q_LOCK_CHECK()	/* nothing */
#endif

#define	IP6Q_UNLOCK()		ip6q_unlock()

#ifndef offsetof		/* XXX */
#define	offsetof(type, member)	((size_t)(&((type *)0)->member))
#endif

/*
 * Initialise reassembly queue and fragment identifier.
 */
void
frag6_init()
{

	ip6_id = arc4random();
	ip6q.ip6q_next = ip6q.ip6q_prev = &ip6q;
}

/*
 * In RFC2460, the fragment and reassembly rules do not agree with each
 * other in how the next header field in the fragment header is handled.
 * While the sender will use the same value for all of the fragmented
 * packets, the receiver is advised not to check them for consistency.
 *
 * fragment rule (p20):
 *	(2) A Fragment header containing:
 *	The Next Header value that identifies the first header of
 *	the Fragmentable Part of the original packet.
 *		-> next header field is the same for all fragments
 *
 * reassembly rule (p21):
 *	The Next Header field of the last header of the Unfragmentable
 *	Part is obtained from the Next Header field of the first
 *	fragment's Fragment header.
 *		-> should grab it from the first fragment only
 *
 * The following note also contradicts the fragment rule - no one is
 * going to send fragments with differing next header fields.
 *
 * additional note (p22):
 *	The Next Header values in the Fragment headers of different
 *	fragments of the same original packet may differ.  Only the value
 *	from the Offset zero fragment packet is used for reassembly.
 *		-> should grab it from the first fragment only
 *
 * There is no explicit reason given in the RFC.  Historical reason maybe?
 */
/*
 * Fragment input
 */
int
frag6_input(mp, offp, proto)
	struct mbuf **mp;
	int *offp, proto;
{
	struct mbuf *m = *mp, *t;
	struct ip6_hdr *ip6;
	struct ip6_frag *ip6f;
	struct ip6q *q6;
	struct ip6asfrag *af6, *ip6af, *af6dwn;
	int offset = *offp, nxt, i, next;
	int first_frag = 0;
	int fragoff, frgpartlen;	/* must be larger than u_int16_t */
	struct ifnet *dstifp;
#ifdef IN6_IFSTAT_STRICT
	static struct route_in6 ro;
	struct sockaddr_in6 *dst;
#endif

	ip6 = mtod(m, struct ip6_hdr *);
	IP6_EXTHDR_GET(ip6f, struct ip6_frag *, m, offset, sizeof(*ip6f));
	if (ip6f == NULL)
		return IPPROTO_DONE;

	dstifp = NULL;
#ifdef IN6_IFSTAT_STRICT
	/* find the destination interface of the packet. */
	dst = (struct sockaddr_in6 *)&ro.ro_dst;
	if (ro.ro_rt
	 && ((ro.ro_rt->rt_flags & RTF_UP) == 0
	  || !IN6_ARE_ADDR_EQUAL(&dst->sin6_addr, &ip6->ip6_dst))) {
		RTFREE(ro.ro_rt);
		ro.ro_rt = (struct rtentry *)0;
	}
	if (ro.ro_rt == NULL) {
		bzero(dst, sizeof(*dst));
		dst->sin6_family = AF_INET6;
		dst->sin6_len = sizeof(struct sockaddr_in6);
		dst->sin6_addr = ip6->ip6_dst;
	}
	rtalloc((struct route *)&ro);
	if (ro.ro_rt != NULL && ro.ro_rt->rt_ifa != NULL)
		dstifp = ((struct in6_ifaddr *)ro.ro_rt->rt_ifa)->ia_ifp;
#else
	/* we are violating the spec, this is not the destination interface */
	if ((m->m_flags & M_PKTHDR) != 0)
		dstifp = m->m_pkthdr.rcvif;
#endif

	/* jumbo payload can't contain a fragment header */
	if (ip6->ip6_plen == 0) {
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER, offset);
		in6_ifstat_inc(dstifp, ifs6_reass_fail);
		return IPPROTO_DONE;
	}

	/*
	 * check whether the fragment's payload length is
	 * a multiple of 8 octets.
	 * sizeof(struct ip6_frag) == 8
	 * sizeof(struct ip6_hdr) == 40
	 */
	if ((ip6f->ip6f_offlg & IP6F_MORE_FRAG) &&
	    (((ntohs(ip6->ip6_plen) - offset) & 0x7) != 0)) {
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
		    offsetof(struct ip6_hdr, ip6_plen));
		in6_ifstat_inc(dstifp, ifs6_reass_fail);
		return IPPROTO_DONE;
	}

	ip6stat.ip6s_fragments++;
	in6_ifstat_inc(dstifp, ifs6_reass_reqd);

	/* offset now points to data portion */
	offset += sizeof(struct ip6_frag);

	IP6Q_LOCK();

	/*
	 * Enforce upper bound on number of fragments.
	 * If maxfrag is 0, never accept fragments.
	 * If maxfrag is -1, accept all fragments without limitation.
	 */
	if (ip6_maxfrags < 0)
		;
	else if (frag6_nfrags >= (u_int)ip6_maxfrags)
		goto dropfrag;
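	/*
	 * Look for an existing reassembly queue for this packet:
	 * fragments belong together when their source address,
	 * destination address and fragment identifier all match.
	 */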
	for (q6 = ip6q.ip6q_next; q6 != &ip6q; q6 = q6->ip6q_next)
		if (ip6f->ip6f_ident == q6->ip6q_ident &&
		    IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, &q6->ip6q_src) &&
		    IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &q6->ip6q_dst))
			break;

	if (q6 == &ip6q) {
		/*
		 * the first fragment to arrive, create a reassembly queue.
		 */
		first_frag = 1;

		/*
		 * Enforce upper bound on number of fragmented packets
		 * for which we attempt reassembly:
		 * If maxfragpackets is 0, never accept fragments.
		 * If maxfragpackets is -1, accept all fragments without
		 * limitation.
		 */
		if (ip6_maxfragpackets < 0)
			;
		else if (frag6_nfragpackets >= (u_int)ip6_maxfragpackets)
			goto dropfrag;
		frag6_nfragpackets++;
		q6 = (struct ip6q *)malloc(sizeof(struct ip6q), M_FTABLE,
		    M_DONTWAIT);
		if (q6 == NULL)
			goto dropfrag;
		bzero(q6, sizeof(*q6));

		frag6_insque(q6, &ip6q);

		/* ip6q_nxt will be filled afterwards, from 1st fragment */
		q6->ip6q_down	= q6->ip6q_up = (struct ip6asfrag *)q6;
#ifdef notyet
		q6->ip6q_nxtp	= (u_char *)nxtp;
#endif
		q6->ip6q_ident	= ip6f->ip6f_ident;
		q6->ip6q_arrive = 0; /* Is it used anywhere? */
		q6->ip6q_ttl	= IPV6_FRAGTTL;
		q6->ip6q_src	= ip6->ip6_src;
		q6->ip6q_dst	= ip6->ip6_dst;
		q6->ip6q_unfrglen = -1;	/* The 1st fragment has not arrived. */

		q6->ip6q_nfrag = 0;
	}

	/*
	 * If it's the 1st fragment, record the length of the
	 * unfragmentable part and the next header of the fragment header.
	 */
	fragoff = ntohs(ip6f->ip6f_offlg & IP6F_OFF_MASK);
	if (fragoff == 0) {
		q6->ip6q_unfrglen = offset - sizeof(struct ip6_hdr) -
		    sizeof(struct ip6_frag);
		q6->ip6q_nxt = ip6f->ip6f_nxt;
	}

	/*
	 * Check that the reassembled packet would not exceed 65535 bytes
	 * in size.
	 * If it would, discard the fragment and return an ICMP error.
	 */
	frgpartlen = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen) - offset;
	if (q6->ip6q_unfrglen >= 0) {
		/* The 1st fragment has already arrived. */
		if (q6->ip6q_unfrglen + fragoff + frgpartlen > IPV6_MAXPACKET) {
			icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
			    offset - sizeof(struct ip6_frag) +
			    offsetof(struct ip6_frag, ip6f_offlg));
			IP6Q_UNLOCK();
			return (IPPROTO_DONE);
		}
	} else if (fragoff + frgpartlen > IPV6_MAXPACKET) {
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
		    offset - sizeof(struct ip6_frag) +
		    offsetof(struct ip6_frag, ip6f_offlg));
		IP6Q_UNLOCK();
		return (IPPROTO_DONE);
	}
	/*
	 * If it's the first fragment, do the above check for each
	 * fragment already stored in the reassembly queue.
	 */
	if (fragoff == 0) {
		for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
		     af6 = af6dwn) {
			af6dwn = af6->ip6af_down;

			if (q6->ip6q_unfrglen + af6->ip6af_off + af6->ip6af_frglen >
			    IPV6_MAXPACKET) {
				struct mbuf *merr = IP6_REASS_MBUF(af6);
				struct ip6_hdr *ip6err;
				int erroff = af6->ip6af_offset;

				/* dequeue the fragment. */
				frag6_deq(af6);
				free(af6, M_FTABLE);

				/* adjust pointer. */
				ip6err = mtod(merr, struct ip6_hdr *);

				/*
				 * Restore source and destination addresses
				 * in the erroneous IPv6 header.
				 */
				ip6err->ip6_src = q6->ip6q_src;
				ip6err->ip6_dst = q6->ip6q_dst;

				icmp6_error(merr, ICMP6_PARAM_PROB,
				    ICMP6_PARAMPROB_HEADER,
				    erroff - sizeof(struct ip6_frag) +
				    offsetof(struct ip6_frag, ip6f_offlg));
			}
		}
	}

	ip6af = (struct ip6asfrag *)malloc(sizeof(struct ip6asfrag), M_FTABLE,
	    M_DONTWAIT);
	if (ip6af == NULL)
		goto dropfrag;
	bzero(ip6af, sizeof(*ip6af));
	ip6af->ip6af_head = ip6->ip6_flow;
	ip6af->ip6af_len = ip6->ip6_plen;
	ip6af->ip6af_nxt = ip6->ip6_nxt;
	ip6af->ip6af_hlim = ip6->ip6_hlim;
	ip6af->ip6af_mff = ip6f->ip6f_offlg & IP6F_MORE_FRAG;
	ip6af->ip6af_off = fragoff;
	ip6af->ip6af_frglen = frgpartlen;
	ip6af->ip6af_offset = offset;
	IP6_REASS_MBUF(ip6af) = m;

	if (first_frag) {
		af6 = (struct ip6asfrag *)q6;
		goto insert;
	}

	/*
	 * Find a segment which begins after this one does.
	 */
	for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
	     af6 = af6->ip6af_down)
		if (af6->ip6af_off > ip6af->ip6af_off)
			break;

#if 0
	/*
	 * If there is a preceding segment, it may provide some of
	 * our data already.  If so, drop the data from the incoming
	 * segment.  If it provides all of our data, drop us.
	 */
	if (af6->ip6af_up != (struct ip6asfrag *)q6) {
		i = af6->ip6af_up->ip6af_off + af6->ip6af_up->ip6af_frglen
			- ip6af->ip6af_off;
		if (i > 0) {
			if (i >= ip6af->ip6af_frglen)
				goto dropfrag;
			m_adj(IP6_REASS_MBUF(ip6af), i);
			ip6af->ip6af_off += i;
			ip6af->ip6af_frglen -= i;
		}
	}

	/*
	 * While we overlap succeeding segments trim them or,
	 * if they are completely covered, dequeue them.
	 */
	while (af6 != (struct ip6asfrag *)q6 &&
	       ip6af->ip6af_off + ip6af->ip6af_frglen > af6->ip6af_off) {
		i = (ip6af->ip6af_off + ip6af->ip6af_frglen) - af6->ip6af_off;
		if (i < af6->ip6af_frglen) {
			af6->ip6af_frglen -= i;
			af6->ip6af_off += i;
			m_adj(IP6_REASS_MBUF(af6), i);
			break;
		}
		af6 = af6->ip6af_down;
		m_freem(IP6_REASS_MBUF(af6->ip6af_up));
		frag6_deq(af6->ip6af_up);
	}
#else
	/*
	 * If the incoming fragment overlaps some existing fragments in
	 * the reassembly queue, drop it, since it is dangerous from a
	 * security point of view to let it override existing fragments.
	 * We don't know which fragment is the bad guy - here we trust
	 * the fragment that came in earlier, with no real justification.
	 */
	if (af6->ip6af_up != (struct ip6asfrag *)q6) {
		i = af6->ip6af_up->ip6af_off + af6->ip6af_up->ip6af_frglen
			- ip6af->ip6af_off;
		if (i > 0) {
#if 0				/* suppress the noisy log */
			log(LOG_ERR, "%d bytes of a fragment from %s "
			    "overlaps the previous fragment\n",
			    i, ip6_sprintf(&q6->ip6q_src));
#endif
			free(ip6af, M_FTABLE);
			goto dropfrag;
		}
	}
	if (af6 != (struct ip6asfrag *)q6) {
		i = (ip6af->ip6af_off + ip6af->ip6af_frglen) - af6->ip6af_off;
		if (i > 0) {
#if 0				/* suppress the noisy log */
			log(LOG_ERR, "%d bytes of a fragment from %s "
			    "overlaps the succeeding fragment\n",
			    i, ip6_sprintf(&q6->ip6q_src));
#endif
			free(ip6af, M_FTABLE);
			goto dropfrag;
		}
	}
#endif

insert:

	/*
	 * Stick new segment in its place;
	 * check for complete reassembly.
	 * Move to front of packet queue, as we are
	 * the most recently active fragmented packet.
	 */
	frag6_enq(ip6af, af6->ip6af_up);
	frag6_nfrags++;
	q6->ip6q_nfrag++;
#if 0 /* xxx */
	if (q6 != ip6q.ip6q_next) {
		frag6_remque(q6);
		frag6_insque(q6, &ip6q);
	}
#endif
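	/*
	 * Walk the fragment chain in offset order: reassembly is
	 * complete only when each fragment starts exactly where the
	 * previous one ended (no holes) and the last fragment has
	 * the "more fragments" bit clear.
	 */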
	next = 0;
	for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
	     af6 = af6->ip6af_down) {
		if (af6->ip6af_off != next) {
			IP6Q_UNLOCK();
			return IPPROTO_DONE;
		}
		next += af6->ip6af_frglen;
	}
	if (af6->ip6af_up->ip6af_mff) {
		IP6Q_UNLOCK();
		return IPPROTO_DONE;
	}

	/*
	 * Reassembly is complete; concatenate fragments.
	 */
	ip6af = q6->ip6q_down;
	t = m = IP6_REASS_MBUF(ip6af);
	af6 = ip6af->ip6af_down;
	frag6_deq(ip6af);
	while (af6 != (struct ip6asfrag *)q6) {
		af6dwn = af6->ip6af_down;
		frag6_deq(af6);
		while (t->m_next)
			t = t->m_next;
		t->m_next = IP6_REASS_MBUF(af6);
		m_adj(t->m_next, af6->ip6af_offset);
		free(af6, M_FTABLE);
		af6 = af6dwn;
	}

	/* adjust offset to point where the original next header starts */
	offset = ip6af->ip6af_offset - sizeof(struct ip6_frag);
	free(ip6af, M_FTABLE);
	ip6 = mtod(m, struct ip6_hdr *);
	ip6->ip6_plen = htons((u_short)next + offset - sizeof(struct ip6_hdr));
	ip6->ip6_src = q6->ip6q_src;
	ip6->ip6_dst = q6->ip6q_dst;
	nxt = q6->ip6q_nxt;
#ifdef notyet
	*q6->ip6q_nxtp = (u_char)(nxt & 0xff);
#endif

	/*
	 * Delete frag6 header with as little cost as possible.
	 */
	if (offset < m->m_len) {
		ovbcopy((caddr_t)ip6, (caddr_t)ip6 + sizeof(struct ip6_frag),
		    offset);
		m->m_data += sizeof(struct ip6_frag);
		m->m_len -= sizeof(struct ip6_frag);
	} else {
		/* this comes with no copy if the boundary is on cluster */
		if ((t = m_split(m, offset, M_DONTWAIT)) == NULL) {
			frag6_remque(q6);
			frag6_nfrags -= q6->ip6q_nfrag;
			free(q6, M_FTABLE);
			frag6_nfragpackets--;
			goto dropfrag;
		}
		m_adj(t, sizeof(struct ip6_frag));
		m_cat(m, t);
	}

	/*
	 * Store NXT to the original.
	 */
	{
		u_int8_t *prvnxtp = ip6_get_prevhdr(m, offset); /* XXX */
		*prvnxtp = nxt;
	}

	frag6_remque(q6);
	frag6_nfrags -= q6->ip6q_nfrag;
	free(q6, M_FTABLE);
	frag6_nfragpackets--;

	if (m->m_flags & M_PKTHDR) { /* Isn't it always true? */
		int plen = 0;
		for (t = m; t; t = t->m_next)
			plen += t->m_len;
		m->m_pkthdr.len = plen;
	}

	ip6stat.ip6s_reassembled++;
	in6_ifstat_inc(dstifp, ifs6_reass_ok);

	/*
	 * Tell the launch routine the next header
	 */

	*mp = m;
	*offp = offset;

	IP6Q_UNLOCK();
	return nxt;
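	/*
	 * Common exit for any discarded fragment: count the drop,
	 * free the mbuf and release the reassembly queue lock.
	 */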
 dropfrag:
	in6_ifstat_inc(dstifp, ifs6_reass_fail);
	ip6stat.ip6s_fragdropped++;
	m_freem(m);
	IP6Q_UNLOCK();
	return IPPROTO_DONE;
}

/*
 * Free a fragment reassembly header and all
 * associated datagrams.
 */
void
frag6_freef(q6)
	struct ip6q *q6;
{
	struct ip6asfrag *af6, *down6;

	IP6Q_LOCK_CHECK();

	for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
	     af6 = down6) {
		struct mbuf *m = IP6_REASS_MBUF(af6);

		down6 = af6->ip6af_down;
		frag6_deq(af6);

		/*
		 * Return ICMP time exceeded error for the 1st fragment.
		 * Just free other fragments.
		 */
		if (af6->ip6af_off == 0) {
			struct ip6_hdr *ip6;

			/* adjust pointer */
			ip6 = mtod(m, struct ip6_hdr *);

			/* restore source and destination addresses */
			ip6->ip6_src = q6->ip6q_src;
			ip6->ip6_dst = q6->ip6q_dst;

			icmp6_error(m, ICMP6_TIME_EXCEEDED,
			    ICMP6_TIME_EXCEED_REASSEMBLY, 0);
		} else
			m_freem(m);
		free(af6, M_FTABLE);
	}
	frag6_remque(q6);
	frag6_nfrags -= q6->ip6q_nfrag;
	free(q6, M_FTABLE);
	frag6_nfragpackets--;
}

/*
 * Put an ip6 fragment on a reassembly chain.
 * Like insque, but pointers in middle of structure.
 */
void
frag6_enq(af6, up6)
	struct ip6asfrag *af6, *up6;
{

	IP6Q_LOCK_CHECK();

	af6->ip6af_up = up6;
	af6->ip6af_down = up6->ip6af_down;
	up6->ip6af_down->ip6af_up = af6;
	up6->ip6af_down = af6;
}

/*
 * To frag6_enq as remque is to insque.
 */
void
frag6_deq(af6)
	struct ip6asfrag *af6;
{

	IP6Q_LOCK_CHECK();

	af6->ip6af_up->ip6af_down = af6->ip6af_down;
	af6->ip6af_down->ip6af_up = af6->ip6af_up;
}

void
frag6_insque(new, old)
	struct ip6q *new, *old;
{

	IP6Q_LOCK_CHECK();

	new->ip6q_prev = old;
	new->ip6q_next = old->ip6q_next;
	old->ip6q_next->ip6q_prev = new;
	old->ip6q_next = new;
}

void
frag6_remque(p6)
	struct ip6q *p6;
{

	IP6Q_LOCK_CHECK();

	p6->ip6q_prev->ip6q_next = p6->ip6q_next;
	p6->ip6q_next->ip6q_prev = p6->ip6q_prev;
}

/*
 * IPv6 reassembly timer processing;
 * if a timer expires on a reassembly
 * queue, discard it.
 */
void
frag6_slowtimo()
{
	struct ip6q *q6;
	int s = splsoftnet();

	IP6Q_LOCK();
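	/*
	 * Note that q6 is advanced before the TTL check, so that
	 * frag6_freef() can unlink the expired entry (q6->ip6q_prev)
	 * without invalidating the iterator.
	 */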
	q6 = ip6q.ip6q_next;
	if (q6)
		while (q6 != &ip6q) {
			--q6->ip6q_ttl;
			q6 = q6->ip6q_next;
			if (q6->ip6q_prev->ip6q_ttl == 0) {
				ip6stat.ip6s_fragtimeout++;
				/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
				frag6_freef(q6->ip6q_prev);
			}
		}
	/*
	 * If we are over the maximum number of fragmented packets
	 * (due to the limit being lowered), drain off
	 * enough to get down to the new limit.
	 */
	while (frag6_nfragpackets > (u_int)ip6_maxfragpackets &&
	    ip6q.ip6q_prev) {
		ip6stat.ip6s_fragoverflow++;
		/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
		frag6_freef(ip6q.ip6q_prev);
	}
	IP6Q_UNLOCK();

#if 0
	/*
	 * Routing changes might produce a better route than we last used;
	 * make sure we notice eventually, even if forwarding only for one
	 * destination and the cache is never replaced.
	 */
	if (ip6_forward_rt.ro_rt) {
		RTFREE(ip6_forward_rt.ro_rt);
		ip6_forward_rt.ro_rt = 0;
	}
	if (ipsrcchk_rt.ro_rt) {
		RTFREE(ipsrcchk_rt.ro_rt);
		ipsrcchk_rt.ro_rt = 0;
	}
#endif

	splx(s);
}
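/*
 * frag6_drain() is invoked when the system is low on mbufs.
 * It must not wait for the reassembly queue lock, so if the lock
 * is already held it simply returns without draining.
 */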
/*
 * Drain off all datagram fragments.
 */
void
frag6_drain()
{

	if (ip6q_lock_try() == 0)
		return;
	while (ip6q.ip6q_next != &ip6q) {
		ip6stat.ip6s_fragdropped++;
		/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
		frag6_freef(ip6q.ip6q_next);
	}
	IP6Q_UNLOCK();
}