/*	$NetBSD: frag6.c,v 1.31 2006/11/16 01:33:45 christos Exp $	*/
/*	$KAME: frag6.c,v 1.40 2002/05/27 21:40:31 itojun Exp $	*/

/*
 * Copyright (C) 1995, 1996, 1997, and 1998 WIDE Project.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the project nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE PROJECT AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE PROJECT OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: frag6.c,v 1.31 2006/11/16 01:33:45 christos Exp $");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/domain.h>
#include <sys/protosw.h>
#include <sys/socket.h>
#include <sys/errno.h>
#include <sys/time.h>
#include <sys/kernel.h>
#include <sys/syslog.h>

#include <net/if.h>
#include <net/route.h>

#include <netinet/in.h>
#include <netinet/in_var.h>
#include <netinet/ip6.h>
#include <netinet6/ip6_var.h>
#include <netinet/icmp6.h>

#include <net/net_osdep.h>

/*
 * Define this to get correct behavior for per-interface statistics.
 * It requires an extra routing table lookup per fragment, which may
 * or may not be a performance hit.
 */
#define IN6_IFSTAT_STRICT

static void frag6_enq __P((struct ip6asfrag *, struct ip6asfrag *));
static void frag6_deq __P((struct ip6asfrag *));
static void frag6_insque __P((struct ip6q *, struct ip6q *));
static void frag6_remque __P((struct ip6q *));
static void frag6_freef __P((struct ip6q *));

static int ip6q_locked;
u_int frag6_nfragpackets;
u_int frag6_nfrags;
struct	ip6q ip6q;	/* ip6 reassembly queue */

static inline int ip6q_lock_try __P((void));
static inline void ip6q_unlock __P((void));

static inline int
ip6q_lock_try()
{
	int s;

	/*
	 * Use splvm() -- we're blocking things that would cause
	 * mbuf allocation.
	 */
	s = splvm();
	if (ip6q_locked) {
		splx(s);
		return (0);
	}
	ip6q_locked = 1;
	splx(s);
	return (1);
}

static inline void
ip6q_unlock()
{
	int s;

	s = splvm();
	ip6q_locked = 0;
	splx(s);
}

#ifdef DIAGNOSTIC
#define	IP6Q_LOCK()							\
do {									\
	if (ip6q_lock_try() == 0) {					\
		printf("%s:%d: ip6q already locked\n", __FILE__, __LINE__); \
		panic("ip6q_lock");					\
	}								\
} while (/*CONSTCOND*/ 0)
#define	IP6Q_LOCK_CHECK()						\
do {									\
	if (ip6q_locked == 0) {						\
		printf("%s:%d: ip6q lock not held\n", __FILE__, __LINE__); \
		panic("ip6q lock check");				\
	}								\
} while (/*CONSTCOND*/ 0)
#else
#define	IP6Q_LOCK()		(void) ip6q_lock_try()
#define	IP6Q_LOCK_CHECK()	/* nothing */
#endif

#define IP6Q_UNLOCK()		ip6q_unlock()
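/*
 * Illustrative sketch (not part of the original code): every walk of the
 * ip6q list below is bracketed by IP6Q_LOCK()/IP6Q_UNLOCK().  A caller
 * that must not block or panic on contention, such as frag6_drain() at
 * the bottom of this file, takes the lock opportunistically and simply
 * backs off when it is already held:
 *
 *	if (ip6q_lock_try() == 0)
 *		return;			// someone else owns the queue
 *	...manipulate ip6q...
 *	IP6Q_UNLOCK();
 */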

#ifndef offsetof		/* XXX */
#define	offsetof(type, member)	((size_t)(&((type *)0)->member))
#endif

/*
 * Initialise reassembly queue and fragment identifier.
 */
void
frag6_init()
{

	ip6q.ip6q_next = ip6q.ip6q_prev = &ip6q;
}

/*
 * In RFC2460, the fragment and reassembly rules do not agree with each
 * other in terms of next header field handling in the fragment header.
 * While the sender will use the same value for all of the fragmented
 * packets, the receiver is advised not to check them for consistency.
 *
 * fragment rule (p20):
 *	(2) A Fragment header containing:
 *	The Next Header value that identifies the first header of
 *	the Fragmentable Part of the original packet.
 *		-> next header field is the same for all fragments
 *
 * reassembly rule (p21):
 *	The Next Header field of the last header of the Unfragmentable
 *	Part is obtained from the Next Header field of the first
 *	fragment's Fragment header.
 *		-> should grab it from the first fragment only
 *
 * The following note also contradicts the fragment rule - no one is going
 * to send different fragments with different next header fields.
 *
 * additional note (p22):
 *	The Next Header values in the Fragment headers of different
 *	fragments of the same original packet may differ.  Only the value
 *	from the Offset zero fragment packet is used for reassembly.
 *		-> should grab it from the first fragment only
 *
 * There is no explicit reason given in the RFC.  Historical reason maybe?
 */
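/*
 * Worked example (illustrative only; header sizes and offsets are made
 * up for the sake of the picture).  An original packet
 *
 *	[IPv6 hdr][hop-by-hop, nxt=TCP][TCP hdr + data]
 *
 * is sent as fragments whose Fragment headers all carry nxt=TCP:
 *
 *	[IPv6 hdr][hop-by-hop, nxt=FRAG][frag hdr nxt=TCP off=0   M=1][data]
 *	[IPv6 hdr][hop-by-hop, nxt=FRAG][frag hdr nxt=TCP off=496 M=0][data]
 *
 * Per the reassembly rule quoted above, the code below records ip6f_nxt
 * (TCP here) into ip6q_nxt only when the offset-zero fragment arrives,
 * and ignores the field in all other fragments.
 */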
/*
 * Fragment input
 */
int
frag6_input(struct mbuf **mp, int *offp, int proto)
{
	struct mbuf *m = *mp, *t;
	struct ip6_hdr *ip6;
	struct ip6_frag *ip6f;
	struct ip6q *q6;
	struct ip6asfrag *af6, *ip6af, *af6dwn;
	int offset = *offp, nxt, i, next;
	int first_frag = 0;
	int fragoff, frgpartlen;	/* must be larger than u_int16_t */
	struct ifnet *dstifp;
#ifdef IN6_IFSTAT_STRICT
	static struct route_in6 ro;
	struct sockaddr_in6 *dst;
#endif

	ip6 = mtod(m, struct ip6_hdr *);
	IP6_EXTHDR_GET(ip6f, struct ip6_frag *, m, offset, sizeof(*ip6f));
	if (ip6f == NULL)
		return IPPROTO_DONE;

	dstifp = NULL;
#ifdef IN6_IFSTAT_STRICT
	/* find the destination interface of the packet. */
	dst = (struct sockaddr_in6 *)&ro.ro_dst;
	if (ro.ro_rt
	 && ((ro.ro_rt->rt_flags & RTF_UP) == 0
	  || !IN6_ARE_ADDR_EQUAL(&dst->sin6_addr, &ip6->ip6_dst))) {
		RTFREE(ro.ro_rt);
		ro.ro_rt = (struct rtentry *)0;
	}
	if (ro.ro_rt == NULL) {
		bzero(dst, sizeof(*dst));
		dst->sin6_family = AF_INET6;
		dst->sin6_len = sizeof(struct sockaddr_in6);
		dst->sin6_addr = ip6->ip6_dst;
	}
	rtalloc((struct route *)&ro);
	if (ro.ro_rt != NULL && ro.ro_rt->rt_ifa != NULL)
		dstifp = ((struct in6_ifaddr *)ro.ro_rt->rt_ifa)->ia_ifp;
#else
	/* we are violating the spec, this is not the destination interface */
	if ((m->m_flags & M_PKTHDR) != 0)
		dstifp = m->m_pkthdr.rcvif;
#endif

	/* jumbo payload can't contain a fragment header */
	if (ip6->ip6_plen == 0) {
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER, offset);
		in6_ifstat_inc(dstifp, ifs6_reass_fail);
		return IPPROTO_DONE;
	}

	/*
	 * Check that the fragment's payload length is a multiple of
	 * 8 octets, as required for any fragment with the M bit set.
	 * Testing (ip6_plen - offset) works because both
	 * sizeof(struct ip6_frag) == 8 and sizeof(struct ip6_hdr) == 40
	 * are themselves multiples of 8.
	 */
	if ((ip6f->ip6f_offlg & IP6F_MORE_FRAG) &&
	    (((ntohs(ip6->ip6_plen) - offset) & 0x7) != 0)) {
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
		    offsetof(struct ip6_hdr, ip6_plen));
		in6_ifstat_inc(dstifp, ifs6_reass_fail);
		return IPPROTO_DONE;
	}
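#if 0
	/*
	 * Illustrative arithmetic for the check above, not compiled.
	 * `offset' runs from the start of the IPv6 header to the fragment
	 * header, while ip6_plen excludes the 40-byte IPv6 header, so the
	 * fragment's payload length is
	 *
	 *	(40 + plen) - (offset + 8) = (plen - offset) + 32
	 *
	 * and since 32 % 8 == 0, testing (plen - offset) & 0x7 is the same
	 * as testing the payload length itself.  E.g. a fragment with
	 * plen = 1456 and the fragment header at offset = 40 carries
	 * 1448 bytes of payload; (1456 - 40) & 0x7 == 0, so it passes.
	 */
#endif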

	ip6stat.ip6s_fragments++;
	in6_ifstat_inc(dstifp, ifs6_reass_reqd);

	/* offset now points to data portion */
	offset += sizeof(struct ip6_frag);

	IP6Q_LOCK();

	/*
	 * Enforce upper bound on number of fragments.
	 * If ip6_maxfrags is 0, never accept fragments.
	 * If ip6_maxfrags is -1, accept all fragments without limitation.
	 */
	if (ip6_maxfrags < 0)
		;
	else if (frag6_nfrags >= (u_int)ip6_maxfrags)
		goto dropfrag;

	for (q6 = ip6q.ip6q_next; q6 != &ip6q; q6 = q6->ip6q_next)
		if (ip6f->ip6f_ident == q6->ip6q_ident &&
		    IN6_ARE_ADDR_EQUAL(&ip6->ip6_src, &q6->ip6q_src) &&
		    IN6_ARE_ADDR_EQUAL(&ip6->ip6_dst, &q6->ip6q_dst))
			break;

	if (q6 == &ip6q) {
		/*
		 * the first fragment to arrive; create a reassembly queue.
		 */
		first_frag = 1;

		/*
		 * Enforce upper bound on number of fragmented packets
		 * for which we attempt reassembly:
		 * If ip6_maxfragpackets is 0, never accept fragments.
		 * If ip6_maxfragpackets is -1, accept all fragments without
		 * limitation.
		 */
		if (ip6_maxfragpackets < 0)
			;
		else if (frag6_nfragpackets >= (u_int)ip6_maxfragpackets)
			goto dropfrag;
		frag6_nfragpackets++;
		q6 = (struct ip6q *)malloc(sizeof(struct ip6q), M_FTABLE,
		    M_DONTWAIT);
		if (q6 == NULL)
			goto dropfrag;
		bzero(q6, sizeof(*q6));

		frag6_insque(q6, &ip6q);

		/* ip6q_nxt will be filled afterwards, from 1st fragment */
		q6->ip6q_down	= q6->ip6q_up = (struct ip6asfrag *)q6;
#ifdef notyet
		q6->ip6q_nxtp	= (u_char *)nxtp;
#endif
		q6->ip6q_ident	= ip6f->ip6f_ident;
		q6->ip6q_arrive = 0; /* Is it used anywhere? */
		q6->ip6q_ttl	= IPV6_FRAGTTL;
		q6->ip6q_src	= ip6->ip6_src;
		q6->ip6q_dst	= ip6->ip6_dst;
		q6->ip6q_unfrglen = -1;	/* The 1st fragment has not arrived. */

		q6->ip6q_nfrag = 0;
	}

	/*
	 * If it's the 1st fragment, record the length of the
	 * unfragmentable part and the next header of the fragment header.
	 */
	fragoff = ntohs(ip6f->ip6f_offlg & IP6F_OFF_MASK);
	if (fragoff == 0) {
		q6->ip6q_unfrglen = offset - sizeof(struct ip6_hdr) -
		    sizeof(struct ip6_frag);
		q6->ip6q_nxt = ip6f->ip6f_nxt;
	}

	/*
	 * Check that the reassembled packet would not exceed 65535 bytes
	 * in size.
	 * If it would exceed, discard the fragment and return an ICMP error.
	 */
	frgpartlen = sizeof(struct ip6_hdr) + ntohs(ip6->ip6_plen) - offset;
	if (q6->ip6q_unfrglen >= 0) {
		/* The 1st fragment has already arrived. */
		if (q6->ip6q_unfrglen + fragoff + frgpartlen > IPV6_MAXPACKET) {
			icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
			    offset - sizeof(struct ip6_frag) +
			    offsetof(struct ip6_frag, ip6f_offlg));
			IP6Q_UNLOCK();
			return (IPPROTO_DONE);
		}
	} else if (fragoff + frgpartlen > IPV6_MAXPACKET) {
		icmp6_error(m, ICMP6_PARAM_PROB, ICMP6_PARAMPROB_HEADER,
		    offset - sizeof(struct ip6_frag) +
		    offsetof(struct ip6_frag, ip6f_offlg));
		IP6Q_UNLOCK();
		return (IPPROTO_DONE);
	}
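#if 0
	/*
	 * Illustrative arithmetic for the size check above, not compiled.
	 * frgpartlen is this fragment's payload length: the whole packet
	 * (40 + plen) minus everything up to and including the fragment
	 * header (offset).  The reassembled payload spans the
	 * unfragmentable part plus fragoff + frgpartlen bytes for the
	 * highest fragment, and ip6_plen is a 16-bit field, so anything
	 * past IPV6_MAXPACKET (65535) could never be represented.
	 * E.g. unfrglen = 8 (one 8-byte option header), fragoff = 65528
	 * and frgpartlen = 8 gives 65544 > 65535, so the fragment is
	 * rejected with an ICMP parameter problem pointing at the
	 * offset field.
	 */
#endif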
	/*
	 * If it's the first fragment, do the above check for each
	 * fragment already stored in the reassembly queue.
	 */
	if (fragoff == 0) {
		for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
		     af6 = af6dwn) {
			af6dwn = af6->ip6af_down;

			if (q6->ip6q_unfrglen + af6->ip6af_off + af6->ip6af_frglen >
			    IPV6_MAXPACKET) {
				struct mbuf *merr = IP6_REASS_MBUF(af6);
				struct ip6_hdr *ip6err;
				int erroff = af6->ip6af_offset;

				/* dequeue the fragment. */
				frag6_deq(af6);
				free(af6, M_FTABLE);

				/* adjust pointer. */
				ip6err = mtod(merr, struct ip6_hdr *);

				/*
				 * Restore source and destination addresses
				 * in the erroneous IPv6 header.
				 */
				ip6err->ip6_src = q6->ip6q_src;
				ip6err->ip6_dst = q6->ip6q_dst;

				icmp6_error(merr, ICMP6_PARAM_PROB,
				    ICMP6_PARAMPROB_HEADER,
				    erroff - sizeof(struct ip6_frag) +
				    offsetof(struct ip6_frag, ip6f_offlg));
			}
		}
	}

	ip6af = (struct ip6asfrag *)malloc(sizeof(struct ip6asfrag), M_FTABLE,
	    M_DONTWAIT);
	if (ip6af == NULL)
		goto dropfrag;
	bzero(ip6af, sizeof(*ip6af));
	ip6af->ip6af_head = ip6->ip6_flow;
	ip6af->ip6af_len = ip6->ip6_plen;
	ip6af->ip6af_nxt = ip6->ip6_nxt;
	ip6af->ip6af_hlim = ip6->ip6_hlim;
	ip6af->ip6af_mff = ip6f->ip6f_offlg & IP6F_MORE_FRAG;
	ip6af->ip6af_off = fragoff;
	ip6af->ip6af_frglen = frgpartlen;
	ip6af->ip6af_offset = offset;
	IP6_REASS_MBUF(ip6af) = m;

	if (first_frag) {
		af6 = (struct ip6asfrag *)q6;
		goto insert;
	}

	/*
	 * Find a segment which begins after this one does.
	 */
	for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
	     af6 = af6->ip6af_down)
		if (af6->ip6af_off > ip6af->ip6af_off)
			break;

#if 0
	/*
	 * If there is a preceding segment, it may provide some of
	 * our data already.  If so, drop the data from the incoming
	 * segment.  If it provides all of our data, drop us.
	 */
	if (af6->ip6af_up != (struct ip6asfrag *)q6) {
		i = af6->ip6af_up->ip6af_off + af6->ip6af_up->ip6af_frglen
			- ip6af->ip6af_off;
		if (i > 0) {
			if (i >= ip6af->ip6af_frglen)
				goto dropfrag;
			m_adj(IP6_REASS_MBUF(ip6af), i);
			ip6af->ip6af_off += i;
			ip6af->ip6af_frglen -= i;
		}
	}

	/*
	 * While we overlap succeeding segments trim them or,
	 * if they are completely covered, dequeue them.
	 */
	while (af6 != (struct ip6asfrag *)q6 &&
	       ip6af->ip6af_off + ip6af->ip6af_frglen > af6->ip6af_off) {
		i = (ip6af->ip6af_off + ip6af->ip6af_frglen) - af6->ip6af_off;
		if (i < af6->ip6af_frglen) {
			af6->ip6af_frglen -= i;
			af6->ip6af_off += i;
			m_adj(IP6_REASS_MBUF(af6), i);
			break;
		}
		af6 = af6->ip6af_down;
		m_freem(IP6_REASS_MBUF(af6->ip6af_up));
		frag6_deq(af6->ip6af_up);
	}
#else
	/*
	 * If the incoming fragment overlaps some existing fragments in
	 * the reassembly queue, drop it, since it is dangerous to overwrite
	 * existing fragments from a security point of view.
	 * We don't know which fragment is the bad guy - here we trust
	 * the fragment that arrived earlier, with no real justification.
	 */
	if (af6->ip6af_up != (struct ip6asfrag *)q6) {
		i = af6->ip6af_up->ip6af_off + af6->ip6af_up->ip6af_frglen
			- ip6af->ip6af_off;
		if (i > 0) {
#if 0 /* suppress the noisy log */
			log(LOG_ERR, "%d bytes of a fragment from %s "
			    "overlaps the previous fragment\n",
			    i, ip6_sprintf(&q6->ip6q_src));
#endif
			free(ip6af, M_FTABLE);
			goto dropfrag;
		}
	}
	if (af6 != (struct ip6asfrag *)q6) {
		i = (ip6af->ip6af_off + ip6af->ip6af_frglen) - af6->ip6af_off;
		if (i > 0) {
#if 0 /* suppress the noisy log */
			log(LOG_ERR, "%d bytes of a fragment from %s "
			    "overlaps the succeeding fragment\n",
			    i, ip6_sprintf(&q6->ip6q_src));
#endif
			free(ip6af, M_FTABLE);
			goto dropfrag;
		}
	}
#endif
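#if 0
	/*
	 * Illustrative sketch of the two overlap tests above, not compiled.
	 * Fragments are kept sorted by offset and af6 is the first queued
	 * fragment that begins after the new one (ip6af), so:
	 *
	 *   - the new fragment overlaps its predecessor iff
	 *	up->off + up->frglen > new->off		(first test, i > 0)
	 *   - the new fragment overlaps its successor iff
	 *	new->off + new->frglen > af6->off	(second test, i > 0)
	 *
	 * Example: a queued fragment covers bytes [0, 1448); a new fragment
	 * at off 1440 gives i = 1448 - 1440 = 8 > 0 and is dropped, where
	 * the disabled BSD code above would instead have trimmed 8 bytes.
	 */
#endif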

insert:

	/*
	 * Stick new segment in its place;
	 * check for complete reassembly.
	 * Move to front of packet queue, as we are
	 * the most recently active fragmented packet.
	 */
	frag6_enq(ip6af, af6->ip6af_up);
	frag6_nfrags++;
	q6->ip6q_nfrag++;
#if 0 /* xxx */
	if (q6 != ip6q.ip6q_next) {
		frag6_remque(q6);
		frag6_insque(q6, &ip6q);
	}
#endif
	next = 0;
	for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
	     af6 = af6->ip6af_down) {
		if (af6->ip6af_off != next) {
			IP6Q_UNLOCK();
			return IPPROTO_DONE;
		}
		next += af6->ip6af_frglen;
	}
	if (af6->ip6af_up->ip6af_mff) {
		IP6Q_UNLOCK();
		return IPPROTO_DONE;
	}

	/*
	 * Reassembly is complete; concatenate fragments.
	 */
	ip6af = q6->ip6q_down;
	t = m = IP6_REASS_MBUF(ip6af);
	af6 = ip6af->ip6af_down;
	frag6_deq(ip6af);
	while (af6 != (struct ip6asfrag *)q6) {
		af6dwn = af6->ip6af_down;
		frag6_deq(af6);
		while (t->m_next)
			t = t->m_next;
		t->m_next = IP6_REASS_MBUF(af6);
		m_adj(t->m_next, af6->ip6af_offset);
		free(af6, M_FTABLE);
		af6 = af6dwn;
	}

	/* adjust offset to point where the original next header starts */
	offset = ip6af->ip6af_offset - sizeof(struct ip6_frag);
	free(ip6af, M_FTABLE);
	ip6 = mtod(m, struct ip6_hdr *);
	ip6->ip6_plen = htons(next + offset - sizeof(struct ip6_hdr));
	ip6->ip6_src = q6->ip6q_src;
	ip6->ip6_dst = q6->ip6q_dst;
	nxt = q6->ip6q_nxt;
#ifdef notyet
	*q6->ip6q_nxtp = (u_char)(nxt & 0xff);
#endif

	/*
	 * Delete the fragment header with as little cost as possible.
	 */
	if (offset < m->m_len) {
		ovbcopy((caddr_t)ip6, (caddr_t)ip6 + sizeof(struct ip6_frag),
		    offset);
		m->m_data += sizeof(struct ip6_frag);
		m->m_len -= sizeof(struct ip6_frag);
	} else {
		/* this involves no copy if the boundary is on a cluster */
		if ((t = m_split(m, offset, M_DONTWAIT)) == NULL) {
			frag6_remque(q6);
			frag6_nfrags -= q6->ip6q_nfrag;
			free(q6, M_FTABLE);
			frag6_nfragpackets--;
			goto dropfrag;
		}
		m_adj(t, sizeof(struct ip6_frag));
		m_cat(m, t);
	}
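#if 0
	/*
	 * Illustrative memory layout for the common case above (offset
	 * within the first mbuf), not compiled.  Rather than moving the
	 * payload down, the `offset' bytes of unfragmentable headers are
	 * slid 8 bytes forward over the fragment header, and m_data is
	 * then advanced past the now-dead leading bytes:
	 *
	 *	before:	[IPv6 hdr + ext hdrs][frag hdr][payload...]
	 *		^m_data              ^offset
	 *	after:	[dead 8B][IPv6 hdr + ext hdrs][payload...]
	 *		         ^m_data
	 *
	 * ovbcopy() is used because source and destination overlap.
	 */
#endif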

	/*
	 * Store NXT to the original.
	 */
	{
		u_int8_t *prvnxtp = ip6_get_prevhdr(m, offset); /* XXX */
		*prvnxtp = nxt;
	}

	frag6_remque(q6);
	frag6_nfrags -= q6->ip6q_nfrag;
	free(q6, M_FTABLE);
	frag6_nfragpackets--;

	if (m->m_flags & M_PKTHDR) { /* Isn't it always true? */
		int plen = 0;
		for (t = m; t; t = t->m_next)
			plen += t->m_len;
		m->m_pkthdr.len = plen;
	}

	ip6stat.ip6s_reassembled++;
	in6_ifstat_inc(dstifp, ifs6_reass_ok);

	/*
	 * Tell the launch routine the next header.
	 */

	*mp = m;
	*offp = offset;

	IP6Q_UNLOCK();
	return nxt;

 dropfrag:
	in6_ifstat_inc(dstifp, ifs6_reass_fail);
	ip6stat.ip6s_fragdropped++;
	m_freem(m);
	IP6Q_UNLOCK();
	return IPPROTO_DONE;
}

/*
 * Free a fragment reassembly header and all
 * associated datagrams.
 */
void
frag6_freef(q6)
	struct ip6q *q6;
{
	struct ip6asfrag *af6, *down6;

	IP6Q_LOCK_CHECK();

	for (af6 = q6->ip6q_down; af6 != (struct ip6asfrag *)q6;
	     af6 = down6) {
		struct mbuf *m = IP6_REASS_MBUF(af6);

		down6 = af6->ip6af_down;
		frag6_deq(af6);

		/*
		 * Return an ICMP time exceeded error for the 1st fragment.
		 * Just free the other fragments.
		 */
		if (af6->ip6af_off == 0) {
			struct ip6_hdr *ip6;

			/* adjust pointer */
			ip6 = mtod(m, struct ip6_hdr *);

			/* restore source and destination addresses */
			ip6->ip6_src = q6->ip6q_src;
			ip6->ip6_dst = q6->ip6q_dst;

			icmp6_error(m, ICMP6_TIME_EXCEEDED,
				    ICMP6_TIME_EXCEED_REASSEMBLY, 0);
		} else
			m_freem(m);
		free(af6, M_FTABLE);
	}
	frag6_remque(q6);
	frag6_nfrags -= q6->ip6q_nfrag;
	free(q6, M_FTABLE);
	frag6_nfragpackets--;
}

/*
 * Put an ip fragment on a reassembly chain.
 * Like insque, but pointers in middle of structure.
 */
void
frag6_enq(af6, up6)
	struct ip6asfrag *af6, *up6;
{

	IP6Q_LOCK_CHECK();

	af6->ip6af_up = up6;
	af6->ip6af_down = up6->ip6af_down;
	up6->ip6af_down->ip6af_up = af6;
	up6->ip6af_down = af6;
}

/*
 * To frag6_enq as remque is to insque.
 */
void
frag6_deq(af6)
	struct ip6asfrag *af6;
{

	IP6Q_LOCK_CHECK();

	af6->ip6af_up->ip6af_down = af6->ip6af_down;
	af6->ip6af_down->ip6af_up = af6->ip6af_up;
}

void
frag6_insque(new, old)
	struct ip6q *new, *old;
{

	IP6Q_LOCK_CHECK();

	new->ip6q_prev = old;
	new->ip6q_next = old->ip6q_next;
	old->ip6q_next->ip6q_prev = new;
	old->ip6q_next = new;
}

void
frag6_remque(p6)
	struct ip6q *p6;
{

	IP6Q_LOCK_CHECK();

	p6->ip6q_prev->ip6q_next = p6->ip6q_next;
	p6->ip6q_next->ip6q_prev = p6->ip6q_prev;
}

/*
 * IPv6 reassembly timer processing;
 * if a timer expires on a reassembly
 * queue, discard it.
 */
void
frag6_slowtimo()
{
	struct ip6q *q6;
	int s = splsoftnet();

	IP6Q_LOCK();
	q6 = ip6q.ip6q_next;
	if (q6)
		while (q6 != &ip6q) {
			--q6->ip6q_ttl;
			q6 = q6->ip6q_next;
			if (q6->ip6q_prev->ip6q_ttl == 0) {
				ip6stat.ip6s_fragtimeout++;
				/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
				frag6_freef(q6->ip6q_prev);
			}
		}
	/*
	 * If we are over the maximum number of fragments
	 * (due to the limit being lowered), drain off
	 * enough to get down to the new limit.
	 */
	while (frag6_nfragpackets > (u_int)ip6_maxfragpackets &&
	    ip6q.ip6q_prev) {
		ip6stat.ip6s_fragoverflow++;
		/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
		frag6_freef(ip6q.ip6q_prev);
	}
	IP6Q_UNLOCK();

#if 0
	/*
	 * Routing changes might produce a better route than we last used;
	 * make sure we notice eventually, even if forwarding only for one
	 * destination and the cache is never replaced.
	 */
	if (ip6_forward_rt.ro_rt) {
		RTFREE(ip6_forward_rt.ro_rt);
		ip6_forward_rt.ro_rt = 0;
	}
	if (ipsrcchk_rt.ro_rt) {
		RTFREE(ipsrcchk_rt.ro_rt);
		ipsrcchk_rt.ro_rt = 0;
	}
#endif

	splx(s);
}

/*
 * Drain off all datagram fragments.
 */
void
frag6_drain()
{

	if (ip6q_lock_try() == 0)
		return;
	while (ip6q.ip6q_next != &ip6q) {
		ip6stat.ip6s_fragdropped++;
		/* XXX in6_ifstat_inc(ifp, ifs6_reass_fail) */
		frag6_freef(ip6q.ip6q_next);
	}
	IP6Q_UNLOCK();
}
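/*
 * Note (a sketch of how these entry points are expected to be wired up;
 * the actual table lives in netinet6/in6_proto.c, not in this file):
 * the IPPROTO_FRAGMENT protocol switch entry points pr_input at
 * frag6_input(), pr_init at frag6_init(), pr_slowtimo at
 * frag6_slowtimo() and pr_drain at frag6_drain(), so reassembly state
 * is created on demand, aged by the slow timeout, and flushed under
 * memory pressure.
 */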