/*	$NetBSD: ip_reass.c,v 1.16 2018/05/03 07:25:49 maxv Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1988, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)ip_input.c	8.2 (Berkeley) 1/4/94
 */

/*
 * IP reassembly.
 *
 * Additive-Increase/Multiplicative-Decrease (AIMD) strategy for IP
 * reassembly queue buffer management.
 *
 * We keep a count of total IP fragments (NB: not fragmented packets)
 * awaiting reassembly (ip_nfrags) and a limit (ip_maxfrags) on fragments.
 * If ip_nfrags exceeds the ip_maxfrags limit, we drop half the total
 * fragments in the reassembly queues.  This AIMD policy avoids repeatedly
 * deleting single packets under heavy fragmentation load (e.g., from lossy
 * NFS peers).
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: ip_reass.c,v 1.16 2018/05/03 07:25:49 maxv Exp $");

#include <sys/param.h>
#include <sys/types.h>

#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/pool.h>
#include <sys/queue.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <net/if.h>

#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/ip.h>
#include <netinet/in_pcb.h>
#include <netinet/ip_var.h>
#include <netinet/ip_private.h>
#include <netinet/in_var.h>

/*
 * IP reassembly queue structures.  Each fragment being reassembled is
 * attached to one of these structures.  They are timed out after the TTL
 * drops to 0, and may also be reclaimed if memory becomes tight.
 */

typedef struct ipfr_qent {
	TAILQ_ENTRY(ipfr_qent)	ipqe_q;
	struct ip *		ipqe_ip;
	struct mbuf *		ipqe_m;
	bool			ipqe_mff;
} ipfr_qent_t;

TAILQ_HEAD(ipfr_qent_head, ipfr_qent);

typedef struct ipfr_queue {
	LIST_ENTRY(ipfr_queue)	ipq_q;		/* to other reass headers */
	struct ipfr_qent_head	ipq_fragq;	/* queue of fragment entries */
	uint8_t			ipq_ttl;	/* time for reass q to live */
	uint8_t			ipq_p;		/* protocol of this fragment */
	uint16_t		ipq_id;		/* sequence id for reassembly */
	struct in_addr		ipq_src;
	struct in_addr		ipq_dst;
	uint16_t		ipq_nfrags;	/* frags in this queue entry */
	uint8_t			ipq_tos;	/* TOS of this fragment */
} ipfr_queue_t;

/*
 * Hash table of IP reassembly queues.
 */
#define	IPREASS_HASH_SHIFT	6
#define	IPREASS_HASH_SIZE	(1 << IPREASS_HASH_SHIFT)
#define	IPREASS_HASH_MASK	(IPREASS_HASH_SIZE - 1)
#define	IPREASS_HASH(x, y) \
    (((((x) & 0xf) | ((((x) >> 8) & 0xf) << 4)) ^ (y)) & IPREASS_HASH_MASK)
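
/*
 * IPREASS_HASH() mixes low-order bits of the source address with the
 * (network-order) datagram id, then masks the result down to one of the
 * IPREASS_HASH_SIZE buckets.
 */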

static LIST_HEAD(, ipfr_queue)	ip_frags[IPREASS_HASH_SIZE];
static pool_cache_t	ipfren_cache;
static kmutex_t		ipfr_lock;

/* Number of packets in reassembly queue and total number of fragments. */
static int		ip_nfragpackets;
static int		ip_nfrags;

/* Limits on packets and fragments. */
static int		ip_maxfragpackets;
static int		ip_maxfrags;

/*
 * Cached copy of nmbclusters.  If nmbclusters is different, recalculate
 * IP parameters derived from nmbclusters.
 */
static int		ip_nmbclusters;

/*
 * IP reassembly TTL machinery for multiplicative drop.
 */
static u_int		fragttl_histo[IPFRAGTTL + 1];

static struct sysctllog *ip_reass_sysctllog;

void			sysctl_ip_reass_setup(void);
static void		ip_nmbclusters_changed(void);

static struct mbuf *	ip_reass(ipfr_qent_t *, ipfr_queue_t *, u_int);
static u_int		ip_reass_ttl_decr(u_int ticks);
static void		ip_reass_drophalf(void);
static void		ip_freef(ipfr_queue_t *);

/*
 * ip_reass_init:
 *
 *	Initialization of the IP reassembly mechanism.
 */
void
ip_reass_init(void)
{
	int i;

	ipfren_cache = pool_cache_init(sizeof(ipfr_qent_t), coherency_unit,
	    0, 0, "ipfrenpl", NULL, IPL_NET, NULL, NULL, NULL);
	mutex_init(&ipfr_lock, MUTEX_DEFAULT, IPL_VM);

	for (i = 0; i < IPREASS_HASH_SIZE; i++) {
		LIST_INIT(&ip_frags[i]);
	}
	ip_maxfragpackets = 200;
	ip_maxfrags = 0;
	ip_nmbclusters_changed();

	sysctl_ip_reass_setup();
}

void
sysctl_ip_reass_setup(void)
{

	sysctl_createv(&ip_reass_sysctllog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "inet",
		SYSCTL_DESCR("PF_INET related settings"),
		NULL, 0, NULL, 0,
		CTL_NET, PF_INET, CTL_EOL);
	sysctl_createv(&ip_reass_sysctllog, 0, NULL, NULL,
		CTLFLAG_PERMANENT,
		CTLTYPE_NODE, "ip",
		SYSCTL_DESCR("IPv4 related settings"),
		NULL, 0, NULL, 0,
		CTL_NET, PF_INET, IPPROTO_IP, CTL_EOL);

	sysctl_createv(&ip_reass_sysctllog, 0, NULL, NULL,
		CTLFLAG_PERMANENT|CTLFLAG_READWRITE,
		CTLTYPE_INT, "maxfragpackets",
		SYSCTL_DESCR("Maximum number of fragments to retain for "
			     "possible reassembly"),
		NULL, 0, &ip_maxfragpackets, 0,
		CTL_NET, PF_INET, IPPROTO_IP, IPCTL_MAXFRAGPACKETS, CTL_EOL);
}

#define	CHECK_NMBCLUSTER_PARAMS()				\
do {								\
	if (__predict_false(ip_nmbclusters != nmbclusters))	\
		ip_nmbclusters_changed();			\
} while (/*CONSTCOND*/0)

/*
 * Compute IP limits derived from the value of nmbclusters.
 */
static void
ip_nmbclusters_changed(void)
{
	ip_maxfrags = nmbclusters / 4;
	ip_nmbclusters = nmbclusters;
}

/*
 * ip_reass:
 *
 *	Take an incoming datagram fragment and try to reassemble it into a
 *	whole datagram.  If a chain for reassembly of this datagram already
 *	exists, then it is given as 'fp'; otherwise we have to make a chain.
 */
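/*
 * Note: ip_reass() is called with ipfr_lock held, and it releases the
 * lock on every return path, whether or not a reassembled packet is
 * produced.
 */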
static struct mbuf *
ip_reass(ipfr_qent_t *ipqe, ipfr_queue_t *fp, const u_int hash)
{
	struct ip *ip = ipqe->ipqe_ip, *qip;
	const int hlen = ip->ip_hl << 2;
	struct mbuf *m = ipqe->ipqe_m, *t;
	ipfr_qent_t *nq, *p, *q;
	int i, next;

	KASSERT(mutex_owned(&ipfr_lock));

	/*
	 * Presence of header sizes in mbufs would confuse the code below.
	 */
	m->m_data += hlen;
	m->m_len -= hlen;

#ifdef notyet
	/* Make sure the fragment limit is up-to-date. */
	CHECK_NMBCLUSTER_PARAMS();

	/* If we have too many fragments, drop the older half. */
	if (ip_nfrags >= ip_maxfrags) {
		ip_reass_drophalf();
	}
#endif

	/*
	 * We are about to add a fragment; increment the frag count.
	 */
	ip_nfrags++;

	/*
	 * If this is the first fragment to arrive, create a reassembly queue.
	 */
	if (fp == NULL) {
		/*
		 * Enforce an upper bound on the number of fragmented packets
		 * for which we attempt reassembly: a) if maxfragpackets is 0,
		 * never accept fragments; b) if maxfragpackets is -1, accept
		 * all fragments without limitation.
		 */
		if (ip_maxfragpackets < 0)
			;
		else if (ip_nfragpackets >= ip_maxfragpackets) {
			goto dropfrag;
		}
		fp = malloc(sizeof(ipfr_queue_t), M_FTABLE, M_NOWAIT);
		if (fp == NULL) {
			goto dropfrag;
		}
		ip_nfragpackets++;
		TAILQ_INIT(&fp->ipq_fragq);
		fp->ipq_nfrags = 1;
		fp->ipq_ttl = IPFRAGTTL;
		fp->ipq_p = ip->ip_p;
		fp->ipq_id = ip->ip_id;
		fp->ipq_tos = ip->ip_tos;
		fp->ipq_src = ip->ip_src;
		fp->ipq_dst = ip->ip_dst;
		LIST_INSERT_HEAD(&ip_frags[hash], fp, ipq_q);
		p = NULL;
		goto insert;
	} else {
		fp->ipq_nfrags++;
	}

	/*
	 * Find a segment which begins after this one does.
	 */
	TAILQ_FOREACH(q, &fp->ipq_fragq, ipqe_q) {
		if (ntohs(q->ipqe_ip->ip_off) > ntohs(ip->ip_off))
			break;
	}
	if (q != NULL) {
		p = TAILQ_PREV(q, ipfr_qent_head, ipqe_q);
	} else {
		p = TAILQ_LAST(&fp->ipq_fragq, ipfr_qent_head);
	}

	/*
	 * If there is a preceding segment, it may provide some of our
	 * data already.  If so, drop the data from the incoming segment.
	 * If it provides all of our data, drop us.
	 */
	if (p != NULL) {
		i = ntohs(p->ipqe_ip->ip_off) + ntohs(p->ipqe_ip->ip_len) -
		    ntohs(ip->ip_off);
		if (i > 0) {
			if (i >= ntohs(ip->ip_len)) {
				goto dropfrag;
			}
			m_adj(ipqe->ipqe_m, i);
			ip->ip_off = htons(ntohs(ip->ip_off) + i);
			ip->ip_len = htons(ntohs(ip->ip_len) - i);
		}
	}

	/*
	 * While we overlap succeeding segments trim them or, if they are
	 * completely covered, dequeue them.
	 */
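	/*
	 * For example (illustrative): if this fragment covers bytes
	 * 0-1023 and an existing entry starts at offset 512 with 512
	 * bytes of data, that entry is completely covered and is
	 * dropped; if it instead held 1024 bytes, its first 512 bytes
	 * would be trimmed and its offset advanced to 1024.
	 */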
	while (q != NULL) {
		size_t end;

		qip = q->ipqe_ip;
		end = ntohs(ip->ip_off) + ntohs(ip->ip_len);
		if (end <= ntohs(qip->ip_off)) {
			break;
		}
		i = end - ntohs(qip->ip_off);
		if (i < ntohs(qip->ip_len)) {
			qip->ip_len = htons(ntohs(qip->ip_len) - i);
			qip->ip_off = htons(ntohs(qip->ip_off) + i);
			m_adj(q->ipqe_m, i);
			break;
		}
		nq = TAILQ_NEXT(q, ipqe_q);
		m_freem(q->ipqe_m);
		TAILQ_REMOVE(&fp->ipq_fragq, q, ipqe_q);
		pool_cache_put(ipfren_cache, q);
		fp->ipq_nfrags--;
		ip_nfrags--;
		q = nq;
	}

insert:
	/*
	 * Stick the new segment in its place; check for complete reassembly.
	 */
	if (p == NULL) {
		TAILQ_INSERT_HEAD(&fp->ipq_fragq, ipqe, ipqe_q);
	} else {
		TAILQ_INSERT_AFTER(&fp->ipq_fragq, p, ipqe, ipqe_q);
	}
	next = 0;
	TAILQ_FOREACH(q, &fp->ipq_fragq, ipqe_q) {
		qip = q->ipqe_ip;
		if (ntohs(qip->ip_off) != next) {
			mutex_exit(&ipfr_lock);
			return NULL;
		}
		next += ntohs(qip->ip_len);
	}
	p = TAILQ_LAST(&fp->ipq_fragq, ipfr_qent_head);
	if (p->ipqe_mff) {
		mutex_exit(&ipfr_lock);
		return NULL;
	}

	/*
	 * Reassembly is complete.  Check for a bogus message size.
	 */
	q = TAILQ_FIRST(&fp->ipq_fragq);
	ip = q->ipqe_ip;
	if ((next + (ip->ip_hl << 2)) > IP_MAXPACKET) {
		IP_STATINC(IP_STAT_TOOLONG);
		ip_freef(fp);
		mutex_exit(&ipfr_lock);
		return NULL;
	}
	LIST_REMOVE(fp, ipq_q);
	ip_nfrags -= fp->ipq_nfrags;
	ip_nfragpackets--;
	mutex_exit(&ipfr_lock);

	/* Concatenate all fragments. */
	m = q->ipqe_m;
	t = m->m_next;
	m->m_next = NULL;
	m_cat(m, t);
	nq = TAILQ_NEXT(q, ipqe_q);
	pool_cache_put(ipfren_cache, q);

	for (q = nq; q != NULL; q = nq) {
		t = q->ipqe_m;
		nq = TAILQ_NEXT(q, ipqe_q);
		pool_cache_put(ipfren_cache, q);
		m_remove_pkthdr(t);
		m_cat(m, t);
	}

	/*
	 * Create the header for the new packet by modifying the header of
	 * the first packet.  Dequeue and discard the fragment reassembly
	 * header.  Make the header visible.
	 */
	ip->ip_len = htons((ip->ip_hl << 2) + next);
	ip->ip_src = fp->ipq_src;
	ip->ip_dst = fp->ipq_dst;
	free(fp, M_FTABLE);

	m->m_len += (ip->ip_hl << 2);
	m->m_data -= (ip->ip_hl << 2);

	/* Fix up the mbuf.  XXX This should be done elsewhere. */
	{
		KASSERT(m->m_flags & M_PKTHDR);
		int plen = 0;
		for (t = m; t; t = t->m_next) {
			plen += t->m_len;
		}
		m->m_pkthdr.len = plen;
		m->m_pkthdr.csum_flags = 0;
	}
	return m;

dropfrag:
	if (fp != NULL) {
		fp->ipq_nfrags--;
	}
	ip_nfrags--;
	IP_STATINC(IP_STAT_FRAGDROPPED);
	mutex_exit(&ipfr_lock);

	pool_cache_put(ipfren_cache, ipqe);
	m_freem(m);
	return NULL;
}

/*
 * ip_freef:
 *
 *	Free a fragment reassembly header and all associated datagrams.
 */
static void
ip_freef(ipfr_queue_t *fp)
{
	ipfr_qent_t *q;

	KASSERT(mutex_owned(&ipfr_lock));

	LIST_REMOVE(fp, ipq_q);
	ip_nfrags -= fp->ipq_nfrags;
	ip_nfragpackets--;

	while ((q = TAILQ_FIRST(&fp->ipq_fragq)) != NULL) {
		TAILQ_REMOVE(&fp->ipq_fragq, q, ipqe_q);
		m_freem(q->ipqe_m);
		pool_cache_put(ipfren_cache, q);
	}
	free(fp, M_FTABLE);
}

/*
 * ip_reass_ttl_decr:
 *
 *	Decrement the TTL of all reassembly queue entries by `ticks'.  Count
 *	the number of distinct fragments (as opposed to partial, fragmented
 *	datagrams) in the reassembly queue.  While we traverse the entire
 *	reassembly queue, compute and return the median TTL over all
 *	fragments.
 */
static u_int
ip_reass_ttl_decr(u_int ticks)
{
	u_int nfrags, median, dropfraction, keepfraction;
	ipfr_queue_t *fp, *nfp;
	int i;

	nfrags = 0;
	memset(fragttl_histo, 0, sizeof(fragttl_histo));

	for (i = 0; i < IPREASS_HASH_SIZE; i++) {
		for (fp = LIST_FIRST(&ip_frags[i]); fp != NULL; fp = nfp) {
			fp->ipq_ttl = ((fp->ipq_ttl <= ticks) ?
			    0 : fp->ipq_ttl - ticks);
			nfp = LIST_NEXT(fp, ipq_q);
			if (fp->ipq_ttl == 0) {
				IP_STATINC(IP_STAT_FRAGTIMEOUT);
				ip_freef(fp);
			} else {
				nfrags += fp->ipq_nfrags;
				fragttl_histo[fp->ipq_ttl] += fp->ipq_nfrags;
			}
		}
	}

	KASSERT(ip_nfrags == nfrags);

	/* Find the median (or other drop fraction) in the histogram. */
	dropfraction = (ip_nfrags / 2);
	keepfraction = ip_nfrags - dropfraction;
	for (i = IPFRAGTTL, median = 0; i >= 0; i--) {
		median += fragttl_histo[i];
		if (median >= keepfraction)
			break;
	}

	/* Return the TTL of the median (or other fraction). */
	return (u_int)i;
}
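
/*
 * Example of the multiplicative-decrease step (illustrative): with five
 * fragments whose queues have TTLs 60, 50, 40, 30 and 20,
 * ip_reass_ttl_decr(0) returns the median TTL (40); a second call with
 * ticks = 40 then ages every queue by 40, expiring the three queues at
 * or below the median, i.e. roughly half of the fragments.
 */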
static void
ip_reass_drophalf(void)
{
	u_int median_ticks;

	KASSERT(mutex_owned(&ipfr_lock));

	/*
	 * Compute the median TTL of all fragments, and count the frags
	 * with that TTL or lower (roughly half of all fragments).
	 */
	median_ticks = ip_reass_ttl_decr(0);

	/* Drop half. */
	median_ticks = ip_reass_ttl_decr(median_ticks);
}

/*
 * ip_reass_drain: drain off all datagram fragments.  Do not acquire
 * softnet_lock, as this can be called from hardware interrupt context.
 */
void
ip_reass_drain(void)
{

	/*
	 * We may be called from a device's interrupt context.  If
	 * the ipq is already busy, just bail out now.
	 */
	if (mutex_tryenter(&ipfr_lock)) {
		/*
		 * Drop half the total fragments now.  If more mbufs are
		 * needed, we will be called again soon.
		 */
		ip_reass_drophalf();
		mutex_exit(&ipfr_lock);
	}
}

/*
 * ip_reass_slowtimo:
 *
 *	If a timer expires on a reassembly queue, discard it.
 */
void
ip_reass_slowtimo(void)
{
	static u_int dropscanidx = 0;
	u_int i, median_ttl;

	mutex_enter(&ipfr_lock);

	/* Age the TTL of all fragments by 1 tick. */
	median_ttl = ip_reass_ttl_decr(1);

	/* Make sure the fragment limit is up-to-date. */
	CHECK_NMBCLUSTER_PARAMS();

	/* If we have too many fragments, drop the older half. */
	if (ip_nfrags > ip_maxfrags) {
		ip_reass_ttl_decr(median_ttl);
	}

	/*
	 * If we are over the maximum number of fragmented packets (due to
	 * the limit being lowered), drain off enough to get down to the
	 * new limit.  Start draining from the reassembly hashqueue most
	 * recently drained.
	 */
	if (ip_maxfragpackets < 0)
		;
	else {
		int wrapped = 0;

		i = dropscanidx;
		while (ip_nfragpackets > ip_maxfragpackets && wrapped == 0) {
			while (LIST_FIRST(&ip_frags[i]) != NULL) {
				ip_freef(LIST_FIRST(&ip_frags[i]));
			}
			if (++i >= IPREASS_HASH_SIZE) {
				i = 0;
			}
			/*
			 * Do not scan forever even if the fragment counters
			 * are wrong: stop after scanning the entire
			 * reassembly queue.
			 */
			if (i == dropscanidx) {
				wrapped = 1;
			}
		}
		dropscanidx = i;
	}
	mutex_exit(&ipfr_lock);
}

/*
 * ip_reass_packet: generic routine to perform IP reassembly.
 *
 * => The passed fragment should have the IP_MF flag and/or offset set.
 * => The fragment should not have flags other than IP_MF set.
 *
 * => Returns 0 on success, or an error otherwise.
 * => On completion, m0 represents the constructed final packet.
 */
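/*
 * A sketch of typical use from the input path (illustrative only, not
 * the actual caller):
 *
 *	if (ntohs(ip->ip_off) & (IP_MF | IP_OFFMASK)) {
 *		if (ip_reass_packet(&m, ip) != 0)
 *			goto bad;		(invalid fragment, caller drops it)
 *		if (m == NULL)
 *			return;			(waiting for more fragments)
 *		ip = mtod(m, struct ip *);	(reassembled datagram)
 *	}
 */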
int
ip_reass_packet(struct mbuf **m0, struct ip *ip)
{
	const int hlen = ip->ip_hl << 2;
	const int len = ntohs(ip->ip_len);
	struct mbuf *m = *m0;
	ipfr_queue_t *fp;
	ipfr_qent_t *ipqe;
	u_int hash, off, flen;
	bool mff;

	/*
	 * Prevent TCP blind data attacks by not allowing non-initial
	 * fragments to start at less than 68 bytes (minimal fragment
	 * size) and making sure the first fragment is at least 68
	 * bytes.
	 */
	off = (ntohs(ip->ip_off) & IP_OFFMASK) << 3;
	if ((off > 0 ? off + hlen : len) < IP_MINFRAGSIZE - 1) {
		IP_STATINC(IP_STAT_BADFRAGS);
		return EINVAL;
	}

	if (off + len > IP_MAXPACKET) {
		IP_STATINC(IP_STAT_TOOLONG);
		return EINVAL;
	}

	/*
	 * Fragment length and MF flag.  Make sure that fragments have
	 * a data length which is non-zero and a multiple of 8 bytes.
	 */
	flen = ntohs(ip->ip_len) - hlen;
	mff = (ip->ip_off & htons(IP_MF)) != 0;
	if (mff && (flen == 0 || (flen & 0x7) != 0)) {
		IP_STATINC(IP_STAT_BADFRAGS);
		return EINVAL;
	}

	/*
	 * Adjust the total IP length to not reflect the header and convert
	 * the offset of this fragment to bytes.  XXX: clobbers struct ip.
	 */
	ip->ip_len = htons(flen);
	ip->ip_off = htons(off);

	/* Look for a queue of fragments of this datagram. */
	mutex_enter(&ipfr_lock);
	hash = IPREASS_HASH(ip->ip_src.s_addr, ip->ip_id);
	LIST_FOREACH(fp, &ip_frags[hash], ipq_q) {
		if (ip->ip_id != fp->ipq_id)
			continue;
		if (!in_hosteq(ip->ip_src, fp->ipq_src))
			continue;
		if (!in_hosteq(ip->ip_dst, fp->ipq_dst))
			continue;
		if (ip->ip_p != fp->ipq_p)
			continue;
		break;
	}

	/* Make sure that the TOS matches previous fragments. */
	if (fp && fp->ipq_tos != ip->ip_tos) {
		IP_STATINC(IP_STAT_BADFRAGS);
		mutex_exit(&ipfr_lock);
		return EINVAL;
	}

	/*
	 * Create a new entry and attempt reassembly.
	 */
	IP_STATINC(IP_STAT_FRAGMENTS);
	ipqe = pool_cache_get(ipfren_cache, PR_NOWAIT);
	if (ipqe == NULL) {
		IP_STATINC(IP_STAT_RCVMEMDROP);
		mutex_exit(&ipfr_lock);
		return ENOMEM;
	}
	ipqe->ipqe_mff = mff;
	ipqe->ipqe_m = m;
	ipqe->ipqe_ip = ip;

	*m0 = ip_reass(ipqe, fp, hash);
	if (*m0) {
		/* Note that it was finally reassembled. */
		IP_STATINC(IP_STAT_REASSEMBLED);
	}
	return 0;
}