1 /* 2 * Copyright (c) 1982, 1986, 1988 Regents of the University of California. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms are permitted 6 * provided that this notice is preserved and that due credit is given 7 * to the University of California at Berkeley. The name of the University 8 * may not be used to endorse or promote products derived from this 9 * software without specific prior written permission. This software 10 * is provided ``as is'' without express or implied warranty. 11 * 12 * @(#)uipc_mbuf.c 7.8 (Berkeley) 04/09/88 13 */ 14 15 #include "../machine/pte.h" 16 17 #include "param.h" 18 #include "dir.h" 19 #include "user.h" 20 #include "proc.h" 21 #include "cmap.h" 22 #include "map.h" 23 #include "mbuf.h" 24 #include "vm.h" 25 #include "kernel.h" 26 #include "syslog.h" 27 #include "domain.h" 28 #include "protosw.h" 29 30 mbinit() 31 { 32 int s; 33 34 #if CLBYTES < 4096 35 #define NCL_INIT (4096/CLBYTES) 36 #else 37 #define NCL_INIT 1 38 #endif 39 s = splimp(); 40 if (m_clalloc(NCL_INIT, MPG_MBUFS, M_DONTWAIT) == 0) 41 goto bad; 42 if (m_clalloc(NCL_INIT, MPG_CLUSTERS, M_DONTWAIT) == 0) 43 goto bad; 44 splx(s); 45 return; 46 bad: 47 panic("mbinit"); 48 } 49 50 /* 51 * Must be called at splimp. 
 */
/* ARGSUSED */
/*
 * Allocate "ncl" clusters' worth of kernel virtual space and physical
 * memory for the mbuf pool, then carve it up according to "how"
 * (MPG_MBUFS: small mbufs placed on the free list via m_free;
 * MPG_CLUSTERS: cluster buffers threaded onto mclfree).
 * Returns the (caddr_t) address past/at the new space, or 0 on failure.
 * Must be called at splimp (see mbinit/m_expand callers).
 * NOTE(review): "canwait" is accepted but unused here (hence ARGSUSED)
 * and is implicitly typed int by K&R rules — no declaration given.
 */
caddr_t
m_clalloc(ncl, how, canwait)
	register int ncl;
	int how;
{
	int npg, mbx;
	register struct mbuf *m;
	register int i;
	static int logged;	/* log "map full" only once, not per failure */

	npg = ncl * CLSIZE;
	/* Reserve npg pages of mbuf-map virtual space. */
	mbx = rmalloc(mbmap, (long)npg);
	if (mbx == 0) {
		if (logged == 0) {
			logged++;
			log(LOG_ERR, "mbuf map full\n");
		}
		return (0);
	}
	/* Convert map index to the virtual address of the new space. */
	m = cltom(mbx * NBPG / MCLBYTES);
	/* Back the virtual space with physical pages; undo on failure. */
	if (memall(&Mbmap[mbx], npg, proc, CSYS) == 0) {
		rmfree(mbmap, (long)npg, (long)mbx);
		return (0);
	}
	/* Validate the new page-table entries so the space is usable. */
	vmaccess(&Mbmap[mbx], (caddr_t)m, npg);
	switch (how) {

	case MPG_CLUSTERS:
		/* Recount in cluster-buffer units, then chain each
		 * MCLBYTES-sized buffer onto the cluster free list. */
		ncl = ncl * CLBYTES / MCLBYTES;
		for (i = 0; i < ncl; i++) {
			m->m_off = 0;
			m->m_next = mclfree;
			mclfree = m;
			m += MCLBYTES / sizeof (*m);
			mbstat.m_clfree++;
		}
		mbstat.m_clusters += ncl;
		break;

	case MPG_MBUFS:
		/* Initialize each small mbuf and release it to the free
		 * list; the MT_DATA count is pre-bumped because m_free
		 * decrements it. */
		for (i = ncl * CLBYTES / sizeof (*m); i > 0; i--) {
			m->m_off = 0;
			m->m_type = MT_DATA;
			mbstat.m_mtypes[MT_DATA]++;
			mbstat.m_mbufs++;
			(void) m_free(m);
			m++;
		}
		break;
	}
	return ((caddr_t)m);
}

/*
 * Release mbuf pages back to the system.
 * Currently a stub — see "NEED SOME WAY TO RELEASE SPACE" below;
 * the lint block merely silences unused-argument warnings.
 */
m_pgfree(addr, n)
	caddr_t addr;
	int n;
{

#ifdef lint
	addr = addr; n = n;
#endif
}

/*
 * Must be called at splimp.
 */
/*
 * Try to add one cluster's worth of small mbufs to the pool.
 * If the first attempt fails and the caller can wait, ask every
 * protocol's drain routine to free space and retry once.
 * Returns 1 on success, 0 on failure.
 */
m_expand(canwait)
	int canwait;
{
	register struct domain *dp;
	register struct protosw *pr;
	int tries;

	for (tries = 0;; ) {
		if (m_clalloc(1, MPG_MBUFS, canwait))
			return (1);
		/* Give up after the post-drain retry (tries++ is 0 only
		 * on the first failure). */
		if (canwait == 0 || tries++)
			return (0);

		/* ask protocols to free space */
		for (dp = domains; dp; dp = dp->dom_next)
			for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW;
			    pr++)
				if (pr->pr_drain)
					(*pr->pr_drain)();
		mbstat.m_drain++;
	}
}

/* NEED SOME WAY TO RELEASE SPACE */

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
149 */ 150 struct mbuf * 151 m_get(canwait, type) 152 int canwait, type; 153 { 154 register struct mbuf *m; 155 156 MGET(m, canwait, type); 157 return (m); 158 } 159 160 struct mbuf * 161 m_getclr(canwait, type) 162 int canwait, type; 163 { 164 register struct mbuf *m; 165 166 MGET(m, canwait, type); 167 if (m == 0) 168 return (0); 169 bzero(mtod(m, caddr_t), MLEN); 170 return (m); 171 } 172 173 struct mbuf * 174 m_free(m) 175 struct mbuf *m; 176 { 177 register struct mbuf *n; 178 179 MFREE(m, n); 180 return (n); 181 } 182 183 /* 184 * Get more mbufs; called from MGET macro if mfree list is empty. 185 * Must be called at splimp. 186 */ 187 /*ARGSUSED*/ 188 struct mbuf * 189 m_more(canwait, type) 190 int canwait, type; 191 { 192 register struct mbuf *m; 193 194 while (m_expand(canwait) == 0) { 195 if (canwait == M_WAIT) { 196 mbstat.m_wait++; 197 m_want++; 198 sleep((caddr_t)&mfree, PZERO - 1); 199 if (mfree) 200 break; 201 } else { 202 mbstat.m_drops++; 203 return (NULL); 204 } 205 } 206 #define m_more(x,y) (panic("m_more"), (struct mbuf *)0) 207 MGET(m, canwait, type); 208 #undef m_more 209 return (m); 210 } 211 212 m_freem(m) 213 register struct mbuf *m; 214 { 215 register struct mbuf *n; 216 register int s; 217 218 if (m == NULL) 219 return; 220 s = splimp(); 221 do { 222 MFREE(m, n); 223 } while (m = n); 224 splx(s); 225 } 226 227 /* 228 * Mbuffer utility routines. 229 */ 230 231 /* 232 /* 233 * Make a copy of an mbuf chain starting "off" bytes from the beginning, 234 * continuing for "len" bytes. If len is M_COPYALL, copy to end of mbuf. 235 * Should get M_WAIT/M_DONTWAIT from caller. 
 */
/*
 * Returns the head of the new chain, or 0 if len == 0 or allocation
 * fails.  Cluster-backed data is shared by reference count rather than
 * copied.  Panics if off/len are negative or run past the chain
 * (except that len == M_COPYALL stops quietly at end of chain).
 */
struct mbuf *
m_copy(m, off, len)
	register struct mbuf *m;
	int off;
	register int len;
{
	register struct mbuf *n, **np;
	struct mbuf *top, *p;

	if (len == 0)
		return (0);
	if (off < 0 || len < 0)
		panic("m_copy");
	/* Skip whole mbufs until "off" falls within the current one. */
	while (off > 0) {
		if (m == 0)
			panic("m_copy");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;	/* NOTE(review): was mis-encoded as "⊤" */
	top = 0;
	while (len > 0) {
		if (m == 0) {
			if (len != M_COPYALL)
				panic("m_copy");
			break;
		}
		MGET(n, M_DONTWAIT, m->m_type);
		*np = n;
		if (n == 0)
			goto nospace;
		n->m_len = MIN(len, m->m_len - off);
		if (m->m_off > MMAXOFF) {
			/*
			 * Source data lives in a cluster: share it by
			 * pointing the copy at the same cluster and
			 * bumping the cluster reference count.  The
			 * offset is stored relative to the new mbuf.
			 */
			p = mtod(m, struct mbuf *);
			n->m_off = ((int)p - (int)n) + off;
			mclrefcnt[mtocl(p)]++;
		} else
			/* Small mbuf: copy the bytes directly. */
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;	/* offset applies only to the first mbuf */
		m = m->m_next;
		np = &n->m_next;
	}
	return (top);
nospace:
	m_freem(top);
	return (0);
}

/*
 * Copy data from an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes, into the indicated buffer.
 */
/*
 * Panics rather than returning short if the chain does not contain
 * off + len bytes of data.  "cp" must point to at least len bytes.
 */
m_copydata(m, off, len, cp)
	register struct mbuf *m;
	register int off;
	register int len;
	caddr_t cp;
{
	register unsigned count;

	if (off < 0 || len < 0)
		panic("m_copydata");
	/* Skip whole mbufs until "off" falls within the current one. */
	while (off > 0) {
		if (m == 0)
			panic("m_copydata");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	while (len > 0) {
		if (m == 0)
			panic("m_copydata");
		count = MIN(m->m_len - off, len);
		bcopy(mtod(m, caddr_t) + off, cp, count);
		len -= count;
		cp += count;
		off = 0;	/* offset applies only to the first mbuf */
		m = m->m_next;
	}
}

/*
 * Concatenate chain "n" onto the tail of chain "m".  Data is compacted
 * into m's last mbuf while it fits below MMAXOFF (small mbufs only);
 * once it no longer fits, the remainder of n is simply linked on.
 */
m_cat(m, n)
	register struct mbuf *m, *n;
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		/* A cluster mbuf (m_off >= MMAXOFF) or insufficient room
		 * means no compaction is possible. */
		if (m->m_off >= MMAXOFF ||
		    m->m_off + m->m_len + n->m_len > MMAXOFF) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

/*
 * Trim "len" bytes of data from the chain: from the head if len is
 * positive, from the tail if negative.  Head-trimmed mbufs are left
 * on the chain with m_len == 0 rather than freed.
 */
m_adj(mp, len)
	struct mbuf *mp;
	register int len;
{
	register struct mbuf *m;
	register count;	/* implicitly int (K&R) */

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/* Trim from the front: zero out whole mbufs, then take
		 * the remainder off the first partially-trimmed one. */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_off += len;
				break;
			}
		}
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			return;
		}
		count -= len;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		for (m = mp; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		/* Zero out everything past the new end of data. */
		while (m = m->m_next)
			m->m_len = 0;
	}
}

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to MPULL_EXTRA bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
struct mbuf *
m_pullup(n, len)
	register struct mbuf *n;
	int len;
{
	register struct mbuf *m;
	register int count;
	int space;

	if (n->m_off + len <= MMAXOFF && n->m_next) {
		/* First mbuf has room below MMAXOFF: pull data up into it. */
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		/* Otherwise start a fresh mbuf; len must fit in one. */
		if (len > MLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == 0)
			goto bad;
		m->m_len = 0;
	}
	space = MMAXOFF - m->m_off;
	do {
		/* Copy what is needed plus up to MPULL_EXTRA slack,
		 * bounded by the room left in m and the data in n. */
		count = MIN(MIN(space - m->m_len, len + MPULL_EXTRA), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t)+m->m_len,
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		if (n->m_len)
			n->m_off += count;
		else
			n = m_free(n);	/* source mbuf drained: free it */
	} while (len > 0 && n);
	if (len > 0) {
		/* Chain ran out before len bytes were gathered. */
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	return (0);
}