/*
 * Copyright (c) 1982, 1986, 1988 Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms are permitted
 * provided that this notice is preserved and that due credit is given
 * to the University of California at Berkeley. The name of the University
 * may not be used to endorse or promote products derived from this
 * software without specific prior written permission. This software
 * is provided ``as is'' without express or implied warranty.
 *
 *	@(#)uipc_mbuf.c	7.4.1.2 (Berkeley) 02/08/88
 */

#include "../machine/pte.h"

#include "param.h"
#include "dir.h"
#include "user.h"
#include "proc.h"
#include "cmap.h"
#include "map.h"
#include "mbuf.h"
#include "vm.h"
#include "kernel.h"
#include "syslog.h"
#include "domain.h"
#include "protosw.h"

mbinit()
{
	int s;

#if CLBYTES < 4096
#define NCL_INIT	(4096/CLBYTES)
#else
#define NCL_INIT	1
#endif
	s = splimp();
	if (m_clalloc(NCL_INIT, MPG_MBUFS, M_DONTWAIT) == 0)
		goto bad;
	if (m_clalloc(NCL_INIT, MPG_CLUSTERS, M_DONTWAIT) == 0)
		goto bad;
	splx(s);
	return;
bad:
	panic("mbinit");
}

/*
 * Must be called at splimp.
 */
/* ARGSUSED */
caddr_t
m_clalloc(ncl, how, canwait)
	register int ncl;
	int how;
{
	int npg, mbx;
	register struct mbuf *m;
	register int i;
	static int logged;

	npg = ncl * CLSIZE;
	mbx = rmalloc(mbmap, (long)npg);
	if (mbx == 0) {
		if (logged == 0) {
			logged++;
			log(LOG_ERR, "mbuf map full\n");
		}
		return (0);
	}
	m = cltom(mbx * NBPG / MCLBYTES);
	if (memall(&Mbmap[mbx], npg, proc, CSYS) == 0) {
		rmfree(mbmap, (long)npg, (long)mbx);
		return (0);
	}
	vmaccess(&Mbmap[mbx], (caddr_t)m, npg);
	switch (how) {

	case MPG_CLUSTERS:
		ncl = ncl * CLBYTES / MCLBYTES;
		for (i = 0; i < ncl; i++) {
			m->m_off = 0;
			m->m_next = mclfree;
			mclfree = m;
			m += MCLBYTES / sizeof (*m);
			mbstat.m_clfree++;
		}
		mbstat.m_clusters += ncl;
		break;

	case MPG_MBUFS:
		for (i = ncl * CLBYTES / sizeof (*m); i > 0; i--) {
			m->m_off = 0;
			m->m_type = MT_DATA;
			mbstat.m_mtypes[MT_DATA]++;
			mbstat.m_mbufs++;
			(void) m_free(m);
			m++;
		}
		break;

	case MPG_SPACE:
		mbstat.m_space++;
		break;
	}
	return ((caddr_t)m);
}

m_pgfree(addr, n)
	caddr_t addr;
	int n;
{

#ifdef lint
	addr = addr; n = n;
#endif
}

/*
 * Must be called at splimp.
 */
m_expand(canwait)
	int canwait;
{
	register struct domain *dp;
	register struct protosw *pr;
	int tries;

	for (tries = 0;; ) {
		if (m_clalloc(1, MPG_MBUFS, canwait))
			return (1);
		if (canwait == 0 || tries++)
			return (0);

		/* ask protocols to free space */
		for (dp = domains; dp; dp = dp->dom_next)
			for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW;
			    pr++)
				if (pr->pr_drain)
					(*pr->pr_drain)();
		mbstat.m_drain++;
	}
}

/* NEED SOME WAY TO RELEASE SPACE */
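/*
 * Illustrative sketch only, compiled out under "notdef": a minimal
 * example of growing the mbuf pool directly with m_clalloc, one
 * cluster's worth of mbufs at a time, in the same way m_expand does.
 * The name example_grow_mbuf_pool is a placeholder used only here;
 * splimp protection is required, as noted above m_clalloc.
 */
#ifdef notdef
example_grow_mbuf_pool()
{
	int s;

	s = splimp();			/* m_clalloc must run at splimp */
	if (m_clalloc(1, MPG_MBUFS, M_DONTWAIT) == 0)
		log(LOG_ERR, "example: could not grow mbuf pool\n");
	splx(s);
}
#endif /* notdef */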
/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
struct mbuf *
m_get(canwait, type)
	int canwait, type;
{
	register struct mbuf *m;

	MGET(m, canwait, type);
	return (m);
}

struct mbuf *
m_getclr(canwait, type)
	int canwait, type;
{
	register struct mbuf *m;

	MGET(m, canwait, type);
	if (m == 0)
		return (0);
	bzero(mtod(m, caddr_t), MLEN);
	return (m);
}

struct mbuf *
m_free(m)
	struct mbuf *m;
{
	register struct mbuf *n;

	MFREE(m, n);
	return (n);
}

/*
 * Get more mbufs; called from MGET macro if mfree list is empty.
 * Must be called at splimp.
 */
/*ARGSUSED*/
struct mbuf *
m_more(canwait, type)
	int canwait, type;
{
	register struct mbuf *m;

	while (m_expand(canwait) == 0) {
		if (canwait == M_WAIT) {
			mbstat.m_wait++;
			m_want++;
			sleep((caddr_t)&mfree, PZERO - 1);
		} else {
			mbstat.m_drops++;
			return (NULL);
		}
	}
#define m_more(x,y) (panic("m_more"), (struct mbuf *)0)
	MGET(m, canwait, type);
#undef m_more
	return (m);
}

m_freem(m)
	register struct mbuf *m;
{
	register struct mbuf *n;
	register int s;

	if (m == NULL)
		return;
	s = splimp();
	do {
		MFREE(m, n);
	} while (m = n);
	splx(s);
}

/*
 * Mbuffer utility routines.
 */

/*
 * Make a copy of an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to the end of
 * the mbuf chain.  Should get M_WAIT/M_DONTWAIT from caller.
 */
struct mbuf *
m_copy(m, off, len)
	register struct mbuf *m;
	int off;
	register int len;
{
	register struct mbuf *n, **np;
	struct mbuf *top, *p;

	if (len == 0)
		return (0);
	if (off < 0 || len < 0)
		panic("m_copy");
	while (off > 0) {
		if (m == 0)
			panic("m_copy");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == 0) {
			if (len != M_COPYALL)
				panic("m_copy");
			break;
		}
		MGET(n, M_DONTWAIT, m->m_type);
		*np = n;
		if (n == 0)
			goto nospace;
		n->m_len = MIN(len, m->m_len - off);
		if (m->m_off > MMAXOFF) {
			p = mtod(m, struct mbuf *);
			n->m_off = ((int)p - (int)n) + off;
			mclrefcnt[mtocl(p)]++;
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	return (top);
nospace:
	m_freem(top);
	return (0);
}

m_cat(m, n)
	register struct mbuf *m, *n;
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_off >= MMAXOFF ||
		    m->m_off + m->m_len + n->m_len > MMAXOFF) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}
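/*
 * Illustrative sketch only, compiled out under "notdef": duplicating a
 * whole chain with m_copy and M_COPYALL, then releasing the copy with
 * m_freem.  The name example_copy_and_free is a placeholder.  For
 * cluster mbufs, m_copy shares the cluster page (bumping mclrefcnt)
 * rather than copying the data, so such copies are cheap.
 */
#ifdef notdef
example_copy_and_free(m)
	register struct mbuf *m;
{
	register struct mbuf *n;

	n = m_copy(m, 0, M_COPYALL);	/* 0 if no mbufs were available */
	if (n)
		m_freem(n);		/* frees the entire copied chain */
}
#endif /* notdef */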
m_adj(mp, len)
	struct mbuf *mp;
	register int len;
{
	register struct mbuf *m;
	register count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_off += len;
				break;
			}
		}
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			return;
		}
		count -= len;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		for (m = mp; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		while (m = m->m_next)
			m->m_len = 0;
	}
}

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to MPULL_EXTRA bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
struct mbuf *
m_pullup(n, len)
	register struct mbuf *n;
	int len;
{
	register struct mbuf *m;
	register int count;
	int space;

	if (n->m_off + len <= MMAXOFF && n->m_next) {
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == 0)
			goto bad;
		m->m_len = 0;
	}
	space = MMAXOFF - m->m_off;
	do {
		count = MIN(MIN(space - m->m_len, len + MPULL_EXTRA), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t)+m->m_len,
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		if (n->m_len)
			n->m_off += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	return (0);
}
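/*
 * Illustrative sketch only, compiled out under "notdef": the usual
 * m_pullup idiom, as a protocol input routine might apply it to make
 * "hlen" header bytes contiguous before using mtod.  The name
 * example_pullup_header and the parameter hlen are placeholders.
 */
#ifdef notdef
struct mbuf *
example_pullup_header(m, hlen)
	register struct mbuf *m;
	int hlen;
{

	if (m->m_len < hlen && (m = m_pullup(m, hlen)) == 0)
		return (0);		/* m_pullup already freed the chain */
	/* mtod(m, caddr_t) now addresses at least hlen contiguous bytes */
	return (m);
}
#endif /* notdef */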