/*
 * Copyright (c) 1982, 1986 Regents of the University of California.
 * All rights reserved.  The Berkeley software License Agreement
 * specifies the terms and conditions for redistribution.
 *
 *	@(#)uipc_mbuf.c	7.4.1.1 (Berkeley) 11/24/87
 */

#include "../machine/pte.h"

#include "param.h"
#include "dir.h"
#include "user.h"
#include "proc.h"
#include "cmap.h"
#include "map.h"
#include "mbuf.h"
#include "vm.h"
#include "kernel.h"
#include "syslog.h"
#include "domain.h"
#include "protosw.h"

mbinit()
{
	int s;

#if CLBYTES < 4096
#define NCL_INIT	(4096/CLBYTES)
#else
#define NCL_INIT	1
#endif
	s = splimp();
	if (m_clalloc(NCL_INIT, MPG_MBUFS, M_DONTWAIT) == 0)
		goto bad;
	if (m_clalloc(NCL_INIT, MPG_CLUSTERS, M_DONTWAIT) == 0)
		goto bad;
	splx(s);
	return;
bad:
	panic("mbinit");
}

/*
 * Must be called at splimp.
 */
/* ARGSUSED */
caddr_t
m_clalloc(ncl, how, canwait)
	register int ncl;
	int how;
{
	int npg, mbx;
	register struct mbuf *m;
	register int i;
	static int logged;

	npg = ncl * CLSIZE;
	mbx = rmalloc(mbmap, (long)npg);
	if (mbx == 0) {
		if (logged == 0) {
			logged++;
			log(LOG_ERR, "mbuf map full\n");
		}
		return (0);
	}
	m = cltom(mbx * NBPG / MCLBYTES);
	if (memall(&Mbmap[mbx], npg, proc, CSYS) == 0) {
		rmfree(mbmap, (long)npg, (long)mbx);
		return (0);
	}
	vmaccess(&Mbmap[mbx], (caddr_t)m, npg);
	switch (how) {

	case MPG_CLUSTERS:
		ncl = ncl * CLBYTES / MCLBYTES;
		for (i = 0; i < ncl; i++) {
			m->m_off = 0;
			m->m_next = mclfree;
			mclfree = m;
			m += MCLBYTES / sizeof (*m);
			mbstat.m_clfree++;
		}
		mbstat.m_clusters += ncl;
		break;

	case MPG_MBUFS:
		for (i = ncl * CLBYTES / sizeof (*m); i > 0; i--) {
			m->m_off = 0;
			m->m_type = MT_DATA;
			mbstat.m_mtypes[MT_DATA]++;
			mbstat.m_mbufs++;
			(void) m_free(m);
			m++;
		}
		break;

	case MPG_SPACE:
		mbstat.m_space++;
		break;
	}
	return ((caddr_t)m);
}

m_pgfree(addr, n)
	caddr_t addr;
	int n;
{

#ifdef lint
	addr = addr; n = n;
#endif
}

/*
 * Must be called at splimp.
 */
m_expand(canwait)
	int canwait;
{
	register struct domain *dp;
	register struct protosw *pr;
	int tries;

	for (tries = 0;; ) {
		if (m_clalloc(1, MPG_MBUFS, canwait))
			return (1);
		if (canwait == 0 || tries++)
			return (0);

		/* ask protocols to free space */
		for (dp = domains; dp; dp = dp->dom_next)
			for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW;
			    pr++)
				if (pr->pr_drain)
					(*pr->pr_drain)();
		mbstat.m_drain++;
	}
}

/* NEED SOME WAY TO RELEASE SPACE */

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
struct mbuf *
m_get(canwait, type)
	int canwait, type;
{
	register struct mbuf *m;

	MGET(m, canwait, type);
	return (m);
}

struct mbuf *
m_getclr(canwait, type)
	int canwait, type;
{
	register struct mbuf *m;

	MGET(m, canwait, type);
	if (m == 0)
		return (0);
	bzero(mtod(m, caddr_t), MLEN);
	return (m);
}

struct mbuf *
m_free(m)
	struct mbuf *m;
{
	register struct mbuf *n;

	MFREE(m, n);
	return (n);
}
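
#ifdef notdef
/*
 * Illustrative sketch, not part of the original file: the usual
 * allocate/use/free pattern for a caller that needs a single small
 * mbuf of private data.  "example_mbuf_usage" and "example_len" are
 * hypothetical names; any length up to MLEN fits in the data area of
 * one small mbuf.  Allocation with M_DONTWAIT may fail, so the null
 * return must be checked.
 */
example_mbuf_usage(example_len)
	int example_len;
{
	register struct mbuf *m;

	if (example_len > MLEN)
		return (0);
	m = m_get(M_DONTWAIT, MT_DATA);		/* may return 0 */
	if (m == 0)
		return (0);
	m->m_len = example_len;
	bzero(mtod(m, caddr_t), (unsigned)example_len);
	/* ... fill and use mtod(m, caddr_t) here ... */
	(void) m_free(m);			/* returns m->m_next, here 0 */
	return (1);
}
#endif /* notdef */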

/*
 * Get more mbufs; called from MGET macro if mfree list is empty.
 * Must be called at splimp.
 */
/*ARGSUSED*/
struct mbuf *
m_more(canwait, type)
	int canwait, type;
{
	register struct mbuf *m;

	while (m_expand(canwait) == 0) {
		if (canwait == M_WAIT) {
			mbstat.m_wait++;
			m_want++;
			sleep((caddr_t)&mfree, PZERO - 1);
		} else {
			mbstat.m_drops++;
			return (NULL);
		}
	}
#define m_more(x,y) (panic("m_more"), (struct mbuf *)0)
	MGET(m, canwait, type);
#undef m_more
	return (m);
}

m_freem(m)
	register struct mbuf *m;
{
	register struct mbuf *n;
	register int s;

	if (m == NULL)
		return;
	s = splimp();
	do {
		MFREE(m, n);
	} while (m = n);
	splx(s);
}

/*
 * Mbuffer utility routines.
 */

/*
 * Make a copy of an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to end of mbuf.
 * Should get M_WAIT/M_DONTWAIT from caller.
 */
struct mbuf *
m_copy(m, off, len)
	register struct mbuf *m;
	int off;
	register int len;
{
	register struct mbuf *n, **np;
	struct mbuf *top, *p;

	if (len == 0)
		return (0);
	if (off < 0 || len < 0)
		panic("m_copy");
	while (off > 0) {
		if (m == 0)
			panic("m_copy");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == 0) {
			if (len != M_COPYALL)
				panic("m_copy");
			break;
		}
		MGET(n, M_DONTWAIT, m->m_type);
		*np = n;
		if (n == 0)
			goto nospace;
		n->m_len = MIN(len, m->m_len - off);
		if (m->m_off > MMAXOFF) {
			p = mtod(m, struct mbuf *);
			n->m_off = ((int)p - (int)n) + off;
			mclrefcnt[mtocl(p)]++;
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	return (top);
nospace:
	m_freem(top);
	return (0);
}

m_cat(m, n)
	register struct mbuf *m, *n;
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_off >= MMAXOFF ||
		    m->m_off + m->m_len + n->m_len > MMAXOFF) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}
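
#ifdef notdef
/*
 * Illustrative sketch, not part of the original file: duplicating a
 * whole chain with m_copy and appending another chain with m_cat.
 * "example_copy_and_cat", "m" and "n" are hypothetical.  m_copy uses
 * M_DONTWAIT internally and so may return 0; cluster data in the
 * source chain is shared by reference rather than copied.  After
 * m_cat, the mbufs of n have either been freed (their data copied
 * into the tail of copy) or linked onto copy, so one m_freem covers
 * the whole result.
 */
example_copy_and_cat(m, n)
	register struct mbuf *m, *n;
{
	register struct mbuf *copy;

	copy = m_copy(m, 0, M_COPYALL);		/* duplicate entire chain */
	if (copy == 0)
		return (0);
	m_cat(copy, n);				/* n is consumed or linked */
	m_freem(copy);
	return (1);
}
#endif /* notdef */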

m_adj(mp, len)
	struct mbuf *mp;
	register int len;
{
	register struct mbuf *m;
	register count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_off += len;
				break;
			}
		}
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			return;
		}
		count -= len;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		for (m = mp; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		while (m = m->m_next)
			m->m_len = 0;
	}
}

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to MPULL_EXTRA bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
struct mbuf *
m_pullup(n, len)
	register struct mbuf *n;
	int len;
{
	register struct mbuf *m;
	register int count;
	int space;

	if (n->m_off + len <= MMAXOFF && n->m_next) {
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == 0)
			goto bad;
		m->m_len = 0;
	}
	space = MMAXOFF - m->m_off;
	do {
		count = MIN(MIN(space - m->m_len, len + MPULL_EXTRA), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t)+m->m_len,
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		if (n->m_len)
			n->m_off += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	return (0);
}
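
#ifdef notdef
/*
 * Illustrative sketch, not part of the original file: the common
 * protocol-input idiom for m_pullup.  "example_pullup_usage" and
 * "struct example_hdr" are hypothetical.  On success, mtod() may be
 * used to reference a contiguous structure of the requested size;
 * on failure m_pullup has already freed the chain and returned 0.
 */
struct mbuf *
example_pullup_usage(m)
	register struct mbuf *m;
{
	register struct example_hdr *eh;

	if (m->m_len < sizeof (struct example_hdr) &&
	    (m = m_pullup(m, sizeof (struct example_hdr))) == 0)
		return (0);			/* chain already freed */
	eh = mtod(m, struct example_hdr *);
	/* ... eh now points at a contiguous header ... */
	return (m);
}
#endif /* notdef */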