/*
 * Copyright (c) 1982, 1986 Regents of the University of California.
 * All rights reserved.  The Berkeley software License Agreement
 * specifies the terms and conditions for redistribution.
 *
 *	@(#)uipc_mbuf.c	7.2 (Berkeley) 02/02/87
 */

#include "../machine/pte.h"

#include "param.h"
#include "dir.h"
#include "user.h"
#include "proc.h"
#include "cmap.h"
#include "map.h"
#include "mbuf.h"
#include "vm.h"
#include "kernel.h"
#include "syslog.h"
#include "domain.h"
#include "protosw.h"

/*
 * Allocate the initial pools of mbufs and clusters at boot time.
 */
mbinit()
{
	int s;

	s = splimp();
	if (m_clalloc(4096/CLBYTES, MPG_MBUFS, M_DONTWAIT) == 0)
		goto bad;
	if (m_clalloc(4096/CLBYTES, MPG_CLUSTERS, M_DONTWAIT) == 0)
		goto bad;
	splx(s);
	return;
bad:
	panic("mbinit");
}

/*
 * Allocate ncl page clusters from the mbuf map, wire them down, and
 * carve them into mbufs, clusters, or raw space according to "how".
 * Must be called at splimp.
 */
/* ARGSUSED */
caddr_t
m_clalloc(ncl, how, canwait)
	register int ncl;
	int how;
	int canwait;
{
	int npg, mbx;
	register struct mbuf *m;
	register int i;
	static int logged;

	npg = ncl * CLSIZE;
	mbx = rmalloc(mbmap, (long)npg);
	if (mbx == 0) {
		if (logged == 0) {
			logged++;
			log(LOG_ERR, "mbuf map full\n");
		}
		return (0);
	}
	m = cltom(mbx / CLSIZE);
	if (memall(&Mbmap[mbx], npg, proc, CSYS) == 0) {
		rmfree(mbmap, (long)npg, (long)mbx);
		return (0);
	}
	vmaccess(&Mbmap[mbx], (caddr_t)m, npg);
	switch (how) {

	case MPG_CLUSTERS:
		for (i = 0; i < ncl; i++) {
			m->m_off = 0;
			m->m_next = mclfree;
			mclfree = m;
			m += CLBYTES / sizeof (*m);
			mbstat.m_clfree++;
		}
		mbstat.m_clusters += ncl;
		break;

	case MPG_MBUFS:
		for (i = ncl * CLBYTES / sizeof (*m); i > 0; i--) {
			m->m_off = 0;
			m->m_type = MT_DATA;
			mbstat.m_mtypes[MT_DATA]++;
			mbstat.m_mbufs++;
			(void) m_free(m);
			m++;
		}
		break;

	case MPG_SPACE:
		mbstat.m_space++;
		break;
	}
	return ((caddr_t)m);
}

/*
 * Release pages of mbuf space; currently a no-op (see the note below).
 */
m_pgfree(addr, n)
	caddr_t addr;
	int n;
{

#ifdef lint
	addr = addr; n = n;
#endif
}

/*
 * Try to obtain more mbufs, asking the protocols to release cached
 * space if the first attempt fails.
 * Must be called at splimp.
 */
m_expand(canwait)
	int canwait;
{
	register struct domain *dp;
	register struct protosw *pr;
	int tries;

	for (tries = 0;; ) {
		if (m_clalloc(1, MPG_MBUFS, canwait))
			return (1);
		if (canwait == 0 || tries++)
			return (0);

		/* ask protocols to free space */
		for (dp = domains; dp; dp = dp->dom_next)
			for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW;
			    pr++)
				if (pr->pr_drain)
					(*pr->pr_drain)();
		mbstat.m_drain++;
	}
}

/* NEED SOME WAY TO RELEASE SPACE */

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
struct mbuf *
m_get(canwait, type)
	int canwait, type;
{
	register struct mbuf *m;

	MGET(m, canwait, type);
	return (m);
}

struct mbuf *
m_getclr(canwait, type)
	int canwait, type;
{
	register struct mbuf *m;

	MGET(m, canwait, type);
	if (m == 0)
		return (0);
	bzero(mtod(m, caddr_t), MLEN);
	return (m);
}

struct mbuf *
m_free(m)
	struct mbuf *m;
{
	register struct mbuf *n;

	MFREE(m, n);
	return (n);
}
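/*
 * Example (not compiled): a minimal sketch of the usual calling
 * pattern for the allocation routines above.  A caller grabs an mbuf
 * with m_get (or MGET on critical paths), fills the data area through
 * mtod, and releases it with m_free or m_freem.  The function name
 * and the ten-byte payload are made up for illustration.
 */
#ifdef notdef
example_mget()
{
	register struct mbuf *m;

	m = m_get(M_DONTWAIT, MT_DATA);	/* 0 if the mbuf map is exhausted */
	if (m == 0)
		return;
	m->m_len = 10;
	bcopy("0123456789", mtod(m, caddr_t), (unsigned)m->m_len);
	/* ... hand the chain to a protocol, or give it back ... */
	m_freem(m);
}
#endif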
/*
 * Get more mbufs; called from MGET macro if mfree list is empty.
 * Must be called at splimp.
 */
/*ARGSUSED*/
struct mbuf *
m_more(canwait, type)
	int canwait, type;
{
	register struct mbuf *m;

	while (m_expand(canwait) == 0) {
		if (canwait == M_WAIT) {
			mbstat.m_wait++;
			m_want++;
			sleep((caddr_t)&mfree, PZERO - 1);
		} else {
			mbstat.m_drops++;
			return (NULL);
		}
	}
#define m_more(x,y) (panic("m_more"), (struct mbuf *)0)
	MGET(m, canwait, type);
#undef m_more
	return (m);
}

/*
 * Free an entire mbuf chain.
 */
m_freem(m)
	register struct mbuf *m;
{
	register struct mbuf *n;
	register int s;

	if (m == NULL)
		return;
	s = splimp();
	do {
		MFREE(m, n);
	} while (m = n);
	splx(s);
}

/*
 * Mbuffer utility routines.
 */

/*
 * Make a copy of an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to the end of
 * the chain.  Cluster data is shared by reference rather than copied.
 * Should get M_WAIT/M_DONTWAIT from caller.
 */
struct mbuf *
m_copy(m, off, len)
	register struct mbuf *m;
	int off;
	register int len;
{
	register struct mbuf *n, **np;
	struct mbuf *top, *p;

	if (len == 0)
		return (0);
	if (off < 0 || len < 0)
		panic("m_copy");
	while (off > 0) {
		if (m == 0)
			panic("m_copy");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == 0) {
			if (len != M_COPYALL)
				panic("m_copy");
			break;
		}
		MGET(n, M_DONTWAIT, m->m_type);
		*np = n;
		if (n == 0)
			goto nospace;
		n->m_len = MIN(len, m->m_len - off);
		if (m->m_off > MMAXOFF) {
			/* cluster mbuf: share the data, bump the ref count */
			p = mtod(m, struct mbuf *);
			n->m_off = ((int)p - (int)n) + off;
			mclrefcnt[mtocl(p)]++;
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	return (top);
nospace:
	m_freem(top);
	return (0);
}

/*
 * Concatenate chain n onto the end of chain m, copying data forward
 * into m's last mbuf while it fits, otherwise just linking the chains.
 */
m_cat(m, n)
	register struct mbuf *m, *n;
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_off >= MMAXOFF ||
		    m->m_off + m->m_len + n->m_len > MMAXOFF) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}
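/*
 * Example (not compiled): a minimal sketch of how m_copy and m_cat are
 * typically combined.  A sender that must keep its data queued (e.g.
 * for retransmission) passes the network a copy made with m_copy
 * rather than the original chain, then tacks a trailer chain onto the
 * copy with m_cat.  "m0", "off", "len" and "trailer" are illustrative
 * arguments assumed to come from the caller.
 */
#ifdef notdef
example_mcopy(m0, off, len, trailer)
	struct mbuf *m0, *trailer;
	int off, len;
{
	register struct mbuf *m;

	m = m_copy(m0, off, len);	/* cluster data is shared, not copied */
	if (m == 0) {
		m_freem(trailer);
		return;
	}
	m_cat(m, trailer);		/* trailer is absorbed into the copy */
	/* ... hand the copy to an output routine; m0 remains queued ... */
	m_freem(m);
}
#endif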
/*
 * Trim "len" bytes of data from the chain: from the head of the chain
 * if len is positive, from the tail if it is negative.
 */
m_adj(mp, len)
	struct mbuf *mp;
	register int len;
{
	register struct mbuf *m;
	register int count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_off += len;
				break;
			}
		}
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			return;
		}
		count -= len;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		for (m = mp; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		while (m = m->m_next)
			m->m_len = 0;
	}
}

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to MPULL_EXTRA bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
struct mbuf *
m_pullup(n, len)
	register struct mbuf *n;
	int len;
{
	register struct mbuf *m;
	register int count;
	int space;

	if (n->m_off + len <= MMAXOFF && n->m_next) {
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == 0)
			goto bad;
		m->m_len = 0;
	}
	space = MMAXOFF - m->m_off;
	do {
		count = MIN(MIN(space - m->m_len, len + MPULL_EXTRA), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t)+m->m_len,
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		if (n->m_len)
			n->m_off += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	return (0);
}
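/*
 * Example (not compiled): a minimal sketch of the common input-side
 * pattern for m_pullup and m_adj.  "struct hdr" and HDRLEN stand in
 * for a real protocol header; the header is first made contiguous so
 * that mtod is safe, then stripped from the front of the chain before
 * the remaining data is passed along.
 */
#ifdef notdef
example_input(m)
	register struct mbuf *m;
{
	register struct hdr *h;

	if (m->m_len < HDRLEN && (m = m_pullup(m, HDRLEN)) == 0)
		return;				/* m_pullup freed the chain */
	h = mtod(m, struct hdr *);
	/* ... examine and validate *h ... */
	m_adj(m, HDRLEN);			/* drop the header */
	/* ... hand the rest of the chain to the next layer ... */
	m_freem(m);
}
#endif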