/*
 * Copyright (c) 1982, 1986 Regents of the University of California.
 * All rights reserved.  The Berkeley software License Agreement
 * specifies the terms and conditions for redistribution.
 *
 *	@(#)uipc_mbuf.c	7.3 (Berkeley) 06/21/87
 */

#include "../machine/pte.h"

#include "param.h"
#include "dir.h"
#include "user.h"
#include "proc.h"
#include "cmap.h"
#include "map.h"
#include "mbuf.h"
#include "vm.h"
#include "kernel.h"
#include "syslog.h"
#include "domain.h"
#include "protosw.h"

/*
 * Called once at boot to prime the free lists with an initial
 * allocation of mbufs and clusters.
 */
mbinit()
{
	int s;

	s = splimp();
	if (m_clalloc(4096/CLBYTES, MPG_MBUFS, M_DONTWAIT) == 0)
		goto bad;
	if (m_clalloc(4096/CLBYTES, MPG_CLUSTERS, M_DONTWAIT) == 0)
		goto bad;
	splx(s);
	return;
bad:
	panic("mbinit");
}

/*
 * Allocate ncl clusters of kernel memory for the mbuf pool and thread
 * the space onto the cluster free list or carve it into small mbufs,
 * according to "how".
 * Must be called at splimp.
 */
/* ARGSUSED */
caddr_t
m_clalloc(ncl, how, canwait)
	register int ncl;
	int how;
{
	int npg, mbx;
	register struct mbuf *m;
	register int i;
	static int logged;

	npg = ncl * CLSIZE;
	mbx = rmalloc(mbmap, (long)npg);
	if (mbx == 0) {
		if (logged == 0) {
			logged++;
			log(LOG_ERR, "mbuf map full\n");
		}
		return (0);
	}
	m = cltom(mbx / CLSIZE);
	if (memall(&Mbmap[mbx], npg, proc, CSYS) == 0) {
		rmfree(mbmap, (long)npg, (long)mbx);
		return (0);
	}
	vmaccess(&Mbmap[mbx], (caddr_t)m, npg);
	switch (how) {

	case MPG_CLUSTERS:
		for (i = 0; i < ncl; i++) {
			m->m_off = 0;
			m->m_next = mclfree;
			mclfree = m;
			m += CLBYTES / sizeof (*m);
			mbstat.m_clfree++;
		}
		mbstat.m_clusters += ncl;
		break;

	case MPG_MBUFS:
		for (i = ncl * CLBYTES / sizeof (*m); i > 0; i--) {
			m->m_off = 0;
			m->m_type = MT_DATA;
			mbstat.m_mtypes[MT_DATA]++;
			mbstat.m_mbufs++;
			(void) m_free(m);
			m++;
		}
		break;
	}
	return ((caddr_t)m);
}

/*
 * Stub: mbuf pages are never returned to the system (see the note below).
 */
m_pgfree(addr, n)
	caddr_t addr;
	int n;
{

#ifdef lint
	addr = addr; n = n;
#endif
}

/*
 * Try to allocate another page of mbufs; if that fails and the caller
 * can wait, ask the protocols to release what space they can and
 * try once more.
 * Must be called at splimp.
 */
m_expand(canwait)
	int canwait;
{
	register struct domain *dp;
	register struct protosw *pr;
	int tries;

	for (tries = 0;; ) {
		if (m_clalloc(1, MPG_MBUFS, canwait))
			return (1);
		if (canwait == 0 || tries++)
			return (0);

		/* ask protocols to free space */
		for (dp = domains; dp; dp = dp->dom_next)
			for (pr = dp->dom_protosw; pr < dp->dom_protoswNPROTOSW;
			    pr++)
				if (pr->pr_drain)
					(*pr->pr_drain)();
		mbstat.m_drain++;
	}
}

/* NEED SOME WAY TO RELEASE SPACE */

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
struct mbuf *
m_get(canwait, type)
	int canwait, type;
{
	register struct mbuf *m;

	MGET(m, canwait, type);
	return (m);
}

struct mbuf *
m_getclr(canwait, type)
	int canwait, type;
{
	register struct mbuf *m;

	MGET(m, canwait, type);
	if (m == 0)
		return (0);
	bzero(mtod(m, caddr_t), MLEN);
	return (m);
}

struct mbuf *
m_free(m)
	struct mbuf *m;
{
	register struct mbuf *n;

	MFREE(m, n);
	return (n);
}

/*
 * Get more mbufs; called from MGET macro if mfree list is empty.
 * Must be called at splimp.
 */
/*ARGSUSED*/
struct mbuf *
m_more(canwait, type)
	int canwait, type;
{
	register struct mbuf *m;

	while (m_expand(canwait) == 0) {
		if (canwait == M_WAIT) {
			mbstat.m_wait++;
			m_want++;
			sleep((caddr_t)&mfree, PZERO - 1);
		} else {
			mbstat.m_drops++;
			return (NULL);
		}
	}
	/* if the free list is still empty MGET must not recurse; make it panic */
#define m_more(x,y) (panic("m_more"), (struct mbuf *)0)
	MGET(m, canwait, type);
#undef m_more
	return (m);
}

/*
 * Free an entire mbuf chain.
 */
m_freem(m)
	register struct mbuf *m;
{
	register struct mbuf *n;
	register int s;

	if (m == NULL)
		return;
	s = splimp();
	do {
		MFREE(m, n);
	} while (m = n);
	splx(s);
}

/*
 * Mbuffer utility routines.
 */

/*
 * Make a copy of an mbuf chain starting "off" bytes from the beginning,
 * continuing for "len" bytes.  If len is M_COPYALL, copy to the end of
 * the mbuf chain.  Should get M_WAIT/M_DONTWAIT from caller.
 */
struct mbuf *
m_copy(m, off, len)
	register struct mbuf *m;
	int off;
	register int len;
{
	register struct mbuf *n, **np;
	struct mbuf *top, *p;

	if (len == 0)
		return (0);
	if (off < 0 || len < 0)
		panic("m_copy");
	while (off > 0) {
		if (m == 0)
			panic("m_copy");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == 0) {
			if (len != M_COPYALL)
				panic("m_copy");
			break;
		}
		MGET(n, M_DONTWAIT, m->m_type);
		*np = n;
		if (n == 0)
			goto nospace;
		n->m_len = MIN(len, m->m_len - off);
		if (m->m_off > MMAXOFF) {
			/* cluster data: share the cluster rather than copy */
			p = mtod(m, struct mbuf *);
			n->m_off = ((int)p - (int)n) + off;
			mclrefcnt[mtocl(p)]++;
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	return (top);
nospace:
	m_freem(top);
	return (0);
}

/*
 * Concatenate mbuf chain n onto the end of chain m: copy n's data into
 * m where it fits, otherwise just link the chains together.
 */
m_cat(m, n)
	register struct mbuf *m, *n;
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_off >= MMAXOFF ||
		    m->m_off + m->m_len + n->m_len > MMAXOFF) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

/*
 * Trim "len" bytes of data from the chain: from the head if len is
 * positive, from the tail if it is negative.
 */
m_adj(mp, len)
	struct mbuf *mp;
	register int len;
{
	register struct mbuf *m;
	register count;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_off += len;
				break;
			}
		}
	} else {
		/*
		 * Trim from tail.  Scan the mbuf chain,
		 * calculating its length and finding the last mbuf.
		 * If the adjustment only affects this mbuf, then just
		 * adjust and return.  Otherwise, rescan and truncate
		 * after the remaining size.
		 */
		len = -len;
		count = 0;
		for (;;) {
			count += m->m_len;
			if (m->m_next == (struct mbuf *)0)
				break;
			m = m->m_next;
		}
		if (m->m_len >= len) {
			m->m_len -= len;
			return;
		}
		count -= len;
		/*
		 * Correct length for chain is "count".
		 * Find the mbuf with last data, adjust its length,
		 * and toss data from remaining mbufs on chain.
		 */
		for (m = mp; m; m = m->m_next) {
			if (m->m_len >= count) {
				m->m_len = count;
				break;
			}
			count -= m->m_len;
		}
		while (m = m->m_next)
			m->m_len = 0;
	}
}

/*
 * Rearrange an mbuf chain so that len bytes are contiguous
 * and in the data area of an mbuf (so that mtod and dtom
 * will work for a structure of size len).  Returns the resulting
 * mbuf chain on success, frees it and returns null on failure.
 * If there is room, it will add up to MPULL_EXTRA bytes to the
 * contiguous region in an attempt to avoid being called next time.
 */
struct mbuf *
m_pullup(n, len)
	register struct mbuf *n;
	int len;
{
	register struct mbuf *m;
	register int count;
	int space;

	if (n->m_off + len <= MMAXOFF && n->m_next) {
		m = n;
		n = n->m_next;
		len -= m->m_len;
	} else {
		if (len > MLEN)
			goto bad;
		MGET(m, M_DONTWAIT, n->m_type);
		if (m == 0)
			goto bad;
		m->m_len = 0;
	}
	space = MMAXOFF - m->m_off;
	do {
		count = MIN(MIN(space - m->m_len, len + MPULL_EXTRA), n->m_len);
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t)+m->m_len,
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_len -= count;
		if (n->m_len)
			n->m_off += count;
		else
			n = m_free(n);
	} while (len > 0 && n);
	if (len > 0) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	return (0);
}
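
/*
 * Illustrative sketch only, not part of the original file: one plausible
 * use of the allocation and utility routines above.  The function name
 * m_example is hypothetical, and the block is compiled out with "notdef".
 */
#ifdef notdef
struct mbuf *
m_example()
{
	register struct mbuf *m, *n;

	/* get a zeroed data mbuf, waiting for memory if necessary */
	m = m_getclr(M_WAIT, MT_DATA);
	if (m == 0)
		return (0);
	m->m_len = MLEN;

	/* copy the whole chain without blocking; m_copy returns 0 on failure */
	n = m_copy(m, 0, M_COPYALL);
	if (n == 0) {
		m_freem(m);
		return (0);
	}

	/* append the copy, then trim its MLEN bytes back off the tail */
	m_cat(m, n);
	m_adj(m, -MLEN);
	return (m);
}
#endif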