/*	uipc_mbuf.c	1.36	82/06/20	*/

#include "../h/param.h"
#include "../h/dir.h"
#include "../h/user.h"
#include "../h/proc.h"
#include "../h/pte.h"
#include "../h/cmap.h"
#include "../h/map.h"
#include "../h/mbuf.h"
#include "../net/in_systm.h"		/* XXX */
#include "../h/vm.h"

/*
 * Allocate the initial pools of small mbufs and
 * of page clusters at boot time.
 */
mbinit()
{

	if (m_clalloc(4, MPG_MBUFS) == 0)
		goto bad;
	if (m_clalloc(32, MPG_CLUSTERS) == 0)
		goto bad;
	return;
bad:
	panic("mbinit");
}

/*
 * Allocate ncl page clusters of kernel memory, map them,
 * and carve them up either as whole clusters or as small mbufs,
 * according to "how".
 */
caddr_t
m_clalloc(ncl, how)
	register int ncl;
	int how;
{
	int npg, mbx;
	register struct mbuf *m;
	register int i;
	int s;

	npg = ncl * CLSIZE;
	s = splimp();		/* careful: rmalloc isn't reentrant */
	mbx = rmalloc(mbmap, npg);
	splx(s);
	if (mbx == 0)
		return (0);
	m = cltom(mbx / CLSIZE);
	if (memall(&Mbmap[mbx], npg, proc, CSYS) == 0)
		return (0);
	vmaccess(&Mbmap[mbx], (caddr_t)m, npg);
	switch (how) {

	case MPG_CLUSTERS:
		/* chain each cluster onto the cluster free list */
		s = splimp();
		for (i = 0; i < ncl; i++) {
			m->m_off = 0;
			m->m_next = mclfree;
			mclfree = m;
			m += CLBYTES / sizeof (*m);
			mbstat.m_clfree++;
		}
		mbstat.m_clusters += ncl;
		splx(s);
		break;

	case MPG_MBUFS:
		/* break the clusters into small mbufs and free each one */
		for (i = ncl * CLBYTES / sizeof (*m); i > 0; i--) {
			m->m_off = 0;
			m->m_free = 0;
			mbstat.m_mbufs++;
			(void) m_free(m);
			m++;
		}
		break;
	}
	return ((caddr_t)m);
}

/*
 * Releasing mbuf pages back to the system is not yet implemented;
 * this is a stub.
 */
m_pgfree(addr, n)
	caddr_t addr;
	int n;
{

#ifdef lint
	addr = addr; n = n;
#endif
}

/*
 * Grow the pool of small mbufs by one cluster's worth.
 */
m_expand()
{

	if (m_clalloc(1, MPG_MBUFS) == 0)
		goto steal;
	return (1);
steal:
	/* should ask protocols to free space */
	return (0);
}

/* NEED SOME WAY TO RELEASE SPACE */

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
struct mbuf *
m_get(canwait)
	int canwait;
{
	register struct mbuf *m;

	MGET(m, canwait);
	return (m);
}

struct mbuf *
m_getclr(canwait)
	int canwait;
{
	register struct mbuf *m;

	m = m_get(canwait);
	if (m == 0)
		return (0);
	m->m_off = MMINOFF;
	bzero(mtod(m, caddr_t), MLEN);
	return (m);
}

struct mbuf *
m_free(m)
	struct mbuf *m;
{
	register struct mbuf *n;

	MFREE(m, n);
	return (n);
}

/*
 * Called when MGET finds the free list empty;
 * try to expand the pool and allocate again.
 */
/*ARGSUSED*/
struct mbuf *
m_more(type)
	int type;
{
	register struct mbuf *m;

	if (!m_expand()) {
		mbstat.m_drops++;
		return (NULL);
	}
	/* redefine m_more so a second failure panics instead of recursing */
#define m_more(x)	(panic("m_more"), (struct mbuf *)0)
	MGET(m, type);
#undef m_more
	return (m);
}

/*
 * Free an entire chain of mbufs.
 */
m_freem(m)
	register struct mbuf *m;
{
	register struct mbuf *n;
	register int s;

	if (m == NULL)
		return;
	s = splimp();
	do {
		MFREE(m, n);
	} while (m = n);
	splx(s);
}

/*
 * Mbuffer utility routines.
 */

/*
 * Copy len bytes of an mbuf chain, starting at offset off,
 * into a new chain; cluster data is shared by reference.
 */
struct mbuf *
m_copy(m, off, len)
	register struct mbuf *m;
	int off;
	register int len;
{
	register struct mbuf *n, **np;
	struct mbuf *top, *p;

	if (len == 0)
		return (0);
	if (off < 0 || len < 0)
		panic("m_copy");
	while (off > 0) {
		if (m == 0)
			panic("m_copy");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == 0) {
			if (len != M_COPYALL)
				panic("m_copy");
			break;
		}
		MGET(n, 1);
		*np = n;
		if (n == 0)
			goto nospace;
		n->m_len = MIN(len, m->m_len - off);
		if (m->m_off > MMAXOFF) {
			/* cluster mbuf: share the page, bump its refcount */
			p = mtod(m, struct mbuf *);
			n->m_off = ((int)p - (int)n) + off;
			mclrefcnt[mtocl(p)]++;
		} else {
			n->m_off = MMINOFF;
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		}
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	return (top);
nospace:
	m_freem(top);
	return (0);
}

/*
 * Concatenate chain n onto the end of chain m,
 * compacting data into m's mbufs where it fits.
 */
m_cat(m, n)
	register struct mbuf *m, *n;
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_off >= MMAXOFF ||
		    m->m_off + m->m_len + n->m_len > MMAXOFF) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

/*
 * Trim len bytes from an mbuf chain: from the head
 * if len is positive, from the tail if it is negative.
 */
m_adj(mp, len)
	struct mbuf *mp;
	register int len;
{
	register struct mbuf *m, *n;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_off += len;
				break;
			}
		}
	} else {
		/* a 2 pass algorithm might be better */
		len = -len;
		while (len > 0 && m->m_len != 0) {
			/* find the last mbuf with data, then trim it */
			while (m != NULL && m->m_len != 0) {
				n = m;
				m = m->m_next;
			}
			if (n->m_len <= len) {
				len -= n->m_len;
				n->m_len = 0;
				m = mp;
			} else {
				n->m_len -= len;
				break;
			}
		}
	}
}

/*
 * Rearrange an mbuf chain so that the first len bytes
 * are contiguous in the data area of the first mbuf.
 */
struct mbuf *
m_pullup(m0, len)
	struct mbuf *m0;
	int len;
{
	register struct mbuf *m, *n;
	int count;

	n = m0;
	if (len > MLEN)
		goto bad;
	MGET(m, 0);
	if (m == 0)
		goto bad;
	m->m_off = MMINOFF;
	m->m_len = 0;
	do {
		count = MIN(MLEN - m->m_len, len);
		if (count > n->m_len)
			count = n->m_len;
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t)+m->m_len,
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_off += count;
		n->m_len -= count;
		if (n->m_len)
			break;
		n = m_free(n);
	} while (n);
	if (len) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	return (0);
}