/*	uipc_mbuf.c	1.38	82/10/09	*/

#include "../h/param.h"
#include "../h/dir.h"
#include "../h/user.h"
#include "../h/proc.h"
#include "../h/pte.h"
#include "../h/cmap.h"
#include "../h/map.h"
#include "../h/mbuf.h"
#include "../h/vm.h"

/*
 * Seed the mbuf pool: a few pages of small mbufs
 * and an initial supply of page clusters.
 */
mbinit()
{

	if (m_clalloc(4, MPG_MBUFS) == 0)
		goto bad;
	if (m_clalloc(32, MPG_CLUSTERS) == 0)
		goto bad;
	return;
bad:
	panic("mbinit");
}

/*
 * Allocate ncl clusters of kernel memory, map them into the
 * mbuf area, and thread them onto the appropriate free list
 * (page clusters or small mbufs) according to `how'.
 */
caddr_t
m_clalloc(ncl, how)
	register int ncl;
	int how;
{
	int npg, mbx;
	register struct mbuf *m;
	register int i;
	int s;

	npg = ncl * CLSIZE;
	s = splimp();		/* careful: rmalloc isn't reentrant */
	mbx = rmalloc(mbmap, npg);
	splx(s);
	if (mbx == 0)
		return (0);
	m = cltom(mbx / CLSIZE);
	if (memall(&Mbmap[mbx], npg, proc, CSYS) == 0)
		return (0);
	vmaccess(&Mbmap[mbx], (caddr_t)m, npg);
	switch (how) {

	case MPG_CLUSTERS:
		s = splimp();
		for (i = 0; i < ncl; i++) {
			m->m_off = 0;
			m->m_next = mclfree;
			mclfree = m;
			m += CLBYTES / sizeof (*m);
			mbstat.m_clfree++;
		}
		mbstat.m_clusters += ncl;
		splx(s);
		break;

	case MPG_MBUFS:
		for (i = ncl * CLBYTES / sizeof (*m); i > 0; i--) {
			m->m_off = 0;
			m->m_free = 0;
			mbstat.m_mbufs++;
			(void) m_free(m);
			m++;
		}
		break;
	}
	return ((caddr_t)m);
}

m_pgfree(addr, n)
	caddr_t addr;
	int n;
{

#ifdef lint
	addr = addr; n = n;
#endif
}

/*
 * Add a cluster's worth of small mbufs to the free list;
 * returns 0 if no more memory could be obtained.
 */
m_expand()
{

	if (m_clalloc(1, MPG_MBUFS) == 0)
		goto steal;
	return (1);
steal:
	/* should ask protocols to free space */
	return (0);
}

/* NEED SOME WAY TO RELEASE SPACE */

/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
struct mbuf *
m_get(canwait)
	int canwait;
{
	register struct mbuf *m;

	MGET(m, canwait);
	return (m);
}

struct mbuf *
m_getclr(canwait)
	int canwait;
{
	register struct mbuf *m;

	m = m_get(canwait);
	if (m == 0)
		return (0);
	bzero(mtod(m, caddr_t), MLEN);
	return (m);
}

struct mbuf *
m_free(m)
	struct mbuf *m;
{
	register struct mbuf *n;

	MFREE(m, n);
	return (n);
}

/*
 * Slow path for MGET: grow the pool and try again.  The temporary
 * redefinition keeps the nested MGET from recursing back here.
 */
/*ARGSUSED*/
struct mbuf *
m_more(type)
	int type;
{
	register struct mbuf *m;

	if (!m_expand()) {
		mbstat.m_drops++;
		return (NULL);
	}
#define m_more(x) (panic("m_more"), (struct mbuf *)0)
	MGET(m, type);
#undef m_more
	return (m);
}

/*
 * Free an entire chain of mbufs.
 */
m_freem(m)
	register struct mbuf *m;
{
	register struct mbuf *n;
	register int s;

	if (m == NULL)
		return;
	s = splimp();
	do {
		MFREE(m, n);
	} while (m = n);
	splx(s);
}

/*
 * Mbuffer utility routines.
 */

/*
 * Copy len bytes of data (all of it, if len is M_COPYALL) starting
 * `off' bytes into the chain.  Data held in page clusters is shared
 * by bumping its reference count rather than copied.
 */
struct mbuf *
m_copy(m, off, len)
	register struct mbuf *m;
	int off;
	register int len;
{
	register struct mbuf *n, **np;
	struct mbuf *top, *p;

	if (len == 0)
		return (0);
	if (off < 0 || len < 0)
		panic("m_copy");
	while (off > 0) {
		if (m == 0)
			panic("m_copy");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		if (m == 0) {
			if (len != M_COPYALL)
				panic("m_copy");
			break;
		}
		MGET(n, 1);
		*np = n;
		if (n == 0)
			goto nospace;
		n->m_len = MIN(len, m->m_len - off);
		if (m->m_off > MMAXOFF) {
			p = mtod(m, struct mbuf *);
			n->m_off = ((int)p - (int)n) + off;
			mclrefcnt[mtocl(p)]++;
		} else
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		if (len != M_COPYALL)
			len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	return (top);
nospace:
	m_freem(top);
	return (0);
}

/*
 * Concatenate chain n onto the end of chain m,
 * compacting data into m where it will fit.
 */
m_cat(m, n)
	register struct mbuf *m, *n;
{
	while (m->m_next)
		m = m->m_next;
	while (n) {
		if (m->m_off >= MMAXOFF ||
		    m->m_off + m->m_len + n->m_len > MMAXOFF) {
			/* just join the two chains */
			m->m_next = n;
			return;
		}
		/* splat the data from one into the other */
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
		    (u_int)n->m_len);
		m->m_len += n->m_len;
		n = m_free(n);
	}
}

/*
 * Trim len bytes from the head of the chain (len > 0)
 * or from its tail (len < 0).
 */
m_adj(mp, len)
	struct mbuf *mp;
	register int len;
{
	register struct mbuf *m, *n;

	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_off += len;
				break;
			}
		}
	} else {
		/* a 2 pass algorithm might be better */
		len = -len;
		while (len > 0 && m->m_len != 0) {
			while (m != NULL && m->m_len != 0) {
				n = m;
				m = m->m_next;
			}
			if (n->m_len <= len) {
				len -= n->m_len;
				n->m_len = 0;
				m = mp;
			} else {
				n->m_len -= len;
				break;
			}
		}
	}
}

/*
 * Rearrange the chain so that its first len bytes are contiguous
 * in a single mbuf; on failure the chain is freed and 0 returned.
 */
struct mbuf *
m_pullup(m0, len)
	struct mbuf *m0;
	int len;
{
	register struct mbuf *m, *n;
	int count;

	n = m0;
	if (len > MLEN)
		goto bad;
	MGET(m, M_DONTWAIT);
	if (m == 0)
		goto bad;
	m->m_len = 0;
	do {
		count = MIN(MLEN - m->m_len, len);
		if (count > n->m_len)
			count = n->m_len;
		bcopy(mtod(n, caddr_t), mtod(m, caddr_t)+m->m_len,
		    (unsigned)count);
		len -= count;
		m->m_len += count;
		n->m_off += count;
		n->m_len -= count;
		if (n->m_len)
			break;
		n = m_free(n);
	} while (n);
	if (len) {
		(void) m_free(m);
		goto bad;
	}
	m->m_next = n;
	return (m);
bad:
	m_freem(n);
	return (0);
}
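#ifdef notdef
/*
 * Hypothetical usage sketch, not part of the original source: one way a
 * caller might combine the routines above.  An mbuf is taken with the
 * MGET fast-path macro (m_get() is the function form), a copy of the
 * chain is made with m_copy(), and both chains are released with
 * m_freem().  The function name m_example is invented for illustration.
 */
m_example()
{
	register struct mbuf *m, *n;

	MGET(m, M_DONTWAIT);		/* macro form of m_get(M_DONTWAIT) */
	if (m == 0)
		return (0);
	m->m_len = MLEN;		/* pretend the mbuf is full of data */
	n = m_copy(m, 0, M_COPYALL);	/* duplicate the entire chain */
	if (n)
		m_freem(n);
	m_freem(m);
	return (1);
}
#endif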