/*	uipc_mbuf.c	1.11	81/11/18	*/

#include "../h/param.h"
#include "../h/dir.h"
#include "../h/user.h"
#include "../h/proc.h"
#include "../h/pte.h"
#include "../h/cmap.h"
#include "../h/map.h"
#include "../h/mbuf.h"
#include "../net/inet_systm.h"	/* XXX */
#include "../h/vm.h"

/*
 * Raise the mbuf low-water mark by mbufs (the high-water mark is kept
 * at twice the low-water mark); fail if that would exceed the space
 * the system can provide.
 */
m_reserve(mbufs)
	int mbufs;
{

	if (mbstat.m_lowat + mbufs > NMBPAGES * NMBPG - 32)
		return (0);
	mbstat.m_lowat += mbufs;
	mbstat.m_hiwat = 2 * mbstat.m_lowat;
	return (1);
}

/*
 * Give back a previous reservation by lowering the water marks.
 */
m_release(mbufs)
	int mbufs;
{

	mbstat.m_lowat -= mbufs;
	mbstat.m_hiwat = 2 * mbstat.m_lowat;
}

/*
 * Get a single mbuf; canwait is passed through to MGET.
 */
struct mbuf *
m_get(canwait)
	int canwait;
{
	register struct mbuf *m;

	COUNT(M_GET);
	MGET(m, canwait);
	return (m);
}

/*
 * Get an mbuf and zero its data area.
 */
struct mbuf *
m_getclr(canwait)
	int canwait;
{
	register struct mbuf *m;

	COUNT(M_GETCLR);
	m = m_get(canwait);
	if (m == 0)
		return (0);
	m->m_off = MMINOFF;
	bzero(mtod(m, caddr_t), MLEN);
	return (m);
}

/*
 * Free a single mbuf, returning its successor in the chain.
 */
struct mbuf *
m_free(m)
	struct mbuf *m;
{
	register struct mbuf *n;

	COUNT(M_FREE);
	MFREE(m, n);
	return (n);
}

/*
 * Called when MGET finds no free mbufs; try to expand the pool.
 */
/*ARGSUSED*/
struct mbuf *
m_more(type)
	int type;
{
	register struct mbuf *m;

	COUNT(M_MORE);
	if (!m_expand()) {
		mbstat.m_drops++;
		return (NULL);
	}
	/* redefine m_more so the MGET below cannot recurse into this routine */
#define m_more(x) (panic("m_more"), (struct mbuf *)0)
	MGET(m, type);
	return (m);
}

/*
 * Free an entire mbuf chain.
 */
m_freem(m)
	register struct mbuf *m;
{
	register struct mbuf *n;
	register int s;

	COUNT(M_FREEM);
	if (m == NULL)
		return;
	s = splimp();
	do {
		MFREE(m, n);
	} while (m = n);
	splx(s);
}

/*
 * Not yet implemented; always returns 0.
 */
m_pullup(m, len)
	struct mbuf *m;
	int len;
{

	return (0);
}

/*
 * Make a copy of len bytes of the chain m, starting off bytes in.
 * Data held in separate mbuf pages is shared by reference rather
 * than copied.
 */
struct mbuf *
m_copy(m, off, len)
	register struct mbuf *m;
	int off;
	register int len;
{
	register struct mbuf *n, **np;
	struct mbuf *top, *p;
	COUNT(M_COPY);

	if (len == 0)
		return (0);
	if (off < 0 || len < 0)
		panic("m_copy");
	while (off > 0) {
		if (m == 0)
			panic("m_copy");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		MGET(n, 1);
		*np = n;
		if (n == 0)
			goto nospace;
		if (m == 0)
			panic("m_copy");
		n->m_len = MIN(len, m->m_len - off);
		if (m->m_off > MMAXOFF) {
			/* data lives in a separate mbuf page; share it */
			p = mtod(m, struct mbuf *);
			n->m_off = ((int)p - (int)n) + off;
			mprefcnt[mtopf(p)]++;
		} else {
			n->m_off = MMINOFF;
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		}
		len -= n->m_len;
		off = 0;
		m = m->m_next;
		np = &n->m_next;
	}
	return (top);
nospace:
	printf("m_copy: no space\n");
	m_freem(top);
	return (0);
}

/*
 * Initialize the mbuf pool: map the first pages, free the mbufs in
 * them, and build the initial list of whole free pages.
 */
mbinit()
{
	register struct mbuf *m;
	register i;

	COUNT(MBUFINIT);
	m = (struct mbuf *)&mbutl[0];		/* ->start of buffer virt mem */
	(void) vmemall(&Mbmap[0], 2, proc, CSYS);
	vmaccess(&Mbmap[0], (caddr_t)m, 2);
	for (i=0; i < NMBPG; i++) {
		m->m_off = 0;
		m_free(m);
		m++;
	}
	(void) pg_alloc(3);
	mbstat.m_pages = 4;
	mbstat.m_bufs = 32;
	mbstat.m_lowat = 16;
	mbstat.m_hiwat = 32;
	{ int j,k,n;
	n = 32;
	k = n << 1;
	if ((i = rmalloc(mbmap, n)) == 0)
		panic("mbinit");
	j = i<<1;
	m = pftom(i);
	/* should use vmemall sometimes */
	if (memall(&Mbmap[j], k, proc, CSYS) == 0) {
		printf("botch\n");
		return;
	}
	vmaccess(&Mbmap[j], (caddr_t)m, k);
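	/* chain the freshly mapped pages onto the mpfree list of whole free pages */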
	for (j=0; j < n; j++) {
		m->m_off = 0;
		m->m_next = mpfree;
		mpfree = m;
		m += NMBPG;
		nmpfree++;
	}
	}
}

/*
 * Allocate and map n pages of mbuf space, then carve them into
 * small mbufs and place those on the free list.
 */
pg_alloc(n)
	register int n;
{
	register i, j, k;
	register struct mbuf *m;
	int bufs, s;

	COUNT(PG_ALLOC);
	k = n << 1;
	if ((i = rmalloc(mbmap, n)) == 0)
		return (0);
	j = i<<1;
	m = pftom(i);
	/* should use vmemall sometimes */
	if (memall(&Mbmap[j], k, proc, CSYS) == 0)
		return (0);
	vmaccess(&Mbmap[j], (caddr_t)m, k);
	bufs = n << 3;
	s = splimp();
	for (j=0; j < bufs; j++) {
		m->m_off = 0;
		m_free(m);
		m++;
	}
	splx(s);
	mbstat.m_pages += n;
	return (1);
}

/*
 * Grow the mbuf pool toward the high-water mark, falling back to
 * single-page allocations if one larger request cannot be satisfied.
 */
m_expand()
{
	register i;
	int need, needp, needs;

	COUNT(M_EXPAND);
	needs = need = mbstat.m_hiwat - mbstat.m_bufs;
	needp = need >> 3;
	if (pg_alloc(needp))
		return (1);
	for (i=0; i < needp; i++, need -= NMBPG)
		if (pg_alloc(1) == 0)
			goto steal;
	return (need < needs);
steal:
	/* while (not enough) ask protocols to free code */
	;
	return (0);
}

#ifdef notdef
m_relse()
{

	COUNT(M_RELSE);
}
#endif

/*
 * Concatenate the chain n onto the end of the chain m, compacting
 * data into m's last buffer when it will fit there.
 */
m_cat(m, n)
	register struct mbuf *m, *n;
{

	while (m->m_next)
		m = m->m_next;
	while (n)
		if (m->m_off + m->m_len + n->m_len <= MMAXOFF) {
			bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
			    (u_int)n->m_len);
			m->m_len += n->m_len;
			n = m_free(n);
		} else {
			m->m_next = n;
			m = n;
			n = m->m_next;
		}
}

/*
 * Trim len bytes of data from the chain headed by mp: from the front
 * if len is positive, from the back if it is negative.
 */
m_adj(mp, len)
	struct mbuf *mp;
	register int len;
{
	register struct mbuf *m, *n;

	COUNT(M_ADJ);
	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_off += len;
				break;
			}
		}
	} else {
		/* a 2 pass algorithm might be better */
		len = -len;
		while (len > 0 && m->m_len != 0) {
			while (m != NULL && m->m_len != 0) {
				n = m;
				m = m->m_next;
			}
			if (n->m_len <= len) {
				len -= n->m_len;
				n->m_len = 0;
				m = mp;
			} else {
				n->m_len -= len;
				break;
			}
		}
	}
}
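#ifdef notdef
/*
 * Illustrative sketch only, not referenced by the kernel: one way a
 * caller might use the routines above.  The function name is
 * hypothetical, the literal 1 passed to m_get() mirrors the
 * MGET(n, 1) call in m_copy(), and the data written is arbitrary.
 */
example_usage()
{
	register struct mbuf *m, *n;

	m = m_get(1);
	if (m == 0)
		return;
	m->m_off = MMINOFF;
	m->m_len = 4;
	bcopy((caddr_t)"data", mtod(m, caddr_t), 4);
	n = m_copy(m, 0, 4);		/* copy the 4 bytes into a new mbuf */
	if (n)
		m_freem(n);
	m_freem(m);			/* releases the whole chain */
}
#endif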