/*	uipc_mbuf.c	1.16	81/11/22	*/

/*
 * Kernel memory-buffer (mbuf) management: reservation accounting,
 * allocation/free, chain copy/concatenate/trim, and the routines that
 * grow the mbuf pool by allocating and mapping fresh pages.
 *
 * NOTE(review): MGET/MFREE/COUNT/mtod/pftom/mtopf and the mbstat/Mbmap/
 * mbutl/mpfree globals are defined in headers not visible here; comments
 * on their behavior below are inferred from usage and should be confirmed
 * against ../h/mbuf.h.
 */

#include "../h/param.h"
#include "../h/dir.h"
#include "../h/user.h"
#include "../h/proc.h"
#include "../h/pte.h"
#include "../h/cmap.h"
#include "../h/map.h"
#include "../h/mbuf.h"
#include "../net/inet_systm.h"		/* XXX */
#include "../h/vm.h"

/*
 * Reserve 'mbufs' buffers worth of capacity for a protocol/user.
 * Refuses (returns 0) if granting the request would push the low-water
 * mark past the total pool capacity less a 32-buffer slop; otherwise
 * raises the high-water mark and derives the low-water mark as half of
 * it, and returns 1.
 */
m_reserve(mbufs)
	int mbufs;
{

/*
	printf("reserve %d\n", mbufs);
*/
	if (mbstat.m_lowat + (mbufs>>1) > NMBPAGES * NMBPG - 32)
		return (0);
	mbstat.m_hiwat += mbufs;
	mbstat.m_lowat = mbstat.m_hiwat >> 1;
	return (1);
}

/*
 * Give back a reservation previously made with m_reserve():
 * lower the high-water mark and recompute the low-water mark.
 */
m_release(mbufs)
	int mbufs;
{

/*
	printf("release %d\n", mbufs);
*/
	mbstat.m_hiwat -= mbufs;
	mbstat.m_lowat = mbstat.m_hiwat >> 1;
}

/*
 * Allocate one mbuf via the MGET macro.
 * 'canwait' presumably selects whether the caller may sleep for space
 * (it is also passed as the "type" to MGET in m_more below) — confirm
 * against the MGET definition.  Returns NULL-equivalent on failure.
 */
struct mbuf *
m_get(canwait)
	int canwait;
{
	register struct mbuf *m;

COUNT(M_GET);
	MGET(m, canwait);
	return (m);
}

/*
 * Allocate an mbuf and zero its data area (MLEN bytes at MMINOFF).
 * Returns 0 if no mbuf could be had.
 */
struct mbuf *
m_getclr(canwait)
	int canwait;
{
	register struct mbuf *m;

COUNT(M_GETCLR);
	m = m_get(canwait);
	if (m == 0)
		return (0);
	m->m_off = MMINOFF;
	bzero(mtod(m, caddr_t), MLEN);
	return (m);
}

/*
 * Free a single mbuf via MFREE and return its successor in the chain
 * (MFREE loads the next pointer into 'n' before releasing 'm').
 */
struct mbuf *
m_free(m)
	struct mbuf *m;
{
	register struct mbuf *n;

COUNT(M_FREE);
	MFREE(m, n);
	return (n);
}

/*
 * Called (by the MGET macro, presumably) when the free list is empty:
 * try to expand the pool, then retry the allocation.  On expansion
 * failure the drop is counted and NULL returned.
 */
/*ARGSUSED*/
struct mbuf *
m_more(type)
	int type;
{
	register struct mbuf *m;

COUNT(M_MORE);
	if (!m_expand()) {
		mbstat.m_drops++;
		return (NULL);
	}
/*
 * Redefine m_more from here on so that the MGET expansion below cannot
 * recurse back into this function; if the retry still finds no buffer
 * after a successful expand, that is an inconsistency — panic.
 */
#define m_more(x) (panic("m_more"), (struct mbuf *)0)
	MGET(m, type);
	return (m);
}

/*
 * Free an entire mbuf chain.  Interrupt priority is raised (splimp)
 * around the walk so the free list is not corrupted by network
 * interrupts.  Accepts a NULL chain.
 */
m_freem(m)
	register struct mbuf *m;
{
	register struct mbuf *n;
	register int s;

COUNT(M_FREEM);
	if (m == NULL)
		return;
	s = splimp();
	do {
		MFREE(m, n);
	} while (m = n);
	splx(s);
}

/*
 * Stub: pull 'len' bytes of a chain into the first mbuf.
 * Not yet implemented — always reports failure (0).
 */
/*ARGSUSED*/
m_pullup(m, len)
	struct mbuf *m;
	int len;
{

	return (0);
}

/*
 * Copy 'len' bytes starting 'off' bytes into chain 'm' to a freshly
 * allocated chain, which is returned (0 on failure or len==0).
 * Page-cluster mbufs (m_off > MMAXOFF) are not copied byte-by-byte:
 * the new mbuf is pointed at the same page and the page reference
 * count (mprefcnt) is bumped.  Panics if the request runs past the
 * end of the source chain or if off/len are negative.
 */
struct mbuf *
m_copy(m, off, len)
	register struct mbuf *m;
	int off;
	register int len;
{
	register struct mbuf *n, **np;
	struct mbuf *top, *p;
COUNT(M_COPY);

	if (len == 0)
		return (0);
	if (off < 0 || len < 0)
		panic("m_copy");
	/* skip whole leading mbufs until 'off' lands inside one */
	while (off > 0) {
		if (m == 0)
			panic("m_copy");
		if (off < m->m_len)
			break;
		off -= m->m_len;
		m = m->m_next;
	}
	np = &top;
	top = 0;
	while (len > 0) {
		MGET(n, 1);
		*np = n;
		if (n == 0)
			goto nospace;
		if (m == 0)
			panic("m_copy");
		n->m_len = MIN(len, m->m_len - off);
		if (m->m_off > MMAXOFF) {
			/* cluster: share the page, don't copy the bytes */
			p = mtod(m, struct mbuf *);
			n->m_off = ((int)p - (int)n) + off;
			mprefcnt[mtopf(p)]++;
		} else {
			n->m_off = MMINOFF;
			bcopy(mtod(m, caddr_t)+off, mtod(n, caddr_t),
			    (unsigned)n->m_len);
		}
		len -= n->m_len;
		off = 0;	/* only the first copied mbuf has an offset */
		m = m->m_next;
		np = &n->m_next;
	}
	return (top);
nospace:
	printf("m_copy: no space\n");
	m_freem(top);
	return (0);
}

/*
 * One-time initialization of the mbuf system: map the first pages of
 * the mbuf arena, seed the free list with NMBPG small buffers, allocate
 * three more buffer pages, set the initial statistics/watermarks, and
 * build the free list of 32 page clusters (mpfree).
 */
mbinit()
{
	register struct mbuf *m;
	register i;

COUNT(MBUFINIT);
	m = (struct mbuf *)&mbutl[0];	/* ->start of buffer virt mem */
	(void) vmemall(&Mbmap[0], 2, proc, CSYS);
	vmaccess(&Mbmap[0], (caddr_t)m, 2);
	for (i=0; i < NMBPG; i++) {
		m->m_off = 0;
		(void) m_free(m);
		m++;
	}
	(void) pg_alloc(3);
	/* 1 page seeded above + 3 from pg_alloc = 4 pages, 32 bufs */
	mbstat.m_pages = 4;
	mbstat.m_bufs = 32;
	mbstat.m_lowat = 16;
	mbstat.m_hiwat = 32;
{ int j,k,n;
	/*
	 * Carve out n=32 page clusters: grab map space, map 2*n
	 * clicks of real memory, and thread the pages onto mpfree.
	 * NOTE(review): this duplicates the body of pg_alloc() except
	 * that pages go to mpfree rather than the small-buffer list.
	 */
	n = 32;
	k = n << 1;
	if ((i = rmalloc(mbmap, n)) == 0)
		panic("mbinit");
	j = i<<1;
	m = pftom(i);
	/* should use vmemall sometimes */
	if (memall(&Mbmap[j], k, proc, CSYS) == 0) {
		printf("botch\n");
		return;
	}
	vmaccess(&Mbmap[j], (caddr_t)m, k);
	for (j=0; j < n; j++) {
		m->m_off = 0;
		m->m_next = mpfree;
		mpfree = m;
		m += NMBPG;
		nmpfree++;
	}
}
}

/*
 * Add 'n' pages to the mbuf pool: allocate map space, wire 2*n clicks
 * of memory, and free the resulting n*8 (NMBPG per page, presumably —
 * confirm: bufs = n << 3) small buffers onto the free list at splimp.
 * Returns 1 on success, 0 if either map space or memory is unavailable.
 */
pg_alloc(n)
	register int n;
{
	register i, j, k;
	register struct mbuf *m;
	int bufs, s;

COUNT(PG_ALLOC);
	k = n << 1;
	if ((i = rmalloc(mbmap, n)) == 0)
		return (0);
	j = i<<1;
	m = pftom(i);
	/* should use vmemall sometimes */
	if (memall(&Mbmap[j], k, proc, CSYS) == 0)
		return (0);
	vmaccess(&Mbmap[j], (caddr_t)m, k);
	bufs = n << 3;
	s = splimp();
	for (j=0; j < bufs; j++) {
		m->m_off = 0;
		(void) m_free(m);
		m++;
	}
	splx(s);
	mbstat.m_pages += n;
	return (1);
}

/*
 * Grow the pool toward the high-water mark.  First try one allocation
 * of all needed pages; failing that, allocate page-by-page.  Returns
 * nonzero if any progress was made.  The "steal from protocols" path
 * is not implemented.
 */
m_expand()
{
	register i;
	int need, needp, needs;

COUNT(M_EXPAND);
	needs = need = mbstat.m_hiwat - mbstat.m_bufs;
	needp = need >> 3;	/* pages = buffers / 8 */
	if (pg_alloc(needp))
		return (1);
	for (i=0; i < needp; i++, need -= NMBPG)
		if (pg_alloc(1) == 0)
			goto steal;
	return (need < needs);	/* true iff at least one page was added */
steal:
	/* while (not enough) ask protocols to free code */
	;
	return (0);
}

#ifdef notdef
m_relse()
{

COUNT(M_RELSE);
}
#endif

/*
 * Concatenate chain n onto the end of chain m.  Where the data of an
 * mbuf from n fits in the tail mbuf of m (within MMAXOFF), it is copied
 * in and the source mbuf freed; otherwise the mbuf is linked on as-is.
 */
m_cat(m, n)
	register struct mbuf *m, *n;
{

	while (m->m_next)
		m = m->m_next;
	while (n)
		if (m->m_off + m->m_len + n->m_len <= MMAXOFF) {
			bcopy(mtod(n, caddr_t), mtod(m, caddr_t) + m->m_len,
			    (u_int)n->m_len);
			m->m_len += n->m_len;
			n = m_free(n);	/* advances n to n->m_next */
		} else {
			m->m_next = n;
			m = n;
			n = m->m_next;
		}
}

/*
 * Trim 'len' bytes from an mbuf chain: from the front if len >= 0,
 * from the back if len < 0.  Mbufs are emptied (m_len = 0), not freed.
 */
m_adj(mp, len)
	struct mbuf *mp;
	register int len;
{
	register struct mbuf *m, *n;

COUNT(M_ADJ);
	if ((m = mp) == NULL)
		return;
	if (len >= 0) {
		/* trim from head: consume whole mbufs, then offset into one */
		while (m != NULL && len > 0) {
			if (m->m_len <= len) {
				len -= m->m_len;
				m->m_len = 0;
				m = m->m_next;
			} else {
				m->m_len -= len;
				m->m_off += len;
				break;
			}
		}
	} else {
		/*
		 * Trim from tail: repeatedly walk to the last non-empty
		 * mbuf (n) and shorten it, restarting from the head each
		 * time a whole mbuf is consumed.
		 */
		/* a 2 pass algorithm might be better */
		len = -len;
		while (len > 0 && m->m_len != 0) {
			while (m != NULL && m->m_len != 0) {
				n = m;
				m = m->m_next;
			}
			if (n->m_len <= len) {
				len -= n->m_len;
				n->m_len = 0;
				m = mp;
			} else {
				n->m_len -= len;
				break;
			}
		}
	}
}