/*	$NetBSD: if_uba.c,v 1.16 1999/06/06 20:45:21 ragge Exp $	*/

/*
 * Copyright (c) 1982, 1986, 1988 Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)if_uba.c	7.16 (Berkeley) 12/16/90
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/map.h>
#include <sys/buf.h>
#include <sys/socket.h>
#include <sys/syslog.h>

#include <net/if.h>

#include <machine/pte.h>
#include <machine/mtpr.h>
#include <machine/vmparam.h>
#include <machine/cpu.h>

#include <vax/if/if_uba.h>
#include <vax/uba/ubareg.h>
#include <vax/uba/ubavar.h>

static	int if_ubaalloc __P((struct ifubinfo *, struct ifrw *, int));
static	void rcv_xmtbuf __P((struct ifxmt *));
static	void restor_xmtbuf __P((struct ifxmt *));

/*
 * Routines supporting UNIBUS network interfaces.
 *
 * TODO:
 *	Support interfaces using only one BDP statically.
 */

/*
 * Init UNIBUS for interface whose headers of size hlen are to
 * end on a page boundary.  We allocate a UNIBUS map register for the page
 * with the header, and nmr more UNIBUS map registers for i/o on the adapter,
 * doing this once for each read and once for each write buffer.  We also
 * allocate the buffer memory itself (with malloc, from M_DEVBUF).
 */
int
if_ubaminit(ifu, uh, hlen, nmr, ifr, nr, ifw, nw)
	register struct ifubinfo *ifu;
	struct uba_softc *uh;
	int hlen, nmr, nr, nw;
	register struct ifrw *ifr;
	register struct ifxmt *ifw;
{
	register caddr_t p;
	caddr_t cp;
	int i, nclbytes, off;

	if (hlen)
		off = MCLBYTES - hlen;
	else
		off = 0;
	nclbytes = roundup(nmr * VAX_NBPG, MCLBYTES);
	if (hlen)
		nclbytes += MCLBYTES;
	if (ifr[0].ifrw_addr)
		cp = ifr[0].ifrw_addr - off;
	else {
		cp = (caddr_t)malloc((u_long)((nr + nw) * nclbytes), M_DEVBUF,
		    M_NOWAIT);
		if (cp == 0)
			return (0);
		p = cp;
		for (i = 0; i < nr; i++) {
			ifr[i].ifrw_addr = p + off;
			p += nclbytes;
		}
		for (i = 0; i < nw; i++) {
			ifw[i].ifw_base = p;
			ifw[i].ifw_addr = p + off;
			p += nclbytes;
		}
		ifu->iff_hlen = hlen;
		ifu->iff_softc = uh;
		ifu->iff_uba = uh->uh_uba;
		ifu->iff_ubamr = uh->uh_mr;
	}
	for (i = 0; i < nr; i++)
		if (if_ubaalloc(ifu, &ifr[i], nmr) == 0) {
			nr = i;
			nw = 0;
			goto bad;
		}
	for (i = 0; i < nw; i++)
		if (if_ubaalloc(ifu, &ifw[i].ifrw, nmr) == 0) {
			nw = i;
			goto bad;
		}
	while (--nw >= 0) {
		for (i = 0; i < nmr; i++)
			ifw[nw].ifw_wmap[i] = ifw[nw].ifw_mr[i];
		ifw[nw].ifw_xswapd = 0;
		ifw[nw].ifw_flags = IFRW_W;
		ifw[nw].ifw_nmr = nmr;
	}
	return (1);
bad:
	while (--nw >= 0)
		ubarelse(ifu->iff_softc, &ifw[nw].ifw_info);
	while (--nr >= 0)
		ubarelse(ifu->iff_softc, &ifr[nr].ifrw_info);
	free(cp, M_DEVBUF);
	ifr[0].ifrw_addr = 0;
	return (0);
}
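
/*
 * For illustration: a sketch of how a UNIBUS network driver might call
 * if_ubaminit() from its attach/init code.  The softc layout and the names
 * xx_softc, XXNRCV, XXNXMT, unit, and the way the parent uba_softc is
 * obtained are hypothetical; only the if_ubaminit() arguments follow the
 * definition above.
 *
 *	struct xx_softc {
 *		struct ifnet	xx_if;			-- network interface
 *		struct ifubinfo	xx_ifuba;		-- UNIBUS glue
 *		struct ifrw	xx_ifr[XXNRCV];		-- receive buffers
 *		struct ifxmt	xx_ifw[XXNXMT];		-- transmit buffers
 *	};
 *
 *	-- map registers (VAX pages) needed for one maximum-size packet,
 *	-- not counting the separately mapped header
 *	nmr = (ETHERMTU + VAX_NBPG - 1) / VAX_NBPG;
 *	if (if_ubaminit(&sc->xx_ifuba, (struct uba_softc *)parent,
 *	    sizeof(struct ether_header), nmr,
 *	    sc->xx_ifr, XXNRCV, sc->xx_ifw, XXNXMT) == 0) {
 *		printf("xx%d: can't initialize UNIBUS buffers\n", unit);
 *		return;
 *	}
 */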

/*
 * Set up an ifrw structure by allocating UNIBUS map registers,
 * possibly a buffered data path, and initializing the fields of
 * the ifrw structure to minimize run-time overhead.
 */
static int
if_ubaalloc(ifu, ifrw, nmr)
	struct ifubinfo *ifu;
	register struct ifrw *ifrw;
	int nmr;
{
	register int info;

	info =
	    uballoc(ifu->iff_softc, ifrw->ifrw_addr, nmr*VAX_NBPG + ifu->iff_hlen,
		ifu->iff_flags);
	if (info == 0)
		return (0);
	ifrw->ifrw_info = info;
	ifrw->ifrw_bdp = UBAI_BDP(info);
	ifrw->ifrw_proto = UBAMR_MRV | (UBAI_BDP(info) << UBAMR_DPSHIFT);
	ifrw->ifrw_mr = &ifu->iff_ubamr[UBAI_MR(info) + (ifu->iff_hlen ? 1 : 0)];
	return (1);
}

/*
 * Pull read data off an interface.
 * Totlen is length of data, with local net header stripped.
 * When full cluster sized units are present
 * on the interface on cluster boundaries we can get them more
 * easily by remapping, and take advantage of this here.
 * Save a pointer to the interface structure and the total length,
 * so that protocols can determine where incoming packets arrived.
 * Note: we may be called to receive from a transmit buffer by some
 * devices.  In that case, we must force normal mapping of the buffer,
 * so that the correct data will appear (only UNIBUS maps are
 * changed when remapping the transmit buffers).
 */
struct mbuf *
if_ubaget(ifu, ifr, totlen, ifp)
	struct ifubinfo *ifu;
	register struct ifrw *ifr;
	register int totlen;
	struct ifnet *ifp;
{
	struct mbuf *top, **mp;
	register struct mbuf *m;
	register caddr_t cp = ifr->ifrw_addr + ifu->iff_hlen, pp;
	register int len;

	top = 0;
	mp = &top;
	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == 0)
		return ((struct mbuf *)NULL);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	m->m_len = MHLEN;

	if (ifr->ifrw_flags & IFRW_W)
		rcv_xmtbuf((struct ifxmt *)ifr);
	while (totlen > 0) {
		if (top) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == 0) {
				m_freem(top);
				top = 0;
				goto out;
			}
			m->m_len = MLEN;
		}
		len = totlen;
		if (len >= MINCLSIZE) {
			struct pte *cpte, *ppte;
			int x, *ip, i;

			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0)
				goto nopage;
			len = min(len, MCLBYTES);
			m->m_len = len;
			if (!claligned(cp))
				goto copy;

			/*
			 * Switch pages mapped to UNIBUS with new page pp,
			 * as quick form of copy.  Remap UNIBUS and invalidate.
			 */
			pp = mtod(m, char *);
			cpte = (struct pte *)kvtopte(cp);
			ppte = (struct pte *)kvtopte(pp);
			x = vax_btop(cp - ifr->ifrw_addr);
			ip = (int *)&ifr->ifrw_mr[x];
			for (i = 0; i < MCLBYTES/VAX_NBPG; i++) {
				struct pte t;
				t = *ppte; *ppte++ = *cpte; *cpte = t;
				*ip++ = cpte++->pg_pfn | ifr->ifrw_proto;
				mtpr(cp, PR_TBIS);
				cp += VAX_NBPG;
				mtpr((caddr_t)pp, PR_TBIS);
				pp += VAX_NBPG;
			}
			goto nocopy;
		}
nopage:
		if (len < m->m_len) {
			/*
			 * Place initial small packet/header at end of mbuf.
			 */
			if (top == 0 && len + max_linkhdr <= m->m_len)
				m->m_data += max_linkhdr;
			m->m_len = len;
		} else
			len = m->m_len;
copy:
		bcopy(cp, mtod(m, caddr_t), (unsigned)len);
		cp += len;
nocopy:
		*mp = m;
		mp = &m->m_next;
		totlen -= len;
	}
out:
	if (ifr->ifrw_flags & IFRW_W)
		restor_xmtbuf((struct ifxmt *)ifr);
	return (top);
}
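
/*
 * For illustration: a sketch of a receive-interrupt path using if_ubaget().
 * The names sc, n, len and the final hand-off are hypothetical.  Note that
 * totlen must already have the local network header length subtracted, as
 * described above; the header itself sits at the start of the buffer
 * (ifr->ifrw_addr) and can be examined there.
 *
 *	struct mbuf *m;
 *
 *	m = if_ubaget(&sc->xx_ifuba, &sc->xx_ifr[n],
 *	    len - sizeof(struct ether_header), &sc->xx_if);
 *	if (m == NULL) {
 *		sc->xx_if.if_ierrors++;		-- out of mbufs; drop packet
 *		return;
 *	}
 *	-- hand the mbuf chain to the appropriate input routine
 */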

/*
 * Change the mapping on a transmit buffer so that if_ubaget may
 * receive from that buffer.  Copy data from any pages mapped to UNIBUS
 * into the pages mapped to normal kernel virtual memory, so that
 * they can be accessed and swapped as usual.  We take advantage
 * of the fact that clusters are placed on the xtofree list
 * in inverse order, finding the last one.
 */
static void
rcv_xmtbuf(ifw)
	register struct ifxmt *ifw;
{
	register struct mbuf *m;
	struct mbuf **mprev;
	register int i;
	char *cp;

	while ((i = ffs((long)ifw->ifw_xswapd)) != 0) {
		cp = ifw->ifw_base + i * MCLBYTES;
		i--;
		ifw->ifw_xswapd &= ~(1<<i);
		mprev = &ifw->ifw_xtofree;
		for (m = ifw->ifw_xtofree; m && m->m_next; m = m->m_next)
			mprev = &m->m_next;
		if (m == NULL)
			break;
		bcopy(mtod(m, caddr_t), cp, MCLBYTES);
		(void) m_free(m);
		*mprev = NULL;
	}
	ifw->ifw_xswapd = 0;
	for (i = 0; i < ifw->ifw_nmr; i++)
		ifw->ifw_mr[i] = ifw->ifw_wmap[i];
}

/*
 * Put a transmit buffer back together after doing an if_ubaget on it,
 * which may have swapped pages.
 */
static void
restor_xmtbuf(ifw)
	register struct ifxmt *ifw;
{
	register int i;

	for (i = 0; i < ifw->ifw_nmr; i++)
		ifw->ifw_wmap[i] = ifw->ifw_mr[i];
}
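
/*
 * For illustration: rcv_xmtbuf() above and if_ubaput() below share one
 * bookkeeping convention, which this hypothetical helper spells out.
 * Bit b of ifw_xswapd stands for the data cluster starting (b + 1)
 * clusters past ifw_base; the first cluster holds only the header at its
 * tail and is never swapped.  This assumes a non-zero header length, as
 * the drivers using these routines supply.
 *
 *	static caddr_t
 *	ifw_cluster_addr(ifw, b)	-- hypothetical, for clarity only
 *		struct ifxmt *ifw;
 *		int b;
 *	{
 *		return (ifw->ifw_base + (b + 1) * MCLBYTES);
 *	}
 */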

/*
 * Map a chain of mbufs onto a network interface
 * in preparation for an i/o operation.
 * The argument chain of mbufs includes the local network
 * header which is copied to be in the mapped, aligned
 * i/o space.
 */
int
if_ubaput(ifu, ifw, m)
	struct ifubinfo *ifu;
	register struct ifxmt *ifw;
	register struct mbuf *m;
{
	register struct mbuf *mp;
	register caddr_t cp, dp;
	register int i;
	int xswapd = 0;
	int x, cc, t;

	cp = ifw->ifw_addr;
	while (m) {
		dp = mtod(m, char *);
		if (claligned(cp) && claligned(dp) &&
		    (m->m_len == MCLBYTES || m->m_next == (struct mbuf *)0)) {
			struct pte *pte;
			int *ip;

			pte = (struct pte *)kvtopte(dp);
			x = vax_btop(cp - ifw->ifw_addr);
			ip = (int *)&ifw->ifw_mr[x];
			for (i = 0; i < MCLBYTES/VAX_NBPG; i++)
				*ip++ = ifw->ifw_proto | pte++->pg_pfn;
			xswapd |= 1 << (x >> (MCLSHIFT - VAX_PGSHIFT));
			mp = m->m_next;
			m->m_next = ifw->ifw_xtofree;
			ifw->ifw_xtofree = m;
			cp += m->m_len;
		} else {
			bcopy(mtod(m, caddr_t), cp, (unsigned)m->m_len);
			cp += m->m_len;
			MFREE(m, mp);
		}
		m = mp;
	}

	/*
	 * Xswapd is the set of clusters we just mapped out.  Ifw->ifw_xswapd
	 * is the set of clusters mapped out from before.  We compute
	 * the number of clusters involved in this operation in x.
	 * Clusters mapped out before and involved in this operation
	 * should be unmapped so original pages will be accessed by the device.
	 */
	cc = cp - ifw->ifw_addr;
	x = ((cc - ifu->iff_hlen) + MCLBYTES - 1) >> MCLSHIFT;
	ifw->ifw_xswapd &= ~xswapd;
	while ((i = ffs((long)ifw->ifw_xswapd)) != 0) {
		i--;
		if (i >= x)
			break;
		ifw->ifw_xswapd &= ~(1<<i);
		i *= MCLBYTES/VAX_NBPG;
		for (t = 0; t < MCLBYTES/VAX_NBPG; t++) {
			ifw->ifw_mr[i] = ifw->ifw_wmap[i];
			i++;
		}
	}
	ifw->ifw_xswapd |= xswapd;
	return (cc);
}
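
/*
 * For illustration: a sketch of the transmit side around if_ubaput().
 * The names sc, n and the descriptor programming are hypothetical; the
 * returned byte count is what the device should be told to transmit,
 * starting at the UNIBUS address of this transmit buffer.  Clusters that
 * were remapped instead of copied stay on ifw_xtofree and must be freed
 * once the transmit has completed.
 *
 *	-- in the start/output routine:
 *	IF_DEQUEUE(&sc->xx_if.if_snd, m);
 *	if (m == NULL)
 *		return;
 *	cc = if_ubaput(&sc->xx_ifuba, &sc->xx_ifw[n], m);
 *	-- program the device to send cc bytes from this buffer
 *
 *	-- in the transmit-complete interrupt:
 *	if (sc->xx_ifw[n].ifw_xtofree) {
 *		m_freem(sc->xx_ifw[n].ifw_xtofree);
 *		sc->xx_ifw[n].ifw_xtofree = 0;
 *	}
 */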