1*17221Stef /* if_dmc.c 6.3 84/09/27 */ 25725Sroot 35725Sroot #include "dmc.h" 45725Sroot #if NDMC > 0 55725Sroot #define printd if(dmcdebug)printf 611189Ssam int dmcdebug = 0; 75725Sroot /* 85725Sroot * DMC11 device driver, internet version 95725Sroot * 10*17221Stef * Bill Nesheim (bill@cornell.arpa or {vax135,uw-beaver,ihnp4}!bill) 11*17221Stef * Cornell University 12*17221Stef * Department of Computer Science 1311191Ssam * 14*17221Stef * Based loosly on 4.2BSD release 15*17221Stef * The UNIBUS support routines were taken from Lou Salkind's DEUNA driver 16*17221Stef * 17*17221Stef * TO DO: 18*17221Stef * generalize unibus routines 19*17221Stef * add timeout to mark interface down when other end of link dies 20*17221Stef * figure out better way to check for completed buffers 21*17221Stef * (not critical with DMC, only 7 bufs, but may cause problems 22*17221Stef * on a DMR) 235725Sroot */ 249794Ssam #include "../machine/pte.h" 255725Sroot 2617111Sbloom #include "param.h" 2717111Sbloom #include "systm.h" 2817111Sbloom #include "mbuf.h" 2917111Sbloom #include "buf.h" 30*17221Stef #include "ioctl.h" /* must precede tty.h */ 3117111Sbloom #include "tty.h" 3217111Sbloom #include "protosw.h" 3317111Sbloom #include "socket.h" 3417111Sbloom #include "vmmac.h" 3517111Sbloom #include "errno.h" 368460Sroot 378460Sroot #include "../net/if.h" 389176Ssam #include "../net/netisr.h" 398460Sroot #include "../net/route.h" 408416Swnj #include "../netinet/in.h" 418416Swnj #include "../netinet/in_systm.h" 428460Sroot 438460Sroot #include "../vax/cpu.h" 448460Sroot #include "../vax/mtpr.h" 4517111Sbloom #include "if_uba.h" 4617111Sbloom #include "if_dmc.h" 478460Sroot #include "../vaxuba/ubareg.h" 488460Sroot #include "../vaxuba/ubavar.h" 495725Sroot 505725Sroot /* 515725Sroot * Driver information for auto-configuration stuff. 
 */
int     dmcprobe(), dmcattach(), dmcinit(), dmcioctl();
int     dmcoutput(), dmcreset();
struct  uba_device *dmcinfo[NDMC];
u_short dmcstd[] = { 0 };
struct  uba_driver dmcdriver =
        { dmcprobe, 0, dmcattach, 0, dmcstd, "dmc", dmcinfo };

/*
 * As long as we use clists for command queues, we only have 28 bytes to use!
 * The DMC-11 only has 7 buffers; the DMR-11 has 64.
 */
#define NRCV    7
#define NXMT    (NRCV - 2)      /* avoid running out of buffers on recv end */
#define NTOT    (NRCV + NXMT)

/* intervals between console reports of recurring soft errors */
#define DMC_RPNBFS      50      /* report "no buffers" every 50th */
#define DMC_RPDSC       1       /* report disconnects every time */
#define DMC_RPTMO       20      /* report timeouts every 20th */
#define DMC_RPDCK       5       /* report data check errors every 5th */

/*
 * One entry on a device command queue.  Commands are handed to the
 * DMC input port one at a time (see dmcload()/dmcrint()).
 */
struct dmc_command {
        char    qp_cmd;         /* command (port opcode) */
        short   qp_ubaddr;      /* buffer address (low 16 UNIBUS bits) */
        short   qp_cc;          /* character count || XMEM (addr bits 17:16) */
        struct  dmc_command *qp_next;   /* next command on queue */
};

/*
 * The dmcuba structures generalize the ifuba structure
 * to an arbitrary number of receive and transmit buffers.
 */
struct ifxmt {
        struct  ifrw x_ifrw;            /* mapping info */
        struct  pte x_map[IF_MAXNUBAMR]; /* output base pages */
        short   x_xswapd;               /* mask of clusters swapped */
        struct  mbuf *x_xtofree;        /* pages being dma'd out */
};
struct dmcuba {
        short   ifu_uban;               /* uba number */
        short   ifu_hlen;               /* local net header length */
        struct  uba_regs *ifu_uba;      /* uba regs, in vm */
        struct  ifrw ifu_r[NRCV];       /* receive information */
        struct  ifxmt ifu_w[NXMT];      /* transmit information */
        /* these should only be pointers */
        short   ifu_flags;              /* used during uballoc's */
};

/*
 * Per-buffer bookkeeping: UNIBUS location, size, and current owner
 * (host vs. DMC).
 */
struct dmcbufs {
        int     ubinfo;         /* from uballoc */
        short   cc;             /* buffer size */
        short   flags;          /* access control */
};
#define DBUF_OURS       0       /* buffer is available */
#define DBUF_DMCS       1       /* buffer claimed by somebody */
#define DBUF_XMIT       4       /* transmit buffer */
#define DBUF_RCV        8       /* receive buffer */

struct mbuf *dmc_get();

/*
 * DMC software status per interface.
 *
 * Each interface is referenced by a network interface structure,
 * sc_if, which the routing code uses to locate the interface.
 * This structure contains the output queue for the interface, its address, ...
 * We also have, for each interface, a set of 7 UBA interface structures
 * for each, which
 * contain information about the UNIBUS resources held by the interface:
 * map registers, buffered data paths, etc.  Information is cached in this
 * structure for use by the if_uba.c routines in running the interface
 * efficiently.
 */
struct dmc_softc {
        short   sc_oused;       /* output buffers currently in use */
        short   sc_iused;       /* input buffers given to DMC */
        short   sc_flag;        /* flags (DMC_ALLOC, DMC_BMAPPED) */
        struct  ifnet sc_if;    /* network-visible interface */
        struct  dmcbufs sc_rbufs[NRCV]; /* receive buffer info */
        struct  dmcbufs sc_xbufs[NXMT]; /* transmit buffer info */
        struct  dmcuba sc_ifuba;        /* UNIBUS resources */
        int     sc_ubinfo;      /* UBA mapping info for base table */
        int     sc_errors[4];   /* non-fatal error counters */
#define sc_datck        sc_errors[0]
#define sc_timeo        sc_errors[1]
#define sc_nobuf        sc_errors[2]
#define sc_disc         sc_errors[3]
        /* command queue stuff */
        struct  dmc_command sc_cmdbuf[NTOT+3];
        struct  dmc_command *sc_qhead;  /* head of command queue */
        struct  dmc_command *sc_qtail;  /* tail of command queue */
        struct  dmc_command *sc_qactive;        /* command in progress */
        struct  dmc_command *sc_qfreeh; /* head of list of free cmd buffers */
        struct  dmc_command *sc_qfreet; /* tail of list of free cmd buffers */
        /* end command queue stuff */
} dmc_softc[NDMC];

/* flags */
#define DMC_ALLOC       01      /* unibus resources allocated */
#define DMC_BMAPPED     02      /* base table mapped */

struct dmc_base {
        short   d_base[128];    /* DMC base table */
} dmc_base[NDMC];

/*
 * Queue manipulation macros.
 * NOTE(review): these expand to multiple statements and are not wrapped
 * in do { ... } while (0), so they must never be used as the unbraced
 * body of an if/else; current callers brace them — keep it that way.
 */
#define QUEUE_AT_HEAD(qp, head, tail) \
        (qp)->qp_next = (head); \
        (head) = (qp); \
        if ((tail) == (struct dmc_command *) 0) \
                (tail) = (head)

#define QUEUE_AT_TAIL(qp, head, tail) \
        if ((tail)) \
                (tail)->qp_next = (qp); \
        else \
                (head) = (qp); \
        (qp)->qp_next = (struct dmc_command *) 0; \
        (tail) = (qp)

#define DEQUEUE(head, tail) \
        (head) = (head)->qp_next;\
        if ((head) == (struct dmc_command *) 0)\
                (tail) = (head)

/*
 * Probe for the device at autoconfiguration time: master-clear the DMC
 * twice (once to see that it runs, once with RQI|IEI set so the device
 * fields an interrupt the autoconf code can catch) and report whether
 * the microprocessor came up (DMC_RUN set).
 */
dmcprobe(reg)
        caddr_t reg;
{
        register int br, cvec;
        register struct dmcdevice *addr = (struct dmcdevice *)reg;
        register int i;

#ifdef lint
        br = 0; cvec = br; br = cvec;
        dmcrint(0); dmcxint(0);
#endif
        addr->bsel1 = DMC_MCLR;
        /* busy-wait for the microprocessor to start running */
        for (i = 100000; i && (addr->bsel1 & DMC_RUN) == 0; i--)
                ;
        if ((addr->bsel1 & DMC_RUN) == 0)
                return (0);
        /* MCLR is self clearing */
        addr->bsel0 = DMC_RQI|DMC_IEI;
        DELAY(100000);
        addr->bsel1 = DMC_MCLR;
        for (i = 100000; i && (addr->bsel1 & DMC_RUN) == 0; i--)
                ;
        return (1);
}

/*
 * Interface exists: make available by filling in network interface
 * record.  System will initialize the interface when it is ready
 * to accept packets.
 */
dmcattach(ui)
        register struct uba_device *ui;
{
        register struct dmc_softc *sc = &dmc_softc[ui->ui_unit];
        register struct dmc_command *qp;

        /* fill in the ifnet so the network code can drive us */
        sc->sc_if.if_unit = ui->ui_unit;
        sc->sc_if.if_name = "dmc";
        sc->sc_if.if_mtu = DMCMTU;
        sc->sc_if.if_init = dmcinit;
        sc->sc_if.if_output = dmcoutput;
        sc->sc_if.if_ioctl = dmcioctl;
        sc->sc_if.if_reset = dmcreset;
        sc->sc_if.if_flags = IFF_POINTOPOINT;
        sc->sc_ifuba.ifu_flags = UBA_CANTWAIT;

        /* set up command queues */
        sc->sc_qfreeh = sc->sc_qfreet =
                sc->sc_qhead = sc->sc_qtail = sc->sc_qactive =
                (struct dmc_command *) 0;
        /*
         * Set up free command buffer list.
         * NOTE(review): sc_cmdbuf has NTOT+3 entries but this loop only
         * threads NTOT+2 of them onto the free list, leaving the last
         * entry unused — confirm whether the bound was meant to be NTOT+3.
         */
        for (qp = &sc->sc_cmdbuf[0]; qp < &sc->sc_cmdbuf[NTOT+2]; qp++ ) {
                QUEUE_AT_HEAD( qp, sc->sc_qfreeh, sc->sc_qfreet);
        }
        if_attach(&sc->sc_if);
}

/*
 * Reset of interface after UNIBUS reset.
 * If interface is on specified UBA, reset its state.
 */
dmcreset(unit, uban)
        int unit, uban;
{
        register struct uba_device *ui;
        register struct dmc_softc *sc = &dmc_softc[unit];

        if (unit >= NDMC || (ui = dmcinfo[unit]) == 0 || ui->ui_alive == 0 ||
            ui->ui_ubanum != uban)
                return;
        printf(" dmc%d", unit);
        sc->sc_flag = 0;        /* previous unibus resources no longer valid */
        dmcinit(unit);
}

/*
 * Initialization of interface; reinitialize UNIBUS usage.
 */
dmcinit(unit)
        int unit;
{
        register struct dmc_softc *sc = &dmc_softc[unit];
        register struct uba_device *ui = dmcinfo[unit];
        register struct dmcdevice *addr;
        register struct ifnet *ifp = &sc->sc_if;
        register struct ifrw *ifrw;
        register struct ifxmt *ifxp;
        register struct dmcbufs *rp;
        int base;
        struct sockaddr_in *sin;

        printd("dmcinit\n");
        addr = (struct dmcdevice *)ui->ui_addr;

        /* both local and destination addresses must be set before we run */
        sin = (struct sockaddr_in *) &ifp->if_addr;
        if (sin->sin_addr.s_addr == 0)  /* if address still unknown */
                return;
        sin = (struct sockaddr_in *) &ifp->if_dstaddr;
        if (sin->sin_addr.s_addr == 0)  /* if address still unknown */
                return;

        if ((addr->bsel1&DMC_RUN) == 0) {
                printf("dmcinit: DMC not running\n");
                ifp->if_flags &= ~(IFF_RUNNING|IFF_UP);
                return;
        }
        /* map base table (microcode scratch area) onto the UNIBUS, once */
        if ((sc->sc_flag&DMC_BMAPPED) == 0) {
                sc->sc_ubinfo = uballoc(ui->ui_ubanum,
                        (caddr_t)&dmc_base[unit], sizeof (struct dmc_base), 0);
                sc->sc_flag |= DMC_BMAPPED;
        }
        /* initialize UNIBUS resources */
        sc->sc_iused = sc->sc_oused = 0;
        if ((sc->sc_flag&DMC_ALLOC) == 0) {
                if (dmc_ubainit(&sc->sc_ifuba, ui->ui_ubanum, 0,
                    (int)btoc(DMCMTU)) == 0) {
                        printf("dmc%d: can't initialize\n", unit);
                        ifp->if_flags &= ~IFF_UP;
                        return;
                }
                sc->sc_flag |= DMC_ALLOC;
        }

        /* initialize buffer pool */
        /* receives: record each buffer's 18-bit UNIBUS address */
        ifrw = &sc->sc_ifuba.ifu_r[0];
        for (rp = &sc->sc_rbufs[0]; rp < &sc->sc_rbufs[NRCV]; rp++) {
                rp->ubinfo = ifrw->ifrw_info & 0x3ffff;
                rp->cc = DMCMTU;
                rp->flags = DBUF_OURS|DBUF_RCV;
                printd("rcv: 0x%x\n",rp->ubinfo);
                ifrw++;
        }
        /* transmits */
        ifxp = &sc->sc_ifuba.ifu_w[0];
        for (rp = &sc->sc_xbufs[0]; rp < &sc->sc_xbufs[NXMT]; rp++) {
                rp->ubinfo = ifxp->x_ifrw.ifrw_info & 0x3ffff;
                rp->cc = 0;
                rp->flags = DBUF_OURS|DBUF_XMIT;
                printd("xmit: 0x%x\n",rp->ubinfo);
                ifxp++;
        }
        /* hand the base table address to the device */
        base = sc->sc_ubinfo & 0x3ffff;
        printd(" base 0x%x\n", base);
        dmcload(sc, DMC_BASEI, base, (base>>2)&DMC_XMEM);
        /*
         * Select operating mode from the config-file flags:
         * half/full duplex and primary/secondary station.
         */
        if (ui->ui_flags == 0)
                /* use DDCMP mode in full duplex */
                dmcload(sc, DMC_CNTLI, 0, 0);
        else if (ui->ui_flags == 1)
                /* use MAINTENANCE mode */
                dmcload(sc, DMC_CNTLI, 0, DMC_MAINT );
        else if (ui->ui_flags == 2)
                /* use DDCMP half duplex as primary station */
                dmcload(sc, DMC_CNTLI, 0, DMC_HDPLX);
        else if (ui->ui_flags == 3)
                /* use DDCMP half duplex as secondary station */
                dmcload(sc, DMC_CNTLI, 0, DMC_HDPLX | DMC_SEC);

        /* queue first NRCV buffers for DMC to fill */
        for (rp = &sc->sc_rbufs[0]; rp < &sc->sc_rbufs[NRCV]; rp++) {
                rp->flags |= DBUF_DMCS;
                dmcload(sc, DMC_READ, rp->ubinfo,
                        (((rp->ubinfo>>2)&DMC_XMEM)|rp->cc));
                sc->sc_iused++;
        }

        /* enable output interrupts; loop until the device latches the bit */
        while ((addr->bsel2&DMC_IEO) == 0)
                addr->bsel2 |= DMC_IEO;
        ifp->if_flags |= IFF_UP|IFF_RUNNING;
}

/*
 * Start output on interface.  Get another datagram
 * to send from the interface queue and map it to
 * the interface before starting output.
 *
 * Must be called at spl 5
 */
dmcstart(dev)
        dev_t dev;
{
        int unit = minor(dev);
        register struct dmc_softc *sc = &dmc_softc[unit];
        struct mbuf *m;
        register struct dmcbufs *rp;
        register int n;       /* index of rp in sc_xbufs / ifu_w */

        if ((sc->sc_flag & DMC_ALLOC) == 0) {
                printf("dmcstart: no unibus resources!!\n");
                return;
        }
        /*
         * Dequeue up to NXMT requests and map them to the UNIBUS.
         * If no more requests, or no dmc buffers available, just return.
         */
        n = 0;
        for (rp = &sc->sc_xbufs[0]; rp < &sc->sc_xbufs[NXMT]; rp++ ) {
                /* find an available buffer */
                if ((rp->flags&DBUF_DMCS) == 0){
                        IF_DEQUEUE(&sc->sc_if.if_snd, m);
                        if (m == 0)
                                return;
                        if ((rp->flags&DBUF_XMIT) == 0)
                                printf("dmcstart: not xmit buf\n");
                        /* mark it dmcs */
                        rp->flags |= (DBUF_DMCS);
                        /*
                         * Have request mapped to UNIBUS for transmission
                         * and start the output.
                         */
                        rp->cc = (dmcput(&sc->sc_ifuba, n, m))&DMC_CCOUNT;
                        sc->sc_oused++;
                        dmcload(sc, DMC_WRITE, rp->ubinfo,
                                rp->cc | ((rp->ubinfo>>2)&DMC_XMEM));
                }
                n++;
        }
}

/*
 * Utility routine to load the DMC device registers.
4025725Sroot */ 4035725Sroot dmcload(sc, type, w0, w1) 4045725Sroot register struct dmc_softc *sc; 4055725Sroot int type, w0, w1; 4065725Sroot { 4075725Sroot register struct dmcdevice *addr; 408*17221Stef register int unit, sps; 409*17221Stef register struct dmc_command *qp; 4105725Sroot 411*17221Stef unit = (sc - dmc_softc)/ sizeof (struct dmc_softc); 4125725Sroot addr = (struct dmcdevice *)dmcinfo[unit]->ui_addr; 4135725Sroot sps = spl5(); 414*17221Stef 415*17221Stef /* grab a command buffer from the free list */ 416*17221Stef if ((qp = sc->sc_qfreeh) == (struct dmc_command *)0) 417*17221Stef panic("dmc command queue overflow"); 418*17221Stef DEQUEUE(sc->sc_qfreeh, sc->sc_qfreet); 419*17221Stef 420*17221Stef /* fill in requested info */ 421*17221Stef qp->qp_cmd = (type | DMC_RQI); 422*17221Stef qp->qp_ubaddr = w0; 423*17221Stef qp->qp_cc = w1; 424*17221Stef 425*17221Stef if (sc->sc_qactive) { /* command in progress */ 426*17221Stef if (type == DMC_READ) { 427*17221Stef QUEUE_AT_HEAD(qp, sc->sc_qhead, sc->sc_qtail); 428*17221Stef } else { 429*17221Stef QUEUE_AT_TAIL(qp, sc->sc_qhead, sc->sc_qtail); 430*17221Stef } 431*17221Stef } else { /* command port free */ 432*17221Stef sc->sc_qactive = qp; 433*17221Stef addr->bsel0 = qp->qp_cmd; 4345725Sroot dmcrint(unit); 435*17221Stef } 4365725Sroot splx(sps); 4375725Sroot } 4385725Sroot 4395725Sroot /* 4405725Sroot * DMC interface receiver interrupt. 4415725Sroot * Ready to accept another command, 4425725Sroot * pull one off the command queue. 
 */
dmcrint(unit)
        int unit;
{
        register struct dmc_softc *sc;
        register struct dmcdevice *addr;
        register struct dmc_command *qp;
        register int n;

        addr = (struct dmcdevice *)dmcinfo[unit]->ui_addr;
        sc = &dmc_softc[unit];
        if ((qp = sc->sc_qactive) == (struct dmc_command *) 0) {
                printf("dmcrint: no command\n");
                return;
        }
        /* while the device is asking for command data (RDYI), feed it */
        while (addr->bsel0&DMC_RDYI) {
                /* hand the buffer address and count to the input port */
                addr->sel4 = qp->qp_ubaddr;
                addr->sel6 = qp->qp_cc;
                /* dropping RQI completes the handshake for this command */
                addr->bsel0 &= ~(DMC_IEI|DMC_RQI);
                printd("load done, cmd 0x%x, ubaddr 0x%x, cc 0x%x\n",
                        qp->qp_cmd, qp->qp_ubaddr, qp->qp_cc);
                /* free command buffer */
                QUEUE_AT_HEAD(qp, sc->sc_qfreeh, sc->sc_qfreet);
                /* wait for the device to drop RDYI before issuing more */
                while (addr->bsel0 & DMC_RDYI) {
                        /*
                         * Can't check for RDYO here 'cause
                         * this routine isn't reentrant!
                         */
                        DELAY(5);
                }
                /* move on to next command */
                if ((sc->sc_qactive = sc->sc_qhead)==(struct dmc_command *) 0)
                        /* all done */
                        break;
                /* more commands to do, start the next one */
                qp = sc->sc_qactive;
                DEQUEUE(sc->sc_qhead, sc->sc_qtail);
                addr->bsel0 = qp->qp_cmd;
                /* give the device a bounded time to raise RDYI again */
                n = RDYSCAN;
                while (n-- && (addr->bsel0&DMC_RDYI) == 0)
                        DELAY(5);
        }
        /* still work pending: re-enable input interrupts and request */
        if (sc->sc_qactive) {
                addr->bsel0 |= DMC_IEI|DMC_RQI;
                /* VMS does it twice !*$%@# */
                addr->bsel0 |= DMC_IEI|DMC_RQI;
        }
}

/*
 * DMC interface transmitter interrupt.
 * A transfer may have completed, check for errors.
 * If it was a read, notify appropriate protocol.
 * If it was a write, pull the next one off the queue.
 */
dmcxint(unit)
        int unit;
{
        register struct dmc_softc *sc;
        register struct ifnet *ifp;
        struct uba_device *ui = dmcinfo[unit];
        struct dmcdevice *addr;
        struct mbuf *m;
        register struct ifqueue *inq;
        int arg, pkaddr, cmd, len;
        register struct ifrw *ifrw;
        register struct dmcbufs *rp;

        addr = (struct dmcdevice *)ui->ui_addr;
        sc = &dmc_softc[unit];
        ifp = &sc->sc_if;

        cmd = addr->bsel2 & 0xff;
        arg = addr->sel6 & 0xffff;
        if ((cmd&DMC_RDYO) == 0) {
                printf("dmc%d: bogus xmit intr\n", unit);
                return;
        }
        /* reconstruct UNIBUS address of buffer returned to us */
        pkaddr = ((arg&DMC_XMEM)<<2)|(addr->sel4 & 0xffff);
        /* release port */
        addr->bsel2 &= ~DMC_RDYO;
        switch (cmd & 07) {

        case DMC_OUR:
                /*
                 * A read has completed.
                 * Pass packet to type specific
                 * higher-level input routine.
                 */
                ifp->if_ipackets++;
                len = arg & DMC_CCOUNT;
                /* find location in dmcuba struct */
                ifrw = &sc->sc_ifuba.ifu_r[0];
                rp = &sc->sc_rbufs[0];
                for (; rp < &sc->sc_rbufs[NRCV]; rp++) {
                        if (rp->ubinfo == pkaddr)
                                goto foundrcv;
                        ifrw++;
                }
                printf("bad rcv pkt addr 0x%x len 0x%x\n", pkaddr, len);
                /*
                 * NOTE(review): on this path ifrw has walked one past
                 * ifu_r[NRCV-1], so the reload at "setup" reads past the
                 * array — verify whether this path should instead bail out.
                 */
                goto setup;

        foundrcv:
                if ((rp->flags&DBUF_DMCS) == 0) {
                        printf("dmcxint: done unalloc rbuf\n");
                }
                switch (ifp->if_addr.sa_family) {
#ifdef INET
                case AF_INET:
                        schednetisr(NETISR_IP);
                        inq = &ipintrq;
                        break;
#endif

                default:
                        printf("dmc%d: unknown address type %d\n", unit,
                                ifp->if_addr.sa_family);
                        goto setup;
                }

                /* copy (or page-flip) the data out of the DMA buffer */
                m = dmc_get(&sc->sc_ifuba, ifrw, len, 0);
                if (m == (struct mbuf *)0)
                        goto setup;
                if (IF_QFULL(inq)) {
                        IF_DROP(inq);
                        m_freem(m);
                } else
                        IF_ENQUEUE(inq, m);
        setup:
                /* hand the receive buffer back to the DMC */
                arg = ifrw->ifrw_info & 0x3ffff;
                dmcload(sc, DMC_READ, arg, ((arg >> 2) & DMC_XMEM) | DMCMTU);
                break;

        case DMC_OUX:
                /*
                 * A write has completed, start another
                 * transfer if there is more data to send.
                 */
                ifp->if_opackets++;
                printd("OUX pkaddr 0x%x\n",pkaddr);
                /* find associated dmcbuf structure */
                rp = &sc->sc_xbufs[0];
                for (; rp < &sc->sc_xbufs[NXMT]; rp++) {
                        if (rp->ubinfo == pkaddr)
                                goto found;
                }
                printf("dmc%d: bad packet address 0x%x\n",
                        unit, pkaddr);
                break;
        found:
                if ((rp->flags&DBUF_DMCS) == 0)
                        printf("dmc returned unallocated packet 0x%x\n",
                                pkaddr);
                /* mark buffer free */
                rp->flags &= ~(DBUF_DMCS);
                sc->sc_oused--;
                dmcstart(unit);
                break;

        case DMC_CNTLO:
                /* control-out: a device-reported error condition */
                arg &= DMC_CNTMASK;
                if (arg&DMC_FATAL) {
                        register int i;

                        printf("dmc%d: fatal error, flags=%b\n",
                                unit, arg, CNTLO_BITS);
                        ifp->if_flags &= ~(IFF_RUNNING|IFF_UP);
                        /* master clear device */
                        addr->bsel1 = DMC_MCLR;
                        for (i = 100000; i && (addr->bsel1 & DMC_RUN) == 0; i--)
                                ;
                        dmcinit(unit);
                        ifp->if_ierrors++;
                        break;
                } else {
                        /*
                         * Soft error: count it, but only log to the
                         * console every Nth occurrence (N per error type,
                         * see the DMC_RP* intervals above).
                         */
                        switch(arg) {
                        case DMC_NOBUFS:
                                ifp->if_ierrors++;
                                if((sc->sc_nobuf++ % DMC_RPNBFS) != 0)
                                        break;
                                goto report;
                        case DMC_DISCONN:
                                if((sc->sc_disc++ % DMC_RPDSC) != 0)
                                        break;
                                goto report;
                        case DMC_TIMEOUT:
                                if((sc->sc_timeo++ % DMC_RPTMO) != 0)
                                        break;
                                goto report;
                        case DMC_DATACK:
                                ifp->if_oerrors++;
                                if((sc->sc_datck++ % DMC_RPDCK) != 0)
                                        break;
                                goto report;
                        default:
                                goto report;
                        }
                        break;
                report:
                        printf("dmc%d: soft error, flags=%b\n",
                                unit, arg, CNTLO_BITS);
                }
                break;

        default:
                printf("dmc%d: bad control %o\n", unit, cmd);
        }
        return;
}

/*
 * DMC output routine.
 * Just send the data, header was supplied by
 * upper level protocol routines.
 */
dmcoutput(ifp, m, dst)
        register struct ifnet *ifp;
        register struct mbuf *m;
        struct sockaddr *dst;
{
        int s;

        if (dst->sa_family != ifp->if_addr.sa_family) {
                printf("dmc%d: af%d not supported\n", ifp->if_unit,
                        dst->sa_family);
                m_freem(m);
                return (EAFNOSUPPORT);
        }
        s = spl5();
        if (IF_QFULL(&ifp->if_snd)) {
                IF_DROP(&ifp->if_snd);
                m_freem(m);
                splx(s);
                return (ENOBUFS);
        }
        IF_ENQUEUE(&ifp->if_snd, m);
        dmcstart(ifp->if_unit);
        splx(s);
        return (0);
}

/*
 * Process an ioctl request.
68813061Ssam */ 68913061Ssam dmcioctl(ifp, cmd, data) 69013061Ssam register struct ifnet *ifp; 69113061Ssam int cmd; 69213061Ssam caddr_t data; 69313061Ssam { 69413061Ssam struct ifreq *ifr = (struct ifreq *)data; 69513061Ssam struct sockaddr_in *sin; 69613061Ssam int s = splimp(), error = 0; 69713061Ssam 69813061Ssam switch (cmd) { 69913061Ssam 70013061Ssam case SIOCSIFADDR: 70113061Ssam if (ifp->if_flags & IFF_RUNNING) 702*17221Stef if_rtinit(ifp, -1); /* delete previous route */ 70313061Ssam sin = (struct sockaddr_in *)&ifr->ifr_addr; 70413085Ssam ifp->if_addr = *(struct sockaddr *)sin; 70513061Ssam ifp->if_net = in_netof(sin->sin_addr); 706*17221Stef ifp->if_flags |= IFF_UP; 707*17221Stef /* set up routing table entry */ 708*17221Stef if ((ifp->if_flags & IFF_ROUTE) == 0) { 709*17221Stef rtinit(&ifp->if_dstaddr, &ifp->if_addr, RTF_HOST|RTF_UP); 710*17221Stef ifp->if_flags |= IFF_ROUTE; 711*17221Stef } 71213061Ssam break; 71313061Ssam 71413061Ssam case SIOCSIFDSTADDR: 71513061Ssam ifp->if_dstaddr = ifr->ifr_dstaddr; 71613061Ssam break; 717*17221Stef 71813061Ssam default: 71913061Ssam error = EINVAL; 72013061Ssam } 721*17221Stef if ((ifp->if_flags & IFF_RUNNING) == 0) 722*17221Stef dmcinit(ifp->if_unit); 72313061Ssam splx(s); 72413061Ssam return (error); 72513061Ssam } 726*17221Stef 727*17221Stef /* 728*17221Stef * Routines supporting UNIBUS network interfaces. 729*17221Stef */ 730*17221Stef 731*17221Stef /* 732*17221Stef * Init UNIBUS for interface on uban whose headers of size hlen are to 733*17221Stef * end on a page boundary. We allocate a UNIBUS map register for the page 734*17221Stef * with the header, and nmr more UNIBUS map registers for i/o on the adapter, 735*17221Stef * doing this for each receive and transmit buffer. We also 736*17221Stef * allocate page frames in the mbuffer pool for these pages. 
 */
dmc_ubainit(ifu, uban, hlen, nmr)
        register struct dmcuba *ifu;
        int uban, hlen, nmr;
{
        register caddr_t cp, dp;
        register struct ifrw *ifrw;
        register struct ifxmt *ifxp;
        int i, ncl;

        /* clusters needed per buffer: nmr data pages plus header slop */
        ncl = clrnd(nmr + CLSIZE) / CLSIZE;
        if (ifu->ifu_r[0].ifrw_addr) {
                /*
                 * If the first read buffer has a non-zero
                 * address, it means we have already allocated core
                 */
                cp = ifu->ifu_r[0].ifrw_addr - (CLBYTES - hlen);
        } else {
                /* one contiguous arena for all NTOT buffers */
                cp = m_clalloc(NTOT * ncl, MPG_SPACE);
                if (cp == 0)
                        return (0);
                ifu->ifu_hlen = hlen;
                ifu->ifu_uban = uban;
                ifu->ifu_uba = uba_hd[uban].uh_uba;
                /* buffers start so that the header ends on a page boundary */
                dp = cp + CLBYTES - hlen;
                for (ifrw = ifu->ifu_r; ifrw < &ifu->ifu_r[NRCV]; ifrw++) {
                        ifrw->ifrw_addr = dp;
                        dp += ncl * CLBYTES;
                }
                for (ifxp = ifu->ifu_w; ifxp < &ifu->ifu_w[NXMT]; ifxp++) {
                        ifxp->x_ifrw.ifrw_addr = dp;
                        dp += ncl * CLBYTES;
                }
        }
        /* allocate for receive ring; on failure unwind earlier uballocs */
        for (ifrw = ifu->ifu_r; ifrw < &ifu->ifu_r[NRCV]; ifrw++) {
                if (dmc_ubaalloc(ifu, ifrw, nmr) == 0) {
                        struct ifrw *rw;

                        for (rw = ifu->ifu_r; rw < ifrw; rw++)
                                ubarelse(ifu->ifu_uban, &rw->ifrw_info);
                        goto bad;
                }
        }
        /* and now transmit ring */
        for (ifxp = ifu->ifu_w; ifxp < &ifu->ifu_w[NXMT]; ifxp++) {
                ifrw = &ifxp->x_ifrw;
                if (dmc_ubaalloc(ifu, ifrw, nmr) == 0) {
                        struct ifxmt *xp;

                        for (xp = ifu->ifu_w; xp < ifxp; xp++)
                                ubarelse(ifu->ifu_uban, &xp->x_ifrw.ifrw_info);
                        for (ifrw = ifu->ifu_r; ifrw < &ifu->ifu_r[NRCV]; ifrw++)
                                ubarelse(ifu->ifu_uban, &ifrw->ifrw_info);
                        goto bad;
                }
                /* remember the pristine map so dmcput can restore it */
                for (i = 0; i < nmr; i++)
                        ifxp->x_map[i] = ifrw->ifrw_mr[i];
                ifxp->x_xswapd = 0;
        }
        return (1);
bad:
        /*
         * NOTE(review): this frees the whole arena even when it was
         * inherited from an earlier successful call (the branch above
         * that recomputed cp) — confirm a partial re-init can't free
         * pages still referenced elsewhere.
         */
        m_pgfree(cp, NTOT * ncl);
        ifu->ifu_r[0].ifrw_addr = 0;
        return (0);
}

/*
 * Setup either a ifrw structure by allocating UNIBUS map registers,
 * possibly a buffered data path, and initializing the fields of
 * the ifrw structure to minimize run-time overhead.
 */
static
dmc_ubaalloc(ifu, ifrw, nmr)
        struct dmcuba *ifu;
        register struct ifrw *ifrw;
        int nmr;
{
        register int info;

        info =
            uballoc(ifu->ifu_uban, ifrw->ifrw_addr, nmr*NBPG + ifu->ifu_hlen,
                ifu->ifu_flags);
        if (info == 0)
                return (0);
        ifrw->ifrw_info = info;
        ifrw->ifrw_bdp = UBAI_BDP(info);
        ifrw->ifrw_proto = UBAMR_MRV | (UBAI_BDP(info) << UBAMR_DPSHIFT);
        /* first map register covers the header page; data maps follow it */
        ifrw->ifrw_mr = &ifu->ifu_uba->uba_map[UBAI_MR(info) + 1];
        return (1);
}

/*
 * Pull read data off an interface.
 * Len is length of data, with local net header stripped.
 * Off is non-zero if a trailer protocol was used, and
 * gives the offset of the trailer information.
 * We copy the trailer information and then all the normal
 * data into mbufs.
 * When full cluster sized units are present
 * on the interface on cluster boundaries we can get them more
 * easily by remapping, and take advantage of this here.
 */
struct mbuf *
dmc_get(ifu, ifrw, totlen, off0)
        register struct dmcuba *ifu;
        register struct ifrw *ifrw;
        int totlen, off0;
{
        struct mbuf *top, **mp, *m;
        int off = off0, len;
        register caddr_t cp = ifrw->ifrw_addr + ifu->ifu_hlen;

        top = 0;
        mp = &top;
        while (totlen > 0) {
                MGET(m, M_DONTWAIT, MT_DATA);
                if (m == 0)
                        goto bad;
                if (off) {
                        /* trailer: consume the trailing data region first */
                        len = totlen - off;
                        cp = ifrw->ifrw_addr + ifu->ifu_hlen + off;
                } else
                        len = totlen;
                if (len >= CLBYTES) {
                        struct mbuf *p;
                        struct pte *cpte, *ppte;
                        int x, *ip, i;

                        MCLGET(p, 1);
                        if (p == 0)
                                goto nopage;
                        len = m->m_len = CLBYTES;
                        m->m_off = (int)p - (int)m;
                        /* can only page-flip if the source is cluster aligned */
                        if (!claligned(cp))
                                goto copy;

                        /*
                         * Switch pages mapped to UNIBUS with new page p,
                         * as quick form of copy.  Remap UNIBUS and invalidate.
                         */
                        cpte = &Mbmap[mtocl(cp)*CLSIZE];
                        ppte = &Mbmap[mtocl(p)*CLSIZE];
                        x = btop(cp - ifrw->ifrw_addr);
                        ip = (int *)&ifrw->ifrw_mr[x];
                        for (i = 0; i < CLSIZE; i++) {
                                struct pte t;
                                /* swap the kernel ptes of the two pages */
                                t = *ppte; *ppte++ = *cpte; *cpte = t;
                                /* point the UNIBUS map at the fresh page */
                                *ip++ =
                                    cpte++->pg_pfnum|ifrw->ifrw_proto;
                                /* flush stale translations for both pages */
                                mtpr(TBIS, cp);
                                cp += NBPG;
                                mtpr(TBIS, (caddr_t)p);
                                p += NBPG / sizeof (*p);
                        }
                        goto nocopy;
                }
nopage:
                m->m_len = MIN(MLEN, len);
                m->m_off = MMINOFF;
copy:
                bcopy(cp, mtod(m, caddr_t), (unsigned)m->m_len);
                cp += m->m_len;
nocopy:
                *mp = m;
                mp = &m->m_next;
                if (off) {
                        /* sort of an ALGOL-W style for statement... */
                        off += m->m_len;
                        if (off == totlen) {
                                /* trailer consumed; wrap to the head data */
                                cp = ifrw->ifrw_addr + ifu->ifu_hlen;
                                off = 0;
                                totlen = off0;
                        }
                } else
                        totlen -= m->m_len;
        }
        return (top);
bad:
        m_freem(top);
        return (0);
}

/*
 * Map a chain of mbufs onto a network interface
 * in preparation for an i/o operation.
 * The argument chain of mbufs includes the local network
 * header which is copied to be in the mapped, aligned
 * i/o space.
925*17221Stef */ 926*17221Stef dmcput(ifu, n, m) 927*17221Stef struct dmcuba *ifu; 928*17221Stef int n; 929*17221Stef register struct mbuf *m; 930*17221Stef { 931*17221Stef register struct mbuf *mp; 932*17221Stef register caddr_t cp; 933*17221Stef register struct ifxmt *ifxp; 934*17221Stef register struct ifrw *ifrw; 935*17221Stef register int i; 936*17221Stef int xswapd = 0; 937*17221Stef int x, cc, t; 938*17221Stef caddr_t dp; 939*17221Stef 940*17221Stef ifxp = &ifu->ifu_w[n]; 941*17221Stef ifrw = &ifxp->x_ifrw; 942*17221Stef cp = ifrw->ifrw_addr; 943*17221Stef while (m) { 944*17221Stef dp = mtod(m, char *); 945*17221Stef if (claligned(cp) && claligned(dp) && m->m_len == CLBYTES) { 946*17221Stef struct pte *pte; int *ip; 947*17221Stef pte = &Mbmap[mtocl(dp)*CLSIZE]; 948*17221Stef x = btop(cp - ifrw->ifrw_addr); 949*17221Stef ip = (int *)&ifrw->ifrw_mr[x]; 950*17221Stef for (i = 0; i < CLSIZE; i++) 951*17221Stef *ip++ = ifrw->ifrw_proto | pte++->pg_pfnum; 952*17221Stef xswapd |= 1 << (x>>(CLSHIFT-PGSHIFT)); 953*17221Stef mp = m->m_next; 954*17221Stef m->m_next = ifxp->x_xtofree; 955*17221Stef ifxp->x_xtofree = m; 956*17221Stef cp += m->m_len; 957*17221Stef } else { 958*17221Stef bcopy(mtod(m, caddr_t), cp, (unsigned)m->m_len); 959*17221Stef cp += m->m_len; 960*17221Stef MFREE(m, mp); 961*17221Stef } 962*17221Stef m = mp; 963*17221Stef } 964*17221Stef 965*17221Stef /* 966*17221Stef * Xswapd is the set of clusters we just mapped out. Ifxp->x_xswapd 967*17221Stef * is the set of clusters mapped out from before. We compute 968*17221Stef * the number of clusters involved in this operation in x. 969*17221Stef * Clusters mapped out before and involved in this operation 970*17221Stef * should be unmapped so original pages will be accessed by the device. 
971*17221Stef */ 972*17221Stef cc = cp - ifrw->ifrw_addr; 973*17221Stef x = ((cc - ifu->ifu_hlen) + CLBYTES - 1) >> CLSHIFT; 974*17221Stef ifxp->x_xswapd &= ~xswapd; 975*17221Stef while (i = ffs(ifxp->x_xswapd)) { 976*17221Stef i--; 977*17221Stef if (i >= x) 978*17221Stef break; 979*17221Stef ifxp->x_xswapd &= ~(1<<i); 980*17221Stef i *= CLSIZE; 981*17221Stef for (t = 0; t < CLSIZE; t++) { 982*17221Stef ifrw->ifrw_mr[i] = ifxp->x_map[i]; 983*17221Stef i++; 984*17221Stef } 985*17221Stef } 986*17221Stef ifxp->x_xswapd |= xswapd; 987*17221Stef return (cc); 988*17221Stef } 98913085Ssam #endif 990