/*	if_dmc.c	6.4	84/12/20	*/

#include "dmc.h"
#if NDMC > 0

/*
 * DMC11 device driver, internet version
 *
 *	Bill Nesheim
 *	Cornell University
 *
 *	Lou Salkind
 *	New York University
 */

/* #define DEBUG	/* for base table dump on fatal error */

#include "../machine/pte.h"

#include "param.h"
#include "systm.h"
#include "mbuf.h"
#include "buf.h"
#include "ioctl.h"		/* must precede tty.h */
#include "tty.h"
#include "protosw.h"
#include "socket.h"
#include "vmmac.h"
#include "errno.h"

#include "../net/if.h"
#include "../net/netisr.h"
#include "../net/route.h"
#include "../netinet/in.h"
#include "../netinet/in_systm.h"
#include "../netinet/ip.h"
#include "../netinet/ip_var.h"

#include "../vax/cpu.h"
#include "../vax/mtpr.h"
#include "if_uba.h"
#include "if_dmc.h"
#include "../vaxuba/ubareg.h"
#include "../vaxuba/ubavar.h"

#include "../h/time.h"
#include "../h/kernel.h"

int	dmctimer;		/* timer started? */
int	dmc_timeout = 8;	/* timeout value */
int	dmcwatch();

/*
 * Driver information for auto-configuration stuff.
 */
int	dmcprobe(), dmcattach(), dmcinit(), dmcioctl();
int	dmcoutput(), dmcreset();
struct	uba_device *dmcinfo[NDMC];
u_short	dmcstd[] = { 0 };
struct	uba_driver dmcdriver =
	{ dmcprobe, 0, dmcattach, 0, dmcstd, "dmc", dmcinfo };

#define	NRCV	7
#define	NXMT	3
#define	NTOT	(NRCV + NXMT)
#define	NCMDS	(NTOT+4)	/* size of command queue */

#define	printd	if (dmcdebug) printf
int	dmcdebug = 0;

/* error reporting intervals */
#define	DMC_RPNBFS	50
#define	DMC_RPDSC	1
#define	DMC_RPTMO	10
#define	DMC_RPDCK	10

struct	dmc_command {
	char	qp_cmd;			/* command */
	short	qp_ubaddr;		/* buffer address */
	short	qp_cc;			/* character count || XMEM */
	struct	dmc_command *qp_next;	/* next command on queue */
};
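/*
 * Illustrative sketch (not compiled): a command block as built by
 * dmcload() below.  qp_cmd carries the port command plus DMC_RQI,
 * qp_ubaddr the low 16 bits of the UNIBUS buffer address, and qp_cc
 * the byte count together with the high ("XMEM") bits of the 18-bit
 * UNIBUS address, e.g. for a transmit request:
 *
 *	dmcload(sc, DMC_WRITE, rp->ubinfo,
 *	    rp->cc | ((rp->ubinfo >> 2) & DMC_XMEM));
 */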
/*
 * The dmcuba structures generalize the ifuba structure
 * to an arbitrary number of receive and transmit buffers.
 */
struct	ifxmt {
	struct	ifrw x_ifrw;			/* mapping info */
	struct	pte x_map[IF_MAXNUBAMR];	/* output base pages */
	short	x_xswapd;			/* mask of clusters swapped */
	struct	mbuf *x_xtofree;		/* pages being dma'd out */
};

struct	dmcuba {
	short	ifu_uban;		/* uba number */
	short	ifu_hlen;		/* local net header length */
	struct	uba_regs *ifu_uba;	/* uba regs, in vm */
	struct	ifrw ifu_r[NRCV];	/* receive information */
	struct	ifxmt ifu_w[NXMT];	/* transmit information */
					/* these should only be pointers */
	short	ifu_flags;		/* used during uballoc's */
};

struct	dmcbufs {
	int	ubinfo;		/* from uballoc */
	short	cc;		/* buffer size */
	short	flags;		/* access control */
};
#define	DBUF_OURS	0	/* buffer is available */
#define	DBUF_DMCS	1	/* buffer claimed by somebody */
#define	DBUF_XMIT	4	/* transmit buffer */
#define	DBUF_RCV	8	/* receive buffer */

struct mbuf *dmc_get();
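/*
 * Buffer ownership (descriptive note, from dmcinit/dmcstart/dmcxint below):
 * dmcinit() marks every buffer DBUF_OURS; handing one to the device sets
 * DBUF_DMCS (rp->flags |= DBUF_DMCS).  On completion dmcxint() clears the
 * bit for transmit buffers, while receive buffers are handed straight back
 * to the device with another DMC_READ command.
 */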
/*
 * DMC software status per interface.
 *
 * Each interface is referenced by a network interface structure,
 * sc_if, which the routing code uses to locate the interface.
 * This structure contains the output queue for the interface, its address, ...
 * We also have, for each interface, a set of UBA interface structures
 * (one per receive and transmit buffer) which contain information about
 * the UNIBUS resources held by the interface:
 * map registers, buffered data paths, etc.  Information is cached in this
 * structure for use by the if_uba.c routines in running the interface
 * efficiently.
 */
struct dmc_softc {
	short	sc_oused;		/* output buffers currently in use */
	short	sc_iused;		/* input buffers given to DMC */
	short	sc_flag;		/* flags */
	int	sc_nticks;		/* seconds since last interrupt */
	struct	ifnet sc_if;		/* network-visible interface */
	struct	dmcbufs sc_rbufs[NRCV];	/* receive buffer info */
	struct	dmcbufs sc_xbufs[NXMT];	/* transmit buffer info */
	struct	dmcuba sc_ifuba;	/* UNIBUS resources */
	int	sc_ubinfo;		/* UBA mapping info for base table */
	int	sc_errors[4];		/* non-fatal error counters */
#define	sc_datck	sc_errors[0]
#define	sc_timeo	sc_errors[1]
#define	sc_nobuf	sc_errors[2]
#define	sc_disc		sc_errors[3]
	/* command queue stuff */
	struct	dmc_command sc_cmdbuf[NCMDS];
	struct	dmc_command *sc_qhead;	/* head of command queue */
	struct	dmc_command *sc_qtail;	/* tail of command queue */
	struct	dmc_command *sc_qactive;	/* command in progress */
	struct	dmc_command *sc_qfreeh;	/* head of list of free cmd buffers */
	struct	dmc_command *sc_qfreet;	/* tail of list of free cmd buffers */
	/* end command queue stuff */
} dmc_softc[NDMC];

/* flags */
#define	DMC_ALLOC	01	/* unibus resources allocated */
#define	DMC_BMAPPED	02	/* base table mapped */
#define	DMC_RESTART	04	/* software restart in progress */
#define	DMC_ACTIVE	010	/* device active */

struct dmc_base {
	short	d_base[128];	/* DMC base table */
} dmc_base[NDMC];

/* queue manipulation macros */
#define	QUEUE_AT_HEAD(qp, head, tail) \
	(qp)->qp_next = (head); \
	(head) = (qp); \
	if ((tail) == (struct dmc_command *) 0) \
		(tail) = (head)

#define	QUEUE_AT_TAIL(qp, head, tail) \
	if ((tail)) \
		(tail)->qp_next = (qp); \
	else \
		(head) = (qp); \
	(qp)->qp_next = (struct dmc_command *) 0; \
	(tail) = (qp)

#define	DEQUEUE(head, tail) \
	(head) = (head)->qp_next;\
	if ((head) == (struct dmc_command *) 0)\
		(tail) = (head)

dmcprobe(reg)
	caddr_t reg;
{
	register int br, cvec;
	register struct dmcdevice *addr = (struct dmcdevice *)reg;
	register int i;

#ifdef lint
	br = 0; cvec = br; br = cvec;
	dmcrint(0); dmcxint(0);
#endif
	addr->bsel1 = DMC_MCLR;
	for (i = 100000; i && (addr->bsel1 & DMC_RUN) == 0; i--)
		;
	if ((addr->bsel1 & DMC_RUN) == 0) {
		printf("dmcprobe: can't start device\n");
		return (0);
	}
	addr->bsel0 = DMC_RQI|DMC_IEI;
	/* let's be paranoid */
	addr->bsel0 |= DMC_RQI|DMC_IEI;
	DELAY(1000000);
	addr->bsel1 = DMC_MCLR;
	for (i = 100000; i && (addr->bsel1 & DMC_RUN) == 0; i--)
		;
	return (1);
}
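/*
 * Illustrative sketch (not compiled): how the queue macros above are used
 * by dmcload() and dmcrint() below.  A command buffer is taken from the
 * free list, filled in, and queued; completed buffers go back on the free
 * list:
 *
 *	qp = sc->sc_qfreeh;
 *	DEQUEUE(sc->sc_qfreeh, sc->sc_qfreet);
 *	... fill in qp ...
 *	QUEUE_AT_TAIL(qp, sc->sc_qhead, sc->sc_qtail);
 *	...
 *	QUEUE_AT_HEAD(qp, sc->sc_qfreeh, sc->sc_qfreet);
 */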
/*
 * Interface exists: make available by filling in network interface
 * record.  System will initialize the interface when it is ready
 * to accept packets.
 */
dmcattach(ui)
	register struct uba_device *ui;
{
	register struct dmc_softc *sc = &dmc_softc[ui->ui_unit];

	sc->sc_if.if_unit = ui->ui_unit;
	sc->sc_if.if_name = "dmc";
	sc->sc_if.if_mtu = DMCMTU;
	sc->sc_if.if_init = dmcinit;
	sc->sc_if.if_output = dmcoutput;
	sc->sc_if.if_ioctl = dmcioctl;
	sc->sc_if.if_reset = dmcreset;
	sc->sc_if.if_flags = IFF_POINTOPOINT;
	sc->sc_ifuba.ifu_flags = UBA_CANTWAIT;

	if_attach(&sc->sc_if);
	if (dmctimer == 0) {
		dmctimer = 1;
		timeout(dmcwatch, (caddr_t) 0, hz);
	}
}

/*
 * Reset of interface after UNIBUS reset.
 * If interface is on specified UBA, reset its state.
 */
dmcreset(unit, uban)
	int unit, uban;
{
	register struct uba_device *ui;
	register struct dmc_softc *sc = &dmc_softc[unit];

	if (unit >= NDMC || (ui = dmcinfo[unit]) == 0 || ui->ui_alive == 0 ||
	    ui->ui_ubanum != uban)
		return;
	printf(" dmc%d", unit);
	sc->sc_flag = 0;
	dmcinit(unit);
}

/*
 * Initialization of interface; reinitialize UNIBUS usage.
 */
dmcinit(unit)
	int unit;
{
	register struct dmc_softc *sc = &dmc_softc[unit];
	register struct uba_device *ui = dmcinfo[unit];
	register struct dmcdevice *addr;
	register struct ifnet *ifp = &sc->sc_if;
	register struct ifrw *ifrw;
	register struct ifxmt *ifxp;
	register struct dmcbufs *rp;
	register struct dmc_command *qp;
	int base;
	struct sockaddr_in *sin;
	int s;

	addr = (struct dmcdevice *)ui->ui_addr;

	sin = (struct sockaddr_in *) &ifp->if_addr;
	if (sin->sin_addr.s_addr == 0)		/* if address still unknown */
		return;
	sin = (struct sockaddr_in *) &ifp->if_dstaddr;
	if (sin->sin_addr.s_addr == 0)		/* if address still unknown */
		return;

	if ((addr->bsel1&DMC_RUN) == 0) {
		printf("dmcinit: DMC not running\n");
		ifp->if_flags &= ~(IFF_RUNNING|IFF_UP);
		return;
	}
	/* map base table */
	if ((sc->sc_flag & DMC_BMAPPED) == 0) {
		sc->sc_ubinfo = uballoc(ui->ui_ubanum,
		    (caddr_t)&dmc_base[unit], sizeof (struct dmc_base), 0);
		sc->sc_flag |= DMC_BMAPPED;
	}
	/* initialize UNIBUS resources */
	sc->sc_iused = sc->sc_oused = 0;
	if ((sc->sc_flag & DMC_ALLOC) == 0) {
		if (dmc_ubainit(&sc->sc_ifuba, ui->ui_ubanum,
		    sizeof(struct dmc_header), (int)btoc(DMCMTU)) == 0) {
			printf("dmc%d: can't initialize\n", unit);
			ifp->if_flags &= ~IFF_UP;
			return;
		}
		sc->sc_flag |= DMC_ALLOC;
	}

	/* initialize buffer pool */
	/* receives */
	ifrw = &sc->sc_ifuba.ifu_r[0];
	for (rp = &sc->sc_rbufs[0]; rp < &sc->sc_rbufs[NRCV]; rp++) {
		rp->ubinfo = ifrw->ifrw_info & 0x3ffff;
		rp->cc = DMCMTU + sizeof (struct dmc_header);
		rp->flags = DBUF_OURS|DBUF_RCV;
		ifrw++;
	}
	/* transmits */
	ifxp = &sc->sc_ifuba.ifu_w[0];
	for (rp = &sc->sc_xbufs[0]; rp < &sc->sc_xbufs[NXMT]; rp++) {
		rp->ubinfo = ifxp->x_ifrw.ifrw_info & 0x3ffff;
		rp->cc = 0;
		rp->flags = DBUF_OURS|DBUF_XMIT;
		ifxp++;
	}

	/* set up command queues */
	sc->sc_qfreeh = sc->sc_qfreet
	    = sc->sc_qhead = sc->sc_qtail = sc->sc_qactive =
	    (struct dmc_command *)0;
	/* set up free command buffer list */
	for (qp = &sc->sc_cmdbuf[0]; qp < &sc->sc_cmdbuf[NCMDS]; qp++) {
		QUEUE_AT_HEAD(qp, sc->sc_qfreeh, sc->sc_qfreet);
	}

	/* base in */
	base = sc->sc_ubinfo & 0x3ffff;
	dmcload(sc, DMC_BASEI, base, (base>>2) & DMC_XMEM);
	/* specify half duplex operation, flags tell if primary */
	/* or secondary station */
	if (ui->ui_flags == 0)
		/* use DDCMP mode in full duplex */
		dmcload(sc, DMC_CNTLI, 0, 0);
	else if (ui->ui_flags == 1)
		/* use MAINTENANCE mode */
		dmcload(sc, DMC_CNTLI, 0, DMC_MAINT);
	else if (ui->ui_flags == 2)
		/* use DDCMP half duplex as primary station */
		dmcload(sc, DMC_CNTLI, 0, DMC_HDPLX);
	else if (ui->ui_flags == 3)
		/* use DDCMP half duplex as secondary station */
		dmcload(sc, DMC_CNTLI, 0, DMC_HDPLX | DMC_SEC);

	/* enable operation done interrupts */
	sc->sc_flag &= ~DMC_ACTIVE;
	while ((addr->bsel2 & DMC_IEO) == 0)
		addr->bsel2 |= DMC_IEO;
	s = spl5();
	/* queue first NRCV buffers for DMC to fill */
	for (rp = &sc->sc_rbufs[0]; rp < &sc->sc_rbufs[NRCV]; rp++) {
		rp->flags |= DBUF_DMCS;
		dmcload(sc, DMC_READ, rp->ubinfo,
		    (((rp->ubinfo>>2)&DMC_XMEM) | rp->cc));
		sc->sc_iused++;
	}
	splx(s);
	ifp->if_flags |= IFF_UP|IFF_RUNNING;
}

/*
 * Start output on interface.  Get another datagram
 * to send from the interface queue and map it to
 * the interface before starting output.
 *
 * Must be called at spl 5
 */
dmcstart(dev)
	dev_t dev;
{
	int unit = minor(dev);
	register struct dmc_softc *sc = &dmc_softc[unit];
	struct mbuf *m;
	register struct dmcbufs *rp;
	register int n;

	/*
	 * Dequeue up to NXMT requests and map them to the UNIBUS.
	 * If no more requests, or no dmc buffers available, just return.
	 */
	n = 0;
	for (rp = &sc->sc_xbufs[0]; rp < &sc->sc_xbufs[NXMT]; rp++) {
		/* find an available buffer */
		if ((rp->flags & DBUF_DMCS) == 0) {
			IF_DEQUEUE(&sc->sc_if.if_snd, m);
			if (m == 0)
				return;
			/* mark it dmcs */
			rp->flags |= (DBUF_DMCS);
			/*
			 * Have request mapped to UNIBUS for transmission
			 * and start the output.
			 */
			rp->cc = dmcput(&sc->sc_ifuba, n, m);
			rp->cc &= DMC_CCOUNT;
			sc->sc_oused++;
			dmcload(sc, DMC_WRITE, rp->ubinfo,
			    rp->cc | ((rp->ubinfo>>2)&DMC_XMEM));
		}
		n++;
	}
}

/*
 * Utility routine to load the DMC device registers.
 */
dmcload(sc, type, w0, w1)
	register struct dmc_softc *sc;
	int type, w0, w1;
{
	register struct dmcdevice *addr;
	register int unit, sps;
	register struct dmc_command *qp;

	unit = sc - dmc_softc;
	addr = (struct dmcdevice *)dmcinfo[unit]->ui_addr;
	sps = spl5();

	/* grab a command buffer from the free list */
	if ((qp = sc->sc_qfreeh) == (struct dmc_command *)0)
		panic("dmc command queue overflow");
	DEQUEUE(sc->sc_qfreeh, sc->sc_qfreet);

	/* fill in requested info */
	qp->qp_cmd = (type | DMC_RQI);
	qp->qp_ubaddr = w0;
	qp->qp_cc = w1;

	if (sc->sc_qactive) {		/* command in progress */
		if (type == DMC_READ) {
			QUEUE_AT_HEAD(qp, sc->sc_qhead, sc->sc_qtail);
		} else {
			QUEUE_AT_TAIL(qp, sc->sc_qhead, sc->sc_qtail);
		}
	} else {			/* command port free */
		sc->sc_qactive = qp;
		addr->bsel0 = qp->qp_cmd;
		dmcrint(unit);
	}
	splx(sps);
}

/*
 * DMC interface receiver interrupt.
 * Ready to accept another command,
 * pull one off the command queue.
 */
dmcrint(unit)
	int unit;
{
	register struct dmc_softc *sc;
	register struct dmcdevice *addr;
	register struct dmc_command *qp;
	register int n;

	addr = (struct dmcdevice *)dmcinfo[unit]->ui_addr;
	sc = &dmc_softc[unit];
	if ((qp = sc->sc_qactive) == (struct dmc_command *) 0) {
		printf("dmc%d: dmcrint no command\n", unit);
		return;
	}
	while (addr->bsel0&DMC_RDYI) {
		addr->sel4 = qp->qp_ubaddr;
		addr->sel6 = qp->qp_cc;
		addr->bsel0 &= ~(DMC_IEI|DMC_RQI);
		/* free command buffer */
		QUEUE_AT_HEAD(qp, sc->sc_qfreeh, sc->sc_qfreet);
		while (addr->bsel0 & DMC_RDYI) {
			/*
			 * Can't check for RDYO here 'cause
			 * this routine isn't reentrant!
			 */
			DELAY(5);
		}
		/* move on to next command */
		if ((sc->sc_qactive = sc->sc_qhead) == (struct dmc_command *)0)
			break;		/* all done */
		/* more commands to do, start the next one */
		qp = sc->sc_qactive;
		DEQUEUE(sc->sc_qhead, sc->sc_qtail);
		addr->bsel0 = qp->qp_cmd;
		n = RDYSCAN;
		while (n-- > 0)
			if ((addr->bsel0&DMC_RDYI) || (addr->bsel2&DMC_RDYO))
				break;
	}
	if (sc->sc_qactive) {
		addr->bsel0 |= DMC_IEI|DMC_RQI;
		/* VMS does it twice !*$%@# */
		addr->bsel0 |= DMC_IEI|DMC_RQI;
	}
}

/*
 * DMC interface transmitter interrupt.
 * A transfer may have completed, check for errors.
 * If it was a read, notify appropriate protocol.
 * If it was a write, pull the next one off the queue.
 */
dmcxint(unit)
	int unit;
{
	register struct dmc_softc *sc;
	register struct ifnet *ifp;
	struct uba_device *ui = dmcinfo[unit];
	struct dmcdevice *addr;
	struct mbuf *m;
	struct ifqueue *inq;
	int arg, pkaddr, cmd, len;
	register struct ifrw *ifrw;
	register struct dmcbufs *rp;
	register struct ifxmt *ifxp;
	struct dmc_header *dh;
	int off, resid;

	addr = (struct dmcdevice *)ui->ui_addr;
	sc = &dmc_softc[unit];
	ifp = &sc->sc_if;

	while (addr->bsel2 & DMC_RDYO) {

		cmd = addr->bsel2 & 0xff;
		arg = addr->sel6 & 0xffff;
		/* reconstruct UNIBUS address of buffer returned to us */
		pkaddr = ((arg&DMC_XMEM)<<2) | (addr->sel4 & 0xffff);
		/* release port */
		addr->bsel2 &= ~DMC_RDYO;
		switch (cmd & 07) {

		case DMC_OUR:
			/*
			 * A read has completed.
			 * Pass packet to type specific
			 * higher-level input routine.
			 */
			ifp->if_ipackets++;
			/* find location in dmcuba struct */
			ifrw = &sc->sc_ifuba.ifu_r[0];
			for (rp = &sc->sc_rbufs[0]; rp < &sc->sc_rbufs[NRCV]; rp++) {
				if (rp->ubinfo == pkaddr)
					break;
				ifrw++;
			}
			if (rp >= &sc->sc_rbufs[NRCV])
				panic("dmc rcv");
			if ((rp->flags & DBUF_DMCS) == 0)
				printf("dmc%d: done unalloc rbuf\n", unit);

			len = (arg & DMC_CCOUNT) - sizeof (struct dmc_header);
			if (len < 0 || len > DMCMTU) {
				ifp->if_ierrors++;
				printd("dmc%d: bad rcv pkt addr 0x%x len 0x%x\n",
				    unit, pkaddr, len);
				goto setup;
			}
			/*
			 * Deal with trailer protocol: if type is trailer
			 * get true type from first 16-bit word past data.
			 * Remember that type was trailer by setting off.
			 */
			dh = (struct dmc_header *)ifrw->ifrw_addr;
			dh->dmc_type = ntohs((u_short)dh->dmc_type);
#define	dmcdataaddr(dh, off, type)	((type)(((caddr_t)((dh)+1)+(off))))
			if (dh->dmc_type >= DMC_TRAILER &&
			    dh->dmc_type < DMC_TRAILER+DMC_NTRAILER) {
				off = (dh->dmc_type - DMC_TRAILER) * 512;
				if (off >= DMCMTU)
					goto setup;		/* sanity */
				dh->dmc_type = ntohs(*dmcdataaddr(dh, off, u_short *));
				resid = ntohs(*(dmcdataaddr(dh, off+2, u_short *)));
				if (off + resid > len)
					goto setup;		/* sanity */
				len = off + resid;
			} else
				off = 0;
			if (len == 0)
				goto setup;

			/*
			 * Pull packet off interface.  Off is nonzero if
			 * packet has trailing header; dmc_get will then
			 * force this header information to be at the front,
			 * but we still have to drop the type and length
			 * which are at the front of any trailer data.
			 */
			m = dmc_get(&sc->sc_ifuba, ifrw, len, off);
			if (m == 0)
				goto setup;
			if (off) {
				m->m_off += 2 * sizeof (u_short);
				m->m_len -= 2 * sizeof (u_short);
			}
			switch (dh->dmc_type) {

#ifdef INET
			case DMC_IPTYPE:
				schednetisr(NETISR_IP);
				inq = &ipintrq;
				break;
#endif
			default:
				m_freem(m);
				goto setup;
			}

			if (IF_QFULL(inq)) {
				IF_DROP(inq);
				m_freem(m);
			} else
				IF_ENQUEUE(inq, m);

	setup:
			/* is this needed? */
			rp->ubinfo = ifrw->ifrw_info & 0x3ffff;

			dmcload(sc, DMC_READ, rp->ubinfo,
			    ((rp->ubinfo >> 2) & DMC_XMEM) | rp->cc);
			break;

		case DMC_OUX:
			/*
			 * A write has completed, start another
			 * transfer if there is more data to send.
			 */
			ifp->if_opackets++;
			/* find associated dmcbuf structure */
			ifxp = &sc->sc_ifuba.ifu_w[0];
			for (rp = &sc->sc_xbufs[0]; rp < &sc->sc_xbufs[NXMT]; rp++) {
				if (rp->ubinfo == pkaddr)
					break;
				ifxp++;
			}
			if (rp >= &sc->sc_xbufs[NXMT]) {
				printf("dmc%d: bad packet address 0x%x\n",
				    unit, pkaddr);
				break;
			}
			if ((rp->flags & DBUF_DMCS) == 0)
				printf("dmc%d: unallocated packet 0x%x\n",
				    unit, pkaddr);
			/* mark buffer free */
			if (ifxp->x_xtofree) {
				(void)m_freem(ifxp->x_xtofree);
				ifxp->x_xtofree = 0;
			}
			rp->flags &= ~DBUF_DMCS;
			sc->sc_oused--;
			sc->sc_nticks = 0;
			sc->sc_flag |= DMC_ACTIVE;
			break;

		case DMC_CNTLO:
			arg &= DMC_CNTMASK;
			if (arg & DMC_FATAL) {
				printd("dmc%d: fatal error, flags=%b\n",
				    unit, arg, CNTLO_BITS);
				ifp->if_flags &= ~(IFF_RUNNING|IFF_UP);
				dmcrestart(unit);
				break;
			}
			/* ACCUMULATE STATISTICS */
			switch (arg) {
			case DMC_NOBUFS:
				ifp->if_ierrors++;
				if ((sc->sc_nobuf++ % DMC_RPNBFS) == 0)
					goto report;
				break;
			case DMC_DISCONN:
				if ((sc->sc_disc++ % DMC_RPDSC) == 0)
					goto report;
				break;
			case DMC_TIMEOUT:
				if ((sc->sc_timeo++ % DMC_RPTMO) == 0)
					goto report;
				break;
			case DMC_DATACK:
				ifp->if_oerrors++;
				if ((sc->sc_datck++ % DMC_RPDCK) == 0)
					goto report;
				break;
			default:
				goto report;
			}
			break;
	report:
			printd("dmc%d: soft error, flags=%b\n", unit,
			    arg, CNTLO_BITS);
			if ((sc->sc_flag & DMC_RESTART) == 0) {
				/*
				 * kill off the dmc to get things
				 * going again by generating a
				 * procedure error
				 */
				sc->sc_flag |= DMC_RESTART;
				arg = sc->sc_ubinfo & 0x3ffff;
				dmcload(sc, DMC_BASEI, arg, (arg>>2)&DMC_XMEM);
			}
			break;

		default:
			printf("dmc%d: bad control %o\n", unit, cmd);
			break;
		}
	}
	dmcstart(unit);
	return;
}
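/*
 * Worked example of the trailer decoding above (illustrative): a received
 * dmc_header type of DMC_TRAILER+2 means the real header sits 2*512 = 1024
 * bytes past the data; the two 16-bit words found there give the true type
 * and the residual length, so the packet length becomes off + resid.
 */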
/*
 * DMC output routine.
 * Encapsulate a packet of type family for the dmc.
 * Use trailer local net encapsulation if enough data in first
 * packet leaves a multiple of 512 bytes of data in remainder.
 */
dmcoutput(ifp, m0, dst)
	register struct ifnet *ifp;
	register struct mbuf *m0;
	struct sockaddr *dst;
{
	int type, error, s;
	register struct mbuf *m = m0;
	register struct dmc_header *dh;
	register int off;

	switch (dst->sa_family) {
#ifdef INET
	case AF_INET:
		off = ntohs((u_short)mtod(m, struct ip *)->ip_len) - m->m_len;
		if ((ifp->if_flags & IFF_NOTRAILERS) == 0)
			if (off > 0 && (off & 0x1ff) == 0 &&
			    m->m_off >= MMINOFF + 2 * sizeof (u_short)) {
				type = DMC_TRAILER + (off>>9);
				m->m_off -= 2 * sizeof (u_short);
				m->m_len += 2 * sizeof (u_short);
				*mtod(m, u_short *) = htons((u_short)DMC_IPTYPE);
				*(mtod(m, u_short *) + 1) = htons((u_short)m->m_len);
				goto gottrailertype;
			}
		type = DMC_IPTYPE;
		off = 0;
		goto gottype;
#endif

	case AF_UNSPEC:
		dh = (struct dmc_header *)dst->sa_data;
		type = dh->dmc_type;
		goto gottype;

	default:
		printf("dmc%d: can't handle af%d\n", ifp->if_unit,
		    dst->sa_family);
		error = EAFNOSUPPORT;
		goto bad;
	}

gottrailertype:
	/*
	 * Packet to be sent as a trailer; move first packet
	 * (control information) to end of chain.
	 */
	while (m->m_next)
		m = m->m_next;
	m->m_next = m0;
	m = m0->m_next;
	m0->m_next = 0;
	m0 = m;

gottype:
	/*
	 * Add local network header
	 * (there is space for a uba on a vax to step on)
	 */
	if (m->m_off > MMAXOFF ||
	    MMINOFF + sizeof(struct dmc_header) > m->m_off) {
		m = m_get(M_DONTWAIT, MT_HEADER);
		if (m == 0) {
			error = ENOBUFS;
			goto bad;
		}
		m->m_next = m0;
		m->m_off = MMINOFF;
		m->m_len = sizeof (struct dmc_header);
	} else {
		m->m_off -= sizeof (struct dmc_header);
		m->m_len += sizeof (struct dmc_header);
	}
	dh = mtod(m, struct dmc_header *);
	dh->dmc_type = htons((u_short)type);

	/*
	 * Queue message on interface, and start output if interface
	 * not yet active.
	 */
	s = splimp();
	if (IF_QFULL(&ifp->if_snd)) {
		IF_DROP(&ifp->if_snd);
		m_freem(m);
		splx(s);
		return (ENOBUFS);
	}
	IF_ENQUEUE(&ifp->if_snd, m);
	dmcstart(ifp->if_unit);
	splx(s);
	return (0);

bad:
	m_freem(m0);
	return (error);
}
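/*
 * Worked example of the trailer encapsulation above (illustrative): if the
 * first mbuf holds the IP header and the rest of the datagram is, say, 1024
 * bytes (a multiple of 512), type becomes DMC_TRAILER + 2 and the header
 * mbuf is moved to the end of the chain, preceded in the data by the true
 * type (DMC_IPTYPE) and its length.
 */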
/*
 * Process an ioctl request.
 */
dmcioctl(ifp, cmd, data)
	register struct ifnet *ifp;
	int cmd;
	caddr_t data;
{
	struct ifreq *ifr = (struct ifreq *)data;
	struct sockaddr_in *sin;
	int s = splimp(), error = 0;

	switch (cmd) {

	case SIOCSIFADDR:
		sin = (struct sockaddr_in *)&ifr->ifr_addr;
		if (sin->sin_family != AF_INET) {
			splx(s);
			return (EINVAL);
		}
		if (ifp->if_flags & IFF_RUNNING)
			if_rtinit(ifp, -1);	/* delete previous route */
		ifp->if_addr = *(struct sockaddr *)sin;
		ifp->if_net = in_netof(sin->sin_addr);
		ifp->if_flags |= IFF_UP;
		/* set up routing table entry */
		if ((ifp->if_flags & IFF_ROUTE) == 0) {
			rtinit(&ifp->if_dstaddr, &ifp->if_addr, RTF_HOST|RTF_UP);
			ifp->if_flags |= IFF_ROUTE;
		}
		break;

	case SIOCSIFDSTADDR:
		ifp->if_dstaddr = ifr->ifr_dstaddr;
		break;

	default:
		error = EINVAL;
	}
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		dmcinit(ifp->if_unit);
	splx(s);
	return (error);
}
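/*
 * For reference (illustrative, user-level, not part of this driver): the
 * two ioctls handled above are normally issued by ifconfig(8) through an
 * AF_INET socket, roughly:
 *
 *	struct ifreq ifr;
 *	strcpy(ifr.ifr_name, "dmc0");
 *	... fill in ifr.ifr_addr as a struct sockaddr_in ...
 *	ioctl(s, SIOCSIFADDR, (caddr_t)&ifr);
 *	ioctl(s, SIOCSIFDSTADDR, (caddr_t)&ifr);
 */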
/*
 * Routines supporting UNIBUS network interfaces.
 */

/*
 * Init UNIBUS for interface on uban whose headers of size hlen are to
 * end on a page boundary.  We allocate a UNIBUS map register for the page
 * with the header, and nmr more UNIBUS map registers for i/o on the adapter,
 * doing this for each receive and transmit buffer.  We also
 * allocate page frames in the mbuffer pool for these pages.
 */
dmc_ubainit(ifu, uban, hlen, nmr)
	register struct dmcuba *ifu;
	int uban, hlen, nmr;
{
	register caddr_t cp, dp;
	register struct ifrw *ifrw;
	register struct ifxmt *ifxp;
	int i, ncl;

	ncl = clrnd(nmr + CLSIZE) / CLSIZE;
	if (ifu->ifu_r[0].ifrw_addr)
		/*
		 * If the first read buffer has a non-zero
		 * address, it means we have already allocated core
		 */
		cp = ifu->ifu_r[0].ifrw_addr - (CLBYTES - hlen);
	else {
		cp = m_clalloc(NTOT * ncl, MPG_SPACE);
		if (cp == 0)
			return (0);
		ifu->ifu_hlen = hlen;
		ifu->ifu_uban = uban;
		ifu->ifu_uba = uba_hd[uban].uh_uba;
		dp = cp + CLBYTES - hlen;
		for (ifrw = ifu->ifu_r; ifrw < &ifu->ifu_r[NRCV]; ifrw++) {
			ifrw->ifrw_addr = dp;
			dp += ncl * CLBYTES;
		}
		for (ifxp = ifu->ifu_w; ifxp < &ifu->ifu_w[NXMT]; ifxp++) {
			ifxp->x_ifrw.ifrw_addr = dp;
			dp += ncl * CLBYTES;
		}
	}
	/* allocate for receive ring */
	for (ifrw = ifu->ifu_r; ifrw < &ifu->ifu_r[NRCV]; ifrw++) {
		if (dmc_ubaalloc(ifu, ifrw, nmr) == 0) {
			struct ifrw *rw;

			for (rw = ifu->ifu_r; rw < ifrw; rw++)
				ubarelse(ifu->ifu_uban, &rw->ifrw_info);
			goto bad;
		}
	}
	/* and now transmit ring */
	for (ifxp = ifu->ifu_w; ifxp < &ifu->ifu_w[NXMT]; ifxp++) {
		ifrw = &ifxp->x_ifrw;
		if (dmc_ubaalloc(ifu, ifrw, nmr) == 0) {
			struct ifxmt *xp;

			for (xp = ifu->ifu_w; xp < ifxp; xp++)
				ubarelse(ifu->ifu_uban, &xp->x_ifrw.ifrw_info);
			for (ifrw = ifu->ifu_r; ifrw < &ifu->ifu_r[NRCV]; ifrw++)
				ubarelse(ifu->ifu_uban, &ifrw->ifrw_info);
			goto bad;
		}
		for (i = 0; i < nmr; i++)
			ifxp->x_map[i] = ifrw->ifrw_mr[i];
		ifxp->x_xswapd = 0;
	}
	return (1);
bad:
	m_pgfree(cp, NTOT * ncl);
	ifu->ifu_r[0].ifrw_addr = 0;
	return (0);
}

/*
 * Set up an ifrw structure by allocating UNIBUS map registers,
 * possibly a buffered data path, and initializing the fields of
 * the ifrw structure to minimize run-time overhead.
 */
static
dmc_ubaalloc(ifu, ifrw, nmr)
	struct dmcuba *ifu;
	register struct ifrw *ifrw;
	int nmr;
{
	register int info;

	info =
	    uballoc(ifu->ifu_uban, ifrw->ifrw_addr, nmr*NBPG + ifu->ifu_hlen,
	        ifu->ifu_flags);
	if (info == 0)
		return (0);
	ifrw->ifrw_info = info;
	ifrw->ifrw_bdp = UBAI_BDP(info);
	ifrw->ifrw_proto = UBAMR_MRV | (UBAI_BDP(info) << UBAMR_DPSHIFT);
	ifrw->ifrw_mr = &ifu->ifu_uba->uba_map[UBAI_MR(info) + 1];
	return (1);
}
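/*
 * Layout sketch for dmc_ubainit() above (illustrative): each buffer gets
 * ncl = clrnd(nmr + CLSIZE)/CLSIZE clusters of core.  A buffer's ifrw_addr
 * points hlen bytes before a cluster boundary, so the dmc_header ends
 * exactly on a page boundary and the packet data that follows starts
 * page-aligned, which lets dmc_get()/dmcput() swap whole pages instead of
 * copying.
 */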
/*
 * Pull read data off an interface.
 * Len is length of data, with local net header stripped.
 * Off is non-zero if a trailer protocol was used, and
 * gives the offset of the trailer information.
 * We copy the trailer information and then all the normal
 * data into mbufs.  When full cluster sized units are present
 * on the interface on cluster boundaries we can get them more
 * easily by remapping, and take advantage of this here.
 */
struct mbuf *
dmc_get(ifu, ifrw, totlen, off0)
	register struct dmcuba *ifu;
	register struct ifrw *ifrw;
	int totlen, off0;
{
	struct mbuf *top, **mp, *m;
	int off = off0, len;
	register caddr_t cp = ifrw->ifrw_addr + ifu->ifu_hlen;

	top = 0;
	mp = &top;
	while (totlen > 0) {
		MGET(m, M_DONTWAIT, MT_DATA);
		if (m == 0)
			goto bad;
		if (off) {
			len = totlen - off;
			cp = ifrw->ifrw_addr + ifu->ifu_hlen + off;
		} else
			len = totlen;
		if (len >= CLBYTES) {
			struct mbuf *p;
			struct pte *cpte, *ppte;
			int x, *ip, i;

			MCLGET(p, 1);
			if (p == 0)
				goto nopage;
			len = m->m_len = CLBYTES;
			m->m_off = (int)p - (int)m;
			if (!claligned(cp))
				goto copy;

			/*
			 * Switch pages mapped to UNIBUS with new page p,
			 * as quick form of copy.  Remap UNIBUS and invalidate.
			 */
			cpte = &Mbmap[mtocl(cp)*CLSIZE];
			ppte = &Mbmap[mtocl(p)*CLSIZE];
			x = btop(cp - ifrw->ifrw_addr);
			ip = (int *)&ifrw->ifrw_mr[x];
			for (i = 0; i < CLSIZE; i++) {
				struct pte t;
				t = *ppte; *ppte++ = *cpte; *cpte = t;
				*ip++ =
				    cpte++->pg_pfnum|ifrw->ifrw_proto;
				mtpr(TBIS, cp);
				cp += NBPG;
				mtpr(TBIS, (caddr_t)p);
				p += NBPG / sizeof (*p);
			}
			goto nocopy;
		}
nopage:
		m->m_len = MIN(MLEN, len);
		m->m_off = MMINOFF;
copy:
		bcopy(cp, mtod(m, caddr_t), (unsigned)m->m_len);
		cp += m->m_len;
nocopy:
		*mp = m;
		mp = &m->m_next;
		if (off) {
			/* sort of an ALGOL-W style for statement... */
			off += m->m_len;
			if (off == totlen) {
				cp = ifrw->ifrw_addr + ifu->ifu_hlen;
				off = 0;
				totlen = off0;
			}
		} else
			totlen -= m->m_len;
	}
	return (top);
bad:
	m_freem(top);
	return (0);
}
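/*
 * Note on dmc_get() above (illustrative): when off0 is nonzero the loop
 * copies the trailer region (from off0 to totlen) into mbufs first, then
 * resets off to 0 and copies the leading off0 bytes of data, so the caller
 * gets the protocol header at the front of the chain even though it arrived
 * at the end of the packet.
 */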
/*
 * Map a chain of mbufs onto a network interface
 * in preparation for an i/o operation.
 * The argument chain of mbufs includes the local network
 * header which is copied to be in the mapped, aligned
 * i/o space.
 */
dmcput(ifu, n, m)
	struct dmcuba *ifu;
	int n;
	register struct mbuf *m;
{
	register struct mbuf *mp;
	register caddr_t cp;
	register struct ifxmt *ifxp;
	register struct ifrw *ifrw;
	register int i;
	int xswapd = 0;
	int x, cc, t;
	caddr_t dp;

	ifxp = &ifu->ifu_w[n];
	ifrw = &ifxp->x_ifrw;
	cp = ifrw->ifrw_addr;
	while (m) {
		dp = mtod(m, char *);
		if (claligned(cp) && claligned(dp) && m->m_len == CLBYTES) {
			struct pte *pte; int *ip;
			pte = &Mbmap[mtocl(dp)*CLSIZE];
			x = btop(cp - ifrw->ifrw_addr);
			ip = (int *)&ifrw->ifrw_mr[x];
			for (i = 0; i < CLSIZE; i++)
				*ip++ = ifrw->ifrw_proto | pte++->pg_pfnum;
			xswapd |= 1 << (x>>(CLSHIFT-PGSHIFT));
			mp = m->m_next;
			m->m_next = ifxp->x_xtofree;
			ifxp->x_xtofree = m;
			cp += m->m_len;
		} else {
			bcopy(mtod(m, caddr_t), cp, (unsigned)m->m_len);
			cp += m->m_len;
			MFREE(m, mp);
		}
		m = mp;
	}

	/*
	 * Xswapd is the set of clusters we just mapped out.  Ifxp->x_xswapd
	 * is the set of clusters mapped out from before.  We compute
	 * the number of clusters involved in this operation in x.
	 * Clusters mapped out before and involved in this operation
	 * should be unmapped so original pages will be accessed by the device.
	 */
	cc = cp - ifrw->ifrw_addr;
	x = ((cc - ifu->ifu_hlen) + CLBYTES - 1) >> CLSHIFT;
	ifxp->x_xswapd &= ~xswapd;
	while (i = ffs(ifxp->x_xswapd)) {
		i--;
		if (i >= x)
			break;
		ifxp->x_xswapd &= ~(1<<i);
		i *= CLSIZE;
		for (t = 0; t < CLSIZE; t++) {
			ifrw->ifrw_mr[i] = ifxp->x_map[i];
			i++;
		}
	}
	ifxp->x_xswapd |= xswapd;
	return (cc);
}
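/*
 * Note on the xswapd bookkeeping above (illustrative): bit i of x_xswapd
 * means cluster i of this transmit buffer is currently mapped to an mbuf
 * cluster page rather than the buffer's own core.  Any such cluster that is
 * inside the region used by this transmission, but was not remapped this
 * time, has its original map-register contents restored from x_map[] so the
 * device sees the freshly copied data.
 */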
/*
 * Restart after a fatal error.
 * Clear device and reinitialize.
 */
dmcrestart(unit)
	int unit;
{
	register struct dmc_softc *sc = &dmc_softc[unit];
	register struct uba_device *ui = dmcinfo[unit];
	register struct dmcdevice *addr;
	register struct ifxmt *ifxp;
	register int i;
	register struct mbuf *m;
	struct dmcuba *ifu;

	addr = (struct dmcdevice *)ui->ui_addr;
	ifu = &sc->sc_ifuba;
#ifdef DEBUG
	/* dump base table */
	printf("dmc%d base table:\n", unit);
	for (i = 0; i < sizeof (struct dmc_base) / sizeof (short); i++)
		printf("%o\n", dmc_base[unit].d_base[i]);
#endif
	/*
	 * Let the DMR finish the MCLR.  At 1 Mbit, it should do so
	 * in about a max of 6.4 milliseconds with diagnostics enabled.
	 */
	addr->bsel1 = DMC_MCLR;
	for (i = 100000; i && (addr->bsel1 & DMC_RUN) == 0; i--)
		;
	/* Did the timer expire or did the DMR finish? */
	if ((addr->bsel1 & DMC_RUN) == 0) {
		printf("dmc%d: M820 Test Failed\n", unit);
		return;
	}

#ifdef notdef	/* tef sez why throw these packets away??? */
	/* purge send queue */
	IF_DEQUEUE(&sc->sc_if.if_snd, m);
	while (m) {
		m_freem(m);
		IF_DEQUEUE(&sc->sc_if.if_snd, m);
	}
#endif
	for (ifxp = ifu->ifu_w; ifxp < &ifu->ifu_w[NXMT]; ifxp++) {
		if (ifxp->x_xtofree) {
			(void) m_freem(ifxp->x_xtofree);
			ifxp->x_xtofree = 0;
		}
	}

	/* restart DMC */
	dmcinit(unit);
	sc->sc_flag &= ~DMC_RESTART;
	sc->sc_if.if_collisions++;	/* why not? */
}

/*
 * Check to see that transmitted packets don't
 * lose interrupts.  The device has to be active.
 */
dmcwatch()
{
	register struct uba_device *ui;
	register struct dmc_softc *sc;
	struct dmcdevice *addr;
	register int i;

	for (i = 0; i < NDMC; i++) {
		sc = &dmc_softc[i];
		if ((sc->sc_flag & DMC_ACTIVE) == 0)
			continue;
		if ((ui = dmcinfo[i]) == 0 || ui->ui_alive == 0)
			continue;
		if (sc->sc_oused) {
			sc->sc_nticks++;
			if (sc->sc_nticks > dmc_timeout) {
				sc->sc_nticks = 0;
				addr = (struct dmcdevice *)ui->ui_addr;
				printd("dmc%d hung: bsel0=%b bsel2=%b\n", i,
				    addr->bsel0 & 0xff, DMC0BITS,
				    addr->bsel2 & 0xff, DMC2BITS);
				dmcrestart(i);
			}
		}
	}
	timeout(dmcwatch, (caddr_t) 0, hz);
}
#endif