1 /* if_dmc.c 6.5 85/05/01 */ 2 3 #include "dmc.h" 4 #if NDMC > 0 5 6 /* 7 * DMC11 device driver, internet version 8 * 9 * Bill Nesheim 10 * Cornell University 11 * 12 * Lou Salkind 13 * New York University 14 */ 15 16 /* #define DEBUG /* for base table dump on fatal error */ 17 18 #include "../machine/pte.h" 19 20 #include "param.h" 21 #include "systm.h" 22 #include "mbuf.h" 23 #include "buf.h" 24 #include "ioctl.h" /* must precede tty.h */ 25 #include "tty.h" 26 #include "protosw.h" 27 #include "socket.h" 28 #include "vmmac.h" 29 #include "errno.h" 30 31 #include "../net/if.h" 32 #include "../net/netisr.h" 33 #include "../net/route.h" 34 #include "../netinet/in.h" 35 #include "../netinet/in_systm.h" 36 #include "../netinet/ip.h" 37 #include "../netinet/ip_var.h" 38 39 #include "../vax/cpu.h" 40 #include "../vax/mtpr.h" 41 #include "if_uba.h" 42 #include "if_dmc.h" 43 #include "../vaxuba/ubareg.h" 44 #include "../vaxuba/ubavar.h" 45 46 #include "../h/time.h" 47 #include "../h/kernel.h" 48 49 int dmctimer; /* timer started? */ 50 int dmc_timeout = 8; /* timeout value */ 51 int dmcwatch(); 52 53 /* 54 * Driver information for auto-configuration stuff. 
55 */ 56 int dmcprobe(), dmcattach(), dmcinit(), dmcioctl(); 57 int dmcoutput(), dmcreset(); 58 struct uba_device *dmcinfo[NDMC]; 59 u_short dmcstd[] = { 0 }; 60 struct uba_driver dmcdriver = 61 { dmcprobe, 0, dmcattach, 0, dmcstd, "dmc", dmcinfo }; 62 63 #define NRCV 7 64 #define NXMT 3 65 #define NTOT (NRCV + NXMT) 66 #define NCMDS (NTOT+4) /* size of command queue */ 67 68 #define printd if(dmcdebug)printf 69 int dmcdebug = 0; 70 71 /* error reporting intervals */ 72 #define DMC_RPNBFS 50 73 #define DMC_RPDSC 1 74 #define DMC_RPTMO 10 75 #define DMC_RPDCK 10 76 77 struct dmc_command { 78 char qp_cmd; /* command */ 79 short qp_ubaddr; /* buffer address */ 80 short qp_cc; /* character count || XMEM */ 81 struct dmc_command *qp_next; /* next command on queue */ 82 }; 83 84 /* 85 * The dmcuba structures generalize the ifuba structure 86 * to an arbitrary number of receive and transmit buffers. 87 */ 88 struct ifxmt { 89 struct ifrw x_ifrw; /* mapping info */ 90 struct pte x_map[IF_MAXNUBAMR]; /* output base pages */ 91 short x_xswapd; /* mask of clusters swapped */ 92 struct mbuf *x_xtofree; /* pages being dma'd out */ 93 }; 94 95 struct dmcuba { 96 short ifu_uban; /* uba number */ 97 short ifu_hlen; /* local net header length */ 98 struct uba_regs *ifu_uba; /* uba regs, in vm */ 99 struct ifrw ifu_r[NRCV]; /* receive information */ 100 struct ifxmt ifu_w[NXMT]; /* transmit information */ 101 /* these should only be pointers */ 102 short ifu_flags; /* used during uballoc's */ 103 }; 104 105 struct dmcbufs { 106 int ubinfo; /* from uballoc */ 107 short cc; /* buffer size */ 108 short flags; /* access control */ 109 }; 110 #define DBUF_OURS 0 /* buffer is available */ 111 #define DBUF_DMCS 1 /* buffer claimed by somebody */ 112 #define DBUF_XMIT 4 /* transmit buffer */ 113 #define DBUF_RCV 8 /* receive buffer */ 114 115 struct mbuf *dmc_get(); 116 117 /* 118 * DMC software status per interface. 
119 * 120 * Each interface is referenced by a network interface structure, 121 * sc_if, which the routing code uses to locate the interface. 122 * This structure contains the output queue for the interface, its address, ... 123 * We also have, for each interface, a set of 7 UBA interface structures 124 * for each, which 125 * contain information about the UNIBUS resources held by the interface: 126 * map registers, buffered data paths, etc. Information is cached in this 127 * structure for use by the if_uba.c routines in running the interface 128 * efficiently. 129 */ 130 struct dmc_softc { 131 short sc_oused; /* output buffers currently in use */ 132 short sc_iused; /* input buffers given to DMC */ 133 short sc_flag; /* flags */ 134 int sc_nticks; /* seconds since last interrupt */ 135 struct ifnet sc_if; /* network-visible interface */ 136 struct dmcbufs sc_rbufs[NRCV]; /* receive buffer info */ 137 struct dmcbufs sc_xbufs[NXMT]; /* transmit buffer info */ 138 struct dmcuba sc_ifuba; /* UNIBUS resources */ 139 int sc_ubinfo; /* UBA mapping info for base table */ 140 int sc_errors[4]; /* non-fatal error counters */ 141 #define sc_datck sc_errors[0] 142 #define sc_timeo sc_errors[1] 143 #define sc_nobuf sc_errors[2] 144 #define sc_disc sc_errors[3] 145 /* command queue stuff */ 146 struct dmc_command sc_cmdbuf[NCMDS]; 147 struct dmc_command *sc_qhead; /* head of command queue */ 148 struct dmc_command *sc_qtail; /* tail of command queue */ 149 struct dmc_command *sc_qactive; /* command in progress */ 150 struct dmc_command *sc_qfreeh; /* head of list of free cmd buffers */ 151 struct dmc_command *sc_qfreet; /* tail of list of free cmd buffers */ 152 /* end command queue stuff */ 153 } dmc_softc[NDMC]; 154 155 /* flags */ 156 #define DMC_ALLOC 01 /* unibus resources allocated */ 157 #define DMC_BMAPPED 02 /* base table mapped */ 158 #define DMC_RESTART 04 /* software restart in progress */ 159 #define DMC_ACTIVE 08 /* device active */ 160 161 struct dmc_base { 162 
short d_base[128]; /* DMC base table */ 163 } dmc_base[NDMC]; 164 165 /* queue manipulation macros */ 166 #define QUEUE_AT_HEAD(qp, head, tail) \ 167 (qp)->qp_next = (head); \ 168 (head) = (qp); \ 169 if ((tail) == (struct dmc_command *) 0) \ 170 (tail) = (head) 171 172 #define QUEUE_AT_TAIL(qp, head, tail) \ 173 if ((tail)) \ 174 (tail)->qp_next = (qp); \ 175 else \ 176 (head) = (qp); \ 177 (qp)->qp_next = (struct dmc_command *) 0; \ 178 (tail) = (qp) 179 180 #define DEQUEUE(head, tail) \ 181 (head) = (head)->qp_next;\ 182 if ((head) == (struct dmc_command *) 0)\ 183 (tail) = (head) 184 185 dmcprobe(reg) 186 caddr_t reg; 187 { 188 register int br, cvec; 189 register struct dmcdevice *addr = (struct dmcdevice *)reg; 190 register int i; 191 192 #ifdef lint 193 br = 0; cvec = br; br = cvec; 194 dmcrint(0); dmcxint(0); 195 #endif 196 addr->bsel1 = DMC_MCLR; 197 for (i = 100000; i && (addr->bsel1 & DMC_RUN) == 0; i--) 198 ; 199 if ((addr->bsel1 & DMC_RUN) == 0) { 200 printf("dmcprobe: can't start device\n" ); 201 return (0); 202 } 203 addr->bsel0 = DMC_RQI|DMC_IEI; 204 /* let's be paranoid */ 205 addr->bsel0 |= DMC_RQI|DMC_IEI; 206 DELAY(1000000); 207 addr->bsel1 = DMC_MCLR; 208 for (i = 100000; i && (addr->bsel1 & DMC_RUN) == 0; i--) 209 ; 210 return (1); 211 } 212 213 /* 214 * Interface exists: make available by filling in network interface 215 * record. System will initialize the interface when it is ready 216 * to accept packets. 
 */
dmcattach(ui)
	register struct uba_device *ui;
{
	register struct dmc_softc *sc = &dmc_softc[ui->ui_unit];

	/* fill in the network-visible interface record for this unit */
	sc->sc_if.if_unit = ui->ui_unit;
	sc->sc_if.if_name = "dmc";
	sc->sc_if.if_mtu = DMCMTU;
	sc->sc_if.if_init = dmcinit;
	sc->sc_if.if_output = dmcoutput;
	sc->sc_if.if_ioctl = dmcioctl;
	sc->sc_if.if_reset = dmcreset;
	sc->sc_if.if_flags = IFF_POINTOPOINT;
	sc->sc_ifuba.ifu_flags = UBA_CANTWAIT;

	if_attach(&sc->sc_if);
	/* start the watchdog timer once, shared by all DMC units */
	if (dmctimer == 0) {
		dmctimer = 1;
		timeout(dmcwatch, (caddr_t) 0, hz);
	}
}

/*
 * Reset of interface after UNIBUS reset.
 * If interface is on specified UBA, reset its state.
 */
dmcreset(unit, uban)
	int unit, uban;
{
	register struct uba_device *ui;
	register struct dmc_softc *sc = &dmc_softc[unit];

	if (unit >= NDMC || (ui = dmcinfo[unit]) == 0 || ui->ui_alive == 0 ||
	    ui->ui_ubanum != uban)
		return;
	printf(" dmc%d", unit);
	/* force complete reinitialization: base table remap and uba setup */
	sc->sc_flag = 0;
	sc->sc_if.if_flags &= ~IFF_RUNNING;
	dmcinit(unit);
}

/*
 * Initialization of interface; reinitialize UNIBUS usage.
 * Maps the base table, allocates UNIBUS resources on first call,
 * sets up buffer bookkeeping and the command free list, then
 * gives the device its base-in/control-in commands and posts
 * all NRCV receive buffers.
 */
dmcinit(unit)
	int unit;
{
	register struct dmc_softc *sc = &dmc_softc[unit];
	register struct uba_device *ui = dmcinfo[unit];
	register struct dmcdevice *addr;
	register struct ifnet *ifp = &sc->sc_if;
	register struct ifrw *ifrw;
	register struct ifxmt *ifxp;
	register struct dmcbufs *rp;
	register struct dmc_command *qp;
	struct ifaddr *ifa;
	int base;
	int s;

	addr = (struct dmcdevice *)ui->ui_addr;

	/*
	 * Check to see that an address has been set
	 * (both local and destination for an address family).
	 */
	for (ifa = ifp->if_addrlist; ifa; ifa = ifa->ifa_next)
		if (ifa->ifa_addr.sa_family && ifa->ifa_dstaddr.sa_family)
			break;
	if (ifa == (struct ifaddr *) 0)
		return;

	if ((addr->bsel1&DMC_RUN) == 0) {
		printf("dmcinit: DMC not running\n");
		ifp->if_flags &= ~IFF_UP;
		return;
	}
	/* map base table so the device can DMA it */
	if ((sc->sc_flag & DMC_BMAPPED) == 0) {
		sc->sc_ubinfo = uballoc(ui->ui_ubanum,
			(caddr_t)&dmc_base[unit], sizeof (struct dmc_base), 0);
		sc->sc_flag |= DMC_BMAPPED;
	}
	/* initialize UNIBUS resources */
	sc->sc_iused = sc->sc_oused = 0;
	if ((ifp->if_flags & IFF_RUNNING) == 0) {
		if (dmc_ubainit(&sc->sc_ifuba, ui->ui_ubanum,
		    sizeof(struct dmc_header), (int)btoc(DMCMTU)) == 0) {
			printf("dmc%d: can't allocate uba resources\n", unit);
			ifp->if_flags &= ~IFF_UP;
			return;
		}
		ifp->if_flags |= IFF_RUNNING;
	}

	/* initialize buffer pool */
	/* receives: low 18 bits of ifrw_info are the UNIBUS address */
	ifrw = &sc->sc_ifuba.ifu_r[0];
	for (rp = &sc->sc_rbufs[0]; rp < &sc->sc_rbufs[NRCV]; rp++) {
		rp->ubinfo = ifrw->ifrw_info & 0x3ffff;
		rp->cc = DMCMTU + sizeof (struct dmc_header);
		rp->flags = DBUF_OURS|DBUF_RCV;
		ifrw++;
	}
	/* transmits */
	ifxp = &sc->sc_ifuba.ifu_w[0];
	for (rp = &sc->sc_xbufs[0]; rp < &sc->sc_xbufs[NXMT]; rp++) {
		rp->ubinfo = ifxp->x_ifrw.ifrw_info & 0x3ffff;
		rp->cc = 0;
		rp->flags = DBUF_OURS|DBUF_XMIT;
		ifxp++;
	}

	/* set up command queues */
	sc->sc_qfreeh = sc->sc_qfreet
		= sc->sc_qhead = sc->sc_qtail = sc->sc_qactive =
		(struct dmc_command *)0;
	/* set up free command buffer list */
	for (qp = &sc->sc_cmdbuf[0]; qp < &sc->sc_cmdbuf[NCMDS]; qp++) {
		QUEUE_AT_HEAD(qp, sc->sc_qfreeh, sc->sc_qfreet);
	}

	/* base in: hand the device the UNIBUS address of the base table */
	base = sc->sc_ubinfo & 0x3ffff;
	dmcload(sc, DMC_BASEI, base, (base>>2) & DMC_XMEM);
	/* specify half duplex operation, flags tell if primary */
	/* or secondary station */
	if (ui->ui_flags == 0)
		/* use DDMCP mode in full duplex */
		dmcload(sc, DMC_CNTLI, 0, 0);
	else if (ui->ui_flags == 1)
		/* use MAINTENENCE mode */
		dmcload(sc, DMC_CNTLI, 0, DMC_MAINT );
	else if (ui->ui_flags == 2)
		/* use DDCMP half duplex as primary station */
		dmcload(sc, DMC_CNTLI, 0, DMC_HDPLX);
	else if (ui->ui_flags == 3)
		/* use DDCMP half duplex as secondary station */
		dmcload(sc, DMC_CNTLI, 0, DMC_HDPLX | DMC_SEC);

	/* enable operation done interrupts */
	sc->sc_flag &= ~DMC_ACTIVE;
	while ((addr->bsel2 & DMC_IEO) == 0)
		addr->bsel2 |= DMC_IEO;
	s = spl5();
	/* queue first NRCV buffers for DMC to fill */
	for (rp = &sc->sc_rbufs[0]; rp < &sc->sc_rbufs[NRCV]; rp++) {
		rp->flags |= DBUF_DMCS;
		dmcload(sc, DMC_READ, rp->ubinfo,
			(((rp->ubinfo>>2)&DMC_XMEM) | rp->cc));
		sc->sc_iused++;
	}
	splx(s);
}

/*
 * Start output on interface.  Get another datagram
 * to send from the interface queue and map it to
 * the interface before starting output.
 *
 * Must be called at spl 5
 */
dmcstart(dev)
	dev_t dev;
{
	int unit = minor(dev);
	register struct dmc_softc *sc = &dmc_softc[unit];
	struct mbuf *m;
	register struct dmcbufs *rp;
	register int n;

	/*
	 * Dequeue up to NXMT requests and map them to the UNIBUS.
	 * If no more requests, or no dmc buffers available, just return.
	 * n tracks the index of rp so dmcput maps into the right ifu_w slot.
	 */
	n = 0;
	for (rp = &sc->sc_xbufs[0]; rp < &sc->sc_xbufs[NXMT]; rp++ ) {
		/* find an available buffer */
		if ((rp->flags & DBUF_DMCS) == 0) {
			IF_DEQUEUE(&sc->sc_if.if_snd, m);
			if (m == 0)
				return;
			/* mark it dmcs */
			rp->flags |= (DBUF_DMCS);
			/*
			 * Have request mapped to UNIBUS for transmission
			 * and start the output.
			 */
			rp->cc = dmcput(&sc->sc_ifuba, n, m);
			rp->cc &= DMC_CCOUNT;
			sc->sc_oused++;
			dmcload(sc, DMC_WRITE, rp->ubinfo,
				rp->cc | ((rp->ubinfo>>2)&DMC_XMEM));
		}
		n++;
	}
}

/*
 * Utility routine to load the DMC device registers.
 * Queues the command if one is already in progress; reads are
 * queued at the head so receive buffers get replenished first.
 */
dmcload(sc, type, w0, w1)
	register struct dmc_softc *sc;
	int type, w0, w1;
{
	register struct dmcdevice *addr;
	register int unit, sps;
	register struct dmc_command *qp;

	unit = sc - dmc_softc;
	addr = (struct dmcdevice *)dmcinfo[unit]->ui_addr;
	sps = spl5();

	/* grab a command buffer from the free list */
	if ((qp = sc->sc_qfreeh) == (struct dmc_command *)0)
		panic("dmc command queue overflow");
	DEQUEUE(sc->sc_qfreeh, sc->sc_qfreet);

	/* fill in requested info */
	qp->qp_cmd = (type | DMC_RQI);
	qp->qp_ubaddr = w0;
	qp->qp_cc = w1;

	if (sc->sc_qactive) {	/* command in progress */
		if (type == DMC_READ) {
			QUEUE_AT_HEAD(qp, sc->sc_qhead, sc->sc_qtail);
		} else {
			QUEUE_AT_TAIL(qp, sc->sc_qhead, sc->sc_qtail);
		}
	} else {	/* command port free */
		sc->sc_qactive = qp;
		addr->bsel0 = qp->qp_cmd;
		dmcrint(unit);
	}
	splx(sps);
}

/*
 * DMC interface receiver interrupt.
 * Ready to accept another command,
 * pull one off the command queue.
 */
dmcrint(unit)
	int unit;
{
	register struct dmc_softc *sc;
	register struct dmcdevice *addr;
	register struct dmc_command *qp;
	register int n;

	addr = (struct dmcdevice *)dmcinfo[unit]->ui_addr;
	sc = &dmc_softc[unit];
	if ((qp = sc->sc_qactive) == (struct dmc_command *) 0) {
		printf("dmc%d: dmcrint no command\n", unit);
		return;
	}
	/* while the device is ready for input, feed it queued commands */
	while (addr->bsel0&DMC_RDYI) {
		addr->sel4 = qp->qp_ubaddr;
		addr->sel6 = qp->qp_cc;
		addr->bsel0 &= ~(DMC_IEI|DMC_RQI);
		/* free command buffer */
		QUEUE_AT_HEAD(qp, sc->sc_qfreeh, sc->sc_qfreet);
		/* wait for the device to latch the command (RDYI drops) */
		while (addr->bsel0 & DMC_RDYI) {
			/*
			 * Can't check for RDYO here 'cause
			 * this routine isn't reentrant!
			 */
			DELAY(5);
		}
		/* move on to next command */
		if ((sc->sc_qactive = sc->sc_qhead) == (struct dmc_command *)0)
			break;		/* all done */
		/* more commands to do, start the next one */
		qp = sc->sc_qactive;
		DEQUEUE(sc->sc_qhead, sc->sc_qtail);
		addr->bsel0 = qp->qp_cmd;
		/* give the device a bounded spin to raise RDYI or RDYO */
		n = RDYSCAN;
		while (n-- > 0)
			if ((addr->bsel0&DMC_RDYI) || (addr->bsel2&DMC_RDYO))
				break;
	}
	/* command still pending: re-enable input interrupts */
	if (sc->sc_qactive) {
		addr->bsel0 |= DMC_IEI|DMC_RQI;
		/* VMS does it twice !*$%@# */
		addr->bsel0 |= DMC_IEI|DMC_RQI;
	}

}

/*
 * DMC interface transmitter interrupt.
 * A transfer may have completed, check for errors.
 * If it was a read, notify appropriate protocol.
 * If it was a write, pull the next one off the queue.
 */
dmcxint(unit)
	int unit;
{
	register struct dmc_softc *sc;
	register struct ifnet *ifp;
	struct uba_device *ui = dmcinfo[unit];
	struct dmcdevice *addr;
	struct mbuf *m;
	struct ifqueue *inq;
	int arg, pkaddr, cmd, len;
	register struct ifrw *ifrw;
	register struct dmcbufs *rp;
	register struct ifxmt *ifxp;
	struct dmc_header *dh;
	int off, resid;

	addr = (struct dmcdevice *)ui->ui_addr;
	sc = &dmc_softc[unit];
	ifp = &sc->sc_if;

	/* drain every completion the device has posted */
	while (addr->bsel2 & DMC_RDYO) {

		cmd = addr->bsel2 & 0xff;
		arg = addr->sel6 & 0xffff;
		/* reconstruct UNIBUS address of buffer returned to us */
		pkaddr = ((arg&DMC_XMEM)<<2) | (addr->sel4 & 0xffff);
		/* release port */
		addr->bsel2 &= ~DMC_RDYO;
		switch (cmd & 07) {

		case DMC_OUR:
			/*
			 * A read has completed.
			 * Pass packet to type specific
			 * higher-level input routine.
			 */
			ifp->if_ipackets++;
			/* find location in dmcuba struct */
			ifrw= &sc->sc_ifuba.ifu_r[0];
			for (rp = &sc->sc_rbufs[0]; rp < &sc->sc_rbufs[NRCV]; rp++) {
				if(rp->ubinfo == pkaddr)
					break;
				ifrw++;
			}
			if (rp >= &sc->sc_rbufs[NRCV])
				panic("dmc rcv");
			if ((rp->flags & DBUF_DMCS) == 0)
				printf("dmc%d: done unalloc rbuf\n", unit);

			/* arg low bits carry the DMA byte count */
			len = (arg & DMC_CCOUNT) - sizeof (struct dmc_header);
			if (len < 0 || len > DMCMTU) {
				ifp->if_ierrors++;
				printd("dmc%d: bad rcv pkt addr 0x%x len 0x%x\n",
				    unit, pkaddr, len);
				goto setup;
			}
			/*
			 * Deal with trailer protocol: if type is trailer
			 * get true type from first 16-bit word past data.
			 * Remember that type was trailer by setting off.
			 */
			dh = (struct dmc_header *)ifrw->ifrw_addr;
			dh->dmc_type = ntohs((u_short)dh->dmc_type);
#define dmcdataaddr(dh, off, type)	((type)(((caddr_t)((dh)+1)+(off))))
			if (dh->dmc_type >= DMC_TRAILER &&
			    dh->dmc_type < DMC_TRAILER+DMC_NTRAILER) {
				off = (dh->dmc_type - DMC_TRAILER) * 512;
				if (off >= DMCMTU)
					goto setup;	/* sanity */
				dh->dmc_type = ntohs(*dmcdataaddr(dh, off, u_short *));
				resid = ntohs(*(dmcdataaddr(dh, off+2, u_short *)));
				if (off + resid > len)
					goto setup;	/* sanity */
				len = off + resid;
			} else
				off = 0;
			if (len == 0)
				goto setup;

			/*
			 * Pull packet off interface.  Off is nonzero if
			 * packet has trailing header; dmc_get will then
			 * force this header information to be at the front,
			 * but we still have to drop the type and length
			 * which are at the front of any trailer data.
			 */
			m = dmc_get(&sc->sc_ifuba, ifrw, len, off);
			if (m == 0)
				goto setup;
			if (off) {
				m->m_off += 2 * sizeof (u_short);
				m->m_len -= 2 * sizeof (u_short);
			}
			switch (dh->dmc_type) {

#ifdef INET
			case DMC_IPTYPE:
				schednetisr(NETISR_IP);
				inq = &ipintrq;
				break;
#endif
			default:
				m_freem(m);
				goto setup;
			}

			if (IF_QFULL(inq)) {
				IF_DROP(inq);
				m_freem(m);
			} else
				IF_ENQUEUE(inq, m);

	setup:
			/* is this needed? */
			rp->ubinfo = ifrw->ifrw_info & 0x3ffff;

			/* hand the buffer straight back to the device */
			dmcload(sc, DMC_READ, rp->ubinfo,
			    ((rp->ubinfo >> 2) & DMC_XMEM) | rp->cc);
			break;

		case DMC_OUX:
			/*
			 * A write has completed, start another
			 * transfer if there is more data to send.
			 */
			ifp->if_opackets++;
			/* find associated dmcbuf structure */
			ifxp = &sc->sc_ifuba.ifu_w[0];
			for (rp = &sc->sc_xbufs[0]; rp < &sc->sc_xbufs[NXMT]; rp++) {
				if(rp->ubinfo == pkaddr)
					break;
				ifxp++;
			}
			if (rp >= &sc->sc_xbufs[NXMT]) {
				printf("dmc%d: bad packet address 0x%x\n",
				    unit, pkaddr);
				break;
			}
			if ((rp->flags & DBUF_DMCS) == 0)
				printf("dmc%d: unallocated packet 0x%x\n",
				    unit, pkaddr);
			/* mark buffer free */
			if (ifxp->x_xtofree) {
				(void)m_freem(ifxp->x_xtofree);
				ifxp->x_xtofree = 0;
			}
			rp->flags &= ~DBUF_DMCS;
			sc->sc_oused--;
			/* a completed write proves the device is alive */
			sc->sc_nticks = 0;
			sc->sc_flag |= DMC_ACTIVE;
			break;

		case DMC_CNTLO:
			arg &= DMC_CNTMASK;
			if (arg & DMC_FATAL) {
				printd("dmc%d: fatal error, flags=%b\n",
				    unit, arg, CNTLO_BITS);
				dmcrestart(unit);
				break;
			}
			/* ACCUMULATE STATISTICS */
			switch(arg) {
			case DMC_NOBUFS:
				ifp->if_ierrors++;
				if ((sc->sc_nobuf++ % DMC_RPNBFS) == 0)
					goto report;
				break;
			case DMC_DISCONN:
				if ((sc->sc_disc++ % DMC_RPDSC) == 0)
					goto report;
				break;
			case DMC_TIMEOUT:
				if ((sc->sc_timeo++ % DMC_RPTMO) == 0)
					goto report;
				break;
			case DMC_DATACK:
				ifp->if_oerrors++;
				if ((sc->sc_datck++ % DMC_RPDCK) == 0)
					goto report;
				break;
			default:
				goto report;
			}
			break;
		report:
			printd("dmc%d: soft error, flags=%b\n", unit,
			    arg, CNTLO_BITS);
			if ((sc->sc_flag & DMC_RESTART) == 0) {
				/*
				 * kill off the dmc to get things
				 * going again by generating a
				 * procedure error
				 */
				sc->sc_flag |= DMC_RESTART;
				arg = sc->sc_ubinfo & 0x3ffff;
				dmcload(sc, DMC_BASEI, arg, (arg>>2)&DMC_XMEM);
			}
			break;

		default:
			printf("dmc%d: bad control %o\n", unit, cmd);
			break;
		}
	}
	/* a transmit slot may have freed up; push more output */
	dmcstart(unit);
	return;
}

/*
 * DMC output routine.
 * Encapsulate a packet of type family for the dmc.
 * Use trailer local net encapsulation if enough data in first
 * packet leaves a multiple of 512 bytes of data in remainder.
 */
dmcoutput(ifp, m0, dst)
	register struct ifnet *ifp;
	register struct mbuf *m0;
	struct sockaddr *dst;
{
	int type, error, s;
	register struct mbuf *m = m0;
	register struct dmc_header *dh;
	register int off;

	switch (dst->sa_family) {
#ifdef	INET
	case AF_INET:
		/* off = bytes of IP packet beyond the first mbuf */
		off = ntohs((u_short)mtod(m, struct ip *)->ip_len) - m->m_len;
		if ((ifp->if_flags & IFF_NOTRAILERS) == 0)
		if (off > 0 && (off & 0x1ff) == 0 &&
		    m->m_off >= MMINOFF + 2 * sizeof (u_short)) {
			/* remainder is a multiple of 512: use a trailer type */
			type = DMC_TRAILER + (off>>9);
			m->m_off -= 2 * sizeof (u_short);
			m->m_len += 2 * sizeof (u_short);
			*mtod(m, u_short *) = htons((u_short)DMC_IPTYPE);
			*(mtod(m, u_short *) + 1) = htons((u_short)m->m_len);
			goto gottrailertype;
		}
		type = DMC_IPTYPE;
		off = 0;
		goto gottype;
#endif

	case AF_UNSPEC:
		/* caller supplied a pre-built header in sa_data */
		dh = (struct dmc_header *)dst->sa_data;
		type = dh->dmc_type;
		goto gottype;

	default:
		printf("dmc%d: can't handle af%d\n", ifp->if_unit,
			dst->sa_family);
		error = EAFNOSUPPORT;
		goto bad;
	}

gottrailertype:
	/*
	 * Packet to be sent as a trailer; move first packet
	 * (control information) to end of chain.
	 */
	while (m->m_next)
		m = m->m_next;
	m->m_next = m0;
	m = m0->m_next;
	m0->m_next = 0;
	m0 = m;

gottype:
	/*
	 * Add local network header
	 * (there is space for a uba on a vax to step on)
	 */
	if (m->m_off > MMAXOFF ||
	    MMINOFF + sizeof(struct dmc_header) > m->m_off) {
		/* no room in the first mbuf: prepend a fresh header mbuf */
		m = m_get(M_DONTWAIT, MT_HEADER);
		if (m == 0) {
			error = ENOBUFS;
			goto bad;
		}
		m->m_next = m0;
		m->m_off = MMINOFF;
		m->m_len = sizeof (struct dmc_header);
	} else {
		m->m_off -= sizeof (struct dmc_header);
		m->m_len += sizeof (struct dmc_header);
	}
	dh = mtod(m, struct dmc_header *);
	dh->dmc_type = htons((u_short)type);

	/*
	 * Queue message on interface, and start output if interface
	 * not yet active.
	 */
	s = splimp();
	if (IF_QFULL(&ifp->if_snd)) {
		IF_DROP(&ifp->if_snd);
		m_freem(m);
		splx(s);
		return (ENOBUFS);
	}
	IF_ENQUEUE(&ifp->if_snd, m);
	dmcstart(ifp->if_unit);
	splx(s);
	return (0);

bad:
	m_freem(m0);
	return (error);
}


/*
 * Process an ioctl request.
 * Only address assignment is supported; either address ioctl
 * brings the interface up if it is not already running.
 */
dmcioctl(ifp, cmd, data)
	register struct ifnet *ifp;
	int cmd;
	caddr_t data;
{
	int s = splimp(), error = 0;

	switch (cmd) {

	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			dmcinit(ifp->if_unit);
		break;

	case SIOCSIFDSTADDR:
		if ((ifp->if_flags & IFF_RUNNING) == 0)
			dmcinit(ifp->if_unit);
		break;

	default:
		error = EINVAL;
	}
	splx(s);
	return (error);
}


/*
 * Routines supporting UNIBUS network interfaces.
 */

/*
 * Init UNIBUS for interface on uban whose headers of size hlen are to
 * end on a page boundary.  We allocate a UNIBUS map register for the page
 * with the header, and nmr more UNIBUS map registers for i/o on the adapter,
 * doing this for each receive and transmit buffer.
We also 863 * allocate page frames in the mbuffer pool for these pages. 864 */ 865 dmc_ubainit(ifu, uban, hlen, nmr) 866 register struct dmcuba *ifu; 867 int uban, hlen, nmr; 868 { 869 register caddr_t cp, dp; 870 register struct ifrw *ifrw; 871 register struct ifxmt *ifxp; 872 int i, ncl; 873 874 ncl = clrnd(nmr + CLSIZE) / CLSIZE; 875 if (ifu->ifu_r[0].ifrw_addr) 876 /* 877 * If the first read buffer has a non-zero 878 * address, it means we have already allocated core 879 */ 880 cp = ifu->ifu_r[0].ifrw_addr - (CLBYTES - hlen); 881 else { 882 cp = m_clalloc(NTOT * ncl, MPG_SPACE); 883 if (cp == 0) 884 return (0); 885 ifu->ifu_hlen = hlen; 886 ifu->ifu_uban = uban; 887 ifu->ifu_uba = uba_hd[uban].uh_uba; 888 dp = cp + CLBYTES - hlen; 889 for (ifrw = ifu->ifu_r; ifrw < &ifu->ifu_r[NRCV]; ifrw++) { 890 ifrw->ifrw_addr = dp; 891 dp += ncl * CLBYTES; 892 } 893 for (ifxp = ifu->ifu_w; ifxp < &ifu->ifu_w[NXMT]; ifxp++) { 894 ifxp->x_ifrw.ifrw_addr = dp; 895 dp += ncl * CLBYTES; 896 } 897 } 898 /* allocate for receive ring */ 899 for (ifrw = ifu->ifu_r; ifrw < &ifu->ifu_r[NRCV]; ifrw++) { 900 if (dmc_ubaalloc(ifu, ifrw, nmr) == 0) { 901 struct ifrw *rw; 902 903 for (rw = ifu->ifu_r; rw < ifrw; rw++) 904 ubarelse(ifu->ifu_uban, &rw->ifrw_info); 905 goto bad; 906 } 907 } 908 /* and now transmit ring */ 909 for (ifxp = ifu->ifu_w; ifxp < &ifu->ifu_w[NXMT]; ifxp++) { 910 ifrw = &ifxp->x_ifrw; 911 if (dmc_ubaalloc(ifu, ifrw, nmr) == 0) { 912 struct ifxmt *xp; 913 914 for (xp = ifu->ifu_w; xp < ifxp; xp++) 915 ubarelse(ifu->ifu_uban, &xp->x_ifrw.ifrw_info); 916 for (ifrw = ifu->ifu_r; ifrw < &ifu->ifu_r[NRCV]; ifrw++) 917 ubarelse(ifu->ifu_uban, &ifrw->ifrw_info); 918 goto bad; 919 } 920 for (i = 0; i < nmr; i++) 921 ifxp->x_map[i] = ifrw->ifrw_mr[i]; 922 ifxp->x_xswapd = 0; 923 } 924 return (1); 925 bad: 926 m_pgfree(cp, NTOT * ncl); 927 ifu->ifu_r[0].ifrw_addr = 0; 928 return (0); 929 } 930 931 /* 932 * Setup either a ifrw structure by allocating UNIBUS map registers, 933 * 
possibly a buffered data path, and initializing the fields of 934 * the ifrw structure to minimize run-time overhead. 935 */ 936 static 937 dmc_ubaalloc(ifu, ifrw, nmr) 938 struct dmcuba *ifu; 939 register struct ifrw *ifrw; 940 int nmr; 941 { 942 register int info; 943 944 info = 945 uballoc(ifu->ifu_uban, ifrw->ifrw_addr, nmr*NBPG + ifu->ifu_hlen, 946 ifu->ifu_flags); 947 if (info == 0) 948 return (0); 949 ifrw->ifrw_info = info; 950 ifrw->ifrw_bdp = UBAI_BDP(info); 951 ifrw->ifrw_proto = UBAMR_MRV | (UBAI_BDP(info) << UBAMR_DPSHIFT); 952 ifrw->ifrw_mr = &ifu->ifu_uba->uba_map[UBAI_MR(info) + 1]; 953 return (1); 954 } 955 956 /* 957 * Pull read data off a interface. 958 * Len is length of data, with local net header stripped. 959 * Off is non-zero if a trailer protocol was used, and 960 * gives the offset of the trailer information. 961 * We copy the trailer information and then all the normal 962 * data into mbufs. When full cluster sized units are present 963 * on the interface on cluster boundaries we can get them more 964 * easily by remapping, and take advantage of this here. 965 */ 966 struct mbuf * 967 dmc_get(ifu, ifrw, totlen, off0) 968 register struct dmcuba *ifu; 969 register struct ifrw *ifrw; 970 int totlen, off0; 971 { 972 struct mbuf *top, **mp, *m; 973 int off = off0, len; 974 register caddr_t cp = ifrw->ifrw_addr + ifu->ifu_hlen; 975 976 top = 0; 977 mp = ⊤ 978 while (totlen > 0) { 979 MGET(m, M_DONTWAIT, MT_DATA); 980 if (m == 0) 981 goto bad; 982 if (off) { 983 len = totlen - off; 984 cp = ifrw->ifrw_addr + ifu->ifu_hlen + off; 985 } else 986 len = totlen; 987 if (len >= CLBYTES) { 988 struct mbuf *p; 989 struct pte *cpte, *ppte; 990 int x, *ip, i; 991 992 MCLGET(p, 1); 993 if (p == 0) 994 goto nopage; 995 len = m->m_len = CLBYTES; 996 m->m_off = (int)p - (int)m; 997 if (!claligned(cp)) 998 goto copy; 999 1000 /* 1001 * Switch pages mapped to UNIBUS with new page p, 1002 * as quick form of copy. Remap UNIBUS and invalidate. 
1003 */ 1004 cpte = &Mbmap[mtocl(cp)*CLSIZE]; 1005 ppte = &Mbmap[mtocl(p)*CLSIZE]; 1006 x = btop(cp - ifrw->ifrw_addr); 1007 ip = (int *)&ifrw->ifrw_mr[x]; 1008 for (i = 0; i < CLSIZE; i++) { 1009 struct pte t; 1010 t = *ppte; *ppte++ = *cpte; *cpte = t; 1011 *ip++ = 1012 cpte++->pg_pfnum|ifrw->ifrw_proto; 1013 mtpr(TBIS, cp); 1014 cp += NBPG; 1015 mtpr(TBIS, (caddr_t)p); 1016 p += NBPG / sizeof (*p); 1017 } 1018 goto nocopy; 1019 } 1020 nopage: 1021 m->m_len = MIN(MLEN, len); 1022 m->m_off = MMINOFF; 1023 copy: 1024 bcopy(cp, mtod(m, caddr_t), (unsigned)m->m_len); 1025 cp += m->m_len; 1026 nocopy: 1027 *mp = m; 1028 mp = &m->m_next; 1029 if (off) { 1030 /* sort of an ALGOL-W style for statement... */ 1031 off += m->m_len; 1032 if (off == totlen) { 1033 cp = ifrw->ifrw_addr + ifu->ifu_hlen; 1034 off = 0; 1035 totlen = off0; 1036 } 1037 } else 1038 totlen -= m->m_len; 1039 } 1040 return (top); 1041 bad: 1042 m_freem(top); 1043 return (0); 1044 } 1045 1046 /* 1047 * Map a chain of mbufs onto a network interface 1048 * in preparation for an i/o operation. 1049 * The argument chain of mbufs includes the local network 1050 * header which is copied to be in the mapped, aligned 1051 * i/o space. 
 */
dmcput(ifu, n, m)
	struct dmcuba *ifu;
	int n;
	register struct mbuf *m;
{
	register struct mbuf *mp;
	register caddr_t cp;
	register struct ifxmt *ifxp;
	register struct ifrw *ifrw;
	register int i;
	int xswapd = 0;
	int x, cc, t;
	caddr_t dp;

	ifxp = &ifu->ifu_w[n];
	ifrw = &ifxp->x_ifrw;
	cp = ifrw->ifrw_addr;
	while (m) {
		dp = mtod(m, char *);
		if (claligned(cp) && claligned(dp) && m->m_len == CLBYTES) {
			/*
			 * Cluster-aligned full cluster: map the mbuf's
			 * pages to the UNIBUS instead of copying; the
			 * mbuf is kept on x_xtofree until the DMA ends.
			 */
			struct pte *pte; int *ip;
			pte = &Mbmap[mtocl(dp)*CLSIZE];
			x = btop(cp - ifrw->ifrw_addr);
			ip = (int *)&ifrw->ifrw_mr[x];
			for (i = 0; i < CLSIZE; i++)
				*ip++ = ifrw->ifrw_proto | pte++->pg_pfnum;
			xswapd |= 1 << (x>>(CLSHIFT-PGSHIFT));
			mp = m->m_next;
			m->m_next = ifxp->x_xtofree;
			ifxp->x_xtofree = m;
			cp += m->m_len;
		} else {
			/* otherwise copy into the mapped i/o space */
			bcopy(mtod(m, caddr_t), cp, (unsigned)m->m_len);
			cp += m->m_len;
			MFREE(m, mp);
		}
		m = mp;
	}

	/*
	 * Xswapd is the set of clusters we just mapped out.  Ifxp->x_xswapd
	 * is the set of clusters mapped out from before.  We compute
	 * the number of clusters involved in this operation in x.
	 * Clusters mapped out before and involved in this operation
	 * should be unmapped so original pages will be accessed by the device.
	 */
	cc = cp - ifrw->ifrw_addr;
	x = ((cc - ifu->ifu_hlen) + CLBYTES - 1) >> CLSHIFT;
	ifxp->x_xswapd &= ~xswapd;
	while (i = ffs(ifxp->x_xswapd)) {
		i--;
		if (i >= x)
			break;
		ifxp->x_xswapd &= ~(1<<i);
		i *= CLSIZE;
		/* restore the pristine map entries saved at init time */
		for (t = 0; t < CLSIZE; t++) {
			ifrw->ifrw_mr[i] = ifxp->x_map[i];
			i++;
		}
	}
	ifxp->x_xswapd |= xswapd;
	return (cc);
}

/*
 * Restart after a fatal error.
 * Clear device and reinitialize.
1120 */ 1121 dmcrestart(unit) 1122 int unit; 1123 { 1124 register struct dmc_softc *sc = &dmc_softc[unit]; 1125 register struct uba_device *ui = dmcinfo[unit]; 1126 register struct dmcdevice *addr; 1127 register struct ifxmt *ifxp; 1128 register int i; 1129 register struct mbuf *m; 1130 struct dmcuba *ifu; 1131 1132 addr = (struct dmcdevice *)ui->ui_addr; 1133 ifu = &sc->sc_ifuba; 1134 #ifdef DEBUG 1135 /* dump base table */ 1136 printf("dmc%d base table:\n", unit); 1137 for (i = 0; i < sizeof (struct dmc_base); i++) 1138 printf("%o\n" ,dmc_base[unit].d_base[i]); 1139 #endif 1140 /* 1141 * Let the DMR finish the MCLR. At 1 Mbit, it should do so 1142 * in about a max of 6.4 milliseconds with diagnostics enabled. 1143 */ 1144 addr->bsel1 = DMC_MCLR; 1145 for (i = 100000; i && (addr->bsel1 & DMC_RUN) == 0; i--) 1146 ; 1147 /* Did the timer expire or did the DMR finish? */ 1148 if ((addr->bsel1 & DMC_RUN) == 0) { 1149 printf("dmc%d: M820 Test Failed\n", unit); 1150 return; 1151 } 1152 1153 #ifdef notdef /* tef sez why throw these packets away??? */ 1154 /* purge send queue */ 1155 IF_DEQUEUE(&sc->sc_if.if_snd, m); 1156 while (m) { 1157 m_freem(m); 1158 IF_DEQUEUE(&sc->sc_if.if_snd, m); 1159 } 1160 #endif 1161 for (ifxp = ifu->ifu_w; ifxp < &ifu->ifu_w[NXMT]; ifxp++) { 1162 if (ifxp->x_xtofree) { 1163 (void) m_freem(ifxp->x_xtofree); 1164 ifxp->x_xtofree = 0; 1165 } 1166 } 1167 1168 /* restart DMC */ 1169 dmcinit(unit); 1170 sc->sc_flag &= ~DMC_RESTART; 1171 sc->sc_if.if_collisions++; /* why not? */ 1172 } 1173 1174 /* 1175 * Check to see that transmitted packets don't 1176 * lose interrupts. The device has to be active. 
 */
dmcwatch()
{
	register struct uba_device *ui;
	register struct dmc_softc *sc;
	struct dmcdevice *addr;
	register int i;

	/* once a second: restart any unit with output stuck too long */
	for (i = 0; i < NDMC; i++) {
		sc = &dmc_softc[i];
		if ((sc->sc_flag & DMC_ACTIVE) == 0)
			continue;
		if ((ui = dmcinfo[i]) == 0 || ui->ui_alive == 0)
			continue;
		if (sc->sc_oused) {
			/* sc_nticks is cleared on every transmit completion */
			sc->sc_nticks++;
			if (sc->sc_nticks > dmc_timeout) {
				sc->sc_nticks = 0;
				addr = (struct dmcdevice *)ui->ui_addr;
				printd("dmc%d hung: bsel0=%b bsel2=%b\n", i,
				    addr->bsel0 & 0xff, DMC0BITS,
				    addr->bsel2 & 0xff, DMC2BITS);
				dmcrestart(i);
			}
		}
	}
	/* rearm for the next second */
	timeout(dmcwatch, (caddr_t) 0, hz);
}
#endif