/*
 * Copyright (c) 1982, 1986 Regents of the University of California.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms are permitted
 * provided that the above copyright notice and this paragraph are
 * duplicated in all such forms and that any documentation,
 * advertising materials, and other materials related to such
 * distribution and use acknowledge that the software was developed
 * by the University of California, Berkeley.  The name of the
 * University may not be used to endorse or promote products derived
 * from this software without specific prior written permission.
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED
 * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 *      @(#)if_dmc.c    7.4 (Berkeley) 10/12/88
 */

#include "dmc.h"
#if NDMC > 0

/*
 * DMC11 device driver, internet version
 *
 *      Bill Nesheim
 *      Cornell University
 *
 *      Lou Salkind
 *      New York University
 */

/* #define DEBUG        /* for base table dump on fatal error */

#include "../machine/pte.h"

#include "param.h"
#include "systm.h"
#include "mbuf.h"
#include "buf.h"
#include "ioctl.h"              /* must precede tty.h */
#include "tty.h"
#include "protosw.h"
#include "socket.h"
#include "syslog.h"
#include "vmmac.h"
#include "errno.h"

#include "../net/if.h"
#include "../net/netisr.h"
#include "../net/route.h"

#ifdef INET
#include "../netinet/in.h"
#include "../netinet/in_systm.h"
#include "../netinet/in_var.h"
#include "../netinet/ip.h"
#endif

#include "../vax/cpu.h"
#include "../vax/mtpr.h"
#include "if_uba.h"
#include "if_dmc.h"
#include "../vaxuba/ubareg.h"
#include "../vaxuba/ubavar.h"

#include "../h/time.h"
#include "../h/kernel.h"

/*
 * output timeout value, sec.; should depend on line speed.
 */
int dmc_timeout = 20;

/*
 * Driver information for auto-configuration stuff.
 */
int dmcprobe(), dmcattach(), dmcinit(), dmcioctl();
int dmcoutput(), dmcreset(), dmctimeout();
struct uba_device *dmcinfo[NDMC];
u_short dmcstd[] = { 0 };
struct uba_driver dmcdriver =
        { dmcprobe, 0, dmcattach, 0, dmcstd, "dmc", dmcinfo };

#define NRCV 7
#define NXMT 3
#define NCMDS   (NRCV+NXMT+4)   /* size of command queue */

#define printd if(dmcdebug)printf
int dmcdebug = 0;

/* error reporting intervals */
#define DMC_RPNBFS      50
#define DMC_RPDSC       1
#define DMC_RPTMO       10
#define DMC_RPDCK       10

struct dmc_command {
        char    qp_cmd;         /* command */
        short   qp_ubaddr;      /* buffer address */
        short   qp_cc;          /* character count || XMEM */
        struct  dmc_command *qp_next;   /* next command on queue */
};

struct dmcbufs {
        int     ubinfo;         /* from uballoc */
        short   cc;             /* buffer size */
        short   flags;          /* access control */
};
#define DBUF_OURS       0       /* buffer is available */
#define DBUF_DMCS       1       /* buffer claimed by somebody */
#define DBUF_XMIT       4       /* transmit buffer */
#define DBUF_RCV        8       /* receive buffer */


/*
 * DMC software status per interface.
 *
 * Each interface is referenced by a network interface structure,
 * sc_if, which the routing code uses to locate the interface.
 * This structure contains the output queue for the interface, its address, ...
 * We also have, for each interface, a set of UBA interface structures,
 * one per buffer, which
 * contain information about the UNIBUS resources held by the interface:
 * map registers, buffered data paths, etc.  Information is cached in this
 * structure for use by the if_uba.c routines in running the interface
 * efficiently.
 */
struct dmc_softc {
        struct  ifnet sc_if;            /* network-visible interface */
        short   sc_oused;               /* output buffers currently in use */
        short   sc_iused;               /* input buffers given to DMC */
        short   sc_flag;                /* flags */
        int     sc_ubinfo;              /* UBA mapping info for base table */
        int     sc_errors[4];           /* non-fatal error counters */
#define sc_datck sc_errors[0]
#define sc_timeo sc_errors[1]
#define sc_nobuf sc_errors[2]
#define sc_disc  sc_errors[3]
        struct  dmcbufs sc_rbufs[NRCV]; /* receive buffer info */
        struct  dmcbufs sc_xbufs[NXMT]; /* transmit buffer info */
        struct  ifubinfo sc_ifuba;      /* UNIBUS resources */
        struct  ifrw sc_ifr[NRCV];      /* UNIBUS receive buffer maps */
        struct  ifxmt sc_ifw[NXMT];     /* UNIBUS transmit buffer maps */
        /* command queue stuff */
        struct  dmc_command sc_cmdbuf[NCMDS];
        struct  dmc_command *sc_qhead;  /* head of command queue */
        struct  dmc_command *sc_qtail;  /* tail of command queue */
        struct  dmc_command *sc_qactive;        /* command in progress */
        struct  dmc_command *sc_qfreeh; /* head of list of free cmd buffers */
        struct  dmc_command *sc_qfreet; /* tail of list of free cmd buffers */
        /* end command queue stuff */
} dmc_softc[NDMC];

/* flags */
#define DMC_RUNNING     0x01            /* device initialized */
#define DMC_BMAPPED     0x02            /* base table mapped */
#define DMC_RESTART     0x04            /* software restart in progress */
#define DMC_ONLINE      0x08            /* device running (had a RDYO) */

struct dmc_base {
        short   d_base[128];            /* DMC base table */
} dmc_base[NDMC];

/* queue manipulation macros */
#define QUEUE_AT_HEAD(qp, head, tail) \
        (qp)->qp_next = (head); \
        (head) = (qp); \
        if ((tail) == (struct dmc_command *) 0) \
                (tail) = (head)

#define QUEUE_AT_TAIL(qp, head, tail) \
        if ((tail)) \
                (tail)->qp_next = (qp); \
        else \
                (head) = (qp); \
        (qp)->qp_next = (struct dmc_command *) 0; \
        (tail) = (qp)

#define DEQUEUE(head, tail) \
        (head) = (head)->qp_next;\
        if ((head) == (struct dmc_command *) 0)\
                (tail) = (head)

dmcprobe(reg)
        caddr_t reg;
{
        register int br, cvec;
        register struct dmcdevice *addr = (struct dmcdevice *)reg;
        register int i;

#ifdef lint
        br = 0; cvec = br; br = cvec;
        dmcrint(0); dmcxint(0);
#endif
        addr->bsel1 = DMC_MCLR;
        for (i = 100000; i && (addr->bsel1 & DMC_RUN) == 0; i--)
                ;
        if ((addr->bsel1 & DMC_RUN) == 0) {
                printf("dmcprobe: can't start device\n");
                return (0);
        }
        addr->bsel0 = DMC_RQI|DMC_IEI;
        /* let's be paranoid */
        addr->bsel0 |= DMC_RQI|DMC_IEI;
        DELAY(1000000);
        addr->bsel1 = DMC_MCLR;
        for (i = 100000; i && (addr->bsel1 & DMC_RUN) == 0; i--)
                ;
        return (1);
}

/*
 * Interface exists: make available by filling in network interface
 * record.  System will initialize the interface when it is ready
 * to accept packets.
 */
dmcattach(ui)
        register struct uba_device *ui;
{
        register struct dmc_softc *sc = &dmc_softc[ui->ui_unit];

        sc->sc_if.if_unit = ui->ui_unit;
        sc->sc_if.if_name = "dmc";
        sc->sc_if.if_mtu = DMCMTU;
        sc->sc_if.if_init = dmcinit;
        sc->sc_if.if_output = dmcoutput;
        sc->sc_if.if_ioctl = dmcioctl;
        sc->sc_if.if_reset = dmcreset;
        sc->sc_if.if_watchdog = dmctimeout;
        sc->sc_if.if_flags = IFF_POINTOPOINT;
        sc->sc_ifuba.iff_flags = UBA_CANTWAIT;

        if_attach(&sc->sc_if);
}

/*
 * Reset of interface after UNIBUS reset.
 * If interface is on specified UBA, reset its state.
 */
dmcreset(unit, uban)
        int unit, uban;
{
        register struct uba_device *ui;
        register struct dmc_softc *sc = &dmc_softc[unit];

        if (unit >= NDMC || (ui = dmcinfo[unit]) == 0 || ui->ui_alive == 0 ||
            ui->ui_ubanum != uban)
                return;
        printf(" dmc%d", unit);
        sc->sc_flag = 0;
        sc->sc_if.if_flags &= ~IFF_RUNNING;
        dmcinit(unit);
}

/*
 * Initialization of interface; reinitialize UNIBUS usage.
 */
dmcinit(unit)
        int unit;
{
        register struct dmc_softc *sc = &dmc_softc[unit];
        register struct uba_device *ui = dmcinfo[unit];
        register struct dmcdevice *addr;
        register struct ifnet *ifp = &sc->sc_if;
        register struct ifrw *ifrw;
        register struct ifxmt *ifxp;
        register struct dmcbufs *rp;
        register struct dmc_command *qp;
        struct ifaddr *ifa;
        int base;
        int s;

        addr = (struct dmcdevice *)ui->ui_addr;

        /*
         * Check to see that an address has been set
         * (both local and destination for an address family).
         */
        for (ifa = ifp->if_addrlist; ifa; ifa = ifa->ifa_next)
                if (ifa->ifa_addr.sa_family && ifa->ifa_dstaddr.sa_family)
                        break;
        if (ifa == (struct ifaddr *) 0)
                return;

        if ((addr->bsel1&DMC_RUN) == 0) {
                printf("dmcinit: DMC not running\n");
                ifp->if_flags &= ~IFF_UP;
                return;
        }
        /* map base table */
        if ((sc->sc_flag & DMC_BMAPPED) == 0) {
                sc->sc_ubinfo = uballoc(ui->ui_ubanum,
                    (caddr_t)&dmc_base[unit], sizeof (struct dmc_base), 0);
                sc->sc_flag |= DMC_BMAPPED;
        }
        /* initialize UNIBUS resources */
        sc->sc_iused = sc->sc_oused = 0;
        if ((ifp->if_flags & IFF_RUNNING) == 0) {
                if (if_ubaminit(&sc->sc_ifuba, ui->ui_ubanum,
                    sizeof(struct dmc_header), (int)btoc(DMCMTU),
                    sc->sc_ifr, NRCV, sc->sc_ifw, NXMT) == 0) {
                        printf("dmc%d: can't allocate uba resources\n", unit);
                        ifp->if_flags &= ~IFF_UP;
                        return;
                }
                ifp->if_flags |= IFF_RUNNING;
        }
        sc->sc_flag &= ~DMC_ONLINE;
        sc->sc_flag |= DMC_RUNNING;
        /*
         * Limit packets enqueued until we see if we're on the air.
         */
        ifp->if_snd.ifq_maxlen = 3;

        /* initialize buffer pool */
        /* receives */
        ifrw = &sc->sc_ifr[0];
        for (rp = &sc->sc_rbufs[0]; rp < &sc->sc_rbufs[NRCV]; rp++) {
                rp->ubinfo = ifrw->ifrw_info & 0x3ffff;
                rp->cc = DMCMTU + sizeof (struct dmc_header);
                rp->flags = DBUF_OURS|DBUF_RCV;
                ifrw++;
        }
        /* transmits */
        ifxp = &sc->sc_ifw[0];
        for (rp = &sc->sc_xbufs[0]; rp < &sc->sc_xbufs[NXMT]; rp++) {
                rp->ubinfo = ifxp->ifw_info & 0x3ffff;
                rp->cc = 0;
                rp->flags = DBUF_OURS|DBUF_XMIT;
                ifxp++;
        }

        /* set up command queues */
        sc->sc_qfreeh = sc->sc_qfreet
                = sc->sc_qhead = sc->sc_qtail = sc->sc_qactive =
                (struct dmc_command *)0;
        /* set up free command buffer list */
        for (qp = &sc->sc_cmdbuf[0]; qp < &sc->sc_cmdbuf[NCMDS]; qp++) {
                QUEUE_AT_HEAD(qp, sc->sc_qfreeh, sc->sc_qfreet);
        }

        /* base in */
        base = sc->sc_ubinfo & 0x3ffff;
        dmcload(sc, DMC_BASEI, base, (base>>2) & DMC_XMEM);
        /* set line mode from the per-device flags: full duplex DDCMP, */
        /* maintenance, or half duplex as primary or secondary station */
        if (ui->ui_flags == 0)
                /* use DDCMP mode in full duplex */
                dmcload(sc, DMC_CNTLI, 0, 0);
        else if (ui->ui_flags == 1)
                /* use MAINTENANCE mode */
                dmcload(sc, DMC_CNTLI, 0, DMC_MAINT);
        else if (ui->ui_flags == 2)
                /* use DDCMP half duplex as primary station */
                dmcload(sc, DMC_CNTLI, 0, DMC_HDPLX);
        else if (ui->ui_flags == 3)
                /* use DDCMP half duplex as secondary station */
                dmcload(sc, DMC_CNTLI, 0, DMC_HDPLX | DMC_SEC);

        /* enable operation done interrupts */
        while ((addr->bsel2 & DMC_IEO) == 0)
                addr->bsel2 |= DMC_IEO;
        s = spl5();
        /* queue first NRCV buffers for DMC to fill */
        for (rp = &sc->sc_rbufs[0]; rp < &sc->sc_rbufs[NRCV]; rp++) {
                rp->flags |= DBUF_DMCS;
                dmcload(sc, DMC_READ, rp->ubinfo,
                    (((rp->ubinfo>>2)&DMC_XMEM) | rp->cc));
                sc->sc_iused++;
        }
        splx(s);
}

/*
 * Start output on interface.  Get another datagram
 * to send from the interface queue and map it to
 * the interface before starting output.
 *
 * Must be called at spl 5
 */
dmcstart(unit)
        int unit;
{
        register struct dmc_softc *sc = &dmc_softc[unit];
        struct mbuf *m;
        register struct dmcbufs *rp;
        register int n;

        /*
         * Dequeue up to NXMT requests and map them to the UNIBUS.
         * If no more requests, or no dmc buffers available, just return.
         */
        n = 0;
        for (rp = &sc->sc_xbufs[0]; rp < &sc->sc_xbufs[NXMT]; rp++) {
                /* find an available buffer */
                if ((rp->flags & DBUF_DMCS) == 0) {
                        IF_DEQUEUE(&sc->sc_if.if_snd, m);
                        if (m == 0)
                                return;
                        /* mark it dmcs */
                        rp->flags |= (DBUF_DMCS);
                        /*
                         * Have request mapped to UNIBUS for transmission
                         * and start the output.
                         */
                        rp->cc = if_ubaput(&sc->sc_ifuba, &sc->sc_ifw[n], m);
                        rp->cc &= DMC_CCOUNT;
                        if (++sc->sc_oused == 1)
                                sc->sc_if.if_timer = dmc_timeout;
                        dmcload(sc, DMC_WRITE, rp->ubinfo,
                            rp->cc | ((rp->ubinfo>>2)&DMC_XMEM));
                }
                n++;
        }
}

/*
 * Utility routine to load the DMC device registers.
 */
dmcload(sc, type, w0, w1)
        register struct dmc_softc *sc;
        int type, w0, w1;
{
        register struct dmcdevice *addr;
        register int unit, sps;
        register struct dmc_command *qp;

        unit = sc - dmc_softc;
        addr = (struct dmcdevice *)dmcinfo[unit]->ui_addr;
        sps = spl5();

        /* grab a command buffer from the free list */
        if ((qp = sc->sc_qfreeh) == (struct dmc_command *)0)
                panic("dmc command queue overflow");
        DEQUEUE(sc->sc_qfreeh, sc->sc_qfreet);

        /* fill in requested info */
        qp->qp_cmd = (type | DMC_RQI);
        qp->qp_ubaddr = w0;
        qp->qp_cc = w1;

        if (sc->sc_qactive) {   /* command in progress */
                if (type == DMC_READ) {
                        QUEUE_AT_HEAD(qp, sc->sc_qhead, sc->sc_qtail);
                } else {
                        QUEUE_AT_TAIL(qp, sc->sc_qhead, sc->sc_qtail);
                }
        } else {        /* command port free */
                sc->sc_qactive = qp;
                addr->bsel0 = qp->qp_cmd;
                dmcrint(unit);
        }
        splx(sps);
}

/*
 * DMC interface receiver interrupt.
 * Ready to accept another command,
 * pull one off the command queue.
 */
dmcrint(unit)
        int unit;
{
        register struct dmc_softc *sc;
        register struct dmcdevice *addr;
        register struct dmc_command *qp;
        register int n;

        addr = (struct dmcdevice *)dmcinfo[unit]->ui_addr;
        sc = &dmc_softc[unit];
        if ((qp = sc->sc_qactive) == (struct dmc_command *) 0) {
                printf("dmc%d: dmcrint no command\n", unit);
                return;
        }
        while (addr->bsel0&DMC_RDYI) {
                addr->sel4 = qp->qp_ubaddr;
                addr->sel6 = qp->qp_cc;
                addr->bsel0 &= ~(DMC_IEI|DMC_RQI);
                /* free command buffer */
                QUEUE_AT_HEAD(qp, sc->sc_qfreeh, sc->sc_qfreet);
                while (addr->bsel0 & DMC_RDYI) {
                        /*
                         * Can't check for RDYO here 'cause
                         * this routine isn't reentrant!
                         */
                        DELAY(5);
                }
                /* move on to next command */
                if ((sc->sc_qactive = sc->sc_qhead) == (struct dmc_command *)0)
                        break;          /* all done */
                /* more commands to do, start the next one */
                qp = sc->sc_qactive;
                DEQUEUE(sc->sc_qhead, sc->sc_qtail);
                addr->bsel0 = qp->qp_cmd;
                n = RDYSCAN;
                while (n-- > 0)
                        if ((addr->bsel0&DMC_RDYI) || (addr->bsel2&DMC_RDYO))
                                break;
        }
        if (sc->sc_qactive) {
                addr->bsel0 |= DMC_IEI|DMC_RQI;
                /* VMS does it twice !*$%@# */
                addr->bsel0 |= DMC_IEI|DMC_RQI;
        }

}

/*
 * DMC interface transmitter interrupt.
 * A transfer may have completed, check for errors.
 * If it was a read, notify appropriate protocol.
 * If it was a write, pull the next one off the queue.
 */
dmcxint(unit)
        int unit;
{
        register struct dmc_softc *sc;
        register struct ifnet *ifp;
        struct uba_device *ui = dmcinfo[unit];
        struct dmcdevice *addr;
        struct mbuf *m;
        struct ifqueue *inq;
        int arg, pkaddr, cmd, len, s;
        register struct ifrw *ifrw;
        register struct dmcbufs *rp;
        register struct ifxmt *ifxp;
        struct dmc_header *dh;
        int off, resid;

        addr = (struct dmcdevice *)ui->ui_addr;
        sc = &dmc_softc[unit];
        ifp = &sc->sc_if;

        while (addr->bsel2 & DMC_RDYO) {

                cmd = addr->bsel2 & 0xff;
                arg = addr->sel6 & 0xffff;
                /* reconstruct UNIBUS address of buffer returned to us */
                pkaddr = ((arg&DMC_XMEM)<<2) | (addr->sel4 & 0xffff);
                /* release port */
                addr->bsel2 &= ~DMC_RDYO;
                switch (cmd & 07) {

                case DMC_OUR:
                        /*
                         * A read has completed.
                         * Pass packet to type specific
                         * higher-level input routine.
                         */
                        ifp->if_ipackets++;
                        /* find location in dmcuba struct */
                        ifrw = &sc->sc_ifr[0];
                        for (rp = &sc->sc_rbufs[0]; rp < &sc->sc_rbufs[NRCV]; rp++) {
                                if (rp->ubinfo == pkaddr)
                                        break;
                                ifrw++;
                        }
                        if (rp >= &sc->sc_rbufs[NRCV])
                                panic("dmc rcv");
                        if ((rp->flags & DBUF_DMCS) == 0)
                                printf("dmc%d: done unalloc rbuf\n", unit);

                        len = (arg & DMC_CCOUNT) - sizeof (struct dmc_header);
                        if (len < 0 || len > DMCMTU) {
                                ifp->if_ierrors++;
                                printd("dmc%d: bad rcv pkt addr 0x%x len 0x%x\n",
                                    unit, pkaddr, len);
                                goto setup;
                        }
                        /*
                         * Deal with trailer protocol: if type is trailer
                         * get true type from first 16-bit word past data.
                         * Remember that type was trailer by setting off.
                         */
                        dh = (struct dmc_header *)ifrw->ifrw_addr;
                        dh->dmc_type = ntohs((u_short)dh->dmc_type);
#define dmcdataaddr(dh, off, type)      ((type)(((caddr_t)((dh)+1)+(off))))
                        if (dh->dmc_type >= DMC_TRAILER &&
                            dh->dmc_type < DMC_TRAILER+DMC_NTRAILER) {
                                off = (dh->dmc_type - DMC_TRAILER) * 512;
                                if (off >= DMCMTU)
                                        goto setup;             /* sanity */
                                dh->dmc_type = ntohs(*dmcdataaddr(dh, off, u_short *));
                                resid = ntohs(*(dmcdataaddr(dh, off+2, u_short *)));
                                if (off + resid > len)
                                        goto setup;             /* sanity */
                                len = off + resid;
                        } else
                                off = 0;
                        if (len == 0)
                                goto setup;

                        /*
                         * Pull packet off interface.  Off is nonzero if
                         * packet has trailing header; dmc_get will then
                         * force this header information to be at the front,
                         * but we still have to drop the type and length
                         * which are at the front of any trailer data.
                         */
                        m = if_ubaget(&sc->sc_ifuba, ifrw, len, off, ifp);
                        if (m == 0)
                                goto setup;
                        switch (dh->dmc_type) {

#ifdef INET
                        case DMC_IPTYPE:
                                schednetisr(NETISR_IP);
                                inq = &ipintrq;
                                break;
#endif
                        default:
                                m_freem(m);
                                goto setup;
                        }

                        s = splimp();
                        if (IF_QFULL(inq)) {
                                IF_DROP(inq);
                                m_freem(m);
                        } else
                                IF_ENQUEUE(inq, m);
                        splx(s);

        setup:
                        /* is this needed? */
                        rp->ubinfo = ifrw->ifrw_info & 0x3ffff;

                        dmcload(sc, DMC_READ, rp->ubinfo,
                            ((rp->ubinfo >> 2) & DMC_XMEM) | rp->cc);
                        break;

                case DMC_OUX:
                        /*
                         * A write has completed, start another
                         * transfer if there is more data to send.
                         */
                        ifp->if_opackets++;
                        /* find associated dmcbuf structure */
                        ifxp = &sc->sc_ifw[0];
                        for (rp = &sc->sc_xbufs[0]; rp < &sc->sc_xbufs[NXMT]; rp++) {
                                if (rp->ubinfo == pkaddr)
                                        break;
                                ifxp++;
                        }
                        if (rp >= &sc->sc_xbufs[NXMT]) {
                                printf("dmc%d: bad packet address 0x%x\n",
                                    unit, pkaddr);
                                break;
                        }
                        if ((rp->flags & DBUF_DMCS) == 0)
                                printf("dmc%d: unallocated packet 0x%x\n",
                                    unit, pkaddr);
                        /* mark buffer free */
                        if (ifxp->ifw_xtofree) {
                                (void)m_freem(ifxp->ifw_xtofree);
                                ifxp->ifw_xtofree = 0;
                        }
                        rp->flags &= ~DBUF_DMCS;
                        if (--sc->sc_oused == 0)
                                sc->sc_if.if_timer = 0;
                        else
                                sc->sc_if.if_timer = dmc_timeout;
                        if ((sc->sc_flag & DMC_ONLINE) == 0) {
                                extern int ifqmaxlen;

                                /*
                                 * We're on the air.
                                 * Open the queue to the usual value.
                                 */
                                sc->sc_flag |= DMC_ONLINE;
                                ifp->if_snd.ifq_maxlen = ifqmaxlen;
                        }
                        break;

                case DMC_CNTLO:
                        arg &= DMC_CNTMASK;
                        if (arg & DMC_FATAL) {
                                if (arg != DMC_START)
                                        log(LOG_ERR,
                                            "dmc%d: fatal error, flags=%b\n",
                                            unit, arg, CNTLO_BITS);
                                dmcrestart(unit);
                                break;
                        }
                        /* ACCUMULATE STATISTICS */
                        switch (arg) {
                        case DMC_NOBUFS:
                                ifp->if_ierrors++;
                                if ((sc->sc_nobuf++ % DMC_RPNBFS) == 0)
                                        goto report;
                                break;
                        case DMC_DISCONN:
                                if ((sc->sc_disc++ % DMC_RPDSC) == 0)
                                        goto report;
                                break;
                        case DMC_TIMEOUT:
                                if ((sc->sc_timeo++ % DMC_RPTMO) == 0)
                                        goto report;
                                break;
                        case DMC_DATACK:
                                ifp->if_oerrors++;
                                if ((sc->sc_datck++ % DMC_RPDCK) == 0)
                                        goto report;
                                break;
                        default:
                                goto report;
                        }
                        break;
                report:
                        printd("dmc%d: soft error, flags=%b\n", unit,
                            arg, CNTLO_BITS);
                        if ((sc->sc_flag & DMC_RESTART) == 0) {
                                /*
                                 * kill off the dmc to get things
                                 * going again by generating a
                                 * procedure error
                                 */
                                sc->sc_flag |= DMC_RESTART;
                                arg = sc->sc_ubinfo & 0x3ffff;
                                dmcload(sc, DMC_BASEI, arg, (arg>>2)&DMC_XMEM);
                        }
                        break;

                default:
                        printf("dmc%d: bad control %o\n", unit, cmd);
                        break;
                }
        }
        dmcstart(unit);
        return;
}

/*
 * DMC output routine.
 * Encapsulate a packet of type family for the dmc.
 * Use trailer local net encapsulation if enough data in first
 * packet leaves a multiple of 512 bytes of data in remainder.
 */
dmcoutput(ifp, m0, dst)
        register struct ifnet *ifp;
        register struct mbuf *m0;
        struct sockaddr *dst;
{
        int type, error, s;
        register struct mbuf *m = m0;
        register struct dmc_header *dh;
        register int off;

        if ((ifp->if_flags & IFF_UP) == 0) {
                error = ENETDOWN;
                goto bad;
        }

        switch (dst->sa_family) {
#ifdef INET
        case AF_INET:
                off = m->m_pkthdr.len - m->m_len;
                if ((ifp->if_flags & IFF_NOTRAILERS) == 0)
                        if (off > 0 && (off & 0x1ff) == 0 &&
                            (m->m_flags & M_EXT) == 0 &&
                            m->m_data >= m->m_pktdat + 2 * sizeof (u_short)) {
                                type = DMC_TRAILER + (off>>9);
                                m->m_data -= 2 * sizeof (u_short);
                                m->m_len += 2 * sizeof (u_short);
                                *mtod(m, u_short *) = htons((u_short)DMC_IPTYPE);
                                *(mtod(m, u_short *) + 1) = htons((u_short)m->m_len);
                                goto gottrailertype;
                        }
                type = DMC_IPTYPE;
                off = 0;
                goto gottype;
#endif

        case AF_UNSPEC:
                dh = (struct dmc_header *)dst->sa_data;
                type = dh->dmc_type;
                goto gottype;

        default:
                printf("dmc%d: can't handle af%d\n", ifp->if_unit,
                    dst->sa_family);
                error = EAFNOSUPPORT;
                goto bad;
        }

gottrailertype:
        /*
         * Packet to be sent as a trailer; move first packet
         * (control information) to end of chain.
         */
        while (m->m_next)
                m = m->m_next;
        m->m_next = m0;
        m = m0->m_next;
        m0->m_next = 0;
        m0 = m;

gottype:
        /*
         * Add local network header
         * (there is space for a uba on a vax to step on)
         */
        M_PREPEND(m, sizeof(struct dmc_header), M_DONTWAIT);
        if (m == 0) {
                error = ENOBUFS;
                goto bad;
        }
        dh = mtod(m, struct dmc_header *);
        dh->dmc_type = htons((u_short)type);

        /*
         * Queue message on interface, and start output if interface
         * not yet active.
         */
        s = splimp();
        if (IF_QFULL(&ifp->if_snd)) {
                IF_DROP(&ifp->if_snd);
                m_freem(m);
                splx(s);
                return (ENOBUFS);
        }
        IF_ENQUEUE(&ifp->if_snd, m);
        dmcstart(ifp->if_unit);
        splx(s);
        return (0);

bad:
        m_freem(m0);
        return (error);
}


/*
 * Process an ioctl request.
 */
/* ARGSUSED */
dmcioctl(ifp, cmd, data)
        register struct ifnet *ifp;
        int cmd;
        caddr_t data;
{
        int s = splimp(), error = 0;
        register struct dmc_softc *sc = &dmc_softc[ifp->if_unit];

        switch (cmd) {

        case SIOCSIFADDR:
                ifp->if_flags |= IFF_UP;
                if ((ifp->if_flags & IFF_RUNNING) == 0)
                        dmcinit(ifp->if_unit);
                break;

        case SIOCSIFDSTADDR:
                if ((ifp->if_flags & IFF_RUNNING) == 0)
                        dmcinit(ifp->if_unit);
                break;

        case SIOCSIFFLAGS:
                if ((ifp->if_flags & IFF_UP) == 0 &&
                    sc->sc_flag & DMC_RUNNING)
                        dmcdown(ifp->if_unit);
                else if (ifp->if_flags & IFF_UP &&
                    (sc->sc_flag & DMC_RUNNING) == 0)
                        dmcrestart(ifp->if_unit);
                break;

        default:
                error = EINVAL;
        }
        splx(s);
        return (error);
}

/*
 * Restart after a fatal error.
 * Clear device and reinitialize.
 */
dmcrestart(unit)
        int unit;
{
        register struct dmc_softc *sc = &dmc_softc[unit];
        register struct dmcdevice *addr;
        register int i;
        int s;

#ifdef DEBUG
        /* dump base table */
        printf("dmc%d base table:\n", unit);
        for (i = 0; i < sizeof (struct dmc_base) / sizeof (short); i++)
                printf("%o\n", dmc_base[unit].d_base[i]);
#endif

        dmcdown(unit);

        /*
         * Let the DMR finish the MCLR.  At 1 Mbit, it should do so
         * in at most about 6.4 milliseconds with diagnostics enabled.
         */
        addr = (struct dmcdevice *)(dmcinfo[unit]->ui_addr);
        for (i = 100000; i && (addr->bsel1 & DMC_RUN) == 0; i--)
                ;
        /* Did the timer expire or did the DMR finish? */
        if ((addr->bsel1 & DMC_RUN) == 0) {
                log(LOG_ERR, "dmc%d: M820 Test Failed\n", unit);
                return;
        }

        /* restart DMC */
        dmcinit(unit);
        sc->sc_flag &= ~DMC_RESTART;
        s = spl5();
        dmcstart(unit);
        splx(s);
        sc->sc_if.if_collisions++;      /* why not? */
}

/*
 * Reset a device and mark down.
 * Flush output queue and drop queue limit.
 */
dmcdown(unit)
        int unit;
{
        register struct dmc_softc *sc = &dmc_softc[unit];
        register struct ifxmt *ifxp;

        ((struct dmcdevice *)(dmcinfo[unit]->ui_addr))->bsel1 = DMC_MCLR;
        sc->sc_flag &= ~(DMC_RUNNING | DMC_ONLINE);

        for (ifxp = sc->sc_ifw; ifxp < &sc->sc_ifw[NXMT]; ifxp++) {
                if (ifxp->ifw_xtofree) {
                        (void) m_freem(ifxp->ifw_xtofree);
                        ifxp->ifw_xtofree = 0;
                }
        }
        if_qflush(&sc->sc_if.if_snd);
}

/*
 * Watchdog timeout to see that transmitted packets don't
 * lose interrupts.  The device has to be online (the first
 * transmission may block until the other side comes up).
 */
dmctimeout(unit)
        int unit;
{
        register struct dmc_softc *sc;
        struct dmcdevice *addr;

        sc = &dmc_softc[unit];
        if (sc->sc_flag & DMC_ONLINE) {
                addr = (struct dmcdevice *)(dmcinfo[unit]->ui_addr);
                log(LOG_ERR, "dmc%d: output timeout, bsel0=%b bsel2=%b\n",
                    unit, addr->bsel0 & 0xff, DMC0BITS,
                    addr->bsel2 & 0xff, DMC2BITS);
                dmcrestart(unit);
        }
}
#endif