1 /* $NetBSD: qe.c,v 1.19 2001/11/13 06:58:17 lukem Exp $ */ 2 3 /*- 4 * Copyright (c) 1999 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Paul Kranenburg. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. All advertising materials mentioning features or use of this software 19 * must display the following acknowledgement: 20 * This product includes software developed by the NetBSD 21 * Foundation, Inc. and its contributors. 22 * 4. Neither the name of The NetBSD Foundation nor the names of its 23 * contributors may be used to endorse or promote products derived 24 * from this software without specific prior written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 29 * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 36 * POSSIBILITY OF SUCH DAMAGE. 37 */ 38 39 /* 40 * Copyright (c) 1998 Jason L. Wright. 41 * All rights reserved. 42 * 43 * Redistribution and use in source and binary forms, with or without 44 * modification, are permitted provided that the following conditions 45 * are met: 46 * 1. Redistributions of source code must retain the above copyright 47 * notice, this list of conditions and the following disclaimer. 48 * 2. Redistributions in binary form must reproduce the above copyright 49 * notice, this list of conditions and the following disclaimer in the 50 * documentation and/or other materials provided with the distribution. 51 * 3. The name of the authors may not be used to endorse or promote products 52 * derived from this software without specific prior written permission. 53 * 54 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR 55 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 56 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
57 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, 58 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 59 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 60 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 61 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 62 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 63 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 64 */ 65 66 /* 67 * Driver for the SBus qec+qe QuadEthernet board. 68 * 69 * This driver was written using the AMD MACE Am79C940 documentation, some 70 * ideas gleaned from the S/Linux driver for this card, Solaris header files, 71 * and a loan of a card from Paul Southworth of the Internet Engineering 72 * Group (www.ieng.com). 73 */ 74 75 #include <sys/cdefs.h> 76 __KERNEL_RCSID(0, "$NetBSD: qe.c,v 1.19 2001/11/13 06:58:17 lukem Exp $"); 77 78 #define QEDEBUG 79 80 #include "opt_ddb.h" 81 #include "opt_inet.h" 82 #include "opt_ccitt.h" 83 #include "opt_llc.h" 84 #include "opt_ns.h" 85 #include "bpfilter.h" 86 #include "rnd.h" 87 88 #include <sys/param.h> 89 #include <sys/systm.h> 90 #include <sys/kernel.h> 91 #include <sys/errno.h> 92 #include <sys/ioctl.h> 93 #include <sys/mbuf.h> 94 #include <sys/socket.h> 95 #include <sys/syslog.h> 96 #include <sys/device.h> 97 #include <sys/malloc.h> 98 #if NRND > 0 99 #include <sys/rnd.h> 100 #endif 101 102 #include <net/if.h> 103 #include <net/if_dl.h> 104 #include <net/if_types.h> 105 #include <net/netisr.h> 106 #include <net/if_media.h> 107 #include <net/if_ether.h> 108 109 #ifdef INET 110 #include <netinet/in.h> 111 #include <netinet/if_inarp.h> 112 #include <netinet/in_systm.h> 113 #include <netinet/in_var.h> 114 #include <netinet/ip.h> 115 #endif 116 117 #ifdef NS 118 #include <netns/ns.h> 119 #include <netns/ns_if.h> 120 #endif 121 122 #if NBPFILTER > 0 123 #include <net/bpf.h> 124 #include 
<net/bpfdesc.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/autoconf.h>

#include <dev/sbus/sbusvar.h>
#include <dev/sbus/qecreg.h>
#include <dev/sbus/qecvar.h>
#include <dev/sbus/qereg.h>

/*
 * Per-channel driver state.  One qe_softc exists for each of the (up to
 * four) MACE channels behind a QEC controller; the QEC parent is shared.
 */
struct qe_softc {
	struct device	sc_dev;		/* base device */
	struct sbusdev	sc_sd;		/* sbus device */
	bus_space_tag_t	sc_bustag;	/* bus & dma tags */
	bus_dma_tag_t	sc_dmatag;
	bus_dmamap_t	sc_dmamap;	/* DMA map for rings + packet bufs */
	struct ethercom	sc_ethercom;	/* common ethernet state (incl. ifnet) */
	struct ifmedia	sc_ifmedia;	/* interface media */

	struct qec_softc *sc_qec;	/* QEC parent */

	bus_space_handle_t	sc_qr;	/* QEC registers */
	bus_space_handle_t	sc_mr;	/* MACE registers */
	bus_space_handle_t	sc_cr;	/* channel registers */

	int	sc_channel;		/* channel number (from "channel#" prop) */
	u_int	sc_rev;			/* board revision (from "mace-version") */

	int	sc_burst;		/* DMA burst size, inherited from QEC */

	struct	qec_ring	sc_rb;	/* Packet Ring Buffer */

	/* MAC address */
	u_int8_t sc_enaddr[6];

#ifdef QEDEBUG
	int	sc_debug;		/* toggled via IFF_DEBUG in qeioctl() */
#endif
};

int	qematch __P((struct device *, struct cfdata *, void *));
void	qeattach __P((struct device *, struct device *, void *));

void	qeinit __P((struct qe_softc *));
void	qestart __P((struct ifnet *));
void	qestop __P((struct qe_softc *));
void	qewatchdog __P((struct ifnet *));
int	qeioctl __P((struct ifnet *, u_long, caddr_t));
void	qereset __P((struct qe_softc *));

int	qeintr __P((void *));
int	qe_eint __P((struct qe_softc *, u_int32_t));
int	qe_rint __P((struct qe_softc *));
int	qe_tint __P((struct qe_softc *));
void	qe_mcreset __P((struct qe_softc *));

static int	qe_put __P((struct qe_softc *, int, struct mbuf *));
static void	qe_read __P((struct qe_softc *, int, int));
static struct mbuf	*qe_get __P((struct qe_softc *, int, int));

/* ifmedia callbacks */
void	qe_ifmedia_sts __P((struct ifnet *, struct ifmediareq *));
int	qe_ifmedia_upd
__P((struct ifnet *)); 189 190 struct cfattach qe_ca = { 191 sizeof(struct qe_softc), qematch, qeattach 192 }; 193 194 int 195 qematch(parent, cf, aux) 196 struct device *parent; 197 struct cfdata *cf; 198 void *aux; 199 { 200 struct sbus_attach_args *sa = aux; 201 202 return (strcmp(cf->cf_driver->cd_name, sa->sa_name) == 0); 203 } 204 205 void 206 qeattach(parent, self, aux) 207 struct device *parent, *self; 208 void *aux; 209 { 210 struct sbus_attach_args *sa = aux; 211 struct qec_softc *qec = (struct qec_softc *)parent; 212 struct qe_softc *sc = (struct qe_softc *)self; 213 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 214 int node = sa->sa_node; 215 bus_dma_tag_t dmatag = sa->sa_dmatag; 216 bus_dma_segment_t seg; 217 bus_size_t size; 218 int rseg, error; 219 extern void myetheraddr __P((u_char *)); 220 221 if (sa->sa_nreg < 2) { 222 printf("%s: only %d register sets\n", 223 self->dv_xname, sa->sa_nreg); 224 return; 225 } 226 227 if (bus_space_map2(sa->sa_bustag, 228 (bus_type_t)sa->sa_reg[0].sbr_slot, 229 (bus_addr_t)sa->sa_reg[0].sbr_offset, 230 (bus_size_t)sa->sa_reg[0].sbr_size, 231 BUS_SPACE_MAP_LINEAR, 0, &sc->sc_cr) != 0) { 232 printf("%s: cannot map registers\n", self->dv_xname); 233 return; 234 } 235 236 if (bus_space_map2(sa->sa_bustag, 237 (bus_type_t)sa->sa_reg[1].sbr_slot, 238 (bus_addr_t)sa->sa_reg[1].sbr_offset, 239 (bus_size_t)sa->sa_reg[1].sbr_size, 240 BUS_SPACE_MAP_LINEAR, 0, &sc->sc_mr) != 0) { 241 printf("%s: cannot map registers\n", self->dv_xname); 242 return; 243 } 244 245 sc->sc_rev = PROM_getpropint(node, "mace-version", -1); 246 printf(" rev %x", sc->sc_rev); 247 248 sc->sc_bustag = sa->sa_bustag; 249 sc->sc_dmatag = sa->sa_dmatag; 250 sc->sc_qec = qec; 251 sc->sc_qr = qec->sc_regs; 252 253 sc->sc_channel = PROM_getpropint(node, "channel#", -1); 254 sc->sc_burst = qec->sc_burst; 255 256 qestop(sc); 257 258 /* Note: no interrupt level passed */ 259 (void)bus_intr_establish(sa->sa_bustag, 0, IPL_NET, 0, qeintr, sc); 260 
myetheraddr(sc->sc_enaddr); 261 262 /* 263 * Allocate descriptor ring and buffers. 264 */ 265 266 /* for now, allocate as many bufs as there are ring descriptors */ 267 sc->sc_rb.rb_ntbuf = QEC_XD_RING_MAXSIZE; 268 sc->sc_rb.rb_nrbuf = QEC_XD_RING_MAXSIZE; 269 270 size = QEC_XD_RING_MAXSIZE * sizeof(struct qec_xd) + 271 QEC_XD_RING_MAXSIZE * sizeof(struct qec_xd) + 272 sc->sc_rb.rb_ntbuf * QE_PKT_BUF_SZ + 273 sc->sc_rb.rb_nrbuf * QE_PKT_BUF_SZ; 274 275 /* Get a DMA handle */ 276 if ((error = bus_dmamap_create(dmatag, size, 1, size, 0, 277 BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) { 278 printf("%s: DMA map create error %d\n", self->dv_xname, error); 279 return; 280 } 281 282 /* Allocate DMA buffer */ 283 if ((error = bus_dmamem_alloc(dmatag, size, 0, 0, 284 &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) { 285 printf("%s: DMA buffer alloc error %d\n", 286 self->dv_xname, error); 287 return; 288 } 289 sc->sc_rb.rb_dmabase = sc->sc_dmamap->dm_segs[0].ds_addr; 290 291 /* Map DMA buffer in CPU addressable space */ 292 if ((error = bus_dmamem_map(dmatag, &seg, rseg, size, 293 &sc->sc_rb.rb_membase, 294 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) { 295 printf("%s: DMA buffer map error %d\n", 296 self->dv_xname, error); 297 bus_dmamem_free(dmatag, &seg, rseg); 298 return; 299 } 300 301 /* Load the buffer */ 302 if ((error = bus_dmamap_load(dmatag, sc->sc_dmamap, 303 sc->sc_rb.rb_membase, size, NULL, 304 BUS_DMA_NOWAIT)) != 0) { 305 printf("%s: DMA buffer map load error %d\n", 306 self->dv_xname, error); 307 bus_dmamem_unmap(dmatag, sc->sc_rb.rb_membase, size); 308 bus_dmamem_free(dmatag, &seg, rseg); 309 return; 310 } 311 312 /* Initialize media properties */ 313 ifmedia_init(&sc->sc_ifmedia, 0, qe_ifmedia_upd, qe_ifmedia_sts); 314 ifmedia_add(&sc->sc_ifmedia, 315 IFM_MAKEWORD(IFM_ETHER,IFM_10_T,0,0), 316 0, NULL); 317 ifmedia_add(&sc->sc_ifmedia, 318 IFM_MAKEWORD(IFM_ETHER,IFM_10_5,0,0), 319 0, NULL); 320 ifmedia_add(&sc->sc_ifmedia, 321 IFM_MAKEWORD(IFM_ETHER,IFM_AUTO,0,0), 322 0, 
NULL); 323 ifmedia_set(&sc->sc_ifmedia, IFM_ETHER|IFM_AUTO); 324 325 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ); 326 ifp->if_softc = sc; 327 ifp->if_start = qestart; 328 ifp->if_ioctl = qeioctl; 329 ifp->if_watchdog = qewatchdog; 330 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | 331 IFF_MULTICAST; 332 IFQ_SET_READY(&ifp->if_snd); 333 334 /* Attach the interface. */ 335 if_attach(ifp); 336 ether_ifattach(ifp, sc->sc_enaddr); 337 338 printf(" address %s\n", ether_sprintf(sc->sc_enaddr)); 339 } 340 341 /* 342 * Pull data off an interface. 343 * Len is the length of data, with local net header stripped. 344 * We copy the data into mbufs. When full cluster sized units are present, 345 * we copy into clusters. 346 */ 347 static __inline__ struct mbuf * 348 qe_get(sc, idx, totlen) 349 struct qe_softc *sc; 350 int idx, totlen; 351 { 352 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 353 struct mbuf *m; 354 struct mbuf *top, **mp; 355 int len, pad, boff = 0; 356 caddr_t bp; 357 358 bp = sc->sc_rb.rb_rxbuf + (idx % sc->sc_rb.rb_nrbuf) * QE_PKT_BUF_SZ; 359 360 MGETHDR(m, M_DONTWAIT, MT_DATA); 361 if (m == NULL) 362 return (NULL); 363 m->m_pkthdr.rcvif = ifp; 364 m->m_pkthdr.len = totlen; 365 pad = ALIGN(sizeof(struct ether_header)) - sizeof(struct ether_header); 366 m->m_data += pad; 367 len = MHLEN - pad; 368 top = NULL; 369 mp = ⊤ 370 371 while (totlen > 0) { 372 if (top) { 373 MGET(m, M_DONTWAIT, MT_DATA); 374 if (m == NULL) { 375 m_freem(top); 376 return (NULL); 377 } 378 len = MLEN; 379 } 380 if (top && totlen >= MINCLSIZE) { 381 MCLGET(m, M_DONTWAIT); 382 if (m->m_flags & M_EXT) 383 len = MCLBYTES; 384 } 385 m->m_len = len = min(totlen, len); 386 bcopy(bp + boff, mtod(m, caddr_t), len); 387 boff += len; 388 totlen -= len; 389 *mp = m; 390 mp = &m->m_next; 391 } 392 393 return (top); 394 } 395 396 /* 397 * Routine to copy from mbuf chain to transmit buffer in 398 * network buffer memory. 
 */
/*
 * Copy the mbuf chain `m' into transmit buffer slot `idx', freeing each
 * mbuf as it is consumed.  Returns the total number of bytes copied.
 * Ownership of the chain is taken unconditionally (it is always freed).
 */
__inline__ int
qe_put(sc, idx, m)
	struct qe_softc *sc;
	int idx;
	struct mbuf *m;
{
	struct mbuf *n;
	int len, tlen = 0, boff = 0;
	caddr_t bp;

	/* Locate the transmit buffer corresponding to ring slot `idx' */
	bp = sc->sc_rb.rb_txbuf + (idx % sc->sc_rb.rb_ntbuf) * QE_PKT_BUF_SZ;

	for (; m; m = n) {
		len = m->m_len;
		if (len == 0) {
			/* Empty mbuf: free it and move on */
			MFREE(m, n);
			continue;
		}
		bcopy(mtod(m, caddr_t), bp+boff, len);
		boff += len;
		tlen += len;
		MFREE(m, n);
	}
	return (tlen);
}

/*
 * Pass a packet to the higher levels.
 * Validates the frame length, copies ring slot `idx' into a fresh mbuf
 * chain, taps BPF and hands the packet to the interface input routine.
 */
__inline__ void
qe_read(sc, idx, len)
	struct qe_softc *sc;
	int idx, len;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m;

	/* Reject runts and oversized frames outright */
	if (len <= sizeof(struct ether_header) ||
	    len > ETHERMTU + sizeof(struct ether_header)) {

		printf("%s: invalid packet size %d; dropping\n",
		    ifp->if_xname, len);

		ifp->if_ierrors++;
		return;
	}

	/*
	 * Pull packet off interface.
	 */
	m = qe_get(sc, idx, len);
	if (m == NULL) {
		/* mbuf shortage: count it as an input error */
		ifp->if_ierrors++;
		return;
	}
	ifp->if_ipackets++;

#if NBPFILTER > 0
	/*
	 * Check if there's a BPF listener on this interface.
	 * If so, hand off the raw packet to BPF.
	 */
	if (ifp->if_bpf)
		bpf_mtap(ifp->if_bpf, m);
#endif
	/* Pass the packet up. */
	(*ifp->if_input)(ifp, m);
}

/*
 * Start output on interface.
 * We make two assumptions here:
 *  1) that the current priority is set to splnet _before_ this code
 *     is called *and* is returned to the appropriate priority after
 *     return
 *  2) that the IFF_OACTIVE flag is checked before this code is called
 *     (i.e.
 *     that the output part of the interface is idle)
 */
void
qestart(ifp)
	struct ifnet *ifp;
{
	struct qe_softc *sc = (struct qe_softc *)ifp->if_softc;
	struct qec_xd *txd = sc->sc_rb.rb_txd;
	struct mbuf *m;
	unsigned int bix, len;
	unsigned int ntbuf = sc->sc_rb.rb_ntbuf;

	/* Do nothing unless running and not already busy */
	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	bix = sc->sc_rb.rb_tdhead;

	/* Drain the send queue into the transmit descriptor ring */
	for (;;) {
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == 0)
			break;

#if NBPFILTER > 0
		/*
		 * If BPF is listening on this interface, let it see the
		 * packet before we commit it to the wire.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif

		/*
		 * Copy the mbuf chain into the transmit buffer.
		 */
		len = qe_put(sc, bix, m);

		/*
		 * Initialize transmit registers and start transmission
		 * (hand the descriptor to the chip, then poke TWAKEUP).
		 */
		txd[bix].xd_flags = QEC_XD_OWN | QEC_XD_SOP | QEC_XD_EOP |
		    (len & QEC_XD_LENGTH);
		bus_space_write_4(sc->sc_bustag, sc->sc_cr, QE_CRI_CTRL,
		    QE_CR_CTRL_TWAKEUP);

		if (++bix == QEC_XD_RING_MAXSIZE)
			bix = 0;

		/* Ring full: mark busy until qe_tint() frees a slot */
		if (++sc->sc_rb.rb_td_nbusy == ntbuf) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
	}

	sc->sc_rb.rb_tdhead = bix;
}

/*
 * Stop the channel: software-reset the MACE, then reset the per-channel
 * QEC logic.  Each reset bit is polled until it self-clears (bounded at
 * 200 iterations of a 20us delay).
 */
void
qestop(sc)
	struct qe_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mr = sc->sc_mr;
	bus_space_handle_t cr = sc->sc_cr;
	int n;

#if defined(SUN4U) || defined(__GNUC__)
	/* Suppress `unused' warnings when the bus_space macros ignore `t' */
	(void)&t;
#endif
	/* Stop the schwurst */
	bus_space_write_1(t, mr, QE_MRI_BIUCC, QE_MR_BIUCC_SWRST);
	for (n = 200; n > 0; n--) {
		if ((bus_space_read_1(t, mr, QE_MRI_BIUCC) &
			QE_MR_BIUCC_SWRST) == 0)
			break;
		DELAY(20);
	}

	/* then reset */
	bus_space_write_4(t, cr, QE_CRI_CTRL, QE_CR_CTRL_RESET);
	for (n = 200; n > 0; n--) {
		if ((bus_space_read_4(t, cr, QE_CRI_CTRL) &
			QE_CR_CTRL_RESET) == 0)
			break;
		DELAY(20);
	}
}

/*
 * Reset interface: full stop + reinit under splnet.
 */
void
qereset(sc)
	struct qe_softc *sc;
{
	int s;

	s = splnet();
	qestop(sc);
	qeinit(sc);
	splx(s);
}

/*
 * Watchdog: the transmitter has been idle too long; log it, count an
 * output error and reset the channel.
 */
void
qewatchdog(ifp)
	struct ifnet *ifp;
{
	struct qe_softc *sc = ifp->if_softc;

	log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;

	qereset(sc);
}

/*
 * Interrupt dispatch.
 * Reads the shared QEC status word, extracts this channel's nibble, then
 * dispatches to the error/transmit/receive handlers as indicated by the
 * per-channel status register.  Returns nonzero if the interrupt was ours.
 */
int
qeintr(arg)
	void *arg;
{
	struct qe_softc *sc = (struct qe_softc *)arg;
	bus_space_tag_t t = sc->sc_bustag;
	u_int32_t qecstat, qestat;
	int r = 0;

#if defined(SUN4U) || defined(__GNUC__)
	/* Suppress `unused' warnings when the bus_space macros ignore `t' */
	(void)&t;
#endif
	/* Read QEC status and channel status */
	qecstat = bus_space_read_4(t, sc->sc_qr, QEC_QRI_STAT);
#ifdef QEDEBUG
	if (sc->sc_debug) {
		printf("qe%d: intr: qecstat=%x\n", sc->sc_channel, qecstat);
	}
#endif

	/* Filter out status for this channel (4 status bits per channel) */
	qecstat = qecstat >> (4 * sc->sc_channel);
	if ((qecstat & 0xf) == 0)
		return (r);

	qestat = bus_space_read_4(t, sc->sc_cr, QE_CRI_STAT);

#ifdef QEDEBUG
	if (sc->sc_debug) {
		char bits[64]; int i;
		bus_space_tag_t t = sc->sc_bustag;
		bus_space_handle_t mr = sc->sc_mr;

		printf("qe%d: intr: qestat=%s\n", sc->sc_channel,
		    bitmask_snprintf(qestat, QE_CR_STAT_BITS, bits, sizeof(bits)));

		/* Dump all 32 MACE registers, 8 per line */
		printf("MACE registers:\n");
		for (i = 0 ; i < 32; i++) {
			printf(" m[%d]=%x,", i, bus_space_read_1(t, mr, i));
			if (((i+1) & 7) == 0)
				printf("\n");
		}
	}
#endif

	if (qestat & QE_CR_STAT_ALLERRORS) {
#ifdef QEDEBUG
		if (sc->sc_debug) {
			char bits[64];
			printf("qe%d: eint: qestat=%s\n", sc->sc_channel,
			    bitmask_snprintf(qestat, QE_CR_STAT_BITS, bits,
			    sizeof(bits)));
		}
#endif
		r |= qe_eint(sc, qestat);
		/* qe_eint() returns -1 after resetting the chip; stop here */
		if (r == -1)
			return (1);
	}

	if (qestat & QE_CR_STAT_TXIRQ)
		r |= qe_tint(sc);

	if (qestat &
QE_CR_STAT_RXIRQ)
		r |= qe_rint(sc);

	return (r);
}

/*
 * Transmit interrupt.
 * Reclaim completed transmit descriptors (those the chip has released,
 * i.e. QEC_XD_OWN clear), clear IFF_OACTIVE and restart output.
 */
int
qe_tint(sc)
	struct qe_softc *sc;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	unsigned int bix, txflags;

	bix = sc->sc_rb.rb_tdtail;

	for (;;) {
		if (sc->sc_rb.rb_td_nbusy <= 0)
			break;

		txflags = sc->sc_rb.rb_txd[bix].xd_flags;

		/* Still owned by the chip: nothing more to reclaim */
		if (txflags & QEC_XD_OWN)
			break;

		ifp->if_flags &= ~IFF_OACTIVE;
		ifp->if_opackets++;

		if (++bix == QEC_XD_RING_MAXSIZE)
			bix = 0;

		--sc->sc_rb.rb_td_nbusy;
	}

	sc->sc_rb.rb_tdtail = bix;

	/* Feed any queued packets into the freed slots */
	qestart(ifp);

	/* All descriptors reclaimed: disarm the watchdog */
	if (sc->sc_rb.rb_td_nbusy == 0)
		ifp->if_timer = 0;

	return (1);
}

/*
 * Receive interrupt.
 * Walk the receive ring from rb_rdtail, passing up every descriptor the
 * chip has filled (QEC_XD_OWN clear) and handing a fresh descriptor back
 * to the hardware for each one consumed.
 */
int
qe_rint(sc)
	struct qe_softc *sc;
{
	struct qec_xd *xd = sc->sc_rb.rb_rxd;
	unsigned int bix, len;
	unsigned int nrbuf = sc->sc_rb.rb_nrbuf;
#ifdef QEDEBUG
	int npackets = 0;
#endif

	bix = sc->sc_rb.rb_rdtail;

	/*
	 * Process all buffers with valid data.
	 */
	for (;;) {
		len = xd[bix].xd_flags;
		if (len & QEC_XD_OWN)
			break;

#ifdef QEDEBUG
		npackets++;
#endif

		len &= QEC_XD_LENGTH;
		/* Strip the trailing 4-byte FCS from the reported length */
		len -= 4;
		qe_read(sc, bix, len);

		/*
		 * Return a descriptor `nrbuf' slots ahead to the hardware
		 * with a full-sized buffer (sets QEC_XD_OWN).
		 */
		xd[(bix+nrbuf) % QEC_XD_RING_MAXSIZE].xd_flags =
			QEC_XD_OWN | (QE_PKT_BUF_SZ & QEC_XD_LENGTH);

		if (++bix == QEC_XD_RING_MAXSIZE)
			bix = 0;
	}
#ifdef QEDEBUG
	if (npackets == 0 && sc->sc_debug)
		printf("%s: rint: no packets; rb index %d; status 0x%x\n",
			sc->sc_dev.dv_xname, bix, len);
#endif

	sc->sc_rb.rb_rdtail = bix;

	return (1);
}

/*
 * Error interrupt.
 */
/*
 * Decode the error bits in `why', log and count each condition, and
 * reset the channel when a fatal condition (rst) was seen.  Returns -1
 * after a reset, 1 if any known error was handled, 0 otherwise.
 */
int
qe_eint(sc, why)
	struct qe_softc *sc;
	u_int32_t why;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	/* r: at least one bit recognized; rst: channel needs a reset */
	int r = 0, rst = 0;

	if (why & QE_CR_STAT_EDEFER) {
		printf("%s: excessive tx defers.\n", sc->sc_dev.dv_xname);
		r |= 1;
		ifp->if_oerrors++;
	}

	if (why & QE_CR_STAT_CLOSS) {
		printf("%s: no carrier, link down?\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		r |= 1;
	}

	if (why & QE_CR_STAT_ERETRIES) {
		printf("%s: excessive tx retries\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		r |= 1;
		rst = 1;
	}


	if (why & QE_CR_STAT_LCOLL) {
		printf("%s: late tx transmission\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_FUFLOW) {
		printf("%s: tx fifo underflow\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_JERROR) {
		printf("%s: jabber seen\n", sc->sc_dev.dv_xname);
		r |= 1;
	}

	if (why & QE_CR_STAT_BERROR) {
		printf("%s: babble seen\n", sc->sc_dev.dv_xname);
		r |= 1;
	}

	/* Collision counter overflowed: it counts in units of 256 */
	if (why & QE_CR_STAT_TCCOFLOW) {
		ifp->if_collisions += 256;
		ifp->if_oerrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_TXDERROR) {
		printf("%s: tx descriptor is bad\n", sc->sc_dev.dv_xname);
		rst = 1;
		r |= 1;
	}

	if (why & QE_CR_STAT_TXLERR) {
		printf("%s: tx late error\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		rst = 1;
		r |= 1;
	}

	if (why & QE_CR_STAT_TXPERR) {
		printf("%s: tx dma parity error\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		rst = 1;
		r |= 1;
	}

	if (why & QE_CR_STAT_TXSERR) {
		printf("%s: tx dma sbus error ack\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		rst = 1;
		r |= 1;
	}

	/* Receive-side counter overflows also count in units of 256 */
	if (why & QE_CR_STAT_RCCOFLOW) {
		ifp->if_collisions += 256;
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why &
QE_CR_STAT_RUOFLOW) {
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_MCOFLOW) {
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_RXFOFLOW) {
		printf("%s: rx fifo overflow\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
	}

	if (why & QE_CR_STAT_RLCOLL) {
		printf("%s: rx late collision\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		ifp->if_collisions++;
		r |= 1;
	}

	if (why & QE_CR_STAT_FCOFLOW) {
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_CECOFLOW) {
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_RXDROP) {
		printf("%s: rx packet dropped\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
	}

	if (why & QE_CR_STAT_RXSMALL) {
		printf("%s: rx buffer too small\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_RXLERR) {
		printf("%s: rx late error\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_RXPERR) {
		printf("%s: rx dma parity error\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_RXSERR) {
		printf("%s: rx dma sbus error ack\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
		rst = 1;
	}

	/* None of the bits we know about were set */
	if (r == 0)
		printf("%s: unexpected interrupt error: %08x\n",
			sc->sc_dev.dv_xname, why);

	/* A fatal condition was seen: reset and tell the caller to stop */
	if (rst) {
		printf("%s: resetting...\n", sc->sc_dev.dv_xname);
		qereset(sc);
		return (-1);
	}

	return (r);
}

/*
 * Process an ioctl request: address assignment, up/down flag changes,
 * multicast list updates and media selection.  Runs under splnet.
 */
int
qeioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct qe_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			qeinit(sc);
			arp_ifinit(ifp, ifa);
			break;
#endif /* INET */
#ifdef NS
		case AF_NS:
		    {
			struct ns_addr *ina = &IA_SNS(ifa)->sns_addr;

			/* XNS: sync the host part with the hardware address */
			if (ns_nullhost(*ina))
				ina->x_host =
				    *(union ns_host *)LLADDR(ifp->if_sadl);
			else
				bcopy(ina->x_host.c_host, LLADDR(ifp->if_sadl),
				    sizeof(sc->sc_enaddr));
			/* Set new address. */
			qeinit(sc);
			break;
		    }
#endif /* NS */
		default:
			qeinit(sc);
			break;
		}
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			qestop(sc);
			ifp->if_flags &= ~IFF_RUNNING;

		} else if ((ifp->if_flags & IFF_UP) != 0 &&
			   (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			qeinit(sc);

		} else {
			/*
			 * Reset the interface to pick up changes in any other
			 * flags that affect hardware registers.
			 */
			qestop(sc);
			qeinit(sc);
		}
#ifdef QEDEBUG
		/* Track IFF_DEBUG so the interrupt paths can log verbosely */
		sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
#endif
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_ethercom):
		    ether_delmulti(ifr, &sc->sc_ethercom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			qe_mcreset(sc);
			error = 0;
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_ifmedia, cmd);
		break;

	default:
		error = EINVAL;
		break;
	}

	splx(s);
	return (error);
}


/*
 * Initialize the channel: stop the hardware, rebuild the descriptor
 * rings, program the QEC channel and MACE registers, load the station
 * address and multicast filter, and mark the interface running.
 * Runs under splnet.
 */
void
qeinit(sc)
	struct qe_softc *sc;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t cr = sc->sc_cr;
	bus_space_handle_t mr = sc->sc_mr;
	struct qec_softc *qec = sc->sc_qec;
	u_int32_t qecaddr;
	u_int8_t *ea;
	int s;

#if defined(SUN4U) || defined(__GNUC__)
	/* Suppress `unused' warnings when the bus_space macros ignore `t' */
	(void)&t;
#endif
	s = splnet();

	qestop(sc);

	/*
	 * Allocate descriptor ring and buffers
	 */
	qec_meminit(&sc->sc_rb, QE_PKT_BUF_SZ);

	/* Channel registers: point the chip at the descriptor rings */
	bus_space_write_4(t, cr, QE_CRI_RXDS, (u_int32_t)sc->sc_rb.rb_rxddma);
	bus_space_write_4(t, cr, QE_CRI_TXDS, (u_int32_t)sc->sc_rb.rb_txddma);

	bus_space_write_4(t, cr, QE_CRI_RIMASK, 0);
	bus_space_write_4(t, cr, QE_CRI_TIMASK, 0);
	bus_space_write_4(t, cr, QE_CRI_QMASK, 0);
	bus_space_write_4(t, cr, QE_CRI_MMASK, QE_CR_MMASK_RXCOLL);
	bus_space_write_4(t, cr, QE_CRI_CCNT, 0);
	bus_space_write_4(t, cr, QE_CRI_PIPG, 0);

	/* Carve this channel's slice out of the QEC local buffer memory */
	qecaddr = sc->sc_channel * qec->sc_msize;
	bus_space_write_4(t, cr, QE_CRI_RXWBUF, qecaddr);
	bus_space_write_4(t, cr, QE_CRI_RXRBUF, qecaddr);
	bus_space_write_4(t, cr, QE_CRI_TXWBUF, qecaddr + qec->sc_rsize);
	bus_space_write_4(t, cr, QE_CRI_TXRBUF, qecaddr + qec->sc_rsize);

	/* MACE registers: */
	bus_space_write_1(t, mr, QE_MRI_PHYCC, QE_MR_PHYCC_ASEL);
	bus_space_write_1(t, mr, QE_MRI_XMTFC, QE_MR_XMTFC_APADXMT);
	bus_space_write_1(t, mr, QE_MRI_RCVFC, 0);

	/*
	 * Mask MACE's receive interrupt, since we're being notified
	 * by the QEC after DMA completes.
	 */
	bus_space_write_1(t, mr, QE_MRI_IMR,
	    QE_MR_IMR_CERRM | QE_MR_IMR_RCVINTM);

	bus_space_write_1(t, mr, QE_MRI_BIUCC,
	    QE_MR_BIUCC_BSWAP | QE_MR_BIUCC_64TS);

	bus_space_write_1(t, mr, QE_MRI_FIFOFC,
	    QE_MR_FIFOCC_TXF16 | QE_MR_FIFOCC_RXF32 |
	    QE_MR_FIFOCC_RFWU | QE_MR_FIFOCC_TFWU);

	bus_space_write_1(t, mr, QE_MRI_PLSCC, QE_MR_PLSCC_TP);

	/*
	 * Station address
	 * (ADDRCHG|PHYADDR selects the physical-address register window,
	 * then the 6 address bytes are streamed into PADR.)
	 */
	ea = sc->sc_enaddr;
	bus_space_write_1(t, mr, QE_MRI_IAC,
	    QE_MR_IAC_ADDRCHG | QE_MR_IAC_PHYADDR);
	bus_space_write_multi_1(t, mr, QE_MRI_PADR, ea, 6);

	/* Apply media settings */
	qe_ifmedia_upd(ifp);

	/*
	 * Clear Logical address filter
	 */
	bus_space_write_1(t, mr, QE_MRI_IAC,
	    QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR);
	bus_space_set_multi_1(t, mr, QE_MRI_LADRF, 0, 8);
	bus_space_write_1(t, mr, QE_MRI_IAC, 0);

	/* Clear missed packet count (register cleared on read) */
	(void)bus_space_read_1(t, mr, QE_MRI_MPC);

#if 0
	/* test register: */
	bus_space_write_1(t, mr, QE_MRI_UTR, 0);
#endif

	/* Reset multicast filter (this also enables RX/TX in MACCC) */
	qe_mcreset(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	splx(s);
}

/*
 * Reset multicast filter.
 */
/*
 * Reprogram the MACE logical-address (multicast hash) filter from the
 * current multicast list, honouring IFF_PROMISC and IFF_ALLMULTI.  The
 * MACCC register written at each exit path also enables RX and TX.
 */
void
qe_mcreset(sc)
	struct qe_softc *sc;
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mr = sc->sc_mr;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int32_t crc;
	u_int16_t hash[4];		/* 64-bit hash filter, as bytes via ladrp */
	u_int8_t octet, maccc, *ladrp = (u_int8_t *)&hash[0];
	int i, j;

#if defined(SUN4U) || defined(__GNUC__)
	/* Suppress `unused' warnings when the bus_space macros ignore `t' */
	(void)&t;
#endif

	/* We also enable transmitter & receiver here */
	maccc = QE_MR_MACCC_ENXMT | QE_MR_MACCC_ENRCV;

	/* Promiscuous mode: no filtering at all */
	if (ifp->if_flags & IFF_PROMISC) {
		maccc |= QE_MR_MACCC_PROM;
		bus_space_write_1(t, mr, QE_MRI_MACCC, maccc);
		return;
	}

	/* All-multicast: set every bit in the logical address filter */
	if (ifp->if_flags & IFF_ALLMULTI) {
		bus_space_write_1(t, mr, QE_MRI_IAC,
		    QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR);
		bus_space_set_multi_1(t, mr, QE_MRI_LADRF, 0xff, 8);
		bus_space_write_1(t, mr, QE_MRI_IAC, 0);
		bus_space_write_1(t, mr, QE_MRI_MACCC, maccc);
		return;
	}

	hash[3] = hash[2] = hash[1] = hash[0] = 0;

	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (bcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0) {
			/*
			 * We must listen to a range of multicast
			 * addresses. For now, just accept all
			 * multicasts, rather than trying to set only
			 * those filter bits needed to match the range.
			 * (At this time, the only use of address
			 * ranges is for IP multicast routing, for
			 * which the range is big enough to require
			 * all bits set.)
			 */
			bus_space_write_1(t, mr, QE_MRI_IAC,
			    QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR);
			bus_space_set_multi_1(t, mr, QE_MRI_LADRF, 0xff, 8);
			bus_space_write_1(t, mr, QE_MRI_IAC, 0);
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}

		/* Bit-serial little-endian CRC-32 of the multicast address */
		crc = 0xffffffff;

		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			octet = enm->enm_addrlo[i];

			for (j = 0; j < 8; j++) {
				if ((crc & 1) ^ (octet & 1)) {
					crc >>= 1;
					crc ^= MC_POLY_LE;
				}
				else
					crc >>= 1;
				octet >>= 1;
			}
		}

		/* Top 6 CRC bits select one of the 64 filter bits */
		crc >>= 26;
		hash[crc >> 4] |= 1 << (crc & 0xf);
		ETHER_NEXT_MULTI(step, enm);
	}

	/* Write the computed 64-bit filter, then re-enable RX/TX */
	bus_space_write_1(t, mr, QE_MRI_IAC,
	    QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR);
	bus_space_write_multi_1(t, mr, QE_MRI_LADRF, ladrp, 8);
	bus_space_write_1(t, mr, QE_MRI_IAC, 0);
	bus_space_write_1(t, mr, QE_MRI_MACCC, maccc);
}

/*
 * Get current media settings.
 * Reports the active port from PLSCC and link status from PHYCC.
 */
void
qe_ifmedia_sts(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct qe_softc *sc = ifp->if_softc;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mr = sc->sc_mr;
	u_int8_t v;

#if defined(SUN4U) || defined(__GNUC__)
	/* Suppress `unused' warnings when the bus_space macros ignore `t' */
	(void)&t;
#endif
	/*
	 * NOTE(review): this first read is never used -- `v' is overwritten
	 * below and the switch re-reads PLSCC itself.  Presumably harmless
	 * (left in place in case the register read has side effects);
	 * confirm against the Am79C940 documentation before removing.
	 */
	v = bus_space_read_1(t, mr, QE_MRI_PLSCC);

	switch (bus_space_read_1(t, mr, QE_MRI_PLSCC) & QE_MR_PLSCC_PORTMASK) {
	case QE_MR_PLSCC_TP:
		ifmr->ifm_active = IFM_ETHER | IFM_10_T;
		break;
	case QE_MR_PLSCC_AUI:
		ifmr->ifm_active = IFM_ETHER | IFM_10_5;
		break;
	case QE_MR_PLSCC_GPSI:
	case QE_MR_PLSCC_DAI:
		/* GPSI/DAI ports: not reported */
		break;
	}

	/* PHYCC link-fail bit determines IFM_ACTIVE */
	v = bus_space_read_1(t, mr, QE_MRI_PHYCC);
	ifmr->ifm_status |= IFM_AVALID;
	if ((v & QE_MR_PHYCC_LNKFL) != 0)
		ifmr->ifm_status &= ~IFM_ACTIVE;
	else
		ifmr->ifm_status |= IFM_ACTIVE;

}

/*
 * Set media options.
 */
/*
 * Program the requested media into the MACE: read-modify-write PLSCC
 * (port select) and PHYCC (auto-select enable).  Returns EINVAL for
 * non-Ethernet media words; unknown subtypes leave the port unchanged.
 */
int
qe_ifmedia_upd(ifp)
	struct ifnet *ifp;
{
	struct qe_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_ifmedia;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mr = sc->sc_mr;
	int newmedia = ifm->ifm_media;
	u_int8_t plscc, phycc;

#if defined(SUN4U) || defined(__GNUC__)
	/* Suppress `unused' warnings when the bus_space macros ignore `t' */
	(void)&t;
#endif
	if (IFM_TYPE(newmedia) != IFM_ETHER)
		return (EINVAL);

	/* Preserve the other bits; clear only the fields we set below */
	plscc = bus_space_read_1(t, mr, QE_MRI_PLSCC) & ~QE_MR_PLSCC_PORTMASK;
	phycc = bus_space_read_1(t, mr, QE_MRI_PHYCC) & ~QE_MR_PHYCC_ASEL;

	if (IFM_SUBTYPE(newmedia) == IFM_AUTO)
		phycc |= QE_MR_PHYCC_ASEL;
	else if (IFM_SUBTYPE(newmedia) == IFM_10_T)
		plscc |= QE_MR_PLSCC_TP;
	else if (IFM_SUBTYPE(newmedia) == IFM_10_5)
		plscc |= QE_MR_PLSCC_AUI;

	bus_space_write_1(t, mr, QE_MRI_PLSCC, plscc);
	bus_space_write_1(t, mr, QE_MRI_PHYCC, phycc);

	return (0);
}