1 /* $NetBSD: qe.c,v 1.33 2004/10/30 18:10:06 thorpej Exp $ */ 2 3 /*- 4 * Copyright (c) 1999 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Paul Kranenburg. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. All advertising materials mentioning features or use of this software 19 * must display the following acknowledgement: 20 * This product includes software developed by the NetBSD 21 * Foundation, Inc. and its contributors. 22 * 4. Neither the name of The NetBSD Foundation nor the names of its 23 * contributors may be used to endorse or promote products derived 24 * from this software without specific prior written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 29 * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 36 * POSSIBILITY OF SUCH DAMAGE. 37 */ 38 39 /* 40 * Copyright (c) 1998 Jason L. Wright. 41 * All rights reserved. 42 * 43 * Redistribution and use in source and binary forms, with or without 44 * modification, are permitted provided that the following conditions 45 * are met: 46 * 1. Redistributions of source code must retain the above copyright 47 * notice, this list of conditions and the following disclaimer. 48 * 2. Redistributions in binary form must reproduce the above copyright 49 * notice, this list of conditions and the following disclaimer in the 50 * documentation and/or other materials provided with the distribution. 51 * 3. The name of the authors may not be used to endorse or promote products 52 * derived from this software without specific prior written permission. 53 * 54 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR 55 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 56 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
57 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, 58 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 59 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 60 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 61 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 62 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 63 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 64 */ 65 66 /* 67 * Driver for the SBus qec+qe QuadEthernet board. 68 * 69 * This driver was written using the AMD MACE Am79C940 documentation, some 70 * ideas gleaned from the S/Linux driver for this card, Solaris header files, 71 * and a loan of a card from Paul Southworth of the Internet Engineering 72 * Group (www.ieng.com). 73 */ 74 75 #include <sys/cdefs.h> 76 __KERNEL_RCSID(0, "$NetBSD: qe.c,v 1.33 2004/10/30 18:10:06 thorpej Exp $"); 77 78 #define QEDEBUG 79 80 #include "opt_ddb.h" 81 #include "opt_inet.h" 82 #include "opt_ccitt.h" 83 #include "opt_llc.h" 84 #include "opt_ns.h" 85 #include "bpfilter.h" 86 #include "rnd.h" 87 88 #include <sys/param.h> 89 #include <sys/systm.h> 90 #include <sys/kernel.h> 91 #include <sys/errno.h> 92 #include <sys/ioctl.h> 93 #include <sys/mbuf.h> 94 #include <sys/socket.h> 95 #include <sys/syslog.h> 96 #include <sys/device.h> 97 #include <sys/malloc.h> 98 #if NRND > 0 99 #include <sys/rnd.h> 100 #endif 101 102 #include <net/if.h> 103 #include <net/if_dl.h> 104 #include <net/if_types.h> 105 #include <net/netisr.h> 106 #include <net/if_media.h> 107 #include <net/if_ether.h> 108 109 #ifdef INET 110 #include <netinet/in.h> 111 #include <netinet/if_inarp.h> 112 #include <netinet/in_systm.h> 113 #include <netinet/in_var.h> 114 #include <netinet/ip.h> 115 #endif 116 117 #ifdef NS 118 #include <netns/ns.h> 119 #include <netns/ns_if.h> 120 #endif 121 122 #if NBPFILTER > 0 123 #include <net/bpf.h> 124 #include 
<net/bpfdesc.h> 125 #endif 126 127 #include <machine/bus.h> 128 #include <machine/intr.h> 129 #include <machine/autoconf.h> 130 131 #include <dev/sbus/sbusvar.h> 132 #include <dev/sbus/qecreg.h> 133 #include <dev/sbus/qecvar.h> 134 #include <dev/sbus/qereg.h> 135 136 struct qe_softc { 137 struct device sc_dev; /* base device */ 138 struct sbusdev sc_sd; /* sbus device */ 139 bus_space_tag_t sc_bustag; /* bus & DMA tags */ 140 bus_dma_tag_t sc_dmatag; 141 bus_dmamap_t sc_dmamap; 142 struct ethercom sc_ethercom; 143 struct ifmedia sc_ifmedia; /* interface media */ 144 145 struct qec_softc *sc_qec; /* QEC parent */ 146 147 bus_space_handle_t sc_qr; /* QEC registers */ 148 bus_space_handle_t sc_mr; /* MACE registers */ 149 bus_space_handle_t sc_cr; /* channel registers */ 150 151 int sc_channel; /* channel number */ 152 u_int sc_rev; /* board revision */ 153 154 int sc_burst; 155 156 struct qec_ring sc_rb; /* Packet Ring Buffer */ 157 158 /* MAC address */ 159 u_int8_t sc_enaddr[6]; 160 161 #ifdef QEDEBUG 162 int sc_debug; 163 #endif 164 }; 165 166 int qematch __P((struct device *, struct cfdata *, void *)); 167 void qeattach __P((struct device *, struct device *, void *)); 168 169 void qeinit __P((struct qe_softc *)); 170 void qestart __P((struct ifnet *)); 171 void qestop __P((struct qe_softc *)); 172 void qewatchdog __P((struct ifnet *)); 173 int qeioctl __P((struct ifnet *, u_long, caddr_t)); 174 void qereset __P((struct qe_softc *)); 175 176 int qeintr __P((void *)); 177 int qe_eint __P((struct qe_softc *, u_int32_t)); 178 int qe_rint __P((struct qe_softc *)); 179 int qe_tint __P((struct qe_softc *)); 180 void qe_mcreset __P((struct qe_softc *)); 181 182 static int qe_put __P((struct qe_softc *, int, struct mbuf *)); 183 static void qe_read __P((struct qe_softc *, int, int)); 184 static struct mbuf *qe_get __P((struct qe_softc *, int, int)); 185 186 /* ifmedia callbacks */ 187 void qe_ifmedia_sts __P((struct ifnet *, struct ifmediareq *)); 188 int qe_ifmedia_upd 
__P((struct ifnet *)); 189 190 CFATTACH_DECL(qe, sizeof(struct qe_softc), 191 qematch, qeattach, NULL, NULL); 192 193 int 194 qematch(parent, cf, aux) 195 struct device *parent; 196 struct cfdata *cf; 197 void *aux; 198 { 199 struct sbus_attach_args *sa = aux; 200 201 return (strcmp(cf->cf_name, sa->sa_name) == 0); 202 } 203 204 void 205 qeattach(parent, self, aux) 206 struct device *parent, *self; 207 void *aux; 208 { 209 struct sbus_attach_args *sa = aux; 210 struct qec_softc *qec = (struct qec_softc *)parent; 211 struct qe_softc *sc = (struct qe_softc *)self; 212 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 213 int node = sa->sa_node; 214 bus_dma_tag_t dmatag = sa->sa_dmatag; 215 bus_dma_segment_t seg; 216 bus_size_t size; 217 int rseg, error; 218 219 if (sa->sa_nreg < 2) { 220 printf("%s: only %d register sets\n", 221 self->dv_xname, sa->sa_nreg); 222 return; 223 } 224 225 if (bus_space_map(sa->sa_bustag, 226 (bus_addr_t)BUS_ADDR( 227 sa->sa_reg[0].oa_space, 228 sa->sa_reg[0].oa_base), 229 (bus_size_t)sa->sa_reg[0].oa_size, 230 0, &sc->sc_cr) != 0) { 231 printf("%s: cannot map registers\n", self->dv_xname); 232 return; 233 } 234 235 if (bus_space_map(sa->sa_bustag, 236 (bus_addr_t)BUS_ADDR( 237 sa->sa_reg[1].oa_space, 238 sa->sa_reg[1].oa_base), 239 (bus_size_t)sa->sa_reg[1].oa_size, 240 0, &sc->sc_mr) != 0) { 241 printf("%s: cannot map registers\n", self->dv_xname); 242 return; 243 } 244 245 sc->sc_rev = prom_getpropint(node, "mace-version", -1); 246 printf(" rev %x", sc->sc_rev); 247 248 sc->sc_bustag = sa->sa_bustag; 249 sc->sc_dmatag = sa->sa_dmatag; 250 sc->sc_qec = qec; 251 sc->sc_qr = qec->sc_regs; 252 253 sc->sc_channel = prom_getpropint(node, "channel#", -1); 254 sc->sc_burst = qec->sc_burst; 255 256 qestop(sc); 257 258 /* Note: no interrupt level passed */ 259 (void)bus_intr_establish(sa->sa_bustag, 0, IPL_NET, qeintr, sc); 260 prom_getether(node, sc->sc_enaddr); 261 262 /* 263 * Allocate descriptor ring and buffers. 
264 */ 265 266 /* for now, allocate as many bufs as there are ring descriptors */ 267 sc->sc_rb.rb_ntbuf = QEC_XD_RING_MAXSIZE; 268 sc->sc_rb.rb_nrbuf = QEC_XD_RING_MAXSIZE; 269 270 size = QEC_XD_RING_MAXSIZE * sizeof(struct qec_xd) + 271 QEC_XD_RING_MAXSIZE * sizeof(struct qec_xd) + 272 sc->sc_rb.rb_ntbuf * QE_PKT_BUF_SZ + 273 sc->sc_rb.rb_nrbuf * QE_PKT_BUF_SZ; 274 275 /* Get a DMA handle */ 276 if ((error = bus_dmamap_create(dmatag, size, 1, size, 0, 277 BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) { 278 printf("%s: DMA map create error %d\n", self->dv_xname, error); 279 return; 280 } 281 282 /* Allocate DMA buffer */ 283 if ((error = bus_dmamem_alloc(dmatag, size, 0, 0, 284 &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) { 285 printf("%s: DMA buffer alloc error %d\n", 286 self->dv_xname, error); 287 return; 288 } 289 290 /* Map DMA buffer in CPU addressable space */ 291 if ((error = bus_dmamem_map(dmatag, &seg, rseg, size, 292 &sc->sc_rb.rb_membase, 293 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) { 294 printf("%s: DMA buffer map error %d\n", 295 self->dv_xname, error); 296 bus_dmamem_free(dmatag, &seg, rseg); 297 return; 298 } 299 300 /* Load the buffer */ 301 if ((error = bus_dmamap_load(dmatag, sc->sc_dmamap, 302 sc->sc_rb.rb_membase, size, NULL, 303 BUS_DMA_NOWAIT)) != 0) { 304 printf("%s: DMA buffer map load error %d\n", 305 self->dv_xname, error); 306 bus_dmamem_unmap(dmatag, sc->sc_rb.rb_membase, size); 307 bus_dmamem_free(dmatag, &seg, rseg); 308 return; 309 } 310 sc->sc_rb.rb_dmabase = sc->sc_dmamap->dm_segs[0].ds_addr; 311 312 /* Initialize media properties */ 313 ifmedia_init(&sc->sc_ifmedia, 0, qe_ifmedia_upd, qe_ifmedia_sts); 314 ifmedia_add(&sc->sc_ifmedia, 315 IFM_MAKEWORD(IFM_ETHER,IFM_10_T,0,0), 316 0, NULL); 317 ifmedia_add(&sc->sc_ifmedia, 318 IFM_MAKEWORD(IFM_ETHER,IFM_10_5,0,0), 319 0, NULL); 320 ifmedia_add(&sc->sc_ifmedia, 321 IFM_MAKEWORD(IFM_ETHER,IFM_AUTO,0,0), 322 0, NULL); 323 ifmedia_set(&sc->sc_ifmedia, IFM_ETHER|IFM_AUTO); 324 325 
bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ); 326 ifp->if_softc = sc; 327 ifp->if_start = qestart; 328 ifp->if_ioctl = qeioctl; 329 ifp->if_watchdog = qewatchdog; 330 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | 331 IFF_MULTICAST; 332 IFQ_SET_READY(&ifp->if_snd); 333 334 /* Attach the interface. */ 335 if_attach(ifp); 336 ether_ifattach(ifp, sc->sc_enaddr); 337 338 printf(" address %s\n", ether_sprintf(sc->sc_enaddr)); 339 } 340 341 /* 342 * Pull data off an interface. 343 * Len is the length of data, with local net header stripped. 344 * We copy the data into mbufs. When full cluster sized units are present, 345 * we copy into clusters. 346 */ 347 static __inline__ struct mbuf * 348 qe_get(sc, idx, totlen) 349 struct qe_softc *sc; 350 int idx, totlen; 351 { 352 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 353 struct mbuf *m; 354 struct mbuf *top, **mp; 355 int len, pad, boff = 0; 356 caddr_t bp; 357 358 bp = sc->sc_rb.rb_rxbuf + (idx % sc->sc_rb.rb_nrbuf) * QE_PKT_BUF_SZ; 359 360 MGETHDR(m, M_DONTWAIT, MT_DATA); 361 if (m == NULL) 362 return (NULL); 363 m->m_pkthdr.rcvif = ifp; 364 m->m_pkthdr.len = totlen; 365 pad = ALIGN(sizeof(struct ether_header)) - sizeof(struct ether_header); 366 m->m_data += pad; 367 len = MHLEN - pad; 368 top = NULL; 369 mp = ⊤ 370 371 while (totlen > 0) { 372 if (top) { 373 MGET(m, M_DONTWAIT, MT_DATA); 374 if (m == NULL) { 375 m_freem(top); 376 return (NULL); 377 } 378 len = MLEN; 379 } 380 if (top && totlen >= MINCLSIZE) { 381 MCLGET(m, M_DONTWAIT); 382 if (m->m_flags & M_EXT) 383 len = MCLBYTES; 384 } 385 m->m_len = len = min(totlen, len); 386 bcopy(bp + boff, mtod(m, caddr_t), len); 387 boff += len; 388 totlen -= len; 389 *mp = m; 390 mp = &m->m_next; 391 } 392 393 return (top); 394 } 395 396 /* 397 * Routine to copy from mbuf chain to transmit buffer in 398 * network buffer memory. 
 */
__inline__ int
qe_put(sc, idx, m)
	struct qe_softc *sc;
	int idx;
	struct mbuf *m;
{
	struct mbuf *n;
	int len, tlen = 0, boff = 0;
	caddr_t bp;

	/* Locate this descriptor's packet buffer in the transmit area. */
	bp = sc->sc_rb.rb_txbuf + (idx % sc->sc_rb.rb_ntbuf) * QE_PKT_BUF_SZ;

	/* Copy each mbuf's data, freeing the chain as we go. */
	for (; m; m = n) {
		len = m->m_len;
		if (len == 0) {
			MFREE(m, n);
			continue;
		}
		bcopy(mtod(m, caddr_t), bp+boff, len);
		boff += len;
		tlen += len;
		MFREE(m, n);
	}
	/* Returns the total number of bytes copied. */
	return (tlen);
}

/*
 * Pass a packet to the higher levels.
 * `idx' is the receive descriptor index, `len' the reported frame length.
 */
__inline__ void
qe_read(sc, idx, len)
	struct qe_softc *sc;
	int idx, len;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m;

	/* Sanity-check the length reported by the hardware. */
	if (len <= sizeof(struct ether_header) ||
	    len > ETHERMTU + sizeof(struct ether_header)) {

		printf("%s: invalid packet size %d; dropping\n",
			ifp->if_xname, len);

		ifp->if_ierrors++;
		return;
	}

	/*
	 * Pull packet off interface.
	 */
	m = qe_get(sc, idx, len);
	if (m == NULL) {
		/* mbuf shortage: count an input error and drop. */
		ifp->if_ierrors++;
		return;
	}
	ifp->if_ipackets++;

#if NBPFILTER > 0
	/*
	 * Check if there's a BPF listener on this interface.
	 * If so, hand off the raw packet to BPF.
	 */
	if (ifp->if_bpf)
		bpf_mtap(ifp->if_bpf, m);
#endif
	/* Pass the packet up. */
	(*ifp->if_input)(ifp, m);
}

/*
 * Start output on interface.
 * We make two assumptions here:
 *  1) that the current priority is set to splnet _before_ this code
 *     is called *and* is returned to the appropriate priority after
 *     return
 *  2) that the IFF_OACTIVE flag is checked before this code is called
 *     (i.e.
	that the output part of the interface is idle)
 */
void
qestart(ifp)
	struct ifnet *ifp;
{
	struct qe_softc *sc = (struct qe_softc *)ifp->if_softc;
	struct qec_xd *txd = sc->sc_rb.rb_txd;
	struct mbuf *m;
	unsigned int bix, len;
	unsigned int ntbuf = sc->sc_rb.rb_ntbuf;

	/* Nothing to do unless we're running and not already busy. */
	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	bix = sc->sc_rb.rb_tdhead;

	for (;;) {
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == 0)
			break;

#if NBPFILTER > 0
		/*
		 * If BPF is listening on this interface, let it see the
		 * packet before we commit it to the wire.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif

		/*
		 * Copy the mbuf chain into the transmit buffer.
		 */
		len = qe_put(sc, bix, m);

		/*
		 * Initialize transmit registers and start transmission
		 * (hand the descriptor to the chip, then kick it).
		 */
		txd[bix].xd_flags = QEC_XD_OWN | QEC_XD_SOP | QEC_XD_EOP |
		    (len & QEC_XD_LENGTH);
		bus_space_write_4(sc->sc_bustag, sc->sc_cr, QE_CRI_CTRL,
		    QE_CR_CTRL_TWAKEUP);

		/* Advance the ring head, wrapping around. */
		if (++bix == QEC_XD_RING_MAXSIZE)
			bix = 0;

		/* Ring full: stall output until qe_tint() frees a slot. */
		if (++sc->sc_rb.rb_td_nbusy == ntbuf) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
	}

	sc->sc_rb.rb_tdhead = bix;
}

/*
 * Stop the transceiver: software-reset the MACE, then reset the
 * per-channel QEC logic, polling each until the reset bit clears.
 */
void
qestop(sc)
	struct qe_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mr = sc->sc_mr;
	bus_space_handle_t cr = sc->sc_cr;
	int n;

#if defined(SUN4U) || defined(__GNUC__)
	/* Reference `t' so it isn't flagged unused when the macros drop it. */
	(void)&t;
#endif
	/* Stop the schwurst */
	bus_space_write_1(t, mr, QE_MRI_BIUCC, QE_MR_BIUCC_SWRST);
	for (n = 200; n > 0; n--) {
		if ((bus_space_read_1(t, mr, QE_MRI_BIUCC) &
			QE_MR_BIUCC_SWRST) == 0)
			break;
		DELAY(20);
	}

	/* then reset */
	bus_space_write_4(t, cr, QE_CRI_CTRL, QE_CR_CTRL_RESET);
	for (n = 200; n > 0; n--) {
		if ((bus_space_read_4(t, cr, QE_CRI_CTRL) &
			QE_CR_CTRL_RESET) == 0)
			break;
		DELAY(20);
	}
}

/*
 * Reset interface: stop and reinitialize the chip at splnet.
 */
void
qereset(sc)
	struct qe_softc *sc;
{
	int s;

	s = splnet();
	qestop(sc);
	qeinit(sc);
	splx(s);
}

/*
 * Watchdog: the transmitter has been idle too long; log it, count an
 * output error and reset the interface.
 */
void
qewatchdog(ifp)
	struct ifnet *ifp;
{
	struct qe_softc *sc = ifp->if_softc;

	log(LOG_ERR, "%s: device timeout\n", sc->sc_dev.dv_xname);
	ifp->if_oerrors++;

	qereset(sc);
}

/*
 * Interrupt dispatch.
 * Reads the global QEC status, extracts this channel's nibble, then
 * dispatches to the error/transmit/receive handlers as indicated by
 * the per-channel status register.  Returns nonzero if handled.
 */
int
qeintr(arg)
	void *arg;
{
	struct qe_softc *sc = (struct qe_softc *)arg;
	bus_space_tag_t t = sc->sc_bustag;
	u_int32_t qecstat, qestat;
	int r = 0;

#if defined(SUN4U) || defined(__GNUC__)
	/* Reference `t' so it isn't flagged unused when the macros drop it. */
	(void)&t;
#endif
	/* Read QEC status and channel status */
	qecstat = bus_space_read_4(t, sc->sc_qr, QEC_QRI_STAT);
#ifdef QEDEBUG
	if (sc->sc_debug) {
		printf("qe%d: intr: qecstat=%x\n", sc->sc_channel, qecstat);
	}
#endif

	/* Filter out status for this channel (4 status bits per channel). */
	qecstat = qecstat >> (4 * sc->sc_channel);
	if ((qecstat & 0xf) == 0)
		return (r);

	qestat = bus_space_read_4(t, sc->sc_cr, QE_CRI_STAT);

#ifdef QEDEBUG
	if (sc->sc_debug) {
		char bits[64]; int i;
		bus_space_tag_t t = sc->sc_bustag;
		bus_space_handle_t mr = sc->sc_mr;

		printf("qe%d: intr: qestat=%s\n", sc->sc_channel,
		    bitmask_snprintf(qestat, QE_CR_STAT_BITS, bits, sizeof(bits)));

		/* Dump all 32 MACE registers, 8 per output line. */
		printf("MACE registers:\n");
		for (i = 0 ; i < 32; i++) {
			printf(" m[%d]=%x,", i, bus_space_read_1(t, mr, i));
			if (((i+1) & 7) == 0)
				printf("\n");
		}
	}
#endif

	if (qestat & QE_CR_STAT_ALLERRORS) {
#ifdef QEDEBUG
		if (sc->sc_debug) {
			char bits[64];
			printf("qe%d: eint: qestat=%s\n", sc->sc_channel,
			    bitmask_snprintf(qestat, QE_CR_STAT_BITS, bits,
			    sizeof(bits)));
		}
#endif
		r |= qe_eint(sc, qestat);
		/* qe_eint() returns -1 after resetting; stop processing. */
		if (r == -1)
			return (1);
	}

	if (qestat & QE_CR_STAT_TXIRQ)
		r |= qe_tint(sc);

	if (qestat & QE_CR_STAT_RXIRQ)
		r |= qe_rint(sc);

	return (r);
}

/*
 * Transmit interrupt: reclaim completed transmit descriptors, clear
 * the stall flag and restart output.
 */
int
qe_tint(sc)
	struct qe_softc *sc;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	unsigned int bix, txflags;

	bix = sc->sc_rb.rb_tdtail;

	for (;;) {
		if (sc->sc_rb.rb_td_nbusy <= 0)
			break;

		txflags = sc->sc_rb.rb_txd[bix].xd_flags;

		/* Stop at the first descriptor still owned by the chip. */
		if (txflags & QEC_XD_OWN)
			break;

		ifp->if_flags &= ~IFF_OACTIVE;
		ifp->if_opackets++;

		if (++bix == QEC_XD_RING_MAXSIZE)
			bix = 0;

		--sc->sc_rb.rb_td_nbusy;
	}

	sc->sc_rb.rb_tdtail = bix;

	/* Try to queue more packets now that slots are free. */
	qestart(ifp);

	if (sc->sc_rb.rb_td_nbusy == 0)
		ifp->if_timer = 0;

	return (1);
}

/*
 * Receive interrupt: pass up every descriptor the chip has filled,
 * then hand each buffer back to the hardware.
 */
int
qe_rint(sc)
	struct qe_softc *sc;
{
	struct qec_xd *xd = sc->sc_rb.rb_rxd;
	unsigned int bix, len;
	unsigned int nrbuf = sc->sc_rb.rb_nrbuf;
#ifdef QEDEBUG
	int npackets = 0;
#endif

	bix = sc->sc_rb.rb_rdtail;

	/*
	 * Process all buffers with valid data.
	 */
	for (;;) {
		len = xd[bix].xd_flags;
		if (len & QEC_XD_OWN)
			break;

#ifdef QEDEBUG
		npackets++;
#endif

		len &= QEC_XD_LENGTH;
		/* Drop 4 trailing bytes — presumably the FCS; TODO confirm
		 * against the QEC descriptor documentation. */
		len -= 4;
		qe_read(sc, bix, len);

		/* Give the buffer back to the chip for reuse. */
		xd[(bix+nrbuf) % QEC_XD_RING_MAXSIZE].xd_flags =
			QEC_XD_OWN | (QE_PKT_BUF_SZ & QEC_XD_LENGTH);

		if (++bix == QEC_XD_RING_MAXSIZE)
			bix = 0;
	}
#ifdef QEDEBUG
	if (npackets == 0 && sc->sc_debug)
		printf("%s: rint: no packets; rb index %d; status 0x%x\n",
			sc->sc_dev.dv_xname, bix, len);
#endif

	sc->sc_rb.rb_rdtail = bix;

	return (1);
}

/*
 * Error interrupt.
 * Decode `why' (the channel status word), print diagnostics and bump
 * error counters.  Returns -1 after resetting the chip for fatal
 * conditions, 1 if any known error bit was handled, 0 otherwise.
 */
int
qe_eint(sc, why)
	struct qe_softc *sc;
	u_int32_t why;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	/* r: some bit was recognized; rst: condition requires a reset. */
	int r = 0, rst = 0;

	if (why & QE_CR_STAT_EDEFER) {
		printf("%s: excessive tx defers.\n", sc->sc_dev.dv_xname);
		r |= 1;
		ifp->if_oerrors++;
	}

	if (why & QE_CR_STAT_CLOSS) {
		printf("%s: no carrier, link down?\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		r |= 1;
	}

	if (why & QE_CR_STAT_ERETRIES) {
		printf("%s: excessive tx retries\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		r |= 1;
		rst = 1;
	}


	if (why & QE_CR_STAT_LCOLL) {
		printf("%s: late tx transmission\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_FUFLOW) {
		printf("%s: tx fifo underflow\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_JERROR) {
		printf("%s: jabber seen\n", sc->sc_dev.dv_xname);
		r |= 1;
	}

	if (why & QE_CR_STAT_BERROR) {
		printf("%s: babble seen\n", sc->sc_dev.dv_xname);
		r |= 1;
	}

	/* 8-bit hardware counter overflowed: credit 256 events. */
	if (why & QE_CR_STAT_TCCOFLOW) {
		ifp->if_collisions += 256;
		ifp->if_oerrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_TXDERROR) {
		printf("%s: tx descriptor is bad\n", sc->sc_dev.dv_xname);
		rst = 1;
		r |= 1;
	}

	if (why & QE_CR_STAT_TXLERR) {
		printf("%s: tx late error\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		rst = 1;
		r |= 1;
	}

	if (why & QE_CR_STAT_TXPERR) {
		printf("%s: tx DMA parity error\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		rst = 1;
		r |= 1;
	}

	if (why & QE_CR_STAT_TXSERR) {
		printf("%s: tx DMA sbus error ack\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;
		rst = 1;
		r |= 1;
	}

	if (why & QE_CR_STAT_RCCOFLOW) {
		ifp->if_collisions += 256;
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_RUOFLOW) {
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_MCOFLOW) {
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_RXFOFLOW) {
		printf("%s: rx fifo overflow\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
	}

	if (why & QE_CR_STAT_RLCOLL) {
		printf("%s: rx late collision\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		ifp->if_collisions++;
		r |= 1;
	}

	if (why & QE_CR_STAT_FCOFLOW) {
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_CECOFLOW) {
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_RXDROP) {
		printf("%s: rx packet dropped\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
	}

	if (why & QE_CR_STAT_RXSMALL) {
		printf("%s: rx buffer too small\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_RXLERR) {
		printf("%s: rx late error\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_RXPERR) {
		printf("%s: rx DMA parity error\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_RXSERR) {
		printf("%s: rx DMA sbus error ack\n", sc->sc_dev.dv_xname);
		ifp->if_ierrors++;
		r |= 1;
		rst = 1;
	}

	if (r == 0)
		printf("%s: unexpected interrupt error: %08x\n",
			sc->sc_dev.dv_xname, why);

	if (rst) {
		printf("%s: resetting...\n", sc->sc_dev.dv_xname);
		qereset(sc);
		return (-1);
	}

	return (r);
}

/*
 * Interface ioctl handler: address assignment, up/down transitions,
 * multicast list updates and media selection.  Runs at splnet.
 */
int
qeioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	caddr_t data;
{
	struct qe_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			qeinit(sc);
			arp_ifinit(ifp, ifa);
			break;
#endif /* INET */
#ifdef NS
		case AF_NS:
		    {
			struct ns_addr *ina = &IA_SNS(ifa)->sns_addr;

			if (ns_nullhost(*ina))
				ina->x_host =
				    *(union ns_host *)LLADDR(ifp->if_sadl);
			else
				bcopy(ina->x_host.c_host, LLADDR(ifp->if_sadl),
				    sizeof(sc->sc_enaddr));
			/* Set new address. */
			qeinit(sc);
			break;
		    }
#endif /* NS */
		default:
			qeinit(sc);
			break;
		}
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			qestop(sc);
			ifp->if_flags &= ~IFF_RUNNING;

		} else if ((ifp->if_flags & IFF_UP) != 0 &&
			   (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			qeinit(sc);

		} else {
			/*
			 * Reset the interface to pick up changes in any other
			 * flags that affect hardware registers.
			 */
			qestop(sc);
			qeinit(sc);
		}
#ifdef QEDEBUG
		/* Track IFF_DEBUG so the interrupt path can print traces. */
		sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
#endif
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (cmd == SIOCADDMULTI) ?
			ether_addmulti(ifr, &sc->sc_ethercom):
			ether_delmulti(ifr, &sc->sc_ethercom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			if (ifp->if_flags & IFF_RUNNING)
				qe_mcreset(sc);
			error = 0;
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_ifmedia, cmd);
		break;

	default:
		error = EINVAL;
		break;
	}

	splx(s);
	return (error);
}


/*
 * Initialize the hardware: set up descriptor rings, program the QEC
 * channel and MACE registers, load the station address, apply media
 * settings and the multicast filter, then mark the interface running.
 */
void
qeinit(sc)
	struct qe_softc *sc;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t cr = sc->sc_cr;
	bus_space_handle_t mr = sc->sc_mr;
	struct qec_softc *qec = sc->sc_qec;
	u_int32_t qecaddr;
	u_int8_t *ea;
	int s;

#if defined(SUN4U) || defined(__GNUC__)
	/* Reference `t' so it isn't flagged unused when the macros drop it. */
	(void)&t;
#endif
	s = splnet();

	qestop(sc);

	/*
	 * Allocate descriptor ring and buffers
	 */
	qec_meminit(&sc->sc_rb, QE_PKT_BUF_SZ);

	/* Channel registers: */
	bus_space_write_4(t, cr, QE_CRI_RXDS, (u_int32_t)sc->sc_rb.rb_rxddma);
	bus_space_write_4(t, cr, QE_CRI_TXDS, (u_int32_t)sc->sc_rb.rb_txddma);

	bus_space_write_4(t, cr, QE_CRI_RIMASK, 0);
	bus_space_write_4(t, cr, QE_CRI_TIMASK, 0);
	bus_space_write_4(t, cr, QE_CRI_QMASK, 0);
	bus_space_write_4(t, cr, QE_CRI_MMASK, QE_CR_MMASK_RXCOLL);
	bus_space_write_4(t, cr, QE_CRI_CCNT, 0);
	bus_space_write_4(t, cr, QE_CRI_PIPG, 0);

	/* Carve this channel's slice out of the QEC local buffer memory. */
	qecaddr = sc->sc_channel * qec->sc_msize;
	bus_space_write_4(t, cr, QE_CRI_RXWBUF, qecaddr);
	bus_space_write_4(t, cr, QE_CRI_RXRBUF, qecaddr);
	bus_space_write_4(t, cr, QE_CRI_TXWBUF, qecaddr + qec->sc_rsize);
	bus_space_write_4(t, cr, QE_CRI_TXRBUF, qecaddr + qec->sc_rsize);

	/* MACE registers: */
	bus_space_write_1(t, mr, QE_MRI_PHYCC, QE_MR_PHYCC_ASEL);
	bus_space_write_1(t, mr, QE_MRI_XMTFC, QE_MR_XMTFC_APADXMT);
	bus_space_write_1(t, mr, QE_MRI_RCVFC, 0);

	/*
	 * Mask MACE's receive interrupt, since we're being notified
	 * by the QEC after DMA completes.
	 */
	bus_space_write_1(t, mr, QE_MRI_IMR,
			  QE_MR_IMR_CERRM | QE_MR_IMR_RCVINTM);

	bus_space_write_1(t, mr, QE_MRI_BIUCC,
			  QE_MR_BIUCC_BSWAP | QE_MR_BIUCC_64TS);

	bus_space_write_1(t, mr, QE_MRI_FIFOFC,
			  QE_MR_FIFOCC_TXF16 | QE_MR_FIFOCC_RXF32 |
			  QE_MR_FIFOCC_RFWU | QE_MR_FIFOCC_TFWU);

	bus_space_write_1(t, mr, QE_MRI_PLSCC, QE_MR_PLSCC_TP);

	/*
	 * Station address
	 */
	ea = sc->sc_enaddr;
	bus_space_write_1(t, mr, QE_MRI_IAC,
			  QE_MR_IAC_ADDRCHG | QE_MR_IAC_PHYADDR);
	bus_space_write_multi_1(t, mr, QE_MRI_PADR, ea, 6);

	/* Apply media settings */
	qe_ifmedia_upd(ifp);

	/*
	 * Clear Logical address filter
	 */
	bus_space_write_1(t, mr, QE_MRI_IAC,
			  QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR);
	bus_space_set_multi_1(t, mr, QE_MRI_LADRF, 0, 8);
	bus_space_write_1(t, mr, QE_MRI_IAC, 0);

	/* Clear missed packet count (register cleared on read) */
	(void)bus_space_read_1(t, mr, QE_MRI_MPC);

#if 0
	/* test register: */
	bus_space_write_1(t, mr, QE_MRI_UTR, 0);
#endif

	/* Reset multicast filter */
	qe_mcreset(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	splx(s);
}

/*
 * Reset multicast filter.
 * Rebuilds the MACE logical address filter from the multicast list and
 * (re)enables the transmitter and receiver.  Promiscuous mode and
 * all-multi mode bypass the hash computation entirely.
 */
void
qe_mcreset(sc)
	struct qe_softc *sc;
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mr = sc->sc_mr;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int32_t crc;
	u_int16_t hash[4];		/* 64-bit filter as four 16-bit words */
	u_int8_t octet, maccc, *ladrp = (u_int8_t *)&hash[0];
	int i, j;

#if defined(SUN4U) || defined(__GNUC__)
	/* Reference `t' so it isn't flagged unused when the macros drop it. */
	(void)&t;
#endif

	/* We also enable transmitter & receiver here */
	maccc = QE_MR_MACCC_ENXMT | QE_MR_MACCC_ENRCV;

	if (ifp->if_flags & IFF_PROMISC) {
		/* Promiscuous: no filtering at all. */
		maccc |= QE_MR_MACCC_PROM;
		bus_space_write_1(t, mr, QE_MRI_MACCC, maccc);
		return;
	}

	if (ifp->if_flags & IFF_ALLMULTI) {
		/* Accept all multicast: set every filter bit. */
		bus_space_write_1(t, mr, QE_MRI_IAC,
				  QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR);
		bus_space_set_multi_1(t, mr, QE_MRI_LADRF, 0xff, 8);
		bus_space_write_1(t, mr, QE_MRI_IAC, 0);
		bus_space_write_1(t, mr, QE_MRI_MACCC, maccc);
		return;
	}

	hash[3] = hash[2] = hash[1] = hash[0] = 0;

	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			   ETHER_ADDR_LEN) != 0) {
			/*
			 * We must listen to a range of multicast
			 * addresses. For now, just accept all
			 * multicasts, rather than trying to set only
			 * those filter bits needed to match the range.
			 * (At this time, the only use of address
			 * ranges is for IP multicast routing, for
			 * which the range is big enough to require
			 * all bits set.)
			 */
			bus_space_write_1(t, mr, QE_MRI_IAC,
				QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR);
			bus_space_set_multi_1(t, mr, QE_MRI_LADRF, 0xff, 8);
			bus_space_write_1(t, mr, QE_MRI_IAC, 0);
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}

		/* Bitwise little-endian CRC-32 over the 6-byte address. */
		crc = 0xffffffff;

		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			octet = enm->enm_addrlo[i];

			for (j = 0; j < 8; j++) {
				if ((crc & 1) ^ (octet & 1)) {
					crc >>= 1;
					crc ^= MC_POLY_LE;
				}
				else
					crc >>= 1;
				octet >>= 1;
			}
		}

		/* Keep the top 6 CRC bits as the filter bit index. */
		crc >>= 26;
		hash[crc >> 4] |= 1 << (crc & 0xf);
		ETHER_NEXT_MULTI(step, enm);
	}

	/* Load the computed filter and enable tx/rx. */
	bus_space_write_1(t, mr, QE_MRI_IAC,
			  QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR);
	bus_space_write_multi_1(t, mr, QE_MRI_LADRF, ladrp, 8);
	bus_space_write_1(t, mr, QE_MRI_IAC, 0);
	bus_space_write_1(t, mr, QE_MRI_MACCC, maccc);
}

/*
 * Get current media settings.
 * Reports the active port (TP/AUI) and link status from the MACE
 * PLSCC/PHYCC registers.
 */
void
qe_ifmedia_sts(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct qe_softc *sc = ifp->if_softc;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mr = sc->sc_mr;
	u_int8_t v;

#if defined(SUN4U) || defined(__GNUC__)
	/* Reference `t' so it isn't flagged unused when the macros drop it. */
	(void)&t;
#endif
	/* NOTE(review): this read is immediately superseded by the read in
	 * the switch below; it looks redundant — confirm before removing. */
	v = bus_space_read_1(t, mr, QE_MRI_PLSCC);

	switch (bus_space_read_1(t, mr, QE_MRI_PLSCC) & QE_MR_PLSCC_PORTMASK) {
	case QE_MR_PLSCC_TP:
		ifmr->ifm_active = IFM_ETHER | IFM_10_T;
		break;
	case QE_MR_PLSCC_AUI:
		ifmr->ifm_active = IFM_ETHER | IFM_10_5;
		break;
	case QE_MR_PLSCC_GPSI:
	case QE_MR_PLSCC_DAI:
		/* not supported; leave ifm_active unset */
		break;
	}

	/* LNKFL set means link failure, i.e. not active. */
	v = bus_space_read_1(t, mr, QE_MRI_PHYCC);
	ifmr->ifm_status |= IFM_AVALID;
	if ((v & QE_MR_PHYCC_LNKFL) != 0)
		ifmr->ifm_status &= ~IFM_ACTIVE;
	else
		ifmr->ifm_status |= IFM_ACTIVE;

}

/*
 * Set media options.
1265 */ 1266 int 1267 qe_ifmedia_upd(ifp) 1268 struct ifnet *ifp; 1269 { 1270 struct qe_softc *sc = ifp->if_softc; 1271 struct ifmedia *ifm = &sc->sc_ifmedia; 1272 bus_space_tag_t t = sc->sc_bustag; 1273 bus_space_handle_t mr = sc->sc_mr; 1274 int newmedia = ifm->ifm_media; 1275 u_int8_t plscc, phycc; 1276 1277 #if defined(SUN4U) || defined(__GNUC__) 1278 (void)&t; 1279 #endif 1280 if (IFM_TYPE(newmedia) != IFM_ETHER) 1281 return (EINVAL); 1282 1283 plscc = bus_space_read_1(t, mr, QE_MRI_PLSCC) & ~QE_MR_PLSCC_PORTMASK; 1284 phycc = bus_space_read_1(t, mr, QE_MRI_PHYCC) & ~QE_MR_PHYCC_ASEL; 1285 1286 if (IFM_SUBTYPE(newmedia) == IFM_AUTO) 1287 phycc |= QE_MR_PHYCC_ASEL; 1288 else if (IFM_SUBTYPE(newmedia) == IFM_10_T) 1289 plscc |= QE_MR_PLSCC_TP; 1290 else if (IFM_SUBTYPE(newmedia) == IFM_10_5) 1291 plscc |= QE_MR_PLSCC_AUI; 1292 1293 bus_space_write_1(t, mr, QE_MRI_PLSCC, plscc); 1294 bus_space_write_1(t, mr, QE_MRI_PHYCC, phycc); 1295 1296 return (0); 1297 } 1298