1 /* $NetBSD: qe.c,v 1.44 2008/04/05 18:35:32 cegger Exp $ */ 2 3 /*- 4 * Copyright (c) 1999 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Paul Kranenburg. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. All advertising materials mentioning features or use of this software 19 * must display the following acknowledgement: 20 * This product includes software developed by the NetBSD 21 * Foundation, Inc. and its contributors. 22 * 4. Neither the name of The NetBSD Foundation nor the names of its 23 * contributors may be used to endorse or promote products derived 24 * from this software without specific prior written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 29 * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 36 * POSSIBILITY OF SUCH DAMAGE. 37 */ 38 39 /* 40 * Copyright (c) 1998 Jason L. Wright. 41 * All rights reserved. 42 * 43 * Redistribution and use in source and binary forms, with or without 44 * modification, are permitted provided that the following conditions 45 * are met: 46 * 1. Redistributions of source code must retain the above copyright 47 * notice, this list of conditions and the following disclaimer. 48 * 2. Redistributions in binary form must reproduce the above copyright 49 * notice, this list of conditions and the following disclaimer in the 50 * documentation and/or other materials provided with the distribution. 51 * 3. The name of the authors may not be used to endorse or promote products 52 * derived from this software without specific prior written permission. 53 * 54 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR 55 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 56 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
57 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, 58 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 59 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 60 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 61 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 62 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 63 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 64 */ 65 66 /* 67 * Driver for the SBus qec+qe QuadEthernet board. 68 * 69 * This driver was written using the AMD MACE Am79C940 documentation, some 70 * ideas gleaned from the S/Linux driver for this card, Solaris header files, 71 * and a loan of a card from Paul Southworth of the Internet Engineering 72 * Group (www.ieng.com). 73 */ 74 75 #include <sys/cdefs.h> 76 __KERNEL_RCSID(0, "$NetBSD: qe.c,v 1.44 2008/04/05 18:35:32 cegger Exp $"); 77 78 #define QEDEBUG 79 80 #include "opt_ddb.h" 81 #include "opt_inet.h" 82 #include "bpfilter.h" 83 #include "rnd.h" 84 85 #include <sys/param.h> 86 #include <sys/systm.h> 87 #include <sys/kernel.h> 88 #include <sys/errno.h> 89 #include <sys/ioctl.h> 90 #include <sys/mbuf.h> 91 #include <sys/socket.h> 92 #include <sys/syslog.h> 93 #include <sys/device.h> 94 #include <sys/malloc.h> 95 #if NRND > 0 96 #include <sys/rnd.h> 97 #endif 98 99 #include <net/if.h> 100 #include <net/if_dl.h> 101 #include <net/if_types.h> 102 #include <net/netisr.h> 103 #include <net/if_media.h> 104 #include <net/if_ether.h> 105 106 #ifdef INET 107 #include <netinet/in.h> 108 #include <netinet/if_inarp.h> 109 #include <netinet/in_systm.h> 110 #include <netinet/in_var.h> 111 #include <netinet/ip.h> 112 #endif 113 114 115 #if NBPFILTER > 0 116 #include <net/bpf.h> 117 #include <net/bpfdesc.h> 118 #endif 119 120 #include <sys/bus.h> 121 #include <sys/intr.h> 122 #include <machine/autoconf.h> 123 124 #include <dev/sbus/sbusvar.h> 125 #include 
<dev/sbus/qecreg.h>
#include <dev/sbus/qecvar.h>
#include <dev/sbus/qereg.h>

/*
 * Per-channel softc for one MACE (Am79C940) Ethernet channel sitting
 * behind a QEC SBus controller.
 */
struct qe_softc {
	struct device sc_dev;		/* base device */
	struct sbusdev sc_sd;		/* sbus device */
	bus_space_tag_t sc_bustag;	/* bus & DMA tags */
	bus_dma_tag_t sc_dmatag;
	bus_dmamap_t sc_dmamap;		/* maps the descriptor/buffer area */
	struct ethercom sc_ethercom;
	struct ifmedia sc_ifmedia;	/* interface media */

	struct qec_softc *sc_qec;	/* QEC parent */

	bus_space_handle_t sc_qr;	/* QEC registers */
	bus_space_handle_t sc_mr;	/* MACE registers */
	bus_space_handle_t sc_cr;	/* channel registers */

	int sc_channel;			/* channel number */
	u_int sc_rev;			/* board revision */

	int sc_burst;			/* DMA burst size, inherited from QEC */

	struct qec_ring	sc_rb;		/* Packet Ring Buffer */

	/* MAC address */
	u_int8_t sc_enaddr[6];

#ifdef QEDEBUG
	int sc_debug;			/* toggled via IFF_DEBUG in qeioctl() */
#endif
};

int	qematch(struct device *, struct cfdata *, void *);
void	qeattach(struct device *, struct device *, void *);

void	qeinit(struct qe_softc *);
void	qestart(struct ifnet *);
void	qestop(struct qe_softc *);
void	qewatchdog(struct ifnet *);
int	qeioctl(struct ifnet *, u_long, void *);
void	qereset(struct qe_softc *);

int	qeintr(void *);
int	qe_eint(struct qe_softc *, u_int32_t);
int	qe_rint(struct qe_softc *);
int	qe_tint(struct qe_softc *);
void	qe_mcreset(struct qe_softc *);

static int	qe_put(struct qe_softc *, int, struct mbuf *);
static void	qe_read(struct qe_softc *, int, int);
static struct mbuf	*qe_get(struct qe_softc *, int, int);

/* ifmedia callbacks */
void	qe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
int	qe_ifmedia_upd(struct ifnet *);

CFATTACH_DECL(qe, sizeof(struct qe_softc),
    qematch, qeattach, NULL, NULL);

/*
 * Autoconf match: accept the node when its PROM name equals our
 * driver name.
 */
int
qematch(parent, cf, aux)
	struct device *parent;
	struct cfdata *cf;
	void *aux;
{
	struct sbus_attach_args *sa = aux;

	return (strcmp(cf->cf_name, sa->sa_name) == 0);
}

/*
 * Attach one qe channel below the qec: map the channel and MACE
 * register banks, read PROM properties, set up the shared DMA area
 * for descriptor rings and packet buffers, and attach the network
 * interface.
 */
void
qeattach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct sbus_attach_args *sa = aux;
	struct qec_softc *qec = (struct qec_softc *)parent;
	struct qe_softc *sc = (struct qe_softc *)self;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int node = sa->sa_node;
	bus_dma_tag_t dmatag = sa->sa_dmatag;
	bus_dma_segment_t seg;
	bus_size_t size;
	int rseg, error;

	/* Need two register sets: channel (CSR) and MACE. */
	if (sa->sa_nreg < 2) {
		printf("%s: only %d register sets\n",
			device_xname(self), sa->sa_nreg);
		return;
	}

	/* Register set 0: per-channel registers. */
	if (bus_space_map(sa->sa_bustag,
			  (bus_addr_t)BUS_ADDR(
				sa->sa_reg[0].oa_space,
				sa->sa_reg[0].oa_base),
			  (bus_size_t)sa->sa_reg[0].oa_size,
			  0, &sc->sc_cr) != 0) {
		aprint_error_dev(self, "cannot map registers\n");
		return;
	}

	/* Register set 1: MACE chip registers. */
	if (bus_space_map(sa->sa_bustag,
			  (bus_addr_t)BUS_ADDR(
				sa->sa_reg[1].oa_space,
				sa->sa_reg[1].oa_base),
			  (bus_size_t)sa->sa_reg[1].oa_size,
			  0, &sc->sc_mr) != 0) {
		aprint_error_dev(self, "cannot map registers\n");
		return;
	}

	sc->sc_rev = prom_getpropint(node, "mace-version", -1);
	printf(" rev %x", sc->sc_rev);

	sc->sc_bustag = sa->sa_bustag;
	sc->sc_dmatag = sa->sa_dmatag;
	sc->sc_qec = qec;
	sc->sc_qr = qec->sc_regs;

	sc->sc_channel = prom_getpropint(node, "channel#", -1);
	sc->sc_burst = qec->sc_burst;

	qestop(sc);

	/* Note: no interrupt level passed */
	(void)bus_intr_establish(sa->sa_bustag, 0, IPL_NET, qeintr, sc);
	prom_getether(node, sc->sc_enaddr);

	/*
	 * Allocate descriptor ring and buffers.
	 */

	/* for now, allocate as many bufs as there are ring descriptors */
	sc->sc_rb.rb_ntbuf = QEC_XD_RING_MAXSIZE;
	sc->sc_rb.rb_nrbuf = QEC_XD_RING_MAXSIZE;

	/* One contiguous chunk: TX ring + RX ring + TX bufs + RX bufs. */
	size =	QEC_XD_RING_MAXSIZE * sizeof(struct qec_xd) +
		QEC_XD_RING_MAXSIZE * sizeof(struct qec_xd) +
		sc->sc_rb.rb_ntbuf * QE_PKT_BUF_SZ +
		sc->sc_rb.rb_nrbuf * QE_PKT_BUF_SZ;

	/* Get a DMA handle */
	if ((error = bus_dmamap_create(dmatag, size, 1, size, 0,
					BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
		aprint_error_dev(self, "DMA map create error %d\n",
			error);
		return;
	}

	/* Allocate DMA buffer */
	if ((error = bus_dmamem_alloc(dmatag, size, 0, 0,
				      &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(self, "DMA buffer alloc error %d\n",
			error);
		return;
	}

	/* Map DMA buffer in CPU addressable space */
	if ((error = bus_dmamem_map(dmatag, &seg, rseg, size,
				    &sc->sc_rb.rb_membase,
				    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(self, "DMA buffer map error %d\n",
			error);
		bus_dmamem_free(dmatag, &seg, rseg);
		return;
	}

	/* Load the buffer */
	if ((error = bus_dmamap_load(dmatag, sc->sc_dmamap,
				     sc->sc_rb.rb_membase, size, NULL,
				     BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(self, "DMA buffer map load error %d\n",
			error);
		bus_dmamem_unmap(dmatag, sc->sc_rb.rb_membase, size);
		bus_dmamem_free(dmatag, &seg, rseg);
		return;
	}
	sc->sc_rb.rb_dmabase = sc->sc_dmamap->dm_segs[0].ds_addr;

	/* Initialize media properties */
	ifmedia_init(&sc->sc_ifmedia, 0, qe_ifmedia_upd, qe_ifmedia_sts);
	ifmedia_add(&sc->sc_ifmedia,
		    IFM_MAKEWORD(IFM_ETHER,IFM_10_T,0,0),
		    0, NULL);
	ifmedia_add(&sc->sc_ifmedia,
		    IFM_MAKEWORD(IFM_ETHER,IFM_10_5,0,0),
		    0, NULL);
	ifmedia_add(&sc->sc_ifmedia,
		    IFM_MAKEWORD(IFM_ETHER,IFM_AUTO,0,0),
		    0, NULL);
	ifmedia_set(&sc->sc_ifmedia, IFM_ETHER|IFM_AUTO);

	memcpy(ifp->if_xname, device_xname(&sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_start = qestart;
	ifp->if_ioctl = qeioctl;
	ifp->if_watchdog = qewatchdog;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS |
	    IFF_MULTICAST;
	IFQ_SET_READY(&ifp->if_snd);

	/* Attach the interface. */
	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);

	printf(" address %s\n", ether_sprintf(sc->sc_enaddr));
}

/*
 * Pull data off an interface.
 * Len is the length of data, with local net header stripped.
 * We copy the data into mbufs. When full cluster sized units are present,
 * we copy into clusters.
 */
static inline struct mbuf *
qe_get(sc, idx, totlen)
	struct qe_softc *sc;
	int idx, totlen;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m;
	struct mbuf *top, **mp;
	int len, pad, boff = 0;
	void *bp;

	/* Receive buffers are fixed-size slots in one contiguous area. */
	bp = (char *)sc->sc_rb.rb_rxbuf + (idx % sc->sc_rb.rb_nrbuf) * QE_PKT_BUF_SZ;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	/* Offset data so the payload after the Ethernet header is aligned. */
	pad = ALIGN(sizeof(struct ether_header)) - sizeof(struct ether_header);
	m->m_data += pad;
	len = MHLEN - pad;
	top = NULL;
	mp = &top;

	while (totlen > 0) {
		if (top) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				/* Out of mbufs: drop what we gathered. */
				m_freem(top);
				return (NULL);
			}
			len = MLEN;
		}
		if (top && totlen >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if (m->m_flags & M_EXT)
				len = MCLBYTES;
		}
		m->m_len = len = min(totlen, len);
		memcpy(mtod(m, void *), (char *)bp + boff, len);
		boff += len;
		totlen -= len;
		*mp = m;
		mp = &m->m_next;
	}

	return (top);
}

/*
 * Routine to copy from mbuf chain to transmit buffer in
 * network buffer memory.
 */
inline int
qe_put(sc, idx, m)
	struct qe_softc *sc;
	int idx;
	struct mbuf *m;
{
	struct mbuf *n;
	int len, tlen = 0, boff = 0;
	void *bp;

	/* Transmit buffers are fixed-size slots in one contiguous area. */
	bp = (char *)sc->sc_rb.rb_txbuf + (idx % sc->sc_rb.rb_ntbuf) * QE_PKT_BUF_SZ;

	/* Copy each mbuf's data into the slot, freeing the chain as we go. */
	for (; m; m = n) {
		len = m->m_len;
		if (len == 0) {
			MFREE(m, n);
			continue;
		}
		memcpy((char *)bp + boff, mtod(m, void *), len);
		boff += len;
		tlen += len;
		MFREE(m, n);
	}
	/* Returns the total number of bytes copied. */
	return (tlen);
}

/*
 * Pass a packet to the higher levels.
 */
inline void
qe_read(sc, idx, len)
	struct qe_softc *sc;
	int idx, len;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m;

	/* Sanity-check the hardware-reported frame length. */
	if (len <= sizeof(struct ether_header) ||
	    len > ETHERMTU + sizeof(struct ether_header)) {

		printf("%s: invalid packet size %d; dropping\n",
			ifp->if_xname, len);

		ifp->if_ierrors++;
		return;
	}

	/*
	 * Pull packet off interface.
	 */
	m = qe_get(sc, idx, len);
	if (m == NULL) {
		ifp->if_ierrors++;
		return;
	}
	ifp->if_ipackets++;

#if NBPFILTER > 0
	/*
	 * Check if there's a BPF listener on this interface.
	 * If so, hand off the raw packet to BPF.
	 */
	if (ifp->if_bpf)
		bpf_mtap(ifp->if_bpf, m);
#endif
	/* Pass the packet up. */
	(*ifp->if_input)(ifp, m);
}

/*
 * Start output on interface.
 * We make two assumptions here:
 *  1) that the current priority is set to splnet _before_ this code
 *     is called *and* is returned to the appropriate priority after
 *     return
 *  2) that the IFF_OACTIVE flag is checked before this code is called
 *     (i.e. that the output part of the interface is idle)
 */
void
qestart(ifp)
	struct ifnet *ifp;
{
	struct qe_softc *sc = (struct qe_softc *)ifp->if_softc;
	struct qec_xd *txd = sc->sc_rb.rb_txd;
	struct mbuf *m;
	unsigned int bix, len;
	unsigned int ntbuf = sc->sc_rb.rb_ntbuf;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	bix = sc->sc_rb.rb_tdhead;

	for (;;) {
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == 0)
			break;

#if NBPFILTER > 0
		/*
		 * If BPF is listening on this interface, let it see the
		 * packet before we commit it to the wire.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif

		/*
		 * Copy the mbuf chain into the transmit buffer.
		 */
		len = qe_put(sc, bix, m);

		/*
		 * Initialize transmit registers and start transmission.
		 * Hand the descriptor to the hardware (QEC_XD_OWN) and
		 * poke the channel's wakeup bit.
		 */
		txd[bix].xd_flags = QEC_XD_OWN | QEC_XD_SOP | QEC_XD_EOP |
		    (len & QEC_XD_LENGTH);
		bus_space_write_4(sc->sc_bustag, sc->sc_cr, QE_CRI_CTRL,
				  QE_CR_CTRL_TWAKEUP);

		if (++bix == QEC_XD_RING_MAXSIZE)
			bix = 0;

		/* Ring full: stall output until qe_tint() frees slots. */
		if (++sc->sc_rb.rb_td_nbusy == ntbuf) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
	}

	sc->sc_rb.rb_tdhead = bix;
}

/*
 * Stop the MACE and reset the channel, spinning (bounded) until each
 * self-clearing reset bit drops.
 */
void
qestop(sc)
	struct qe_softc *sc;
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mr = sc->sc_mr;
	bus_space_handle_t cr = sc->sc_cr;
	int n;

#if defined(SUN4U) || defined(__GNUC__)
	(void)&t;
#endif
	/* Stop the schwurst */
	bus_space_write_1(t, mr, QE_MRI_BIUCC, QE_MR_BIUCC_SWRST);
	for (n = 200; n > 0; n--) {
		if ((bus_space_read_1(t, mr, QE_MRI_BIUCC) &
			QE_MR_BIUCC_SWRST) == 0)
			break;
		DELAY(20);
	}

	/* then reset */
	bus_space_write_4(t, cr, QE_CRI_CTRL, QE_CR_CTRL_RESET);
	for (n = 200; n > 0; n--) {
		if ((bus_space_read_4(t, cr, QE_CRI_CTRL) &
			QE_CR_CTRL_RESET) == 0)
			break;
		DELAY(20);
	}
}

/*
 * Reset interface.
 */
void
qereset(sc)
	struct qe_softc *sc;
{
	int s;

	s = splnet();
	qestop(sc);
	qeinit(sc);
	splx(s);
}

/*
 * Watchdog: transmission timed out; count the error and reset.
 */
void
qewatchdog(ifp)
	struct ifnet *ifp;
{
	struct qe_softc *sc = ifp->if_softc;

	log(LOG_ERR, "%s: device timeout\n", device_xname(&sc->sc_dev));
	ifp->if_oerrors++;

	qereset(sc);
}

/*
 * Interrupt dispatch.
 */
int
qeintr(arg)
	void *arg;
{
	struct qe_softc *sc = (struct qe_softc *)arg;
	bus_space_tag_t t = sc->sc_bustag;
	u_int32_t qecstat, qestat;
	int r = 0;

#if defined(SUN4U) || defined(__GNUC__)
	(void)&t;
#endif
	/* Read QEC status and channel status */
	qecstat = bus_space_read_4(t, sc->sc_qr, QEC_QRI_STAT);
#ifdef QEDEBUG
	if (sc->sc_debug) {
		printf("qe%d: intr: qecstat=%x\n", sc->sc_channel, qecstat);
	}
#endif

	/* Filter out status for this channel: 4 status bits per channel. */
	qecstat = qecstat >> (4 * sc->sc_channel);
	if ((qecstat & 0xf) == 0)
		return (r);	/* not our interrupt */

	qestat = bus_space_read_4(t, sc->sc_cr, QE_CRI_STAT);

#ifdef QEDEBUG
	if (sc->sc_debug) {
		char bits[64]; int i;
		bus_space_tag_t t1 = sc->sc_bustag;
		bus_space_handle_t mr = sc->sc_mr;

		printf("qe%d: intr: qestat=%s\n", sc->sc_channel,
			bitmask_snprintf(qestat, QE_CR_STAT_BITS, bits, sizeof(bits)));

		printf("MACE registers:\n");
		for (i = 0 ; i < 32; i++) {
			printf("  m[%d]=%x,", i, bus_space_read_1(t1, mr, i));
			if (((i+1) & 7) == 0)
				printf("\n");
		}
	}
#endif

	/* Handle error conditions first; qe_eint() may reset the chip. */
	if (qestat & QE_CR_STAT_ALLERRORS) {
#ifdef QEDEBUG
		if (sc->sc_debug) {
			char bits[64];
			printf("qe%d: eint: qestat=%s\n", sc->sc_channel,
				bitmask_snprintf(qestat, QE_CR_STAT_BITS, bits,
				sizeof(bits)));
		}
#endif
		r |= qe_eint(sc, qestat);
		if (r == -1)
			return (1);	/* chip was reset; nothing more to do */
	}

	if (qestat & QE_CR_STAT_TXIRQ)
		r |= qe_tint(sc);

	if (qestat & QE_CR_STAT_RXIRQ)
		r |= qe_rint(sc);

	return (r);
}

/*
 * Transmit interrupt.
 */
int
qe_tint(sc)
	struct qe_softc *sc;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	unsigned int bix, txflags;

	bix = sc->sc_rb.rb_tdtail;

	/* Reclaim every descriptor the hardware has finished with. */
	for (;;) {
		if (sc->sc_rb.rb_td_nbusy <= 0)
			break;

		txflags = sc->sc_rb.rb_txd[bix].xd_flags;

		/* Still owned by the hardware: stop here. */
		if (txflags & QEC_XD_OWN)
			break;

		ifp->if_flags &= ~IFF_OACTIVE;
		ifp->if_opackets++;

		if (++bix == QEC_XD_RING_MAXSIZE)
			bix = 0;

		--sc->sc_rb.rb_td_nbusy;
	}

	sc->sc_rb.rb_tdtail = bix;

	/* Freed slots may allow queued packets out. */
	qestart(ifp);

	if (sc->sc_rb.rb_td_nbusy == 0)
		ifp->if_timer = 0;

	return (1);
}

/*
 * Receive interrupt.
 */
int
qe_rint(sc)
	struct qe_softc *sc;
{
	struct qec_xd *xd = sc->sc_rb.rb_rxd;
	unsigned int bix, len;
	unsigned int nrbuf = sc->sc_rb.rb_nrbuf;
#ifdef QEDEBUG
	int npackets = 0;
#endif

	bix = sc->sc_rb.rb_rdtail;

	/*
	 * Process all buffers with valid data.
	 */
	for (;;) {
		len = xd[bix].xd_flags;
		if (len & QEC_XD_OWN)
			break;

#ifdef QEDEBUG
		npackets++;
#endif

		len &= QEC_XD_LENGTH;
		/* Strip the trailing 4 bytes (FCS) from the frame length. */
		len -= 4;
		qe_read(sc, bix, len);

		/* Hand the slot nrbuf entries ahead back to the hardware. */
		xd[(bix+nrbuf) % QEC_XD_RING_MAXSIZE].xd_flags =
			QEC_XD_OWN | (QE_PKT_BUF_SZ & QEC_XD_LENGTH);

		if (++bix == QEC_XD_RING_MAXSIZE)
			bix = 0;
	}
#ifdef QEDEBUG
	if (npackets == 0 && sc->sc_debug)
		printf("%s: rint: no packets; rb index %d; status 0x%x\n",
			device_xname(&sc->sc_dev), bix, len);
#endif

	sc->sc_rb.rb_rdtail = bix;

	return (1);
}

/*
 * Error interrupt.
 */
int
qe_eint(sc, why)
	struct qe_softc *sc;
	u_int32_t why;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	/* r: any condition recognized; rst: condition requires a reset */
	int r = 0, rst = 0;

	if (why & QE_CR_STAT_EDEFER) {
		printf("%s: excessive tx defers.\n", device_xname(&sc->sc_dev));
		r |= 1;
		ifp->if_oerrors++;
	}

	if (why & QE_CR_STAT_CLOSS) {
		printf("%s: no carrier, link down?\n", device_xname(&sc->sc_dev));
		ifp->if_oerrors++;
		r |= 1;
	}

	if (why & QE_CR_STAT_ERETRIES) {
		printf("%s: excessive tx retries\n", device_xname(&sc->sc_dev));
		ifp->if_oerrors++;
		r |= 1;
		rst = 1;
	}


	if (why & QE_CR_STAT_LCOLL) {
		printf("%s: late tx transmission\n", device_xname(&sc->sc_dev));
		ifp->if_oerrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_FUFLOW) {
		printf("%s: tx fifo underflow\n", device_xname(&sc->sc_dev));
		ifp->if_oerrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_JERROR) {
		printf("%s: jabber seen\n", device_xname(&sc->sc_dev));
		r |= 1;
	}

	if (why & QE_CR_STAT_BERROR) {
		printf("%s: babble seen\n", device_xname(&sc->sc_dev));
		r |= 1;
	}

	/* The 8-bit hardware counters overflowed; credit 256 events each. */
	if (why & QE_CR_STAT_TCCOFLOW) {
		ifp->if_collisions += 256;
		ifp->if_oerrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_TXDERROR) {
		printf("%s: tx descriptor is bad\n", device_xname(&sc->sc_dev));
		rst = 1;
		r |= 1;
	}

	if (why & QE_CR_STAT_TXLERR) {
		printf("%s: tx late error\n", device_xname(&sc->sc_dev));
		ifp->if_oerrors++;
		rst = 1;
		r |= 1;
	}

	if (why & QE_CR_STAT_TXPERR) {
		printf("%s: tx DMA parity error\n", device_xname(&sc->sc_dev));
		ifp->if_oerrors++;
		rst = 1;
		r |= 1;
	}

	if (why & QE_CR_STAT_TXSERR) {
		printf("%s: tx DMA sbus error ack\n", device_xname(&sc->sc_dev));
		ifp->if_oerrors++;
		rst = 1;
		r |= 1;
	}

	if (why & QE_CR_STAT_RCCOFLOW) {
		ifp->if_collisions += 256;
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_RUOFLOW) {
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_MCOFLOW) {
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_RXFOFLOW) {
		printf("%s: rx fifo overflow\n", device_xname(&sc->sc_dev));
		ifp->if_ierrors++;
		r |= 1;
	}

	if (why & QE_CR_STAT_RLCOLL) {
		printf("%s: rx late collision\n", device_xname(&sc->sc_dev));
		ifp->if_ierrors++;
		ifp->if_collisions++;
		r |= 1;
	}

	if (why & QE_CR_STAT_FCOFLOW) {
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_CECOFLOW) {
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_RXDROP) {
		printf("%s: rx packet dropped\n", device_xname(&sc->sc_dev));
		ifp->if_ierrors++;
		r |= 1;
	}

	if (why & QE_CR_STAT_RXSMALL) {
		printf("%s: rx buffer too small\n", device_xname(&sc->sc_dev));
		ifp->if_ierrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_RXLERR) {
		printf("%s: rx late error\n", device_xname(&sc->sc_dev));
		ifp->if_ierrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_RXPERR) {
		printf("%s: rx DMA parity error\n", device_xname(&sc->sc_dev));
		ifp->if_ierrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_RXSERR) {
		printf("%s: rx DMA sbus error ack\n", device_xname(&sc->sc_dev));
		ifp->if_ierrors++;
		r |= 1;
		rst = 1;
	}

	if (r == 0)
		aprint_error_dev(&sc->sc_dev, "unexpected interrupt error: %08x\n",
			why);

	if (rst) {
		printf("%s: resetting...\n", device_xname(&sc->sc_dev));
		qereset(sc);
		/* -1 tells qeintr() the chip was reset; skip tx/rx handling. */
		return (-1);
	}

	return (r);
}

/*
 * ioctl handler: address assignment, up/down transitions, multicast
 * list changes and media selection.  Runs at splnet.
 */
int
qeioctl(ifp, cmd, data)
	struct ifnet *ifp;
	u_long cmd;
	void *data;
{
	struct qe_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			qeinit(sc);
			arp_ifinit(ifp, ifa);
			break;
#endif /* INET */
		default:
			qeinit(sc);
			break;
		}
		break;

	case SIOCSIFFLAGS:
		if ((ifp->if_flags & IFF_UP) == 0 &&
		    (ifp->if_flags & IFF_RUNNING) != 0) {
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			qestop(sc);
			ifp->if_flags &= ~IFF_RUNNING;

		} else if ((ifp->if_flags & IFF_UP) != 0 &&
			   (ifp->if_flags & IFF_RUNNING) == 0) {
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			qeinit(sc);

		} else {
			/*
			 * Reset the interface to pick up changes in any other
			 * flags that affect hardware registers.
			 */
			qestop(sc);
			qeinit(sc);
		}
#ifdef QEDEBUG
		sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
#endif
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			if (ifp->if_flags & IFF_RUNNING)
				qe_mcreset(sc);
			error = 0;
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_ifmedia, cmd);
		break;

	default:
		error = EINVAL;
		break;
	}

	splx(s);
	return (error);
}


/*
 * (Re)initialize the channel and MACE and mark the interface running.
 */
void
qeinit(sc)
	struct qe_softc *sc;
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t cr = sc->sc_cr;
	bus_space_handle_t mr = sc->sc_mr;
	struct qec_softc *qec = sc->sc_qec;
	u_int32_t qecaddr;
	u_int8_t *ea;
	int s;

#if defined(SUN4U) || defined(__GNUC__)
	(void)&t;
#endif
	s = splnet();

	qestop(sc);

	/*
	 * Allocate descriptor ring and buffers
	 */
	qec_meminit(&sc->sc_rb, QE_PKT_BUF_SZ);

	/* Channel registers: */
	bus_space_write_4(t, cr, QE_CRI_RXDS, (u_int32_t)sc->sc_rb.rb_rxddma);
	bus_space_write_4(t, cr, QE_CRI_TXDS, (u_int32_t)sc->sc_rb.rb_txddma);

	bus_space_write_4(t, cr, QE_CRI_RIMASK, 0);
	bus_space_write_4(t, cr, QE_CRI_TIMASK, 0);
	bus_space_write_4(t, cr, QE_CRI_QMASK, 0);
	bus_space_write_4(t, cr, QE_CRI_MMASK, QE_CR_MMASK_RXCOLL);
	bus_space_write_4(t, cr, QE_CRI_CCNT, 0);
	bus_space_write_4(t, cr, QE_CRI_PIPG, 0);

	/* Carve this channel's share out of the QEC's local buffer memory. */
	qecaddr = sc->sc_channel * qec->sc_msize;
	bus_space_write_4(t, cr, QE_CRI_RXWBUF, qecaddr);
	bus_space_write_4(t, cr, QE_CRI_RXRBUF, qecaddr);
	bus_space_write_4(t, cr, QE_CRI_TXWBUF, qecaddr + qec->sc_rsize);
	bus_space_write_4(t, cr, QE_CRI_TXRBUF, qecaddr + qec->sc_rsize);

	/* MACE registers: */
	bus_space_write_1(t, mr, QE_MRI_PHYCC, QE_MR_PHYCC_ASEL);
	bus_space_write_1(t, mr, QE_MRI_XMTFC, QE_MR_XMTFC_APADXMT);
	bus_space_write_1(t, mr, QE_MRI_RCVFC, 0);

	/*
	 * Mask MACE's receive interrupt, since we're being notified
	 * by the QEC after DMA completes.
	 */
	bus_space_write_1(t, mr, QE_MRI_IMR,
			  QE_MR_IMR_CERRM | QE_MR_IMR_RCVINTM);

	bus_space_write_1(t, mr, QE_MRI_BIUCC,
			  QE_MR_BIUCC_BSWAP | QE_MR_BIUCC_64TS);

	bus_space_write_1(t, mr, QE_MRI_FIFOFC,
			  QE_MR_FIFOCC_TXF16 | QE_MR_FIFOCC_RXF32 |
			  QE_MR_FIFOCC_RFWU | QE_MR_FIFOCC_TFWU);

	bus_space_write_1(t, mr, QE_MRI_PLSCC, QE_MR_PLSCC_TP);

	/*
	 * Station address
	 */
	ea = sc->sc_enaddr;
	bus_space_write_1(t, mr, QE_MRI_IAC,
			  QE_MR_IAC_ADDRCHG | QE_MR_IAC_PHYADDR);
	bus_space_write_multi_1(t, mr, QE_MRI_PADR, ea, 6);

	/* Apply media settings */
	qe_ifmedia_upd(ifp);

	/*
	 * Clear Logical address filter
	 */
	bus_space_write_1(t, mr, QE_MRI_IAC,
			  QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR);
	bus_space_set_multi_1(t, mr, QE_MRI_LADRF, 0, 8);
	bus_space_write_1(t, mr, QE_MRI_IAC, 0);

	/* Clear missed packet count (register cleared on read) */
	(void)bus_space_read_1(t, mr, QE_MRI_MPC);

#if 0
	/* test register: */
	bus_space_write_1(t, mr, QE_MRI_UTR, 0);
#endif

	/* Reset multicast filter */
	qe_mcreset(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	splx(s);
}

/*
 * Reset multicast filter.
 */
void
qe_mcreset(sc)
	struct qe_softc *sc;
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mr = sc->sc_mr;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int32_t crc;
	u_int16_t hash[4];
	u_int8_t octet, maccc, *ladrp = (u_int8_t *)&hash[0];
	int i, j;

#if defined(SUN4U) || defined(__GNUC__)
	(void)&t;
#endif

	/* We also enable transmitter & receiver here */
	maccc = QE_MR_MACCC_ENXMT | QE_MR_MACCC_ENRCV;

	if (ifp->if_flags & IFF_PROMISC) {
		maccc |= QE_MR_MACCC_PROM;
		bus_space_write_1(t, mr, QE_MRI_MACCC, maccc);
		return;
	}

	if (ifp->if_flags & IFF_ALLMULTI) {
		/* Accept all multicast: set every filter bit. */
		bus_space_write_1(t, mr, QE_MRI_IAC,
				  QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR);
		bus_space_set_multi_1(t, mr, QE_MRI_LADRF, 0xff, 8);
		bus_space_write_1(t, mr, QE_MRI_IAC, 0);
		bus_space_write_1(t, mr, QE_MRI_MACCC, maccc);
		return;
	}

	hash[3] = hash[2] = hash[1] = hash[0] = 0;

	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
			   ETHER_ADDR_LEN) != 0) {
			/*
			 * We must listen to a range of multicast
			 * addresses. For now, just accept all
			 * multicasts, rather than trying to set only
			 * those filter bits needed to match the range.
			 * (At this time, the only use of address
			 * ranges is for IP multicast routing, for
			 * which the range is big enough to require
			 * all bits set.)
			 */
			bus_space_write_1(t, mr, QE_MRI_IAC,
				QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR);
			bus_space_set_multi_1(t, mr, QE_MRI_LADRF, 0xff, 8);
			bus_space_write_1(t, mr, QE_MRI_IAC, 0);
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}

		/* Bitwise little-endian CRC-32 over the station address. */
		crc = 0xffffffff;

		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			octet = enm->enm_addrlo[i];

			for (j = 0; j < 8; j++) {
				if ((crc & 1) ^ (octet & 1)) {
					crc >>= 1;
					crc ^= MC_POLY_LE;
				}
				else
					crc >>= 1;
				octet >>= 1;
			}
		}

		/* Use the top 6 CRC bits to pick one of 64 filter bits. */
		crc >>= 26;
		hash[crc >> 4] |= 1 << (crc & 0xf);
		ETHER_NEXT_MULTI(step, enm);
	}

	bus_space_write_1(t, mr, QE_MRI_IAC,
			  QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR);
	bus_space_write_multi_1(t, mr, QE_MRI_LADRF, ladrp, 8);
	bus_space_write_1(t, mr, QE_MRI_IAC, 0);
	bus_space_write_1(t, mr, QE_MRI_MACCC, maccc);
}

/*
 * Get current media settings.
 */
void
qe_ifmedia_sts(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct qe_softc *sc = ifp->if_softc;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mr = sc->sc_mr;
	u_int8_t v;

#if defined(SUN4U) || defined(__GNUC__)
	(void)&t;
#endif
	/*
	 * NOTE(review): PLSCC is read twice here; this first read's value
	 * is never used (v is overwritten below by the PHYCC read).
	 */
	v = bus_space_read_1(t, mr, QE_MRI_PLSCC);

	/* Report the active port from the PLS configuration. */
	switch (bus_space_read_1(t, mr, QE_MRI_PLSCC) & QE_MR_PLSCC_PORTMASK) {
	case QE_MR_PLSCC_TP:
		ifmr->ifm_active = IFM_ETHER | IFM_10_T;
		break;
	case QE_MR_PLSCC_AUI:
		ifmr->ifm_active = IFM_ETHER | IFM_10_5;
		break;
	case QE_MR_PLSCC_GPSI:
	case QE_MR_PLSCC_DAI:
		/* not media types we can report; leave ifm_active alone */
		break;
	}

	/* Link state comes from the PHYCC link-fail bit. */
	v = bus_space_read_1(t, mr, QE_MRI_PHYCC);
	ifmr->ifm_status |= IFM_AVALID;
	if ((v & QE_MR_PHYCC_LNKFL) != 0)
		ifmr->ifm_status &= ~IFM_ACTIVE;
	else
		ifmr->ifm_status |= IFM_ACTIVE;

}

/*
 * Set media options.
 */
int
qe_ifmedia_upd(ifp)
	struct ifnet *ifp;
{
	struct qe_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_ifmedia;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mr = sc->sc_mr;
	int newmedia = ifm->ifm_media;
	u_int8_t plscc, phycc;

#if defined(SUN4U) || defined(__GNUC__)
	(void)&t;
#endif
	if (IFM_TYPE(newmedia) != IFM_ETHER)
		return (EINVAL);

	/* Read-modify-write: clear the port/auto-select fields first. */
	plscc = bus_space_read_1(t, mr, QE_MRI_PLSCC) & ~QE_MR_PLSCC_PORTMASK;
	phycc = bus_space_read_1(t, mr, QE_MRI_PHYCC) & ~QE_MR_PHYCC_ASEL;

	if (IFM_SUBTYPE(newmedia) == IFM_AUTO)
		phycc |= QE_MR_PHYCC_ASEL;
	else if (IFM_SUBTYPE(newmedia) == IFM_10_T)
		plscc |= QE_MR_PLSCC_TP;
	else if (IFM_SUBTYPE(newmedia) == IFM_10_5)
		plscc |= QE_MR_PLSCC_AUI;

	bus_space_write_1(t, mr, QE_MRI_PLSCC, plscc);
	bus_space_write_1(t, mr, QE_MRI_PHYCC, phycc);

	return (0);
}