/*	$NetBSD: qe.c,v 1.45 2008/04/28 20:23:57 martin Exp $	*/

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1998 Jason L. Wright.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Driver for the SBus qec+qe QuadEthernet board.
 *
 * This driver was written using the AMD MACE Am79C940 documentation, some
 * ideas gleaned from the S/Linux driver for this card, Solaris header files,
 * and a loan of a card from Paul Southworth of the Internet Engineering
 * Group (www.ieng.com).
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: qe.c,v 1.45 2008/04/28 20:23:57 martin Exp $");

#define QEDEBUG

#include "opt_ddb.h"
#include "opt_inet.h"
#include "bpfilter.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/syslog.h>
#include <sys/device.h>
#include <sys/malloc.h>
#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/netisr.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_inarp.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#include <net/bpfdesc.h>
#endif

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/autoconf.h>

#include <dev/sbus/sbusvar.h>
#include <dev/sbus/qecreg.h>
#include <dev/sbus/qecvar.h>
#include <dev/sbus/qereg.h>

struct qe_softc {
        struct device   sc_dev;         /* base device */
        struct sbusdev  sc_sd;          /* sbus device */
        bus_space_tag_t sc_bustag;      /* bus & DMA tags */
        bus_dma_tag_t   sc_dmatag;
        bus_dmamap_t    sc_dmamap;
        struct ethercom sc_ethercom;
        struct ifmedia  sc_ifmedia;     /* interface media */

        struct qec_softc *sc_qec;       /* QEC parent */

        bus_space_handle_t sc_qr;       /* QEC registers */
        bus_space_handle_t sc_mr;       /* MACE registers */
        bus_space_handle_t sc_cr;       /* channel registers */

        int             sc_channel;     /* channel number */
        u_int           sc_rev;         /* board revision */

        int             sc_burst;

        struct qec_ring sc_rb;          /* Packet Ring Buffer */

        /* MAC address */
        u_int8_t        sc_enaddr[6];

#ifdef QEDEBUG
        int             sc_debug;
#endif
};

int     qematch(struct device *, struct cfdata *, void *);
void    qeattach(struct device *, struct device *, void *);

void    qeinit(struct qe_softc *);
void    qestart(struct ifnet *);
void    qestop(struct qe_softc *);
void    qewatchdog(struct ifnet *);
int     qeioctl(struct ifnet *, u_long, void *);
void    qereset(struct qe_softc *);

int     qeintr(void *);
int     qe_eint(struct qe_softc *, u_int32_t);
int     qe_rint(struct qe_softc *);
int     qe_tint(struct qe_softc *);
void    qe_mcreset(struct qe_softc *);

static int      qe_put(struct qe_softc *, int, struct mbuf *);
static void     qe_read(struct qe_softc *, int, int);
static struct mbuf      *qe_get(struct qe_softc *, int, int);

/* ifmedia callbacks */
void    qe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
int     qe_ifmedia_upd(struct ifnet *);

CFATTACH_DECL(qe, sizeof(struct qe_softc),
    qematch, qeattach, NULL, NULL);

int
qematch(parent, cf, aux)
        struct device *parent;
        struct cfdata *cf;
        void *aux;
{
        struct sbus_attach_args *sa = aux;

        return (strcmp(cf->cf_name, sa->sa_name) == 0);
}
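
/*
 * Attach a single MACE channel below the QEC parent. Register set 0
 * maps the per-channel QEC registers, set 1 the MACE chip itself;
 * global QEC state (register handle, burst size, local memory sizing)
 * is inherited from the parent's softc.
 */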
void
qeattach(parent, self, aux)
        struct device *parent, *self;
        void *aux;
{
        struct sbus_attach_args *sa = aux;
        struct qec_softc *qec = (struct qec_softc *)parent;
        struct qe_softc *sc = (struct qe_softc *)self;
        struct ifnet *ifp = &sc->sc_ethercom.ec_if;
        int node = sa->sa_node;
        bus_dma_tag_t dmatag = sa->sa_dmatag;
        bus_dma_segment_t seg;
        bus_size_t size;
        int rseg, error;

        if (sa->sa_nreg < 2) {
                printf("%s: only %d register sets\n",
                    device_xname(self), sa->sa_nreg);
                return;
        }

        if (bus_space_map(sa->sa_bustag,
            (bus_addr_t)BUS_ADDR(
                sa->sa_reg[0].oa_space,
                sa->sa_reg[0].oa_base),
            (bus_size_t)sa->sa_reg[0].oa_size,
            0, &sc->sc_cr) != 0) {
                aprint_error_dev(self, "cannot map registers\n");
                return;
        }

        if (bus_space_map(sa->sa_bustag,
            (bus_addr_t)BUS_ADDR(
                sa->sa_reg[1].oa_space,
                sa->sa_reg[1].oa_base),
            (bus_size_t)sa->sa_reg[1].oa_size,
            0, &sc->sc_mr) != 0) {
                aprint_error_dev(self, "cannot map registers\n");
                return;
        }

        sc->sc_rev = prom_getpropint(node, "mace-version", -1);
        printf(" rev %x", sc->sc_rev);

        sc->sc_bustag = sa->sa_bustag;
        sc->sc_dmatag = sa->sa_dmatag;
        sc->sc_qec = qec;
        sc->sc_qr = qec->sc_regs;

        sc->sc_channel = prom_getpropint(node, "channel#", -1);
        sc->sc_burst = qec->sc_burst;

        qestop(sc);

        /* Note: no interrupt level passed */
        (void)bus_intr_establish(sa->sa_bustag, 0, IPL_NET, qeintr, sc);
        prom_getether(node, sc->sc_enaddr);

        /*
         * Allocate descriptor ring and buffers.
         */

        /* for now, allocate as many bufs as there are ring descriptors */
        sc->sc_rb.rb_ntbuf = QEC_XD_RING_MAXSIZE;
        sc->sc_rb.rb_nrbuf = QEC_XD_RING_MAXSIZE;

        /* TX and RX descriptor rings, plus TX and RX packet buffers */
        size =  QEC_XD_RING_MAXSIZE * sizeof(struct qec_xd) +
                QEC_XD_RING_MAXSIZE * sizeof(struct qec_xd) +
                sc->sc_rb.rb_ntbuf * QE_PKT_BUF_SZ +
                sc->sc_rb.rb_nrbuf * QE_PKT_BUF_SZ;

        /* Get a DMA handle */
        if ((error = bus_dmamap_create(dmatag, size, 1, size, 0,
            BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
                aprint_error_dev(self, "DMA map create error %d\n", error);
                return;
        }

        /* Allocate DMA buffer */
        if ((error = bus_dmamem_alloc(dmatag, size, 0, 0,
            &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
                aprint_error_dev(self, "DMA buffer alloc error %d\n", error);
                return;
        }

        /* Map DMA buffer in CPU addressable space */
        if ((error = bus_dmamem_map(dmatag, &seg, rseg, size,
            &sc->sc_rb.rb_membase,
            BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
                aprint_error_dev(self, "DMA buffer map error %d\n", error);
                bus_dmamem_free(dmatag, &seg, rseg);
                return;
        }

        /* Load the buffer */
        if ((error = bus_dmamap_load(dmatag, sc->sc_dmamap,
            sc->sc_rb.rb_membase, size, NULL,
            BUS_DMA_NOWAIT)) != 0) {
                aprint_error_dev(self, "DMA buffer map load error %d\n",
                    error);
                bus_dmamem_unmap(dmatag, sc->sc_rb.rb_membase, size);
                bus_dmamem_free(dmatag, &seg, rseg);
                return;
        }
        sc->sc_rb.rb_dmabase = sc->sc_dmamap->dm_segs[0].ds_addr;

        /* Initialize media properties */
        ifmedia_init(&sc->sc_ifmedia, 0, qe_ifmedia_upd, qe_ifmedia_sts);
        ifmedia_add(&sc->sc_ifmedia,
            IFM_MAKEWORD(IFM_ETHER, IFM_10_T, 0, 0), 0, NULL);
        ifmedia_add(&sc->sc_ifmedia,
            IFM_MAKEWORD(IFM_ETHER, IFM_10_5, 0, 0), 0, NULL);
        ifmedia_add(&sc->sc_ifmedia,
            IFM_MAKEWORD(IFM_ETHER, IFM_AUTO, 0, 0), 0, NULL);
        ifmedia_set(&sc->sc_ifmedia, IFM_ETHER|IFM_AUTO);

        memcpy(ifp->if_xname, device_xname(&sc->sc_dev), IFNAMSIZ);
        ifp->if_softc = sc;
        ifp->if_start = qestart;
        ifp->if_ioctl = qeioctl;
        ifp->if_watchdog = qewatchdog;
        ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS |
            IFF_MULTICAST;
        IFQ_SET_READY(&ifp->if_snd);

        /* Attach the interface. */
        if_attach(ifp);
        ether_ifattach(ifp, sc->sc_enaddr);

        printf(" address %s\n", ether_sprintf(sc->sc_enaddr));
}
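
/*
 * Note on buffering: rather than DMA-mapping mbufs directly, this
 * driver copies every packet through the fixed buffers allocated in
 * qeattach(): qe_put() copies an outgoing mbuf chain into the slot
 * selected by the descriptor index, and qe_get() copies a received
 * frame back out into a freshly allocated chain.
 */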

/*
 * Pull data off an interface.
 * Len is the length of data, with local net header stripped.
 * We copy the data into mbufs. When full cluster sized units are present,
 * we copy into clusters.
 */
static inline struct mbuf *
qe_get(sc, idx, totlen)
        struct qe_softc *sc;
        int idx, totlen;
{
        struct ifnet *ifp = &sc->sc_ethercom.ec_if;
        struct mbuf *m;
        struct mbuf *top, **mp;
        int len, pad, boff = 0;
        void *bp;

        bp = (char *)sc->sc_rb.rb_rxbuf +
            (idx % sc->sc_rb.rb_nrbuf) * QE_PKT_BUF_SZ;

        MGETHDR(m, M_DONTWAIT, MT_DATA);
        if (m == NULL)
                return (NULL);
        m->m_pkthdr.rcvif = ifp;
        m->m_pkthdr.len = totlen;
        pad = ALIGN(sizeof(struct ether_header)) - sizeof(struct ether_header);
        m->m_data += pad;
        len = MHLEN - pad;
        top = NULL;
        mp = &top;

        while (totlen > 0) {
                if (top) {
                        MGET(m, M_DONTWAIT, MT_DATA);
                        if (m == NULL) {
                                m_freem(top);
                                return (NULL);
                        }
                        len = MLEN;
                }
                if (top && totlen >= MINCLSIZE) {
                        MCLGET(m, M_DONTWAIT);
                        if (m->m_flags & M_EXT)
                                len = MCLBYTES;
                }
                m->m_len = len = min(totlen, len);
                memcpy(mtod(m, void *), (char *)bp + boff, len);
                boff += len;
                totlen -= len;
                *mp = m;
                mp = &m->m_next;
        }

        return (top);
}

/*
 * Routine to copy from mbuf chain to transmit buffer in
 * network buffer memory.
 */
static inline int
qe_put(sc, idx, m)
        struct qe_softc *sc;
        int idx;
        struct mbuf *m;
{
        struct mbuf *n;
        int len, tlen = 0, boff = 0;
        void *bp;

        bp = (char *)sc->sc_rb.rb_txbuf +
            (idx % sc->sc_rb.rb_ntbuf) * QE_PKT_BUF_SZ;

        for (; m; m = n) {
                len = m->m_len;
                if (len == 0) {
                        MFREE(m, n);
                        continue;
                }
                memcpy((char *)bp + boff, mtod(m, void *), len);
                boff += len;
                tlen += len;
                MFREE(m, n);
        }
        return (tlen);
}

/*
 * Pass a packet to the higher levels.
 */
static inline void
qe_read(sc, idx, len)
        struct qe_softc *sc;
        int idx, len;
{
        struct ifnet *ifp = &sc->sc_ethercom.ec_if;
        struct mbuf *m;

        if (len <= sizeof(struct ether_header) ||
            len > ETHERMTU + sizeof(struct ether_header)) {

                printf("%s: invalid packet size %d; dropping\n",
                    ifp->if_xname, len);

                ifp->if_ierrors++;
                return;
        }

        /*
         * Pull packet off interface.
         */
        m = qe_get(sc, idx, len);
        if (m == NULL) {
                ifp->if_ierrors++;
                return;
        }
        ifp->if_ipackets++;

#if NBPFILTER > 0
        /*
         * Check if there's a BPF listener on this interface.
         * If so, hand off the raw packet to BPF.
         */
        if (ifp->if_bpf)
                bpf_mtap(ifp->if_bpf, m);
#endif
        /* Pass the packet up. */
        (*ifp->if_input)(ifp, m);
}
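
/*
 * Descriptor ownership: the driver hands a buffer to the QEC by
 * setting QEC_XD_OWN in the descriptor flags, and the hardware
 * clears the bit when it has finished with the buffer. qe_tint()
 * and qe_rint() therefore stop their ring walks at the first
 * descriptor that still has QEC_XD_OWN set.
 */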

/*
 * Start output on interface.
 * We make two assumptions here:
 *  1) that the current priority is set to splnet _before_ this code
 *     is called *and* is returned to the appropriate priority after
 *     return
 *  2) that the IFF_OACTIVE flag is checked before this code is called
 *     (i.e. that the output part of the interface is idle)
 */
void
qestart(ifp)
        struct ifnet *ifp;
{
        struct qe_softc *sc = (struct qe_softc *)ifp->if_softc;
        struct qec_xd *txd = sc->sc_rb.rb_txd;
        struct mbuf *m;
        unsigned int bix, len;
        unsigned int ntbuf = sc->sc_rb.rb_ntbuf;

        if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
                return;

        bix = sc->sc_rb.rb_tdhead;

        for (;;) {
                IFQ_DEQUEUE(&ifp->if_snd, m);
                if (m == NULL)
                        break;

#if NBPFILTER > 0
                /*
                 * If BPF is listening on this interface, let it see the
                 * packet before we commit it to the wire.
                 */
                if (ifp->if_bpf)
                        bpf_mtap(ifp->if_bpf, m);
#endif

                /*
                 * Copy the mbuf chain into the transmit buffer.
                 */
                len = qe_put(sc, bix, m);

                /*
                 * Initialize transmit registers and start transmission.
                 */
                txd[bix].xd_flags = QEC_XD_OWN | QEC_XD_SOP | QEC_XD_EOP |
                    (len & QEC_XD_LENGTH);
                bus_space_write_4(sc->sc_bustag, sc->sc_cr, QE_CRI_CTRL,
                    QE_CR_CTRL_TWAKEUP);

                if (++bix == QEC_XD_RING_MAXSIZE)
                        bix = 0;

                if (++sc->sc_rb.rb_td_nbusy == ntbuf) {
                        ifp->if_flags |= IFF_OACTIVE;
                        break;
                }
        }

        sc->sc_rb.rb_tdhead = bix;
}

void
qestop(sc)
        struct qe_softc *sc;
{
        bus_space_tag_t t = sc->sc_bustag;
        bus_space_handle_t mr = sc->sc_mr;
        bus_space_handle_t cr = sc->sc_cr;
        int n;

#if defined(SUN4U) || defined(__GNUC__)
        (void)&t;
#endif
        /* Stop the schwurst */
        bus_space_write_1(t, mr, QE_MRI_BIUCC, QE_MR_BIUCC_SWRST);
        for (n = 200; n > 0; n--) {
                if ((bus_space_read_1(t, mr, QE_MRI_BIUCC) &
                    QE_MR_BIUCC_SWRST) == 0)
                        break;
                DELAY(20);
        }

        /* then reset */
        bus_space_write_4(t, cr, QE_CRI_CTRL, QE_CR_CTRL_RESET);
        for (n = 200; n > 0; n--) {
                if ((bus_space_read_4(t, cr, QE_CRI_CTRL) &
                    QE_CR_CTRL_RESET) == 0)
                        break;
                DELAY(20);
        }
}

/*
 * Reset interface.
 */
void
qereset(sc)
        struct qe_softc *sc;
{
        int s;

        s = splnet();
        qestop(sc);
        qeinit(sc);
        splx(s);
}

void
qewatchdog(ifp)
        struct ifnet *ifp;
{
        struct qe_softc *sc = ifp->if_softc;

        log(LOG_ERR, "%s: device timeout\n", device_xname(&sc->sc_dev));
        ifp->if_oerrors++;

        qereset(sc);
}

/*
 * Interrupt dispatch.
 */
int
qeintr(arg)
        void *arg;
{
        struct qe_softc *sc = (struct qe_softc *)arg;
        bus_space_tag_t t = sc->sc_bustag;
        u_int32_t qecstat, qestat;
        int r = 0;

#if defined(SUN4U) || defined(__GNUC__)
        (void)&t;
#endif
        /* Read QEC status and channel status */
        qecstat = bus_space_read_4(t, sc->sc_qr, QEC_QRI_STAT);
#ifdef QEDEBUG
        if (sc->sc_debug) {
                printf("qe%d: intr: qecstat=%x\n", sc->sc_channel, qecstat);
        }
#endif

        /* Filter out status for this channel */
        qecstat = qecstat >> (4 * sc->sc_channel);
        if ((qecstat & 0xf) == 0)
                return (r);

        qestat = bus_space_read_4(t, sc->sc_cr, QE_CRI_STAT);

#ifdef QEDEBUG
        if (sc->sc_debug) {
                char bits[64]; int i;
                bus_space_tag_t t1 = sc->sc_bustag;
                bus_space_handle_t mr = sc->sc_mr;

                printf("qe%d: intr: qestat=%s\n", sc->sc_channel,
                    bitmask_snprintf(qestat, QE_CR_STAT_BITS,
                        bits, sizeof(bits)));

                printf("MACE registers:\n");
                for (i = 0; i < 32; i++) {
                        printf("  m[%d]=%x,", i, bus_space_read_1(t1, mr, i));
                        if (((i + 1) & 7) == 0)
                                printf("\n");
                }
        }
#endif

        if (qestat & QE_CR_STAT_ALLERRORS) {
#ifdef QEDEBUG
                if (sc->sc_debug) {
                        char bits[64];
                        printf("qe%d: eint: qestat=%s\n", sc->sc_channel,
                            bitmask_snprintf(qestat, QE_CR_STAT_BITS,
                                bits, sizeof(bits)));
                }
#endif
                r |= qe_eint(sc, qestat);
                if (r == -1)
                        return (1);
        }

        if (qestat & QE_CR_STAT_TXIRQ)
                r |= qe_tint(sc);

        if (qestat & QE_CR_STAT_RXIRQ)
                r |= qe_rint(sc);

        return (r);
}

/*
 * Transmit interrupt.
 */
int
qe_tint(sc)
        struct qe_softc *sc;
{
        struct ifnet *ifp = &sc->sc_ethercom.ec_if;
        unsigned int bix, txflags;

        bix = sc->sc_rb.rb_tdtail;

        for (;;) {
                if (sc->sc_rb.rb_td_nbusy <= 0)
                        break;

                txflags = sc->sc_rb.rb_txd[bix].xd_flags;

                if (txflags & QEC_XD_OWN)
                        break;

                ifp->if_flags &= ~IFF_OACTIVE;
                ifp->if_opackets++;

                if (++bix == QEC_XD_RING_MAXSIZE)
                        bix = 0;

                --sc->sc_rb.rb_td_nbusy;
        }

        sc->sc_rb.rb_tdtail = bix;

        qestart(ifp);

        if (sc->sc_rb.rb_td_nbusy == 0)
                ifp->if_timer = 0;

        return (1);
}

/*
 * Receive interrupt.
 */
int
qe_rint(sc)
        struct qe_softc *sc;
{
        struct qec_xd *xd = sc->sc_rb.rb_rxd;
        unsigned int bix, len;
        unsigned int nrbuf = sc->sc_rb.rb_nrbuf;
#ifdef QEDEBUG
        int npackets = 0;
#endif

        bix = sc->sc_rb.rb_rdtail;

        /*
         * Process all buffers with valid data.
         */
        for (;;) {
                len = xd[bix].xd_flags;
                if (len & QEC_XD_OWN)
                        break;

#ifdef QEDEBUG
                npackets++;
#endif

                len &= QEC_XD_LENGTH;
                len -= 4;       /* strip trailing FCS */
                qe_read(sc, bix, len);

                /* ... */
                xd[(bix + nrbuf) % QEC_XD_RING_MAXSIZE].xd_flags =
                    QEC_XD_OWN | (QE_PKT_BUF_SZ & QEC_XD_LENGTH);

                if (++bix == QEC_XD_RING_MAXSIZE)
                        bix = 0;
        }
#ifdef QEDEBUG
        if (npackets == 0 && sc->sc_debug)
                printf("%s: rint: no packets; rb index %d; status 0x%x\n",
                    device_xname(&sc->sc_dev), bix, len);
#endif

        sc->sc_rb.rb_rdtail = bix;

        return (1);
}

/*
 * Error interrupt.
 */
int
qe_eint(sc, why)
        struct qe_softc *sc;
        u_int32_t why;
{
        struct ifnet *ifp = &sc->sc_ethercom.ec_if;
        int r = 0, rst = 0;

        if (why & QE_CR_STAT_EDEFER) {
                printf("%s: excessive tx defers.\n",
                    device_xname(&sc->sc_dev));
                r |= 1;
                ifp->if_oerrors++;
        }

        if (why & QE_CR_STAT_CLOSS) {
                printf("%s: no carrier, link down?\n",
                    device_xname(&sc->sc_dev));
                ifp->if_oerrors++;
                r |= 1;
        }

        if (why & QE_CR_STAT_ERETRIES) {
                printf("%s: excessive tx retries\n",
                    device_xname(&sc->sc_dev));
                ifp->if_oerrors++;
                r |= 1;
                rst = 1;
        }

        if (why & QE_CR_STAT_LCOLL) {
                printf("%s: late tx collision\n",
                    device_xname(&sc->sc_dev));
                ifp->if_oerrors++;
                r |= 1;
                rst = 1;
        }

        if (why & QE_CR_STAT_FUFLOW) {
                printf("%s: tx fifo underflow\n",
                    device_xname(&sc->sc_dev));
                ifp->if_oerrors++;
                r |= 1;
                rst = 1;
        }

        if (why & QE_CR_STAT_JERROR) {
                printf("%s: jabber seen\n", device_xname(&sc->sc_dev));
                r |= 1;
        }

        if (why & QE_CR_STAT_BERROR) {
                printf("%s: babble seen\n", device_xname(&sc->sc_dev));
                r |= 1;
        }

        if (why & QE_CR_STAT_TCCOFLOW) {
                ifp->if_collisions += 256;
                ifp->if_oerrors += 256;
                r |= 1;
        }

        if (why & QE_CR_STAT_TXDERROR) {
                printf("%s: tx descriptor is bad\n",
                    device_xname(&sc->sc_dev));
                rst = 1;
                r |= 1;
        }

        if (why & QE_CR_STAT_TXLERR) {
                printf("%s: tx late error\n", device_xname(&sc->sc_dev));
                ifp->if_oerrors++;
                rst = 1;
                r |= 1;
        }

        if (why & QE_CR_STAT_TXPERR) {
                printf("%s: tx DMA parity error\n",
                    device_xname(&sc->sc_dev));
                ifp->if_oerrors++;
                rst = 1;
                r |= 1;
        }

        if (why & QE_CR_STAT_TXSERR) {
                printf("%s: tx DMA sbus error ack\n",
                    device_xname(&sc->sc_dev));
                ifp->if_oerrors++;
                rst = 1;
                r |= 1;
        }

        if (why & QE_CR_STAT_RCCOFLOW) {
                ifp->if_collisions += 256;
                ifp->if_ierrors += 256;
                r |= 1;
        }

        if (why & QE_CR_STAT_RUOFLOW) {
                ifp->if_ierrors += 256;
                r |= 1;
        }

        if (why & QE_CR_STAT_MCOFLOW) {
                ifp->if_ierrors += 256;
                r |= 1;
        }

        if (why & QE_CR_STAT_RXFOFLOW) {
                printf("%s: rx fifo overflow\n", device_xname(&sc->sc_dev));
                ifp->if_ierrors++;
                r |= 1;
        }

        if (why & QE_CR_STAT_RLCOLL) {
                printf("%s: rx late collision\n", device_xname(&sc->sc_dev));
                ifp->if_ierrors++;
                ifp->if_collisions++;
                r |= 1;
        }

        if (why & QE_CR_STAT_FCOFLOW) {
                ifp->if_ierrors += 256;
                r |= 1;
        }

        if (why & QE_CR_STAT_CECOFLOW) {
                ifp->if_ierrors += 256;
                r |= 1;
        }

        if (why & QE_CR_STAT_RXDROP) {
                printf("%s: rx packet dropped\n", device_xname(&sc->sc_dev));
                ifp->if_ierrors++;
                r |= 1;
        }

        if (why & QE_CR_STAT_RXSMALL) {
                printf("%s: rx buffer too small\n",
                    device_xname(&sc->sc_dev));
                ifp->if_ierrors++;
                r |= 1;
                rst = 1;
        }

        if (why & QE_CR_STAT_RXLERR) {
                printf("%s: rx late error\n", device_xname(&sc->sc_dev));
                ifp->if_ierrors++;
                r |= 1;
                rst = 1;
        }

        if (why & QE_CR_STAT_RXPERR) {
                printf("%s: rx DMA parity error\n",
                    device_xname(&sc->sc_dev));
                ifp->if_ierrors++;
                r |= 1;
                rst = 1;
        }

        if (why & QE_CR_STAT_RXSERR) {
                printf("%s: rx DMA sbus error ack\n",
                    device_xname(&sc->sc_dev));
                ifp->if_ierrors++;
                r |= 1;
                rst = 1;
        }

        if (r == 0)
                aprint_error_dev(&sc->sc_dev,
                    "unexpected interrupt error: %08x\n", why);

        if (rst) {
                printf("%s: resetting...\n", device_xname(&sc->sc_dev));
                qereset(sc);
                return (-1);
        }

        return (r);
}

int
qeioctl(ifp, cmd, data)
        struct ifnet *ifp;
        u_long cmd;
        void *data;
{
        struct qe_softc *sc = ifp->if_softc;
        struct ifaddr *ifa = (struct ifaddr *)data;
        struct ifreq *ifr = (struct ifreq *)data;
        int s, error = 0;

        s = splnet();

        switch (cmd) {
        case SIOCSIFADDR:
                ifp->if_flags |= IFF_UP;
                switch (ifa->ifa_addr->sa_family) {
#ifdef INET
                case AF_INET:
                        qeinit(sc);
                        arp_ifinit(ifp, ifa);
                        break;
#endif /* INET */
                default:
                        qeinit(sc);
                        break;
                }
                break;

        case SIOCSIFFLAGS:
                if ((ifp->if_flags & IFF_UP) == 0 &&
                    (ifp->if_flags & IFF_RUNNING) != 0) {
                        /*
                         * If interface is marked down and it is running, then
                         * stop it.
                         */
                        qestop(sc);
                        ifp->if_flags &= ~IFF_RUNNING;

                } else if ((ifp->if_flags & IFF_UP) != 0 &&
                    (ifp->if_flags & IFF_RUNNING) == 0) {
                        /*
                         * If interface is marked up and it is stopped, then
                         * start it.
                         */
                        qeinit(sc);

                } else {
                        /*
                         * Reset the interface to pick up changes in any other
                         * flags that affect hardware registers.
                         */
                        qestop(sc);
                        qeinit(sc);
                }
#ifdef QEDEBUG
                sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
#endif
                break;

        case SIOCADDMULTI:
        case SIOCDELMULTI:
                if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
                        /*
                         * Multicast list has changed; set the hardware filter
                         * accordingly.
                         */
                        if (ifp->if_flags & IFF_RUNNING)
                                qe_mcreset(sc);
                        error = 0;
                }
                break;

        case SIOCGIFMEDIA:
        case SIOCSIFMEDIA:
                error = ifmedia_ioctl(ifp, ifr, &sc->sc_ifmedia, cmd);
                break;

        default:
                error = EINVAL;
                break;
        }

        splx(s);
        return (error);
}

void
qeinit(sc)
        struct qe_softc *sc;
{
        struct ifnet *ifp = &sc->sc_ethercom.ec_if;
        bus_space_tag_t t = sc->sc_bustag;
        bus_space_handle_t cr = sc->sc_cr;
        bus_space_handle_t mr = sc->sc_mr;
        struct qec_softc *qec = sc->sc_qec;
        u_int32_t qecaddr;
        u_int8_t *ea;
        int s;

#if defined(SUN4U) || defined(__GNUC__)
        (void)&t;
#endif
        s = splnet();

        qestop(sc);

        /*
         * Allocate descriptor ring and buffers
         */
        qec_meminit(&sc->sc_rb, QE_PKT_BUF_SZ);

        /* Channel registers: */
        bus_space_write_4(t, cr, QE_CRI_RXDS, (u_int32_t)sc->sc_rb.rb_rxddma);
        bus_space_write_4(t, cr, QE_CRI_TXDS, (u_int32_t)sc->sc_rb.rb_txddma);

        bus_space_write_4(t, cr, QE_CRI_RIMASK, 0);
        bus_space_write_4(t, cr, QE_CRI_TIMASK, 0);
        bus_space_write_4(t, cr, QE_CRI_QMASK, 0);
        bus_space_write_4(t, cr, QE_CRI_MMASK, QE_CR_MMASK_RXCOLL);
        bus_space_write_4(t, cr, QE_CRI_CCNT, 0);
        bus_space_write_4(t, cr, QE_CRI_PIPG, 0);
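
        /*
         * Carve this channel's slice out of the QEC's local buffer
         * memory: channel n owns sc_msize bytes, the first sc_rsize
         * of which are used for receive buffering, the remainder
         * for transmit.
         */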
        qecaddr = sc->sc_channel * qec->sc_msize;
        bus_space_write_4(t, cr, QE_CRI_RXWBUF, qecaddr);
        bus_space_write_4(t, cr, QE_CRI_RXRBUF, qecaddr);
        bus_space_write_4(t, cr, QE_CRI_TXWBUF, qecaddr + qec->sc_rsize);
        bus_space_write_4(t, cr, QE_CRI_TXRBUF, qecaddr + qec->sc_rsize);

        /* MACE registers: */
        bus_space_write_1(t, mr, QE_MRI_PHYCC, QE_MR_PHYCC_ASEL);
        bus_space_write_1(t, mr, QE_MRI_XMTFC, QE_MR_XMTFC_APADXMT);
        bus_space_write_1(t, mr, QE_MRI_RCVFC, 0);

        /*
         * Mask MACE's receive interrupt, since we're being notified
         * by the QEC after DMA completes.
         */
        bus_space_write_1(t, mr, QE_MRI_IMR,
            QE_MR_IMR_CERRM | QE_MR_IMR_RCVINTM);

        bus_space_write_1(t, mr, QE_MRI_BIUCC,
            QE_MR_BIUCC_BSWAP | QE_MR_BIUCC_64TS);

        bus_space_write_1(t, mr, QE_MRI_FIFOFC,
            QE_MR_FIFOCC_TXF16 | QE_MR_FIFOCC_RXF32 |
            QE_MR_FIFOCC_RFWU | QE_MR_FIFOCC_TFWU);

        bus_space_write_1(t, mr, QE_MRI_PLSCC, QE_MR_PLSCC_TP);

        /*
         * Station address
         */
        ea = sc->sc_enaddr;
        bus_space_write_1(t, mr, QE_MRI_IAC,
            QE_MR_IAC_ADDRCHG | QE_MR_IAC_PHYADDR);
        bus_space_write_multi_1(t, mr, QE_MRI_PADR, ea, 6);

        /* Apply media settings */
        qe_ifmedia_upd(ifp);

        /*
         * Clear Logical address filter
         */
        bus_space_write_1(t, mr, QE_MRI_IAC,
            QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR);
        bus_space_set_multi_1(t, mr, QE_MRI_LADRF, 0, 8);
        bus_space_write_1(t, mr, QE_MRI_IAC, 0);

        /* Clear missed packet count (register cleared on read) */
        (void)bus_space_read_1(t, mr, QE_MRI_MPC);

#if 0
        /* test register: */
        bus_space_write_1(t, mr, QE_MRI_UTR, 0);
#endif

        /* Reset multicast filter */
        qe_mcreset(sc);

        ifp->if_flags |= IFF_RUNNING;
        ifp->if_flags &= ~IFF_OACTIVE;
        splx(s);
}

/*
 * Reset multicast filter.
 */
void
qe_mcreset(sc)
        struct qe_softc *sc;
{
        struct ethercom *ec = &sc->sc_ethercom;
        struct ifnet *ifp = &sc->sc_ethercom.ec_if;
        bus_space_tag_t t = sc->sc_bustag;
        bus_space_handle_t mr = sc->sc_mr;
        struct ether_multi *enm;
        struct ether_multistep step;
        u_int32_t crc;
        u_int16_t hash[4];
        u_int8_t octet, maccc, *ladrp = (u_int8_t *)&hash[0];
        int i, j;

#if defined(SUN4U) || defined(__GNUC__)
        (void)&t;
#endif

        /* We also enable transmitter & receiver here */
        maccc = QE_MR_MACCC_ENXMT | QE_MR_MACCC_ENRCV;

        if (ifp->if_flags & IFF_PROMISC) {
                maccc |= QE_MR_MACCC_PROM;
                bus_space_write_1(t, mr, QE_MRI_MACCC, maccc);
                return;
        }

        if (ifp->if_flags & IFF_ALLMULTI) {
                bus_space_write_1(t, mr, QE_MRI_IAC,
                    QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR);
                bus_space_set_multi_1(t, mr, QE_MRI_LADRF, 0xff, 8);
                bus_space_write_1(t, mr, QE_MRI_IAC, 0);
                bus_space_write_1(t, mr, QE_MRI_MACCC, maccc);
                return;
        }

        hash[3] = hash[2] = hash[1] = hash[0] = 0;

        ETHER_FIRST_MULTI(step, ec, enm);
        while (enm != NULL) {
                if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
                    ETHER_ADDR_LEN) != 0) {
                        /*
                         * We must listen to a range of multicast
                         * addresses. For now, just accept all
                         * multicasts, rather than trying to set only
                         * those filter bits needed to match the range.
                         * (At this time, the only use of address
                         * ranges is for IP multicast routing, for
                         * which the range is big enough to require
                         * all bits set.)
                         */
                        bus_space_write_1(t, mr, QE_MRI_IAC,
                            QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR);
                        bus_space_set_multi_1(t, mr, QE_MRI_LADRF, 0xff, 8);
                        bus_space_write_1(t, mr, QE_MRI_IAC, 0);
                        ifp->if_flags |= IFF_ALLMULTI;
                        break;
                }
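
                /*
                 * Hash the address into the 64-bit logical address
                 * filter: compute the little-endian Ethernet CRC-32
                 * of the address, keep the top 6 bits of the result,
                 * and set the corresponding bit in the LADRF image.
                 */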
                crc = 0xffffffff;

                for (i = 0; i < ETHER_ADDR_LEN; i++) {
                        octet = enm->enm_addrlo[i];

                        for (j = 0; j < 8; j++) {
                                if ((crc & 1) ^ (octet & 1)) {
                                        crc >>= 1;
                                        crc ^= MC_POLY_LE;
                                } else
                                        crc >>= 1;
                                octet >>= 1;
                        }
                }

                crc >>= 26;
                hash[crc >> 4] |= 1 << (crc & 0xf);
                ETHER_NEXT_MULTI(step, enm);
        }

        bus_space_write_1(t, mr, QE_MRI_IAC,
            QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR);
        bus_space_write_multi_1(t, mr, QE_MRI_LADRF, ladrp, 8);
        bus_space_write_1(t, mr, QE_MRI_IAC, 0);
        bus_space_write_1(t, mr, QE_MRI_MACCC, maccc);
}

/*
 * Get current media settings.
 */
void
qe_ifmedia_sts(ifp, ifmr)
        struct ifnet *ifp;
        struct ifmediareq *ifmr;
{
        struct qe_softc *sc = ifp->if_softc;
        bus_space_tag_t t = sc->sc_bustag;
        bus_space_handle_t mr = sc->sc_mr;
        u_int8_t v;

#if defined(SUN4U) || defined(__GNUC__)
        (void)&t;
#endif
        v = bus_space_read_1(t, mr, QE_MRI_PLSCC);

        switch (v & QE_MR_PLSCC_PORTMASK) {
        case QE_MR_PLSCC_TP:
                ifmr->ifm_active = IFM_ETHER | IFM_10_T;
                break;
        case QE_MR_PLSCC_AUI:
                ifmr->ifm_active = IFM_ETHER | IFM_10_5;
                break;
        case QE_MR_PLSCC_GPSI:
        case QE_MR_PLSCC_DAI:
                /* ... */
                break;
        }

        v = bus_space_read_1(t, mr, QE_MRI_PHYCC);
        ifmr->ifm_status |= IFM_AVALID;
        if ((v & QE_MR_PHYCC_LNKFL) != 0)
                ifmr->ifm_status &= ~IFM_ACTIVE;
        else
                ifmr->ifm_status |= IFM_ACTIVE;
}

/*
 * Set media options.
 */
int
qe_ifmedia_upd(ifp)
        struct ifnet *ifp;
{
        struct qe_softc *sc = ifp->if_softc;
        struct ifmedia *ifm = &sc->sc_ifmedia;
        bus_space_tag_t t = sc->sc_bustag;
        bus_space_handle_t mr = sc->sc_mr;
        int newmedia = ifm->ifm_media;
        u_int8_t plscc, phycc;

#if defined(SUN4U) || defined(__GNUC__)
        (void)&t;
#endif
        if (IFM_TYPE(newmedia) != IFM_ETHER)
                return (EINVAL);

        plscc = bus_space_read_1(t, mr, QE_MRI_PLSCC) & ~QE_MR_PLSCC_PORTMASK;
        phycc = bus_space_read_1(t, mr, QE_MRI_PHYCC) & ~QE_MR_PHYCC_ASEL;

        if (IFM_SUBTYPE(newmedia) == IFM_AUTO)
                phycc |= QE_MR_PHYCC_ASEL;
        else if (IFM_SUBTYPE(newmedia) == IFM_10_T)
                plscc |= QE_MR_PLSCC_TP;
        else if (IFM_SUBTYPE(newmedia) == IFM_10_5)
                plscc |= QE_MR_PLSCC_AUI;

        bus_space_write_1(t, mr, QE_MRI_PLSCC, plscc);
        bus_space_write_1(t, mr, QE_MRI_PHYCC, phycc);

        return (0);
}