/*	$NetBSD: qe.c,v 1.49 2009/03/14 21:04:22 dsl Exp $	*/

/*-
 * Copyright (c) 1999 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * This code is derived from software contributed to The NetBSD Foundation
 * by Paul Kranenburg.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 1998 Jason L. Wright.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Driver for the SBus qec+qe QuadEthernet board.
 *
 * This driver was written using the AMD MACE Am79C940 documentation, some
 * ideas gleaned from the S/Linux driver for this card, Solaris header files,
 * and a loan of a card from Paul Southworth of the Internet Engineering
 * Group (www.ieng.com).
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: qe.c,v 1.49 2009/03/14 21:04:22 dsl Exp $");

#define QEDEBUG

#include "opt_ddb.h"
#include "opt_inet.h"
#include "bpfilter.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/syslog.h>
#include <sys/device.h>
#include <sys/malloc.h>
#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/netisr.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_inarp.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#include <net/bpfdesc.h>
#endif

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/autoconf.h>

#include <dev/sbus/sbusvar.h>
#include <dev/sbus/qecreg.h>
#include <dev/sbus/qecvar.h>
#include <dev/sbus/qereg.h>

struct qe_softc {
	struct device	sc_dev;		/* base device */
	struct sbusdev	sc_sd;		/* sbus device */
	bus_space_tag_t	sc_bustag;	/* bus & DMA tags */
	bus_dma_tag_t	sc_dmatag;
	bus_dmamap_t	sc_dmamap;
	struct ethercom	sc_ethercom;
	struct ifmedia	sc_ifmedia;	/* interface media */

	struct qec_softc *sc_qec;	/* QEC parent */

	bus_space_handle_t sc_qr;	/* QEC registers */
	bus_space_handle_t sc_mr;	/* MACE registers */
	bus_space_handle_t sc_cr;	/* channel registers */

	int		sc_channel;	/* channel number */
	u_int		sc_rev;		/* board revision */

	int		sc_burst;

	struct qec_ring	sc_rb;		/* Packet Ring Buffer */

	/* MAC address */
	u_int8_t	sc_enaddr[6];

#ifdef QEDEBUG
	int		sc_debug;
#endif
};

int	qematch(struct device *, struct cfdata *, void *);
void	qeattach(struct device *, struct device *, void *);

void	qeinit(struct qe_softc *);
void	qestart(struct ifnet *);
void	qestop(struct qe_softc *);
void	qewatchdog(struct ifnet *);
int	qeioctl(struct ifnet *, u_long, void *);
void	qereset(struct qe_softc *);

int	qeintr(void *);
int	qe_eint(struct qe_softc *, u_int32_t);
int	qe_rint(struct qe_softc *);
int	qe_tint(struct qe_softc *);
void	qe_mcreset(struct qe_softc *);

static int	qe_put(struct qe_softc *, int, struct mbuf *);
static void	qe_read(struct qe_softc *, int, int);
static struct mbuf	*qe_get(struct qe_softc *, int, int);

/* ifmedia callbacks */
void	qe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
int	qe_ifmedia_upd(struct ifnet *);

CFATTACH_DECL(qe, sizeof(struct qe_softc),
    qematch, qeattach, NULL, NULL);

int
qematch(struct device *parent, struct cfdata *cf, void *aux)
{
	struct sbus_attach_args *sa = aux;

	return (strcmp(cf->cf_name, sa->sa_name) == 0);
}
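/*
 * Layout note (derived from the `size' computation in qeattach() below,
 * and from how qec_meminit() carves the area up): all descriptor rings
 * and packet buffers live in a single contiguous DMA area, in the order
 *
 *	TX descriptor ring | RX descriptor ring | TX buffers | RX buffers
 *
 * so one bus_dmamap covers everything this channel DMAs to and from.
 */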
void
qeattach(struct device *parent, struct device *self, void *aux)
{
	struct sbus_attach_args *sa = aux;
	struct qec_softc *qec = (struct qec_softc *)parent;
	struct qe_softc *sc = (struct qe_softc *)self;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int node = sa->sa_node;
	bus_dma_tag_t dmatag = sa->sa_dmatag;
	bus_dma_segment_t seg;
	bus_size_t size;
	int rseg, error;

	if (sa->sa_nreg < 2) {
		printf("%s: only %d register sets\n",
		    device_xname(self), sa->sa_nreg);
		return;
	}

	if (bus_space_map(sa->sa_bustag,
	    (bus_addr_t)BUS_ADDR(
		sa->sa_reg[0].oa_space,
		sa->sa_reg[0].oa_base),
	    (bus_size_t)sa->sa_reg[0].oa_size,
	    0, &sc->sc_cr) != 0) {
		aprint_error_dev(self, "cannot map channel registers\n");
		return;
	}

	if (bus_space_map(sa->sa_bustag,
	    (bus_addr_t)BUS_ADDR(
		sa->sa_reg[1].oa_space,
		sa->sa_reg[1].oa_base),
	    (bus_size_t)sa->sa_reg[1].oa_size,
	    0, &sc->sc_mr) != 0) {
		aprint_error_dev(self, "cannot map MACE registers\n");
		return;
	}

	sc->sc_rev = prom_getpropint(node, "mace-version", -1);
	printf(" rev %x", sc->sc_rev);

	sc->sc_bustag = sa->sa_bustag;
	sc->sc_dmatag = sa->sa_dmatag;
	sc->sc_qec = qec;
	sc->sc_qr = qec->sc_regs;

	sc->sc_channel = prom_getpropint(node, "channel#", -1);
	sc->sc_burst = qec->sc_burst;

	qestop(sc);

	/* Note: no interrupt level passed */
	(void)bus_intr_establish(sa->sa_bustag, 0, IPL_NET, qeintr, sc);
	prom_getether(node, sc->sc_enaddr);

	/*
	 * Allocate descriptor ring and buffers.
	 */

	/* for now, allocate as many bufs as there are ring descriptors */
	sc->sc_rb.rb_ntbuf = QEC_XD_RING_MAXSIZE;
	sc->sc_rb.rb_nrbuf = QEC_XD_RING_MAXSIZE;

	size =	QEC_XD_RING_MAXSIZE * sizeof(struct qec_xd) +	/* TX ring */
		QEC_XD_RING_MAXSIZE * sizeof(struct qec_xd) +	/* RX ring */
		sc->sc_rb.rb_ntbuf * QE_PKT_BUF_SZ +
		sc->sc_rb.rb_nrbuf * QE_PKT_BUF_SZ;

	/* Get a DMA handle */
	if ((error = bus_dmamap_create(dmatag, size, 1, size, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
		aprint_error_dev(self, "DMA map create error %d\n", error);
		return;
	}

	/* Allocate DMA buffer */
	if ((error = bus_dmamem_alloc(dmatag, size, 0, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(self, "DMA buffer alloc error %d\n", error);
		return;
	}

	/* Map DMA buffer in CPU addressable space */
	if ((error = bus_dmamem_map(dmatag, &seg, rseg, size,
	    &sc->sc_rb.rb_membase,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(self, "DMA buffer map error %d\n", error);
		bus_dmamem_free(dmatag, &seg, rseg);
		return;
	}

	/* Load the buffer */
	if ((error = bus_dmamap_load(dmatag, sc->sc_dmamap,
	    sc->sc_rb.rb_membase, size, NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(self, "DMA buffer map load error %d\n",
		    error);
		bus_dmamem_unmap(dmatag, sc->sc_rb.rb_membase, size);
		bus_dmamem_free(dmatag, &seg, rseg);
		return;
	}
	sc->sc_rb.rb_dmabase = sc->sc_dmamap->dm_segs[0].ds_addr;

	/* Initialize media properties */
	ifmedia_init(&sc->sc_ifmedia, 0, qe_ifmedia_upd, qe_ifmedia_sts);
	ifmedia_add(&sc->sc_ifmedia,
	    IFM_MAKEWORD(IFM_ETHER, IFM_10_T, 0, 0), 0, NULL);
	ifmedia_add(&sc->sc_ifmedia,
	    IFM_MAKEWORD(IFM_ETHER, IFM_10_5, 0, 0), 0, NULL);
	ifmedia_add(&sc->sc_ifmedia,
	    IFM_MAKEWORD(IFM_ETHER, IFM_AUTO, 0, 0), 0, NULL);
	ifmedia_set(&sc->sc_ifmedia, IFM_ETHER|IFM_AUTO);

	memcpy(ifp->if_xname, device_xname(&sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_start = qestart;
	ifp->if_ioctl = qeioctl;
	ifp->if_watchdog = qewatchdog;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS |
	    IFF_MULTICAST;
	IFQ_SET_READY(&ifp->if_snd);

	/* Attach the interface. */
	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);

	printf(" address %s\n", ether_sprintf(sc->sc_enaddr));
}
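/*
 * Buffer strategy note: the hardware DMAs into fixed per-slot buffers in
 * the shared DMA area.  qe_get() below copies each received frame out of
 * its slot into a freshly allocated mbuf chain, and qe_put() copies
 * outgoing mbuf chains into fixed transmit slots; presumably this trades
 * an extra copy for never having to DMA-map individual mbufs.
 */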
/*
 * Pull data off an interface.
 * Len is the length of data, with local net header stripped.
 * We copy the data into mbufs.  When full cluster sized units are present,
 * we copy into clusters.
 */
static inline struct mbuf *
qe_get(struct qe_softc *sc, int idx, int totlen)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m;
	struct mbuf *top, **mp;
	int len, pad, boff = 0;
	void *bp;

	bp = (char *)sc->sc_rb.rb_rxbuf +
	    (idx % sc->sc_rb.rb_nrbuf) * QE_PKT_BUF_SZ;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = totlen;
	pad = ALIGN(sizeof(struct ether_header)) - sizeof(struct ether_header);
	m->m_data += pad;
	len = MHLEN - pad;
	top = NULL;
	mp = &top;

	while (totlen > 0) {
		if (top) {
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				m_freem(top);
				return (NULL);
			}
			len = MLEN;
		}
		if (top && totlen >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if (m->m_flags & M_EXT)
				len = MCLBYTES;
		}
		m->m_len = len = min(totlen, len);
		memcpy(mtod(m, void *), (char *)bp + boff, len);
		boff += len;
		totlen -= len;
		*mp = m;
		mp = &m->m_next;
	}

	return (top);
}

/*
 * Routine to copy from mbuf chain to transmit buffer in
 * network buffer memory.
 */
static inline int
qe_put(struct qe_softc *sc, int idx, struct mbuf *m)
{
	struct mbuf *n;
	int len, tlen = 0, boff = 0;
	void *bp;

	bp = (char *)sc->sc_rb.rb_txbuf +
	    (idx % sc->sc_rb.rb_ntbuf) * QE_PKT_BUF_SZ;

	for (; m; m = n) {
		len = m->m_len;
		if (len == 0) {
			MFREE(m, n);
			continue;
		}
		memcpy((char *)bp + boff, mtod(m, void *), len);
		boff += len;
		tlen += len;
		MFREE(m, n);
	}
	return (tlen);
}

/*
 * Pass a packet to the higher levels.
 */
static inline void
qe_read(struct qe_softc *sc, int idx, int len)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m;

	if (len <= sizeof(struct ether_header) ||
	    len > ETHERMTU + sizeof(struct ether_header)) {

		printf("%s: invalid packet size %d; dropping\n",
		    ifp->if_xname, len);

		ifp->if_ierrors++;
		return;
	}

	/*
	 * Pull packet off interface.
	 */
	m = qe_get(sc, idx, len);
	if (m == NULL) {
		ifp->if_ierrors++;
		return;
	}
	ifp->if_ipackets++;

#if NBPFILTER > 0
	/*
	 * Check if there's a BPF listener on this interface.
	 * If so, hand off the raw packet to BPF.
	 */
	if (ifp->if_bpf)
		bpf_mtap(ifp->if_bpf, m);
#endif
	/* Pass the packet up. */
	(*ifp->if_input)(ifp, m);
}
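/*
 * Descriptor ownership, as implemented below: qestart() copies a packet
 * into the next TX slot, sets QEC_XD_OWN (together with SOP/EOP and the
 * length) in that descriptor and pokes QE_CR_CTRL_TWAKEUP; the QEC clears
 * QEC_XD_OWN once the frame is on the wire, and qe_tint() reaps completed
 * descriptors until it finds one still owned by the hardware.  The RX
 * side works the same way in reverse: qe_rint() processes descriptors
 * whose OWN bit the hardware has cleared, then sets OWN again to hand the
 * buffer back.
 */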
/*
 * Start output on interface.
 * We make two assumptions here:
 *  1) that the current priority is set to splnet _before_ this code
 *     is called *and* is returned to the appropriate priority after
 *     return
 *  2) that the IFF_OACTIVE flag is checked before this code is called
 *     (i.e. that the output part of the interface is idle)
 */
void
qestart(struct ifnet *ifp)
{
	struct qe_softc *sc = (struct qe_softc *)ifp->if_softc;
	struct qec_xd *txd = sc->sc_rb.rb_txd;
	struct mbuf *m;
	unsigned int bix, len;
	unsigned int ntbuf = sc->sc_rb.rb_ntbuf;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	bix = sc->sc_rb.rb_tdhead;

	for (;;) {
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

#if NBPFILTER > 0
		/*
		 * If BPF is listening on this interface, let it see the
		 * packet before we commit it to the wire.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif

		/*
		 * Copy the mbuf chain into the transmit buffer.
		 */
		len = qe_put(sc, bix, m);

		/*
		 * Initialize transmit registers and start transmission
		 */
		txd[bix].xd_flags = QEC_XD_OWN | QEC_XD_SOP | QEC_XD_EOP |
		    (len & QEC_XD_LENGTH);
		bus_space_write_4(sc->sc_bustag, sc->sc_cr, QE_CRI_CTRL,
		    QE_CR_CTRL_TWAKEUP);

		if (++bix == QEC_XD_RING_MAXSIZE)
			bix = 0;

		if (++sc->sc_rb.rb_td_nbusy == ntbuf) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}
	}

	sc->sc_rb.rb_tdhead = bix;
}

void
qestop(struct qe_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mr = sc->sc_mr;
	bus_space_handle_t cr = sc->sc_cr;
	int n;

#if defined(SUN4U) || defined(__GNUC__)
	(void)&t;
#endif
	/* Stop the schwurst */
	bus_space_write_1(t, mr, QE_MRI_BIUCC, QE_MR_BIUCC_SWRST);
	for (n = 200; n > 0; n--) {
		if ((bus_space_read_1(t, mr, QE_MRI_BIUCC) &
		    QE_MR_BIUCC_SWRST) == 0)
			break;
		DELAY(20);
	}

	/* then reset */
	bus_space_write_4(t, cr, QE_CRI_CTRL, QE_CR_CTRL_RESET);
	for (n = 200; n > 0; n--) {
		if ((bus_space_read_4(t, cr, QE_CRI_CTRL) &
		    QE_CR_CTRL_RESET) == 0)
			break;
		DELAY(20);
	}
}

/*
 * Reset interface.
 */
void
qereset(struct qe_softc *sc)
{
	int s;

	s = splnet();
	qestop(sc);
	qeinit(sc);
	splx(s);
}

void
qewatchdog(struct ifnet *ifp)
{
	struct qe_softc *sc = ifp->if_softc;

	log(LOG_ERR, "%s: device timeout\n", device_xname(&sc->sc_dev));
	ifp->if_oerrors++;

	qereset(sc);
}
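/*
 * Interrupt layout note: the QEC's global status register packs four
 * bits of status per channel into successive nibbles, so qeintr() shifts
 * the register right by (4 * channel) and masks with 0xf to see whether
 * this channel raised the interrupt at all before reading the per-channel
 * status register.
 */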
/*
 * Interrupt dispatch.
 */
int
qeintr(void *arg)
{
	struct qe_softc *sc = (struct qe_softc *)arg;
	bus_space_tag_t t = sc->sc_bustag;
	u_int32_t qecstat, qestat;
	int r = 0;

#if defined(SUN4U) || defined(__GNUC__)
	(void)&t;
#endif
	/* Read QEC status and channel status */
	qecstat = bus_space_read_4(t, sc->sc_qr, QEC_QRI_STAT);
#ifdef QEDEBUG
	if (sc->sc_debug) {
		printf("qe%d: intr: qecstat=%x\n", sc->sc_channel, qecstat);
	}
#endif

	/* Filter out status for this channel */
	qecstat = qecstat >> (4 * sc->sc_channel);
	if ((qecstat & 0xf) == 0)
		return (r);

	qestat = bus_space_read_4(t, sc->sc_cr, QE_CRI_STAT);

#ifdef QEDEBUG
	if (sc->sc_debug) {
		char bits[64]; int i;
		bus_space_tag_t t1 = sc->sc_bustag;
		bus_space_handle_t mr = sc->sc_mr;

		snprintb(bits, sizeof(bits), QE_CR_STAT_BITS, qestat);
		printf("qe%d: intr: qestat=%s\n", sc->sc_channel, bits);

		printf("MACE registers:\n");
		for (i = 0; i < 32; i++) {
			printf(" m[%d]=%x,", i, bus_space_read_1(t1, mr, i));
			if (((i+1) & 7) == 0)
				printf("\n");
		}
	}
#endif

	if (qestat & QE_CR_STAT_ALLERRORS) {
#ifdef QEDEBUG
		if (sc->sc_debug) {
			char bits[64];
			snprintb(bits, sizeof(bits), QE_CR_STAT_BITS, qestat);
			printf("qe%d: eint: qestat=%s\n", sc->sc_channel,
			    bits);
		}
#endif
		r |= qe_eint(sc, qestat);
		if (r == -1)
			return (1);
	}

	if (qestat & QE_CR_STAT_TXIRQ)
		r |= qe_tint(sc);

	if (qestat & QE_CR_STAT_RXIRQ)
		r |= qe_rint(sc);

	return (r);
}

/*
 * Transmit interrupt.
 */
int
qe_tint(struct qe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	unsigned int bix, txflags;

	bix = sc->sc_rb.rb_tdtail;

	for (;;) {
		if (sc->sc_rb.rb_td_nbusy <= 0)
			break;

		txflags = sc->sc_rb.rb_txd[bix].xd_flags;

		if (txflags & QEC_XD_OWN)
			break;

		ifp->if_flags &= ~IFF_OACTIVE;
		ifp->if_opackets++;

		if (++bix == QEC_XD_RING_MAXSIZE)
			bix = 0;

		--sc->sc_rb.rb_td_nbusy;
	}

	sc->sc_rb.rb_tdtail = bix;

	qestart(ifp);

	if (sc->sc_rb.rb_td_nbusy == 0)
		ifp->if_timer = 0;

	return (1);
}

/*
 * Receive interrupt.
 */
int
qe_rint(struct qe_softc *sc)
{
	struct qec_xd *xd = sc->sc_rb.rb_rxd;
	unsigned int bix, len;
	unsigned int nrbuf = sc->sc_rb.rb_nrbuf;
#ifdef QEDEBUG
	int npackets = 0;
#endif

	bix = sc->sc_rb.rb_rdtail;

	/*
	 * Process all buffers with valid data.
	 */
	for (;;) {
		len = xd[bix].xd_flags;
		if (len & QEC_XD_OWN)
			break;

#ifdef QEDEBUG
		npackets++;
#endif

		len &= QEC_XD_LENGTH;
		len -= 4;	/* trim trailing FCS */
		qe_read(sc, bix, len);

		/*
		 * Return ownership of the descriptor nrbuf slots
		 * ahead, which is the one that reuses this buffer.
		 */
		xd[(bix+nrbuf) % QEC_XD_RING_MAXSIZE].xd_flags =
		    QEC_XD_OWN | (QE_PKT_BUF_SZ & QEC_XD_LENGTH);

		if (++bix == QEC_XD_RING_MAXSIZE)
			bix = 0;
	}
#ifdef QEDEBUG
	if (npackets == 0 && sc->sc_debug)
		printf("%s: rint: no packets; rb index %d; status 0x%x\n",
		    device_xname(&sc->sc_dev), bix, len);
#endif

	sc->sc_rb.rb_rdtail = bix;

	return (1);
}
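/*
 * qe_eint() returns -1 (rather than 1) when it has had to reset the chip,
 * so that qeintr() can bail out instead of touching rings that
 * qereset()/qeinit() have just reinitialized.
 */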
/*
 * Error interrupt.
 */
int
qe_eint(struct qe_softc *sc, u_int32_t why)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int r = 0, rst = 0;

	if (why & QE_CR_STAT_EDEFER) {
		printf("%s: excessive tx defers\n",
		    device_xname(&sc->sc_dev));
		r |= 1;
		ifp->if_oerrors++;
	}

	if (why & QE_CR_STAT_CLOSS) {
		printf("%s: no carrier, link down?\n",
		    device_xname(&sc->sc_dev));
		ifp->if_oerrors++;
		r |= 1;
	}

	if (why & QE_CR_STAT_ERETRIES) {
		printf("%s: excessive tx retries\n",
		    device_xname(&sc->sc_dev));
		ifp->if_oerrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_LCOLL) {
		printf("%s: tx late collision\n", device_xname(&sc->sc_dev));
		ifp->if_oerrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_FUFLOW) {
		printf("%s: tx fifo underflow\n", device_xname(&sc->sc_dev));
		ifp->if_oerrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_JERROR) {
		printf("%s: jabber seen\n", device_xname(&sc->sc_dev));
		r |= 1;
	}

	if (why & QE_CR_STAT_BERROR) {
		printf("%s: babble seen\n", device_xname(&sc->sc_dev));
		r |= 1;
	}

	if (why & QE_CR_STAT_TCCOFLOW) {
		/* 8-bit hardware counter wrapped: account for 256 events */
		ifp->if_collisions += 256;
		ifp->if_oerrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_TXDERROR) {
		printf("%s: tx descriptor is bad\n",
		    device_xname(&sc->sc_dev));
		rst = 1;
		r |= 1;
	}

	if (why & QE_CR_STAT_TXLERR) {
		printf("%s: tx late error\n", device_xname(&sc->sc_dev));
		ifp->if_oerrors++;
		rst = 1;
		r |= 1;
	}

	if (why & QE_CR_STAT_TXPERR) {
		printf("%s: tx DMA parity error\n",
		    device_xname(&sc->sc_dev));
		ifp->if_oerrors++;
		rst = 1;
		r |= 1;
	}

	if (why & QE_CR_STAT_TXSERR) {
		printf("%s: tx DMA sbus error ack\n",
		    device_xname(&sc->sc_dev));
		ifp->if_oerrors++;
		rst = 1;
		r |= 1;
	}

	if (why & QE_CR_STAT_RCCOFLOW) {
		ifp->if_collisions += 256;
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_RUOFLOW) {
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_MCOFLOW) {
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_RXFOFLOW) {
		printf("%s: rx fifo overflow\n", device_xname(&sc->sc_dev));
		ifp->if_ierrors++;
		r |= 1;
	}

	if (why & QE_CR_STAT_RLCOLL) {
		printf("%s: rx late collision\n", device_xname(&sc->sc_dev));
		ifp->if_ierrors++;
		ifp->if_collisions++;
		r |= 1;
	}

	if (why & QE_CR_STAT_FCOFLOW) {
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_CECOFLOW) {
		ifp->if_ierrors += 256;
		r |= 1;
	}

	if (why & QE_CR_STAT_RXDROP) {
		printf("%s: rx packet dropped\n", device_xname(&sc->sc_dev));
		ifp->if_ierrors++;
		r |= 1;
	}

	if (why & QE_CR_STAT_RXSMALL) {
		printf("%s: rx buffer too small\n",
		    device_xname(&sc->sc_dev));
		ifp->if_ierrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_RXLERR) {
		printf("%s: rx late error\n", device_xname(&sc->sc_dev));
		ifp->if_ierrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_RXPERR) {
		printf("%s: rx DMA parity error\n",
		    device_xname(&sc->sc_dev));
		ifp->if_ierrors++;
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_RXSERR) {
		printf("%s: rx DMA sbus error ack\n",
		    device_xname(&sc->sc_dev));
		ifp->if_ierrors++;
		r |= 1;
		rst = 1;
	}

	if (r == 0)
		aprint_error_dev(&sc->sc_dev,
		    "unexpected interrupt error: %08x\n", why);

	if (rst) {
		printf("%s: resetting...\n", device_xname(&sc->sc_dev));
		qereset(sc);
		return (-1);
	}

	return (r);
}
int
qeioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct qe_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *)data;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCINITIFADDR:
		ifp->if_flags |= IFF_UP;
		qeinit(sc);
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			arp_ifinit(ifp, ifa);
			break;
#endif /* INET */
		default:
			break;
		}
		break;

	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;
		/* XXX re-use ether_ioctl() */
		switch (ifp->if_flags & (IFF_UP|IFF_RUNNING)) {
		case IFF_RUNNING:
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			qestop(sc);
			ifp->if_flags &= ~IFF_RUNNING;
			break;
		case IFF_UP:
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			qeinit(sc);
			break;
		default:
			/*
			 * Reset the interface to pick up changes in any other
			 * flags that affect hardware registers.
			 */
			qestop(sc);
			qeinit(sc);
			break;
		}
#ifdef QEDEBUG
		sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
#endif
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			if (ifp->if_flags & IFF_RUNNING)
				qe_mcreset(sc);
			error = 0;
		}
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_ifmedia, cmd);
		break;

	default:
		error = EINVAL;
		break;
	}

	splx(s);
	return (error);
}
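/*
 * qeinit() below reprograms the whole channel from scratch: it stops the
 * hardware, re-carves the descriptor rings (qec_meminit()), loads the QEC
 * channel registers and the MACE core registers, sets the station address
 * and multicast filter, and finally marks the interface running.  It can
 * therefore be called repeatedly; both qereset() and the ioctl paths
 * rely on that.
 */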
void
qeinit(struct qe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t cr = sc->sc_cr;
	bus_space_handle_t mr = sc->sc_mr;
	struct qec_softc *qec = sc->sc_qec;
	u_int32_t qecaddr;
	u_int8_t *ea;
	int s;

#if defined(SUN4U) || defined(__GNUC__)
	(void)&t;
#endif
	s = splnet();

	qestop(sc);

	/*
	 * Allocate descriptor ring and buffers
	 */
	qec_meminit(&sc->sc_rb, QE_PKT_BUF_SZ);

	/* Channel registers: */
	bus_space_write_4(t, cr, QE_CRI_RXDS, (u_int32_t)sc->sc_rb.rb_rxddma);
	bus_space_write_4(t, cr, QE_CRI_TXDS, (u_int32_t)sc->sc_rb.rb_txddma);

	bus_space_write_4(t, cr, QE_CRI_RIMASK, 0);
	bus_space_write_4(t, cr, QE_CRI_TIMASK, 0);
	bus_space_write_4(t, cr, QE_CRI_QMASK, 0);
	bus_space_write_4(t, cr, QE_CRI_MMASK, QE_CR_MMASK_RXCOLL);
	bus_space_write_4(t, cr, QE_CRI_CCNT, 0);
	bus_space_write_4(t, cr, QE_CRI_PIPG, 0);

	qecaddr = sc->sc_channel * qec->sc_msize;
	bus_space_write_4(t, cr, QE_CRI_RXWBUF, qecaddr);
	bus_space_write_4(t, cr, QE_CRI_RXRBUF, qecaddr);
	bus_space_write_4(t, cr, QE_CRI_TXWBUF, qecaddr + qec->sc_rsize);
	bus_space_write_4(t, cr, QE_CRI_TXRBUF, qecaddr + qec->sc_rsize);

	/* MACE registers: */
	bus_space_write_1(t, mr, QE_MRI_PHYCC, QE_MR_PHYCC_ASEL);
	bus_space_write_1(t, mr, QE_MRI_XMTFC, QE_MR_XMTFC_APADXMT);
	bus_space_write_1(t, mr, QE_MRI_RCVFC, 0);

	/*
	 * Mask MACE's receive interrupt, since we're being notified
	 * by the QEC after DMA completes.
	 */
	bus_space_write_1(t, mr, QE_MRI_IMR,
	    QE_MR_IMR_CERRM | QE_MR_IMR_RCVINTM);

	bus_space_write_1(t, mr, QE_MRI_BIUCC,
	    QE_MR_BIUCC_BSWAP | QE_MR_BIUCC_64TS);

	bus_space_write_1(t, mr, QE_MRI_FIFOFC,
	    QE_MR_FIFOCC_TXF16 | QE_MR_FIFOCC_RXF32 |
	    QE_MR_FIFOCC_RFWU | QE_MR_FIFOCC_TFWU);

	bus_space_write_1(t, mr, QE_MRI_PLSCC, QE_MR_PLSCC_TP);

	/*
	 * Station address
	 */
	ea = sc->sc_enaddr;
	bus_space_write_1(t, mr, QE_MRI_IAC,
	    QE_MR_IAC_ADDRCHG | QE_MR_IAC_PHYADDR);
	bus_space_write_multi_1(t, mr, QE_MRI_PADR, ea, 6);

	/* Apply media settings */
	qe_ifmedia_upd(ifp);

	/*
	 * Clear Logical address filter
	 */
	bus_space_write_1(t, mr, QE_MRI_IAC,
	    QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR);
	bus_space_set_multi_1(t, mr, QE_MRI_LADRF, 0, 8);
	bus_space_write_1(t, mr, QE_MRI_IAC, 0);

	/* Clear missed packet count (register cleared on read) */
	(void)bus_space_read_1(t, mr, QE_MRI_MPC);

#if 0
	/* test register: */
	bus_space_write_1(t, mr, QE_MRI_UTR, 0);
#endif

	/* Reset multicast filter */
	qe_mcreset(sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	splx(s);
}

#ifndef MC_POLY_LE
/* Reflected (LE) CRC-32 polynomial; guarded in case a header defines it */
#define MC_POLY_LE	0xedb88320
#endif

/*
 * Reset multicast filter.
 */
void
qe_mcreset(struct qe_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mr = sc->sc_mr;
	struct ether_multi *enm;
	struct ether_multistep step;
	u_int32_t crc;
	u_int16_t hash[4];
	u_int8_t octet, maccc, *ladrp = (u_int8_t *)&hash[0];
	int i, j;

#if defined(SUN4U) || defined(__GNUC__)
	(void)&t;
#endif

	/* We also enable transmitter & receiver here */
	maccc = QE_MR_MACCC_ENXMT | QE_MR_MACCC_ENRCV;

	if (ifp->if_flags & IFF_PROMISC) {
		maccc |= QE_MR_MACCC_PROM;
		bus_space_write_1(t, mr, QE_MRI_MACCC, maccc);
		return;
	}

	if (ifp->if_flags & IFF_ALLMULTI) {
		bus_space_write_1(t, mr, QE_MRI_IAC,
		    QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR);
		bus_space_set_multi_1(t, mr, QE_MRI_LADRF, 0xff, 8);
		bus_space_write_1(t, mr, QE_MRI_IAC, 0);
		bus_space_write_1(t, mr, QE_MRI_MACCC, maccc);
		return;
	}

	hash[3] = hash[2] = hash[1] = hash[0] = 0;

	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi,
		    ETHER_ADDR_LEN) != 0) {
			/*
			 * We must listen to a range of multicast
			 * addresses.  For now, just accept all
			 * multicasts, rather than trying to set only
			 * those filter bits needed to match the range.
			 * (At this time, the only use of address
			 * ranges is for IP multicast routing, for
			 * which the range is big enough to require
			 * all bits set.)
			 */
			bus_space_write_1(t, mr, QE_MRI_IAC,
			    QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR);
			bus_space_set_multi_1(t, mr, QE_MRI_LADRF, 0xff, 8);
			bus_space_write_1(t, mr, QE_MRI_IAC, 0);
			ifp->if_flags |= IFF_ALLMULTI;
			break;
		}

		/* Bit-serial CRC-32 over the address, LSB first */
		crc = 0xffffffff;

		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			octet = enm->enm_addrlo[i];

			for (j = 0; j < 8; j++) {
				if ((crc & 1) ^ (octet & 1)) {
					crc >>= 1;
					crc ^= MC_POLY_LE;
				} else
					crc >>= 1;
				octet >>= 1;
			}
		}

		/* Top 6 CRC bits index one of the 64 filter bits */
		crc >>= 26;
		hash[crc >> 4] |= 1 << (crc & 0xf);
		ETHER_NEXT_MULTI(step, enm);
	}

	bus_space_write_1(t, mr, QE_MRI_IAC,
	    QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR);
	bus_space_write_multi_1(t, mr, QE_MRI_LADRF, ladrp, 8);
	bus_space_write_1(t, mr, QE_MRI_IAC, 0);
	bus_space_write_1(t, mr, QE_MRI_MACCC, maccc);
}
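/*
 * Illustrative sketch (not compiled): the filter-bit computation used in
 * qe_mcreset() above, factored into a standalone, hypothetical helper.
 * For an address such as the all-hosts group 01:00:5e:00:00:01, the
 * bit-reversed CRC-32 of the six octets is shifted down by 26, leaving a
 * 6-bit index; `hash[idx >> 4]' selects one of the four 16-bit filter
 * words and `1 << (idx & 0xf)' the bit within it.
 */
#if 0
static u_int8_t
qe_mchash_example(const u_int8_t *ea)	/* hypothetical helper */
{
	u_int32_t crc = 0xffffffff;
	u_int8_t octet;
	int i, j;

	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		octet = ea[i];
		for (j = 0; j < 8; j++) {
			/* same update as the loop in qe_mcreset() */
			if ((crc ^ octet) & 1)
				crc = (crc >> 1) ^ MC_POLY_LE;
			else
				crc >>= 1;
			octet >>= 1;
		}
	}
	return (crc >> 26);	/* 6-bit logical address filter index */
}
#endif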
/*
 * Get current media settings.
 */
void
qe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct qe_softc *sc = ifp->if_softc;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mr = sc->sc_mr;
	u_int8_t v;

#if defined(SUN4U) || defined(__GNUC__)
	(void)&t;
#endif
	v = bus_space_read_1(t, mr, QE_MRI_PLSCC);

	switch (v & QE_MR_PLSCC_PORTMASK) {
	case QE_MR_PLSCC_TP:
		ifmr->ifm_active = IFM_ETHER | IFM_10_T;
		break;
	case QE_MR_PLSCC_AUI:
		ifmr->ifm_active = IFM_ETHER | IFM_10_5;
		break;
	case QE_MR_PLSCC_GPSI:
	case QE_MR_PLSCC_DAI:
		/* ... */
		break;
	}

	v = bus_space_read_1(t, mr, QE_MRI_PHYCC);
	ifmr->ifm_status |= IFM_AVALID;
	if ((v & QE_MR_PHYCC_LNKFL) != 0)
		ifmr->ifm_status &= ~IFM_ACTIVE;
	else
		ifmr->ifm_status |= IFM_ACTIVE;
}

/*
 * Set media options.
 */
int
qe_ifmedia_upd(struct ifnet *ifp)
{
	struct qe_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_ifmedia;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mr = sc->sc_mr;
	int newmedia = ifm->ifm_media;
	u_int8_t plscc, phycc;

#if defined(SUN4U) || defined(__GNUC__)
	(void)&t;
#endif
	if (IFM_TYPE(newmedia) != IFM_ETHER)
		return (EINVAL);

	plscc = bus_space_read_1(t, mr, QE_MRI_PLSCC) & ~QE_MR_PLSCC_PORTMASK;
	phycc = bus_space_read_1(t, mr, QE_MRI_PHYCC) & ~QE_MR_PHYCC_ASEL;

	if (IFM_SUBTYPE(newmedia) == IFM_AUTO)
		phycc |= QE_MR_PHYCC_ASEL;
	else if (IFM_SUBTYPE(newmedia) == IFM_10_T)
		plscc |= QE_MR_PLSCC_TP;
	else if (IFM_SUBTYPE(newmedia) == IFM_10_5)
		plscc |= QE_MR_PLSCC_AUI;

	bus_space_write_1(t, mr, QE_MRI_PLSCC, plscc);
	bus_space_write_1(t, mr, QE_MRI_PHYCC, phycc);

	return (0);
}