1 /* $NetBSD: qe.c,v 1.77 2020/03/19 02:58:54 thorpej Exp $ */ 2 3 /*- 4 * Copyright (c) 1999 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Paul Kranenburg. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 */ 31 32 /* 33 * Copyright (c) 1998 Jason L. Wright. 34 * All rights reserved. 35 * 36 * Redistribution and use in source and binary forms, with or without 37 * modification, are permitted provided that the following conditions 38 * are met: 39 * 1. Redistributions of source code must retain the above copyright 40 * notice, this list of conditions and the following disclaimer. 41 * 2. 
Redistributions in binary form must reproduce the above copyright 42 * notice, this list of conditions and the following disclaimer in the 43 * documentation and/or other materials provided with the distribution. 44 * 3. The name of the authors may not be used to endorse or promote products 45 * derived from this software without specific prior written permission. 46 * 47 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR 48 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 49 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 50 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT, 51 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 52 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 53 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 54 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 55 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 56 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 57 */ 58 59 /* 60 * Driver for the SBus qec+qe QuadEthernet board. 61 * 62 * This driver was written using the AMD MACE Am79C940 documentation, some 63 * ideas gleaned from the S/Linux driver for this card, Solaris header files, 64 * and a loan of a card from Paul Southworth of the Internet Engineering 65 * Group (www.ieng.com). 
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: qe.c,v 1.77 2020/03/19 02:58:54 thorpej Exp $");

/* Compile in the sc_debug-gated diagnostics below. */
#define QEDEBUG

#include "opt_ddb.h"
#include "opt_inet.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/errno.h>
#include <sys/ioctl.h>
#include <sys/mbuf.h>
#include <sys/socket.h>
#include <sys/syslog.h>
#include <sys/device.h>
#include <sys/malloc.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/netisr.h>
#include <net/if_media.h>
#include <net/if_ether.h>
#include <net/bpf.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/if_inarp.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#endif

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/autoconf.h>

#include <dev/sbus/sbusvar.h>
#include <dev/sbus/qecreg.h>
#include <dev/sbus/qecvar.h>
#include <dev/sbus/qereg.h>

/*
 * Per-channel driver state; one instance is attached for each MACE
 * channel found on the QEC parent.
 */
struct qe_softc {
	device_t	sc_dev;
	bus_space_tag_t	sc_bustag;	/* bus & DMA tags */
	bus_dma_tag_t	sc_dmatag;
	bus_dmamap_t	sc_dmamap;	/* covers the descriptor+buffer area */
	struct ethercom	sc_ethercom;	/* common ethernet state, incl. ifnet */
	struct ifmedia	sc_ifmedia;	/* interface media */

	struct qec_softc *sc_qec;	/* QEC parent */

	bus_space_handle_t	sc_qr;	/* QEC registers */
	bus_space_handle_t	sc_mr;	/* MACE registers */
	bus_space_handle_t	sc_cr;	/* channel registers */

	int	sc_channel;		/* channel number */
	u_int	sc_rev;			/* board revision */

	int	sc_burst;		/* DMA burst size, copied from parent */

	struct qec_ring	sc_rb;		/* Packet Ring Buffer */

	/* MAC address */
	uint8_t	sc_enaddr[6];

#ifdef QEDEBUG
	int	sc_debug;		/* mirrors IFF_DEBUG; gates printfs */
#endif
};

/* autoconfiguration entry points */
int	qematch(device_t, cfdata_t, void *);
void	qeattach(device_t, device_t, void *);

void	qeinit(struct qe_softc *);
void	qestart(struct ifnet *);
void	qestop(struct qe_softc *);
void
qewatchdog(struct ifnet *);
int	qeioctl(struct ifnet *, u_long, void *);
void	qereset(struct qe_softc *);

/* interrupt service */
int	qeintr(void *);
int	qe_eint(struct qe_softc *, uint32_t);
int	qe_rint(struct qe_softc *);
int	qe_tint(struct qe_softc *);
void	qe_mcreset(struct qe_softc *);

/* packet buffer copy helpers */
static int	qe_put(struct qe_softc *, int, struct mbuf *);
static void	qe_read(struct qe_softc *, int, int);
static struct mbuf	*qe_get(struct qe_softc *, int, int);

/* ifmedia callbacks */
void	qe_ifmedia_sts(struct ifnet *, struct ifmediareq *);
int	qe_ifmedia_upd(struct ifnet *);

CFATTACH_DECL_NEW(qe, sizeof(struct qe_softc),
    qematch, qeattach, NULL, NULL);

/*
 * Match if the OBP node name equals our driver name.
 */
int
qematch(device_t parent, cfdata_t cf, void *aux)
{
	struct sbus_attach_args *sa = aux;

	return (strcmp(cf->cf_name, sa->sa_name) == 0);
}

/*
 * Attach one QE channel: map its two register banks, read PROM
 * properties, allocate and DMA-map the descriptor rings and packet
 * buffers, and attach the network interface.
 */
void
qeattach(device_t parent, device_t self, void *aux)
{
	struct sbus_attach_args *sa = aux;
	struct qec_softc *qec = device_private(parent);
	struct qe_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int node = sa->sa_node;
	bus_dma_tag_t dmatag = sa->sa_dmatag;
	bus_dma_segment_t seg;
	bus_size_t size;
	int rseg, error;

	sc->sc_dev = self;

	/* We need both the channel and MACE register banks. */
	if (sa->sa_nreg < 2) {
		printf("%s: only %d register sets\n",
		    device_xname(self), sa->sa_nreg);
		return;
	}

	/* Register bank 0: per-channel QEC registers. */
	if (bus_space_map(sa->sa_bustag,
	    (bus_addr_t)BUS_ADDR(
		sa->sa_reg[0].oa_space,
		sa->sa_reg[0].oa_base),
	    (bus_size_t)sa->sa_reg[0].oa_size,
	    0, &sc->sc_cr) != 0) {
		aprint_error_dev(self, "cannot map registers\n");
		return;
	}

	/* Register bank 1: MACE (Am79C940) registers. */
	if (bus_space_map(sa->sa_bustag,
	    (bus_addr_t)BUS_ADDR(
		sa->sa_reg[1].oa_space,
		sa->sa_reg[1].oa_base),
	    (bus_size_t)sa->sa_reg[1].oa_size,
	    0, &sc->sc_mr) != 0) {
		aprint_error_dev(self, "cannot map registers\n");
		return;
	}

	sc->sc_rev = prom_getpropint(node, "mace-version", -1);
	printf(" rev %x", sc->sc_rev);

	sc->sc_bustag = sa->sa_bustag;
	sc->sc_dmatag = sa->sa_dmatag;
	sc->sc_qec = qec;
	sc->sc_qr = qec->sc_regs;

	sc->sc_channel = prom_getpropint(node, "channel#", -1);
	sc->sc_burst = qec->sc_burst;

	/* Make sure the chip is quiescent before hooking the interrupt. */
	qestop(sc);

	/* Note: no interrupt level passed */
	(void)bus_intr_establish(sa->sa_bustag, 0, IPL_NET, qeintr, sc);
	prom_getether(node, sc->sc_enaddr);

	/*
	 * Allocate descriptor ring and buffers.
	 */

	/* for now, allocate as many bufs as there are ring descriptors */
	sc->sc_rb.rb_ntbuf = QEC_XD_RING_MAXSIZE;
	sc->sc_rb.rb_nrbuf = QEC_XD_RING_MAXSIZE;

	/* tx ring + rx ring + tx buffers + rx buffers, in one area */
	size =
	    QEC_XD_RING_MAXSIZE * sizeof(struct qec_xd) +
	    QEC_XD_RING_MAXSIZE * sizeof(struct qec_xd) +
	    sc->sc_rb.rb_ntbuf * QE_PKT_BUF_SZ +
	    sc->sc_rb.rb_nrbuf * QE_PKT_BUF_SZ;

	/* Get a DMA handle */
	if ((error = bus_dmamap_create(dmatag, size, 1, size, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
		aprint_error_dev(self, "DMA map create error %d\n",
		    error);
		return;
	}

	/* Allocate DMA buffer */
	if ((error = bus_dmamem_alloc(dmatag, size, 0, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(self, "DMA buffer alloc error %d\n",
		    error);
		return;
	}

	/* Map DMA buffer in CPU addressable space */
	if ((error = bus_dmamem_map(dmatag, &seg, rseg, size,
	    &sc->sc_rb.rb_membase,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(self, "DMA buffer map error %d\n",
		    error);
		bus_dmamem_free(dmatag, &seg, rseg);
		return;
	}

	/* Load the buffer */
	if ((error = bus_dmamap_load(dmatag, sc->sc_dmamap,
	    sc->sc_rb.rb_membase, size, NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(self, "DMA buffer map load error %d\n",
		    error);
		bus_dmamem_unmap(dmatag, sc->sc_rb.rb_membase, size);
		bus_dmamem_free(dmatag, &seg, rseg);
		return;
	}
	sc->sc_rb.rb_dmabase = sc->sc_dmamap->dm_segs[0].ds_addr;

	/* Initialize media properties */
	sc->sc_ethercom.ec_ifmedia = &sc->sc_ifmedia;
	ifmedia_init(&sc->sc_ifmedia, 0, qe_ifmedia_upd, qe_ifmedia_sts);
	ifmedia_add(&sc->sc_ifmedia,
	    IFM_MAKEWORD(IFM_ETHER, IFM_10_T, 0, 0),
	    0, NULL);
	ifmedia_add(&sc->sc_ifmedia,
	    IFM_MAKEWORD(IFM_ETHER, IFM_10_5, 0, 0),
	    0, NULL);
	ifmedia_add(&sc->sc_ifmedia,
	    IFM_MAKEWORD(IFM_ETHER, IFM_AUTO, 0, 0),
	    0, NULL);
	ifmedia_set(&sc->sc_ifmedia, IFM_ETHER | IFM_AUTO);

	memcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_start = qestart;
	ifp->if_ioctl = qeioctl;
	ifp->if_watchdog = qewatchdog;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	IFQ_SET_READY(&ifp->if_snd);

	/* Attach the interface. */
	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);

	printf(" address %s\n", ether_sprintf(sc->sc_enaddr));
}

/*
 * Pull data off an interface.
 * Len is the length of data, with local net header stripped.
 * We copy the data into mbufs. When full cluster sized units are present,
 * we copy into clusters.
 */
static inline struct mbuf *
qe_get(struct qe_softc *sc, int idx, int totlen)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m;
	struct mbuf *top, **mp;
	int len, pad, boff = 0;
	uint8_t *bp;

	/* Source: the receive buffer slot for ring index 'idx'. */
	bp = sc->sc_rb.rb_rxbuf + (idx % sc->sc_rb.rb_nrbuf) * QE_PKT_BUF_SZ;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (NULL);
	m_set_rcvif(m, ifp);
	m->m_pkthdr.len = totlen;
	/* Offset the payload so the IP header ends up aligned. */
	pad = ALIGN(sizeof(struct ether_header)) - sizeof(struct ether_header);
	m->m_data += pad;
	len = MHLEN - pad;
	top = NULL;
	mp = &top;

	while (totlen > 0) {
		if (top) {
			/* Subsequent mbufs carry no packet header. */
			MGET(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				m_freem(top);
				return (NULL);
			}
			len = MLEN;
		}
		if (top && totlen >= MINCLSIZE) {
			MCLGET(m, M_DONTWAIT);
			if (m->m_flags & M_EXT)
				len = MCLBYTES;
		}
		m->m_len = len = uimin(totlen, len);
		memcpy(mtod(m, void *), bp + boff, len);
		boff += len;
		totlen -= len;
		/* Link the new mbuf onto the tail of the chain. */
		*mp = m;
		mp = &m->m_next;
	}

	return (top);
}

/*
 * Routine to copy from mbuf chain to transmit buffer in
 * network buffer memory.
 */
inline int
qe_put(struct qe_softc *sc, int idx, struct mbuf *m)
{
	struct mbuf *n;
	int len, tlen = 0, boff = 0;
	uint8_t *bp;

	/* Destination: the transmit buffer slot for ring index 'idx'. */
	bp = sc->sc_rb.rb_txbuf + (idx % sc->sc_rb.rb_ntbuf) * QE_PKT_BUF_SZ;

	/* Copy each mbuf's data, freeing the chain as we go. */
	for (; m; m = n) {
		len = m->m_len;
		if (len == 0) {
			n = m_free(m);
			continue;
		}
		memcpy(bp + boff, mtod(m, void *), len);
		boff += len;
		tlen += len;
		n = m_free(m);
	}
	/* Returns the total number of bytes copied. */
	return (tlen);
}

/*
 * Pass a packet to the higher levels.
 */
inline void
qe_read(struct qe_softc *sc, int idx, int len)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mbuf *m;

	/* Sanity-check the length reported by the descriptor. */
	if (len <= sizeof(struct ether_header) ||
	    len > ETHERMTU + sizeof(struct ether_header)) {

		printf("%s: invalid packet size %d; dropping\n",
		    ifp->if_xname, len);

		if_statinc(ifp, if_ierrors);
		return;
	}

	/*
	 * Pull packet off interface.
	 */
	m = qe_get(sc, idx, len);
	if (m == NULL) {
		/* mbuf allocation failed; count the drop. */
		if_statinc(ifp, if_ierrors);
		return;
	}

	/* Pass the packet up. */
	if_percpuq_enqueue(ifp->if_percpuq, m);
}

/*
 * Start output on interface.
 * We make an assumption here:
 *  1) that the current priority is set to splnet _before_ this code
 *     is called *and* is returned to the appropriate priority after
 *     return
 */
void
qestart(struct ifnet *ifp)
{
	struct qe_softc *sc = ifp->if_softc;
	struct qec_xd *txd = sc->sc_rb.rb_txd;
	struct mbuf *m;
	unsigned int bix, len;
	unsigned int ntbuf = sc->sc_rb.rb_ntbuf;

	if ((ifp->if_flags & IFF_RUNNING) != IFF_RUNNING)
		return;

	bix = sc->sc_rb.rb_tdhead;

	/* Queue packets while free transmit descriptors remain. */
	while (sc->sc_rb.rb_td_nbusy < ntbuf) {
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == 0)
			break;

		/*
		 * If BPF is listening on this interface, let it see the
		 * packet before we commit it to the wire.
		 */
		bpf_mtap(ifp, m, BPF_D_OUT);

		/*
		 * Copy the mbuf chain into the transmit buffer.
		 */
		len = qe_put(sc, bix, m);

		/*
		 * Initialize transmit registers and start transmission
		 */
		txd[bix].xd_flags = QEC_XD_OWN | QEC_XD_SOP | QEC_XD_EOP |
		    (len & QEC_XD_LENGTH);
		bus_space_write_4(sc->sc_bustag, sc->sc_cr, QE_CRI_CTRL,
		    QE_CR_CTRL_TWAKEUP);

		/* Advance the head index, wrapping around the ring. */
		if (++bix == QEC_XD_RING_MAXSIZE)
			bix = 0;

		sc->sc_rb.rb_td_nbusy++;
	}

	sc->sc_rb.rb_tdhead = bix;
}

/*
 * Stop the transceiver: software-reset the MACE, then reset the channel.
 */
void
qestop(struct qe_softc *sc)
{
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t mr = sc->sc_mr;
	bus_space_handle_t cr = sc->sc_cr;
	int n;

#if defined(SUN4U) || defined(__GNUC__)
	(void)&t;
#endif
	/* Stop the schwurst */
	bus_space_write_1(t, mr, QE_MRI_BIUCC, QE_MR_BIUCC_SWRST);
	/* Poll until the self-clearing reset bit drops (bounded wait). */
	for (n = 200; n > 0; n--) {
		if ((bus_space_read_1(t, mr, QE_MRI_BIUCC) &
		    QE_MR_BIUCC_SWRST) == 0)
			break;
		DELAY(20);
	}

	/* then reset */
	bus_space_write_4(t, cr, QE_CRI_CTRL, QE_CR_CTRL_RESET);
	for (n = 200; n > 0; n--) {
		if ((bus_space_read_4(t, cr, QE_CRI_CTRL) &
		    QE_CR_CTRL_RESET) == 0)
			break;
		DELAY(20);
	}
}

/*
 * Reset interface.
 */
void
qereset(struct qe_softc *sc)
{
	int s;

	s = splnet();
	qestop(sc);
	qeinit(sc);
	splx(s);
}

/*
 * Watchdog: the transmitter stalled; log it, count an output error
 * and reset the interface.
 */
void
qewatchdog(struct ifnet *ifp)
{
	struct qe_softc *sc = ifp->if_softc;

	log(LOG_ERR, "%s: device timeout\n", device_xname(sc->sc_dev));
	if_statinc(ifp, if_oerrors);

	qereset(sc);
}

/*
 * Interrupt dispatch.
 */
int
qeintr(void *arg)
{
	struct qe_softc *sc = arg;
	bus_space_tag_t t = sc->sc_bustag;
	uint32_t qecstat, qestat;
	int r = 0;

#if defined(SUN4U) || defined(__GNUC__)
	(void)&t;
#endif
	/* Read QEC status and channel status */
	qecstat = bus_space_read_4(t, sc->sc_qr, QEC_QRI_STAT);
#ifdef QEDEBUG
	if (sc->sc_debug) {
		printf("qe%d: intr: qecstat=%x\n", sc->sc_channel, qecstat);
	}
#endif

	/* Filter out status for this channel */
	qecstat = qecstat >> (4 * sc->sc_channel);
	if ((qecstat & 0xf) == 0)
		return (r);	/* nothing pending for this channel */

	qestat = bus_space_read_4(t, sc->sc_cr, QE_CRI_STAT);

#ifdef QEDEBUG
	if (sc->sc_debug) {
		char bits[64]; int i;
		bus_space_tag_t t1 = sc->sc_bustag;
		bus_space_handle_t mr = sc->sc_mr;

		snprintb(bits, sizeof(bits), QE_CR_STAT_BITS, qestat);
		printf("qe%d: intr: qestat=%s\n", sc->sc_channel, bits);

		/* Dump all 32 MACE registers, 8 per output line. */
		printf("MACE registers:\n");
		for (i = 0 ; i < 32; i++) {
			printf("  m[%d]=%x,", i, bus_space_read_1(t1, mr, i));
			if (((i+1) & 7) == 0)
				printf("\n");
		}
	}
#endif

	if (qestat & QE_CR_STAT_ALLERRORS) {
#ifdef QEDEBUG
		if (sc->sc_debug) {
			char bits[64];
			snprintb(bits, sizeof(bits), QE_CR_STAT_BITS, qestat);
			printf("qe%d: eint: qestat=%s\n", sc->sc_channel, bits);
		}
#endif
		/* qe_eint() returns -1 when it reset the chip. */
		r |= qe_eint(sc, qestat);
		if (r == -1)
			return (1);
	}

	if (qestat & QE_CR_STAT_TXIRQ)
		r |= qe_tint(sc);

	if (qestat & QE_CR_STAT_RXIRQ)
		r |= qe_rint(sc);

	return (r);
}

/*
 * Transmit interrupt.
 */
int
qe_tint(struct qe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	unsigned int bix, txflags;

	bix = sc->sc_rb.rb_tdtail;

	/* Reclaim every descriptor the chip has finished with. */
	for (;;) {
		if (sc->sc_rb.rb_td_nbusy <= 0)
			break;

		txflags = sc->sc_rb.rb_txd[bix].xd_flags;

		/* Still owned by the chip: stop here. */
		if (txflags & QEC_XD_OWN)
			break;

		if_statinc(ifp, if_opackets);

		if (++bix == QEC_XD_RING_MAXSIZE)
			bix = 0;

		--sc->sc_rb.rb_td_nbusy;
	}

	sc->sc_rb.rb_tdtail = bix;

	/* Descriptors freed up; try to queue more output. */
	qestart(ifp);

	if (sc->sc_rb.rb_td_nbusy == 0)
		ifp->if_timer = 0;

	return (1);
}

/*
 * Receive interrupt.
 */
int
qe_rint(struct qe_softc *sc)
{
	struct qec_xd *xd = sc->sc_rb.rb_rxd;
	unsigned int bix, len;
	unsigned int nrbuf = sc->sc_rb.rb_nrbuf;
#ifdef QEDEBUG
	int npackets = 0;
#endif

	bix = sc->sc_rb.rb_rdtail;

	/*
	 * Process all buffers with valid data.
	 */
	for (;;) {
		len = xd[bix].xd_flags;
		if (len & QEC_XD_OWN)
			break;	/* chip still owns this descriptor */

#ifdef QEDEBUG
		npackets++;
#endif

		/* Strip the 4 trailing FCS bytes from the reported length. */
		len &= QEC_XD_LENGTH;
		len -= 4;
		qe_read(sc, bix, len);

		/* ... */
		/* Hand a buffer back to the chip, one ring-full ahead. */
		xd[(bix+nrbuf) % QEC_XD_RING_MAXSIZE].xd_flags =
		    QEC_XD_OWN | (QE_PKT_BUF_SZ & QEC_XD_LENGTH);

		if (++bix == QEC_XD_RING_MAXSIZE)
			bix = 0;
	}
#ifdef QEDEBUG
	/* 'len' here holds the last descriptor flags read above. */
	if (npackets == 0 && sc->sc_debug)
		printf("%s: rint: no packets; rb index %d; status 0x%x\n",
		    device_xname(sc->sc_dev), bix, len);
#endif

	sc->sc_rb.rb_rdtail = bix;

	return (1);
}

/*
 * Error interrupt.
 */
int
qe_eint(struct qe_softc *sc, uint32_t why)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	device_t self = sc->sc_dev;
	const char *xname = device_xname(self);
	/*
	 * r: set when at least one status bit was recognized.
	 * rst: set when the condition warrants a chip reset.
	 */
	int r = 0, rst = 0;

	net_stat_ref_t nsr = IF_STAT_GETREF(ifp);

	if (why & QE_CR_STAT_EDEFER) {
		printf("%s: excessive tx defers.\n", xname);
		r |= 1;
		if_statinc_ref(nsr, if_oerrors);
	}

	if (why & QE_CR_STAT_CLOSS) {
		printf("%s: no carrier, link down?\n", xname);
		if_statinc_ref(nsr, if_oerrors);
		r |= 1;
	}

	if (why & QE_CR_STAT_ERETRIES) {
		printf("%s: excessive tx retries\n", xname);
		if_statinc_ref(nsr, if_oerrors);
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_LCOLL) {
		printf("%s: late tx transmission\n", xname);
		if_statinc_ref(nsr, if_oerrors);
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_FUFLOW) {
		printf("%s: tx fifo underflow\n", xname);
		if_statinc_ref(nsr, if_oerrors);
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_JERROR) {
		printf("%s: jabber seen\n", xname);
		r |= 1;
	}

	if (why & QE_CR_STAT_BERROR) {
		printf("%s: babble seen\n", xname);
		r |= 1;
	}

	/* The *COFLOW bits signal an 8-bit counter overflow (256 events). */
	if (why & QE_CR_STAT_TCCOFLOW) {
		if_statadd_ref(nsr, if_collisions, 256);
		if_statadd_ref(nsr, if_oerrors, 256);
		r |= 1;
	}

	if (why & QE_CR_STAT_TXDERROR) {
		printf("%s: tx descriptor is bad\n", xname);
		rst = 1;
		r |= 1;
	}

	if (why & QE_CR_STAT_TXLERR) {
		printf("%s: tx late error\n", xname);
		if_statinc_ref(nsr, if_oerrors);
		rst = 1;
		r |= 1;
	}

	if (why & QE_CR_STAT_TXPERR) {
		printf("%s: tx DMA parity error\n", xname);
		if_statinc_ref(nsr, if_oerrors);
		rst = 1;
		r |= 1;
	}

	if (why & QE_CR_STAT_TXSERR) {
		printf("%s: tx DMA sbus error ack\n", xname);
		if_statinc_ref(nsr, if_oerrors);
		rst = 1;
		r |= 1;
	}

	if (why & QE_CR_STAT_RCCOFLOW) {
		if_statadd_ref(nsr, if_collisions, 256);
		if_statadd_ref(nsr, if_ierrors, 256);
		r |= 1;
	}

	if (why & QE_CR_STAT_RUOFLOW) {
		if_statadd_ref(nsr, if_ierrors, 256);
		r |= 1;
	}

	if (why & QE_CR_STAT_MCOFLOW) {
		if_statadd_ref(nsr, if_ierrors, 256);
		r |= 1;
	}

	if (why & QE_CR_STAT_RXFOFLOW) {
		printf("%s: rx fifo overflow\n", xname);
		if_statinc_ref(nsr, if_ierrors);
		r |= 1;
	}

	if (why & QE_CR_STAT_RLCOLL) {
		printf("%s: rx late collision\n", xname);
		if_statinc_ref(nsr, if_ierrors);
		if_statinc_ref(nsr, if_collisions);
		r |= 1;
	}

	if (why & QE_CR_STAT_FCOFLOW) {
		if_statadd_ref(nsr, if_ierrors, 256);
		r |= 1;
	}

	if (why & QE_CR_STAT_CECOFLOW) {
		if_statadd_ref(nsr, if_ierrors, 256);
		r |= 1;
	}

	if (why & QE_CR_STAT_RXDROP) {
		printf("%s: rx packet dropped\n", xname);
		if_statinc_ref(nsr, if_ierrors);
		r |= 1;
	}

	if (why & QE_CR_STAT_RXSMALL) {
		printf("%s: rx buffer too small\n", xname);
		if_statinc_ref(nsr, if_ierrors);
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_RXLERR) {
		printf("%s: rx late error\n", xname);
		if_statinc_ref(nsr, if_ierrors);
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_RXPERR) {
		printf("%s: rx DMA parity error\n", xname);
		if_statinc_ref(nsr, if_ierrors);
		r |= 1;
		rst = 1;
	}

	if (why & QE_CR_STAT_RXSERR) {
		printf("%s: rx DMA sbus error ack\n", xname);
		if_statinc_ref(nsr, if_ierrors);
		r |= 1;
		rst = 1;
	}

	IF_STAT_PUTREF(ifp);

	if (r == 0)
		aprint_error_dev(self, "unexpected interrupt error: %08x\n",
			why);

	if (rst) {
		printf("%s: resetting...\n", xname);
		qereset(sc);
		/* -1 tells qeintr() the chip was reset. */
		return (-1);
	}

	return (r);
}

/*
 * ioctl entry point.
 */
int
qeioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct qe_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = data;
	int s, error = 0;
	s = splnet();

	switch (cmd) {
	case SIOCINITIFADDR:
		ifp->if_flags |= IFF_UP;
		qeinit(sc);
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			arp_ifinit(ifp, ifa);
			break;
#endif /* INET */
		default:
			break;
		}
		break;

	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;
		/* XXX re-use ether_ioctl() */
		switch (ifp->if_flags & (IFF_UP | IFF_RUNNING)) {
		case IFF_RUNNING:
			/*
			 * If interface is marked down and it is running, then
			 * stop it.
			 */
			qestop(sc);
			ifp->if_flags &= ~IFF_RUNNING;
			break;
		case IFF_UP:
			/*
			 * If interface is marked up and it is stopped, then
			 * start it.
			 */
			qeinit(sc);
			break;
		default:
			/*
			 * Reset the interface to pick up changes in any other
			 * flags that affect hardware registers.
			 */
			qestop(sc);
			qeinit(sc);
			break;
		}
#ifdef QEDEBUG
		/* Track IFF_DEBUG in the softc so interrupt code can see it. */
		sc->sc_debug = (ifp->if_flags & IFF_DEBUG) != 0 ? 1 : 0;
#endif
		break;

	case SIOCADDMULTI:
	case SIOCDELMULTI:
		if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			if (ifp->if_flags & IFF_RUNNING)
				qe_mcreset(sc);
			error = 0;
		}
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		break;
	}

	splx(s);
	return (error);
}


/*
 * Initialize the hardware: program the channel and MACE registers,
 * set the station address, apply the media selection and mark the
 * interface running.
 */
void
qeinit(struct qe_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t t = sc->sc_bustag;
	bus_space_handle_t cr = sc->sc_cr;
	bus_space_handle_t mr = sc->sc_mr;
	struct qec_softc *qec = sc->sc_qec;
	uint32_t qecaddr;
	uint8_t *ea;
	int s;

#if defined(SUN4U) || defined(__GNUC__)
	(void)&t;
#endif
	s = splnet();

	qestop(sc);

	/*
	 * Allocate descriptor ring and buffers
	 */
	qec_meminit(&sc->sc_rb, QE_PKT_BUF_SZ);

	/* Channel registers: */
	bus_space_write_4(t, cr, QE_CRI_RXDS, (uint32_t)sc->sc_rb.rb_rxddma);
	bus_space_write_4(t, cr, QE_CRI_TXDS, (uint32_t)sc->sc_rb.rb_txddma);

	bus_space_write_4(t, cr, QE_CRI_RIMASK, 0);
	bus_space_write_4(t, cr, QE_CRI_TIMASK, 0);
	bus_space_write_4(t, cr, QE_CRI_QMASK, 0);
	bus_space_write_4(t, cr, QE_CRI_MMASK, QE_CR_MMASK_RXCOLL);
	bus_space_write_4(t, cr, QE_CRI_CCNT, 0);
	bus_space_write_4(t, cr, QE_CRI_PIPG, 0);

	/* This channel's slice of the QEC's local buffer memory. */
	qecaddr = sc->sc_channel * qec->sc_msize;
	bus_space_write_4(t, cr, QE_CRI_RXWBUF, qecaddr);
	bus_space_write_4(t, cr, QE_CRI_RXRBUF, qecaddr);
	bus_space_write_4(t, cr, QE_CRI_TXWBUF, qecaddr + qec->sc_rsize);
	bus_space_write_4(t, cr, QE_CRI_TXRBUF, qecaddr + qec->sc_rsize);

	/* MACE registers: */
	bus_space_write_1(t, mr, QE_MRI_PHYCC, QE_MR_PHYCC_ASEL);
	bus_space_write_1(t, mr, QE_MRI_XMTFC, QE_MR_XMTFC_APADXMT);
	bus_space_write_1(t, mr, QE_MRI_RCVFC, 0);

	/*
	 * Mask MACE's receive interrupt, since we're being notified
	 * by the QEC after DMA completes.
	 */
	bus_space_write_1(t, mr, QE_MRI_IMR,
	    QE_MR_IMR_CERRM | QE_MR_IMR_RCVINTM);

	bus_space_write_1(t, mr, QE_MRI_BIUCC,
	    QE_MR_BIUCC_BSWAP | QE_MR_BIUCC_64TS);

	bus_space_write_1(t, mr, QE_MRI_FIFOFC,
	    QE_MR_FIFOCC_TXF16 | QE_MR_FIFOCC_RXF32 |
	    QE_MR_FIFOCC_RFWU | QE_MR_FIFOCC_TFWU);

	bus_space_write_1(t, mr, QE_MRI_PLSCC, QE_MR_PLSCC_TP);

	/*
	 * Station address
	 */
	ea = sc->sc_enaddr;
	bus_space_write_1(t, mr, QE_MRI_IAC,
	    QE_MR_IAC_ADDRCHG | QE_MR_IAC_PHYADDR);
	bus_space_write_multi_1(t, mr, QE_MRI_PADR, ea, 6);

	/* Apply media settings */
	qe_ifmedia_upd(ifp);

	/*
	 * Clear Logical address filter
	 */
	bus_space_write_1(t, mr, QE_MRI_IAC,
	    QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR);
	bus_space_set_multi_1(t, mr, QE_MRI_LADRF, 0, 8);
	bus_space_write_1(t, mr, QE_MRI_IAC, 0);

	/* Clear missed packet count (register cleared on read) */
	(void)bus_space_read_1(t, mr, QE_MRI_MPC);

#if 0
	/* test register: */
	bus_space_write_1(t, mr, QE_MRI_UTR, 0);
#endif

	/* Reset multicast filter */
	qe_mcreset(sc);

	ifp->if_flags |= IFF_RUNNING;
	splx(s);
}

/*
 * Reset multicast filter.
1048 */ 1049 void 1050 qe_mcreset(struct qe_softc *sc) 1051 { 1052 struct ethercom *ec = &sc->sc_ethercom; 1053 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1054 bus_space_tag_t t = sc->sc_bustag; 1055 bus_space_handle_t mr = sc->sc_mr; 1056 struct ether_multi *enm; 1057 struct ether_multistep step; 1058 uint32_t crc; 1059 uint16_t hash[4]; 1060 uint8_t octet, maccc, *ladrp = (uint8_t *)&hash[0]; 1061 int i; 1062 1063 #if defined(SUN4U) || defined(__GNUC__) 1064 (void)&t; 1065 #endif 1066 1067 /* We also enable transmitter & receiver here */ 1068 maccc = QE_MR_MACCC_ENXMT | QE_MR_MACCC_ENRCV; 1069 1070 if (ifp->if_flags & IFF_PROMISC) { 1071 maccc |= QE_MR_MACCC_PROM; 1072 bus_space_write_1(t, mr, QE_MRI_MACCC, maccc); 1073 return; 1074 } 1075 1076 if (ifp->if_flags & IFF_ALLMULTI) { 1077 bus_space_write_1(t, mr, QE_MRI_IAC, 1078 QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR); 1079 bus_space_set_multi_1(t, mr, QE_MRI_LADRF, 0xff, 8); 1080 bus_space_write_1(t, mr, QE_MRI_IAC, 0); 1081 bus_space_write_1(t, mr, QE_MRI_MACCC, maccc); 1082 return; 1083 } 1084 1085 hash[3] = hash[2] = hash[1] = hash[0] = 0; 1086 1087 ETHER_LOCK(ec); 1088 ETHER_FIRST_MULTI(step, ec, enm); 1089 while (enm != NULL) { 1090 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 1091 ETHER_ADDR_LEN) != 0) { 1092 /* 1093 * We must listen to a range of multicast 1094 * addresses. For now, just accept all 1095 * multicasts, rather than trying to set only 1096 * those filter bits needed to match the range. 1097 * (At this time, the only use of address 1098 * ranges is for IP multicast routing, for 1099 * which the range is big enough to require 1100 * all bits set.) 
1101 */ 1102 bus_space_write_1(t, mr, QE_MRI_IAC, 1103 QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR); 1104 bus_space_set_multi_1(t, mr, QE_MRI_LADRF, 0xff, 8); 1105 bus_space_write_1(t, mr, QE_MRI_IAC, 0); 1106 ifp->if_flags |= IFF_ALLMULTI; 1107 break; 1108 } 1109 1110 crc = ether_crc32_le(enm->enm_addrlo, ETHER_ADDR_LEN); 1111 crc >>= 26; 1112 hash[crc >> 4] |= 1 << (crc & 0xf); 1113 ETHER_NEXT_MULTI(step, enm); 1114 } 1115 ETHER_UNLOCK(ec); 1116 1117 /* We need to byte-swap the hash before writing to the chip. */ 1118 for (i = 0; i < 7; i += 2) { 1119 octet = ladrp[i]; 1120 ladrp[i] = ladrp[i + 1]; 1121 ladrp[i + 1] = octet; 1122 } 1123 bus_space_write_1(t, mr, QE_MRI_IAC, 1124 QE_MR_IAC_ADDRCHG | QE_MR_IAC_LOGADDR); 1125 bus_space_write_multi_1(t, mr, QE_MRI_LADRF, ladrp, 8); 1126 bus_space_write_1(t, mr, QE_MRI_IAC, 0); 1127 bus_space_write_1(t, mr, QE_MRI_MACCC, maccc); 1128 } 1129 1130 /* 1131 * Get current media settings. 1132 */ 1133 void 1134 qe_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 1135 { 1136 struct qe_softc *sc = ifp->if_softc; 1137 bus_space_tag_t t = sc->sc_bustag; 1138 bus_space_handle_t mr = sc->sc_mr; 1139 uint8_t v; 1140 1141 #if defined(SUN4U) || defined(__GNUC__) 1142 (void)&t; 1143 #endif 1144 v = bus_space_read_1(t, mr, QE_MRI_PLSCC); 1145 1146 switch (bus_space_read_1(t, mr, QE_MRI_PLSCC) & QE_MR_PLSCC_PORTMASK) { 1147 case QE_MR_PLSCC_TP: 1148 ifmr->ifm_active = IFM_ETHER | IFM_10_T; 1149 break; 1150 case QE_MR_PLSCC_AUI: 1151 ifmr->ifm_active = IFM_ETHER | IFM_10_5; 1152 break; 1153 case QE_MR_PLSCC_GPSI: 1154 case QE_MR_PLSCC_DAI: 1155 /* ... */ 1156 break; 1157 } 1158 1159 v = bus_space_read_1(t, mr, QE_MRI_PHYCC); 1160 ifmr->ifm_status |= IFM_AVALID; 1161 if ((v & QE_MR_PHYCC_LNKFL) != 0) 1162 ifmr->ifm_status &= ~IFM_ACTIVE; 1163 else 1164 ifmr->ifm_status |= IFM_ACTIVE; 1165 1166 } 1167 1168 /* 1169 * Set media options. 
1170 */ 1171 int 1172 qe_ifmedia_upd(struct ifnet *ifp) 1173 { 1174 struct qe_softc *sc = ifp->if_softc; 1175 struct ifmedia *ifm = &sc->sc_ifmedia; 1176 bus_space_tag_t t = sc->sc_bustag; 1177 bus_space_handle_t mr = sc->sc_mr; 1178 int newmedia = ifm->ifm_media; 1179 uint8_t plscc, phycc; 1180 1181 #if defined(SUN4U) || defined(__GNUC__) 1182 (void)&t; 1183 #endif 1184 if (IFM_TYPE(newmedia) != IFM_ETHER) 1185 return (EINVAL); 1186 1187 plscc = bus_space_read_1(t, mr, QE_MRI_PLSCC) & ~QE_MR_PLSCC_PORTMASK; 1188 phycc = bus_space_read_1(t, mr, QE_MRI_PHYCC) & ~QE_MR_PHYCC_ASEL; 1189 1190 if (IFM_SUBTYPE(newmedia) == IFM_AUTO) 1191 phycc |= QE_MR_PHYCC_ASEL; 1192 else if (IFM_SUBTYPE(newmedia) == IFM_10_T) 1193 plscc |= QE_MR_PLSCC_TP; 1194 else if (IFM_SUBTYPE(newmedia) == IFM_10_5) 1195 plscc |= QE_MR_PLSCC_AUI; 1196 1197 bus_space_write_1(t, mr, QE_MRI_PLSCC, plscc); 1198 bus_space_write_1(t, mr, QE_MRI_PHYCC, phycc); 1199 1200 return (0); 1201 } 1202