/*	$NetBSD: if_sq.c,v 1.16 2003/01/13 17:13:10 bouyer Exp $	*/

/*
 * Copyright (c) 2001 Rafal K. Boni
 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Portions of this code are derived from software contributed to The
 * NetBSD Foundation by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "bpfilter.h"
#include "rnd.h"		/* XXX for the NRND > 0 code in sq_intr() */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>

#include <machine/endian.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#if NRND > 0
#include <sys/rnd.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/ic/seeq8003reg.h>

#include <sgimips/hpc/sqvar.h>
#include <sgimips/hpc/hpcvar.h>
#include <sgimips/hpc/hpcreg.h>

#include <dev/arcbios/arcbios.h>
#include <dev/arcbios/arcbiosvar.h>

#define static
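
/*
 * XXX The "#define static" above strips the static qualifier from
 * everything below so the symbols stay visible to the debugger; it
 * should be removed (or made conditional on SQ_DEBUG) once the driver
 * settles down.
 */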

/*
 * Short TODO list:
 *	(1) Do counters for bad-RX packets.
 *	(2) Allow multi-segment transmits, instead of copying to a single,
 *	    contiguous mbuf.
 *	(3) Verify sq_stop() turns off enough stuff; I was still getting
 *	    seeq interrupts after sq_stop().
 *	(4) Fix up printfs in driver (most should only fire ifdef SQ_DEBUG
 *	    or something similar).
 *	(5) Implement EDLC modes: especially packet auto-pad and simplex
 *	    mode.
 *	(6) Should the driver filter out its own transmissions in non-EDLC
 *	    mode?
 *	(7) Multicast support -- multicast filter, address management, ...
 *	(8) Deal with RB0 (recv buffer overflow) on reception.  Will need
 *	    to figure out if RB0 is read-only as stated in one spot in the
 *	    HPC spec or read-write (ie, is the 'write a one to clear it')
 *	    the correct thing?
 */

static int	sq_match(struct device *, struct cfdata *, void *);
static void	sq_attach(struct device *, struct device *, void *);
static int	sq_init(struct ifnet *);
static void	sq_start(struct ifnet *);
static void	sq_stop(struct ifnet *, int);
static void	sq_watchdog(struct ifnet *);
static int	sq_ioctl(struct ifnet *, u_long, caddr_t);

static void	sq_set_filter(struct sq_softc *);
static int	sq_intr(void *);
static int	sq_rxintr(struct sq_softc *);
static int	sq_txintr(struct sq_softc *);
static void	sq_reset(struct sq_softc *);
static int	sq_add_rxbuf(struct sq_softc *, int);
static void	sq_dump_buffer(u_int32_t addr, u_int32_t len);

static void	enaddr_aton(const char *, u_int8_t *);

/* Actions */
#define SQ_RESET		1
#define SQ_ADD_TO_DMA		2
#define SQ_START_DMA		3
#define SQ_DONE_DMA		4
#define SQ_RESTART_DMA		5
#define SQ_TXINTR_ENTER		6
#define SQ_TXINTR_EXIT		7
#define SQ_TXINTR_BUSY		8

struct sq_action_trace {
	int action;
	int bufno;
	int status;
	int freebuf;
};

#define SQ_TRACEBUF_SIZE	100
int sq_trace_idx = 0;
struct sq_action_trace sq_trace[SQ_TRACEBUF_SIZE];

void sq_trace_dump(struct sq_softc *sc);

#define SQ_TRACE(act, buf, stat, free) do {			\
	sq_trace[sq_trace_idx].action = (act);			\
	sq_trace[sq_trace_idx].bufno = (buf);			\
	sq_trace[sq_trace_idx].status = (stat);			\
	sq_trace[sq_trace_idx].freebuf = (free);		\
	if (++sq_trace_idx == SQ_TRACEBUF_SIZE) {		\
		memset(&sq_trace, 0, sizeof(sq_trace));		\
		sq_trace_idx = 0;				\
	}							\
} while (0)

CFATTACH_DECL(sq, sizeof(struct sq_softc),
    sq_match, sq_attach, NULL, NULL);

#define ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)

static int
sq_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct hpc_attach_args *ha = aux;

	if (strcmp(ha->ha_name, cf->cf_name) == 0)
		return (1);

	return (0);
}

static void
sq_attach(struct device *parent, struct device *self, void *aux)
{
	int i, err;
	char *macaddr;
	struct sq_softc *sc = (void *)self;
	struct hpc_attach_args *haa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	sc->sc_hpct = haa->ha_st;
	if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh,
				       haa->ha_dmaoff,
				       HPC_ENET_REGS_SIZE,
				       &sc->sc_hpch)) != 0) {
		printf(": unable to map HPC DMA registers, error = %d\n", err);
		goto fail_0;
	}

	sc->sc_regt = haa->ha_st;
	if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh,
				       haa->ha_devoff,
				       HPC_ENET_DEVREGS_SIZE,
				       &sc->sc_regh)) != 0) {
		printf(": unable to map Seeq registers, error = %d\n", err);
		goto fail_0;
	}

	sc->sc_dmat = haa->ha_dmat;

	if ((err = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct sq_control),
				    PAGE_SIZE, PAGE_SIZE, &sc->sc_cdseg,
				    1, &sc->sc_ncdseg, BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to allocate control data, error = %d\n", err);
		goto fail_0;
	}

	if ((err = bus_dmamem_map(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg,
				  sizeof(struct sq_control),
				  (caddr_t *)&sc->sc_control,
				  BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf(": unable to map control data, error = %d\n", err);
		goto fail_1;
	}
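
	/*
	 * The calls above and below are the four canonical bus_dma(9)
	 * steps for the shared control structure (descriptor rings):
	 * bus_dmamem_alloc() and bus_dmamem_map() to get wired,
	 * kernel-visible memory, then bus_dmamap_create() and
	 * bus_dmamap_load() to hand its bus address to the HPC.
	 */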

	if ((err = bus_dmamap_create(sc->sc_dmat, sizeof(struct sq_control),
				     1, sizeof(struct sq_control), PAGE_SIZE,
				     BUS_DMA_NOWAIT, &sc->sc_cdmap)) != 0) {
		printf(": unable to create DMA map for control data, error "
		    "= %d\n", err);
		goto fail_2;
	}

	if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_cdmap, sc->sc_control,
				   sizeof(struct sq_control),
				   NULL, BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to load DMA map for control data, error "
		    "= %d\n", err);
		goto fail_3;
	}

	memset(sc->sc_control, 0, sizeof(struct sq_control));

	/* Create transmit buffer DMA maps */
	for (i = 0; i < SQ_NTXDESC; i++) {
		if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
					     MCLBYTES, 0, BUS_DMA_NOWAIT,
					     &sc->sc_txmap[i])) != 0) {
			printf(": unable to create tx DMA map %d, "
			    "error = %d\n", i, err);
			goto fail_4;
		}
	}

	/* Create receive buffer DMA maps */
	for (i = 0; i < SQ_NRXDESC; i++) {
		if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
					     MCLBYTES, 0, BUS_DMA_NOWAIT,
					     &sc->sc_rxmap[i])) != 0) {
			printf(": unable to create rx DMA map %d, "
			    "error = %d\n", i, err);
			goto fail_5;
		}
	}

	/* Pre-allocate the receive buffers. */
	for (i = 0; i < SQ_NRXDESC; i++) {
		if ((err = sq_add_rxbuf(sc, i)) != 0) {
			printf(": unable to allocate or map rx buffer %d, "
			    "error = %d\n", i, err);
			goto fail_6;
		}
	}

	if ((macaddr = ARCBIOS->GetEnvironmentVariable("eaddr")) == NULL) {
		printf(": unable to get MAC address!\n");
		goto fail_6;
	}

	evcnt_attach_dynamic(&sc->sq_intrcnt, EVCNT_TYPE_INTR, NULL,
	    self->dv_xname, "intr");

	if ((cpu_intr_establish(haa->ha_irq, IPL_NET, sq_intr, sc)) == NULL) {
		printf(": unable to establish interrupt!\n");
		goto fail_6;
	}

	/* Reset the chip to a known state. */
	sq_reset(sc);

	/*
	 * Determine if we're an 8003 or 80c03 by setting the first
	 * MAC address register to non-zero, and then reading it back.
	 * If it's zero, we have an 80c03, because we will have read
	 * the TxCollLSB register.
	 */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCOLLS0, 0xa5);
	if (bus_space_read_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCOLLS0) == 0)
		sc->sc_type = SQ_TYPE_80C03;
	else
		sc->sc_type = SQ_TYPE_8003;
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCOLLS0, 0x00);

	printf(": SGI Seeq %s\n",
	    sc->sc_type == SQ_TYPE_80C03 ? "80c03" : "8003");

	enaddr_aton(macaddr, sc->sc_enaddr);

	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(sc->sc_enaddr));

	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_init = sq_init;
	ifp->if_stop = sq_stop;
	ifp->if_start = sq_start;
	ifp->if_ioctl = sq_ioctl;
	ifp->if_watchdog = sq_watchdog;
	ifp->if_flags = IFF_BROADCAST | IFF_NOTRAILERS | IFF_MULTICAST;
	IFQ_SET_READY(&ifp->if_snd);

	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);

	memset(&sq_trace, 0, sizeof(sq_trace));
	/* Done! */
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_6:
	for (i = 0; i < SQ_NRXDESC; i++) {
		if (sc->sc_rxmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[i]);
			m_freem(sc->sc_rxmbuf[i]);
		}
	}
 fail_5:
	for (i = 0; i < SQ_NRXDESC; i++) {
		if (sc->sc_rxmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxmap[i]);
	}
 fail_4:
	for (i = 0; i < SQ_NTXDESC; i++) {
		if (sc->sc_txmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_txmap[i]);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cdmap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdmap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control,
	    sizeof(struct sq_control));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg);
 fail_0:
	return;
}
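
/*
 * A note on ring geometry (informal; the SQ_NTXDESC/SQ_NRXDESC sizes
 * and the SQ_NEXTTX()/SQ_PREVTX()/SQ_CDTXADDR() helpers are assumed to
 * live in sqvar.h): both descriptor rings sit inside the single
 * sq_control structure loaded above, so SQ_CDTXADDR(sc, i) is just the
 * control structure's DMA address plus the offset of tx descriptor i,
 * and the index helpers wrap modulo the ring size, e.g.
 * SQ_NEXTTX(SQ_NTXDESC - 1) == 0.
 */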

/* Set up data to get the interface up and running. */
int
sq_init(struct ifnet *ifp)
{
	int i;
	u_int32_t reg;
	struct sq_softc *sc = ifp->if_softc;

	/* Cancel any in-progress I/O */
	sq_stop(ifp, 0);

	sc->sc_nextrx = 0;

	sc->sc_nfreetx = SQ_NTXDESC;
	sc->sc_nexttx = sc->sc_prevtx = 0;

	SQ_TRACE(SQ_RESET, 0, 0, sc->sc_nfreetx);

	/* Set into 8003 mode, bank 0 to program ethernet address */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCMD, TXCMD_BANK0);

	/* Now write the address */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		bus_space_write_1(sc->sc_regt, sc->sc_regh, i,
		    sc->sc_enaddr[i]);

	sc->sc_rxcmd = RXCMD_IE_CRC |
		       RXCMD_IE_DRIB |
		       RXCMD_IE_SHORT |
		       RXCMD_IE_END |
		       RXCMD_IE_GOOD;

	/*
	 * Set the receive filter -- this will add some bits to the
	 * prototype RXCMD register.  Do this before setting the
	 * transmit config register, since we might need to switch
	 * banks.
	 */
	sq_set_filter(sc);

	/* Set up Seeq transmit command register */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCMD,
	    TXCMD_IE_UFLOW |
	    TXCMD_IE_COLL |
	    TXCMD_IE_16COLL |
	    TXCMD_IE_GOOD);

	/* Now write the receive command register. */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_RXCMD, sc->sc_rxcmd);

	/* Set up HPC ethernet DMA config */
	reg = bus_space_read_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_DMACFG);
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_DMACFG,
	    reg | ENETR_DMACFG_FIX_RXDC |
		  ENETR_DMACFG_FIX_INTR |
		  ENETR_DMACFG_FIX_EOP);

	/* Pass the start of the receive ring to the HPC */
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_NDBP,
	    SQ_CDRXADDR(sc, 0));

	/* And turn on the HPC ethernet receive channel */
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_CTL,
	    ENETR_CTL_ACTIVE);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

static void
sq_set_filter(struct sq_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;

	/*
	 * Check for promiscuous mode.  Also implies
	 * all-multicast.
	 */
	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_rxcmd |= RXCMD_REC_ALL;
		ifp->if_flags |= IFF_ALLMULTI;
		return;
	}

	/*
	 * The 8003 has no hash table.  If we have any multicast
	 * addresses on the list, enable reception of all multicast
	 * frames.
	 *
	 * XXX The 80c03 has a hash table.  We should use it.
	 */

	ETHER_FIRST_MULTI(step, ec, enm);

	if (enm == NULL) {
		sc->sc_rxcmd &= ~RXCMD_REC_MASK;
		sc->sc_rxcmd |= RXCMD_REC_BROAD;

		ifp->if_flags &= ~IFF_ALLMULTI;
		return;
	}

	sc->sc_rxcmd |= RXCMD_REC_MULTI;
	ifp->if_flags |= IFF_ALLMULTI;
}
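
/*
 * XXX A sketch of what 80c03 hash-filter support might look like
 * (hypothetical until checked against the chip documentation and
 * seeq8003reg.h): walk the whole list with ETHER_FIRST_MULTI()/
 * ETHER_NEXT_MULTI(), fall back to RXCMD_REC_MULTI whenever a range
 * entry (enm_addrlo != enm_addrhi) is seen, and otherwise program each
 * address into the chip's hash filter.
 */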

int
sq_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	int s, error = 0;

	s = splnet();

	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		/*
		 * Multicast list has changed; set the hardware filter
		 * accordingly.
		 */
		error = sq_init(ifp);
	}

	splx(s);
	return (error);
}

void
sq_start(struct ifnet *ifp)
{
	struct sq_softc *sc = ifp->if_softc;
	u_int32_t status;
	struct mbuf *m0, *m;
	bus_dmamap_t dmamap;
	int err, totlen, nexttx, firsttx, lasttx, ofree, seg;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_nfreetx;
	firsttx = sc->sc_nexttx;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (sc->sc_nfreetx != 0) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		dmamap = sc->sc_txmap[sc->sc_nexttx];

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  In this case, we'll copy
		 * and try again.
		 * Also copy it if we need to pad, so that we are sure there
		 * is room for the pad buffer.
		 * XXX the right way of doing this is to use a static buffer
		 * for padding and adding it to the transmit descriptor (see
		 * sys/dev/pci/if_tl.c for example).  We can't do this here
		 * yet because we can't send packets with more than one
		 * fragment.
		 */
		if (m0->m_pkthdr.len < ETHER_PAD_LEN ||
		    bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
					 BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					break;
				}
			}

			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			if (m0->m_pkthdr.len < ETHER_PAD_LEN) {
				memset(mtod(m, char *) + m0->m_pkthdr.len, 0,
				    ETHER_PAD_LEN - m0->m_pkthdr.len);
				m->m_pkthdr.len = m->m_len = ETHER_PAD_LEN;
			} else
				m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;

			if ((err = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
					m, BUS_DMA_NOWAIT)) != 0) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname, err);
				break;
			}
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.
		 */
		if (dmamap->dm_nsegs > sc->sc_nfreetx) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed to anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 *
			 * XXX We could allocate an mbuf and copy, but
			 * XXX is it worth it?
			 */
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			if (m != NULL)
				m_freem(m);
			break;
		}
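
		/*
		 * Note the IFQ_POLL()/IFQ_DEQUEUE() split above and
		 * below: so far the packet has only been peeked at, so
		 * on every failure path above it simply stays on the
		 * send queue.  Only now, when it is certain to fit, is
		 * it dequeued for real.
		 */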
		IFQ_DEQUEUE(&ifp->if_snd, m0);
#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the transmit descriptors.
		 */
		for (nexttx = sc->sc_nexttx, seg = 0, totlen = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = SQ_NEXTTX(nexttx)) {
			sc->sc_txdesc[nexttx].hdd_bufptr =
			    dmamap->dm_segs[seg].ds_addr;
			sc->sc_txdesc[nexttx].hdd_ctl =
			    dmamap->dm_segs[seg].ds_len;
			sc->sc_txdesc[nexttx].hdd_descptr =
			    SQ_CDTXADDR(sc, SQ_NEXTTX(nexttx));
			lasttx = nexttx;
			totlen += dmamap->dm_segs[seg].ds_len;
		}

		/* Last descriptor gets end-of-packet */
		sc->sc_txdesc[lasttx].hdd_ctl |= HDD_CTL_EOPACKET;

#if 0
		printf("%s: transmit %d-%d, len %d\n", sc->sc_dev.dv_xname,
		    sc->sc_nexttx, lasttx, totlen);
#endif

		if (ifp->if_flags & IFF_DEBUG) {
			printf("     transmit chain:\n");
			for (seg = sc->sc_nexttx;; seg = SQ_NEXTTX(seg)) {
				printf("     descriptor %d:\n", seg);
				printf("       hdd_bufptr:  0x%08x\n",
				    sc->sc_txdesc[seg].hdd_bufptr);
				printf("       hdd_ctl:     0x%08x\n",
				    sc->sc_txdesc[seg].hdd_ctl);
				printf("       hdd_descptr: 0x%08x\n",
				    sc->sc_txdesc[seg].hdd_descptr);

				if (seg == lasttx)
					break;
			}
		}

		/* Sync the descriptors we're using. */
		SQ_CDTXSYNC(sc, sc->sc_nexttx, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Store a pointer to the packet so we can free it later */
		sc->sc_txmbuf[sc->sc_nexttx] = m0;

		/* Advance the tx pointer. */
		sc->sc_nfreetx -= dmamap->dm_nsegs;
		sc->sc_nexttx = nexttx;
	}

	/* All transmit descriptors used up, let upper layers know */
	if (sc->sc_nfreetx == 0)
		ifp->if_flags |= IFF_OACTIVE;

	if (sc->sc_nfreetx != ofree) {
#if 0
		printf("%s: %d packets enqueued, first %d, INTR on %d\n",
		    sc->sc_dev.dv_xname, lasttx - firsttx + 1,
		    firsttx, lasttx);
#endif

		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued, and mark it as the last
		 * descriptor.
		 */
		sc->sc_txdesc[lasttx].hdd_ctl |= (HDD_CTL_INTR |
						  HDD_CTL_EOCHAIN);
		SQ_CDTXSYNC(sc, lasttx, 1,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/*
		 * There is a potential race condition here if the HPC
		 * DMA channel is active and we try and either update
		 * the 'next descriptor' pointer in the HPC PIO space
		 * or the 'next descriptor' pointer in a previous desc-
		 * riptor.
		 *
		 * To avoid this, if the channel is active, we rely on
		 * the transmit interrupt routine noticing that there
		 * are more packets to send and restarting the HPC DMA
		 * engine, rather than mucking with the DMA state here.
		 */
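
		/*
		 * Informal restatement of the two cases handled below:
		 *
		 *	DMA active: clear HDD_CTL_EOCHAIN on the old tail
		 *	    descriptor so the engine runs on into the new
		 *	    ones; if it stopped before seeing the change,
		 *	    sq_txintr() will notice and restart it.
		 *	DMA idle:   point HPC_ENETX_NDBP at the first new
		 *	    descriptor and set ENETX_CTL_ACTIVE.
		 */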
		status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
		    HPC_ENETX_CTL);

		if ((status & ENETX_CTL_ACTIVE) != 0) {
			SQ_TRACE(SQ_ADD_TO_DMA, firsttx, status,
			    sc->sc_nfreetx);
			sc->sc_txdesc[SQ_PREVTX(firsttx)].hdd_ctl &=
			    ~HDD_CTL_EOCHAIN;
			SQ_CDTXSYNC(sc, SQ_PREVTX(firsttx), 1,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		} else {
			SQ_TRACE(SQ_START_DMA, firsttx, status,
			    sc->sc_nfreetx);

			bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
			    HPC_ENETX_NDBP, SQ_CDTXADDR(sc, firsttx));

			/* Kick DMA channel into life */
			bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
			    HPC_ENETX_CTL, ENETX_CTL_ACTIVE);
		}

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

void
sq_stop(struct ifnet *ifp, int disable)
{
	int i;
	struct sq_softc *sc = ifp->if_softc;

	for (i = 0; i < SQ_NTXDESC; i++) {
		if (sc->sc_txmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
			m_freem(sc->sc_txmbuf[i]);
			sc->sc_txmbuf[i] = NULL;
		}
	}

	/* Clear Seeq transmit/receive command registers */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCMD, 0);
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_RXCMD, 0);

	sq_reset(sc);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

/* Device timeout/watchdog routine. */
void
sq_watchdog(struct ifnet *ifp)
{
	u_int32_t status;
	struct sq_softc *sc = ifp->if_softc;

	status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETX_CTL);
	log(LOG_ERR, "%s: device timeout (prev %d, next %d, free %d, "
	    "status %08x)\n", sc->sc_dev.dv_xname, sc->sc_prevtx,
	    sc->sc_nexttx, sc->sc_nfreetx, status);

	sq_trace_dump(sc);

	memset(&sq_trace, 0, sizeof(sq_trace));
	sq_trace_idx = 0;

	++ifp->if_oerrors;

	sq_init(ifp);
}

void
sq_trace_dump(struct sq_softc *sc)
{
	int i;

	for (i = 0; i < sq_trace_idx; i++) {
		printf("%s: [%d] action %d, buf %d, free %d, status %08x\n",
		    sc->sc_dev.dv_xname, i, sq_trace[i].action,
		    sq_trace[i].bufno, sq_trace[i].freebuf,
		    sq_trace[i].status);
	}
}

static int
sq_intr(void *arg)
{
	struct sq_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int handled = 0;
	u_int32_t stat;

	stat = bus_space_read_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_RESET);

	if ((stat & 2) == 0) {
		printf("%s: Unexpected interrupt!\n", sc->sc_dev.dv_xname);
		return 0;
	}

	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_RESET, 2);
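
	/*
	 * XXX Magic numbers: bit 1 of HPC_ENETR_RESET apparently latches
	 * the interrupt (read to test it above, write '2' to acknowledge
	 * it); sq_reset() pulses the low bits ('3', then '0') to reset
	 * the channel.  These deserve named constants in hpcreg.h.
	 */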

	/*
	 * If the interface isn't running, the interrupt couldn't
	 * possibly have come from us.
	 */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return 0;

	sc->sq_intrcnt.ev_count++;

	/* Always check for received packets */
	if (sq_rxintr(sc) != 0)
		handled++;

	/* Only handle transmit interrupts if we actually sent something */
	if (sc->sc_nfreetx < SQ_NTXDESC) {
		sq_txintr(sc);
		handled++;
	}

#if NRND > 0
	if (handled)
		rnd_add_uint32(&sc->rnd_source, stat);
#endif
	return (handled);
}

static int
sq_rxintr(struct sq_softc *sc)
{
	int count = 0;
	struct mbuf *m;
	int i, framelen;
	u_int8_t pktstat;
	u_int32_t status;
	int new_end, orig_end;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	for (i = sc->sc_nextrx;; i = SQ_NEXTRX(i)) {
		SQ_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/*
		 * If this is a CPU-owned buffer, we're at the end of
		 * the list.
		 */
		if (sc->sc_rxdesc[i].hdd_ctl & HDD_CTL_OWN) {
#if 0
			u_int32_t reg;

			reg = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
			    HPC_ENETR_CTL);
			printf("%s: rxintr: done at %d (ctl %08x)\n",
			    sc->sc_dev.dv_xname, i, reg);
#endif
			break;
		}

		count++;

		m = sc->sc_rxmbuf[i];
		framelen = m->m_ext.ext_size -
		    HDD_CTL_BYTECNT(sc->sc_rxdesc[i].hdd_ctl) - 3;

		/* Now sync the actual packet data */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
		    sc->sc_rxmap[i]->dm_mapsize, BUS_DMASYNC_POSTREAD);

		pktstat = *((u_int8_t *)m->m_data + framelen + 2);

		if ((pktstat & RXSTAT_GOOD) == 0) {
			ifp->if_ierrors++;

			if (pktstat & RXSTAT_OFLOW)
				printf("%s: receive FIFO overflow\n",
				    sc->sc_dev.dv_xname);

			bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
			    sc->sc_rxmap[i]->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
			SQ_INIT_RXDESC(sc, i);
			continue;
		}

		if (sq_add_rxbuf(sc, i) != 0) {
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
			    sc->sc_rxmap[i]->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
			SQ_INIT_RXDESC(sc, i);
			continue;
		}

		m->m_data += 2;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = framelen;

		ifp->if_ipackets++;

#if 0
		printf("%s: sq_rxintr: buf %d len %d\n",
		    sc->sc_dev.dv_xname, i, framelen);
#endif

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		(*ifp->if_input)(ifp, m);
	}

	/* If anything happened, move ring start/end pointers to new spot */
	if (i != sc->sc_nextrx) {
		new_end = SQ_PREVRX(i);
		sc->sc_rxdesc[new_end].hdd_ctl |= HDD_CTL_EOCHAIN;
		SQ_CDRXSYNC(sc, new_end,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		orig_end = SQ_PREVRX(sc->sc_nextrx);
		sc->sc_rxdesc[orig_end].hdd_ctl &= ~HDD_CTL_EOCHAIN;
		SQ_CDRXSYNC(sc, orig_end,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		sc->sc_nextrx = i;
	}
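
	/*
	 * Worked example of the relinking above (hypothetical 8-entry
	 * ring): if sc_nextrx was 2 and the loop stopped at i == 5,
	 * descriptor 4 (SQ_PREVRX(5)) becomes the new end of chain and
	 * descriptor 1 (SQ_PREVRX(2)), the old end, has EOCHAIN cleared
	 * so the HPC can run past it.
	 */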

	status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_CTL);

	/* If the receive channel is stopped, restart it. */
	if ((status & ENETR_CTL_ACTIVE) == 0) {
		/* Pass the start of the receive ring to the HPC */
		bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
		    HPC_ENETR_NDBP, SQ_CDRXADDR(sc, sc->sc_nextrx));

		/* And turn on the HPC ethernet receive channel */
		bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_CTL,
		    ENETR_CTL_ACTIVE);
	}

	return count;
}

static int
sq_txintr(struct sq_softc *sc)
{
	int i;
	u_int32_t status;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETX_CTL);

	SQ_TRACE(SQ_TXINTR_ENTER, sc->sc_prevtx, status, sc->sc_nfreetx);

	if ((status & (ENETX_CTL_ACTIVE | TXSTAT_GOOD)) == 0) {
		if (status & TXSTAT_COLL)
			ifp->if_collisions++;

		if (status & TXSTAT_UFLOW) {
			printf("%s: transmit underflow\n",
			    sc->sc_dev.dv_xname);
			ifp->if_oerrors++;
		}

		if (status & TXSTAT_16COLL) {
			printf("%s: max collisions reached\n",
			    sc->sc_dev.dv_xname);
			ifp->if_oerrors++;
			ifp->if_collisions += 16;
		}
	}

	i = sc->sc_prevtx;
	while (sc->sc_nfreetx < SQ_NTXDESC) {
		/*
		 * Check status first so we don't end up with a case of
		 * the buffer not being finished while the DMA channel
		 * has gone idle.
		 */
		status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
		    HPC_ENETX_CTL);

		SQ_CDTXSYNC(sc, i, sc->sc_txmap[i]->dm_nsegs,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/* If not yet transmitted, try and start DMA engine again */
		if ((sc->sc_txdesc[i].hdd_ctl & HDD_CTL_XMITDONE) == 0) {
			if ((status & ENETX_CTL_ACTIVE) == 0) {
				SQ_TRACE(SQ_RESTART_DMA, i, status,
				    sc->sc_nfreetx);

				bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
				    HPC_ENETX_NDBP, SQ_CDTXADDR(sc, i));

				/* Kick DMA channel into life */
				bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
				    HPC_ENETX_CTL, ENETX_CTL_ACTIVE);

				/*
				 * Set a watchdog timer in case the chip
				 * flakes out.
				 */
				ifp->if_timer = 5;
			} else {
				SQ_TRACE(SQ_TXINTR_BUSY, i, status,
				    sc->sc_nfreetx);
			}
			break;
		}

		/* Sync the packet data, unload DMA map, free mbuf */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_txmap[i], 0,
		    sc->sc_txmap[i]->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
		m_freem(sc->sc_txmbuf[i]);
		sc->sc_txmbuf[i] = NULL;

		ifp->if_opackets++;
		sc->sc_nfreetx++;

		SQ_TRACE(SQ_DONE_DMA, i, status, sc->sc_nfreetx);
		i = SQ_NEXTTX(i);
	}

	/* prevtx now points to next xmit packet not yet finished */
	sc->sc_prevtx = i;

	/* If we have buffers free, let upper layers know */
	if (sc->sc_nfreetx > 0)
		ifp->if_flags &= ~IFF_OACTIVE;

	/* If all packets have left the coop, cancel watchdog */
	if (sc->sc_nfreetx == SQ_NTXDESC)
		ifp->if_timer = 0;

	SQ_TRACE(SQ_TXINTR_EXIT, sc->sc_prevtx, status, sc->sc_nfreetx);
	sq_start(ifp);

	return 1;
}

void
sq_reset(struct sq_softc *sc)
{
	/* Stop HPC dma channels */
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_CTL, 0);
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETX_CTL, 0);

	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_RESET, 3);
	delay(20);
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, HPC_ENETR_RESET, 0);
}
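
/*
 * sq_add_rxbuf() follows the usual NetBSD receive-buffer recipe:
 * allocate an mbuf header plus cluster (MGETHDR/MCLGET), map the whole
 * cluster for DMA, and rewrite the ring slot via SQ_INIT_RXDESC().
 * Any old mapping is unloaded first so the DMA map can be reused.
 */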
/*
 * sq_add_rxbuf: Add a receive buffer to the indicated descriptor.
 */
int
sq_add_rxbuf(struct sq_softc *sc, int idx)
{
	int err;
	struct mbuf *m;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (sc->sc_rxmbuf[idx] != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[idx]);

	sc->sc_rxmbuf[idx] = m;

	if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_rxmap[idx],
				   m->m_ext.ext_buf, m->m_ext.ext_size,
				   NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, err);
		panic("sq_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[idx], 0,
	    sc->sc_rxmap[idx]->dm_mapsize, BUS_DMASYNC_PREREAD);

	SQ_INIT_RXDESC(sc, idx);

	return 0;
}

void
sq_dump_buffer(u_int32_t addr, u_int32_t len)
{
	u_int i;
	u_char *physaddr = (u_char *)MIPS_PHYS_TO_KSEG1((caddr_t)addr);

	if (len == 0)
		return;

	printf("%p: ", physaddr);

	for (i = 0; i < len; i++) {
		printf("%02x ", *(physaddr + i) & 0xff);
		if ((i % 16) == 15 && i != len - 1)
			printf("\n%p: ", physaddr + i + 1);
	}

	printf("\n");
}

void
enaddr_aton(const char *str, u_int8_t *eaddr)
{
	int i;
	char c;

	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		if (*str == ':')
			str++;

		c = *str++;
		if (isdigit(c)) {
			eaddr[i] = (c - '0');
		} else if (isxdigit(c)) {
			eaddr[i] = (toupper(c) + 10 - 'A');
		}

		c = *str++;
		if (isdigit(c)) {
			eaddr[i] = (eaddr[i] << 4) | (c - '0');
		} else if (isxdigit(c)) {
			eaddr[i] = (eaddr[i] << 4) | (toupper(c) + 10 - 'A');
		}
	}
}