/*	$NetBSD: if_sq.c,v 1.37 2011/01/10 13:29:29 tsutsui Exp $	*/

/*
 * Copyright (c) 2001 Rafal K. Boni
 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Portions of this code are derived from software contributed to The
 * NetBSD Foundation by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_sq.c,v 1.37 2011/01/10 13:29:29 tsutsui Exp $");


#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>

#include <machine/endian.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#include <net/bpf.h>

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/sysconf.h>

#include <dev/ic/seeq8003reg.h>

#include <sgimips/hpc/sqvar.h>
#include <sgimips/hpc/hpcvar.h>
#include <sgimips/hpc/hpcreg.h>

#include <dev/arcbios/arcbios.h>
#include <dev/arcbios/arcbiosvar.h>

#define static

/*
 * Short TODO list:
 *	(1) Do counters for bad-RX packets.
 *	(2) Allow multi-segment transmits, instead of copying to a single,
 *	    contiguous mbuf.
 *	(3) Verify sq_stop() turns off enough stuff; I was still getting
 *	    seeq interrupts after sq_stop().
 *	(4) Implement EDLC modes: especially packet auto-pad and simplex
 *	    mode.
 *	(5) Should the driver filter out its own transmissions in non-EDLC
 *	    mode?
 *	(6) Multicast support -- multicast filter, address management, ...
 *	(7) Deal with RB0 (recv buffer overflow) on reception.  Will need
 *	    to figure out if RB0 is read-only as stated in one spot in the
 *	    HPC spec or read-write (ie, is the 'write a one to clear it')
 *	    the correct thing?
 */

#if defined(SQ_DEBUG)
int sq_debug = 0;
#define SQ_DPRINTF(x)	if (sq_debug) printf x
#else
#define SQ_DPRINTF(x)
#endif

static int	sq_match(struct device *, struct cfdata *, void *);
static void	sq_attach(struct device *, struct device *, void *);
static int	sq_init(struct ifnet *);
static void	sq_start(struct ifnet *);
static void	sq_stop(struct ifnet *, int);
static void	sq_watchdog(struct ifnet *);
static int	sq_ioctl(struct ifnet *, u_long, void *);

static void	sq_set_filter(struct sq_softc *);
static int	sq_intr(void *);
static int	sq_rxintr(struct sq_softc *);
static int	sq_txintr(struct sq_softc *);
static void	sq_txring_hpc1(struct sq_softc *);
static void	sq_txring_hpc3(struct sq_softc *);
static void	sq_reset(struct sq_softc *);
static int	sq_add_rxbuf(struct sq_softc *, int);
static void	sq_dump_buffer(paddr_t addr, psize_t len);
static void	sq_trace_dump(struct sq_softc *);

CFATTACH_DECL(sq, sizeof(struct sq_softc),
    sq_match, sq_attach, NULL, NULL);

#define ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)

#define sq_seeq_read(sc, off) \
	bus_space_read_1(sc->sc_regt, sc->sc_regh, off)
#define sq_seeq_write(sc, off, val) \
	bus_space_write_1(sc->sc_regt, sc->sc_regh, off, val)

#define sq_hpc_read(sc, off) \
	bus_space_read_4(sc->sc_hpct, sc->sc_hpch, off)
#define sq_hpc_write(sc, off, val) \
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, off, val)

/* MAC address offset for non-onboard implementations */
#define SQ_HPC_EEPROM_ENADDR	250

#define SGI_OUI_0		0x08
#define SGI_OUI_1		0x00
#define SGI_OUI_2		0x69

static int
sq_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct hpc_attach_args *ha = aux;

	if (strcmp(ha->ha_name, cf->cf_name) == 0) {
		vaddr_t reset, txstat;

		reset = MIPS_PHYS_TO_KSEG1(ha->ha_sh +
		    ha->ha_dmaoff + ha->hpc_regs->enetr_reset);
		txstat = MIPS_PHYS_TO_KSEG1(ha->ha_sh +
		    ha->ha_devoff + (SEEQ_TXSTAT << 2));

		if (platform.badaddr((void *)reset, sizeof(reset)))
			return (0);

		*(volatile uint32_t *)reset = 0x1;
		delay(20);
		*(volatile uint32_t *)reset = 0x0;

		if (platform.badaddr((void *)txstat, sizeof(txstat)))
			return (0);

		if ((*(volatile uint32_t *)txstat & 0xff) == TXSTAT_OLDNEW)
			return (1);
	}

	return (0);
}

static void
sq_attach(struct device *parent, struct device *self, void *aux)
{
	int i, err;
	const char* macaddr;
	struct sq_softc *sc = (void *)self;
	struct hpc_attach_args *haa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	sc->sc_hpct = haa->ha_st;
	sc->hpc_regs = haa->hpc_regs;		/* HPC register definitions */

	if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh,
	    haa->ha_dmaoff, sc->hpc_regs->enet_regs_size,
	    &sc->sc_hpch)) != 0) {
		printf(": unable to map HPC DMA registers, error = %d\n", err);
		goto fail_0;
	}

	sc->sc_regt = haa->ha_st;
	if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh,
	    haa->ha_devoff, sc->hpc_regs->enet_devregs_size,
	    &sc->sc_regh)) != 0) {
		printf(": unable to map Seeq registers, error = %d\n", err);
		goto fail_0;
	}

	sc->sc_dmat = haa->ha_dmat;

	if ((err = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct sq_control),
	    PAGE_SIZE, PAGE_SIZE, &sc->sc_cdseg, 1, &sc->sc_ncdseg,
	    BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to allocate control data, error = %d\n", err);
		goto fail_0;
	}

	if ((err = bus_dmamem_map(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg,
	    sizeof(struct sq_control), (void **)&sc->sc_control,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf(": unable to map control data, error = %d\n", err);
		goto fail_1;
	}

	if ((err = bus_dmamap_create(sc->sc_dmat, sizeof(struct sq_control),
	    1, sizeof(struct sq_control), PAGE_SIZE, BUS_DMA_NOWAIT,
	    &sc->sc_cdmap)) != 0) {
		printf(": unable to create DMA map for control data, error "
		    "= %d\n", err);
		goto fail_2;
	}

	if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_cdmap, sc->sc_control,
	    sizeof(struct sq_control), NULL, BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to load DMA map for control data, error "
		    "= %d\n", err);
		goto fail_3;
	}

	memset(sc->sc_control, 0, sizeof(struct sq_control));

	/* Create transmit buffer DMA maps */
	for (i = 0; i < SQ_NTXDESC; i++) {
		if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_txmap[i])) != 0) {
			printf(": unable to create tx DMA map %d, error = %d\n",
			    i, err);
			goto fail_4;
		}
	}

	/* Create receive buffer DMA maps */
	for (i = 0; i < SQ_NRXDESC; i++) {
		if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_rxmap[i])) != 0) {
			printf(": unable to create rx DMA map %d, error = %d\n",
			    i, err);
			goto fail_5;
		}
	}

	/* Pre-allocate the receive buffers. */
	for (i = 0; i < SQ_NRXDESC; i++) {
		if ((err = sq_add_rxbuf(sc, i)) != 0) {
			printf(": unable to allocate or map rx buffer %d, "
			    "error = %d\n", i, err);
			goto fail_6;
		}
	}

	memcpy(sc->sc_enaddr, &haa->hpc_eeprom[SQ_HPC_EEPROM_ENADDR],
	    ETHER_ADDR_LEN);

	/*
	 * If our mac address is bogus, obtain it from ARCBIOS.  This will
	 * be true of the onboard HPC3 on IP22, since there is no eeprom,
	 * but rather the DS1386 RTC's battery-backed ram is used.
	 */
	if (sc->sc_enaddr[0] != SGI_OUI_0 || sc->sc_enaddr[1] != SGI_OUI_1 ||
	    sc->sc_enaddr[2] != SGI_OUI_2) {
		macaddr = ARCBIOS->GetEnvironmentVariable("eaddr");
		if (macaddr == NULL) {
			printf(": unable to get MAC address!\n");
			goto fail_6;
		}
		ether_aton_r(sc->sc_enaddr, sizeof(sc->sc_enaddr), macaddr);
	}

	evcnt_attach_dynamic(&sc->sq_intrcnt, EVCNT_TYPE_INTR, NULL,
	    self->dv_xname, "intr");

	if ((cpu_intr_establish(haa->ha_irq, IPL_NET, sq_intr, sc)) == NULL) {
		printf(": unable to establish interrupt!\n");
		goto fail_6;
	}

	/* Reset the chip to a known state. */
	sq_reset(sc);

	/*
	 * Determine if we're an 8003 or 80c03 by setting the first
	 * MAC address register to non-zero, and then reading it back.
	 * If it's zero, we have an 80c03, because we will have read
	 * the TxCollLSB register.
	 */
	sq_seeq_write(sc, SEEQ_TXCOLLS0, 0xa5);
	if (sq_seeq_read(sc, SEEQ_TXCOLLS0) == 0)
		sc->sc_type = SQ_TYPE_80C03;
	else
		sc->sc_type = SQ_TYPE_8003;
	sq_seeq_write(sc, SEEQ_TXCOLLS0, 0x00);

	printf(": SGI Seeq %s\n",
	    sc->sc_type == SQ_TYPE_80C03 ? "80c03" : "8003");

	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(sc->sc_enaddr));

	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_init = sq_init;
	ifp->if_stop = sq_stop;
	ifp->if_start = sq_start;
	ifp->if_ioctl = sq_ioctl;
	ifp->if_watchdog = sq_watchdog;
	ifp->if_flags = IFF_BROADCAST | IFF_NOTRAILERS | IFF_MULTICAST;
	IFQ_SET_READY(&ifp->if_snd);

	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);

	memset(&sc->sq_trace, 0, sizeof(sc->sq_trace));
	/* Done! */
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_6:
	for (i = 0; i < SQ_NRXDESC; i++) {
		if (sc->sc_rxmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[i]);
			m_freem(sc->sc_rxmbuf[i]);
		}
	}
 fail_5:
	for (i = 0; i < SQ_NRXDESC; i++) {
		if (sc->sc_rxmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxmap[i]);
	}
 fail_4:
	for (i = 0; i < SQ_NTXDESC; i++) {
		if (sc->sc_txmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_txmap[i]);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cdmap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdmap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *) sc->sc_control,
	    sizeof(struct sq_control));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg);
 fail_0:
	return;
}

/* Set up data to get the interface up and running. */
int
sq_init(struct ifnet *ifp)
{
	int i;
	struct sq_softc *sc = ifp->if_softc;

	/* Cancel any in-progress I/O */
	sq_stop(ifp, 0);

	sc->sc_nextrx = 0;

	sc->sc_nfreetx = SQ_NTXDESC;
	sc->sc_nexttx = sc->sc_prevtx = 0;

	SQ_TRACE(SQ_RESET, sc, 0, 0);

	/* Set into 8003 mode, bank 0 to program ethernet address */
	sq_seeq_write(sc, SEEQ_TXCMD, TXCMD_BANK0);

	/* Now write the address */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		sq_seeq_write(sc, i, sc->sc_enaddr[i]);

	sc->sc_rxcmd = RXCMD_IE_CRC |
		       RXCMD_IE_DRIB |
		       RXCMD_IE_SHORT |
		       RXCMD_IE_END |
		       RXCMD_IE_GOOD;

	/*
	 * Set the receive filter -- this will add some bits to the
	 * prototype RXCMD register.  Do this before setting the
	 * transmit config register, since we might need to switch
	 * banks.
	 */
	sq_set_filter(sc);

	/* Set up Seeq transmit command register */
	sq_seeq_write(sc, SEEQ_TXCMD, TXCMD_IE_UFLOW |
				      TXCMD_IE_COLL |
				      TXCMD_IE_16COLL |
				      TXCMD_IE_GOOD);

	/* Now write the receive command register. */
	sq_seeq_write(sc, SEEQ_RXCMD, sc->sc_rxcmd);

	/*
	 * Set up HPC ethernet PIO and DMA configurations.
	 *
	 * The PROM appears to do most of this for the onboard HPC3, but
	 * not for the Challenge S's IOPLUS chip.  We copy how the onboard
	 * chip is configured and assume that it's correct for both.
	 */
	if (sc->hpc_regs->revision == 3) {
		u_int32_t dmareg, pioreg;

		pioreg = HPC3_ENETR_PIOCFG_P1(1) |
			 HPC3_ENETR_PIOCFG_P2(6) |
			 HPC3_ENETR_PIOCFG_P3(1);

		dmareg = HPC3_ENETR_DMACFG_D1(6) |
			 HPC3_ENETR_DMACFG_D2(2) |
			 HPC3_ENETR_DMACFG_D3(0) |
			 HPC3_ENETR_DMACFG_FIX_RXDC |
			 HPC3_ENETR_DMACFG_FIX_INTR |
			 HPC3_ENETR_DMACFG_FIX_EOP |
			 HPC3_ENETR_DMACFG_TIMEOUT;

		sq_hpc_write(sc, HPC3_ENETR_PIOCFG, pioreg);
		sq_hpc_write(sc, HPC3_ENETR_DMACFG, dmareg);
	}

	/* Pass the start of the receive ring to the HPC */
	sq_hpc_write(sc, sc->hpc_regs->enetr_ndbp, SQ_CDRXADDR(sc, 0));

	/* And turn on the HPC ethernet receive channel */
	sq_hpc_write(sc, sc->hpc_regs->enetr_ctl,
	    sc->hpc_regs->enetr_ctl_active);

	/*
	 * Turn off delayed receive interrupts on HPC1.
	 * (see Hollywood HPC Specification 2.1.4.3)
	 */
	if (sc->hpc_regs->revision != 3)
		sq_hpc_write(sc, HPC1_ENET_INTDELAY, HPC1_ENET_INTDELAY_OFF);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

static void
sq_set_filter(struct sq_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;

	/*
	 * Check for promiscuous mode.  Also implies
	 * all-multicast.
	 */
	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_rxcmd |= RXCMD_REC_ALL;
		ifp->if_flags |= IFF_ALLMULTI;
		return;
	}

	/*
	 * The 8003 has no hash table.  If we have any multicast
	 * addresses on the list, enable reception of all multicast
	 * frames.
	 *
	 * XXX The 80c03 has a hash table.  We should use it.
	 */

	ETHER_FIRST_MULTI(step, ec, enm);

	if (enm == NULL) {
		sc->sc_rxcmd &= ~RXCMD_REC_MASK;
		sc->sc_rxcmd |= RXCMD_REC_BROAD;

		ifp->if_flags &= ~IFF_ALLMULTI;
		return;
	}

	sc->sc_rxcmd |= RXCMD_REC_MULTI;
	ifp->if_flags |= IFF_ALLMULTI;
}

int
sq_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	int s, error = 0;

	SQ_TRACE(SQ_IOCTL, (struct sq_softc *)ifp->if_softc, 0, 0);

	s = splnet();

	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		/*
		 * Multicast list has changed; set the hardware filter
		 * accordingly.
		 */
		if (ifp->if_flags & IFF_RUNNING)
			error = sq_init(ifp);
		else
			error = 0;
	}

	splx(s);
	return (error);
}

void
sq_start(struct ifnet *ifp)
{
	struct sq_softc *sc = ifp->if_softc;
	u_int32_t status;
	struct mbuf *m0, *m;
	bus_dmamap_t dmamap;
	int err, totlen, nexttx, firsttx, lasttx = -1, ofree, seg;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_nfreetx;
	firsttx = sc->sc_nexttx;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (sc->sc_nfreetx != 0) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		dmamap = sc->sc_txmap[sc->sc_nexttx];

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we were
		 * short on resources.
		 * In this case, we'll copy and try
		 * again.
		 * Also copy it if we need to pad, so that we are sure there
		 * is room for the pad buffer.
		 * XXX the right way of doing this is to use a static buffer
		 * for padding and adding it to the transmit descriptor (see
		 * sys/dev/pci/if_tl.c for example).  We can't do this here yet
		 * because we can't send packets with more than one fragment.
		 */
		if (m0->m_pkthdr.len < ETHER_PAD_LEN ||
		    bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					break;
				}
			}

			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
			if (m0->m_pkthdr.len < ETHER_PAD_LEN) {
				memset(mtod(m, char *) + m0->m_pkthdr.len, 0,
				    ETHER_PAD_LEN - m0->m_pkthdr.len);
				m->m_pkthdr.len = m->m_len = ETHER_PAD_LEN;
			} else
				m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;

			if ((err = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_NOWAIT)) != 0) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname, err);
				break;
			}
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.
		 */
		if (dmamap->dm_nsegs > sc->sc_nfreetx) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed to anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 *
			 * XXX We could allocate an mbuf and copy, but
			 * XXX is it worth it?
			 */
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			if (m != NULL)
				m_freem(m);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
		/*
		 * Pass the packet to any BPF listeners.
		 */
		bpf_mtap(ifp, m0);
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		SQ_TRACE(SQ_ENQUEUE, sc, sc->sc_nexttx, 0);

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the transmit descriptors.
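		 *
		 * Each descriptor carries a buffer address, a length/control
		 * word and a link to the next descriptor; HPC1 and HPC3 lay
		 * these fields out differently, hence the two cases below.
		 * The ring arithmetic is assumed to be handled by the
		 * SQ_NEXTTX()/SQ_CDTXADDR() helpers from sqvar.h, roughly
		 * "next = (cur + 1) % SQ_NTXDESC" and "DMA address of
		 * descriptor 'cur' within sq_control" respectively.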
		 */
		for (nexttx = sc->sc_nexttx, seg = 0, totlen = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = SQ_NEXTTX(nexttx)) {
			if (sc->hpc_regs->revision == 3) {
				sc->sc_txdesc[nexttx].hpc3_hdd_bufptr =
				    dmamap->dm_segs[seg].ds_addr;
				sc->sc_txdesc[nexttx].hpc3_hdd_ctl =
				    dmamap->dm_segs[seg].ds_len;
			} else {
				sc->sc_txdesc[nexttx].hpc1_hdd_bufptr =
				    dmamap->dm_segs[seg].ds_addr;
				sc->sc_txdesc[nexttx].hpc1_hdd_ctl =
				    dmamap->dm_segs[seg].ds_len;
			}
			sc->sc_txdesc[nexttx].hdd_descptr =
			    SQ_CDTXADDR(sc, SQ_NEXTTX(nexttx));
			lasttx = nexttx;
			totlen += dmamap->dm_segs[seg].ds_len;
		}

		/* Last descriptor gets end-of-packet */
		KASSERT(lasttx != -1);
		if (sc->hpc_regs->revision == 3)
			sc->sc_txdesc[lasttx].hpc3_hdd_ctl |=
			    HPC3_HDD_CTL_EOPACKET;
		else
			sc->sc_txdesc[lasttx].hpc1_hdd_ctl |=
			    HPC1_HDD_CTL_EOPACKET;

		SQ_DPRINTF(("%s: transmit %d-%d, len %d\n",
		    sc->sc_dev.dv_xname, sc->sc_nexttx, lasttx, totlen));

		if (ifp->if_flags & IFF_DEBUG) {
			printf(" transmit chain:\n");
			for (seg = sc->sc_nexttx;; seg = SQ_NEXTTX(seg)) {
				printf(" descriptor %d:\n", seg);
				printf("   hdd_bufptr:  0x%08x\n",
				    (sc->hpc_regs->revision == 3) ?
				    sc->sc_txdesc[seg].hpc3_hdd_bufptr :
				    sc->sc_txdesc[seg].hpc1_hdd_bufptr);
				printf("   hdd_ctl:     0x%08x\n",
				    (sc->hpc_regs->revision == 3) ?
				    sc->sc_txdesc[seg].hpc3_hdd_ctl :
				    sc->sc_txdesc[seg].hpc1_hdd_ctl);
				printf("   hdd_descptr: 0x%08x\n",
				    sc->sc_txdesc[seg].hdd_descptr);

				if (seg == lasttx)
					break;
			}
		}

		/* Sync the descriptors we're using. */
		SQ_CDTXSYNC(sc, sc->sc_nexttx, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Store a pointer to the packet so we can free it later */
		sc->sc_txmbuf[sc->sc_nexttx] = m0;

		/* Advance the tx pointer. */
		sc->sc_nfreetx -= dmamap->dm_nsegs;
		sc->sc_nexttx = nexttx;
	}

	/* All transmit descriptors used up, let upper layers know */
	if (sc->sc_nfreetx == 0)
		ifp->if_flags |= IFF_OACTIVE;

	if (sc->sc_nfreetx != ofree) {
		SQ_DPRINTF(("%s: %d packets enqueued, first %d, INTR on %d\n",
		    sc->sc_dev.dv_xname, lasttx - firsttx + 1, firsttx,
		    lasttx));

		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued, mark it as the last
		 * descriptor.
		 *
		 * HPC1_HDD_CTL_INTR will generate an interrupt on
		 * HPC1.  HPC3 requires HPC3_HDD_CTL_EOPACKET in
		 * addition to HPC3_HDD_CTL_INTR to interrupt.
		 */
		KASSERT(lasttx != -1);
		if (sc->hpc_regs->revision == 3) {
			sc->sc_txdesc[lasttx].hpc3_hdd_ctl |=
			    HPC3_HDD_CTL_INTR | HPC3_HDD_CTL_EOCHAIN;
		} else {
			sc->sc_txdesc[lasttx].hpc1_hdd_ctl |= HPC1_HDD_CTL_INTR;
			sc->sc_txdesc[lasttx].hpc1_hdd_bufptr |=
			    HPC1_HDD_CTL_EOCHAIN;
		}

		SQ_CDTXSYNC(sc, lasttx, 1,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/*
		 * There is a potential race condition here if the HPC
		 * DMA channel is active and we try and either update
		 * the 'next descriptor' pointer in the HPC PIO space
		 * or the 'next descriptor' pointer in a previous desc-
		 * riptor.
		 *
		 * To avoid this, if the channel is active, we rely on
		 * the transmit interrupt routine noticing that there
		 * are more packets to send and restarting the HPC DMA
		 * engine, rather than mucking with the DMA state here.
		 */
		status = sq_hpc_read(sc, sc->hpc_regs->enetx_ctl);

		if ((status & sc->hpc_regs->enetx_ctl_active) != 0) {
			SQ_TRACE(SQ_ADD_TO_DMA, sc, firsttx, status);

			/*
			 * NB: hpc3_hdd_ctl == hpc1_hdd_bufptr, and
			 * HPC1_HDD_CTL_EOCHAIN == HPC3_HDD_CTL_EOCHAIN
			 */
			sc->sc_txdesc[SQ_PREVTX(firsttx)].hpc3_hdd_ctl &=
			    ~HPC3_HDD_CTL_EOCHAIN;

			if (sc->hpc_regs->revision != 3)
				sc->sc_txdesc[SQ_PREVTX(firsttx)].hpc1_hdd_ctl
				    &= ~HPC1_HDD_CTL_INTR;

			SQ_CDTXSYNC(sc, SQ_PREVTX(firsttx), 1,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		} else if (sc->hpc_regs->revision == 3) {
			SQ_TRACE(SQ_START_DMA, sc, firsttx, status);

			sq_hpc_write(sc, HPC3_ENETX_NDBP, SQ_CDTXADDR(sc,
			    firsttx));

			/* Kick DMA channel into life */
			sq_hpc_write(sc, HPC3_ENETX_CTL, HPC3_ENETX_CTL_ACTIVE);
		} else {
			/*
			 * In the HPC1 case where transmit DMA is
			 * inactive, we can either kick off if
			 * the ring was previously empty, or call
			 * our transmit interrupt handler to
			 * figure out if the ring stopped short
			 * and restart at the right place.
			 */
			if (ofree == SQ_NTXDESC) {
				SQ_TRACE(SQ_START_DMA, sc, firsttx, status);

				sq_hpc_write(sc, HPC1_ENETX_NDBP,
				    SQ_CDTXADDR(sc, firsttx));
				sq_hpc_write(sc, HPC1_ENETX_CFXBP,
				    SQ_CDTXADDR(sc, firsttx));
				sq_hpc_write(sc, HPC1_ENETX_CBP,
				    SQ_CDTXADDR(sc, firsttx));

				/* Kick DMA channel into life */
				sq_hpc_write(sc, HPC1_ENETX_CTL,
				    HPC1_ENETX_CTL_ACTIVE);
			} else
				sq_txring_hpc1(sc);
		}

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

void
sq_stop(struct ifnet *ifp, int disable)
{
	int i;
	struct sq_softc *sc = ifp->if_softc;

	for (i = 0; i < SQ_NTXDESC; i++) {
		if (sc->sc_txmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
			m_freem(sc->sc_txmbuf[i]);
			sc->sc_txmbuf[i] = NULL;
		}
	}

	/* Clear Seeq transmit/receive command registers */
	sq_seeq_write(sc, SEEQ_TXCMD, 0);
	sq_seeq_write(sc, SEEQ_RXCMD, 0);

	sq_reset(sc);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

/*
 * Device timeout/watchdog routine.
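 * The stack calls this when if_timer counts down to zero without the
 * transmit side reporting completion, i.e. a transmit appears to be stuck;
 * log some state, dump and reset the trace buffer, then reinitialize the
 * interface.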
 */
void
sq_watchdog(struct ifnet *ifp)
{
	u_int32_t status;
	struct sq_softc *sc = ifp->if_softc;

	status = sq_hpc_read(sc, sc->hpc_regs->enetx_ctl);
	log(LOG_ERR, "%s: device timeout (prev %d, next %d, free %d, "
	    "status %08x)\n", sc->sc_dev.dv_xname, sc->sc_prevtx,
	    sc->sc_nexttx, sc->sc_nfreetx, status);

	sq_trace_dump(sc);

	memset(&sc->sq_trace, 0, sizeof(sc->sq_trace));
	sc->sq_trace_idx = 0;

	++ifp->if_oerrors;

	sq_init(ifp);
}

static void
sq_trace_dump(struct sq_softc *sc)
{
	int i;
	const char *act;

	for (i = 0; i < sc->sq_trace_idx; i++) {
		switch (sc->sq_trace[i].action) {
		case SQ_RESET:		act = "SQ_RESET";		break;
		case SQ_ADD_TO_DMA:	act = "SQ_ADD_TO_DMA";		break;
		case SQ_START_DMA:	act = "SQ_START_DMA";		break;
		case SQ_DONE_DMA:	act = "SQ_DONE_DMA";		break;
		case SQ_RESTART_DMA:	act = "SQ_RESTART_DMA";		break;
		case SQ_TXINTR_ENTER:	act = "SQ_TXINTR_ENTER";	break;
		case SQ_TXINTR_EXIT:	act = "SQ_TXINTR_EXIT";		break;
		case SQ_TXINTR_BUSY:	act = "SQ_TXINTR_BUSY";		break;
		case SQ_IOCTL:		act = "SQ_IOCTL";		break;
		case SQ_ENQUEUE:	act = "SQ_ENQUEUE";		break;
		default:		act = "UNKNOWN";
		}

		printf("%s: [%03d] action %-16s buf %03d free %03d "
		    "status %08x line %d\n", sc->sc_dev.dv_xname, i, act,
		    sc->sq_trace[i].bufno, sc->sq_trace[i].freebuf,
		    sc->sq_trace[i].status, sc->sq_trace[i].line);
	}
}

static int
sq_intr(void *arg)
{
	struct sq_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int handled = 0;
	u_int32_t stat;

	stat = sq_hpc_read(sc, sc->hpc_regs->enetr_reset);

	if ((stat & 2) == 0)
		SQ_DPRINTF(("%s: Unexpected interrupt!\n",
		    sc->sc_dev.dv_xname));
	else
		sq_hpc_write(sc, sc->hpc_regs->enetr_reset, (stat | 2));

	/*
	 * If the interface isn't running, the interrupt couldn't
	 * possibly have come from us.
	 */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return 0;

	sc->sq_intrcnt.ev_count++;

	/* Always check for received packets */
	if (sq_rxintr(sc) != 0)
		handled++;

	/* Only handle transmit interrupts if we actually sent something */
	if (sc->sc_nfreetx < SQ_NTXDESC) {
		sq_txintr(sc);
		handled++;
	}

#if NRND > 0
	if (handled)
		rnd_add_uint32(&sc->rnd_source, stat);
#endif
	return (handled);
}

static int
sq_rxintr(struct sq_softc *sc)
{
	int count = 0;
	struct mbuf* m;
	int i, framelen;
	u_int8_t pktstat;
	u_int32_t status;
	u_int32_t ctl_reg;
	int new_end, orig_end;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	for (i = sc->sc_nextrx;; i = SQ_NEXTRX(i)) {
		SQ_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD |
		    BUS_DMASYNC_POSTWRITE);

		/*
		 * If this is a CPU-owned buffer, we're at the end of the list.
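		 * Descriptors the HPC has already filled have the OWN bit
		 * clear (and appear to carry the residual byte count in
		 * hdd_ctl, which is what the framelen computation below
		 * relies on), so the first descriptor found with OWN still
		 * set ends this pass.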
		 */
		if (sc->hpc_regs->revision == 3)
			ctl_reg = sc->sc_rxdesc[i].hpc3_hdd_ctl &
			    HPC3_HDD_CTL_OWN;
		else
			ctl_reg = sc->sc_rxdesc[i].hpc1_hdd_ctl &
			    HPC1_HDD_CTL_OWN;

		if (ctl_reg) {
#if defined(SQ_DEBUG)
			u_int32_t reg;

			reg = sq_hpc_read(sc, sc->hpc_regs->enetr_ctl);
			SQ_DPRINTF(("%s: rxintr: done at %d (ctl %08x)\n",
			    sc->sc_dev.dv_xname, i, reg));
#endif
			break;
		}

		count++;

		m = sc->sc_rxmbuf[i];
		framelen = m->m_ext.ext_size - 3;
		if (sc->hpc_regs->revision == 3)
			framelen -=
			    HPC3_HDD_CTL_BYTECNT(sc->sc_rxdesc[i].hpc3_hdd_ctl);
		else
			framelen -=
			    HPC1_HDD_CTL_BYTECNT(sc->sc_rxdesc[i].hpc1_hdd_ctl);

		/* Now sync the actual packet data */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
		    sc->sc_rxmap[i]->dm_mapsize, BUS_DMASYNC_POSTREAD);

		pktstat = *((u_int8_t*)m->m_data + framelen + 2);

		if ((pktstat & RXSTAT_GOOD) == 0) {
			ifp->if_ierrors++;

			if (pktstat & RXSTAT_OFLOW)
				printf("%s: receive FIFO overflow\n",
				    sc->sc_dev.dv_xname);

			bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
			    sc->sc_rxmap[i]->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
			SQ_INIT_RXDESC(sc, i);
			SQ_DPRINTF(("%s: sq_rxintr: buf %d no RXSTAT_GOOD\n",
			    sc->sc_dev.dv_xname, i));
			continue;
		}

		if (sq_add_rxbuf(sc, i) != 0) {
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
			    sc->sc_rxmap[i]->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
			SQ_INIT_RXDESC(sc, i);
			SQ_DPRINTF(("%s: sq_rxintr: buf %d sq_add_rxbuf() "
			    "failed\n", sc->sc_dev.dv_xname, i));
			continue;
		}


		m->m_data += 2;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = framelen;

		ifp->if_ipackets++;

		SQ_DPRINTF(("%s: sq_rxintr: buf %d len %d\n",
		    sc->sc_dev.dv_xname, i, framelen));

		bpf_mtap(ifp, m);
		(*ifp->if_input)(ifp, m);
	}


	/* If anything happened, move ring start/end pointers to new spot */
	if (i != sc->sc_nextrx) {
		/*
		 * NB: hpc3_hdd_ctl == hpc1_hdd_bufptr, and
		 * HPC1_HDD_CTL_EOCHAIN == HPC3_HDD_CTL_EOCHAIN
		 */

		new_end = SQ_PREVRX(i);
		sc->sc_rxdesc[new_end].hpc3_hdd_ctl |= HPC3_HDD_CTL_EOCHAIN;
		SQ_CDRXSYNC(sc, new_end, BUS_DMASYNC_PREREAD |
		    BUS_DMASYNC_PREWRITE);

		orig_end = SQ_PREVRX(sc->sc_nextrx);
		sc->sc_rxdesc[orig_end].hpc3_hdd_ctl &= ~HPC3_HDD_CTL_EOCHAIN;
		SQ_CDRXSYNC(sc, orig_end, BUS_DMASYNC_PREREAD |
		    BUS_DMASYNC_PREWRITE);

		sc->sc_nextrx = i;
	}

	status = sq_hpc_read(sc, sc->hpc_regs->enetr_ctl);

	/*
	 * If receive channel is stopped, restart it...
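	 * The receiver presumably stops when it reaches the descriptor
	 * marked EOCHAIN (or otherwise runs out of descriptors), so after
	 * moving the ring pointers above we may need to point NDBP back at
	 * sc_nextrx and re-activate the channel.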
	 */
	if ((status & sc->hpc_regs->enetr_ctl_active) == 0) {
		/* Pass the start of the receive ring to the HPC */
		sq_hpc_write(sc, sc->hpc_regs->enetr_ndbp, SQ_CDRXADDR(sc,
		    sc->sc_nextrx));

		/* And turn on the HPC ethernet receive channel */
		sq_hpc_write(sc, sc->hpc_regs->enetr_ctl,
		    sc->hpc_regs->enetr_ctl_active);
	}

	return count;
}

static int
sq_txintr(struct sq_softc *sc)
{
	int shift = 0;
	u_int32_t status, tmp;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	if (sc->hpc_regs->revision != 3)
		shift = 16;

	status = sq_hpc_read(sc, sc->hpc_regs->enetx_ctl) >> shift;

	SQ_TRACE(SQ_TXINTR_ENTER, sc, sc->sc_prevtx, status);

	tmp = (sc->hpc_regs->enetx_ctl_active >> shift) | TXSTAT_GOOD;
	if ((status & tmp) == 0) {
		if (status & TXSTAT_COLL)
			ifp->if_collisions++;

		if (status & TXSTAT_UFLOW) {
			printf("%s: transmit underflow\n",
			    sc->sc_dev.dv_xname);
			ifp->if_oerrors++;
		}

		if (status & TXSTAT_16COLL) {
			printf("%s: max collisions reached\n",
			    sc->sc_dev.dv_xname);
			ifp->if_oerrors++;
			ifp->if_collisions += 16;
		}
	}

	/* prevtx now points to next xmit packet not yet finished */
	if (sc->hpc_regs->revision == 3)
		sq_txring_hpc3(sc);
	else
		sq_txring_hpc1(sc);

	/* If we have buffers free, let upper layers know */
	if (sc->sc_nfreetx > 0)
		ifp->if_flags &= ~IFF_OACTIVE;

	/* If all packets have left the coop, cancel watchdog */
	if (sc->sc_nfreetx == SQ_NTXDESC)
		ifp->if_timer = 0;

	SQ_TRACE(SQ_TXINTR_EXIT, sc, sc->sc_prevtx, status);
	sq_start(ifp);

	return 1;
}

/*
 * Reclaim used transmit descriptors and restart the transmit DMA
 * engine if necessary.
 */
static void
sq_txring_hpc1(struct sq_softc *sc)
{
	/*
	 * HPC1 doesn't tag transmitted descriptors, however,
	 * the NDBP register points to the next descriptor that
	 * has not yet been processed.  If DMA is not in progress,
	 * we can safely reclaim all descriptors up to NDBP, and,
	 * if necessary, restart DMA at NDBP.  Otherwise, if DMA
	 * is active, we can only safely reclaim up to CBP.
	 *
	 * For now, we'll only reclaim on inactive DMA and assume
	 * that a sufficiently large ring keeps us out of trouble.
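	 *
	 * One subtle case below: if the ring is completely full
	 * (sc_nfreetx == 0) and NDBP has come back around to the first
	 * unreclaimed descriptor, the code assumes every descriptor has
	 * been transmitted, so 'reclaimall' lets the loop walk the whole
	 * ring instead of stopping immediately.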
	 */
	u_int32_t reclaimto, status;
	int reclaimall, i = sc->sc_prevtx;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	status = sq_hpc_read(sc, HPC1_ENETX_CTL);
	if (status & HPC1_ENETX_CTL_ACTIVE) {
		SQ_TRACE(SQ_TXINTR_BUSY, sc, i, status);
		return;
	} else
		reclaimto = sq_hpc_read(sc, HPC1_ENETX_NDBP);

	if (sc->sc_nfreetx == 0 && SQ_CDTXADDR(sc, i) == reclaimto)
		reclaimall = 1;
	else
		reclaimall = 0;

	while (sc->sc_nfreetx < SQ_NTXDESC) {
		if (SQ_CDTXADDR(sc, i) == reclaimto && !reclaimall)
			break;

		SQ_CDTXSYNC(sc, i, sc->sc_txmap[i]->dm_nsegs,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		/* Sync the packet data, unload DMA map, free mbuf */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_txmap[i], 0,
		    sc->sc_txmap[i]->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
		m_freem(sc->sc_txmbuf[i]);
		sc->sc_txmbuf[i] = NULL;

		ifp->if_opackets++;
		sc->sc_nfreetx++;

		SQ_TRACE(SQ_DONE_DMA, sc, i, status);

		i = SQ_NEXTTX(i);
	}

	if (sc->sc_nfreetx < SQ_NTXDESC) {
		SQ_TRACE(SQ_RESTART_DMA, sc, i, status);

		KASSERT(reclaimto == SQ_CDTXADDR(sc, i));

		sq_hpc_write(sc, HPC1_ENETX_CFXBP, reclaimto);
		sq_hpc_write(sc, HPC1_ENETX_CBP, reclaimto);

		/* Kick DMA channel into life */
		sq_hpc_write(sc, HPC1_ENETX_CTL, HPC1_ENETX_CTL_ACTIVE);

		/*
		 * Set a watchdog timer in case the chip
		 * flakes out.
		 */
		ifp->if_timer = 5;
	}

	sc->sc_prevtx = i;
}

/*
 * Reclaim used transmit descriptors and restart the transmit DMA
 * engine if necessary.
 */
static void
sq_txring_hpc3(struct sq_softc *sc)
{
	/*
	 * HPC3 tags descriptors with a bit once they've been
	 * transmitted.  We need only free each XMITDONE'd
	 * descriptor, and restart the DMA engine if any
	 * descriptors are left over.
	 */
	int i;
	u_int32_t status = 0;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	i = sc->sc_prevtx;
	while (sc->sc_nfreetx < SQ_NTXDESC) {
		/*
		 * Check status first so we don't end up with a case of
		 * the buffer not being finished while the DMA channel
		 * has gone idle.
		 */
		status = sq_hpc_read(sc, HPC3_ENETX_CTL);

		SQ_CDTXSYNC(sc, i, sc->sc_txmap[i]->dm_nsegs,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		/* Check for used descriptor and restart DMA chain if needed */
		if (!(sc->sc_txdesc[i].hpc3_hdd_ctl & HPC3_HDD_CTL_XMITDONE)) {
			if ((status & HPC3_ENETX_CTL_ACTIVE) == 0) {
				SQ_TRACE(SQ_RESTART_DMA, sc, i, status);

				sq_hpc_write(sc, HPC3_ENETX_NDBP,
				    SQ_CDTXADDR(sc, i));

				/* Kick DMA channel into life */
				sq_hpc_write(sc, HPC3_ENETX_CTL,
				    HPC3_ENETX_CTL_ACTIVE);

				/*
				 * Set a watchdog timer in case the chip
				 * flakes out.
				 */
				ifp->if_timer = 5;
			} else
				SQ_TRACE(SQ_TXINTR_BUSY, sc, i, status);
			break;
		}

		/* Sync the packet data, unload DMA map, free mbuf */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_txmap[i], 0,
		    sc->sc_txmap[i]->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
		m_freem(sc->sc_txmbuf[i]);
		sc->sc_txmbuf[i] = NULL;

		ifp->if_opackets++;
		sc->sc_nfreetx++;

		SQ_TRACE(SQ_DONE_DMA, sc, i, status);
		i = SQ_NEXTTX(i);
	}

	sc->sc_prevtx = i;
}

void
sq_reset(struct sq_softc *sc)
{
	/* Stop HPC dma channels */
	sq_hpc_write(sc, sc->hpc_regs->enetr_ctl, 0);
	sq_hpc_write(sc, sc->hpc_regs->enetx_ctl, 0);

	sq_hpc_write(sc, sc->hpc_regs->enetr_reset, 3);
	delay(20);
	sq_hpc_write(sc, sc->hpc_regs->enetr_reset, 0);
}

/* sq_add_rxbuf: Add a receive buffer to the indicated descriptor. */
int
sq_add_rxbuf(struct sq_softc *sc, int idx)
{
	int err;
	struct mbuf *m;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (sc->sc_rxmbuf[idx] != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[idx]);

	sc->sc_rxmbuf[idx] = m;

	if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_rxmap[idx],
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, err);
		panic("sq_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[idx], 0,
	    sc->sc_rxmap[idx]->dm_mapsize, BUS_DMASYNC_PREREAD);

	SQ_INIT_RXDESC(sc, idx);

	return 0;
}

void
sq_dump_buffer(paddr_t addr, psize_t len)
{
	u_int i;
	u_char *physaddr = (u_char *)MIPS_PHYS_TO_KSEG1(addr);

	if (len == 0)
		return;

	printf("%p: ", physaddr);

	for (i = 0; i < len; i++) {
		printf("%02x ", *(physaddr + i) & 0xff);
		if ((i % 16) == 15 && i != len - 1)
			printf("\n%p: ", physaddr + i);
	}

	printf("\n");
}