/*	$NetBSD: if_sq.c,v 1.21 2004/10/30 18:08:35 thorpej Exp $	*/

/*
 * Copyright (c) 2001 Rafal K. Boni
 * Copyright (c) 1998, 1999, 2000 The NetBSD Foundation, Inc.
 * All rights reserved.
 *
 * Portions of this code are derived from software contributed to The
 * NetBSD Foundation by Jason R. Thorpe of the Numerical Aerospace
 * Simulation Facility, NASA Ames Research Center.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_sq.c,v 1.21 2004/10/30 18:08:35 thorpej Exp $");

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/device.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/syslog.h>

#include <uvm/uvm_extern.h>

#include <machine/endian.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>

#include <dev/ic/seeq8003reg.h>

#include <sgimips/hpc/sqvar.h>
#include <sgimips/hpc/hpcvar.h>
#include <sgimips/hpc/hpcreg.h>

#include <dev/arcbios/arcbios.h>
#include <dev/arcbios/arcbiosvar.h>

#define static

/*
 * Short TODO list:
 *	(1) Do counters for bad-RX packets.
 *	(2) Allow multi-segment transmits, instead of copying to a single,
 *	    contiguous mbuf.
 *	(3) Verify sq_stop() turns off enough stuff; I was still getting
 *	    seeq interrupts after sq_stop().
 *	(4) Implement EDLC modes: especially packet auto-pad and simplex
 *	    mode.
 *	(5) Should the driver filter out its own transmissions in non-EDLC
 *	    mode?
 *	(6) Multicast support -- multicast filter, address management, ...
 *	(7) Deal with RB0 (recv buffer overflow) on reception.  Will need
 *	    to figure out if RB0 is read-only as stated in one spot in the
 *	    HPC spec or read-write (ie, is the 'write a one to clear it')
 *	    the correct thing?
 */

#if defined(SQ_DEBUG)
int	sq_debug = 0;
#define SQ_DPRINTF(x)	if (sq_debug) printf x
#else
#define SQ_DPRINTF(x)
#endif

static int	sq_match(struct device *, struct cfdata *, void *);
static void	sq_attach(struct device *, struct device *, void *);
static int	sq_init(struct ifnet *);
static void	sq_start(struct ifnet *);
static void	sq_stop(struct ifnet *, int);
static void	sq_watchdog(struct ifnet *);
static int	sq_ioctl(struct ifnet *, u_long, caddr_t);

static void	sq_set_filter(struct sq_softc *);
static int	sq_intr(void *);
static int	sq_rxintr(struct sq_softc *);
static int	sq_txintr(struct sq_softc *);
static void	sq_reset(struct sq_softc *);
static int	sq_add_rxbuf(struct sq_softc *, int);
static void	sq_dump_buffer(u_int32_t addr, u_int32_t len);

static void	enaddr_aton(const char*, u_int8_t*);

/* Actions */
#define SQ_RESET		1
#define SQ_ADD_TO_DMA		2
#define SQ_START_DMA		3
#define SQ_DONE_DMA		4
#define SQ_RESTART_DMA		5
#define SQ_TXINTR_ENTER		6
#define SQ_TXINTR_EXIT		7
#define SQ_TXINTR_BUSY		8

struct sq_action_trace {
	int action;
	int bufno;
	int status;
	int freebuf;
};

#define SQ_TRACEBUF_SIZE	100
int sq_trace_idx = 0;
struct sq_action_trace sq_trace[SQ_TRACEBUF_SIZE];

void sq_trace_dump(struct sq_softc* sc);

#define SQ_TRACE(act, buf, stat, free) do {			\
	sq_trace[sq_trace_idx].action = (act);			\
	sq_trace[sq_trace_idx].bufno = (buf);			\
	sq_trace[sq_trace_idx].status = (stat);			\
	sq_trace[sq_trace_idx].freebuf = (free);		\
	if (++sq_trace_idx == SQ_TRACEBUF_SIZE) {		\
		memset(&sq_trace, 0, sizeof(sq_trace));		\
		sq_trace_idx = 0;				\
	}							\
} while (0)

CFATTACH_DECL(sq, sizeof(struct sq_softc),
    sq_match, sq_attach, NULL, NULL);

#define ETHER_PAD_LEN (ETHER_MIN_LEN - ETHER_CRC_LEN)

static int
sq_match(struct device *parent, struct cfdata *cf, void *aux)
{
	struct hpc_attach_args *ha = aux;

	if (strcmp(ha->ha_name, cf->cf_name) == 0)
		return (1);

	return (0);
}

static void
sq_attach(struct device *parent, struct device *self, void *aux)
{
	int i, err;
	char* macaddr;
	struct sq_softc *sc = (void *)self;
	struct hpc_attach_args *haa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	sc->sc_hpct = haa->ha_st;
	sc->hpc_regs = haa->hpc_regs;		/* HPC register definitions */

	if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh,
	    haa->ha_dmaoff, sc->hpc_regs->enet_regs_size,
	    &sc->sc_hpch)) != 0) {
		printf(": unable to map HPC DMA registers, error = %d\n", err);
		goto fail_0;
	}

	sc->sc_regt = haa->ha_st;
	if ((err = bus_space_subregion(haa->ha_st, haa->ha_sh,
	    haa->ha_devoff, sc->hpc_regs->enet_devregs_size,
	    &sc->sc_regh)) != 0) {
		printf(": unable to map Seeq registers, error = %d\n", err);
		goto fail_0;
	}

	sc->sc_dmat = haa->ha_dmat;

	if ((err = bus_dmamem_alloc(sc->sc_dmat, sizeof(struct sq_control),
	    PAGE_SIZE, PAGE_SIZE, &sc->sc_cdseg,
	    1, &sc->sc_ncdseg, BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to allocate control data, error = %d\n", err);
		goto fail_0;
	}

	if ((err = bus_dmamem_map(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg,
	    sizeof(struct sq_control), (caddr_t *)&sc->sc_control,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf(": unable to map control "
		    "data, error = %d\n", err);
		goto fail_1;
	}

	if ((err = bus_dmamap_create(sc->sc_dmat, sizeof(struct sq_control),
	    1, sizeof(struct sq_control), PAGE_SIZE,
	    BUS_DMA_NOWAIT, &sc->sc_cdmap)) != 0) {
		printf(": unable to create DMA map for control data, error "
		    "= %d\n", err);
		goto fail_2;
	}

	if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_cdmap, sc->sc_control,
	    sizeof(struct sq_control),
	    NULL, BUS_DMA_NOWAIT)) != 0) {
		printf(": unable to load DMA map for control data, error "
		    "= %d\n", err);
		goto fail_3;
	}

	memset(sc->sc_control, 0, sizeof(struct sq_control));

	/* Create transmit buffer DMA maps */
	for (i = 0; i < SQ_NTXDESC; i++) {
		if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_txmap[i])) != 0) {
			printf(": unable to create tx DMA map %d, "
			    "error = %d\n", i, err);
			goto fail_4;
		}
	}

	/* Create receive buffer DMA maps */
	for (i = 0; i < SQ_NRXDESC; i++) {
		if ((err = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_rxmap[i])) != 0) {
			printf(": unable to create rx DMA map %d, "
			    "error = %d\n", i, err);
			goto fail_5;
		}
	}

	/* Pre-allocate the receive buffers. */
	for (i = 0; i < SQ_NRXDESC; i++) {
		if ((err = sq_add_rxbuf(sc, i)) != 0) {
			printf(": unable to allocate or map rx buffer %d, "
			    "error = %d\n", i, err);
			goto fail_6;
		}
	}

	if ((macaddr = ARCBIOS->GetEnvironmentVariable("eaddr")) == NULL) {
		printf(": unable to get MAC address!\n");
		goto fail_6;
	}

	evcnt_attach_dynamic(&sc->sq_intrcnt, EVCNT_TYPE_INTR, NULL,
	    self->dv_xname, "intr");

	if ((cpu_intr_establish(haa->ha_irq, IPL_NET, sq_intr, sc)) == NULL) {
		printf(": unable to establish interrupt!\n");
		goto fail_6;
	}

	/* Reset the chip to a known state. */
	sq_reset(sc);

	/*
	 * Determine if we're an 8003 or 80c03 by setting the first
	 * MAC address register to non-zero, and then reading it back.
	 * If it's zero, we have an 80c03, because we will have read
	 * the TxCollLSB register.
	 */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCOLLS0, 0xa5);
	if (bus_space_read_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCOLLS0) == 0)
		sc->sc_type = SQ_TYPE_80C03;
	else
		sc->sc_type = SQ_TYPE_8003;
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCOLLS0, 0x00);

	printf(": SGI Seeq %s\n",
	    sc->sc_type == SQ_TYPE_80C03 ? "80c03" : "8003");

	enaddr_aton(macaddr, sc->sc_enaddr);

	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(sc->sc_enaddr));

	strcpy(ifp->if_xname, sc->sc_dev.dv_xname);
	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_init = sq_init;
	ifp->if_stop = sq_stop;
	ifp->if_start = sq_start;
	ifp->if_ioctl = sq_ioctl;
	ifp->if_watchdog = sq_watchdog;
	ifp->if_flags = IFF_BROADCAST | IFF_NOTRAILERS | IFF_MULTICAST;
	IFQ_SET_READY(&ifp->if_snd);

	if_attach(ifp);
	ether_ifattach(ifp, sc->sc_enaddr);

	memset(&sq_trace, 0, sizeof(sq_trace));
	/* Done! */
	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
fail_6:
	for (i = 0; i < SQ_NRXDESC; i++) {
		if (sc->sc_rxmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[i]);
			m_freem(sc->sc_rxmbuf[i]);
		}
	}
fail_5:
	for (i = 0; i < SQ_NRXDESC; i++) {
		if (sc->sc_rxmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxmap[i]);
	}
fail_4:
	for (i = 0; i < SQ_NTXDESC; i++) {
		if (sc->sc_txmap[i] != NULL)
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_txmap[i]);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cdmap);
fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cdmap);
fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (caddr_t) sc->sc_control,
	    sizeof(struct sq_control));
fail_1:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_ncdseg);
fail_0:
	return;
}

/* Set up data to get the interface up and running. */
int
sq_init(struct ifnet *ifp)
{
	int i;
	u_int32_t reg;
	struct sq_softc *sc = ifp->if_softc;

	/* Cancel any in-progress I/O */
	sq_stop(ifp, 0);

	sc->sc_nextrx = 0;

	sc->sc_nfreetx = SQ_NTXDESC;
	sc->sc_nexttx = sc->sc_prevtx = 0;

	SQ_TRACE(SQ_RESET, 0, 0, sc->sc_nfreetx);

	/* Set into 8003 mode, bank 0 to program ethernet address */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCMD, TXCMD_BANK0);

	/* Now write the address */
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		bus_space_write_1(sc->sc_regt, sc->sc_regh, i,
		    sc->sc_enaddr[i]);

	sc->sc_rxcmd = RXCMD_IE_CRC |
		       RXCMD_IE_DRIB |
		       RXCMD_IE_SHORT |
		       RXCMD_IE_END |
		       RXCMD_IE_GOOD;

	/*
	 * Set the receive filter -- this will add some bits to the
	 * prototype RXCMD register.  Do this before setting the
	 * transmit config register, since we might need to switch
	 * banks.
	 */
	sq_set_filter(sc);

	/* Set up Seeq transmit command register */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCMD,
	    TXCMD_IE_UFLOW |
	    TXCMD_IE_COLL |
	    TXCMD_IE_16COLL |
	    TXCMD_IE_GOOD);

	/* Now write the receive command register. */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_RXCMD, sc->sc_rxcmd);

	/* Set up HPC ethernet DMA config */
	if (sc->hpc_regs->revision == 3) {
		reg = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
		    sc->hpc_regs->enetr_dmacfg);
		bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
		    sc->hpc_regs->enetr_dmacfg,
		    reg | ENETR_DMACFG_FIX_RXDC |
		    ENETR_DMACFG_FIX_INTR |
		    ENETR_DMACFG_FIX_EOP);
	}

	/* Pass the start of the receive ring to the HPC */
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, sc->hpc_regs->enetr_ndbp,
	    SQ_CDRXADDR(sc, 0));

	/* And turn on the HPC ethernet receive channel */
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch, sc->hpc_regs->enetr_ctl,
	    sc->hpc_regs->enetr_ctl_active);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	return 0;
}

static void
sq_set_filter(struct sq_softc *sc)
{
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	struct ether_multistep step;

	/*
	 * Check for promiscuous mode.  Also implies
	 * all-multicast.
	 */
	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_rxcmd |= RXCMD_REC_ALL;
		ifp->if_flags |= IFF_ALLMULTI;
		return;
	}

	/*
	 * The 8003 has no hash table.  If we have any multicast
	 * addresses on the list, enable reception of all multicast
	 * frames.
	 *
	 * XXX The 80c03 has a hash table.  We should use it.
	 */

	ETHER_FIRST_MULTI(step, ec, enm);

	if (enm == NULL) {
		sc->sc_rxcmd &= ~RXCMD_REC_MASK;
		sc->sc_rxcmd |= RXCMD_REC_BROAD;

		ifp->if_flags &= ~IFF_ALLMULTI;
		return;
	}

	sc->sc_rxcmd |= RXCMD_REC_MULTI;
	ifp->if_flags |= IFF_ALLMULTI;
}

int
sq_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	int s, error = 0;

	s = splnet();

	error = ether_ioctl(ifp, cmd, data);
	if (error == ENETRESET) {
		/*
		 * Multicast list has changed; set the hardware filter
		 * accordingly.
		 */
		if (ifp->if_flags & IFF_RUNNING)
			error = sq_init(ifp);
		else
			error = 0;
	}

	splx(s);
	return (error);
}

void
sq_start(struct ifnet *ifp)
{
	struct sq_softc *sc = ifp->if_softc;
	u_int32_t status;
	struct mbuf *m0, *m;
	bus_dmamap_t dmamap;
	int err, totlen, nexttx, firsttx, lasttx = -1, ofree, seg;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_nfreetx;
	firsttx = sc->sc_nexttx;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while (sc->sc_nfreetx != 0) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		dmamap = sc->sc_txmap[sc->sc_nexttx];

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we were
		 * short on resources.  In this case, we'll copy and try
		 * again.
		 * Also copy it if we need to pad, so that we are sure there
		 * is room for the pad buffer.
		 * XXX the right way of doing this is to use a static buffer
		 * for padding and adding it to the transmit descriptor (see
		 * sys/dev/pci/if_tl.c for example).  We can't do this here yet
		 * because we can't send packets with more than one fragment.
		 */
		if (m0->m_pkthdr.len < ETHER_PAD_LEN ||
		    bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					break;
				}
			}

			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			if (m0->m_pkthdr.len < ETHER_PAD_LEN) {
				memset(mtod(m, char *) + m0->m_pkthdr.len, 0,
				    ETHER_PAD_LEN - m0->m_pkthdr.len);
				m->m_pkthdr.len = m->m_len = ETHER_PAD_LEN;
			} else
				m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;

			if ((err = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_NOWAIT)) != 0) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname, err);
				break;
			}
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.
		 */
		if (dmamap->dm_nsegs > sc->sc_nfreetx) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed to anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.
			 * Notify the upper layer that there are no more
			 * slots left.
			 *
			 * XXX We could allocate an mbuf and copy, but
			 * XXX is it worth it?
			 */
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			if (m != NULL)
				m_freem(m);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
#if NBPFILTER > 0
		/*
		 * Pass the packet to any BPF listeners.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
		if (m != NULL) {
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the transmit descriptors.
		 */
		for (nexttx = sc->sc_nexttx, seg = 0, totlen = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = SQ_NEXTTX(nexttx)) {
			if (sc->hpc_regs->revision == 3) {
				sc->sc_txdesc[nexttx].hpc3_hdd_bufptr =
				    dmamap->dm_segs[seg].ds_addr;
				sc->sc_txdesc[nexttx].hpc3_hdd_ctl =
				    dmamap->dm_segs[seg].ds_len;
			} else {
				sc->sc_txdesc[nexttx].hpc1_hdd_bufptr =
				    dmamap->dm_segs[seg].ds_addr;
				sc->sc_txdesc[nexttx].hpc1_hdd_ctl =
				    dmamap->dm_segs[seg].ds_len;
			}
			sc->sc_txdesc[nexttx].hdd_descptr =
			    SQ_CDTXADDR(sc, SQ_NEXTTX(nexttx));
			lasttx = nexttx;
			totlen += dmamap->dm_segs[seg].ds_len;
		}

		/* Last descriptor gets end-of-packet */
		KASSERT(lasttx != -1);
		if (sc->hpc_regs->revision == 3)
			sc->sc_txdesc[lasttx].hpc3_hdd_ctl |= HDD_CTL_EOPACKET;
		else
			sc->sc_txdesc[lasttx].hpc1_hdd_ctl |=
			    HPC1_HDD_CTL_EOPACKET;

		SQ_DPRINTF(("%s: transmit %d-%d, len %d\n",
		    sc->sc_dev.dv_xname, sc->sc_nexttx, lasttx, totlen));

		if (ifp->if_flags & IFF_DEBUG) {
			printf("     transmit chain:\n");
			for (seg = sc->sc_nexttx;; seg = SQ_NEXTTX(seg)) {
				printf("     descriptor %d:\n", seg);
				printf("       hdd_bufptr:  0x%08x\n",
				    (sc->hpc_regs->revision == 3) ?
				    sc->sc_txdesc[seg].hpc3_hdd_bufptr :
				    sc->sc_txdesc[seg].hpc1_hdd_bufptr);
				printf("       hdd_ctl:     0x%08x\n",
				    (sc->hpc_regs->revision == 3) ?
				    sc->sc_txdesc[seg].hpc3_hdd_ctl :
				    sc->sc_txdesc[seg].hpc1_hdd_ctl);
				printf("       hdd_descptr: 0x%08x\n",
				    sc->sc_txdesc[seg].hdd_descptr);

				if (seg == lasttx)
					break;
			}
		}

		/* Sync the descriptors we're using. */
		SQ_CDTXSYNC(sc, sc->sc_nexttx, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/* Store a pointer to the packet so we can free it later */
		sc->sc_txmbuf[sc->sc_nexttx] = m0;

		/* Advance the tx pointer. */
		sc->sc_nfreetx -= dmamap->dm_nsegs;
		sc->sc_nexttx = nexttx;
	}

	/* All transmit descriptors used up, let upper layers know */
	if (sc->sc_nfreetx == 0)
		ifp->if_flags |= IFF_OACTIVE;

	if (sc->sc_nfreetx != ofree) {
		SQ_DPRINTF(("%s: %d packets enqueued, first %d, INTR on %d\n",
		    sc->sc_dev.dv_xname, lasttx - firsttx + 1,
		    firsttx, lasttx));

		/*
		 * Cause a transmit interrupt to happen on the
		 * last packet we enqueued, mark it as the last
		 * descriptor.
		 *
		 * HDD_CTL_EOPACKET && HDD_CTL_INTR cause an
		 * interrupt.
		 */
		KASSERT(lasttx != -1);
		if (sc->hpc_regs->revision == 3) {
			sc->sc_txdesc[lasttx].hpc3_hdd_ctl |= HDD_CTL_INTR |
			    HDD_CTL_EOCHAIN;
		} else {
			sc->sc_txdesc[lasttx].hpc1_hdd_ctl |=
			    HPC1_HDD_CTL_INTR;
			sc->sc_txdesc[lasttx].hpc1_hdd_bufptr |=
			    HPC1_HDD_CTL_EOCHAIN;
		}

		SQ_CDTXSYNC(sc, lasttx, 1,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/*
		 * There is a potential race condition here if the HPC
		 * DMA channel is active and we try and either update
		 * the 'next descriptor' pointer in the HPC PIO space
		 * or the 'next descriptor' pointer in a previous desc-
		 * riptor.
		 *
		 * To avoid this, if the channel is active, we rely on
		 * the transmit interrupt routine noticing that there
		 * are more packets to send and restarting the HPC DMA
		 * engine, rather than mucking with the DMA state here.
		 */
		status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
		    sc->hpc_regs->enetx_ctl);

		if ((status & sc->hpc_regs->enetx_ctl_active) != 0) {
			SQ_TRACE(SQ_ADD_TO_DMA, firsttx, status,
			    sc->sc_nfreetx);

			/* NB: hpc3_hdd_ctl is also hpc1_hdd_bufptr */
			sc->sc_txdesc[SQ_PREVTX(firsttx)].hpc3_hdd_ctl &=
			    ~HDD_CTL_EOCHAIN;

			SQ_CDTXSYNC(sc, SQ_PREVTX(firsttx), 1,
			    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
		} else {
			SQ_TRACE(SQ_START_DMA, firsttx, status,
			    sc->sc_nfreetx);

			bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
			    sc->hpc_regs->enetx_ndbp,
			    SQ_CDTXADDR(sc, firsttx));

			if (sc->hpc_regs->revision != 3) {
				bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
				    HPC1_ENETX_CFXBP, SQ_CDTXADDR(sc, firsttx));
				bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
				    HPC1_ENETX_CBP, SQ_CDTXADDR(sc, firsttx));
			}

			/* Kick DMA channel into life */
			bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
			    sc->hpc_regs->enetx_ctl,
			    sc->hpc_regs->enetx_ctl_active);
		}

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

void
sq_stop(struct ifnet *ifp, int disable)
{
	int i;
	struct sq_softc *sc = ifp->if_softc;

	for (i = 0; i < SQ_NTXDESC; i++) {
		if (sc->sc_txmbuf[i] != NULL) {
			bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
			m_freem(sc->sc_txmbuf[i]);
			sc->sc_txmbuf[i] = NULL;
		}
	}

	/* Clear Seeq transmit/receive command registers */
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_TXCMD, 0);
	bus_space_write_1(sc->sc_regt, sc->sc_regh, SEEQ_RXCMD, 0);

	sq_reset(sc);

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;
}

/*
 * Device timeout/watchdog routine.
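 *
 * Called by the network stack when if_timer counts down to zero before all
 * pending transmits have completed: log the DMA state, dump the driver's
 * trace buffer, count an output error and reinitialize the interface.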
 */
void
sq_watchdog(struct ifnet *ifp)
{
	u_int32_t status;
	struct sq_softc *sc = ifp->if_softc;

	status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
	    sc->hpc_regs->enetx_ctl);
	log(LOG_ERR, "%s: device timeout (prev %d, next %d, free %d, "
	    "status %08x)\n", sc->sc_dev.dv_xname, sc->sc_prevtx,
	    sc->sc_nexttx, sc->sc_nfreetx, status);

	sq_trace_dump(sc);

	memset(&sq_trace, 0, sizeof(sq_trace));
	sq_trace_idx = 0;

	++ifp->if_oerrors;

	sq_init(ifp);
}

void
sq_trace_dump(struct sq_softc* sc)
{
	int i;

	for (i = 0; i < sq_trace_idx; i++) {
		printf("%s: [%d] action %d, buf %d, free %d, status %08x\n",
		    sc->sc_dev.dv_xname, i, sq_trace[i].action,
		    sq_trace[i].bufno, sq_trace[i].freebuf,
		    sq_trace[i].status);
	}
}

static int
sq_intr(void *arg)
{
	struct sq_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int handled = 0;
	u_int32_t stat;

	stat = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
	    sc->hpc_regs->enetr_reset);

	if ((stat & 2) == 0) {
		printf("%s: Unexpected interrupt!\n", sc->sc_dev.dv_xname);
		return 0;
	}

	bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
	    sc->hpc_regs->enetr_reset, (stat | 2));

	/*
	 * If the interface isn't running, the interrupt couldn't
	 * possibly have come from us.
	 */
	if ((ifp->if_flags & IFF_RUNNING) == 0)
		return 0;

	sc->sq_intrcnt.ev_count++;

	/* Always check for received packets */
	if (sq_rxintr(sc) != 0)
		handled++;

	/* Only handle transmit interrupts if we actually sent something */
	if (sc->sc_nfreetx < SQ_NTXDESC) {
		sq_txintr(sc);
		handled++;
	}

#if NRND > 0
	if (handled)
		rnd_add_uint32(&sc->rnd_source, stat);
#endif
	return (handled);
}

static int
sq_rxintr(struct sq_softc *sc)
{
	int count = 0;
	struct mbuf* m;
	int i, framelen;
	u_int8_t pktstat;
	u_int32_t status;
	u_int32_t ctl_reg;
	int new_end, orig_end;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	for (i = sc->sc_nextrx;; i = SQ_NEXTRX(i)) {
		SQ_CDRXSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/* If this is a CPU-owned buffer, we're at the end of
		   the list */
		if (sc->hpc_regs->revision == 3)
			ctl_reg = sc->sc_rxdesc[i].hpc3_hdd_ctl & HDD_CTL_OWN;
		else
			ctl_reg = sc->sc_rxdesc[i].hpc1_hdd_ctl &
			    HPC1_HDD_CTL_OWN;

		if (ctl_reg) {
#if defined(SQ_DEBUG)
			u_int32_t reg;

			reg = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
			    sc->hpc_regs->enetr_ctl);
			SQ_DPRINTF(("%s: rxintr: done at %d (ctl %08x)\n",
			    sc->sc_dev.dv_xname, i, reg));
#endif
			break;
		}

		count++;

		m = sc->sc_rxmbuf[i];
		framelen = m->m_ext.ext_size - 3;
		if (sc->hpc_regs->revision == 3)
			framelen -=
			    HDD_CTL_BYTECNT(sc->sc_rxdesc[i].hpc3_hdd_ctl);
		else
			framelen -=
			    HPC1_HDD_CTL_BYTECNT(sc->sc_rxdesc[i].hpc1_hdd_ctl);

		/* Now sync the actual packet data */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
		    sc->sc_rxmap[i]->dm_mapsize, BUS_DMASYNC_POSTREAD);

		pktstat = *((u_int8_t*)m->m_data + framelen + 2);

		if ((pktstat & RXSTAT_GOOD) == 0) {
			ifp->if_ierrors++;

			if (pktstat & RXSTAT_OFLOW)
				printf("%s: receive FIFO overflow\n",
				    sc->sc_dev.dv_xname);

			bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
			    sc->sc_rxmap[i]->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
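			/*
			 * Hand the same mbuf back to the chip: reinitialize
			 * the descriptor so the bad frame is simply dropped.
			 */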
			SQ_INIT_RXDESC(sc, i);
			continue;
		}

		if (sq_add_rxbuf(sc, i) != 0) {
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[i], 0,
			    sc->sc_rxmap[i]->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
			SQ_INIT_RXDESC(sc, i);
			continue;
		}

		m->m_data += 2;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = framelen;

		ifp->if_ipackets++;

		SQ_DPRINTF(("%s: sq_rxintr: buf %d len %d\n",
		    sc->sc_dev.dv_xname, i, framelen));

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif
		(*ifp->if_input)(ifp, m);
	}

	/* If anything happened, move ring start/end pointers to new spot */
	if (i != sc->sc_nextrx) {
		/* NB: hpc3_hdd_ctl is also hpc1_hdd_bufptr */

		new_end = SQ_PREVRX(i);
		sc->sc_rxdesc[new_end].hpc3_hdd_ctl |= HDD_CTL_EOCHAIN;
		SQ_CDRXSYNC(sc, new_end,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		orig_end = SQ_PREVRX(sc->sc_nextrx);
		sc->sc_rxdesc[orig_end].hpc3_hdd_ctl &= ~HDD_CTL_EOCHAIN;
		SQ_CDRXSYNC(sc, orig_end,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		sc->sc_nextrx = i;
	}

	status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
	    sc->hpc_regs->enetr_ctl);

	/* If receive channel is stopped, restart it... */
	if ((status & sc->hpc_regs->enetr_ctl_active) == 0) {
		/* Pass the start of the receive ring to the HPC */
		bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
		    sc->hpc_regs->enetr_ndbp, SQ_CDRXADDR(sc, sc->sc_nextrx));

		/* And turn on the HPC ethernet receive channel */
		bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
		    sc->hpc_regs->enetr_ctl, sc->hpc_regs->enetr_ctl_active);
	}

	return count;
}

static int
sq_txintr(struct sq_softc *sc)
{
	int i;
	int shift = 0;
	u_int32_t status;
	u_int32_t hpc1_ready = 0;
	u_int32_t hpc3_not_ready = 1;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	if (sc->hpc_regs->revision != 3)
		shift = 16;

	status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
	    sc->hpc_regs->enetx_ctl) >> shift;

	SQ_TRACE(SQ_TXINTR_ENTER, sc->sc_prevtx, status, sc->sc_nfreetx);

	if ((status & ((sc->hpc_regs->enetx_ctl_active >> shift) |
	    TXSTAT_GOOD)) == 0) {
		/* XXX */ printf("txstat: %x\n", status);
		if (status & TXSTAT_COLL)
			ifp->if_collisions++;

		if (status & TXSTAT_UFLOW) {
			printf("%s: transmit underflow\n",
			    sc->sc_dev.dv_xname);
			ifp->if_oerrors++;
		}

		if (status & TXSTAT_16COLL) {
			printf("%s: max collisions reached\n",
			    sc->sc_dev.dv_xname);
			ifp->if_oerrors++;
			ifp->if_collisions += 16;
		}
	}

	i = sc->sc_prevtx;
	while (sc->sc_nfreetx < SQ_NTXDESC) {
		/*
		 * Check status first so we don't end up with a case of
		 * the buffer not being finished while the DMA channel
		 * has gone idle.
		 */
		status = bus_space_read_4(sc->sc_hpct, sc->sc_hpch,
		    sc->hpc_regs->enetx_ctl) >> shift;

		SQ_CDTXSYNC(sc, i, sc->sc_txmap[i]->dm_nsegs,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/*
		 * If not yet transmitted, try and start DMA engine again.
		 * HPC3 tags transmitted descriptors with XMITDONE whereas
		 * HPC1 will not halt before sending through EOCHAIN.
		 */
		if (sc->hpc_regs->revision == 3) {
			hpc3_not_ready =
			    sc->sc_txdesc[i].hpc3_hdd_ctl & HDD_CTL_XMITDONE;
		} else {
			if (hpc1_ready)
				hpc1_ready++;
			else {
				if (sc->sc_txdesc[i].hpc1_hdd_ctl &
				    HPC1_HDD_CTL_EOPACKET)
					hpc1_ready = 1;
			}
		}

		if (hpc3_not_ready == 0 || hpc1_ready == 2) {
			if ((status & (sc->hpc_regs->enetx_ctl_active >>
			    shift)) == 0) {		/* XXX */
				SQ_TRACE(SQ_RESTART_DMA, i, status,
				    sc->sc_nfreetx);

				bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
				    sc->hpc_regs->enetx_ndbp,
				    SQ_CDTXADDR(sc, i));

				if (sc->hpc_regs->revision != 3) {
					bus_space_write_4(sc->sc_hpct,
					    sc->sc_hpch, HPC1_ENETX_CFXBP,
					    SQ_CDTXADDR(sc, i));
					bus_space_write_4(sc->sc_hpct,
					    sc->sc_hpch, HPC1_ENETX_CBP,
					    SQ_CDTXADDR(sc, i));
				}

				/* Kick DMA channel into life */
				bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
				    sc->hpc_regs->enetx_ctl,
				    sc->hpc_regs->enetx_ctl_active);

				/*
				 * Set a watchdog timer in case the chip
				 * flakes out.
				 */
				ifp->if_timer = 5;
			} else {
				SQ_TRACE(SQ_TXINTR_BUSY, i, status,
				    sc->sc_nfreetx);
			}
			break;
		}

		/* Sync the packet data, unload DMA map, free mbuf */
		bus_dmamap_sync(sc->sc_dmat, sc->sc_txmap[i], 0,
		    sc->sc_txmap[i]->dm_mapsize,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, sc->sc_txmap[i]);
		m_freem(sc->sc_txmbuf[i]);
		sc->sc_txmbuf[i] = NULL;

		ifp->if_opackets++;
		sc->sc_nfreetx++;

		SQ_TRACE(SQ_DONE_DMA, i, status, sc->sc_nfreetx);
		i = SQ_NEXTTX(i);
	}

	/* prevtx now points to next xmit packet not yet finished */
	sc->sc_prevtx = i;

	/* If we have buffers free, let upper layers know */
	if (sc->sc_nfreetx > 0)
		ifp->if_flags &= ~IFF_OACTIVE;

	/* If all packets have left the coop, cancel watchdog */
	if (sc->sc_nfreetx == SQ_NTXDESC)
		ifp->if_timer = 0;

	SQ_TRACE(SQ_TXINTR_EXIT, sc->sc_prevtx, status, sc->sc_nfreetx);
	sq_start(ifp);

	return 1;
}

void
sq_reset(struct sq_softc *sc)
{
	/* Stop HPC dma channels */
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
	    sc->hpc_regs->enetr_ctl, 0);
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
	    sc->hpc_regs->enetx_ctl, 0);

	bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
	    sc->hpc_regs->enetr_reset, 3);
	delay(20);
	bus_space_write_4(sc->sc_hpct, sc->sc_hpch,
	    sc->hpc_regs->enetr_reset, 0);
}

/*
 * sq_add_rxbuf: Add a receive buffer to the indicated descriptor.
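 * A fresh mbuf cluster is allocated and loaded into that slot's rx DMA map
 * (replacing any previous mapping), and the descriptor is reinitialized.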
 */
int
sq_add_rxbuf(struct sq_softc *sc, int idx)
{
	int err;
	struct mbuf *m;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return (ENOBUFS);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return (ENOBUFS);
	}

	if (sc->sc_rxmbuf[idx] != NULL)
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rxmap[idx]);

	sc->sc_rxmbuf[idx] = m;

	if ((err = bus_dmamap_load(sc->sc_dmat, sc->sc_rxmap[idx],
	    m->m_ext.ext_buf, m->m_ext.ext_size,
	    NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    sc->sc_dev.dv_xname, idx, err);
		panic("sq_add_rxbuf");	/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap[idx], 0,
	    sc->sc_rxmap[idx]->dm_mapsize, BUS_DMASYNC_PREREAD);

	SQ_INIT_RXDESC(sc, idx);

	return 0;
}

void
sq_dump_buffer(u_int32_t addr, u_int32_t len)
{
	u_int i;
	u_char* physaddr = (u_char *) MIPS_PHYS_TO_KSEG1((caddr_t)addr);

	if (len == 0)
		return;

	printf("%p: ", physaddr);

	for (i = 0; i < len; i++) {
		printf("%02x ", *(physaddr + i) & 0xff);
		if ((i % 16) == 15 && i != len - 1)
			printf("\n%p: ", physaddr + i);
	}

	printf("\n");
}

void
enaddr_aton(const char* str, u_int8_t* eaddr)
{
	int i;
	char c;

	for (i = 0; i < ETHER_ADDR_LEN; i++) {
		if (*str == ':')
			str++;

		c = *str++;
		if (isdigit(c)) {
			eaddr[i] = (c - '0');
		} else if (isxdigit(c)) {
			eaddr[i] = (toupper(c) + 10 - 'A');
		}

		c = *str++;
		if (isdigit(c)) {
			eaddr[i] = (eaddr[i] << 4) | (c - '0');
		} else if (isxdigit(c)) {
			eaddr[i] = (eaddr[i] << 4) | (toupper(c) + 10 - 'A');
		}
	}
}