1 /* $NetBSD: if_cas.c,v 1.2 2010/01/09 13:34:33 martin Exp $ */ 2 /* $OpenBSD: if_cas.c,v 1.29 2009/11/29 16:19:38 kettenis Exp $ */ 3 4 /* 5 * 6 * Copyright (C) 2007 Mark Kettenis. 7 * Copyright (C) 2001 Eduardo Horvath. 8 * All rights reserved. 9 * 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE 24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30 * SUCH DAMAGE. 31 * 32 */ 33 34 /* 35 * Driver for Sun Cassini ethernet controllers. 36 * 37 * There are basically two variants of this chip: Cassini and 38 * Cassini+. We can distinguish between the two by revision: 0x10 and 39 * up are Cassini+. The most important difference is that Cassini+ 40 * has a second RX descriptor ring. Cassini+ will not work without 41 * configuring that second ring. However, since we don't use it we 42 * don't actually fill the descriptors, and only hand off the first 43 * four to the chip. 44 */ 45 46 #include <sys/cdefs.h> 47 __KERNEL_RCSID(0, "$NetBSD: if_cas.c,v 1.2 2010/01/09 13:34:33 martin Exp $"); 48 49 #include "opt_inet.h" 50 #include "bpfilter.h" 51 52 #include <sys/param.h> 53 #include <sys/systm.h> 54 #include <sys/callout.h> 55 #include <sys/mbuf.h> 56 #include <sys/syslog.h> 57 #include <sys/malloc.h> 58 #include <sys/kernel.h> 59 #include <sys/socket.h> 60 #include <sys/ioctl.h> 61 #include <sys/errno.h> 62 #include <sys/device.h> 63 64 #include <machine/endian.h> 65 66 #include <uvm/uvm_extern.h> 67 68 #include <net/if.h> 69 #include <net/if_dl.h> 70 #include <net/if_media.h> 71 #include <net/if_ether.h> 72 73 #ifdef INET 74 #include <netinet/in.h> 75 #include <netinet/in_systm.h> 76 #include <netinet/in_var.h> 77 #include <netinet/ip.h> 78 #include <netinet/tcp.h> 79 #include <netinet/udp.h> 80 #endif 81 82 #if NBPFILTER > 0 83 #include <net/bpf.h> 84 #endif 85 86 #include <sys/bus.h> 87 #include <sys/intr.h> 88 89 #include <dev/mii/mii.h> 90 #include <dev/mii/miivar.h> 91 #include <dev/mii/mii_bitbang.h> 92 93 #include <dev/pci/pcivar.h> 94 #include <dev/pci/pcireg.h> 95 #include <dev/pci/pcidevs.h> 96 97 #include <dev/pci/if_casreg.h> 98 #include <dev/pci/if_casvar.h> 99 100 /* XXX Should use Properties when that's fleshed out. 
*/ 101 #ifdef macppc 102 #include <dev/ofw/openfirm.h> 103 #endif /* macppc */ 104 #ifdef __sparc__ 105 #include <machine/promlib.h> 106 #endif 107 108 #ifndef CAS_USE_LOCAL_MAC_ADDRESS 109 #if defined (macppc) || defined (__sparc__) 110 #define CAS_USE_LOCAL_MAC_ADDRESS 0 /* use system-wide address */ 111 #else 112 #define CAS_USE_LOCAL_MAC_ADDRESS 1 113 #endif 114 #endif 115 116 #define TRIES 10000 117 118 static bool cas_estintr(struct cas_softc *sc); 119 bool cas_shutdown(device_t, int); 120 static bool cas_suspend(device_t, pmf_qual_t); 121 static bool cas_resume(device_t, pmf_qual_t); 122 static int cas_detach(device_t, int); 123 static void cas_partial_detach(struct cas_softc *, enum cas_attach_stage); 124 125 int cas_match(device_t, cfdata_t, void *); 126 void cas_attach(device_t, device_t, void *); 127 128 129 CFATTACH_DECL3_NEW(cas, sizeof(struct cas_softc), 130 cas_match, cas_attach, cas_detach, NULL, NULL, NULL, 131 DVF_DETACH_SHUTDOWN); 132 133 #if CAS_USE_LOCAL_MAC_ADDRESS 134 int cas_pci_enaddr(struct cas_softc *, struct pci_attach_args *, uint8_t *); 135 #endif 136 137 void cas_config(struct cas_softc *, const uint8_t *); 138 void cas_start(struct ifnet *); 139 void cas_stop(struct ifnet *, int); 140 int cas_ioctl(struct ifnet *, u_long, void *); 141 void cas_tick(void *); 142 void cas_watchdog(struct ifnet *); 143 int cas_init(struct ifnet *); 144 void cas_init_regs(struct cas_softc *); 145 int cas_ringsize(int); 146 int cas_cringsize(int); 147 int cas_meminit(struct cas_softc *); 148 void cas_mifinit(struct cas_softc *); 149 int cas_bitwait(struct cas_softc *, bus_space_handle_t, int, 150 u_int32_t, u_int32_t); 151 void cas_reset(struct cas_softc *); 152 int cas_reset_rx(struct cas_softc *); 153 int cas_reset_tx(struct cas_softc *); 154 int cas_disable_rx(struct cas_softc *); 155 int cas_disable_tx(struct cas_softc *); 156 void cas_rxdrain(struct cas_softc *); 157 int cas_add_rxbuf(struct cas_softc *, int idx); 158 void cas_iff(struct cas_softc *); 159 int cas_encap(struct cas_softc *, struct mbuf *, u_int32_t *); 160 161 /* MII methods & callbacks */ 162 int cas_mii_readreg(device_t, int, int); 163 void cas_mii_writereg(device_t, int, int, int); 164 void cas_mii_statchg(device_t); 165 int cas_pcs_readreg(device_t, int, int); 166 void cas_pcs_writereg(device_t, int, int, int); 167 168 int cas_mediachange(struct ifnet *); 169 void cas_mediastatus(struct ifnet *, struct ifmediareq *); 170 171 int cas_eint(struct cas_softc *, u_int); 172 int cas_rint(struct cas_softc *); 173 int cas_tint(struct cas_softc *, u_int32_t); 174 int cas_pint(struct cas_softc *); 175 int cas_intr(void *); 176 177 #ifdef CAS_DEBUG 178 #define DPRINTF(sc, x) if ((sc)->sc_ethercom.ec_if.if_flags & IFF_DEBUG) \ 179 printf x 180 #else 181 #define DPRINTF(sc, x) /* nothing */ 182 #endif 183 184 int 185 cas_match(device_t parent, cfdata_t cf, void *aux) 186 { 187 struct pci_attach_args *pa = aux; 188 189 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SUN && 190 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SUN_CASSINI)) 191 return 1; 192 193 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_NS && 194 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_NS_SATURN)) 195 return 1; 196 197 return 0; 198 } 199 200 #if CAS_USE_LOCAL_MAC_ADDRESS 201 #define PROMHDR_PTR_DATA 0x18 202 #define PROMDATA_PTR_VPD 0x08 203 #define PROMDATA_DATA2 0x0a 204 205 static const u_int8_t cas_promhdr[] = { 0x55, 0xaa }; 206 static const u_int8_t cas_promdat[] = { 207 'P', 'C', 'I', 'R', 208 PCI_VENDOR_SUN & 0xff, PCI_VENDOR_SUN >> 8, 209 PCI_PRODUCT_SUN_CASSINI & 
    0xff, PCI_PRODUCT_SUN_CASSINI >> 8
};

static const u_int8_t cas_promdat2[] = {
	0x18, 0x00,			/* structure length */
	0x00,				/* structure revision */
	0x00,				/* interface revision */
	PCI_SUBCLASS_NETWORK_ETHERNET,	/* subclass code */
	PCI_CLASS_NETWORK		/* class code */
};

int
cas_pci_enaddr(struct cas_softc *sc, struct pci_attach_args *pa,
    uint8_t *enaddr)
{
	struct pci_vpd_largeres *res;
	struct pci_vpd *vpd;
	bus_space_handle_t romh;
	bus_space_tag_t romt;
	bus_size_t romsize = 0;
	u_int8_t buf[32], *desc;
	pcireg_t address;
	int dataoff, vpdoff, len;
	int rv = -1;

	if (pci_mapreg_map(pa, PCI_MAPREG_ROM, PCI_MAPREG_TYPE_MEM, 0,
	    &romt, &romh, NULL, &romsize))
		return (-1);

	address = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_MAPREG_ROM);
	address |= PCI_MAPREG_ROM_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_MAPREG_ROM, address);

	bus_space_read_region_1(romt, romh, 0, buf, sizeof(buf));
	if (bcmp(buf, cas_promhdr, sizeof(cas_promhdr)))
		goto fail;

	dataoff = buf[PROMHDR_PTR_DATA] | (buf[PROMHDR_PTR_DATA + 1] << 8);
	if (dataoff < 0x1c)
		goto fail;

	bus_space_read_region_1(romt, romh, dataoff, buf, sizeof(buf));
	if (bcmp(buf, cas_promdat, sizeof(cas_promdat)) ||
	    bcmp(buf + PROMDATA_DATA2, cas_promdat2, sizeof(cas_promdat2)))
		goto fail;

	vpdoff = buf[PROMDATA_PTR_VPD] | (buf[PROMDATA_PTR_VPD + 1] << 8);
	if (vpdoff < 0x1c)
		goto fail;

next:
	bus_space_read_region_1(romt, romh, vpdoff, buf, sizeof(buf));
	if (!PCI_VPDRES_ISLARGE(buf[0]))
		goto fail;

	res = (struct pci_vpd_largeres *)buf;
	vpdoff += sizeof(*res);

	len = ((res->vpdres_len_msb << 8) + res->vpdres_len_lsb);
	switch (PCI_VPDRES_LARGE_NAME(res->vpdres_byte0)) {
	case PCI_VPDRES_TYPE_IDENTIFIER_STRING:
		/* Skip identifier string. */
		vpdoff += len;
		goto next;

	case PCI_VPDRES_TYPE_VPD:
		while (len > 0) {
			bus_space_read_region_1(romt, romh, vpdoff,
			     buf, sizeof(buf));

			vpd = (struct pci_vpd *)buf;
			vpdoff += sizeof(*vpd) + vpd->vpd_len;
			len -= sizeof(*vpd) + vpd->vpd_len;

			/*
			 * We're looking for an "Enhanced" VPD...
			 */
			if (vpd->vpd_key0 != 'Z')
				continue;

			desc = buf + sizeof(*vpd);

			/*
			 * ...which is an instance property...
			 */
			if (desc[0] != 'I')
				continue;
			desc += 3;

			/*
			 * ...that's a byte array with the proper
			 * length for a MAC address...
			 */
			if (desc[0] != 'B' || desc[1] != ETHER_ADDR_LEN)
				continue;
			desc += 2;

			/*
			 * ...named "local-mac-address".
			 */
			if (strcmp(desc, "local-mac-address") != 0)
				continue;
			desc += strlen("local-mac-address") + 1;

			memcpy(enaddr, desc, ETHER_ADDR_LEN);
			rv = 0;
		}
		break;

	default:
		goto fail;
	}

fail:
	if (romsize != 0)
		bus_space_unmap(romt, romh, romsize);

	address = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_MAPREG_ROM);
	address &= ~PCI_MAPREG_ROM_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_MAPREG_ROM, address);

	return (rv);
}
#endif /* CAS_USE_LOCAL_MAC_ADDRESS */

void
cas_attach(device_t parent, device_t self, void *aux)
{
	struct pci_attach_args *pa = aux;
	struct cas_softc *sc = device_private(self);
	char devinfo[256];
	uint8_t enaddr[ETHER_ADDR_LEN];

	sc->sc_dev = self;
	pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo, sizeof(devinfo));
	sc->sc_rev = PCI_REVISION(pa->pa_class);
	aprint_normal(": %s (rev. 0x%02x)\n", devinfo, sc->sc_rev);
	sc->sc_dmatag = pa->pa_dmat;

#define PCI_CAS_BASEADDR 0x10
	if (pci_mapreg_map(pa, PCI_CAS_BASEADDR, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_size) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

#if CAS_USE_LOCAL_MAC_ADDRESS
	if (cas_pci_enaddr(sc, pa, enaddr) != 0)
		aprint_error_dev(sc->sc_dev, "no Ethernet address found\n");
#endif
#ifdef __sparc64__
	prom_getether(PCITAG_NODE(pa->pa_tag), enaddr);
#else
#ifdef macppc
	{
		int node;

		node = pcidev_to_ofdev(pa->pa_pc, pa->pa_tag);
		if (node == 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to locate OpenFirmware node\n");
			return;
		}

		OF_getprop(node, "local-mac-address", enaddr, sizeof(enaddr));
	}
#endif /* macppc */
#endif /* __sparc64__ */

	sc->sc_burst = 16;	/* XXX */

	sc->sc_att_stage = CAS_ATT_BACKEND_0;

	if (pci_intr_map(pa, &sc->sc_handle) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_size);
		return;
	}
	sc->sc_pc = pa->pa_pc;
	if (!cas_estintr(sc)) {
		bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_size);
		aprint_error_dev(sc->sc_dev, "unable to establish interrupt\n");
		return;
	}

	sc->sc_att_stage = CAS_ATT_BACKEND_1;

	/*
	 * call the main configure
	 */
	cas_config(sc, enaddr);

	if (pmf_device_register1(sc->sc_dev,
	    cas_suspend, cas_resume, cas_shutdown))
		pmf_class_network_register(sc->sc_dev, &sc->sc_ethercom.ec_if);
	else
		aprint_error_dev(sc->sc_dev,
		    "could not establish power handlers\n");

	sc->sc_att_stage = CAS_ATT_FINISHED;
	/*FALLTHROUGH*/
}

/*
 * cas_config:
 *
 *	Attach a Cassini interface to the system.
 */
void
cas_config(struct cas_softc *sc, const uint8_t *enaddr)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *child;
	int i, error;

	/* Make sure the chip is stopped. */
	ifp->if_softc = sc;
	cas_reset(sc);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
432 */ 433 if ((error = bus_dmamem_alloc(sc->sc_dmatag, 434 sizeof(struct cas_control_data), CAS_PAGE_SIZE, 0, &sc->sc_cdseg, 435 1, &sc->sc_cdnseg, 0)) != 0) { 436 aprint_error_dev(sc->sc_dev, 437 "unable to allocate control data, error = %d\n", 438 error); 439 cas_partial_detach(sc, CAS_ATT_0); 440 } 441 442 /* XXX should map this in with correct endianness */ 443 if ((error = bus_dmamem_map(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg, 444 sizeof(struct cas_control_data), (void **)&sc->sc_control_data, 445 BUS_DMA_COHERENT)) != 0) { 446 aprint_error_dev(sc->sc_dev, 447 "unable to map control data, error = %d\n", error); 448 cas_partial_detach(sc, CAS_ATT_1); 449 } 450 451 if ((error = bus_dmamap_create(sc->sc_dmatag, 452 sizeof(struct cas_control_data), 1, 453 sizeof(struct cas_control_data), 0, 0, &sc->sc_cddmamap)) != 0) { 454 aprint_error_dev(sc->sc_dev, 455 "unable to create control data DMA map, error = %d\n", error); 456 cas_partial_detach(sc, CAS_ATT_2); 457 } 458 459 if ((error = bus_dmamap_load(sc->sc_dmatag, sc->sc_cddmamap, 460 sc->sc_control_data, sizeof(struct cas_control_data), NULL, 461 0)) != 0) { 462 aprint_error_dev(sc->sc_dev, 463 "unable to load control data DMA map, error = %d\n", 464 error); 465 cas_partial_detach(sc, CAS_ATT_3); 466 } 467 468 memset(sc->sc_control_data, 0, sizeof(struct cas_control_data)); 469 470 /* 471 * Create the receive buffer DMA maps. 472 */ 473 for (i = 0; i < CAS_NRXDESC; i++) { 474 bus_dma_segment_t seg; 475 char *kva; 476 int rseg; 477 478 if ((error = bus_dmamem_alloc(sc->sc_dmatag, CAS_PAGE_SIZE, 479 CAS_PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) { 480 aprint_error_dev(sc->sc_dev, 481 "unable to alloc rx DMA mem %d, error = %d\n", 482 i, error); 483 cas_partial_detach(sc, CAS_ATT_5); 484 } 485 sc->sc_rxsoft[i].rxs_dmaseg = seg; 486 487 if ((error = bus_dmamem_map(sc->sc_dmatag, &seg, rseg, 488 CAS_PAGE_SIZE, (void **)&kva, BUS_DMA_NOWAIT)) != 0) { 489 aprint_error_dev(sc->sc_dev, 490 "unable to alloc rx DMA mem %d, error = %d\n", 491 i, error); 492 cas_partial_detach(sc, CAS_ATT_5); 493 } 494 sc->sc_rxsoft[i].rxs_kva = kva; 495 496 if ((error = bus_dmamap_create(sc->sc_dmatag, CAS_PAGE_SIZE, 1, 497 CAS_PAGE_SIZE, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) { 498 aprint_error_dev(sc->sc_dev, 499 "unable to create rx DMA map %d, error = %d\n", 500 i, error); 501 cas_partial_detach(sc, CAS_ATT_5); 502 } 503 504 if ((error = bus_dmamap_load(sc->sc_dmatag, 505 sc->sc_rxsoft[i].rxs_dmamap, kva, CAS_PAGE_SIZE, NULL, 506 BUS_DMA_NOWAIT)) != 0) { 507 aprint_error_dev(sc->sc_dev, 508 "unable to load rx DMA map %d, error = %d\n", 509 i, error); 510 cas_partial_detach(sc, CAS_ATT_5); 511 } 512 } 513 514 /* 515 * Create the transmit buffer DMA maps. 516 */ 517 for (i = 0; i < CAS_NTXDESC; i++) { 518 if ((error = bus_dmamap_create(sc->sc_dmatag, MCLBYTES, 519 CAS_NTXSEGS, MCLBYTES, 0, BUS_DMA_NOWAIT, 520 &sc->sc_txd[i].sd_map)) != 0) { 521 aprint_error_dev(sc->sc_dev, 522 "unable to create tx DMA map %d, error = %d\n", 523 i, error); 524 cas_partial_detach(sc, CAS_ATT_6); 525 } 526 sc->sc_txd[i].sd_mbuf = NULL; 527 } 528 529 /* 530 * From this point forward, the attachment cannot fail. A failure 531 * before this point releases all resources that may have been 532 * allocated. 533 */ 534 535 /* Announce ourselves. */ 536 aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n", 537 ether_sprintf(enaddr)); 538 539 /* Get RX FIFO size */ 540 sc->sc_rxfifosize = 16 * 1024; 541 542 /* Initialize ifnet structure. 
*/ 543 strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ); 544 ifp->if_softc = sc; 545 ifp->if_flags = 546 IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST; 547 ifp->if_start = cas_start; 548 ifp->if_ioctl = cas_ioctl; 549 ifp->if_watchdog = cas_watchdog; 550 ifp->if_stop = cas_stop; 551 ifp->if_init = cas_init; 552 IFQ_SET_MAXLEN(&ifp->if_snd, CAS_NTXDESC - 1); 553 IFQ_SET_READY(&ifp->if_snd); 554 555 /* Initialize ifmedia structures and MII info */ 556 mii->mii_ifp = ifp; 557 mii->mii_readreg = cas_mii_readreg; 558 mii->mii_writereg = cas_mii_writereg; 559 mii->mii_statchg = cas_mii_statchg; 560 561 ifmedia_init(&mii->mii_media, 0, cas_mediachange, cas_mediastatus); 562 sc->sc_ethercom.ec_mii = mii; 563 564 bus_space_write_4(sc->sc_memt, sc->sc_memh, CAS_MII_DATAPATH_MODE, 0); 565 566 cas_mifinit(sc); 567 568 if (sc->sc_mif_config & CAS_MIF_CONFIG_MDI1) { 569 sc->sc_mif_config |= CAS_MIF_CONFIG_PHY_SEL; 570 bus_space_write_4(sc->sc_memt, sc->sc_memh, 571 CAS_MIF_CONFIG, sc->sc_mif_config); 572 } 573 574 mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY, 575 MII_OFFSET_ANY, 0); 576 577 child = LIST_FIRST(&mii->mii_phys); 578 if (child == NULL && 579 sc->sc_mif_config & (CAS_MIF_CONFIG_MDI0|CAS_MIF_CONFIG_MDI1)) { 580 /* 581 * Try the external PCS SERDES if we didn't find any 582 * MII devices. 583 */ 584 bus_space_write_4(sc->sc_memt, sc->sc_memh, 585 CAS_MII_DATAPATH_MODE, CAS_MII_DATAPATH_SERDES); 586 587 bus_space_write_4(sc->sc_memt, sc->sc_memh, 588 CAS_MII_CONFIG, CAS_MII_CONFIG_ENABLE); 589 590 mii->mii_readreg = cas_pcs_readreg; 591 mii->mii_writereg = cas_pcs_writereg; 592 593 mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY, 594 MII_OFFSET_ANY, MIIF_NOISOLATE); 595 } 596 597 child = LIST_FIRST(&mii->mii_phys); 598 if (child == NULL) { 599 /* No PHY attached */ 600 ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL); 601 ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL); 602 } else { 603 /* 604 * Walk along the list of attached MII devices and 605 * establish an `MII instance' to `phy number' 606 * mapping. We'll use this mapping in media change 607 * requests to determine which phy to use to program 608 * the MIF configuration register. 609 */ 610 for (; child != NULL; child = LIST_NEXT(child, mii_list)) { 611 /* 612 * Note: we support just two PHYs: the built-in 613 * internal device and an external on the MII 614 * connector. 615 */ 616 if (child->mii_phy > 1 || child->mii_inst > 1) { 617 aprint_error_dev(sc->sc_dev, 618 "cannot accommodate MII device %s" 619 " at phy %d, instance %d\n", 620 device_xname(child->mii_dev), 621 child->mii_phy, child->mii_inst); 622 continue; 623 } 624 625 sc->sc_phys[child->mii_inst] = child->mii_phy; 626 } 627 628 /* 629 * XXX - we can really do the following ONLY if the 630 * phy indeed has the auto negotiation capability!! 631 */ 632 ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO); 633 } 634 635 /* claim 802.1q capability */ 636 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU; 637 638 /* Attach the interface. 
*/ 639 if_attach(ifp); 640 ether_ifattach(ifp, enaddr); 641 642 #if NRND > 0 643 rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev), 644 RND_TYPE_NET, 0); 645 #endif 646 647 evcnt_attach_dynamic(&sc->sc_ev_intr, EVCNT_TYPE_INTR, 648 NULL, device_xname(sc->sc_dev), "interrupts"); 649 650 callout_init(&sc->sc_tick_ch, 0); 651 652 return; 653 } 654 655 int 656 cas_detach(device_t self, int flags) 657 { 658 int i; 659 struct cas_softc *sc = device_private(self); 660 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 661 662 /* 663 * Free any resources we've allocated during the failed attach 664 * attempt. Do this in reverse order and fall through. 665 */ 666 switch (sc->sc_att_stage) { 667 case CAS_ATT_FINISHED: 668 pmf_device_deregister(self); 669 cas_stop(&sc->sc_ethercom.ec_if, 1); 670 evcnt_detach(&sc->sc_ev_intr); 671 672 #if NRND > 0 673 rnd_detach_source(&sc->rnd_source); 674 #endif 675 676 ether_ifdetach(ifp); 677 if_detach(ifp); 678 ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY); 679 680 callout_destroy(&sc->sc_tick_ch); 681 682 mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY); 683 684 /*FALLTHROUGH*/ 685 case CAS_ATT_MII: 686 case CAS_ATT_7: 687 case CAS_ATT_6: 688 for (i = 0; i < CAS_NTXDESC; i++) { 689 if (sc->sc_txd[i].sd_map != NULL) 690 bus_dmamap_destroy(sc->sc_dmatag, 691 sc->sc_txd[i].sd_map); 692 } 693 /*FALLTHROUGH*/ 694 case CAS_ATT_5: 695 for (i = 0; i < CAS_NRXDESC; i++) { 696 if (sc->sc_rxsoft[i].rxs_dmamap != NULL) 697 bus_dmamap_unload(sc->sc_dmatag, 698 sc->sc_rxsoft[i].rxs_dmamap); 699 if (sc->sc_rxsoft[i].rxs_dmamap != NULL) 700 bus_dmamap_destroy(sc->sc_dmatag, 701 sc->sc_rxsoft[i].rxs_dmamap); 702 if (sc->sc_rxsoft[i].rxs_kva != NULL) 703 bus_dmamem_unmap(sc->sc_dmatag, 704 sc->sc_rxsoft[i].rxs_kva, CAS_PAGE_SIZE); 705 /* XXX need to check that bus_dmamem_alloc suceeded 706 if (sc->sc_rxsoft[i].rxs_dmaseg != NULL) 707 */ 708 bus_dmamem_free(sc->sc_dmatag, 709 &(sc->sc_rxsoft[i].rxs_dmaseg), 1); 710 } 711 bus_dmamap_unload(sc->sc_dmatag, sc->sc_cddmamap); 712 /*FALLTHROUGH*/ 713 case CAS_ATT_4: 714 case CAS_ATT_3: 715 bus_dmamap_destroy(sc->sc_dmatag, sc->sc_cddmamap); 716 /*FALLTHROUGH*/ 717 case CAS_ATT_2: 718 bus_dmamem_unmap(sc->sc_dmatag, sc->sc_control_data, 719 sizeof(struct cas_control_data)); 720 /*FALLTHROUGH*/ 721 case CAS_ATT_1: 722 bus_dmamem_free(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg); 723 /*FALLTHROUGH*/ 724 case CAS_ATT_0: 725 sc->sc_att_stage = CAS_ATT_0; 726 /*FALLTHROUGH*/ 727 case CAS_ATT_BACKEND_2: 728 case CAS_ATT_BACKEND_1: 729 if (sc->sc_ih != NULL) { 730 pci_intr_disestablish(sc->sc_pc, sc->sc_ih); 731 sc->sc_ih = NULL; 732 } 733 bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_size); 734 /*FALLTHROUGH*/ 735 case CAS_ATT_BACKEND_0: 736 break; 737 } 738 return 0; 739 } 740 741 static void 742 cas_partial_detach(struct cas_softc *sc, enum cas_attach_stage stage) 743 { 744 cfattach_t ca = device_cfattach(sc->sc_dev); 745 746 sc->sc_att_stage = stage; 747 (*ca->ca_detach)(sc->sc_dev, 0); 748 } 749 750 void 751 cas_tick(void *arg) 752 { 753 struct cas_softc *sc = arg; 754 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 755 bus_space_tag_t t = sc->sc_memt; 756 bus_space_handle_t mac = sc->sc_memh; 757 int s; 758 u_int32_t v; 759 760 /* unload collisions counters */ 761 v = bus_space_read_4(t, mac, CAS_MAC_EXCESS_COLL_CNT) + 762 bus_space_read_4(t, mac, CAS_MAC_LATE_COLL_CNT); 763 ifp->if_collisions += v + 764 bus_space_read_4(t, mac, CAS_MAC_NORM_COLL_CNT) + 765 bus_space_read_4(t, mac, CAS_MAC_FIRST_COLL_CNT); 766 
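	/*
	 * v holds the excess- and late-collision counts read above; those
	 * indicate transmit failures, so they are also charged to the
	 * output error count below.
	 */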
ifp->if_oerrors += v; 767 768 /* read error counters */ 769 ifp->if_ierrors += 770 bus_space_read_4(t, mac, CAS_MAC_RX_LEN_ERR_CNT) + 771 bus_space_read_4(t, mac, CAS_MAC_RX_ALIGN_ERR) + 772 bus_space_read_4(t, mac, CAS_MAC_RX_CRC_ERR_CNT) + 773 bus_space_read_4(t, mac, CAS_MAC_RX_CODE_VIOL); 774 775 /* clear the hardware counters */ 776 bus_space_write_4(t, mac, CAS_MAC_NORM_COLL_CNT, 0); 777 bus_space_write_4(t, mac, CAS_MAC_FIRST_COLL_CNT, 0); 778 bus_space_write_4(t, mac, CAS_MAC_EXCESS_COLL_CNT, 0); 779 bus_space_write_4(t, mac, CAS_MAC_LATE_COLL_CNT, 0); 780 bus_space_write_4(t, mac, CAS_MAC_RX_LEN_ERR_CNT, 0); 781 bus_space_write_4(t, mac, CAS_MAC_RX_ALIGN_ERR, 0); 782 bus_space_write_4(t, mac, CAS_MAC_RX_CRC_ERR_CNT, 0); 783 bus_space_write_4(t, mac, CAS_MAC_RX_CODE_VIOL, 0); 784 785 s = splnet(); 786 mii_tick(&sc->sc_mii); 787 splx(s); 788 789 callout_reset(&sc->sc_tick_ch, hz, cas_tick, sc); 790 } 791 792 int 793 cas_bitwait(struct cas_softc *sc, bus_space_handle_t h, int r, 794 u_int32_t clr, u_int32_t set) 795 { 796 int i; 797 u_int32_t reg; 798 799 for (i = TRIES; i--; DELAY(100)) { 800 reg = bus_space_read_4(sc->sc_memt, h, r); 801 if ((reg & clr) == 0 && (reg & set) == set) 802 return (1); 803 } 804 805 return (0); 806 } 807 808 void 809 cas_reset(struct cas_softc *sc) 810 { 811 bus_space_tag_t t = sc->sc_memt; 812 bus_space_handle_t h = sc->sc_memh; 813 int s; 814 815 s = splnet(); 816 DPRINTF(sc, ("%s: cas_reset\n", device_xname(sc->sc_dev))); 817 cas_reset_rx(sc); 818 cas_reset_tx(sc); 819 820 /* Do a full reset */ 821 bus_space_write_4(t, h, CAS_RESET, 822 CAS_RESET_RX | CAS_RESET_TX | CAS_RESET_BLOCK_PCS); 823 if (!cas_bitwait(sc, h, CAS_RESET, CAS_RESET_RX | CAS_RESET_TX, 0)) 824 aprint_error_dev(sc->sc_dev, "cannot reset device\n"); 825 splx(s); 826 } 827 828 829 /* 830 * cas_rxdrain: 831 * 832 * Drain the receive queue. 833 */ 834 void 835 cas_rxdrain(struct cas_softc *sc) 836 { 837 /* Nothing to do yet. */ 838 } 839 840 /* 841 * Reset the whole thing. 842 */ 843 void 844 cas_stop(struct ifnet *ifp, int disable) 845 { 846 struct cas_softc *sc = (struct cas_softc *)ifp->if_softc; 847 struct cas_sxd *sd; 848 u_int32_t i; 849 850 DPRINTF(sc, ("%s: cas_stop\n", device_xname(sc->sc_dev))); 851 852 callout_stop(&sc->sc_tick_ch); 853 854 /* 855 * Mark the interface down and cancel the watchdog timer. 856 */ 857 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 858 ifp->if_timer = 0; 859 860 mii_down(&sc->sc_mii); 861 862 cas_reset_rx(sc); 863 cas_reset_tx(sc); 864 865 /* 866 * Release any queued transmit buffers. 867 */ 868 for (i = 0; i < CAS_NTXDESC; i++) { 869 sd = &sc->sc_txd[i]; 870 if (sd->sd_mbuf != NULL) { 871 bus_dmamap_sync(sc->sc_dmatag, sd->sd_map, 0, 872 sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 873 bus_dmamap_unload(sc->sc_dmatag, sd->sd_map); 874 m_freem(sd->sd_mbuf); 875 sd->sd_mbuf = NULL; 876 } 877 } 878 sc->sc_tx_cnt = sc->sc_tx_prod = sc->sc_tx_cons = 0; 879 880 if (disable) 881 cas_rxdrain(sc); 882 } 883 884 885 /* 886 * Reset the receiver 887 */ 888 int 889 cas_reset_rx(struct cas_softc *sc) 890 { 891 bus_space_tag_t t = sc->sc_memt; 892 bus_space_handle_t h = sc->sc_memh; 893 894 /* 895 * Resetting while DMA is in progress can cause a bus hang, so we 896 * disable DMA first. 897 */ 898 cas_disable_rx(sc); 899 bus_space_write_4(t, h, CAS_RX_CONFIG, 0); 900 /* Wait till it finishes */ 901 if (!cas_bitwait(sc, h, CAS_RX_CONFIG, 1, 0)) 902 aprint_error_dev(sc->sc_dev, "cannot disable rx dma\n"); 903 /* Wait 5ms extra. 
*/ 904 delay(5000); 905 906 /* Finally, reset the ERX */ 907 bus_space_write_4(t, h, CAS_RESET, CAS_RESET_RX); 908 /* Wait till it finishes */ 909 if (!cas_bitwait(sc, h, CAS_RESET, CAS_RESET_RX, 0)) { 910 aprint_error_dev(sc->sc_dev, "cannot reset receiver\n"); 911 return (1); 912 } 913 return (0); 914 } 915 916 917 /* 918 * Reset the transmitter 919 */ 920 int 921 cas_reset_tx(struct cas_softc *sc) 922 { 923 bus_space_tag_t t = sc->sc_memt; 924 bus_space_handle_t h = sc->sc_memh; 925 926 /* 927 * Resetting while DMA is in progress can cause a bus hang, so we 928 * disable DMA first. 929 */ 930 cas_disable_tx(sc); 931 bus_space_write_4(t, h, CAS_TX_CONFIG, 0); 932 /* Wait till it finishes */ 933 if (!cas_bitwait(sc, h, CAS_TX_CONFIG, 1, 0)) 934 aprint_error_dev(sc->sc_dev, "cannot disable tx dma\n"); 935 /* Wait 5ms extra. */ 936 delay(5000); 937 938 /* Finally, reset the ETX */ 939 bus_space_write_4(t, h, CAS_RESET, CAS_RESET_TX); 940 /* Wait till it finishes */ 941 if (!cas_bitwait(sc, h, CAS_RESET, CAS_RESET_TX, 0)) { 942 aprint_error_dev(sc->sc_dev, "cannot reset transmitter\n"); 943 return (1); 944 } 945 return (0); 946 } 947 948 /* 949 * Disable receiver. 950 */ 951 int 952 cas_disable_rx(struct cas_softc *sc) 953 { 954 bus_space_tag_t t = sc->sc_memt; 955 bus_space_handle_t h = sc->sc_memh; 956 u_int32_t cfg; 957 958 /* Flip the enable bit */ 959 cfg = bus_space_read_4(t, h, CAS_MAC_RX_CONFIG); 960 cfg &= ~CAS_MAC_RX_ENABLE; 961 bus_space_write_4(t, h, CAS_MAC_RX_CONFIG, cfg); 962 963 /* Wait for it to finish */ 964 return (cas_bitwait(sc, h, CAS_MAC_RX_CONFIG, CAS_MAC_RX_ENABLE, 0)); 965 } 966 967 /* 968 * Disable transmitter. 969 */ 970 int 971 cas_disable_tx(struct cas_softc *sc) 972 { 973 bus_space_tag_t t = sc->sc_memt; 974 bus_space_handle_t h = sc->sc_memh; 975 u_int32_t cfg; 976 977 /* Flip the enable bit */ 978 cfg = bus_space_read_4(t, h, CAS_MAC_TX_CONFIG); 979 cfg &= ~CAS_MAC_TX_ENABLE; 980 bus_space_write_4(t, h, CAS_MAC_TX_CONFIG, cfg); 981 982 /* Wait for it to finish */ 983 return (cas_bitwait(sc, h, CAS_MAC_TX_CONFIG, CAS_MAC_TX_ENABLE, 0)); 984 } 985 986 /* 987 * Initialize interface. 988 */ 989 int 990 cas_meminit(struct cas_softc *sc) 991 { 992 struct cas_rxsoft *rxs; 993 int i, error; 994 995 rxs = (void *)&error; 996 997 /* 998 * Initialize the transmit descriptor ring. 999 */ 1000 for (i = 0; i < CAS_NTXDESC; i++) { 1001 sc->sc_txdescs[i].cd_flags = 0; 1002 sc->sc_txdescs[i].cd_addr = 0; 1003 } 1004 CAS_CDTXSYNC(sc, 0, CAS_NTXDESC, 1005 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1006 1007 /* 1008 * Initialize the receive descriptor and receive job 1009 * descriptor rings. 1010 */ 1011 for (i = 0; i < CAS_NRXDESC; i++) 1012 CAS_INIT_RXDESC(sc, i, i); 1013 sc->sc_rxdptr = 0; 1014 sc->sc_rxptr = 0; 1015 1016 /* 1017 * Initialize the receive completion ring. 
1018 */ 1019 for (i = 0; i < CAS_NRXCOMP; i++) { 1020 sc->sc_rxcomps[i].cc_word[0] = 0; 1021 sc->sc_rxcomps[i].cc_word[1] = 0; 1022 sc->sc_rxcomps[i].cc_word[2] = 0; 1023 sc->sc_rxcomps[i].cc_word[3] = CAS_DMA_WRITE(CAS_RC3_OWN); 1024 CAS_CDRXCSYNC(sc, i, 1025 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1026 } 1027 1028 return (0); 1029 } 1030 1031 int 1032 cas_ringsize(int sz) 1033 { 1034 switch (sz) { 1035 case 32: 1036 return CAS_RING_SZ_32; 1037 case 64: 1038 return CAS_RING_SZ_64; 1039 case 128: 1040 return CAS_RING_SZ_128; 1041 case 256: 1042 return CAS_RING_SZ_256; 1043 case 512: 1044 return CAS_RING_SZ_512; 1045 case 1024: 1046 return CAS_RING_SZ_1024; 1047 case 2048: 1048 return CAS_RING_SZ_2048; 1049 case 4096: 1050 return CAS_RING_SZ_4096; 1051 case 8192: 1052 return CAS_RING_SZ_8192; 1053 default: 1054 aprint_error("cas: invalid Receive Descriptor ring size %d\n", 1055 sz); 1056 return CAS_RING_SZ_32; 1057 } 1058 } 1059 1060 int 1061 cas_cringsize(int sz) 1062 { 1063 int i; 1064 1065 for (i = 0; i < 9; i++) 1066 if (sz == (128 << i)) 1067 return i; 1068 1069 aprint_error("cas: invalid completion ring size %d\n", sz); 1070 return 128; 1071 } 1072 1073 /* 1074 * Initialization of interface; set up initialization block 1075 * and transmit/receive descriptor rings. 1076 */ 1077 int 1078 cas_init(struct ifnet *ifp) 1079 { 1080 struct cas_softc *sc = (struct cas_softc *)ifp->if_softc; 1081 bus_space_tag_t t = sc->sc_memt; 1082 bus_space_handle_t h = sc->sc_memh; 1083 int s; 1084 u_int max_frame_size; 1085 u_int32_t v; 1086 1087 s = splnet(); 1088 1089 DPRINTF(sc, ("%s: cas_init: calling stop\n", device_xname(sc->sc_dev))); 1090 /* 1091 * Initialization sequence. The numbered steps below correspond 1092 * to the sequence outlined in section 6.3.5.1 in the Ethernet 1093 * Channel Engine manual (part of the PCIO manual). 1094 * See also the STP2002-STQ document from Sun Microsystems. 1095 */ 1096 1097 /* step 1 & 2. Reset the Ethernet Channel */ 1098 cas_stop(ifp, 0); 1099 cas_reset(sc); 1100 DPRINTF(sc, ("%s: cas_init: restarting\n", device_xname(sc->sc_dev))); 1101 1102 /* Re-initialize the MIF */ 1103 cas_mifinit(sc); 1104 1105 /* step 3. Setup data structures in host memory */ 1106 cas_meminit(sc); 1107 1108 /* step 4. TX MAC registers & counters */ 1109 cas_init_regs(sc); 1110 max_frame_size = ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN; 1111 v = (max_frame_size) | (0x2000 << 16) /* Burst size */; 1112 bus_space_write_4(t, h, CAS_MAC_MAC_MAX_FRAME, v); 1113 1114 /* step 5. RX MAC registers & counters */ 1115 cas_iff(sc); 1116 1117 /* step 6 & 7. 
Program Descriptor Ring Base Addresses */ 1118 KASSERT((CAS_CDTXADDR(sc, 0) & 0x1fff) == 0); 1119 bus_space_write_4(t, h, CAS_TX_RING_PTR_HI, 1120 (((uint64_t)CAS_CDTXADDR(sc,0)) >> 32)); 1121 bus_space_write_4(t, h, CAS_TX_RING_PTR_LO, CAS_CDTXADDR(sc, 0)); 1122 1123 KASSERT((CAS_CDRXADDR(sc, 0) & 0x1fff) == 0); 1124 bus_space_write_4(t, h, CAS_RX_DRING_PTR_HI, 1125 (((uint64_t)CAS_CDRXADDR(sc,0)) >> 32)); 1126 bus_space_write_4(t, h, CAS_RX_DRING_PTR_LO, CAS_CDRXADDR(sc, 0)); 1127 1128 KASSERT((CAS_CDRXCADDR(sc, 0) & 0x1fff) == 0); 1129 bus_space_write_4(t, h, CAS_RX_CRING_PTR_HI, 1130 (((uint64_t)CAS_CDRXCADDR(sc,0)) >> 32)); 1131 bus_space_write_4(t, h, CAS_RX_CRING_PTR_LO, CAS_CDRXCADDR(sc, 0)); 1132 1133 if (CAS_PLUS(sc)) { 1134 KASSERT((CAS_CDRXADDR2(sc, 0) & 0x1fff) == 0); 1135 bus_space_write_4(t, h, CAS_RX_DRING_PTR_HI2, 1136 (((uint64_t)CAS_CDRXADDR2(sc,0)) >> 32)); 1137 bus_space_write_4(t, h, CAS_RX_DRING_PTR_LO2, 1138 CAS_CDRXADDR2(sc, 0)); 1139 } 1140 1141 /* step 8. Global Configuration & Interrupt Mask */ 1142 bus_space_write_4(t, h, CAS_INTMASK, 1143 ~(CAS_INTR_TX_INTME|CAS_INTR_TX_EMPTY| 1144 CAS_INTR_TX_TAG_ERR| 1145 CAS_INTR_RX_DONE|CAS_INTR_RX_NOBUF| 1146 CAS_INTR_RX_TAG_ERR| 1147 CAS_INTR_RX_COMP_FULL|CAS_INTR_PCS| 1148 CAS_INTR_MAC_CONTROL|CAS_INTR_MIF| 1149 CAS_INTR_BERR)); 1150 bus_space_write_4(t, h, CAS_MAC_RX_MASK, 1151 CAS_MAC_RX_DONE|CAS_MAC_RX_FRAME_CNT); 1152 bus_space_write_4(t, h, CAS_MAC_TX_MASK, CAS_MAC_TX_XMIT_DONE); 1153 bus_space_write_4(t, h, CAS_MAC_CONTROL_MASK, 0); /* XXXX */ 1154 1155 /* step 9. ETX Configuration: use mostly default values */ 1156 1157 /* Enable DMA */ 1158 v = cas_ringsize(CAS_NTXDESC /*XXX*/) << 10; 1159 bus_space_write_4(t, h, CAS_TX_CONFIG, 1160 v|CAS_TX_CONFIG_TXDMA_EN|(1<<24)|(1<<29)); 1161 bus_space_write_4(t, h, CAS_TX_KICK, 0); 1162 1163 /* step 10. ERX Configuration */ 1164 1165 /* Encode Receive Descriptor ring size */ 1166 v = cas_ringsize(CAS_NRXDESC) << CAS_RX_CONFIG_RXDRNG_SZ_SHIFT; 1167 if (CAS_PLUS(sc)) 1168 v |= cas_ringsize(32) << CAS_RX_CONFIG_RXDRNG2_SZ_SHIFT; 1169 1170 /* Encode Receive Completion ring size */ 1171 v |= cas_cringsize(CAS_NRXCOMP) << CAS_RX_CONFIG_RXCRNG_SZ_SHIFT; 1172 1173 /* Enable DMA */ 1174 bus_space_write_4(t, h, CAS_RX_CONFIG, 1175 v|(2<<CAS_RX_CONFIG_FBOFF_SHFT)|CAS_RX_CONFIG_RXDMA_EN); 1176 1177 /* 1178 * The following value is for an OFF Threshold of about 3/4 full 1179 * and an ON Threshold of 1/4 full. 1180 */ 1181 bus_space_write_4(t, h, CAS_RX_PAUSE_THRESH, 1182 (3 * sc->sc_rxfifosize / 256) | 1183 ((sc->sc_rxfifosize / 256) << 12)); 1184 bus_space_write_4(t, h, CAS_RX_BLANKING, (6 << 12) | 6); 1185 1186 /* step 11. Configure Media */ 1187 mii_ifmedia_change(&sc->sc_mii); 1188 1189 /* step 12. RX_MAC Configuration Register */ 1190 v = bus_space_read_4(t, h, CAS_MAC_RX_CONFIG); 1191 v |= CAS_MAC_RX_ENABLE | CAS_MAC_RX_STRIP_CRC; 1192 bus_space_write_4(t, h, CAS_MAC_RX_CONFIG, v); 1193 1194 /* step 14. Issue Transmit Pending command */ 1195 1196 /* step 15. Give the receiver a swift kick */ 1197 bus_space_write_4(t, h, CAS_RX_KICK, CAS_NRXDESC-4); 1198 if (CAS_PLUS(sc)) 1199 bus_space_write_4(t, h, CAS_RX_KICK2, 4); 1200 1201 /* Start the one second timer. 
*/ 1202 callout_reset(&sc->sc_tick_ch, hz, cas_tick, sc); 1203 1204 ifp->if_flags |= IFF_RUNNING; 1205 ifp->if_flags &= ~IFF_OACTIVE; 1206 ifp->if_timer = 0; 1207 splx(s); 1208 1209 return (0); 1210 } 1211 1212 void 1213 cas_init_regs(struct cas_softc *sc) 1214 { 1215 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1216 bus_space_tag_t t = sc->sc_memt; 1217 bus_space_handle_t h = sc->sc_memh; 1218 const u_char *laddr = CLLADDR(ifp->if_sadl); 1219 u_int32_t v, r; 1220 1221 /* These regs are not cleared on reset */ 1222 sc->sc_inited = 0; 1223 if (!sc->sc_inited) { 1224 /* Load recommended values */ 1225 bus_space_write_4(t, h, CAS_MAC_IPG0, 0x00); 1226 bus_space_write_4(t, h, CAS_MAC_IPG1, 0x08); 1227 bus_space_write_4(t, h, CAS_MAC_IPG2, 0x04); 1228 1229 bus_space_write_4(t, h, CAS_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN); 1230 /* Max frame and max burst size */ 1231 v = ETHER_MAX_LEN | (0x2000 << 16) /* Burst size */; 1232 bus_space_write_4(t, h, CAS_MAC_MAC_MAX_FRAME, v); 1233 1234 bus_space_write_4(t, h, CAS_MAC_PREAMBLE_LEN, 0x07); 1235 bus_space_write_4(t, h, CAS_MAC_JAM_SIZE, 0x04); 1236 bus_space_write_4(t, h, CAS_MAC_ATTEMPT_LIMIT, 0x10); 1237 bus_space_write_4(t, h, CAS_MAC_CONTROL_TYPE, 0x8088); 1238 bus_space_write_4(t, h, CAS_MAC_RANDOM_SEED, 1239 ((laddr[5]<<8)|laddr[4])&0x3ff); 1240 1241 /* Secondary MAC addresses set to 0:0:0:0:0:0 */ 1242 for (r = CAS_MAC_ADDR3; r < CAS_MAC_ADDR42; r += 4) 1243 bus_space_write_4(t, h, r, 0); 1244 1245 /* MAC control addr set to 0:1:c2:0:1:80 */ 1246 bus_space_write_4(t, h, CAS_MAC_ADDR42, 0x0001); 1247 bus_space_write_4(t, h, CAS_MAC_ADDR43, 0xc200); 1248 bus_space_write_4(t, h, CAS_MAC_ADDR44, 0x0180); 1249 1250 /* MAC filter addr set to 0:0:0:0:0:0 */ 1251 bus_space_write_4(t, h, CAS_MAC_ADDR_FILTER0, 0); 1252 bus_space_write_4(t, h, CAS_MAC_ADDR_FILTER1, 0); 1253 bus_space_write_4(t, h, CAS_MAC_ADDR_FILTER2, 0); 1254 1255 bus_space_write_4(t, h, CAS_MAC_ADR_FLT_MASK1_2, 0); 1256 bus_space_write_4(t, h, CAS_MAC_ADR_FLT_MASK0, 0); 1257 1258 /* Hash table initialized to 0 */ 1259 for (r = CAS_MAC_HASH0; r <= CAS_MAC_HASH15; r += 4) 1260 bus_space_write_4(t, h, r, 0); 1261 1262 sc->sc_inited = 1; 1263 } 1264 1265 /* Counters need to be zeroed */ 1266 bus_space_write_4(t, h, CAS_MAC_NORM_COLL_CNT, 0); 1267 bus_space_write_4(t, h, CAS_MAC_FIRST_COLL_CNT, 0); 1268 bus_space_write_4(t, h, CAS_MAC_EXCESS_COLL_CNT, 0); 1269 bus_space_write_4(t, h, CAS_MAC_LATE_COLL_CNT, 0); 1270 bus_space_write_4(t, h, CAS_MAC_DEFER_TMR_CNT, 0); 1271 bus_space_write_4(t, h, CAS_MAC_PEAK_ATTEMPTS, 0); 1272 bus_space_write_4(t, h, CAS_MAC_RX_FRAME_COUNT, 0); 1273 bus_space_write_4(t, h, CAS_MAC_RX_LEN_ERR_CNT, 0); 1274 bus_space_write_4(t, h, CAS_MAC_RX_ALIGN_ERR, 0); 1275 bus_space_write_4(t, h, CAS_MAC_RX_CRC_ERR_CNT, 0); 1276 bus_space_write_4(t, h, CAS_MAC_RX_CODE_VIOL, 0); 1277 1278 /* Un-pause stuff */ 1279 bus_space_write_4(t, h, CAS_MAC_SEND_PAUSE_CMD, 0); 1280 1281 /* 1282 * Set the station address. 1283 */ 1284 bus_space_write_4(t, h, CAS_MAC_ADDR0, (laddr[4]<<8) | laddr[5]); 1285 bus_space_write_4(t, h, CAS_MAC_ADDR1, (laddr[2]<<8) | laddr[3]); 1286 bus_space_write_4(t, h, CAS_MAC_ADDR2, (laddr[0]<<8) | laddr[1]); 1287 } 1288 1289 /* 1290 * Receive interrupt. 
 */
int
cas_rint(struct cas_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t h = sc->sc_memh;
	struct cas_rxsoft *rxs;
	struct mbuf *m;
	u_int64_t word[4];
	int len, off, idx;
	int i, skip;
	void *cp;

	for (i = sc->sc_rxptr;; i = CAS_NEXTRX(i + skip)) {
		CAS_CDRXCSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		word[0] = CAS_DMA_READ(sc->sc_rxcomps[i].cc_word[0]);
		word[1] = CAS_DMA_READ(sc->sc_rxcomps[i].cc_word[1]);
		word[2] = CAS_DMA_READ(sc->sc_rxcomps[i].cc_word[2]);
		word[3] = CAS_DMA_READ(sc->sc_rxcomps[i].cc_word[3]);

		/* Stop if the hardware still owns the descriptor. */
		if ((word[0] & CAS_RC0_TYPE) == 0 || word[3] & CAS_RC3_OWN)
			break;

		len = CAS_RC1_HDR_LEN(word[1]);
		if (len > 0) {
			off = CAS_RC1_HDR_OFF(word[1]);
			idx = CAS_RC1_HDR_IDX(word[1]);
			rxs = &sc->sc_rxsoft[idx];

			DPRINTF(sc, ("hdr at idx %d, off %d, len %d\n",
			    idx, off, len));

			bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

			cp = rxs->rxs_kva + off * 256 + ETHER_ALIGN;
			m = m_devget(cp, len, 0, ifp, NULL);

			if (word[0] & CAS_RC0_RELEASE_HDR)
				cas_add_rxbuf(sc, idx);

			if (m != NULL) {

#if NBPFILTER > 0
				/*
				 * Pass this up to any BPF listeners, but only
				 * pass it up the stack if it's for us.
				 */
				if (ifp->if_bpf)
					bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

				ifp->if_ipackets++;
				m->m_pkthdr.csum_flags = 0;
				(*ifp->if_input)(ifp, m);
			} else
				ifp->if_ierrors++;
		}

		len = CAS_RC0_DATA_LEN(word[0]);
		if (len > 0) {
			off = CAS_RC0_DATA_OFF(word[0]);
			idx = CAS_RC0_DATA_IDX(word[0]);
			rxs = &sc->sc_rxsoft[idx];

			DPRINTF(sc, ("data at idx %d, off %d, len %d\n",
			    idx, off, len));

			bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

			/* XXX We should not be copying the packet here. */
			cp = rxs->rxs_kva + off + ETHER_ALIGN;
			m = m_devget(cp, len, 0, ifp, NULL);

			if (word[0] & CAS_RC0_RELEASE_DATA)
				cas_add_rxbuf(sc, idx);

			if (m != NULL) {
#if NBPFILTER > 0
				/*
				 * Pass this up to any BPF listeners, but only
				 * pass it up the stack if it's for us.
1378 */ 1379 if (ifp->if_bpf) 1380 bpf_mtap(ifp->if_bpf, m); 1381 #endif /* NBPFILTER > 0 */ 1382 1383 ifp->if_ipackets++; 1384 m->m_pkthdr.csum_flags = 0; 1385 (*ifp->if_input)(ifp, m); 1386 } else 1387 ifp->if_ierrors++; 1388 } 1389 1390 if (word[0] & CAS_RC0_SPLIT) 1391 aprint_error_dev(sc->sc_dev, "split packet\n"); 1392 1393 skip = CAS_RC0_SKIP(word[0]); 1394 } 1395 1396 while (sc->sc_rxptr != i) { 1397 sc->sc_rxcomps[sc->sc_rxptr].cc_word[0] = 0; 1398 sc->sc_rxcomps[sc->sc_rxptr].cc_word[1] = 0; 1399 sc->sc_rxcomps[sc->sc_rxptr].cc_word[2] = 0; 1400 sc->sc_rxcomps[sc->sc_rxptr].cc_word[3] = 1401 CAS_DMA_WRITE(CAS_RC3_OWN); 1402 CAS_CDRXCSYNC(sc, sc->sc_rxptr, 1403 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1404 1405 sc->sc_rxptr = CAS_NEXTRX(sc->sc_rxptr); 1406 } 1407 1408 bus_space_write_4(t, h, CAS_RX_COMP_TAIL, sc->sc_rxptr); 1409 1410 DPRINTF(sc, ("cas_rint: done sc->rxptr %d, complete %d\n", 1411 sc->sc_rxptr, bus_space_read_4(t, h, CAS_RX_COMPLETION))); 1412 1413 return (1); 1414 } 1415 1416 /* 1417 * cas_add_rxbuf: 1418 * 1419 * Add a receive buffer to the indicated descriptor. 1420 */ 1421 int 1422 cas_add_rxbuf(struct cas_softc *sc, int idx) 1423 { 1424 bus_space_tag_t t = sc->sc_memt; 1425 bus_space_handle_t h = sc->sc_memh; 1426 1427 CAS_INIT_RXDESC(sc, sc->sc_rxdptr, idx); 1428 1429 if ((sc->sc_rxdptr % 4) == 0) 1430 bus_space_write_4(t, h, CAS_RX_KICK, sc->sc_rxdptr); 1431 1432 if (++sc->sc_rxdptr == CAS_NRXDESC) 1433 sc->sc_rxdptr = 0; 1434 1435 return (0); 1436 } 1437 1438 int 1439 cas_eint(struct cas_softc *sc, u_int status) 1440 { 1441 char bits[128]; 1442 if ((status & CAS_INTR_MIF) != 0) { 1443 DPRINTF(sc, ("%s: link status changed\n", 1444 device_xname(sc->sc_dev))); 1445 return (1); 1446 } 1447 1448 snprintb(bits, sizeof(bits), CAS_INTR_BITS, status); 1449 printf("%s: status=%s\n", device_xname(sc->sc_dev), bits); 1450 return (1); 1451 } 1452 1453 int 1454 cas_pint(struct cas_softc *sc) 1455 { 1456 bus_space_tag_t t = sc->sc_memt; 1457 bus_space_handle_t seb = sc->sc_memh; 1458 u_int32_t status; 1459 1460 status = bus_space_read_4(t, seb, CAS_MII_INTERRUP_STATUS); 1461 status |= bus_space_read_4(t, seb, CAS_MII_INTERRUP_STATUS); 1462 #ifdef CAS_DEBUG 1463 if (status) 1464 printf("%s: link status changed\n", device_xname(sc->sc_dev)); 1465 #endif 1466 return (1); 1467 } 1468 1469 int 1470 cas_intr(void *v) 1471 { 1472 struct cas_softc *sc = (struct cas_softc *)v; 1473 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1474 bus_space_tag_t t = sc->sc_memt; 1475 bus_space_handle_t seb = sc->sc_memh; 1476 u_int32_t status; 1477 int r = 0; 1478 #ifdef CAS_DEBUG 1479 char bits[128]; 1480 #endif 1481 1482 sc->sc_ev_intr.ev_count++; 1483 1484 status = bus_space_read_4(t, seb, CAS_STATUS); 1485 #ifdef CAS_DEBUG 1486 snprintb(bits, sizeof(bits), CAS_INTR_BITS, status); 1487 #endif 1488 DPRINTF(sc, ("%s: cas_intr: cplt %x status %s\n", 1489 device_xname(sc->sc_dev), (status>>19), bits)); 1490 1491 if ((status & CAS_INTR_PCS) != 0) 1492 r |= cas_pint(sc); 1493 1494 if ((status & (CAS_INTR_TX_TAG_ERR | CAS_INTR_RX_TAG_ERR | 1495 CAS_INTR_RX_COMP_FULL | CAS_INTR_BERR)) != 0) 1496 r |= cas_eint(sc, status); 1497 1498 if ((status & (CAS_INTR_TX_EMPTY | CAS_INTR_TX_INTME)) != 0) 1499 r |= cas_tint(sc, status); 1500 1501 if ((status & (CAS_INTR_RX_DONE | CAS_INTR_RX_NOBUF)) != 0) 1502 r |= cas_rint(sc); 1503 1504 /* We should eventually do more than just print out error stats. 
 */
	if (status & CAS_INTR_TX_MAC) {
		int txstat = bus_space_read_4(t, seb, CAS_MAC_TX_STATUS);
#ifdef CAS_DEBUG
		if (txstat & ~CAS_MAC_TX_XMIT_DONE)
			printf("%s: MAC tx fault, status %x\n",
			    device_xname(sc->sc_dev), txstat);
#endif
		if (txstat & (CAS_MAC_TX_UNDERRUN | CAS_MAC_TX_PKT_TOO_LONG))
			cas_init(ifp);
	}
	if (status & CAS_INTR_RX_MAC) {
		int rxstat = bus_space_read_4(t, seb, CAS_MAC_RX_STATUS);
#ifdef CAS_DEBUG
		if (rxstat & ~CAS_MAC_RX_DONE)
			printf("%s: MAC rx fault, status %x\n",
			    device_xname(sc->sc_dev), rxstat);
#endif
		/*
		 * On some chip revisions CAS_MAC_RX_OVERFLOW happens often
		 * due to a silicon bug, so handle it silently.
		 */
		if (rxstat & CAS_MAC_RX_OVERFLOW) {
			ifp->if_ierrors++;
			cas_init(ifp);
		}
#ifdef CAS_DEBUG
		else if (rxstat & ~(CAS_MAC_RX_DONE | CAS_MAC_RX_FRAME_CNT))
			printf("%s: MAC rx fault, status %x\n",
			    device_xname(sc->sc_dev), rxstat);
#endif
	}
#if NRND > 0
	rnd_add_uint32(&sc->rnd_source, status);
#endif
	return (r);
}


void
cas_watchdog(struct ifnet *ifp)
{
	struct cas_softc *sc = ifp->if_softc;

	DPRINTF(sc, ("cas_watchdog: CAS_RX_CONFIG %x CAS_MAC_RX_STATUS %x "
	    "CAS_MAC_RX_CONFIG %x\n",
	    bus_space_read_4(sc->sc_memt, sc->sc_memh, CAS_RX_CONFIG),
	    bus_space_read_4(sc->sc_memt, sc->sc_memh, CAS_MAC_RX_STATUS),
	    bus_space_read_4(sc->sc_memt, sc->sc_memh, CAS_MAC_RX_CONFIG)));

	log(LOG_ERR, "%s: device timeout\n", device_xname(sc->sc_dev));
	++ifp->if_oerrors;

	/* Try to get more packets going. */
	cas_init(ifp);
}

/*
 * Initialize the MII Management Interface
 */
void
cas_mifinit(struct cas_softc *sc)
{
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t mif = sc->sc_memh;

	/* Configure the MIF in frame mode */
	sc->sc_mif_config = bus_space_read_4(t, mif, CAS_MIF_CONFIG);
	sc->sc_mif_config &= ~CAS_MIF_CONFIG_BB_ENA;
	bus_space_write_4(t, mif, CAS_MIF_CONFIG, sc->sc_mif_config);
}

/*
 * MII interface
 *
 * The Cassini MII interface supports at least three different operating modes:
 *
 * Bitbang mode is implemented using data, clock and output enable registers.
 *
 * Frame mode is implemented by loading a complete frame into the frame
 * register and polling the valid bit for completion.
 *
 * Polling mode uses the frame register but completion is indicated by
 * an interrupt.
1588 * 1589 */ 1590 int 1591 cas_mii_readreg(device_t self, int phy, int reg) 1592 { 1593 struct cas_softc *sc = device_private(self); 1594 bus_space_tag_t t = sc->sc_memt; 1595 bus_space_handle_t mif = sc->sc_memh; 1596 int n; 1597 u_int32_t v; 1598 1599 #ifdef CAS_DEBUG 1600 if (sc->sc_debug) 1601 printf("cas_mii_readreg: phy %d reg %d\n", phy, reg); 1602 #endif 1603 1604 /* Construct the frame command */ 1605 v = (reg << CAS_MIF_REG_SHIFT) | (phy << CAS_MIF_PHY_SHIFT) | 1606 CAS_MIF_FRAME_READ; 1607 1608 bus_space_write_4(t, mif, CAS_MIF_FRAME, v); 1609 for (n = 0; n < 100; n++) { 1610 DELAY(1); 1611 v = bus_space_read_4(t, mif, CAS_MIF_FRAME); 1612 if (v & CAS_MIF_FRAME_TA0) 1613 return (v & CAS_MIF_FRAME_DATA); 1614 } 1615 1616 printf("%s: mii_read timeout\n", device_xname(sc->sc_dev)); 1617 return (0); 1618 } 1619 1620 void 1621 cas_mii_writereg(device_t self, int phy, int reg, int val) 1622 { 1623 struct cas_softc *sc = device_private(self); 1624 bus_space_tag_t t = sc->sc_memt; 1625 bus_space_handle_t mif = sc->sc_memh; 1626 int n; 1627 u_int32_t v; 1628 1629 #ifdef CAS_DEBUG 1630 if (sc->sc_debug) 1631 printf("cas_mii_writereg: phy %d reg %d val %x\n", 1632 phy, reg, val); 1633 #endif 1634 1635 /* Construct the frame command */ 1636 v = CAS_MIF_FRAME_WRITE | 1637 (phy << CAS_MIF_PHY_SHIFT) | 1638 (reg << CAS_MIF_REG_SHIFT) | 1639 (val & CAS_MIF_FRAME_DATA); 1640 1641 bus_space_write_4(t, mif, CAS_MIF_FRAME, v); 1642 for (n = 0; n < 100; n++) { 1643 DELAY(1); 1644 v = bus_space_read_4(t, mif, CAS_MIF_FRAME); 1645 if (v & CAS_MIF_FRAME_TA0) 1646 return; 1647 } 1648 1649 printf("%s: mii_write timeout\n", device_xname(sc->sc_dev)); 1650 } 1651 1652 void 1653 cas_mii_statchg(device_t self) 1654 { 1655 struct cas_softc *sc = device_private(self); 1656 #ifdef CAS_DEBUG 1657 int instance = IFM_INST(sc->sc_media.ifm_cur->ifm_media); 1658 #endif 1659 bus_space_tag_t t = sc->sc_memt; 1660 bus_space_handle_t mac = sc->sc_memh; 1661 u_int32_t v; 1662 1663 #ifdef CAS_DEBUG 1664 if (sc->sc_debug) 1665 printf("cas_mii_statchg: status change: phy = %d\n", 1666 sc->sc_phys[instance]); 1667 #endif 1668 1669 /* Set tx full duplex options */ 1670 bus_space_write_4(t, mac, CAS_MAC_TX_CONFIG, 0); 1671 delay(10000); /* reg must be cleared and delay before changing. */ 1672 v = CAS_MAC_TX_ENA_IPG0|CAS_MAC_TX_NGU|CAS_MAC_TX_NGU_LIMIT| 1673 CAS_MAC_TX_ENABLE; 1674 if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0) { 1675 v |= CAS_MAC_TX_IGN_CARRIER|CAS_MAC_TX_IGN_COLLIS; 1676 } 1677 bus_space_write_4(t, mac, CAS_MAC_TX_CONFIG, v); 1678 1679 /* XIF Configuration */ 1680 v = CAS_MAC_XIF_TX_MII_ENA; 1681 v |= CAS_MAC_XIF_LINK_LED; 1682 1683 /* MII needs echo disable if half duplex. 
*/ 1684 if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0) 1685 /* turn on full duplex LED */ 1686 v |= CAS_MAC_XIF_FDPLX_LED; 1687 else 1688 /* half duplex -- disable echo */ 1689 v |= CAS_MAC_XIF_ECHO_DISABL; 1690 1691 switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) { 1692 case IFM_1000_T: /* Gigabit using GMII interface */ 1693 case IFM_1000_SX: 1694 v |= CAS_MAC_XIF_GMII_MODE; 1695 break; 1696 default: 1697 v &= ~CAS_MAC_XIF_GMII_MODE; 1698 } 1699 bus_space_write_4(t, mac, CAS_MAC_XIF_CONFIG, v); 1700 } 1701 1702 int 1703 cas_pcs_readreg(device_t self, int phy, int reg) 1704 { 1705 struct cas_softc *sc = device_private(self); 1706 bus_space_tag_t t = sc->sc_memt; 1707 bus_space_handle_t pcs = sc->sc_memh; 1708 1709 #ifdef CAS_DEBUG 1710 if (sc->sc_debug) 1711 printf("cas_pcs_readreg: phy %d reg %d\n", phy, reg); 1712 #endif 1713 1714 if (phy != CAS_PHYAD_EXTERNAL) 1715 return (0); 1716 1717 switch (reg) { 1718 case MII_BMCR: 1719 reg = CAS_MII_CONTROL; 1720 break; 1721 case MII_BMSR: 1722 reg = CAS_MII_STATUS; 1723 break; 1724 case MII_ANAR: 1725 reg = CAS_MII_ANAR; 1726 break; 1727 case MII_ANLPAR: 1728 reg = CAS_MII_ANLPAR; 1729 break; 1730 case MII_EXTSR: 1731 return (EXTSR_1000XFDX|EXTSR_1000XHDX); 1732 default: 1733 return (0); 1734 } 1735 1736 return bus_space_read_4(t, pcs, reg); 1737 } 1738 1739 void 1740 cas_pcs_writereg(device_t self, int phy, int reg, int val) 1741 { 1742 struct cas_softc *sc = device_private(self); 1743 bus_space_tag_t t = sc->sc_memt; 1744 bus_space_handle_t pcs = sc->sc_memh; 1745 int reset = 0; 1746 1747 #ifdef CAS_DEBUG 1748 if (sc->sc_debug) 1749 printf("cas_pcs_writereg: phy %d reg %d val %x\n", 1750 phy, reg, val); 1751 #endif 1752 1753 if (phy != CAS_PHYAD_EXTERNAL) 1754 return; 1755 1756 if (reg == MII_ANAR) 1757 bus_space_write_4(t, pcs, CAS_MII_CONFIG, 0); 1758 1759 switch (reg) { 1760 case MII_BMCR: 1761 reset = (val & CAS_MII_CONTROL_RESET); 1762 reg = CAS_MII_CONTROL; 1763 break; 1764 case MII_BMSR: 1765 reg = CAS_MII_STATUS; 1766 break; 1767 case MII_ANAR: 1768 reg = CAS_MII_ANAR; 1769 break; 1770 case MII_ANLPAR: 1771 reg = CAS_MII_ANLPAR; 1772 break; 1773 default: 1774 return; 1775 } 1776 1777 bus_space_write_4(t, pcs, reg, val); 1778 1779 if (reset) 1780 cas_bitwait(sc, pcs, CAS_MII_CONTROL, CAS_MII_CONTROL_RESET, 0); 1781 1782 if (reg == CAS_MII_ANAR || reset) 1783 bus_space_write_4(t, pcs, CAS_MII_CONFIG, 1784 CAS_MII_CONFIG_ENABLE); 1785 } 1786 1787 int 1788 cas_mediachange(struct ifnet *ifp) 1789 { 1790 struct cas_softc *sc = ifp->if_softc; 1791 struct mii_data *mii = &sc->sc_mii; 1792 1793 if (mii->mii_instance) { 1794 struct mii_softc *miisc; 1795 LIST_FOREACH(miisc, &mii->mii_phys, mii_list) 1796 mii_phy_reset(miisc); 1797 } 1798 1799 return (mii_mediachg(&sc->sc_mii)); 1800 } 1801 1802 void 1803 cas_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 1804 { 1805 struct cas_softc *sc = ifp->if_softc; 1806 1807 mii_pollstat(&sc->sc_mii); 1808 ifmr->ifm_active = sc->sc_mii.mii_media_active; 1809 ifmr->ifm_status = sc->sc_mii.mii_media_status; 1810 } 1811 1812 /* 1813 * Process an ioctl request. 
1814 */ 1815 int 1816 cas_ioctl(struct ifnet *ifp, u_long cmd, void *data) 1817 { 1818 struct cas_softc *sc = ifp->if_softc; 1819 int s, error = 0; 1820 1821 s = splnet(); 1822 1823 if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) { 1824 error = 0; 1825 if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI) 1826 ; 1827 else if (ifp->if_flags & IFF_RUNNING) { 1828 /* 1829 * Multicast list has changed; set the hardware filter 1830 * accordingly. 1831 */ 1832 cas_iff(sc); 1833 } 1834 } 1835 1836 /* Try to get things going again */ 1837 /* 1838 if (ifp->if_flags & IFF_UP) 1839 cas_start(ifp); 1840 */ 1841 splx(s); 1842 return (error); 1843 } 1844 1845 static bool 1846 cas_suspend(device_t self, pmf_qual_t qual) 1847 { 1848 struct cas_softc *sc = device_private(self); 1849 1850 if (sc->sc_ih != NULL) { 1851 pci_intr_disestablish(sc->sc_pc, sc->sc_ih); 1852 sc->sc_ih = NULL; 1853 } 1854 1855 return true; 1856 } 1857 1858 static bool 1859 cas_resume(device_t self, pmf_qual_t qual) 1860 { 1861 struct cas_softc *sc = device_private(self); 1862 1863 return cas_estintr(sc); 1864 } 1865 1866 static bool 1867 cas_estintr(struct cas_softc *sc) 1868 { 1869 const char *intrstr = NULL; 1870 1871 intrstr = pci_intr_string(sc->sc_pc, sc->sc_handle); 1872 sc->sc_ih = pci_intr_establish(sc->sc_pc, sc->sc_handle, 1873 IPL_NET, cas_intr, sc); 1874 if (sc->sc_ih == NULL) { 1875 aprint_error_dev(sc->sc_dev, "unable to establish interrupt"); 1876 if (intrstr != NULL) 1877 aprint_error(" at %s", intrstr); 1878 aprint_error("\n"); 1879 return false; 1880 } 1881 1882 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr); 1883 return true; 1884 } 1885 1886 bool 1887 cas_shutdown(device_t self, int howto) 1888 { 1889 struct cas_softc *sc = device_private(self); 1890 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1891 1892 cas_stop(ifp, 1); 1893 1894 return true; 1895 } 1896 1897 void 1898 cas_iff(struct cas_softc *sc) 1899 { 1900 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1901 struct ethercom *ec = &sc->sc_ethercom; 1902 struct ether_multi *enm; 1903 struct ether_multistep step; 1904 bus_space_tag_t t = sc->sc_memt; 1905 bus_space_handle_t h = sc->sc_memh; 1906 u_int32_t crc, hash[16], rxcfg; 1907 int i; 1908 1909 rxcfg = bus_space_read_4(t, h, CAS_MAC_RX_CONFIG); 1910 rxcfg &= ~(CAS_MAC_RX_HASH_FILTER | CAS_MAC_RX_PROMISCUOUS | 1911 CAS_MAC_RX_PROMISC_GRP); 1912 ifp->if_flags &= ~IFF_ALLMULTI; 1913 1914 if (ifp->if_flags & IFF_PROMISC || ec->ec_multicnt > 0) { 1915 ifp->if_flags |= IFF_ALLMULTI; 1916 if (ifp->if_flags & IFF_PROMISC) 1917 rxcfg |= CAS_MAC_RX_PROMISCUOUS; 1918 else 1919 rxcfg |= CAS_MAC_RX_PROMISC_GRP; 1920 } else { 1921 /* 1922 * Set up multicast address filter by passing all multicast 1923 * addresses through a crc generator, and then using the 1924 * high order 8 bits as an index into the 256 bit logical 1925 * address filter. The high order 4 bits selects the word, 1926 * while the other 4 bits select the bit within the word 1927 * (where bit 0 is the MSB). 1928 */ 1929 1930 rxcfg |= CAS_MAC_RX_HASH_FILTER; 1931 1932 /* Clear hash table */ 1933 for (i = 0; i < 16; i++) 1934 hash[i] = 0; 1935 1936 ETHER_FIRST_MULTI(step, ec, enm); 1937 while (enm != NULL) { 1938 crc = ether_crc32_le(enm->enm_addrlo, 1939 ETHER_ADDR_LEN); 1940 1941 /* Just want the 8 most significant bits. */ 1942 crc >>= 24; 1943 1944 /* Set the corresponding bit in the filter. 
*/ 1945 hash[crc >> 4] |= 1 << (15 - (crc & 15)); 1946 1947 ETHER_NEXT_MULTI(step, enm); 1948 } 1949 1950 /* Now load the hash table into the chip (if we are using it) */ 1951 for (i = 0; i < 16; i++) { 1952 bus_space_write_4(t, h, 1953 CAS_MAC_HASH0 + i * (CAS_MAC_HASH1 - CAS_MAC_HASH0), 1954 hash[i]); 1955 } 1956 } 1957 1958 bus_space_write_4(t, h, CAS_MAC_RX_CONFIG, rxcfg); 1959 } 1960 1961 int 1962 cas_encap(struct cas_softc *sc, struct mbuf *mhead, u_int32_t *bixp) 1963 { 1964 u_int64_t flags; 1965 u_int32_t cur, frag, i; 1966 bus_dmamap_t map; 1967 1968 cur = frag = *bixp; 1969 map = sc->sc_txd[cur].sd_map; 1970 1971 if (bus_dmamap_load_mbuf(sc->sc_dmatag, map, mhead, 1972 BUS_DMA_NOWAIT) != 0) { 1973 return (ENOBUFS); 1974 } 1975 1976 if ((sc->sc_tx_cnt + map->dm_nsegs) > (CAS_NTXDESC - 2)) { 1977 bus_dmamap_unload(sc->sc_dmatag, map); 1978 return (ENOBUFS); 1979 } 1980 1981 bus_dmamap_sync(sc->sc_dmatag, map, 0, map->dm_mapsize, 1982 BUS_DMASYNC_PREWRITE); 1983 1984 for (i = 0; i < map->dm_nsegs; i++) { 1985 sc->sc_txdescs[frag].cd_addr = 1986 CAS_DMA_WRITE(map->dm_segs[i].ds_addr); 1987 flags = (map->dm_segs[i].ds_len & CAS_TD_BUFSIZE) | 1988 (i == 0 ? CAS_TD_START_OF_PACKET : 0) | 1989 ((i == (map->dm_nsegs - 1)) ? CAS_TD_END_OF_PACKET : 0); 1990 sc->sc_txdescs[frag].cd_flags = CAS_DMA_WRITE(flags); 1991 bus_dmamap_sync(sc->sc_dmatag, sc->sc_cddmamap, 1992 CAS_CDTXOFF(frag), sizeof(struct cas_desc), 1993 BUS_DMASYNC_PREWRITE); 1994 cur = frag; 1995 if (++frag == CAS_NTXDESC) 1996 frag = 0; 1997 } 1998 1999 sc->sc_tx_cnt += map->dm_nsegs; 2000 sc->sc_txd[*bixp].sd_map = sc->sc_txd[cur].sd_map; 2001 sc->sc_txd[cur].sd_map = map; 2002 sc->sc_txd[cur].sd_mbuf = mhead; 2003 2004 bus_space_write_4(sc->sc_memt, sc->sc_memh, CAS_TX_KICK, frag); 2005 2006 *bixp = frag; 2007 2008 /* sync descriptors */ 2009 2010 return (0); 2011 } 2012 2013 /* 2014 * Transmit interrupt. 2015 */ 2016 int 2017 cas_tint(struct cas_softc *sc, u_int32_t status) 2018 { 2019 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 2020 struct cas_sxd *sd; 2021 u_int32_t cons, comp; 2022 2023 comp = bus_space_read_4(sc->sc_memt, sc->sc_memh, CAS_TX_COMPLETION); 2024 cons = sc->sc_tx_cons; 2025 while (cons != comp) { 2026 sd = &sc->sc_txd[cons]; 2027 if (sd->sd_mbuf != NULL) { 2028 bus_dmamap_sync(sc->sc_dmatag, sd->sd_map, 0, 2029 sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 2030 bus_dmamap_unload(sc->sc_dmatag, sd->sd_map); 2031 m_freem(sd->sd_mbuf); 2032 sd->sd_mbuf = NULL; 2033 ifp->if_opackets++; 2034 } 2035 sc->sc_tx_cnt--; 2036 if (++cons == CAS_NTXDESC) 2037 cons = 0; 2038 } 2039 sc->sc_tx_cons = cons; 2040 2041 if (sc->sc_tx_cnt < CAS_NTXDESC - 2) 2042 ifp->if_flags &= ~IFF_OACTIVE; 2043 if (sc->sc_tx_cnt == 0) 2044 ifp->if_timer = 0; 2045 2046 cas_start(ifp); 2047 2048 return (1); 2049 } 2050 2051 void 2052 cas_start(struct ifnet *ifp) 2053 { 2054 struct cas_softc *sc = ifp->if_softc; 2055 struct mbuf *m; 2056 u_int32_t bix; 2057 2058 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 2059 return; 2060 2061 bix = sc->sc_tx_prod; 2062 while (sc->sc_txd[bix].sd_mbuf == NULL) { 2063 IFQ_POLL(&ifp->if_snd, m); 2064 if (m == NULL) 2065 break; 2066 2067 #if NBPFILTER > 0 2068 /* 2069 * If BPF is listening on this interface, let it see the 2070 * packet before we commit it to the wire. 2071 */ 2072 if (ifp->if_bpf) 2073 bpf_mtap(ifp->if_bpf, m); 2074 #endif 2075 2076 /* 2077 * Encapsulate this packet and start it going... 2078 * or fail... 
2079 */ 2080 if (cas_encap(sc, m, &bix)) { 2081 ifp->if_flags |= IFF_OACTIVE; 2082 break; 2083 } 2084 2085 IFQ_DEQUEUE(&ifp->if_snd, m); 2086 ifp->if_timer = 5; 2087 } 2088 2089 sc->sc_tx_prod = bix; 2090 } 2091