/*	$NetBSD: if_cas.c,v 1.9 2010/06/17 06:41:05 mrg Exp $	*/
/*	$OpenBSD: if_cas.c,v 1.29 2009/11/29 16:19:38 kettenis Exp $	*/

/*
 *
 * Copyright (C) 2007 Mark Kettenis.
 * Copyright (C) 2001 Eduardo Horvath.
 * All rights reserved.
 *
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 */

/*
 * Driver for Sun Cassini ethernet controllers.
 *
 * There are basically two variants of this chip: Cassini and
 * Cassini+.  We can distinguish between the two by revision: 0x10 and
 * up are Cassini+.  The most important difference is that Cassini+
 * has a second RX descriptor ring.  Cassini+ will not work without
 * configuring that second ring.  However, since we don't use it we
 * don't actually fill the descriptors, and only hand off the first
 * four to the chip.
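 *
 * (The second ring is primed in cas_init(): its descriptors are left
 * empty and only CAS_RX_KICK2 is written with an initial count of four.)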
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_cas.c,v 1.9 2010/06/17 06:41:05 mrg Exp $");

#include "opt_inet.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/syslog.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>

#include <machine/endian.h>

#include <uvm/uvm_extern.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#endif

#include <net/bpf.h>

#include <sys/bus.h>
#include <sys/intr.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcidevs.h>
#include <prop/proplib.h>

#include <dev/pci/if_casreg.h>
#include <dev/pci/if_casvar.h>

#define	TRIES	10000

static bool	cas_estintr(struct cas_softc *sc, int);
bool		cas_shutdown(device_t, int);
static bool	cas_suspend(device_t, const pmf_qual_t *);
static bool	cas_resume(device_t, const pmf_qual_t *);
static int	cas_detach(device_t, int);
static void	cas_partial_detach(struct cas_softc *, enum cas_attach_stage);

int		cas_match(device_t, cfdata_t, void *);
void		cas_attach(device_t, device_t, void *);


CFATTACH_DECL3_NEW(cas, sizeof(struct cas_softc),
    cas_match, cas_attach, cas_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

int	cas_pci_enaddr(struct cas_softc *, struct pci_attach_args *, uint8_t *);

void		cas_config(struct cas_softc *, const uint8_t *);
void		cas_start(struct ifnet *);
void		cas_stop(struct ifnet *, int);
int		cas_ioctl(struct ifnet *, u_long, void *);
void		cas_tick(void *);
void		cas_watchdog(struct ifnet *);
int		cas_init(struct ifnet *);
void		cas_init_regs(struct cas_softc *);
int		cas_ringsize(int);
int		cas_cringsize(int);
int		cas_meminit(struct cas_softc *);
void		cas_mifinit(struct cas_softc *);
int		cas_bitwait(struct cas_softc *, bus_space_handle_t, int,
		    u_int32_t, u_int32_t);
void		cas_reset(struct cas_softc *);
int		cas_reset_rx(struct cas_softc *);
int		cas_reset_tx(struct cas_softc *);
int		cas_disable_rx(struct cas_softc *);
int		cas_disable_tx(struct cas_softc *);
void		cas_rxdrain(struct cas_softc *);
int		cas_add_rxbuf(struct cas_softc *, int idx);
void		cas_iff(struct cas_softc *);
int		cas_encap(struct cas_softc *, struct mbuf *, u_int32_t *);

/* MII methods & callbacks */
int		cas_mii_readreg(device_t, int, int);
void		cas_mii_writereg(device_t, int, int, int);
void		cas_mii_statchg(device_t);
int		cas_pcs_readreg(device_t, int, int);
void		cas_pcs_writereg(device_t, int, int, int);

int		cas_mediachange(struct ifnet *);
void		cas_mediastatus(struct ifnet *, struct ifmediareq *);

int		cas_eint(struct cas_softc *, u_int);
int		cas_rint(struct cas_softc *);
int		cas_tint(struct cas_softc *, u_int32_t);
int		cas_pint(struct cas_softc *);
int		cas_intr(void *);

#ifdef CAS_DEBUG
#define	DPRINTF(sc, x)	if ((sc)->sc_ethercom.ec_if.if_flags & IFF_DEBUG) \
				printf x
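/*
 * DPRINTF output is additionally gated on IFF_DEBUG, which can be toggled
 * at run time (e.g. with "ifconfig cas0 debug").
 */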
#else
#define	DPRINTF(sc, x)	/* nothing */
#endif

int
cas_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SUN &&
	    (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SUN_CASSINI))
		return 1;

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_NS &&
	    (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_NS_SATURN))
		return 1;

	return 0;
}

#define	PROMHDR_PTR_DATA	0x18
#define	PROMDATA_PTR_VPD	0x08
#define	PROMDATA_DATA2		0x0a

static const u_int8_t cas_promhdr[] = { 0x55, 0xaa };
static const u_int8_t cas_promdat[] = {
	'P', 'C', 'I', 'R',
	PCI_VENDOR_SUN & 0xff, PCI_VENDOR_SUN >> 8,
	PCI_PRODUCT_SUN_CASSINI & 0xff, PCI_PRODUCT_SUN_CASSINI >> 8
};

static const u_int8_t cas_promdat2[] = {
	0x18, 0x00,			/* structure length */
	0x00,				/* structure revision */
	0x00,				/* interface revision */
	PCI_SUBCLASS_NETWORK_ETHERNET,	/* subclass code */
	PCI_CLASS_NETWORK		/* class code */
};

int
cas_pci_enaddr(struct cas_softc *sc, struct pci_attach_args *pa,
    uint8_t *enaddr)
{
	struct pci_vpd_largeres *res;
	struct pci_vpd *vpd;
	bus_space_handle_t romh;
	bus_space_tag_t romt;
	bus_size_t romsize = 0;
	u_int8_t buf[32], *desc;
	pcireg_t address;
	int dataoff, vpdoff, len;
	int rv = -1;

	if (pci_mapreg_map(pa, PCI_MAPREG_ROM, PCI_MAPREG_TYPE_MEM, 0,
	    &romt, &romh, NULL, &romsize))
		return (-1);

	address = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_MAPREG_ROM);
	address |= PCI_MAPREG_ROM_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_MAPREG_ROM, address);

	bus_space_read_region_1(romt, romh, 0, buf, sizeof(buf));
	if (bcmp(buf, cas_promhdr, sizeof(cas_promhdr)))
		goto fail;

	dataoff = buf[PROMHDR_PTR_DATA] | (buf[PROMHDR_PTR_DATA + 1] << 8);
	if (dataoff < 0x1c)
		goto fail;

	bus_space_read_region_1(romt, romh, dataoff, buf, sizeof(buf));
	if (bcmp(buf, cas_promdat, sizeof(cas_promdat)) ||
	    bcmp(buf + PROMDATA_DATA2, cas_promdat2, sizeof(cas_promdat2)))
		goto fail;

	vpdoff = buf[PROMDATA_PTR_VPD] | (buf[PROMDATA_PTR_VPD + 1] << 8);
	if (vpdoff < 0x1c)
		goto fail;

next:
	bus_space_read_region_1(romt, romh, vpdoff, buf, sizeof(buf));
	if (!PCI_VPDRES_ISLARGE(buf[0]))
		goto fail;

	res = (struct pci_vpd_largeres *)buf;
	vpdoff += sizeof(*res);

	len = ((res->vpdres_len_msb << 8) + res->vpdres_len_lsb);
	switch (PCI_VPDRES_LARGE_NAME(res->vpdres_byte0)) {
	case PCI_VPDRES_TYPE_IDENTIFIER_STRING:
		/* Skip identifier string. */
		vpdoff += len;
		goto next;

	case PCI_VPDRES_TYPE_VPD:
		while (len > 0) {
			bus_space_read_region_1(romt, romh, vpdoff,
			    buf, sizeof(buf));

			vpd = (struct pci_vpd *)buf;
			vpdoff += sizeof(*vpd) + vpd->vpd_len;
			len -= sizeof(*vpd) + vpd->vpd_len;

			/*
			 * We're looking for an "Enhanced" VPD...
			 */
			if (vpd->vpd_key0 != 'Z')
				continue;

			desc = buf + sizeof(*vpd);

			/*
			 * ...which is an instance property...
			 */
			if (desc[0] != 'I')
				continue;
			desc += 3;

			/*
			 * ...that's a byte array with the proper
			 * length for a MAC address...
			 */
			if (desc[0] != 'B' || desc[1] != ETHER_ADDR_LEN)
				continue;
			desc += 2;

			/*
			 * ...named "local-mac-address".
			 */
			if (strcmp(desc, "local-mac-address") != 0)
				continue;
			desc += strlen("local-mac-address") + 1;

			memcpy(enaddr, desc, ETHER_ADDR_LEN);
			rv = 0;
		}
		break;

	default:
		goto fail;
	}

fail:
	if (romsize != 0)
		bus_space_unmap(romt, romh, romsize);

	address = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_MAPREG_ROM);
	address &= ~PCI_MAPREG_ROM_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_MAPREG_ROM, address);

	return (rv);
}

void
cas_attach(device_t parent, device_t self, void *aux)
{
	struct pci_attach_args *pa = aux;
	struct cas_softc *sc = device_private(self);
	char devinfo[256];
	prop_data_t data;
	uint8_t enaddr[ETHER_ADDR_LEN];

	sc->sc_dev = self;
	pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo, sizeof(devinfo));
	sc->sc_rev = PCI_REVISION(pa->pa_class);
	aprint_normal(": %s (rev. 0x%02x)\n", devinfo, sc->sc_rev);
	sc->sc_dmatag = pa->pa_dmat;

#define	PCI_CAS_BASEADDR	0x10
	if (pci_mapreg_map(pa, PCI_CAS_BASEADDR, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_size) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	if ((data = prop_dictionary_get(device_properties(sc->sc_dev),
	    "mac-address")) != NULL)
		memcpy(enaddr, prop_data_data_nocopy(data), ETHER_ADDR_LEN);
	else if (cas_pci_enaddr(sc, pa, enaddr) != 0)
		aprint_error_dev(sc->sc_dev, "no Ethernet address found\n");

	sc->sc_burst = 16;	/* XXX */

	sc->sc_att_stage = CAS_ATT_BACKEND_0;

	if (pci_intr_map(pa, &sc->sc_handle) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_size);
		return;
	}
	sc->sc_pc = pa->pa_pc;
	if (!cas_estintr(sc, CAS_INTR_PCI)) {
		bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_size);
		aprint_error_dev(sc->sc_dev,
		    "unable to establish interrupt\n");
		return;
	}

	sc->sc_att_stage = CAS_ATT_BACKEND_1;

	/*
	 * call the main configure
	 */
	cas_config(sc, enaddr);

	if (pmf_device_register1(sc->sc_dev,
	    cas_suspend, cas_resume, cas_shutdown))
		pmf_class_network_register(sc->sc_dev, &sc->sc_ethercom.ec_if);
	else
		aprint_error_dev(sc->sc_dev,
		    "could not establish power handlers\n");

	sc->sc_att_stage = CAS_ATT_FINISHED;
}

/*
 * cas_config:
 *
 *	Attach a Cassini interface to the system.
 */
void
cas_config(struct cas_softc *sc, const uint8_t *enaddr)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *child;
	int i, error;

	/* Make sure the chip is stopped. */
	ifp->if_softc = sc;
	cas_reset(sc);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmatag,
	    sizeof(struct cas_control_data), CAS_PAGE_SIZE, 0, &sc->sc_cdseg,
	    1, &sc->sc_cdnseg, 0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate control data, error = %d\n",
		    error);
		cas_partial_detach(sc, CAS_ATT_0);
	}

	/* XXX should map this in with correct endianness */
	if ((error = bus_dmamem_map(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg,
	    sizeof(struct cas_control_data), (void **)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map control data, error = %d\n", error);
		cas_partial_detach(sc, CAS_ATT_1);
	}

	if ((error = bus_dmamap_create(sc->sc_dmatag,
	    sizeof(struct cas_control_data), 1,
	    sizeof(struct cas_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create control data DMA map, error = %d\n",
		    error);
		cas_partial_detach(sc, CAS_ATT_2);
	}

	if ((error = bus_dmamap_load(sc->sc_dmatag, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct cas_control_data), NULL,
	    0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n",
		    error);
		cas_partial_detach(sc, CAS_ATT_3);
	}

	memset(sc->sc_control_data, 0, sizeof(struct cas_control_data));

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < CAS_NRXDESC; i++) {
		bus_dma_segment_t seg;
		char *kva;
		int rseg;

		if ((error = bus_dmamem_alloc(sc->sc_dmatag, CAS_PAGE_SIZE,
		    CAS_PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to alloc rx DMA mem %d, error = %d\n",
			    i, error);
			cas_partial_detach(sc, CAS_ATT_5);
		}
		sc->sc_rxsoft[i].rxs_dmaseg = seg;

		if ((error = bus_dmamem_map(sc->sc_dmatag, &seg, rseg,
		    CAS_PAGE_SIZE, (void **)&kva, BUS_DMA_NOWAIT)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to map rx DMA mem %d, error = %d\n",
			    i, error);
			cas_partial_detach(sc, CAS_ATT_5);
		}
		sc->sc_rxsoft[i].rxs_kva = kva;

		if ((error = bus_dmamap_create(sc->sc_dmatag, CAS_PAGE_SIZE, 1,
		    CAS_PAGE_SIZE, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create rx DMA map %d, error = %d\n",
			    i, error);
			cas_partial_detach(sc, CAS_ATT_5);
		}

		if ((error = bus_dmamap_load(sc->sc_dmatag,
		    sc->sc_rxsoft[i].rxs_dmamap, kva, CAS_PAGE_SIZE, NULL,
		    BUS_DMA_NOWAIT)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to load rx DMA map %d, error = %d\n",
			    i, error);
			cas_partial_detach(sc, CAS_ATT_5);
		}
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < CAS_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmatag, MCLBYTES,
		    CAS_NTXSEGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_txd[i].sd_map)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create tx DMA map %d, error = %d\n",
			    i, error);
			cas_partial_detach(sc, CAS_ATT_6);
		}
		sc->sc_txd[i].sd_mbuf = NULL;
	}

	/*
	 * From this point forward, the attachment cannot fail.  A failure
	 * before this point releases all resources that may have been
	 * allocated.
	 */

	/* Announce ourselves. */
	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(enaddr));
	aprint_naive(": Ethernet controller\n");

	/* Get RX FIFO size */
	sc->sc_rxfifosize = 16 * 1024;

	/* Initialize ifnet structure. */
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags =
	    IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
	ifp->if_start = cas_start;
	ifp->if_ioctl = cas_ioctl;
	ifp->if_watchdog = cas_watchdog;
	ifp->if_stop = cas_stop;
	ifp->if_init = cas_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, CAS_NTXDESC - 1);
	IFQ_SET_READY(&ifp->if_snd);

	/* Initialize ifmedia structures and MII info */
	mii->mii_ifp = ifp;
	mii->mii_readreg = cas_mii_readreg;
	mii->mii_writereg = cas_mii_writereg;
	mii->mii_statchg = cas_mii_statchg;

	ifmedia_init(&mii->mii_media, 0, cas_mediachange, cas_mediastatus);
	sc->sc_ethercom.ec_mii = mii;

	bus_space_write_4(sc->sc_memt, sc->sc_memh, CAS_MII_DATAPATH_MODE, 0);

	cas_mifinit(sc);

	if (sc->sc_mif_config & CAS_MIF_CONFIG_MDI1) {
		sc->sc_mif_config |= CAS_MIF_CONFIG_PHY_SEL;
		bus_space_write_4(sc->sc_memt, sc->sc_memh,
		    CAS_MIF_CONFIG, sc->sc_mif_config);
	}

	mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);

	child = LIST_FIRST(&mii->mii_phys);
	if (child == NULL &&
	    sc->sc_mif_config & (CAS_MIF_CONFIG_MDI0|CAS_MIF_CONFIG_MDI1)) {
		/*
		 * Try the external PCS SERDES if we didn't find any
		 * MII devices.
		 */
		bus_space_write_4(sc->sc_memt, sc->sc_memh,
		    CAS_MII_DATAPATH_MODE, CAS_MII_DATAPATH_SERDES);

		bus_space_write_4(sc->sc_memt, sc->sc_memh,
		    CAS_MII_CONFIG, CAS_MII_CONFIG_ENABLE);

		mii->mii_readreg = cas_pcs_readreg;
		mii->mii_writereg = cas_pcs_writereg;

		mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
		    MII_OFFSET_ANY, MIIF_NOISOLATE);
	}

	child = LIST_FIRST(&mii->mii_phys);
	if (child == NULL) {
		/* No PHY attached */
		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
	} else {
		/*
		 * Walk along the list of attached MII devices and
		 * establish an `MII instance' to `phy number'
		 * mapping.  We'll use this mapping in media change
		 * requests to determine which phy to use to program
		 * the MIF configuration register.
		 */
		for (; child != NULL; child = LIST_NEXT(child, mii_list)) {
			/*
			 * Note: we support just two PHYs: the built-in
			 * internal device and an external on the MII
			 * connector.
			 */
			if (child->mii_phy > 1 || child->mii_inst > 1) {
				aprint_error_dev(sc->sc_dev,
				    "cannot accommodate MII device %s"
				    " at phy %d, instance %d\n",
				    device_xname(child->mii_dev),
				    child->mii_phy, child->mii_inst);
				continue;
			}

			sc->sc_phys[child->mii_inst] = child->mii_phy;
		}

		/*
		 * XXX - we can really do the following ONLY if the
		 * phy indeed has the auto negotiation capability!!
		 */
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);
	}

	/* claim 802.1q capability */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;
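	/*
	 * (VLAN_MTU is safe to claim here: cas_init() programs
	 * CAS_MAC_MAC_MAX_FRAME to ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN,
	 * so full-size tagged frames fit without lowering the MTU.)
	 */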
	/* Attach the interface. */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

#if NRND > 0
	rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev),
	    RND_TYPE_NET, 0);
#endif

	evcnt_attach_dynamic(&sc->sc_ev_intr, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "interrupts");

	callout_init(&sc->sc_tick_ch, 0);

	return;
}

int
cas_detach(device_t self, int flags)
{
	int i;
	struct cas_softc *sc = device_private(self);
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t h = sc->sc_memh;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
	switch (sc->sc_att_stage) {
	case CAS_ATT_FINISHED:
		bus_space_write_4(t, h, CAS_INTMASK, ~(uint32_t)0);
		pmf_device_deregister(self);
		cas_stop(&sc->sc_ethercom.ec_if, 1);
		evcnt_detach(&sc->sc_ev_intr);

#if NRND > 0
		rnd_detach_source(&sc->rnd_source);
#endif

		ether_ifdetach(ifp);
		if_detach(ifp);
		ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);

		callout_destroy(&sc->sc_tick_ch);

		mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);

		/*FALLTHROUGH*/
	case CAS_ATT_MII:
	case CAS_ATT_7:
	case CAS_ATT_6:
		for (i = 0; i < CAS_NTXDESC; i++) {
			if (sc->sc_txd[i].sd_map != NULL)
				bus_dmamap_destroy(sc->sc_dmatag,
				    sc->sc_txd[i].sd_map);
		}
		/*FALLTHROUGH*/
	case CAS_ATT_5:
		for (i = 0; i < CAS_NRXDESC; i++) {
			if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
				bus_dmamap_unload(sc->sc_dmatag,
				    sc->sc_rxsoft[i].rxs_dmamap);
			if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
				bus_dmamap_destroy(sc->sc_dmatag,
				    sc->sc_rxsoft[i].rxs_dmamap);
			if (sc->sc_rxsoft[i].rxs_kva != NULL)
				bus_dmamem_unmap(sc->sc_dmatag,
				    sc->sc_rxsoft[i].rxs_kva, CAS_PAGE_SIZE);
			/* XXX need to check that bus_dmamem_alloc succeeded
			if (sc->sc_rxsoft[i].rxs_dmaseg != NULL)
			*/
				bus_dmamem_free(sc->sc_dmatag,
				    &(sc->sc_rxsoft[i].rxs_dmaseg), 1);
		}
		bus_dmamap_unload(sc->sc_dmatag, sc->sc_cddmamap);
		/*FALLTHROUGH*/
	case CAS_ATT_4:
	case CAS_ATT_3:
		bus_dmamap_destroy(sc->sc_dmatag, sc->sc_cddmamap);
		/*FALLTHROUGH*/
	case CAS_ATT_2:
		bus_dmamem_unmap(sc->sc_dmatag, sc->sc_control_data,
		    sizeof(struct cas_control_data));
		/*FALLTHROUGH*/
	case CAS_ATT_1:
		bus_dmamem_free(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg);
		/*FALLTHROUGH*/
	case CAS_ATT_0:
		sc->sc_att_stage = CAS_ATT_0;
		/*FALLTHROUGH*/
	case CAS_ATT_BACKEND_2:
	case CAS_ATT_BACKEND_1:
		if (sc->sc_ih != NULL) {
			pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
			sc->sc_ih = NULL;
		}
		bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_size);
		/*FALLTHROUGH*/
	case CAS_ATT_BACKEND_0:
		break;
	}
	return 0;
}

static void
cas_partial_detach(struct cas_softc *sc, enum cas_attach_stage stage)
{
	cfattach_t ca = device_cfattach(sc->sc_dev);

	sc->sc_att_stage = stage;
	(*ca->ca_detach)(sc->sc_dev, 0);
}

void
cas_tick(void *arg)
{
	struct cas_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t mac = sc->sc_memh;
	int s;
	u_int32_t v;

	/* unload collisions counters */
	v = bus_space_read_4(t, mac, CAS_MAC_EXCESS_COLL_CNT) +
	    bus_space_read_4(t, mac, CAS_MAC_LATE_COLL_CNT);
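	/*
	 * Excess and late collisions mean the frame was ultimately dropped,
	 * so they are charged to if_oerrors as well as folded into
	 * if_collisions below.
	 */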
	ifp->if_collisions += v +
	    bus_space_read_4(t, mac, CAS_MAC_NORM_COLL_CNT) +
	    bus_space_read_4(t, mac, CAS_MAC_FIRST_COLL_CNT);
	ifp->if_oerrors += v;

	/* read error counters */
	ifp->if_ierrors +=
	    bus_space_read_4(t, mac, CAS_MAC_RX_LEN_ERR_CNT) +
	    bus_space_read_4(t, mac, CAS_MAC_RX_ALIGN_ERR) +
	    bus_space_read_4(t, mac, CAS_MAC_RX_CRC_ERR_CNT) +
	    bus_space_read_4(t, mac, CAS_MAC_RX_CODE_VIOL);

	/* clear the hardware counters */
	bus_space_write_4(t, mac, CAS_MAC_NORM_COLL_CNT, 0);
	bus_space_write_4(t, mac, CAS_MAC_FIRST_COLL_CNT, 0);
	bus_space_write_4(t, mac, CAS_MAC_EXCESS_COLL_CNT, 0);
	bus_space_write_4(t, mac, CAS_MAC_LATE_COLL_CNT, 0);
	bus_space_write_4(t, mac, CAS_MAC_RX_LEN_ERR_CNT, 0);
	bus_space_write_4(t, mac, CAS_MAC_RX_ALIGN_ERR, 0);
	bus_space_write_4(t, mac, CAS_MAC_RX_CRC_ERR_CNT, 0);
	bus_space_write_4(t, mac, CAS_MAC_RX_CODE_VIOL, 0);

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	callout_reset(&sc->sc_tick_ch, hz, cas_tick, sc);
}

int
cas_bitwait(struct cas_softc *sc, bus_space_handle_t h, int r,
    u_int32_t clr, u_int32_t set)
{
	int i;
	u_int32_t reg;

	for (i = TRIES; i--; DELAY(100)) {
		reg = bus_space_read_4(sc->sc_memt, h, r);
		if ((reg & clr) == 0 && (reg & set) == set)
			return (1);
	}

	return (0);
}

void
cas_reset(struct cas_softc *sc)
{
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t h = sc->sc_memh;
	int s;

	s = splnet();
	DPRINTF(sc, ("%s: cas_reset\n", device_xname(sc->sc_dev)));
	cas_reset_rx(sc);
	cas_reset_tx(sc);

	/* Disable interrupts */
	bus_space_write_4(sc->sc_memt, sc->sc_memh, CAS_INTMASK, ~(uint32_t)0);

	/* Do a full reset */
	bus_space_write_4(t, h, CAS_RESET,
	    CAS_RESET_RX | CAS_RESET_TX | CAS_RESET_BLOCK_PCS);
	if (!cas_bitwait(sc, h, CAS_RESET, CAS_RESET_RX | CAS_RESET_TX, 0))
		aprint_error_dev(sc->sc_dev, "cannot reset device\n");
	splx(s);
}


/*
 * cas_rxdrain:
 *
 *	Drain the receive queue.
 */
void
cas_rxdrain(struct cas_softc *sc)
{
	/* Nothing to do yet. */
}

/*
 * Reset the whole thing.
 */
void
cas_stop(struct ifnet *ifp, int disable)
{
	struct cas_softc *sc = (struct cas_softc *)ifp->if_softc;
	struct cas_sxd *sd;
	u_int32_t i;

	DPRINTF(sc, ("%s: cas_stop\n", device_xname(sc->sc_dev)));

	callout_stop(&sc->sc_tick_ch);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	mii_down(&sc->sc_mii);

	cas_reset_rx(sc);
	cas_reset_tx(sc);

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < CAS_NTXDESC; i++) {
		sd = &sc->sc_txd[i];
		if (sd->sd_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_dmatag, sd->sd_map, 0,
			    sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmatag, sd->sd_map);
			m_freem(sd->sd_mbuf);
			sd->sd_mbuf = NULL;
		}
	}
	sc->sc_tx_cnt = sc->sc_tx_prod = sc->sc_tx_cons = 0;

	if (disable)
		cas_rxdrain(sc);
}


/*
 * Reset the receiver
 */
int
cas_reset_rx(struct cas_softc *sc)
{
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t h = sc->sc_memh;

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	cas_disable_rx(sc);
	bus_space_write_4(t, h, CAS_RX_CONFIG, 0);
	/* Wait till it finishes */
	if (!cas_bitwait(sc, h, CAS_RX_CONFIG, 1, 0))
		aprint_error_dev(sc->sc_dev, "cannot disable rx dma\n");
	/* Wait 5ms extra. */
	delay(5000);

	/* Finally, reset the ERX */
	bus_space_write_4(t, h, CAS_RESET, CAS_RESET_RX);
	/* Wait till it finishes */
	if (!cas_bitwait(sc, h, CAS_RESET, CAS_RESET_RX, 0)) {
		aprint_error_dev(sc->sc_dev, "cannot reset receiver\n");
		return (1);
	}
	return (0);
}


/*
 * Reset the transmitter
 */
int
cas_reset_tx(struct cas_softc *sc)
{
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t h = sc->sc_memh;

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	cas_disable_tx(sc);
	bus_space_write_4(t, h, CAS_TX_CONFIG, 0);
	/* Wait till it finishes */
	if (!cas_bitwait(sc, h, CAS_TX_CONFIG, 1, 0))
		aprint_error_dev(sc->sc_dev, "cannot disable tx dma\n");
	/* Wait 5ms extra. */
	delay(5000);

	/* Finally, reset the ETX */
	bus_space_write_4(t, h, CAS_RESET, CAS_RESET_TX);
	/* Wait till it finishes */
	if (!cas_bitwait(sc, h, CAS_RESET, CAS_RESET_TX, 0)) {
		aprint_error_dev(sc->sc_dev, "cannot reset transmitter\n");
		return (1);
	}
	return (0);
}

/*
 * Disable receiver.
 */
int
cas_disable_rx(struct cas_softc *sc)
{
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t h = sc->sc_memh;
	u_int32_t cfg;

	/* Flip the enable bit */
	cfg = bus_space_read_4(t, h, CAS_MAC_RX_CONFIG);
	cfg &= ~CAS_MAC_RX_ENABLE;
	bus_space_write_4(t, h, CAS_MAC_RX_CONFIG, cfg);

	/* Wait for it to finish */
	return (cas_bitwait(sc, h, CAS_MAC_RX_CONFIG, CAS_MAC_RX_ENABLE, 0));
}

/*
 * Disable transmitter.
 */
int
cas_disable_tx(struct cas_softc *sc)
{
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t h = sc->sc_memh;
	u_int32_t cfg;

	/* Flip the enable bit */
	cfg = bus_space_read_4(t, h, CAS_MAC_TX_CONFIG);
	cfg &= ~CAS_MAC_TX_ENABLE;
	bus_space_write_4(t, h, CAS_MAC_TX_CONFIG, cfg);

	/* Wait for it to finish */
	return (cas_bitwait(sc, h, CAS_MAC_TX_CONFIG, CAS_MAC_TX_ENABLE, 0));
}

/*
 * Initialize interface.
 */
int
cas_meminit(struct cas_softc *sc)
{
	int i;

	/*
	 * Initialize the transmit descriptor ring.
	 */
	for (i = 0; i < CAS_NTXDESC; i++) {
		sc->sc_txdescs[i].cd_flags = 0;
		sc->sc_txdescs[i].cd_addr = 0;
	}
	CAS_CDTXSYNC(sc, 0, CAS_NTXDESC,
	    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/*
	 * Initialize the receive descriptor and receive job
	 * descriptor rings.
	 */
	for (i = 0; i < CAS_NRXDESC; i++)
		CAS_INIT_RXDESC(sc, i, i);
	sc->sc_rxdptr = 0;
	sc->sc_rxptr = 0;

	/*
	 * Initialize the receive completion ring.
	 */
	for (i = 0; i < CAS_NRXCOMP; i++) {
		sc->sc_rxcomps[i].cc_word[0] = 0;
		sc->sc_rxcomps[i].cc_word[1] = 0;
		sc->sc_rxcomps[i].cc_word[2] = 0;
		sc->sc_rxcomps[i].cc_word[3] = CAS_DMA_WRITE(CAS_RC3_OWN);
		CAS_CDRXCSYNC(sc, i,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}

	return (0);
}

int
cas_ringsize(int sz)
{
	switch (sz) {
	case 32:
		return CAS_RING_SZ_32;
	case 64:
		return CAS_RING_SZ_64;
	case 128:
		return CAS_RING_SZ_128;
	case 256:
		return CAS_RING_SZ_256;
	case 512:
		return CAS_RING_SZ_512;
	case 1024:
		return CAS_RING_SZ_1024;
	case 2048:
		return CAS_RING_SZ_2048;
	case 4096:
		return CAS_RING_SZ_4096;
	case 8192:
		return CAS_RING_SZ_8192;
	default:
		aprint_error("cas: invalid Receive Descriptor ring size %d\n",
		    sz);
		return CAS_RING_SZ_32;
	}
}

int
cas_cringsize(int sz)
{
	int i;

	for (i = 0; i < 9; i++)
		if (sz == (128 << i))
			return i;

	aprint_error("cas: invalid completion ring size %d\n", sz);
	return 0;	/* fall back to the encoding for 128 entries */
}

/*
 * Initialization of interface; set up initialization block
 * and transmit/receive descriptor rings.
 */
int
cas_init(struct ifnet *ifp)
{
	struct cas_softc *sc = (struct cas_softc *)ifp->if_softc;
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t h = sc->sc_memh;
	int s;
	u_int max_frame_size;
	u_int32_t v;

	s = splnet();

	DPRINTF(sc, ("%s: cas_init: calling stop\n", device_xname(sc->sc_dev)));
	/*
	 * Initialization sequence.  The numbered steps below correspond
	 * to the sequence outlined in section 6.3.5.1 in the Ethernet
	 * Channel Engine manual (part of the PCIO manual).
	 * See also the STP2002-STQ document from Sun Microsystems.
	 */

	/* step 1 & 2.  Reset the Ethernet Channel */
	cas_stop(ifp, 0);
	cas_reset(sc);
	DPRINTF(sc, ("%s: cas_init: restarting\n", device_xname(sc->sc_dev)));

	/* Re-initialize the MIF */
	cas_mifinit(sc);

	/* step 3.  Setup data structures in host memory */
	cas_meminit(sc);

	/* step 4.  TX MAC registers & counters */
	cas_init_regs(sc);
	max_frame_size = ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN;
	v = (max_frame_size) | (0x2000 << 16) /* Burst size */;
	bus_space_write_4(t, h, CAS_MAC_MAC_MAX_FRAME, v);

	/* step 5.  RX MAC registers & counters */
	cas_iff(sc);

	/* step 6 & 7.  Program Descriptor Ring Base Addresses */
	KASSERT((CAS_CDTXADDR(sc, 0) & 0x1fff) == 0);
	bus_space_write_4(t, h, CAS_TX_RING_PTR_HI,
	    (((uint64_t)CAS_CDTXADDR(sc, 0)) >> 32));
	bus_space_write_4(t, h, CAS_TX_RING_PTR_LO, CAS_CDTXADDR(sc, 0));

	KASSERT((CAS_CDRXADDR(sc, 0) & 0x1fff) == 0);
	bus_space_write_4(t, h, CAS_RX_DRING_PTR_HI,
	    (((uint64_t)CAS_CDRXADDR(sc, 0)) >> 32));
	bus_space_write_4(t, h, CAS_RX_DRING_PTR_LO, CAS_CDRXADDR(sc, 0));

	KASSERT((CAS_CDRXCADDR(sc, 0) & 0x1fff) == 0);
	bus_space_write_4(t, h, CAS_RX_CRING_PTR_HI,
	    (((uint64_t)CAS_CDRXCADDR(sc, 0)) >> 32));
	bus_space_write_4(t, h, CAS_RX_CRING_PTR_LO, CAS_CDRXCADDR(sc, 0));

	if (CAS_PLUS(sc)) {
		KASSERT((CAS_CDRXADDR2(sc, 0) & 0x1fff) == 0);
		bus_space_write_4(t, h, CAS_RX_DRING_PTR_HI2,
		    (((uint64_t)CAS_CDRXADDR2(sc, 0)) >> 32));
		bus_space_write_4(t, h, CAS_RX_DRING_PTR_LO2,
		    CAS_CDRXADDR2(sc, 0));
	}

	/* step 8.  Global Configuration & Interrupt Mask */
	cas_estintr(sc, CAS_INTR_REG);

	/* step 9.  ETX Configuration: use mostly default values */

	/* Enable DMA */
	v = cas_ringsize(CAS_NTXDESC /*XXX*/) << 10;
	bus_space_write_4(t, h, CAS_TX_CONFIG,
	    v|CAS_TX_CONFIG_TXDMA_EN|(1<<24)|(1<<29));
	bus_space_write_4(t, h, CAS_TX_KICK, 0);

	/* step 10.  ERX Configuration */

	/* Encode Receive Descriptor ring size */
	v = cas_ringsize(CAS_NRXDESC) << CAS_RX_CONFIG_RXDRNG_SZ_SHIFT;
	if (CAS_PLUS(sc))
		v |= cas_ringsize(32) << CAS_RX_CONFIG_RXDRNG2_SZ_SHIFT;

	/* Encode Receive Completion ring size */
	v |= cas_cringsize(CAS_NRXCOMP) << CAS_RX_CONFIG_RXCRNG_SZ_SHIFT;

	/* Enable DMA */
	bus_space_write_4(t, h, CAS_RX_CONFIG,
	    v|(2<<CAS_RX_CONFIG_FBOFF_SHFT)|CAS_RX_CONFIG_RXDMA_EN);

	/*
	 * The following value is for an OFF Threshold of about 3/4 full
	 * and an ON Threshold of 1/4 full.
	 */
	bus_space_write_4(t, h, CAS_RX_PAUSE_THRESH,
	    (3 * sc->sc_rxfifosize / 256) |
	    ((sc->sc_rxfifosize / 256) << 12));
	bus_space_write_4(t, h, CAS_RX_BLANKING, (6 << 12) | 6);

	/* step 11.  Configure Media */
	mii_ifmedia_change(&sc->sc_mii);

	/* step 12.  RX_MAC Configuration Register */
	v = bus_space_read_4(t, h, CAS_MAC_RX_CONFIG);
	v |= CAS_MAC_RX_ENABLE | CAS_MAC_RX_STRIP_CRC;
	bus_space_write_4(t, h, CAS_MAC_RX_CONFIG, v);

	/* step 14.  Issue Transmit Pending command */

	/* step 15.  Give the receiver a swift kick */
	bus_space_write_4(t, h, CAS_RX_KICK, CAS_NRXDESC-4);
	if (CAS_PLUS(sc))
		bus_space_write_4(t, h, CAS_RX_KICK2, 4);

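	/*
	 * Note that the second (Cassini+) ring only ever sees these four
	 * empty descriptors; see the comment at the top of this file.
	 */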
	/* Start the one second timer. */
	callout_reset(&sc->sc_tick_ch, hz, cas_tick, sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	ifp->if_timer = 0;
	splx(s);

	return (0);
}

void
cas_init_regs(struct cas_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t h = sc->sc_memh;
	const u_char *laddr = CLLADDR(ifp->if_sadl);
	u_int32_t v, r;

	/* These regs are not cleared on reset */
	sc->sc_inited = 0;
	if (!sc->sc_inited) {
		/* Load recommended values */
		bus_space_write_4(t, h, CAS_MAC_IPG0, 0x00);
		bus_space_write_4(t, h, CAS_MAC_IPG1, 0x08);
		bus_space_write_4(t, h, CAS_MAC_IPG2, 0x04);

		bus_space_write_4(t, h, CAS_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN);
		/* Max frame and max burst size */
		v = ETHER_MAX_LEN | (0x2000 << 16) /* Burst size */;
		bus_space_write_4(t, h, CAS_MAC_MAC_MAX_FRAME, v);

		bus_space_write_4(t, h, CAS_MAC_PREAMBLE_LEN, 0x07);
		bus_space_write_4(t, h, CAS_MAC_JAM_SIZE, 0x04);
		bus_space_write_4(t, h, CAS_MAC_ATTEMPT_LIMIT, 0x10);
		bus_space_write_4(t, h, CAS_MAC_CONTROL_TYPE, 0x8088);
		bus_space_write_4(t, h, CAS_MAC_RANDOM_SEED,
		    ((laddr[5]<<8)|laddr[4])&0x3ff);

		/* Secondary MAC addresses set to 0:0:0:0:0:0 */
		for (r = CAS_MAC_ADDR3; r < CAS_MAC_ADDR42; r += 4)
			bus_space_write_4(t, h, r, 0);

		/* MAC control addr set to 0:1:c2:0:1:80 */
		bus_space_write_4(t, h, CAS_MAC_ADDR42, 0x0001);
		bus_space_write_4(t, h, CAS_MAC_ADDR43, 0xc200);
		bus_space_write_4(t, h, CAS_MAC_ADDR44, 0x0180);

		/* MAC filter addr set to 0:0:0:0:0:0 */
		bus_space_write_4(t, h, CAS_MAC_ADDR_FILTER0, 0);
		bus_space_write_4(t, h, CAS_MAC_ADDR_FILTER1, 0);
		bus_space_write_4(t, h, CAS_MAC_ADDR_FILTER2, 0);

		bus_space_write_4(t, h, CAS_MAC_ADR_FLT_MASK1_2, 0);
		bus_space_write_4(t, h, CAS_MAC_ADR_FLT_MASK0, 0);

		/* Hash table initialized to 0 */
		for (r = CAS_MAC_HASH0; r <= CAS_MAC_HASH15; r += 4)
			bus_space_write_4(t, h, r, 0);

		sc->sc_inited = 1;
	}

	/* Counters need to be zeroed */
	bus_space_write_4(t, h, CAS_MAC_NORM_COLL_CNT, 0);
	bus_space_write_4(t, h, CAS_MAC_FIRST_COLL_CNT, 0);
	bus_space_write_4(t, h, CAS_MAC_EXCESS_COLL_CNT, 0);
	bus_space_write_4(t, h, CAS_MAC_LATE_COLL_CNT, 0);
	bus_space_write_4(t, h, CAS_MAC_DEFER_TMR_CNT, 0);
	bus_space_write_4(t, h, CAS_MAC_PEAK_ATTEMPTS, 0);
	bus_space_write_4(t, h, CAS_MAC_RX_FRAME_COUNT, 0);
	bus_space_write_4(t, h, CAS_MAC_RX_LEN_ERR_CNT, 0);
	bus_space_write_4(t, h, CAS_MAC_RX_ALIGN_ERR, 0);
	bus_space_write_4(t, h, CAS_MAC_RX_CRC_ERR_CNT, 0);
	bus_space_write_4(t, h, CAS_MAC_RX_CODE_VIOL, 0);

	/* Un-pause stuff */
	bus_space_write_4(t, h, CAS_MAC_SEND_PAUSE_CMD, 0);

	/*
	 * Set the station address.
	 */
	bus_space_write_4(t, h, CAS_MAC_ADDR0, (laddr[4]<<8) | laddr[5]);
	bus_space_write_4(t, h, CAS_MAC_ADDR1, (laddr[2]<<8) | laddr[3]);
	bus_space_write_4(t, h, CAS_MAC_ADDR2, (laddr[0]<<8) | laddr[1]);
}

/*
 * Receive interrupt.
 */
int
cas_rint(struct cas_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t h = sc->sc_memh;
	struct cas_rxsoft *rxs;
	struct mbuf *m;
	u_int64_t word[4];
	int len, off, idx;
	int i, skip;
	void *cp;

	for (i = sc->sc_rxptr;; i = CAS_NEXTRX(i + skip)) {
		CAS_CDRXCSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		word[0] = CAS_DMA_READ(sc->sc_rxcomps[i].cc_word[0]);
		word[1] = CAS_DMA_READ(sc->sc_rxcomps[i].cc_word[1]);
		word[2] = CAS_DMA_READ(sc->sc_rxcomps[i].cc_word[2]);
		word[3] = CAS_DMA_READ(sc->sc_rxcomps[i].cc_word[3]);

		/* Stop if the hardware still owns the descriptor. */
		if ((word[0] & CAS_RC0_TYPE) == 0 || word[3] & CAS_RC3_OWN)
			break;

		len = CAS_RC1_HDR_LEN(word[1]);
		if (len > 0) {
			off = CAS_RC1_HDR_OFF(word[1]);
			idx = CAS_RC1_HDR_IDX(word[1]);
			rxs = &sc->sc_rxsoft[idx];

			DPRINTF(sc, ("hdr at idx %d, off %d, len %d\n",
			    idx, off, len));

			bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

			cp = rxs->rxs_kva + off * 256 + ETHER_ALIGN;
			m = m_devget(cp, len, 0, ifp, NULL);

			if (word[0] & CAS_RC0_RELEASE_HDR)
				cas_add_rxbuf(sc, idx);

			if (m != NULL) {

				/*
				 * Pass this up to any BPF listeners, but only
				 * pass it up the stack if it's for us.
				 */
				bpf_mtap(ifp, m);

				ifp->if_ipackets++;
				m->m_pkthdr.csum_flags = 0;
				(*ifp->if_input)(ifp, m);
			} else
				ifp->if_ierrors++;
		}

		len = CAS_RC0_DATA_LEN(word[0]);
		if (len > 0) {
			off = CAS_RC0_DATA_OFF(word[0]);
			idx = CAS_RC0_DATA_IDX(word[0]);
			rxs = &sc->sc_rxsoft[idx];

			DPRINTF(sc, ("data at idx %d, off %d, len %d\n",
			    idx, off, len));

			bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

			/* XXX We should not be copying the packet here. */
			cp = rxs->rxs_kva + off + ETHER_ALIGN;
			m = m_devget(cp, len, 0, ifp, NULL);

			if (word[0] & CAS_RC0_RELEASE_DATA)
				cas_add_rxbuf(sc, idx);

			if (m != NULL) {
				/*
				 * Pass this up to any BPF listeners, but only
				 * pass it up the stack if it's for us.
				 */
				bpf_mtap(ifp, m);

				ifp->if_ipackets++;
				m->m_pkthdr.csum_flags = 0;
				(*ifp->if_input)(ifp, m);
			} else
				ifp->if_ierrors++;
		}

		if (word[0] & CAS_RC0_SPLIT)
			aprint_error_dev(sc->sc_dev, "split packet\n");

		skip = CAS_RC0_SKIP(word[0]);
	}

	while (sc->sc_rxptr != i) {
		sc->sc_rxcomps[sc->sc_rxptr].cc_word[0] = 0;
		sc->sc_rxcomps[sc->sc_rxptr].cc_word[1] = 0;
		sc->sc_rxcomps[sc->sc_rxptr].cc_word[2] = 0;
		sc->sc_rxcomps[sc->sc_rxptr].cc_word[3] =
		    CAS_DMA_WRITE(CAS_RC3_OWN);
		CAS_CDRXCSYNC(sc, sc->sc_rxptr,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		sc->sc_rxptr = CAS_NEXTRX(sc->sc_rxptr);
	}

	bus_space_write_4(t, h, CAS_RX_COMP_TAIL, sc->sc_rxptr);

	DPRINTF(sc, ("cas_rint: done sc->rxptr %d, complete %d\n",
	    sc->sc_rxptr, bus_space_read_4(t, h, CAS_RX_COMPLETION)));

	return (1);
}

/*
 * cas_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 */
int
cas_add_rxbuf(struct cas_softc *sc, int idx)
{
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t h = sc->sc_memh;

	CAS_INIT_RXDESC(sc, sc->sc_rxdptr, idx);

	if ((sc->sc_rxdptr % 4) == 0)
		bus_space_write_4(t, h, CAS_RX_KICK, sc->sc_rxdptr);

	if (++sc->sc_rxdptr == CAS_NRXDESC)
		sc->sc_rxdptr = 0;

	return (0);
}

int
cas_eint(struct cas_softc *sc, u_int status)
{
	char bits[128];

	if ((status & CAS_INTR_MIF) != 0) {
		DPRINTF(sc, ("%s: link status changed\n",
		    device_xname(sc->sc_dev)));
		return (1);
	}

	snprintb(bits, sizeof(bits), CAS_INTR_BITS, status);
	printf("%s: status=%s\n", device_xname(sc->sc_dev), bits);
	return (1);
}

int
cas_pint(struct cas_softc *sc)
{
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t seb = sc->sc_memh;
	u_int32_t status;

	status = bus_space_read_4(t, seb, CAS_MII_INTERRUP_STATUS);
	status |= bus_space_read_4(t, seb, CAS_MII_INTERRUP_STATUS);
#ifdef CAS_DEBUG
	if (status)
		printf("%s: link status changed\n", device_xname(sc->sc_dev));
#endif
	return (1);
}

int
cas_intr(void *v)
{
	struct cas_softc *sc = (struct cas_softc *)v;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t seb = sc->sc_memh;
	u_int32_t status;
	int r = 0;
#ifdef CAS_DEBUG
	char bits[128];
#endif

	sc->sc_ev_intr.ev_count++;

	status = bus_space_read_4(t, seb, CAS_STATUS);
#ifdef CAS_DEBUG
	snprintb(bits, sizeof(bits), CAS_INTR_BITS, status);
#endif
	DPRINTF(sc, ("%s: cas_intr: cplt %x status %s\n",
	    device_xname(sc->sc_dev), (status >> 19), bits));

	if ((status & CAS_INTR_PCS) != 0)
		r |= cas_pint(sc);

	if ((status & (CAS_INTR_TX_TAG_ERR | CAS_INTR_RX_TAG_ERR |
	    CAS_INTR_RX_COMP_FULL | CAS_INTR_BERR)) != 0)
		r |= cas_eint(sc, status);

	if ((status & (CAS_INTR_TX_EMPTY | CAS_INTR_TX_INTME)) != 0)
		r |= cas_tint(sc, status);

	if ((status & (CAS_INTR_RX_DONE | CAS_INTR_RX_NOBUF)) != 0)
		r |= cas_rint(sc);

	/* We should eventually do more than just print out error stats. */
	if (status & CAS_INTR_TX_MAC) {
		int txstat = bus_space_read_4(t, seb, CAS_MAC_TX_STATUS);
#ifdef CAS_DEBUG
		if (txstat & ~CAS_MAC_TX_XMIT_DONE)
			printf("%s: MAC tx fault, status %x\n",
			    device_xname(sc->sc_dev), txstat);
#endif
		if (txstat & (CAS_MAC_TX_UNDERRUN | CAS_MAC_TX_PKT_TOO_LONG))
			cas_init(ifp);
	}
	if (status & CAS_INTR_RX_MAC) {
		int rxstat = bus_space_read_4(t, seb, CAS_MAC_RX_STATUS);
#ifdef CAS_DEBUG
		if (rxstat & ~CAS_MAC_RX_DONE)
			printf("%s: MAC rx fault, status %x\n",
			    device_xname(sc->sc_dev), rxstat);
#endif
		/*
		 * On some chip revisions CAS_MAC_RX_OVERFLOW happens often
		 * due to a silicon bug, so handle it silently.
		 */
		if (rxstat & CAS_MAC_RX_OVERFLOW) {
			ifp->if_ierrors++;
			cas_init(ifp);
		}
#ifdef CAS_DEBUG
		else if (rxstat & ~(CAS_MAC_RX_DONE | CAS_MAC_RX_FRAME_CNT))
			printf("%s: MAC rx fault, status %x\n",
			    device_xname(sc->sc_dev), rxstat);
#endif
	}
#if NRND > 0
	rnd_add_uint32(&sc->rnd_source, status);
#endif
	return (r);
}


void
cas_watchdog(struct ifnet *ifp)
{
	struct cas_softc *sc = ifp->if_softc;

	DPRINTF(sc, ("cas_watchdog: CAS_RX_CONFIG %x CAS_MAC_RX_STATUS %x "
	    "CAS_MAC_RX_CONFIG %x\n",
	    bus_space_read_4(sc->sc_memt, sc->sc_memh, CAS_RX_CONFIG),
	    bus_space_read_4(sc->sc_memt, sc->sc_memh, CAS_MAC_RX_STATUS),
	    bus_space_read_4(sc->sc_memt, sc->sc_memh, CAS_MAC_RX_CONFIG)));

	log(LOG_ERR, "%s: device timeout\n", device_xname(sc->sc_dev));
	++ifp->if_oerrors;

	/* Try to get more packets going. */
	cas_init(ifp);
}

/*
 * Initialize the MII Management Interface
 */
void
cas_mifinit(struct cas_softc *sc)
{
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t mif = sc->sc_memh;

	/* Configure the MIF in frame mode */
	sc->sc_mif_config = bus_space_read_4(t, mif, CAS_MIF_CONFIG);
	sc->sc_mif_config &= ~CAS_MIF_CONFIG_BB_ENA;
	bus_space_write_4(t, mif, CAS_MIF_CONFIG, sc->sc_mif_config);
}

/*
 * MII interface
 *
 * The Cassini MII interface supports at least three different operating modes:
 *
 * Bitbang mode is implemented using data, clock and output enable registers.
 *
 * Frame mode is implemented by loading a complete frame into the frame
 * register and polling the valid bit for completion.
 *
 * Polling mode uses the frame register but completion is indicated by
 * an interrupt.
 *
 */
int
cas_mii_readreg(device_t self, int phy, int reg)
{
	struct cas_softc *sc = device_private(self);
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t mif = sc->sc_memh;
	int n;
	u_int32_t v;

#ifdef CAS_DEBUG
	if (sc->sc_debug)
		printf("cas_mii_readreg: phy %d reg %d\n", phy, reg);
#endif

	/* Construct the frame command */
	v = (reg << CAS_MIF_REG_SHIFT) | (phy << CAS_MIF_PHY_SHIFT) |
	    CAS_MIF_FRAME_READ;

	bus_space_write_4(t, mif, CAS_MIF_FRAME, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, CAS_MIF_FRAME);
		if (v & CAS_MIF_FRAME_TA0)
			return (v & CAS_MIF_FRAME_DATA);
	}

	printf("%s: mii_read timeout\n", device_xname(sc->sc_dev));
	return (0);
}

void
cas_mii_writereg(device_t self, int phy, int reg, int val)
{
	struct cas_softc *sc = device_private(self);
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t mif = sc->sc_memh;
	int n;
	u_int32_t v;

#ifdef CAS_DEBUG
	if (sc->sc_debug)
		printf("cas_mii_writereg: phy %d reg %d val %x\n",
		    phy, reg, val);
#endif

	/* Construct the frame command */
	v = CAS_MIF_FRAME_WRITE |
	    (phy << CAS_MIF_PHY_SHIFT) |
	    (reg << CAS_MIF_REG_SHIFT) |
	    (val & CAS_MIF_FRAME_DATA);

	bus_space_write_4(t, mif, CAS_MIF_FRAME, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, CAS_MIF_FRAME);
		if (v & CAS_MIF_FRAME_TA0)
			return;
	}

	printf("%s: mii_write timeout\n", device_xname(sc->sc_dev));
}

void
cas_mii_statchg(device_t self)
{
	struct cas_softc *sc = device_private(self);
#ifdef CAS_DEBUG
	int instance = IFM_INST(sc->sc_media.ifm_cur->ifm_media);
#endif
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t mac = sc->sc_memh;
	u_int32_t v;

#ifdef CAS_DEBUG
	if (sc->sc_debug)
		printf("cas_mii_statchg: status change: phy = %d\n",
		    sc->sc_phys[instance]);
#endif

	/* Set tx full duplex options */
	bus_space_write_4(t, mac, CAS_MAC_TX_CONFIG, 0);
	delay(10000); /* reg must be cleared and delay before changing. */
	v = CAS_MAC_TX_ENA_IPG0|CAS_MAC_TX_NGU|CAS_MAC_TX_NGU_LIMIT|
	    CAS_MAC_TX_ENABLE;
	if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0) {
		v |= CAS_MAC_TX_IGN_CARRIER|CAS_MAC_TX_IGN_COLLIS;
	}
	bus_space_write_4(t, mac, CAS_MAC_TX_CONFIG, v);

	/* XIF Configuration */
	v = CAS_MAC_XIF_TX_MII_ENA;
	v |= CAS_MAC_XIF_LINK_LED;

	/* MII needs echo disable if half duplex. */
	if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0)
		/* turn on full duplex LED */
		v |= CAS_MAC_XIF_FDPLX_LED;
	else
		/* half duplex -- disable echo */
		v |= CAS_MAC_XIF_ECHO_DISABL;

	switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
	case IFM_1000_T:  /* Gigabit using GMII interface */
	case IFM_1000_SX:
		v |= CAS_MAC_XIF_GMII_MODE;
		break;
	default:
		v &= ~CAS_MAC_XIF_GMII_MODE;
	}
	bus_space_write_4(t, mac, CAS_MAC_XIF_CONFIG, v);
}

int
cas_pcs_readreg(device_t self, int phy, int reg)
{
	struct cas_softc *sc = device_private(self);
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t pcs = sc->sc_memh;

#ifdef CAS_DEBUG
	if (sc->sc_debug)
		printf("cas_pcs_readreg: phy %d reg %d\n", phy, reg);
#endif

	if (phy != CAS_PHYAD_EXTERNAL)
		return (0);

	switch (reg) {
	case MII_BMCR:
		reg = CAS_MII_CONTROL;
		break;
	case MII_BMSR:
		reg = CAS_MII_STATUS;
		break;
	case MII_ANAR:
		reg = CAS_MII_ANAR;
		break;
	case MII_ANLPAR:
		reg = CAS_MII_ANLPAR;
		break;
	case MII_EXTSR:
		return (EXTSR_1000XFDX|EXTSR_1000XHDX);
	default:
		return (0);
	}

	return bus_space_read_4(t, pcs, reg);
}

void
cas_pcs_writereg(device_t self, int phy, int reg, int val)
{
	struct cas_softc *sc = device_private(self);
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t pcs = sc->sc_memh;
	int reset = 0;

#ifdef CAS_DEBUG
	if (sc->sc_debug)
		printf("cas_pcs_writereg: phy %d reg %d val %x\n",
		    phy, reg, val);
#endif

	if (phy != CAS_PHYAD_EXTERNAL)
		return;

	if (reg == MII_ANAR)
		bus_space_write_4(t, pcs, CAS_MII_CONFIG, 0);

	switch (reg) {
	case MII_BMCR:
		reset = (val & CAS_MII_CONTROL_RESET);
		reg = CAS_MII_CONTROL;
		break;
	case MII_BMSR:
		reg = CAS_MII_STATUS;
		break;
	case MII_ANAR:
		reg = CAS_MII_ANAR;
		break;
	case MII_ANLPAR:
		reg = CAS_MII_ANLPAR;
		break;
	default:
		return;
	}

	bus_space_write_4(t, pcs, reg, val);

	if (reset)
		cas_bitwait(sc, pcs, CAS_MII_CONTROL, CAS_MII_CONTROL_RESET, 0);

	if (reg == CAS_MII_ANAR || reset)
		bus_space_write_4(t, pcs, CAS_MII_CONFIG,
		    CAS_MII_CONFIG_ENABLE);
}

int
cas_mediachange(struct ifnet *ifp)
{
	struct cas_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	if (mii->mii_instance) {
		struct mii_softc *miisc;
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}

	return (mii_mediachg(&sc->sc_mii));
}

void
cas_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct cas_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
}

/*
 * Process an ioctl request.
 */
int
cas_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct cas_softc *sc = ifp->if_softc;
	int s, error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
		error = 0;
		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			cas_iff(sc);
		}
	}

	splx(s);
	return (error);
}

static bool
cas_suspend(device_t self, const pmf_qual_t *qual)
{
	struct cas_softc *sc = device_private(self);
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t h = sc->sc_memh;

	bus_space_write_4(t, h, CAS_INTMASK, ~(uint32_t)0);
	if (sc->sc_ih != NULL) {
		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
		sc->sc_ih = NULL;
	}

	return true;
}

static bool
cas_resume(device_t self, const pmf_qual_t *qual)
{
	struct cas_softc *sc = device_private(self);

	return cas_estintr(sc, CAS_INTR_PCI | CAS_INTR_REG);
}

static bool
cas_estintr(struct cas_softc *sc, int what)
{
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t h = sc->sc_memh;
	const char *intrstr = NULL;

	/* PCI interrupts */
	if (what & CAS_INTR_PCI) {
		intrstr = pci_intr_string(sc->sc_pc, sc->sc_handle);
		sc->sc_ih = pci_intr_establish(sc->sc_pc, sc->sc_handle,
		    IPL_NET, cas_intr, sc);
		if (sc->sc_ih == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "unable to establish interrupt");
			if (intrstr != NULL)
				aprint_error(" at %s", intrstr);
			aprint_error("\n");
			return false;
		}

		aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
	}

	/* Interrupt register */
	if (what & CAS_INTR_REG) {
		bus_space_write_4(t, h, CAS_INTMASK,
		    ~(CAS_INTR_TX_INTME|CAS_INTR_TX_EMPTY|
		    CAS_INTR_TX_TAG_ERR|
		    CAS_INTR_RX_DONE|CAS_INTR_RX_NOBUF|
		    CAS_INTR_RX_TAG_ERR|
		    CAS_INTR_RX_COMP_FULL|CAS_INTR_PCS|
		    CAS_INTR_MAC_CONTROL|CAS_INTR_MIF|
		    CAS_INTR_BERR));
		bus_space_write_4(t, h, CAS_MAC_RX_MASK,
		    CAS_MAC_RX_DONE|CAS_MAC_RX_FRAME_CNT);
		bus_space_write_4(t, h, CAS_MAC_TX_MASK, CAS_MAC_TX_XMIT_DONE);
		bus_space_write_4(t, h, CAS_MAC_CONTROL_MASK, 0); /* XXXX */
	}
	return true;
}

bool
cas_shutdown(device_t self, int howto)
{
	struct cas_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	cas_stop(ifp, 1);

	return true;
}

void
cas_iff(struct cas_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ethercom *ec = &sc->sc_ethercom;
	struct ether_multi *enm;
	struct ether_multistep step;
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t h = sc->sc_memh;
	u_int32_t crc, hash[16], rxcfg;
	int i;

	rxcfg = bus_space_read_4(t, h, CAS_MAC_RX_CONFIG);
	rxcfg &= ~(CAS_MAC_RX_HASH_FILTER | CAS_MAC_RX_PROMISCUOUS |
	    CAS_MAC_RX_PROMISC_GRP);
	ifp->if_flags &= ~IFF_ALLMULTI;

	if (ifp->if_flags & IFF_PROMISC || ec->ec_multicnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxcfg |= CAS_MAC_RX_PROMISCUOUS;
		else
			rxcfg |= CAS_MAC_RX_PROMISC_GRP;
	} else {
		/*
		 * Set up multicast address filter by passing all multicast
		 * addresses through a crc generator, and then using the
		 * high order 8 bits as an index into the 256 bit logical
		 * address filter.  The high order 4 bits selects the word,
		 * while the other 4 bits select the bit within the word
		 * (where bit 0 is the MSB).
		 */

		rxcfg |= CAS_MAC_RX_HASH_FILTER;

		/* Clear hash table */
		for (i = 0; i < 16; i++)
			hash[i] = 0;

		ETHER_FIRST_MULTI(step, ec, enm);
		while (enm != NULL) {
			crc = ether_crc32_le(enm->enm_addrlo,
			    ETHER_ADDR_LEN);

			/* Just want the 8 most significant bits. */
			crc >>= 24;

			/* Set the corresponding bit in the filter. */
			hash[crc >> 4] |= 1 << (15 - (crc & 15));

			ETHER_NEXT_MULTI(step, enm);
		}

		/* Now load the hash table into the chip (if we are using it) */
		for (i = 0; i < 16; i++) {
			bus_space_write_4(t, h,
			    CAS_MAC_HASH0 + i * (CAS_MAC_HASH1 - CAS_MAC_HASH0),
			    hash[i]);
		}
	}

	bus_space_write_4(t, h, CAS_MAC_RX_CONFIG, rxcfg);
}

int
cas_encap(struct cas_softc *sc, struct mbuf *mhead, u_int32_t *bixp)
{
	u_int64_t flags;
	u_int32_t cur, frag, i;
	bus_dmamap_t map;

	cur = frag = *bixp;
	map = sc->sc_txd[cur].sd_map;

	if (bus_dmamap_load_mbuf(sc->sc_dmatag, map, mhead,
	    BUS_DMA_NOWAIT) != 0) {
		return (ENOBUFS);
	}

	if ((sc->sc_tx_cnt + map->dm_nsegs) > (CAS_NTXDESC - 2)) {
		bus_dmamap_unload(sc->sc_dmatag, map);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->sc_dmatag, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	for (i = 0; i < map->dm_nsegs; i++) {
		sc->sc_txdescs[frag].cd_addr =
		    CAS_DMA_WRITE(map->dm_segs[i].ds_addr);
		flags = (map->dm_segs[i].ds_len & CAS_TD_BUFSIZE) |
		    (i == 0 ? CAS_TD_START_OF_PACKET : 0) |
		    ((i == (map->dm_nsegs - 1)) ? CAS_TD_END_OF_PACKET : 0);
		sc->sc_txdescs[frag].cd_flags = CAS_DMA_WRITE(flags);
		bus_dmamap_sync(sc->sc_dmatag, sc->sc_cddmamap,
		    CAS_CDTXOFF(frag), sizeof(struct cas_desc),
		    BUS_DMASYNC_PREWRITE);
		cur = frag;
		if (++frag == CAS_NTXDESC)
			frag = 0;
	}

	sc->sc_tx_cnt += map->dm_nsegs;
	sc->sc_txd[*bixp].sd_map = sc->sc_txd[cur].sd_map;
	sc->sc_txd[cur].sd_map = map;
	sc->sc_txd[cur].sd_mbuf = mhead;

	bus_space_write_4(sc->sc_memt, sc->sc_memh, CAS_TX_KICK, frag);

	*bixp = frag;

	/* sync descriptors */

	return (0);
}

/*
 * Transmit interrupt.
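 *
 * The chip advances CAS_TX_COMPLETION past descriptors it has finished
 * with; cas_tint() walks sc_tx_cons up to that point, unloading and
 * freeing the attached mbufs and clearing IFF_OACTIVE once there is room
 * again.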
 */
int
cas_tint(struct cas_softc *sc, u_int32_t status)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct cas_sxd *sd;
	u_int32_t cons, comp;

	comp = bus_space_read_4(sc->sc_memt, sc->sc_memh, CAS_TX_COMPLETION);
	cons = sc->sc_tx_cons;
	while (cons != comp) {
		sd = &sc->sc_txd[cons];
		if (sd->sd_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_dmatag, sd->sd_map, 0,
			    sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmatag, sd->sd_map);
			m_freem(sd->sd_mbuf);
			sd->sd_mbuf = NULL;
			ifp->if_opackets++;
		}
		sc->sc_tx_cnt--;
		if (++cons == CAS_NTXDESC)
			cons = 0;
	}
	sc->sc_tx_cons = cons;

	if (sc->sc_tx_cnt < CAS_NTXDESC - 2)
		ifp->if_flags &= ~IFF_OACTIVE;
	if (sc->sc_tx_cnt == 0)
		ifp->if_timer = 0;

	cas_start(ifp);

	return (1);
}

void
cas_start(struct ifnet *ifp)
{
	struct cas_softc *sc = ifp->if_softc;
	struct mbuf *m;
	u_int32_t bix;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	bix = sc->sc_tx_prod;
	while (sc->sc_txd[bix].sd_mbuf == NULL) {
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		/*
		 * If BPF is listening on this interface, let it see the
		 * packet before we commit it to the wire.
		 */
		bpf_mtap(ifp, m);

		/*
		 * Encapsulate this packet and start it going...
		 * or fail...
		 */
		if (cas_encap(sc, m, &bix)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		ifp->if_timer = 5;
	}

	sc->sc_tx_prod = bix;
}