1 /* $NetBSD: if_cas.c,v 1.11 2010/07/14 09:52:39 jnemeth Exp $ */ 2 /* $OpenBSD: if_cas.c,v 1.29 2009/11/29 16:19:38 kettenis Exp $ */ 3 4 /* 5 * 6 * Copyright (C) 2007 Mark Kettenis. 7 * Copyright (C) 2001 Eduardo Horvath. 8 * All rights reserved. 9 * 10 * 11 * Redistribution and use in source and binary forms, with or without 12 * modification, are permitted provided that the following conditions 13 * are met: 14 * 1. Redistributions of source code must retain the above copyright 15 * notice, this list of conditions and the following disclaimer. 16 * 2. Redistributions in binary form must reproduce the above copyright 17 * notice, this list of conditions and the following disclaimer in the 18 * documentation and/or other materials provided with the distribution. 19 * 20 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND 21 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 22 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 23 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE 24 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 25 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 26 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 27 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 28 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 29 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 30 * SUCH DAMAGE. 31 * 32 */ 33 34 /* 35 * Driver for Sun Cassini ethernet controllers. 36 * 37 * There are basically two variants of this chip: Cassini and 38 * Cassini+. We can distinguish between the two by revision: 0x10 and 39 * up are Cassini+. The most important difference is that Cassini+ 40 * has a second RX descriptor ring. Cassini+ will not work without 41 * configuring that second ring. 
However, since we don't use it we 42 * don't actually fill the descriptors, and only hand off the first 43 * four to the chip. 44 */ 45 46 #include <sys/cdefs.h> 47 __KERNEL_RCSID(0, "$NetBSD: if_cas.c,v 1.11 2010/07/14 09:52:39 jnemeth Exp $"); 48 49 #include "opt_inet.h" 50 51 #include <sys/param.h> 52 #include <sys/systm.h> 53 #include <sys/callout.h> 54 #include <sys/mbuf.h> 55 #include <sys/syslog.h> 56 #include <sys/malloc.h> 57 #include <sys/kernel.h> 58 #include <sys/socket.h> 59 #include <sys/ioctl.h> 60 #include <sys/errno.h> 61 #include <sys/device.h> 62 63 #include <machine/endian.h> 64 65 #include <uvm/uvm_extern.h> 66 67 #include <net/if.h> 68 #include <net/if_dl.h> 69 #include <net/if_media.h> 70 #include <net/if_ether.h> 71 72 #ifdef INET 73 #include <netinet/in.h> 74 #include <netinet/in_systm.h> 75 #include <netinet/in_var.h> 76 #include <netinet/ip.h> 77 #include <netinet/tcp.h> 78 #include <netinet/udp.h> 79 #endif 80 81 #include <net/bpf.h> 82 83 #include <sys/bus.h> 84 #include <sys/intr.h> 85 86 #include <dev/mii/mii.h> 87 #include <dev/mii/miivar.h> 88 #include <dev/mii/mii_bitbang.h> 89 90 #include <dev/pci/pcivar.h> 91 #include <dev/pci/pcireg.h> 92 #include <dev/pci/pcidevs.h> 93 #include <prop/proplib.h> 94 95 #include <dev/pci/if_casreg.h> 96 #include <dev/pci/if_casvar.h> 97 98 #define TRIES 10000 99 100 static bool cas_estintr(struct cas_softc *sc, int); 101 bool cas_shutdown(device_t, int); 102 static bool cas_suspend(device_t, const pmf_qual_t *); 103 static bool cas_resume(device_t, const pmf_qual_t *); 104 static int cas_detach(device_t, int); 105 static void cas_partial_detach(struct cas_softc *, enum cas_attach_stage); 106 107 int cas_match(device_t, cfdata_t, void *); 108 void cas_attach(device_t, device_t, void *); 109 110 111 CFATTACH_DECL3_NEW(cas, sizeof(struct cas_softc), 112 cas_match, cas_attach, cas_detach, NULL, NULL, NULL, 113 DVF_DETACH_SHUTDOWN); 114 115 int cas_pci_enaddr(struct cas_softc *, struct pci_attach_args 
*, uint8_t *); 116 117 void cas_config(struct cas_softc *, const uint8_t *); 118 void cas_start(struct ifnet *); 119 void cas_stop(struct ifnet *, int); 120 int cas_ioctl(struct ifnet *, u_long, void *); 121 void cas_tick(void *); 122 void cas_watchdog(struct ifnet *); 123 int cas_init(struct ifnet *); 124 void cas_init_regs(struct cas_softc *); 125 int cas_ringsize(int); 126 int cas_cringsize(int); 127 int cas_meminit(struct cas_softc *); 128 void cas_mifinit(struct cas_softc *); 129 int cas_bitwait(struct cas_softc *, bus_space_handle_t, int, 130 u_int32_t, u_int32_t); 131 void cas_reset(struct cas_softc *); 132 int cas_reset_rx(struct cas_softc *); 133 int cas_reset_tx(struct cas_softc *); 134 int cas_disable_rx(struct cas_softc *); 135 int cas_disable_tx(struct cas_softc *); 136 void cas_rxdrain(struct cas_softc *); 137 int cas_add_rxbuf(struct cas_softc *, int idx); 138 void cas_iff(struct cas_softc *); 139 int cas_encap(struct cas_softc *, struct mbuf *, u_int32_t *); 140 141 /* MII methods & callbacks */ 142 int cas_mii_readreg(device_t, int, int); 143 void cas_mii_writereg(device_t, int, int, int); 144 void cas_mii_statchg(device_t); 145 int cas_pcs_readreg(device_t, int, int); 146 void cas_pcs_writereg(device_t, int, int, int); 147 148 int cas_mediachange(struct ifnet *); 149 void cas_mediastatus(struct ifnet *, struct ifmediareq *); 150 151 int cas_eint(struct cas_softc *, u_int); 152 int cas_rint(struct cas_softc *); 153 int cas_tint(struct cas_softc *, u_int32_t); 154 int cas_pint(struct cas_softc *); 155 int cas_intr(void *); 156 157 #ifdef CAS_DEBUG 158 #define DPRINTF(sc, x) if ((sc)->sc_ethercom.ec_if.if_flags & IFF_DEBUG) \ 159 printf x 160 #else 161 #define DPRINTF(sc, x) /* nothing */ 162 #endif 163 164 int 165 cas_match(device_t parent, cfdata_t cf, void *aux) 166 { 167 struct pci_attach_args *pa = aux; 168 169 if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SUN && 170 (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SUN_CASSINI)) 171 return 1; 172 173 if 
	    (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_NS &&
	    (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_NS_SATURN))
		return 1;

	return 0;
}

/* Offsets within the PCI expansion ROM header / PCIR data structure. */
#define PROMHDR_PTR_DATA	0x18
#define PROMDATA_PTR_VPD	0x08
#define PROMDATA_DATA2		0x0a

/* Expansion ROM signature (0x55 0xaa). */
static const u_int8_t cas_promhdr[] = { 0x55, 0xaa };
/* "PCIR" data structure signature + vendor/product, little-endian. */
static const u_int8_t cas_promdat[] = {
	'P', 'C', 'I', 'R',
	PCI_VENDOR_SUN & 0xff, PCI_VENDOR_SUN >> 8,
	PCI_PRODUCT_SUN_CASSINI & 0xff, PCI_PRODUCT_SUN_CASSINI >> 8
};
static const u_int8_t cas_promdat_ns[] = {
	'P', 'C', 'I', 'R',
	PCI_VENDOR_NS & 0xff, PCI_VENDOR_NS >> 8,
	PCI_PRODUCT_NS_SATURN & 0xff, PCI_PRODUCT_NS_SATURN >> 8
};

/* Expected PCIR length/revision/class fields (at PROMDATA_DATA2). */
static const u_int8_t cas_promdat2[] = {
	0x18, 0x00,			/* structure length */
	0x00,				/* structure revision */
	0x00,				/* interface revision */
	PCI_SUBCLASS_NETWORK_ETHERNET,	/* subclass code */
	PCI_CLASS_NETWORK		/* class code */
};

/*
 * cas_pci_enaddr:
 *
 *	Extract the Ethernet address from the "local-mac-address"
 *	instance property in the Vital Product Data of the card's
 *	PCI expansion ROM.  Returns 0 and fills in enaddr on success,
 *	-1 on any failure.  The ROM mapping and decode enable are
 *	undone before returning.
 */
int
cas_pci_enaddr(struct cas_softc *sc, struct pci_attach_args *pa,
    uint8_t *enaddr)
{
	struct pci_vpd_largeres *res;
	struct pci_vpd *vpd;
	bus_space_handle_t romh;
	bus_space_tag_t romt;
	bus_size_t romsize = 0;
	u_int8_t buf[32], *desc;
	pcireg_t address;
	int dataoff, vpdoff, len;
	int rv = -1;

	if (pci_mapreg_map(pa, PCI_MAPREG_ROM, PCI_MAPREG_TYPE_MEM, 0,
	    &romt, &romh, NULL, &romsize))
		return (-1);

	/*
	 * Turn on the ROM address decoder.
	 * XXX(review): the enable below reads/writes PCI_MAPREG_START,
	 * but the disable in the fail path uses PCI_MAPREG_ROM --
	 * one of the two registers is almost certainly wrong; confirm
	 * against the PCI expansion ROM BAR for this device.
	 */
	address = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START);
	address |= PCI_MAPREG_ROM_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_MAPREG_START, address);

	/* Check the 0x55 0xaa expansion ROM signature. */
	bus_space_read_region_1(romt, romh, 0, buf, sizeof(buf));
	if (bcmp(buf, cas_promhdr, sizeof(cas_promhdr)))
		goto fail;

	/* Locate and sanity-check the PCIR data structure. */
	dataoff = buf[PROMHDR_PTR_DATA] | (buf[PROMHDR_PTR_DATA + 1] << 8);
	if (dataoff < 0x1c)
		goto fail;

	/* Accept either the Sun Cassini or the NS Saturn identity. */
	bus_space_read_region_1(romt, romh, dataoff, buf, sizeof(buf));
	if ((bcmp(buf, cas_promdat, sizeof(cas_promdat)) &&
	    bcmp(buf, cas_promdat_ns, sizeof(cas_promdat_ns))) ||
	    bcmp(buf + PROMDATA_DATA2, cas_promdat2, sizeof(cas_promdat2)))
		goto fail;

	vpdoff = buf[PROMDATA_PTR_VPD] | (buf[PROMDATA_PTR_VPD + 1] << 8);
	if (vpdoff < 0x1c)
		goto fail;

next:
	/* Walk the VPD "large resource" list. */
	bus_space_read_region_1(romt, romh, vpdoff, buf, sizeof(buf));
	if (!PCI_VPDRES_ISLARGE(buf[0]))
		goto fail;

	res = (struct pci_vpd_largeres *)buf;
	vpdoff += sizeof(*res);

	len = ((res->vpdres_len_msb << 8) + res->vpdres_len_lsb);
	switch(PCI_VPDRES_LARGE_NAME(res->vpdres_byte0)) {
	case PCI_VPDRES_TYPE_IDENTIFIER_STRING:
		/* Skip identifier string. */
		vpdoff += len;
		goto next;

	case PCI_VPDRES_TYPE_VPD:
		while (len > 0) {
			bus_space_read_region_1(romt, romh, vpdoff,
			     buf, sizeof(buf));

			vpd = (struct pci_vpd *)buf;
			vpdoff += sizeof(*vpd) + vpd->vpd_len;
			len -= sizeof(*vpd) + vpd->vpd_len;

			/*
			 * We're looking for an "Enhanced" VPD...
			 */
			if (vpd->vpd_key0 != 'Z')
				continue;

			desc = buf + sizeof(*vpd);

			/*
			 * ...which is an instance property...
			 */
			if (desc[0] != 'I')
				continue;
			desc += 3;

			/*
			 * ...that's a byte array with the proper
			 * length for a MAC address...
			 */
			if (desc[0] != 'B' || desc[1] != ETHER_ADDR_LEN)
				continue;
			desc += 2;

			/*
			 * ...named "local-mac-address".
			 * NOTE(review): strcmp assumes the property name
			 * read into buf is NUL-terminated within the
			 * 32-byte window -- confirm for long records.
			 */
			if (strcmp(desc, "local-mac-address") != 0)
				continue;
			desc += strlen("local-mac-address") + 1;

			memcpy(enaddr, desc, ETHER_ADDR_LEN);
			rv = 0;
		}
		break;

	default:
		goto fail;
	}

 fail:
	if (romsize != 0)
		bus_space_unmap(romt, romh, romsize);

	/* Disable the ROM address decoder again (see XXX above). */
	address = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_MAPREG_ROM);
	address &= ~PCI_MAPREG_ROM_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_MAPREG_ROM, address);

	return (rv);
}

/*
 * cas_attach:
 *
 *	Autoconfiguration attach routine: map the registers, determine
 *	the Ethernet address (device property first, then PCI ROM VPD),
 *	hook up the interrupt and hand off to cas_config().
 */
void
cas_attach(device_t parent, device_t self, void *aux)
{
	struct pci_attach_args *pa = aux;
	struct cas_softc *sc = device_private(self);
	char devinfo[256];
	prop_data_t data;
	uint8_t enaddr[ETHER_ADDR_LEN];

	sc->sc_dev = self;
	pci_devinfo(pa->pa_id, pa->pa_class, 0, devinfo, sizeof(devinfo));
	sc->sc_rev = PCI_REVISION(pa->pa_class);
	aprint_normal(": %s (rev. 0x%02x)\n", devinfo, sc->sc_rev);
	sc->sc_dmatag = pa->pa_dmat;

#define PCI_CAS_BASEADDR 0x10
	if (pci_mapreg_map(pa, PCI_CAS_BASEADDR, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_size) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	/*
	 * Prefer a "mac-address" device property (e.g. from firmware);
	 * fall back to the expansion ROM VPD.  On total failure the
	 * address is all-zeros and attach continues.
	 */
	if ((data = prop_dictionary_get(device_properties(sc->sc_dev),
	    "mac-address")) != NULL)
		memcpy(enaddr, prop_data_data_nocopy(data), ETHER_ADDR_LEN);
	else if (cas_pci_enaddr(sc, pa, enaddr) != 0) {
		aprint_error_dev(sc->sc_dev, "no Ethernet address found\n");
		memset(enaddr, 0, sizeof(enaddr));
	}

	sc->sc_burst = 16;	/* XXX */

	sc->sc_att_stage = CAS_ATT_BACKEND_0;

	if (pci_intr_map(pa, &sc->sc_handle) != 0) {
		aprint_error_dev(sc->sc_dev, "unable to map interrupt\n");
		bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_size);
		return;
	}
	sc->sc_pc = pa->pa_pc;
	if (!cas_estintr(sc, CAS_INTR_PCI)) {
		bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_size);
		aprint_error_dev(sc->sc_dev,
		    "unable to establish interrupt\n");
		return;
	}

	sc->sc_att_stage = CAS_ATT_BACKEND_1;

	/*
	 * call the main configure
	 */
	cas_config(sc, enaddr);

	if (pmf_device_register1(sc->sc_dev,
	    cas_suspend, cas_resume, cas_shutdown))
		pmf_class_network_register(sc->sc_dev, &sc->sc_ethercom.ec_if);
	else
		aprint_error_dev(sc->sc_dev,
		    "could not establish power handlers\n");

	sc->sc_att_stage = CAS_ATT_FINISHED;
}

/*
 * cas_config:
 *
 *	Attach a Cassini interface to the system.
 */
void
cas_config(struct cas_softc *sc, const uint8_t *enaddr)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mii_data *mii = &sc->sc_mii;
	struct mii_softc *child;
	int i, error;

	/* Make sure the chip is stopped. */
	ifp->if_softc = sc;
	cas_reset(sc);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 *
	 * XXX(review): none of the error paths below return after
	 * cas_partial_detach(); execution falls through and keeps using
	 * resources the detach just released -- confirm this is intended.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmatag,
	    sizeof(struct cas_control_data), CAS_PAGE_SIZE, 0, &sc->sc_cdseg,
	    1, &sc->sc_cdnseg, 0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate control data, error = %d\n",
		    error);
		cas_partial_detach(sc, CAS_ATT_0);
	}

	/* XXX should map this in with correct endianness */
	if ((error = bus_dmamem_map(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg,
	    sizeof(struct cas_control_data), (void **)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map control data, error = %d\n", error);
		cas_partial_detach(sc, CAS_ATT_1);
	}

	if ((error = bus_dmamap_create(sc->sc_dmatag,
	    sizeof(struct cas_control_data), 1,
	    sizeof(struct cas_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create control data DMA map, error = %d\n", error);
		cas_partial_detach(sc, CAS_ATT_2);
	}

	if ((error = bus_dmamap_load(sc->sc_dmatag, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct cas_control_data), NULL,
	    0)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n",
		    error);
		cas_partial_detach(sc, CAS_ATT_3);
	}

	memset(sc->sc_control_data, 0, sizeof(struct cas_control_data));

	/*
	 * Create the receive buffer DMA maps.  Each RX buffer is a
	 * page-sized, page-aligned DMA segment that stays loaded for
	 * the lifetime of the interface.
	 */
	for (i = 0; i < CAS_NRXDESC; i++) {
		bus_dma_segment_t seg;
		char *kva;
		int rseg;

		if ((error = bus_dmamem_alloc(sc->sc_dmatag, CAS_PAGE_SIZE,
		    CAS_PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to alloc rx DMA mem %d, error = %d\n",
			    i, error);
			cas_partial_detach(sc, CAS_ATT_5);
		}
		sc->sc_rxsoft[i].rxs_dmaseg = seg;

		/* XXX(review): message says "alloc" but this is the map step */
		if ((error = bus_dmamem_map(sc->sc_dmatag, &seg, rseg,
		    CAS_PAGE_SIZE, (void **)&kva, BUS_DMA_NOWAIT)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to alloc rx DMA mem %d, error = %d\n",
			    i, error);
			cas_partial_detach(sc, CAS_ATT_5);
		}
		sc->sc_rxsoft[i].rxs_kva = kva;

		if ((error = bus_dmamap_create(sc->sc_dmatag, CAS_PAGE_SIZE, 1,
		    CAS_PAGE_SIZE, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create rx DMA map %d, error = %d\n",
			    i, error);
			cas_partial_detach(sc, CAS_ATT_5);
		}

		if ((error = bus_dmamap_load(sc->sc_dmatag,
		    sc->sc_rxsoft[i].rxs_dmamap, kva, CAS_PAGE_SIZE, NULL,
		    BUS_DMA_NOWAIT)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to load rx DMA map %d, error = %d\n",
			    i, error);
			cas_partial_detach(sc, CAS_ATT_5);
		}
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < CAS_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmatag, MCLBYTES,
		    CAS_NTXSEGS, MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &sc->sc_txd[i].sd_map)) != 0) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create tx DMA map %d, error = %d\n",
			    i, error);
			cas_partial_detach(sc, CAS_ATT_6);
		}
		sc->sc_txd[i].sd_mbuf = NULL;
	}

	/*
	 * From this point forward, the attachment cannot fail. A failure
	 * before this point releases all resources that may have been
	 * allocated.
	 */

	/* Announce ourselves. */
	aprint_normal_dev(sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(enaddr));
	aprint_naive(": Ethernet controller\n");

	/* Get RX FIFO size */
	sc->sc_rxfifosize = 16 * 1024;

	/* Initialize ifnet structure. */
	strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags =
	    IFF_BROADCAST | IFF_SIMPLEX | IFF_NOTRAILERS | IFF_MULTICAST;
	ifp->if_start = cas_start;
	ifp->if_ioctl = cas_ioctl;
	ifp->if_watchdog = cas_watchdog;
	ifp->if_stop = cas_stop;
	ifp->if_init = cas_init;
	IFQ_SET_MAXLEN(&ifp->if_snd, CAS_NTXDESC - 1);
	IFQ_SET_READY(&ifp->if_snd);

	/* Initialize ifmedia structures and MII info */
	mii->mii_ifp = ifp;
	mii->mii_readreg = cas_mii_readreg;
	mii->mii_writereg = cas_mii_writereg;
	mii->mii_statchg = cas_mii_statchg;

	ifmedia_init(&mii->mii_media, 0, cas_mediachange, cas_mediastatus);
	sc->sc_ethercom.ec_mii = mii;

	bus_space_write_4(sc->sc_memt, sc->sc_memh, CAS_MII_DATAPATH_MODE, 0);

	cas_mifinit(sc);

	/* If an external transceiver is present, select it. */
	if (sc->sc_mif_config & CAS_MIF_CONFIG_MDI1) {
		sc->sc_mif_config |= CAS_MIF_CONFIG_PHY_SEL;
		bus_space_write_4(sc->sc_memt, sc->sc_memh,
		    CAS_MIF_CONFIG, sc->sc_mif_config);
	}

	mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);

	child = LIST_FIRST(&mii->mii_phys);
	if (child == NULL &&
	    sc->sc_mif_config & (CAS_MIF_CONFIG_MDI0|CAS_MIF_CONFIG_MDI1)) {
		/*
		 * Try the external PCS SERDES if we didn't find any
		 * MII devices.
		 */
		bus_space_write_4(sc->sc_memt, sc->sc_memh,
		    CAS_MII_DATAPATH_MODE, CAS_MII_DATAPATH_SERDES);

		bus_space_write_4(sc->sc_memt, sc->sc_memh,
		    CAS_MII_CONFIG, CAS_MII_CONFIG_ENABLE);

		mii->mii_readreg = cas_pcs_readreg;
		mii->mii_writereg = cas_pcs_writereg;

		mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY,
		    MII_OFFSET_ANY, MIIF_NOISOLATE);
	}

	child = LIST_FIRST(&mii->mii_phys);
	if (child == NULL) {
		/* No PHY attached */
		ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_MANUAL, 0, NULL);
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_MANUAL);
	} else {
		/*
		 * Walk along the list of attached MII devices and
		 * establish an `MII instance' to `phy number'
		 * mapping. We'll use this mapping in media change
		 * requests to determine which phy to use to program
		 * the MIF configuration register.
		 */
		for (; child != NULL; child = LIST_NEXT(child, mii_list)) {
			/*
			 * Note: we support just two PHYs: the built-in
			 * internal device and an external on the MII
			 * connector.
			 */
			if (child->mii_phy > 1 || child->mii_inst > 1) {
				aprint_error_dev(sc->sc_dev,
				    "cannot accommodate MII device %s"
				    " at phy %d, instance %d\n",
				    device_xname(child->mii_dev),
				    child->mii_phy, child->mii_inst);
				continue;
			}

			sc->sc_phys[child->mii_inst] = child->mii_phy;
		}

		/*
		 * XXX - we can really do the following ONLY if the
		 * phy indeed has the auto negotiation capability!!
		 */
		ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_AUTO);
	}

	/* claim 802.1q capability */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/* Attach the interface. */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

#if NRND > 0
	rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev),
	    RND_TYPE_NET, 0);
#endif

	evcnt_attach_dynamic(&sc->sc_ev_intr, EVCNT_TYPE_INTR,
	    NULL, device_xname(sc->sc_dev), "interrupts");

	callout_init(&sc->sc_tick_ch, 0);

	return;
}

/*
 * cas_detach:
 *
 *	Detach routine, also used by cas_partial_detach() to unwind a
 *	failed attach.  The switch falls through from the most complete
 *	attach stage down to the least, releasing resources in reverse
 *	order of acquisition.
 */
int
cas_detach(device_t self, int flags)
{
	int i;
	struct cas_softc *sc = device_private(self);
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t h = sc->sc_memh;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt. Do this in reverse order and fall through.
	 */
	switch (sc->sc_att_stage) {
	case CAS_ATT_FINISHED:
		/* Mask all interrupts before tearing anything down. */
		bus_space_write_4(t, h, CAS_INTMASK, ~(uint32_t)0);
		pmf_device_deregister(self);
		cas_stop(&sc->sc_ethercom.ec_if, 1);
		evcnt_detach(&sc->sc_ev_intr);

#if NRND > 0
		rnd_detach_source(&sc->rnd_source);
#endif

		ether_ifdetach(ifp);
		if_detach(ifp);
		ifmedia_delete_instance(&sc->sc_mii.mii_media, IFM_INST_ANY);

		callout_destroy(&sc->sc_tick_ch);

		mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);

		/*FALLTHROUGH*/
	case CAS_ATT_MII:
	case CAS_ATT_7:
	case CAS_ATT_6:
		for (i = 0; i < CAS_NTXDESC; i++) {
			if (sc->sc_txd[i].sd_map != NULL)
				bus_dmamap_destroy(sc->sc_dmatag,
				    sc->sc_txd[i].sd_map);
		}
		/*FALLTHROUGH*/
	case CAS_ATT_5:
		for (i = 0; i < CAS_NRXDESC; i++) {
			if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
				bus_dmamap_unload(sc->sc_dmatag,
				    sc->sc_rxsoft[i].rxs_dmamap);
			if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
				bus_dmamap_destroy(sc->sc_dmatag,
				    sc->sc_rxsoft[i].rxs_dmamap);
			if (sc->sc_rxsoft[i].rxs_kva != NULL)
				bus_dmamem_unmap(sc->sc_dmatag,
				    sc->sc_rxsoft[i].rxs_kva, CAS_PAGE_SIZE);
			/* XXX need to check that bus_dmamem_alloc suceeded
			if
			   (sc->sc_rxsoft[i].rxs_dmaseg != NULL)
			*/
			bus_dmamem_free(sc->sc_dmatag,
			    &(sc->sc_rxsoft[i].rxs_dmaseg), 1);
		}
		bus_dmamap_unload(sc->sc_dmatag, sc->sc_cddmamap);
		/*FALLTHROUGH*/
	case CAS_ATT_4:
	case CAS_ATT_3:
		bus_dmamap_destroy(sc->sc_dmatag, sc->sc_cddmamap);
		/*FALLTHROUGH*/
	case CAS_ATT_2:
		bus_dmamem_unmap(sc->sc_dmatag, sc->sc_control_data,
		    sizeof(struct cas_control_data));
		/*FALLTHROUGH*/
	case CAS_ATT_1:
		bus_dmamem_free(sc->sc_dmatag, &sc->sc_cdseg, sc->sc_cdnseg);
		/*FALLTHROUGH*/
	case CAS_ATT_0:
		sc->sc_att_stage = CAS_ATT_0;
		/*FALLTHROUGH*/
	case CAS_ATT_BACKEND_2:
	case CAS_ATT_BACKEND_1:
		if (sc->sc_ih != NULL) {
			pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
			sc->sc_ih = NULL;
		}
		bus_space_unmap(sc->sc_memt, sc->sc_memh, sc->sc_size);
		/*FALLTHROUGH*/
	case CAS_ATT_BACKEND_0:
		break;
	}
	return 0;
}

/*
 * cas_partial_detach:
 *
 *	Unwind a failed attach: record how far attach got, then invoke
 *	the regular detach entry point, whose switch releases only the
 *	resources acquired up to that stage.
 */
static void
cas_partial_detach(struct cas_softc *sc, enum cas_attach_stage stage)
{
	cfattach_t ca = device_cfattach(sc->sc_dev);

	sc->sc_att_stage = stage;
	(*ca->ca_detach)(sc->sc_dev, 0);
}

/*
 * cas_tick:
 *
 *	One-second callout: fold the hardware collision/error counters
 *	into the ifnet statistics, clear them, and drive the MII tick.
 *	Reschedules itself.
 */
void
cas_tick(void *arg)
{
	struct cas_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t mac = sc->sc_memh;
	int s;
	u_int32_t v;

	/* unload collisions counters */
	v = bus_space_read_4(t, mac, CAS_MAC_EXCESS_COLL_CNT) +
	    bus_space_read_4(t, mac, CAS_MAC_LATE_COLL_CNT);
	ifp->if_collisions += v +
	    bus_space_read_4(t, mac, CAS_MAC_NORM_COLL_CNT) +
	    bus_space_read_4(t, mac, CAS_MAC_FIRST_COLL_CNT);
	/* Excess/late collisions also count as output errors. */
	ifp->if_oerrors += v;

	/* read error counters */
	ifp->if_ierrors +=
	    bus_space_read_4(t, mac, CAS_MAC_RX_LEN_ERR_CNT) +
	    bus_space_read_4(t, mac, CAS_MAC_RX_ALIGN_ERR) +
	    bus_space_read_4(t, mac, CAS_MAC_RX_CRC_ERR_CNT) +
	    bus_space_read_4(t, mac, CAS_MAC_RX_CODE_VIOL);

	/* clear the hardware counters */
	bus_space_write_4(t, mac, CAS_MAC_NORM_COLL_CNT, 0);
	bus_space_write_4(t, mac, CAS_MAC_FIRST_COLL_CNT, 0);
	bus_space_write_4(t, mac, CAS_MAC_EXCESS_COLL_CNT, 0);
	bus_space_write_4(t, mac, CAS_MAC_LATE_COLL_CNT, 0);
	bus_space_write_4(t, mac, CAS_MAC_RX_LEN_ERR_CNT, 0);
	bus_space_write_4(t, mac, CAS_MAC_RX_ALIGN_ERR, 0);
	bus_space_write_4(t, mac, CAS_MAC_RX_CRC_ERR_CNT, 0);
	bus_space_write_4(t, mac, CAS_MAC_RX_CODE_VIOL, 0);

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	callout_reset(&sc->sc_tick_ch, hz, cas_tick, sc);
}

/*
 * cas_bitwait:
 *
 *	Poll register r until all bits in `clr' are clear and all bits
 *	in `set' are set, delaying 100us between reads, for at most
 *	TRIES iterations.  Returns 1 on success, 0 on timeout.
 */
int
cas_bitwait(struct cas_softc *sc, bus_space_handle_t h, int r,
    u_int32_t clr, u_int32_t set)
{
	int i;
	u_int32_t reg;

	for (i = TRIES; i--; DELAY(100)) {
		reg = bus_space_read_4(sc->sc_memt, h, r);
		if ((reg & clr) == 0 && (reg & set) == set)
			return (1);
	}

	return (0);
}

/*
 * cas_reset:
 *
 *	Full chip reset: stop both DMA engines, mask interrupts, then
 *	reset RX, TX and the PCS block together and wait for completion.
 */
void
cas_reset(struct cas_softc *sc)
{
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t h = sc->sc_memh;
	int s;

	s = splnet();
	DPRINTF(sc, ("%s: cas_reset\n", device_xname(sc->sc_dev)));
	cas_reset_rx(sc);
	cas_reset_tx(sc);

	/* Disable interrupts */
	bus_space_write_4(sc->sc_memt, sc->sc_memh, CAS_INTMASK, ~(uint32_t)0);

	/* Do a full reset */
	bus_space_write_4(t, h, CAS_RESET,
	    CAS_RESET_RX | CAS_RESET_TX | CAS_RESET_BLOCK_PCS);
	if (!cas_bitwait(sc, h, CAS_RESET, CAS_RESET_RX | CAS_RESET_TX, 0))
		aprint_error_dev(sc->sc_dev, "cannot reset device\n");
	splx(s);
}


/*
 * cas_rxdrain:
 *
 *	Drain the receive queue.
 */
void
cas_rxdrain(struct cas_softc *sc)
{
	/* Nothing to do yet. */
}

/*
 * Reset the whole thing.
 *
 * cas_stop:
 *
 *	Stop transmission: mark the interface down, halt the tick
 *	callout and the MII layer, reset both DMA engines and release
 *	every queued transmit mbuf.
 */
void
cas_stop(struct ifnet *ifp, int disable)
{
	struct cas_softc *sc = (struct cas_softc *)ifp->if_softc;
	struct cas_sxd *sd;
	u_int32_t i;

	DPRINTF(sc, ("%s: cas_stop\n", device_xname(sc->sc_dev)));

	callout_stop(&sc->sc_tick_ch);

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
	ifp->if_timer = 0;

	mii_down(&sc->sc_mii);

	cas_reset_rx(sc);
	cas_reset_tx(sc);

	/*
	 * Release any queued transmit buffers.
	 */
	for (i = 0; i < CAS_NTXDESC; i++) {
		sd = &sc->sc_txd[i];
		if (sd->sd_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_dmatag, sd->sd_map, 0,
			    sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmatag, sd->sd_map);
			m_freem(sd->sd_mbuf);
			sd->sd_mbuf = NULL;
		}
	}
	sc->sc_tx_cnt = sc->sc_tx_prod = sc->sc_tx_cons = 0;

	if (disable)
		cas_rxdrain(sc);
}


/*
 * Reset the receiver.  Returns 0 on success, 1 if the ERX reset
 * timed out.
 */
int
cas_reset_rx(struct cas_softc *sc)
{
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t h = sc->sc_memh;

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	cas_disable_rx(sc);
	bus_space_write_4(t, h, CAS_RX_CONFIG, 0);
	/* Wait till it finishes */
	if (!cas_bitwait(sc, h, CAS_RX_CONFIG, 1, 0))
		aprint_error_dev(sc->sc_dev, "cannot disable rx dma\n");
	/* Wait 5ms extra. */
	delay(5000);

	/* Finally, reset the ERX */
	bus_space_write_4(t, h, CAS_RESET, CAS_RESET_RX);
	/* Wait till it finishes */
	if (!cas_bitwait(sc, h, CAS_RESET, CAS_RESET_RX, 0)) {
		aprint_error_dev(sc->sc_dev, "cannot reset receiver\n");
		return (1);
	}
	return (0);
}


/*
 * Reset the transmitter.  Returns 0 on success, 1 if the ETX reset
 * timed out.
 */
int
cas_reset_tx(struct cas_softc *sc)
{
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t h = sc->sc_memh;

	/*
	 * Resetting while DMA is in progress can cause a bus hang, so we
	 * disable DMA first.
	 */
	cas_disable_tx(sc);
	bus_space_write_4(t, h, CAS_TX_CONFIG, 0);
	/* Wait till it finishes */
	if (!cas_bitwait(sc, h, CAS_TX_CONFIG, 1, 0))
		aprint_error_dev(sc->sc_dev, "cannot disable tx dma\n");
	/* Wait 5ms extra. */
	delay(5000);

	/* Finally, reset the ETX */
	bus_space_write_4(t, h, CAS_RESET, CAS_RESET_TX);
	/* Wait till it finishes */
	if (!cas_bitwait(sc, h, CAS_RESET, CAS_RESET_TX, 0)) {
		aprint_error_dev(sc->sc_dev, "cannot reset transmitter\n");
		return (1);
	}
	return (0);
}

/*
 * Disable receiver.  Returns non-zero once the MAC RX enable bit has
 * cleared, 0 on timeout.
 */
int
cas_disable_rx(struct cas_softc *sc)
{
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t h = sc->sc_memh;
	u_int32_t cfg;

	/* Flip the enable bit */
	cfg = bus_space_read_4(t, h, CAS_MAC_RX_CONFIG);
	cfg &= ~CAS_MAC_RX_ENABLE;
	bus_space_write_4(t, h, CAS_MAC_RX_CONFIG, cfg);

	/* Wait for it to finish */
	return (cas_bitwait(sc, h, CAS_MAC_RX_CONFIG, CAS_MAC_RX_ENABLE, 0));
}

/*
 * Disable transmitter.
947 */ 948 int 949 cas_disable_tx(struct cas_softc *sc) 950 { 951 bus_space_tag_t t = sc->sc_memt; 952 bus_space_handle_t h = sc->sc_memh; 953 u_int32_t cfg; 954 955 /* Flip the enable bit */ 956 cfg = bus_space_read_4(t, h, CAS_MAC_TX_CONFIG); 957 cfg &= ~CAS_MAC_TX_ENABLE; 958 bus_space_write_4(t, h, CAS_MAC_TX_CONFIG, cfg); 959 960 /* Wait for it to finish */ 961 return (cas_bitwait(sc, h, CAS_MAC_TX_CONFIG, CAS_MAC_TX_ENABLE, 0)); 962 } 963 964 /* 965 * Initialize interface. 966 */ 967 int 968 cas_meminit(struct cas_softc *sc) 969 { 970 struct cas_rxsoft *rxs; 971 int i, error; 972 973 rxs = (void *)&error; 974 975 /* 976 * Initialize the transmit descriptor ring. 977 */ 978 for (i = 0; i < CAS_NTXDESC; i++) { 979 sc->sc_txdescs[i].cd_flags = 0; 980 sc->sc_txdescs[i].cd_addr = 0; 981 } 982 CAS_CDTXSYNC(sc, 0, CAS_NTXDESC, 983 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 984 985 /* 986 * Initialize the receive descriptor and receive job 987 * descriptor rings. 988 */ 989 for (i = 0; i < CAS_NRXDESC; i++) 990 CAS_INIT_RXDESC(sc, i, i); 991 sc->sc_rxdptr = 0; 992 sc->sc_rxptr = 0; 993 994 /* 995 * Initialize the receive completion ring. 
996 */ 997 for (i = 0; i < CAS_NRXCOMP; i++) { 998 sc->sc_rxcomps[i].cc_word[0] = 0; 999 sc->sc_rxcomps[i].cc_word[1] = 0; 1000 sc->sc_rxcomps[i].cc_word[2] = 0; 1001 sc->sc_rxcomps[i].cc_word[3] = CAS_DMA_WRITE(CAS_RC3_OWN); 1002 CAS_CDRXCSYNC(sc, i, 1003 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1004 } 1005 1006 return (0); 1007 } 1008 1009 int 1010 cas_ringsize(int sz) 1011 { 1012 switch (sz) { 1013 case 32: 1014 return CAS_RING_SZ_32; 1015 case 64: 1016 return CAS_RING_SZ_64; 1017 case 128: 1018 return CAS_RING_SZ_128; 1019 case 256: 1020 return CAS_RING_SZ_256; 1021 case 512: 1022 return CAS_RING_SZ_512; 1023 case 1024: 1024 return CAS_RING_SZ_1024; 1025 case 2048: 1026 return CAS_RING_SZ_2048; 1027 case 4096: 1028 return CAS_RING_SZ_4096; 1029 case 8192: 1030 return CAS_RING_SZ_8192; 1031 default: 1032 aprint_error("cas: invalid Receive Descriptor ring size %d\n", 1033 sz); 1034 return CAS_RING_SZ_32; 1035 } 1036 } 1037 1038 int 1039 cas_cringsize(int sz) 1040 { 1041 int i; 1042 1043 for (i = 0; i < 9; i++) 1044 if (sz == (128 << i)) 1045 return i; 1046 1047 aprint_error("cas: invalid completion ring size %d\n", sz); 1048 return 128; 1049 } 1050 1051 /* 1052 * Initialization of interface; set up initialization block 1053 * and transmit/receive descriptor rings. 1054 */ 1055 int 1056 cas_init(struct ifnet *ifp) 1057 { 1058 struct cas_softc *sc = (struct cas_softc *)ifp->if_softc; 1059 bus_space_tag_t t = sc->sc_memt; 1060 bus_space_handle_t h = sc->sc_memh; 1061 int s; 1062 u_int max_frame_size; 1063 u_int32_t v; 1064 1065 s = splnet(); 1066 1067 DPRINTF(sc, ("%s: cas_init: calling stop\n", device_xname(sc->sc_dev))); 1068 /* 1069 * Initialization sequence. The numbered steps below correspond 1070 * to the sequence outlined in section 6.3.5.1 in the Ethernet 1071 * Channel Engine manual (part of the PCIO manual). 1072 * See also the STP2002-STQ document from Sun Microsystems. 1073 */ 1074 1075 /* step 1 & 2. 
	   Reset the Ethernet Channel */
	cas_stop(ifp, 0);
	cas_reset(sc);
	DPRINTF(sc, ("%s: cas_init: restarting\n", device_xname(sc->sc_dev)));

	/* Re-initialize the MIF */
	cas_mifinit(sc);

	/* step 3. Setup data structures in host memory */
	cas_meminit(sc);

	/* step 4. TX MAC registers & counters */
	cas_init_regs(sc);
	/* Accept maximum-size frames plus a VLAN tag. */
	max_frame_size = ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN;
	v = (max_frame_size) | (0x2000 << 16) /* Burst size */;
	bus_space_write_4(t, h, CAS_MAC_MAC_MAX_FRAME, v);

	/* step 5. RX MAC registers & counters */
	cas_iff(sc);

	/*
	 * step 6 & 7. Program Descriptor Ring Base Addresses.
	 * The chip takes only the upper address bits, so each ring base
	 * must be 8KB-aligned (low 13 bits clear) — asserted below.
	 */
	KASSERT((CAS_CDTXADDR(sc, 0) & 0x1fff) == 0);
	bus_space_write_4(t, h, CAS_TX_RING_PTR_HI,
	    (((uint64_t)CAS_CDTXADDR(sc,0)) >> 32));
	bus_space_write_4(t, h, CAS_TX_RING_PTR_LO, CAS_CDTXADDR(sc, 0));

	KASSERT((CAS_CDRXADDR(sc, 0) & 0x1fff) == 0);
	bus_space_write_4(t, h, CAS_RX_DRING_PTR_HI,
	    (((uint64_t)CAS_CDRXADDR(sc,0)) >> 32));
	bus_space_write_4(t, h, CAS_RX_DRING_PTR_LO, CAS_CDRXADDR(sc, 0));

	KASSERT((CAS_CDRXCADDR(sc, 0) & 0x1fff) == 0);
	bus_space_write_4(t, h, CAS_RX_CRING_PTR_HI,
	    (((uint64_t)CAS_CDRXCADDR(sc,0)) >> 32));
	bus_space_write_4(t, h, CAS_RX_CRING_PTR_LO, CAS_CDRXCADDR(sc, 0));

	if (CAS_PLUS(sc)) {
		/*
		 * Cassini+ will not work unless its second RX descriptor
		 * ring is configured, even though we never fill it (see
		 * the comment at the top of this file).
		 */
		KASSERT((CAS_CDRXADDR2(sc, 0) & 0x1fff) == 0);
		bus_space_write_4(t, h, CAS_RX_DRING_PTR_HI2,
		    (((uint64_t)CAS_CDRXADDR2(sc,0)) >> 32));
		bus_space_write_4(t, h, CAS_RX_DRING_PTR_LO2,
		    CAS_CDRXADDR2(sc, 0));
	}

	/* step 8. Global Configuration & Interrupt Mask */
	cas_estintr(sc, CAS_INTR_REG);

	/* step 9. ETX Configuration: use mostly default values */

	/* Enable DMA */
	v = cas_ringsize(CAS_NTXDESC /*XXX*/) << 10;
	bus_space_write_4(t, h, CAS_TX_CONFIG,
	    v|CAS_TX_CONFIG_TXDMA_EN|(1<<24)|(1<<29));
	bus_space_write_4(t, h, CAS_TX_KICK, 0);

	/* step 10. ERX Configuration */

	/* Encode Receive Descriptor ring size */
	v = cas_ringsize(CAS_NRXDESC) << CAS_RX_CONFIG_RXDRNG_SZ_SHIFT;
	if (CAS_PLUS(sc))
		v |= cas_ringsize(32) << CAS_RX_CONFIG_RXDRNG2_SZ_SHIFT;

	/* Encode Receive Completion ring size */
	v |= cas_cringsize(CAS_NRXCOMP) << CAS_RX_CONFIG_RXCRNG_SZ_SHIFT;

	/* Enable DMA */
	bus_space_write_4(t, h, CAS_RX_CONFIG,
	    v|(2<<CAS_RX_CONFIG_FBOFF_SHFT)|CAS_RX_CONFIG_RXDMA_EN);

	/*
	 * The following value is for an OFF Threshold of about 3/4 full
	 * and an ON Threshold of 1/4 full.
	 */
	bus_space_write_4(t, h, CAS_RX_PAUSE_THRESH,
	    (3 * sc->sc_rxfifosize / 256) |
	    ((sc->sc_rxfifosize / 256) << 12));
	bus_space_write_4(t, h, CAS_RX_BLANKING, (6 << 12) | 6);

	/* step 11. Configure Media */
	mii_ifmedia_change(&sc->sc_mii);

	/* step 12. RX_MAC Configuration Register */
	v = bus_space_read_4(t, h, CAS_MAC_RX_CONFIG);
	v |= CAS_MAC_RX_ENABLE | CAS_MAC_RX_STRIP_CRC;
	bus_space_write_4(t, h, CAS_MAC_RX_CONFIG, v);

	/* step 14. Issue Transmit Pending command */

	/*
	 * step 15. Give the receiver a swift kick.
	 * Only the first CAS_NRXDESC-4 descriptors are handed to the
	 * chip; on Cassini+ the unused second ring gets just 4.
	 */
	bus_space_write_4(t, h, CAS_RX_KICK, CAS_NRXDESC-4);
	if (CAS_PLUS(sc))
		bus_space_write_4(t, h, CAS_RX_KICK2, 4);

	/* Start the one second timer. */
	callout_reset(&sc->sc_tick_ch, hz, cas_tick, sc);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	ifp->if_timer = 0;
	splx(s);

	return (0);
}

/*
 * Program the MAC registers that survive a channel reset: inter-packet
 * gaps, frame size limits, address filters and hash table, then zero the
 * statistics counters and load the station address.
 */
void
cas_init_regs(struct cas_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t h = sc->sc_memh;
	const u_char *laddr = CLLADDR(ifp->if_sadl);
	u_int32_t v, r;

	/* These regs are not cleared on reset */
	/*
	 * NOTE(review): sc_inited is forced to 0 immediately before the
	 * test, so the "already initialized" guard below is always taken.
	 * Looks intentional (always reprogram) — confirm before relying
	 * on the caching behaviour the flag suggests.
	 */
	sc->sc_inited = 0;
	if (!sc->sc_inited) {
		/* Load recommended values */
		bus_space_write_4(t, h, CAS_MAC_IPG0, 0x00);
		bus_space_write_4(t, h, CAS_MAC_IPG1, 0x08);
		bus_space_write_4(t, h, CAS_MAC_IPG2, 0x04);

		bus_space_write_4(t, h, CAS_MAC_MAC_MIN_FRAME, ETHER_MIN_LEN);
		/* Max frame and max burst size */
		v = ETHER_MAX_LEN | (0x2000 << 16) /* Burst size */;
		bus_space_write_4(t, h, CAS_MAC_MAC_MAX_FRAME, v);

		bus_space_write_4(t, h, CAS_MAC_PREAMBLE_LEN, 0x07);
		bus_space_write_4(t, h, CAS_MAC_JAM_SIZE, 0x04);
		bus_space_write_4(t, h, CAS_MAC_ATTEMPT_LIMIT, 0x10);
		bus_space_write_4(t, h, CAS_MAC_CONTROL_TYPE, 0x8088);
		/* Seed the backoff RNG from the low MAC address bits. */
		bus_space_write_4(t, h, CAS_MAC_RANDOM_SEED,
		    ((laddr[5]<<8)|laddr[4])&0x3ff);

		/* Secondary MAC addresses set to 0:0:0:0:0:0 */
		for (r = CAS_MAC_ADDR3; r < CAS_MAC_ADDR42; r += 4)
			bus_space_write_4(t, h, r, 0);

		/* MAC control addr set to 0:1:c2:0:1:80 */
		bus_space_write_4(t, h, CAS_MAC_ADDR42, 0x0001);
		bus_space_write_4(t, h, CAS_MAC_ADDR43, 0xc200);
		bus_space_write_4(t, h, CAS_MAC_ADDR44, 0x0180);

		/* MAC filter addr set to 0:0:0:0:0:0 */
		bus_space_write_4(t, h, CAS_MAC_ADDR_FILTER0, 0);
		bus_space_write_4(t, h, CAS_MAC_ADDR_FILTER1, 0);
		bus_space_write_4(t, h, CAS_MAC_ADDR_FILTER2, 0);

		bus_space_write_4(t, h, CAS_MAC_ADR_FLT_MASK1_2, 0);
		bus_space_write_4(t, h, CAS_MAC_ADR_FLT_MASK0, 0);

		/* Hash table initialized to 0 */
		for (r = CAS_MAC_HASH0; r <= CAS_MAC_HASH15; r += 4)
			bus_space_write_4(t, h, r, 0);

		sc->sc_inited = 1;
	}

	/* Counters need to be zeroed */
	bus_space_write_4(t, h, CAS_MAC_NORM_COLL_CNT, 0);
	bus_space_write_4(t, h, CAS_MAC_FIRST_COLL_CNT, 0);
	bus_space_write_4(t, h, CAS_MAC_EXCESS_COLL_CNT, 0);
	bus_space_write_4(t, h, CAS_MAC_LATE_COLL_CNT, 0);
	bus_space_write_4(t, h, CAS_MAC_DEFER_TMR_CNT, 0);
	bus_space_write_4(t, h, CAS_MAC_PEAK_ATTEMPTS, 0);
	bus_space_write_4(t, h, CAS_MAC_RX_FRAME_COUNT, 0);
	bus_space_write_4(t, h, CAS_MAC_RX_LEN_ERR_CNT, 0);
	bus_space_write_4(t, h, CAS_MAC_RX_ALIGN_ERR, 0);
	bus_space_write_4(t, h, CAS_MAC_RX_CRC_ERR_CNT, 0);
	bus_space_write_4(t, h, CAS_MAC_RX_CODE_VIOL, 0);

	/* Un-pause stuff */
	bus_space_write_4(t, h, CAS_MAC_SEND_PAUSE_CMD, 0);

	/*
	 * Set the station address.
	 * Loaded as three 16-bit words, low word first.
	 */
	bus_space_write_4(t, h, CAS_MAC_ADDR0, (laddr[4]<<8) | laddr[5]);
	bus_space_write_4(t, h, CAS_MAC_ADDR1, (laddr[2]<<8) | laddr[3]);
	bus_space_write_4(t, h, CAS_MAC_ADDR2, (laddr[0]<<8) | laddr[1]);
}

/*
 * Receive interrupt.
 */
int
cas_rint(struct cas_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t h = sc->sc_memh;
	struct cas_rxsoft *rxs;
	struct mbuf *m;
	u_int64_t word[4];
	int len, off, idx;
	int i, skip;
	void *cp;

	/*
	 * Walk the completion ring from sc_rxptr until we hit an entry
	 * the hardware still owns.  Each completion may describe a
	 * header portion, a data portion, or both, each referring to a
	 * receive buffer by index.
	 */
	for (i = sc->sc_rxptr;; i = CAS_NEXTRX(i + skip)) {
		CAS_CDRXCSYNC(sc, i,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		word[0] = CAS_DMA_READ(sc->sc_rxcomps[i].cc_word[0]);
		word[1] = CAS_DMA_READ(sc->sc_rxcomps[i].cc_word[1]);
		word[2] = CAS_DMA_READ(sc->sc_rxcomps[i].cc_word[2]);
		word[3] = CAS_DMA_READ(sc->sc_rxcomps[i].cc_word[3]);

		/* Stop if the hardware still owns the descriptor. */
		if ((word[0] & CAS_RC0_TYPE) == 0 || word[3] & CAS_RC3_OWN)
			break;

		/* Header portion, if any. */
		len = CAS_RC1_HDR_LEN(word[1]);
		if (len > 0) {
			off = CAS_RC1_HDR_OFF(word[1]);
			idx = CAS_RC1_HDR_IDX(word[1]);
			rxs = &sc->sc_rxsoft[idx];

			DPRINTF(sc, ("hdr at idx %d, off %d, len %d\n",
			    idx, off, len));

			bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

			/* Header offset is in units of 256 bytes. */
			cp = rxs->rxs_kva + off * 256 + ETHER_ALIGN;
			m = m_devget(cp, len, 0, ifp, NULL);

			/* Chip is done with this buffer: recycle it. */
			if (word[0] & CAS_RC0_RELEASE_HDR)
				cas_add_rxbuf(sc, idx);

			if (m != NULL) {

				/*
				 * Pass this up to any BPF listeners, but only
				 * pass it up the stack if its for us.
				 */
				bpf_mtap(ifp, m);

				ifp->if_ipackets++;
				m->m_pkthdr.csum_flags = 0;
				(*ifp->if_input)(ifp, m);
			} else
				ifp->if_ierrors++;
		}

		/* Data portion, if any. */
		len = CAS_RC0_DATA_LEN(word[0]);
		if (len > 0) {
			off = CAS_RC0_DATA_OFF(word[0]);
			idx = CAS_RC0_DATA_IDX(word[0]);
			rxs = &sc->sc_rxsoft[idx];

			DPRINTF(sc, ("data at idx %d, off %d, len %d\n",
			    idx, off, len));

			bus_dmamap_sync(sc->sc_dmatag, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

			/* XXX We should not be copying the packet here. */
			cp = rxs->rxs_kva + off + ETHER_ALIGN;
			m = m_devget(cp, len, 0, ifp, NULL);

			if (word[0] & CAS_RC0_RELEASE_DATA)
				cas_add_rxbuf(sc, idx);

			if (m != NULL) {
				/*
				 * Pass this up to any BPF listeners, but only
				 * pass it up the stack if its for us.
				 */
				bpf_mtap(ifp, m);

				ifp->if_ipackets++;
				m->m_pkthdr.csum_flags = 0;
				(*ifp->if_input)(ifp, m);
			} else
				ifp->if_ierrors++;
		}

		if (word[0] & CAS_RC0_SPLIT)
			aprint_error_dev(sc->sc_dev, "split packet\n");

		/* The next valid completion may be several slots away. */
		skip = CAS_RC0_SKIP(word[0]);
	}

	/*
	 * Re-arm every consumed completion slot: clear it and hand
	 * ownership back to the hardware before advancing the tail.
	 */
	while (sc->sc_rxptr != i) {
		sc->sc_rxcomps[sc->sc_rxptr].cc_word[0] = 0;
		sc->sc_rxcomps[sc->sc_rxptr].cc_word[1] = 0;
		sc->sc_rxcomps[sc->sc_rxptr].cc_word[2] = 0;
		sc->sc_rxcomps[sc->sc_rxptr].cc_word[3] =
		    CAS_DMA_WRITE(CAS_RC3_OWN);
		CAS_CDRXCSYNC(sc, sc->sc_rxptr,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		sc->sc_rxptr = CAS_NEXTRX(sc->sc_rxptr);
	}

	/* Tell the chip how far we have consumed. */
	bus_space_write_4(t, h, CAS_RX_COMP_TAIL, sc->sc_rxptr);

	DPRINTF(sc, ("cas_rint: done sc->rxptr %d, complete %d\n",
	    sc->sc_rxptr, bus_space_read_4(t, h, CAS_RX_COMPLETION)));

	return (1);
}

/*
 * cas_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 */
int
cas_add_rxbuf(struct cas_softc *sc, int idx)
{
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t h = sc->sc_memh;

	CAS_INIT_RXDESC(sc, sc->sc_rxdptr, idx);

	/* Kick the chip only every fourth descriptor. */
	if ((sc->sc_rxdptr % 4) == 0)
		bus_space_write_4(t, h, CAS_RX_KICK, sc->sc_rxdptr);

	if (++sc->sc_rxdptr == CAS_NRXDESC)
		sc->sc_rxdptr = 0;

	return (0);
}

/*
 * Error interrupt: log the interrupt status, except for MIF (link
 * status change) interrupts which are only reported under DPRINTF.
 */
int
cas_eint(struct cas_softc *sc, u_int status)
{
	char bits[128];
	if ((status & CAS_INTR_MIF) != 0) {
		DPRINTF(sc, ("%s: link status changed\n",
		    device_xname(sc->sc_dev)));
		return (1);
	}

	snprintb(bits, sizeof(bits), CAS_INTR_BITS, status);
	printf("%s: status=%s\n", device_xname(sc->sc_dev), bits);
	return (1);
}

/*
 * PCS interrupt: read the MII interrupt status register.
 * It is read twice — presumably clear-on-read with state latched
 * across both reads; confirm against the chip documentation.
 */
int
cas_pint(struct cas_softc *sc)
{
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t seb = sc->sc_memh;
	u_int32_t status;

	status = bus_space_read_4(t, seb, CAS_MII_INTERRUP_STATUS);
	status |= bus_space_read_4(t, seb, CAS_MII_INTERRUP_STATUS);
#ifdef CAS_DEBUG
	if (status)
		printf("%s: link status changed\n", device_xname(sc->sc_dev));
#endif
	return (1);
}

/*
 * Main interrupt handler: read CAS_STATUS and dispatch to the PCS,
 * error, transmit and receive sub-handlers as indicated.
 */
int
cas_intr(void *v)
{
	struct cas_softc *sc = (struct cas_softc *)v;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t seb = sc->sc_memh;
	u_int32_t status;
	int r = 0;
#ifdef CAS_DEBUG
	char bits[128];
#endif

	sc->sc_ev_intr.ev_count++;

	status = bus_space_read_4(t, seb, CAS_STATUS);
#ifdef CAS_DEBUG
	snprintb(bits, sizeof(bits), CAS_INTR_BITS, status);
#endif
	/* NOTE(review): 'bits' only exists under CAS_DEBUG; this
	 * DPRINTF presumably compiles away otherwise — confirm. */
	DPRINTF(sc, ("%s: cas_intr: cplt %x status %s\n",
	    device_xname(sc->sc_dev), (status>>19), bits));

	if ((status & CAS_INTR_PCS) != 0)
		r |= cas_pint(sc);

	if ((status & (CAS_INTR_TX_TAG_ERR | CAS_INTR_RX_TAG_ERR |
	    CAS_INTR_RX_COMP_FULL | CAS_INTR_BERR)) != 0)
		r |= cas_eint(sc, status);

	if ((status & (CAS_INTR_TX_EMPTY | CAS_INTR_TX_INTME)) != 0)
		r |= cas_tint(sc, status);

	if ((status & (CAS_INTR_RX_DONE | CAS_INTR_RX_NOBUF)) != 0)
		r |= cas_rint(sc);

	/* We should eventually do more than just print out error stats. */
	if (status & CAS_INTR_TX_MAC) {
		int txstat = bus_space_read_4(t, seb, CAS_MAC_TX_STATUS);
#ifdef CAS_DEBUG
		if (txstat & ~CAS_MAC_TX_XMIT_DONE)
			printf("%s: MAC tx fault, status %x\n",
			    device_xname(sc->sc_dev), txstat);
#endif
		/* Fatal TX MAC faults: reinitialize the whole interface. */
		if (txstat & (CAS_MAC_TX_UNDERRUN | CAS_MAC_TX_PKT_TOO_LONG))
			cas_init(ifp);
	}
	if (status & CAS_INTR_RX_MAC) {
		int rxstat = bus_space_read_4(t, seb, CAS_MAC_RX_STATUS);
#ifdef CAS_DEBUG
		if (rxstat & ~CAS_MAC_RX_DONE)
			printf("%s: MAC rx fault, status %x\n",
			    device_xname(sc->sc_dev), rxstat);
#endif
		/*
		 * On some chip revisions CAS_MAC_RX_OVERFLOW happen often
		 * due to a silicon bug so handle them silently.
		 */
		if (rxstat & CAS_MAC_RX_OVERFLOW) {
			ifp->if_ierrors++;
			cas_init(ifp);
		}
#ifdef CAS_DEBUG
		else if (rxstat & ~(CAS_MAC_RX_DONE | CAS_MAC_RX_FRAME_CNT))
			printf("%s: MAC rx fault, status %x\n",
			    device_xname(sc->sc_dev), rxstat);
#endif
	}
#if NRND > 0
	/* Feed the interrupt status into the entropy pool. */
	rnd_add_uint32(&sc->rnd_source, status);
#endif
	return (r);
}


/*
 * Watchdog timer: the transmitter has been idle too long with work
 * pending.  Log the timeout, count it as an output error and
 * reinitialize the interface to get things moving again.
 */
void
cas_watchdog(struct ifnet *ifp)
{
	struct cas_softc *sc = ifp->if_softc;

	DPRINTF(sc, ("cas_watchdog: CAS_RX_CONFIG %x CAS_MAC_RX_STATUS %x "
	    "CAS_MAC_RX_CONFIG %x\n",
	    bus_space_read_4(sc->sc_memt, sc->sc_memh, CAS_RX_CONFIG),
	    bus_space_read_4(sc->sc_memt, sc->sc_memh, CAS_MAC_RX_STATUS),
	    bus_space_read_4(sc->sc_memt, sc->sc_memh, CAS_MAC_RX_CONFIG)));

	log(LOG_ERR, "%s: device timeout\n", device_xname(sc->sc_dev));
	++ifp->if_oerrors;

	/* Try to get more packets going. */
	cas_init(ifp);
}

/*
 * Initialize the MII Management Interface
 */
void
cas_mifinit(struct cas_softc *sc)
{
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t mif = sc->sc_memh;

	/* Configure the MIF in frame mode (bit-bang disabled). */
	sc->sc_mif_config = bus_space_read_4(t, mif, CAS_MIF_CONFIG);
	sc->sc_mif_config &= ~CAS_MIF_CONFIG_BB_ENA;
	bus_space_write_4(t, mif, CAS_MIF_CONFIG, sc->sc_mif_config);
}

/*
 * MII interface
 *
 * The Cassini MII interface supports at least three different operating modes:
 *
 * Bitbang mode is implemented using data, clock and output enable registers.
 *
 * Frame mode is implemented by loading a complete frame into the frame
 * register and polling the valid bit for completion.
 *
 * Polling mode uses the frame register but completion is indicated by
 * an interrupt.
 *
 */
/*
 * Read a PHY register via an MIF frame-mode transaction.  Polls the
 * turn-around bit for up to 100us; returns 0 on timeout.
 */
int
cas_mii_readreg(device_t self, int phy, int reg)
{
	struct cas_softc *sc = device_private(self);
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t mif = sc->sc_memh;
	int n;
	u_int32_t v;

#ifdef CAS_DEBUG
	if (sc->sc_debug)
		printf("cas_mii_readreg: phy %d reg %d\n", phy, reg);
#endif

	/* Construct the frame command */
	v = (reg << CAS_MIF_REG_SHIFT) | (phy << CAS_MIF_PHY_SHIFT) |
	    CAS_MIF_FRAME_READ;

	bus_space_write_4(t, mif, CAS_MIF_FRAME, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, CAS_MIF_FRAME);
		/* TA0 set means the frame has completed. */
		if (v & CAS_MIF_FRAME_TA0)
			return (v & CAS_MIF_FRAME_DATA);
	}

	printf("%s: mii_read timeout\n", device_xname(sc->sc_dev));
	return (0);
}

/*
 * Write a PHY register via an MIF frame-mode transaction.  Polls the
 * turn-around bit for up to 100us; logs on timeout.
 */
void
cas_mii_writereg(device_t self, int phy, int reg, int val)
{
	struct cas_softc *sc = device_private(self);
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t mif = sc->sc_memh;
	int n;
	u_int32_t v;

#ifdef CAS_DEBUG
	if (sc->sc_debug)
		printf("cas_mii_writereg: phy %d reg %d val %x\n",
		    phy, reg, val);
#endif

	/* Construct the frame command */
	v = CAS_MIF_FRAME_WRITE |
	    (phy << CAS_MIF_PHY_SHIFT) |
	    (reg << CAS_MIF_REG_SHIFT) |
	    (val & CAS_MIF_FRAME_DATA);

	bus_space_write_4(t, mif, CAS_MIF_FRAME, v);
	for (n = 0; n < 100; n++) {
		DELAY(1);
		v = bus_space_read_4(t, mif, CAS_MIF_FRAME);
		if (v & CAS_MIF_FRAME_TA0)
			return;
	}

	printf("%s: mii_write timeout\n", device_xname(sc->sc_dev));
}

/*
 * Media status change callback: reprogram the TX MAC and XIF
 * configuration for the newly negotiated duplex and speed.
 */
void
cas_mii_statchg(device_t self)
{
	struct cas_softc *sc = device_private(self);
#ifdef CAS_DEBUG
	int instance = IFM_INST(sc->sc_media.ifm_cur->ifm_media);
#endif
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t mac = sc->sc_memh;
	u_int32_t v;

#ifdef CAS_DEBUG
	if (sc->sc_debug)
		printf("cas_mii_statchg: status change: phy = %d\n",
		    sc->sc_phys[instance]);
#endif

	/* Set tx full duplex options */
	bus_space_write_4(t, mac, CAS_MAC_TX_CONFIG, 0);
	delay(10000); /* reg must be cleared and delay before changing. */
	v = CAS_MAC_TX_ENA_IPG0|CAS_MAC_TX_NGU|CAS_MAC_TX_NGU_LIMIT|
	    CAS_MAC_TX_ENABLE;
	if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0) {
		/* Full duplex: no carrier sense or collision detect. */
		v |= CAS_MAC_TX_IGN_CARRIER|CAS_MAC_TX_IGN_COLLIS;
	}
	bus_space_write_4(t, mac, CAS_MAC_TX_CONFIG, v);

	/* XIF Configuration */
	v = CAS_MAC_XIF_TX_MII_ENA;
	v |= CAS_MAC_XIF_LINK_LED;

	/* MII needs echo disable if half duplex. */
	if ((IFM_OPTIONS(sc->sc_mii.mii_media_active) & IFM_FDX) != 0)
		/* turn on full duplex LED */
		v |= CAS_MAC_XIF_FDPLX_LED;
	else
		/* half duplex -- disable echo */
		v |= CAS_MAC_XIF_ECHO_DISABL;

	switch (IFM_SUBTYPE(sc->sc_mii.mii_media_active)) {
	case IFM_1000_T:  /* Gigabit using GMII interface */
	case IFM_1000_SX:
		v |= CAS_MAC_XIF_GMII_MODE;
		break;
	default:
		v &= ~CAS_MAC_XIF_GMII_MODE;
	}
	bus_space_write_4(t, mac, CAS_MAC_XIF_CONFIG, v);
}

/*
 * Read from the internal PCS, presenting it as an MII PHY: translate
 * the standard MII register number into the corresponding PCS register.
 * Registers with no PCS equivalent read as 0.
 */
int
cas_pcs_readreg(device_t self, int phy, int reg)
{
	struct cas_softc *sc = device_private(self);
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t pcs = sc->sc_memh;

#ifdef CAS_DEBUG
	if (sc->sc_debug)
		printf("cas_pcs_readreg: phy %d reg %d\n", phy, reg);
#endif

	if (phy != CAS_PHYAD_EXTERNAL)
		return (0);

	switch (reg) {
	case MII_BMCR:
		reg = CAS_MII_CONTROL;
		break;
	case MII_BMSR:
		reg = CAS_MII_STATUS;
		break;
	case MII_ANAR:
		reg = CAS_MII_ANAR;
		break;
	case MII_ANLPAR:
		reg = CAS_MII_ANLPAR;
		break;
	case MII_EXTSR:
		/* The PCS always supports 1000BASE-X, both duplexes. */
		return (EXTSR_1000XFDX|EXTSR_1000XHDX);
	default:
		return (0);
	}

	return bus_space_read_4(t, pcs, reg);
}

/*
 * Write to the internal PCS, presenting it as an MII PHY.  Handles the
 * PCS reset dance: disable CAS_MII_CONFIG around an ANAR update, wait
 * for a commanded reset to complete, then re-enable the PCS.
 */
void
cas_pcs_writereg(device_t self, int phy, int reg, int val)
{
	struct cas_softc *sc = device_private(self);
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t pcs = sc->sc_memh;
	int reset = 0;

#ifdef CAS_DEBUG
	if (sc->sc_debug)
		printf("cas_pcs_writereg: phy %d reg %d val %x\n",
		    phy, reg, val);
#endif

	if (phy != CAS_PHYAD_EXTERNAL)
		return;

	/* Disable the PCS before changing advertisement. */
	if (reg == MII_ANAR)
		bus_space_write_4(t, pcs, CAS_MII_CONFIG, 0);

	switch (reg) {
	case MII_BMCR:
		reset = (val & CAS_MII_CONTROL_RESET);
		reg = CAS_MII_CONTROL;
		break;
	case MII_BMSR:
		reg = CAS_MII_STATUS;
		break;
	case MII_ANAR:
		reg = CAS_MII_ANAR;
		break;
	case MII_ANLPAR:
		reg = CAS_MII_ANLPAR;
		break;
	default:
		return;
	}

	bus_space_write_4(t, pcs, reg, val);

	/* Wait for a commanded reset to self-clear. */
	if (reset)
		cas_bitwait(sc, pcs, CAS_MII_CONTROL, CAS_MII_CONTROL_RESET, 0);

	if (reg == CAS_MII_ANAR || reset)
		bus_space_write_4(t, pcs, CAS_MII_CONFIG,
		    CAS_MII_CONFIG_ENABLE);
}

/*
 * ifmedia change callback: reset all PHYs, then let the MII layer
 * program the newly selected media.
 */
int
cas_mediachange(struct ifnet *ifp)
{
	struct cas_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;

	if (mii->mii_instance) {
		struct mii_softc *miisc;
		LIST_FOREACH(miisc, &mii->mii_phys, mii_list)
			mii_phy_reset(miisc);
	}

	return (mii_mediachg(&sc->sc_mii));
}

/*
 * ifmedia status callback: report the current media and link status.
 */
void
cas_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct cas_softc *sc = ifp->if_softc;

	mii_pollstat(&sc->sc_mii);
	ifmr->ifm_active = sc->sc_mii.mii_media_active;
	ifmr->ifm_status = sc->sc_mii.mii_media_status;
}

/*
 * Process an ioctl request.
 */
int
cas_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct cas_softc *sc = ifp->if_softc;
	int s, error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, cmd, data)) == ENETRESET) {
		error = 0;
		if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			cas_iff(sc);
		}
	}

	splx(s);
	return (error);
}

/*
 * pmf suspend hook: mask all interrupts and tear down the PCI
 * interrupt handler.
 */
static bool
cas_suspend(device_t self, const pmf_qual_t *qual)
{
	struct cas_softc *sc = device_private(self);
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t h = sc->sc_memh;

	bus_space_write_4(t, h, CAS_INTMASK, ~(uint32_t)0);
	if (sc->sc_ih != NULL) {
		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
		sc->sc_ih = NULL;
	}

	return true;
}

/*
 * pmf resume hook: re-establish the PCI interrupt and reprogram the
 * interrupt mask registers.
 */
static bool
cas_resume(device_t self, const pmf_qual_t *qual)
{
	struct cas_softc *sc = device_private(self);

	return cas_estintr(sc, CAS_INTR_PCI | CAS_INTR_REG);
}

/*
 * Establish interrupts: hook up the PCI interrupt handler
 * (CAS_INTR_PCI) and/or program the chip's interrupt mask registers
 * (CAS_INTR_REG), as selected by 'what'.
 */
static bool
cas_estintr(struct cas_softc *sc, int what)
{
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t h = sc->sc_memh;
	const char *intrstr = NULL;

	/* PCI interrupts */
	if (what & CAS_INTR_PCI) {
		intrstr = pci_intr_string(sc->sc_pc, sc->sc_handle);
		sc->sc_ih = pci_intr_establish(sc->sc_pc, sc->sc_handle,
		    IPL_NET, cas_intr, sc);
		if (sc->sc_ih == NULL) {
			aprint_error_dev(sc->sc_dev,
			    "unable to establish interrupt");
			if (intrstr != NULL)
				aprint_error(" at %s", intrstr);
			aprint_error("\n");
			return false;
		}

		aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr);
	}

	/* Interrupt register */
	if (what & CAS_INTR_REG) {
		bus_space_write_4(t, h, CAS_INTMASK,
		    ~(CAS_INTR_TX_INTME|CAS_INTR_TX_EMPTY|
		    CAS_INTR_TX_TAG_ERR|
		    CAS_INTR_RX_DONE|CAS_INTR_RX_NOBUF|
		    CAS_INTR_RX_TAG_ERR|
		    CAS_INTR_RX_COMP_FULL|CAS_INTR_PCS|
		    CAS_INTR_MAC_CONTROL|CAS_INTR_MIF|
		    CAS_INTR_BERR));
		bus_space_write_4(t, h, CAS_MAC_RX_MASK,
		    CAS_MAC_RX_DONE|CAS_MAC_RX_FRAME_CNT);
		bus_space_write_4(t, h, CAS_MAC_TX_MASK, CAS_MAC_TX_XMIT_DONE);
		bus_space_write_4(t, h, CAS_MAC_CONTROL_MASK, 0); /* XXXX */
	}
	return true;
}

/*
 * Shutdown hook: stop the interface so the chip is quiescent before
 * the system goes down.
 */
bool
cas_shutdown(device_t self, int howto)
{
	struct cas_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	cas_stop(ifp, 1);

	return true;
}

/*
 * Program the RX MAC filter: promiscuous / all-multicast modes, or the
 * 256-bit multicast hash filter built from the current multicast list.
 */
void
cas_iff(struct cas_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ethercom *ec = &sc->sc_ethercom;
	struct ether_multi *enm;
	struct ether_multistep step;
	bus_space_tag_t t = sc->sc_memt;
	bus_space_handle_t h = sc->sc_memh;
	u_int32_t crc, hash[16], rxcfg;
	int i;

	rxcfg = bus_space_read_4(t, h, CAS_MAC_RX_CONFIG);
	rxcfg &= ~(CAS_MAC_RX_HASH_FILTER | CAS_MAC_RX_PROMISCUOUS |
	    CAS_MAC_RX_PROMISC_GRP);
	ifp->if_flags &= ~IFF_ALLMULTI;

	if (ifp->if_flags & IFF_PROMISC || ec->ec_multicnt > 0) {
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC)
			rxcfg |= CAS_MAC_RX_PROMISCUOUS;
		else
			rxcfg |= CAS_MAC_RX_PROMISC_GRP;
	} else {
		/*
		 * Set up multicast address filter by passing all multicast
		 * addresses through a crc generator, and then using the
		 * high order 8 bits as an index into the 256 bit logical
		 * address filter.  The high order 4 bits selects the word,
		 * while the other 4 bits select the bit within the word
		 * (where bit 0 is the MSB).
		 */

		rxcfg |= CAS_MAC_RX_HASH_FILTER;

		/* Clear hash table */
		for (i = 0; i < 16; i++)
			hash[i] = 0;

		ETHER_FIRST_MULTI(step, ec, enm);
		while (enm != NULL) {
			crc = ether_crc32_le(enm->enm_addrlo,
			    ETHER_ADDR_LEN);

			/* Just want the 8 most significant bits. */
			crc >>= 24;

			/* Set the corresponding bit in the filter. */
			hash[crc >> 4] |= 1 << (15 - (crc & 15));

			ETHER_NEXT_MULTI(step, enm);
		}

		/* Now load the hash table into the chip (if we are using it) */
		for (i = 0; i < 16; i++) {
			bus_space_write_4(t, h,
			    CAS_MAC_HASH0 + i * (CAS_MAC_HASH1 - CAS_MAC_HASH0),
			    hash[i]);
		}
	}

	bus_space_write_4(t, h, CAS_MAC_RX_CONFIG, rxcfg);
}

/*
 * Load an outgoing packet into the TX descriptor ring starting at
 * *bixp.  On success *bixp is advanced past the packet and the chip is
 * kicked; returns ENOBUFS if the mbuf cannot be mapped or the ring
 * would overflow (at least two descriptors are kept free).
 */
int
cas_encap(struct cas_softc *sc, struct mbuf *mhead, u_int32_t *bixp)
{
	u_int64_t flags;
	u_int32_t cur, frag, i;
	bus_dmamap_t map;

	cur = frag = *bixp;
	map = sc->sc_txd[cur].sd_map;

	if (bus_dmamap_load_mbuf(sc->sc_dmatag, map, mhead,
	    BUS_DMA_NOWAIT) != 0) {
		return (ENOBUFS);
	}

	/* Keep two descriptors free so producer never meets consumer. */
	if ((sc->sc_tx_cnt + map->dm_nsegs) > (CAS_NTXDESC - 2)) {
		bus_dmamap_unload(sc->sc_dmatag, map);
		return (ENOBUFS);
	}

	bus_dmamap_sync(sc->sc_dmatag, map, 0, map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	/* One descriptor per DMA segment; flag first and last. */
	for (i = 0; i < map->dm_nsegs; i++) {
		sc->sc_txdescs[frag].cd_addr =
		    CAS_DMA_WRITE(map->dm_segs[i].ds_addr);
		flags = (map->dm_segs[i].ds_len & CAS_TD_BUFSIZE) |
		    (i == 0 ? CAS_TD_START_OF_PACKET : 0) |
		    ((i == (map->dm_nsegs - 1)) ?
		    CAS_TD_END_OF_PACKET : 0);
		sc->sc_txdescs[frag].cd_flags = CAS_DMA_WRITE(flags);
		bus_dmamap_sync(sc->sc_dmatag, sc->sc_cddmamap,
		    CAS_CDTXOFF(frag), sizeof(struct cas_desc),
		    BUS_DMASYNC_PREWRITE);
		cur = frag;
		if (++frag == CAS_NTXDESC)
			frag = 0;
	}

	sc->sc_tx_cnt += map->dm_nsegs;
	/*
	 * Swap the maps of the first and last descriptor so the map is
	 * unloaded from the slot where the mbuf is stored (cur).
	 */
	sc->sc_txd[*bixp].sd_map = sc->sc_txd[cur].sd_map;
	sc->sc_txd[cur].sd_map = map;
	sc->sc_txd[cur].sd_mbuf = mhead;

	/* Hand the new descriptors to the chip. */
	bus_space_write_4(sc->sc_memt, sc->sc_memh, CAS_TX_KICK, frag);

	*bixp = frag;

	/* sync descriptors */

	return (0);
}

/*
 * Transmit interrupt.
 */
int
cas_tint(struct cas_softc *sc, u_int32_t status)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct cas_sxd *sd;
	u_int32_t cons, comp;

	/* Reclaim every descriptor the chip has finished with. */
	comp = bus_space_read_4(sc->sc_memt, sc->sc_memh, CAS_TX_COMPLETION);
	cons = sc->sc_tx_cons;
	while (cons != comp) {
		sd = &sc->sc_txd[cons];
		if (sd->sd_mbuf != NULL) {
			bus_dmamap_sync(sc->sc_dmatag, sd->sd_map, 0,
			    sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmatag, sd->sd_map);
			m_freem(sd->sd_mbuf);
			sd->sd_mbuf = NULL;
			ifp->if_opackets++;
		}
		sc->sc_tx_cnt--;
		if (++cons == CAS_NTXDESC)
			cons = 0;
	}
	sc->sc_tx_cons = cons;

	/* Ring has room again; clear OACTIVE and cancel the watchdog. */
	if (sc->sc_tx_cnt < CAS_NTXDESC - 2)
		ifp->if_flags &= ~IFF_OACTIVE;
	if (sc->sc_tx_cnt == 0)
		ifp->if_timer = 0;

	cas_start(ifp);

	return (1);
}

/*
 * ifnet if_start routine: drain the send queue into the TX descriptor
 * ring until the ring fills or the queue empties.
 */
void
cas_start(struct ifnet *ifp)
{
	struct cas_softc *sc = ifp->if_softc;
	struct mbuf *m;
	u_int32_t bix;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	bix = sc->sc_tx_prod;
	while (sc->sc_txd[bix].sd_mbuf == NULL) {
		/* Peek first; only dequeue once the encap succeeds. */
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;

		/*
		 * If BPF is listening on this interface, let it see the
		 * packet before we commit it to the wire.
		 */
		bpf_mtap(ifp, m);

		/*
		 * Encapsulate this packet and start it going...
		 * or fail...
		 */
		if (cas_encap(sc, m, &bix)) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m);
		ifp->if_timer = 5;
	}

	sc->sc_tx_prod = bix;
}