1 /* $NetBSD: if_tl.c,v 1.122 2020/07/07 06:27:37 msaitoh Exp $ */ 2 3 /* 4 * Copyright (c) 1997 Manuel Bouyer. All rights reserved. 5 * 6 * Redistribution and use in source and binary forms, with or without 7 * modification, are permitted provided that the following conditions 8 * are met: 9 * 1. Redistributions of source code must retain the above copyright 10 * notice, this list of conditions and the following disclaimer. 11 * 2. Redistributions in binary form must reproduce the above copyright 12 * notice, this list of conditions and the following disclaimer in the 13 * documentation and/or other materials provided with the distribution. 14 * 15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
25 */ 26 27 /* 28 * Texas Instruments ThunderLAN ethernet controller 29 * ThunderLAN Programmer's Guide (TI Literature Number SPWU013A) 30 * available from www.ti.com 31 */ 32 33 #include <sys/cdefs.h> 34 __KERNEL_RCSID(0, "$NetBSD: if_tl.c,v 1.122 2020/07/07 06:27:37 msaitoh Exp $"); 35 36 #undef TLDEBUG 37 #define TL_PRIV_STATS 38 #undef TLDEBUG_RX 39 #undef TLDEBUG_TX 40 #undef TLDEBUG_ADDR 41 42 #include "opt_inet.h" 43 44 #include <sys/param.h> 45 #include <sys/systm.h> 46 #include <sys/mbuf.h> 47 #include <sys/protosw.h> 48 #include <sys/socket.h> 49 #include <sys/ioctl.h> 50 #include <sys/errno.h> 51 #include <sys/malloc.h> 52 #include <sys/kernel.h> 53 #include <sys/proc.h> /* only for declaration of wakeup() used by vm.h */ 54 #include <sys/device.h> 55 56 #include <net/if.h> 57 #include <net/if_media.h> 58 #include <net/if_types.h> 59 #include <net/if_dl.h> 60 #include <net/route.h> 61 #include <net/netisr.h> 62 #include <net/bpf.h> 63 64 #include <sys/rndsource.h> 65 66 #ifdef INET 67 #include <netinet/in.h> 68 #include <netinet/in_systm.h> 69 #include <netinet/in_var.h> 70 #include <netinet/ip.h> 71 #endif 72 73 74 #if defined(__NetBSD__) 75 #include <net/if_ether.h> 76 #if defined(INET) 77 #include <netinet/if_inarp.h> 78 #endif 79 80 #include <sys/bus.h> 81 #include <sys/intr.h> 82 83 #include <dev/pci/pcireg.h> 84 #include <dev/pci/pcivar.h> 85 #include <dev/pci/pcidevs.h> 86 87 #include <dev/i2c/i2cvar.h> 88 #include <dev/i2c/i2c_bitbang.h> 89 #include <dev/i2c/at24cxxvar.h> 90 91 #include <dev/mii/mii.h> 92 #include <dev/mii/miivar.h> 93 94 #include <dev/mii/tlphyvar.h> 95 96 #include <dev/pci/if_tlregs.h> 97 #include <dev/pci/if_tlvar.h> 98 #endif /* __NetBSD__ */ 99 100 /* number of transmit/receive buffers */ 101 #ifndef TL_NBUF 102 #define TL_NBUF 32 103 #endif 104 105 static int tl_pci_match(device_t, cfdata_t, void *); 106 static void tl_pci_attach(device_t, device_t, void *); 107 static int tl_intr(void *); 108 109 static int 
tl_ifioctl(struct ifnet *, ioctl_cmd_t, void *); 110 static void tl_ifwatchdog(struct ifnet *); 111 static bool tl_shutdown(device_t, int); 112 113 static void tl_ifstart(struct ifnet *); 114 static void tl_reset(tl_softc_t *); 115 static int tl_init(struct ifnet *); 116 static void tl_stop(struct ifnet *, int); 117 static void tl_restart(void *); 118 static int tl_add_RxBuff(tl_softc_t *, struct Rx_list *, struct mbuf *); 119 static void tl_read_stats(tl_softc_t *); 120 static void tl_ticks(void *); 121 static int tl_multicast_hash(uint8_t *); 122 static void tl_addr_filter(tl_softc_t *); 123 124 static uint32_t tl_intreg_read(tl_softc_t *, uint32_t); 125 static void tl_intreg_write(tl_softc_t *, uint32_t, uint32_t); 126 static uint8_t tl_intreg_read_byte(tl_softc_t *, uint32_t); 127 static void tl_intreg_write_byte(tl_softc_t *, uint32_t, uint8_t); 128 129 void tl_mii_sync(struct tl_softc *); 130 void tl_mii_sendbits(struct tl_softc *, uint32_t, int); 131 132 133 #if defined(TLDEBUG_RX) 134 static void ether_printheader(struct ether_header *); 135 #endif 136 137 int tl_mii_read(device_t, int, int, uint16_t *); 138 int tl_mii_write(device_t, int, int, uint16_t); 139 140 void tl_statchg(struct ifnet *); 141 142 /* I2C glue */ 143 static int tl_i2c_send_start(void *, int); 144 static int tl_i2c_send_stop(void *, int); 145 static int tl_i2c_initiate_xfer(void *, i2c_addr_t, int); 146 static int tl_i2c_read_byte(void *, uint8_t *, int); 147 static int tl_i2c_write_byte(void *, uint8_t, int); 148 149 /* I2C bit-bang glue */ 150 static void tl_i2cbb_set_bits(void *, uint32_t); 151 static void tl_i2cbb_set_dir(void *, uint32_t); 152 static uint32_t tl_i2cbb_read(void *); 153 static const struct i2c_bitbang_ops tl_i2cbb_ops = { 154 tl_i2cbb_set_bits, 155 tl_i2cbb_set_dir, 156 tl_i2cbb_read, 157 { 158 TL_NETSIO_EDATA, /* SDA */ 159 TL_NETSIO_ECLOCK, /* SCL */ 160 TL_NETSIO_ETXEN, /* SDA is output */ 161 0, /* SDA is input */ 162 } 163 }; 164 165 static inline void 
netsio_clr(tl_softc_t *, uint8_t);
static inline void netsio_set(tl_softc_t *, uint8_t);
static inline uint8_t netsio_read(tl_softc_t *, uint8_t);

/*
 * NetSio accessors: the NetSio internal register carries the serial
 * lines used to bit-bang both the MII management interface and the
 * EEPROM I2C bus.  All three helpers go through the DIO window
 * (tl_intreg_*), so each one costs a DIO address write plus a data
 * access.
 */

/* Clear the given bit(s) in the NetSio register (read-modify-write). */
static inline void
netsio_clr(tl_softc_t *sc, uint8_t bits)
{

    tl_intreg_write_byte(sc, TL_INT_NET + TL_INT_NetSio,
        tl_intreg_read_byte(sc, TL_INT_NET + TL_INT_NetSio) & (~bits));
}

/* Set the given bit(s) in the NetSio register (read-modify-write). */
static inline void
netsio_set(tl_softc_t *sc, uint8_t bits)
{

    tl_intreg_write_byte(sc, TL_INT_NET + TL_INT_NetSio,
        tl_intreg_read_byte(sc, TL_INT_NET + TL_INT_NetSio) | bits);
}

/* Return the current state of the given NetSio bit(s) (non-zero if set). */
static inline uint8_t
netsio_read(tl_softc_t *sc, uint8_t bits)
{

    return tl_intreg_read_byte(sc, TL_INT_NET + TL_INT_NetSio) & bits;
}

CFATTACH_DECL_NEW(tl, sizeof(tl_softc_t),
    tl_pci_match, tl_pci_attach, NULL, NULL);

/*
 * Per-vendor product tables.  tp_flags records which media the on-board
 * TLAN PHY does (or does not) provide; tp_desc doubles as the table
 * terminator (NULL entry).
 */
static const struct tl_product_desc tl_compaq_products[] = {
    { PCI_PRODUCT_COMPAQ_N100TX, TLPHY_MEDIA_NO_10_T,
      "Compaq Netelligent 10/100 TX" },
    { PCI_PRODUCT_COMPAQ_INT100TX, TLPHY_MEDIA_NO_10_T,
      "Integrated Compaq Netelligent 10/100 TX" },
    { PCI_PRODUCT_COMPAQ_N10T, TLPHY_MEDIA_10_5,
      "Compaq Netelligent 10 T" },
    { PCI_PRODUCT_COMPAQ_N10T2, TLPHY_MEDIA_10_2,
      "Compaq Netelligent 10 T/2 UTP/Coax" },
    { PCI_PRODUCT_COMPAQ_IntNF3P, TLPHY_MEDIA_10_2,
      "Compaq Integrated NetFlex 3/P" },
    { PCI_PRODUCT_COMPAQ_IntPL100TX, TLPHY_MEDIA_10_2 | TLPHY_MEDIA_NO_10_T,
      "Compaq ProLiant Integrated Netelligent 10/100 TX" },
    { PCI_PRODUCT_COMPAQ_DPNet100TX, TLPHY_MEDIA_10_5 | TLPHY_MEDIA_NO_10_T,
      "Compaq Dual Port Netelligent 10/100 TX" },
    { PCI_PRODUCT_COMPAQ_DP4000, TLPHY_MEDIA_10_5 | TLPHY_MEDIA_NO_10_T,
      "Compaq Deskpro 4000 5233MMX" },
    { PCI_PRODUCT_COMPAQ_NF3P_BNC, TLPHY_MEDIA_10_2,
      "Compaq NetFlex 3/P w/ BNC" },
    { PCI_PRODUCT_COMPAQ_NF3P, TLPHY_MEDIA_10_5,
      "Compaq NetFlex 3/P" },
    { 0, 0, NULL },
};

static const struct tl_product_desc tl_ti_products[] = {
    /*
     * Built-in Ethernet on the TI TravelMate 5000
     * docking station; better product description?
     */
    { PCI_PRODUCT_TI_TLAN, 0,
      "Texas Instruments ThunderLAN" },
    { 0, 0, NULL },
};

/* Maps a PCI vendor ID to that vendor's product table above. */
struct tl_vendor_desc {
    uint32_t tv_vendor;
    const struct tl_product_desc *tv_products;
};

const struct tl_vendor_desc tl_vendors[] = {
    { PCI_VENDOR_COMPAQ, tl_compaq_products },
    { PCI_VENDOR_TI, tl_ti_products },
    { 0, NULL },
};

static const struct tl_product_desc *tl_lookup_product(uint32_t);

/*
 * Look up a PCI id (vendor+product word) in the tables above.
 * Returns the matching product descriptor, or NULL if either the
 * vendor or the product is unknown.
 */
static const struct tl_product_desc *
tl_lookup_product(uint32_t id)
{
    const struct tl_product_desc *tp;
    const struct tl_vendor_desc *tv;

    /* Find the vendor's product table; loop ends on the NULL sentinel. */
    for (tv = tl_vendors; tv->tv_products != NULL; tv++)
        if (PCI_VENDOR(id) == tv->tv_vendor)
            break;

    if ((tp = tv->tv_products) == NULL)
        return NULL;

    /* Scan the vendor's products; loop ends on the NULL-desc sentinel. */
    for (; tp->tp_desc != NULL; tp++)
        if (PCI_PRODUCT(id) == tp->tp_product)
            break;

    if (tp->tp_desc == NULL)
        return NULL;

    return tp;
}

/*
 * Autoconf match: claim any PCI device found in the product tables.
 */
static int
tl_pci_match(device_t parent, cfdata_t cf, void *aux)
{
    struct pci_attach_args *pa = (struct pci_attach_args *)aux;

    if (tl_lookup_product(pa->pa_id) != NULL)
        return 1;

    return 0;
}

/*
 * Autoconf attach: map the device registers, read the MAC address from
 * the on-board serial EEPROM (bit-banged I2C), hook up the interrupt,
 * allocate DMA control memory, probe the MII bus and register the
 * network interface.
 */
static void
tl_pci_attach(device_t parent, device_t self, void *aux)
{
    tl_softc_t *sc = device_private(self);
    struct pci_attach_args * const pa = (struct pci_attach_args *)aux;
    const struct tl_product_desc *tp;
    struct ifnet * const ifp = &sc->tl_if;
    struct mii_data * const mii = &sc->tl_mii;
    bus_space_tag_t iot, memt;
    bus_space_handle_t ioh, memh;
    pci_intr_handle_t intrhandle;
    const char *intrstr;
    int ioh_valid, memh_valid;
    int reg_io, reg_mem;
    pcireg_t reg10, reg14;
    pcireg_t csr;
    char intrbuf[PCI_INTRSTR_LEN];

    sc->sc_dev = self;
    aprint_normal("\n");

    callout_init(&sc->tl_tick_ch, 0);
    callout_init(&sc->tl_restart_ch, 0);

    tp = tl_lookup_product(pa->pa_id);
    if (tp == NULL)
        panic("%s: impossible", __func__);   /* match() already succeeded */
    sc->tl_product = tp;

    /*
     * Map the card space. First we have to find the I/O and MEM
     * registers. I/O is supposed to be at 0x10, MEM at 0x14,
     * but some boards (Compaq Netflex 3/P PCI) seem to have it reversed.
     * The ThunderLAN manual is not consistent about this either (there
     * are both cases in code examples).
     */
    reg10 = pci_conf_read(pa->pa_pc, pa->pa_tag, 0x10);
    reg14 = pci_conf_read(pa->pa_pc, pa->pa_tag, 0x14);
    if (PCI_MAPREG_TYPE(reg10) == PCI_MAPREG_TYPE_IO)
        reg_io = 0x10;
    else if (PCI_MAPREG_TYPE(reg14) == PCI_MAPREG_TYPE_IO)
        reg_io = 0x14;
    else
        reg_io = 0;
    if (PCI_MAPREG_TYPE(reg10) == PCI_MAPREG_TYPE_MEM)
        reg_mem = 0x10;
    else if (PCI_MAPREG_TYPE(reg14) == PCI_MAPREG_TYPE_MEM)
        reg_mem = 0x14;
    else
        reg_mem = 0;

    if (reg_io != 0)
        ioh_valid = (pci_mapreg_map(pa, reg_io, PCI_MAPREG_TYPE_IO,
            0, &iot, &ioh, NULL, NULL) == 0);
    else
        ioh_valid = 0;
    /*
     * NOTE(review): reg_mem is computed above but PCI_CBMA is what is
     * actually passed to pci_mapreg_map() here — looks like the memory
     * BAR offset is hard-coded; confirm whether reg_mem was intended.
     */
    if (reg_mem != 0)
        memh_valid = (pci_mapreg_map(pa, PCI_CBMA,
            PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT,
            0, &memt, &memh, NULL, NULL) == 0);
    else
        memh_valid = 0;

    /* Prefer I/O space if it mapped; fall back to memory space. */
    if (ioh_valid) {
        sc->tl_bustag = iot;
        sc->tl_bushandle = ioh;
    } else if (memh_valid) {
        sc->tl_bustag = memt;
        sc->tl_bushandle = memh;
    } else {
        aprint_error_dev(self, "unable to map device registers\n");
        return;
    }
    sc->tl_dmatag = pa->pa_dmat;

    /* Enable the device (bus mastering is needed for DMA). */
    csr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
    pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
        csr | PCI_COMMAND_MASTER_ENABLE);

    aprint_normal_dev(self, "%s\n", tp->tp_desc);

    tl_reset(sc);

    /* fill in the i2c tag used to talk to the MAC-address EEPROM */
    iic_tag_init(&sc->sc_i2c);
    sc->sc_i2c.ic_cookie = sc;
    sc->sc_i2c.ic_send_start = tl_i2c_send_start;
    sc->sc_i2c.ic_send_stop = tl_i2c_send_stop;
    sc->sc_i2c.ic_initiate_xfer = tl_i2c_initiate_xfer;
    sc->sc_i2c.ic_read_byte = tl_i2c_read_byte;
    sc->sc_i2c.ic_write_byte = tl_i2c_write_byte;

#ifdef TLDEBUG
    aprint_debug_dev(self, "default values of INTreg: 0x%x\n",
        tl_intreg_read(sc, TL_INT_Defaults));
#endif

    /* read mac addr from the serial EEPROM at i2c address 0x50 */
    if (seeprom_bootstrap_read(&sc->sc_i2c, 0x50, 0x83, 256 /* 2kbit */,
        sc->tl_enaddr, ETHER_ADDR_LEN)) {
        aprint_error_dev(self, "error reading Ethernet address\n");
        return;
    }
    aprint_normal_dev(self, "Ethernet address %s\n",
        ether_sprintf(sc->tl_enaddr));

    /* Map and establish interrupts */
    if (pci_intr_map(pa, &intrhandle)) {
        aprint_error_dev(self, "couldn't map interrupt\n");
        return;
    }
    intrstr = pci_intr_string(pa->pa_pc, intrhandle, intrbuf,
        sizeof(intrbuf));
    sc->tl_if.if_softc = sc;
    sc->tl_ih = pci_intr_establish_xname(pa->pa_pc, intrhandle, IPL_NET,
        tl_intr, sc, device_xname(self));
    if (sc->tl_ih == NULL) {
        aprint_error_dev(self, "couldn't establish interrupt");
        if (intrstr != NULL)
            aprint_error(" at %s", intrstr);
        aprint_error("\n");
        return;
    }
    aprint_normal_dev(self, "interrupting at %s\n", intrstr);

    /* init these pointers, so that tl_shutdown won't try to read them */
    sc->Rx_list = NULL;
    sc->Tx_list = NULL;

    /*
     * allocate DMA-safe memory for control structs: one page holds the
     * hardware Rx list, the hardware Tx list and the zero-pad buffer
     * (see tl_init for the layout).
     */
    if (bus_dmamem_alloc(sc->tl_dmatag, PAGE_SIZE, 0, PAGE_SIZE,
        &sc->ctrl_segs, 1, &sc->ctrl_nsegs, BUS_DMA_NOWAIT) != 0 ||
        bus_dmamem_map(sc->tl_dmatag, &sc->ctrl_segs,
        sc->ctrl_nsegs, PAGE_SIZE, (void **)&sc->ctrl,
        BUS_DMA_NOWAIT | BUS_DMA_COHERENT) != 0) {
        aprint_error_dev(self, "can't allocate DMA memory for lists\n");
        return;
    }

    /*
     * Initialize our media structures and probe the MII.
     *
     * Note that we don't care about the media instance. We
     * are expecting to have multiple PHYs on the 10/100 cards,
     * and on those cards we exclude the internal PHY from providing
     * 10baseT. By ignoring the instance, it allows us to not have
     * to specify it on the command line when switching media.
     */
    mii->mii_ifp = ifp;
    mii->mii_readreg = tl_mii_read;
    mii->mii_writereg = tl_mii_write;
    mii->mii_statchg = tl_statchg;
    sc->tl_ec.ec_mii = mii;
    ifmedia_init(&mii->mii_media, IFM_IMASK, ether_mediachange,
        ether_mediastatus);
    mii_attach(self, mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 0);
    if (LIST_FIRST(&mii->mii_phys) == NULL) {
        /* no PHY found: provide a dead "none" medium */
        ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL);
        ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE);
    } else
        ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO);

    /*
     * We can support 802.1Q VLAN-sized frames.
     */
    sc->tl_ec.ec_capabilities |= ETHERCAP_VLAN_MTU;

    strlcpy(ifp->if_xname, device_xname(self), IFNAMSIZ);
    ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
    ifp->if_ioctl = tl_ifioctl;
    ifp->if_start = tl_ifstart;
    ifp->if_watchdog = tl_ifwatchdog;
    ifp->if_init = tl_init;
    ifp->if_stop = tl_stop;
    ifp->if_timer = 0;
    IFQ_SET_READY(&ifp->if_snd);
    if_attach(ifp);
    if_deferred_start_init(ifp, NULL);
    ether_ifattach(&(sc)->tl_if, (sc)->tl_enaddr);

    /*
     * Add shutdown hook so that DMA is disabled prior to reboot;
     * there is nothing to do before the driver has initialized.
     */
    if (pmf_device_register1(self, NULL, NULL, tl_shutdown))
        pmf_class_network_register(self, ifp);
    else
        aprint_error_dev(self, "couldn't establish power handler\n");

    rnd_attach_source(&sc->rnd_source, device_xname(self),
        RND_TYPE_NET, RND_FLAG_DEFAULT);
}

/*
 * Hard-reset the adapter: harvest statistics if it was running, assert
 * the adapter-reset bit, mask interrupts, clear the address/hash
 * registers, program NetConfig and the load-timer/threshold, and
 * release the MII from reset.
 */
static void
tl_reset(tl_softc_t *sc)
{
    int i;

    /* read stats before the counters are cleared by the reset */
    if (sc->tl_if.if_flags & IFF_RUNNING) {
        callout_stop(&sc->tl_tick_ch);
        tl_read_stats(sc);
    }
    /* Reset adapter */
    TL_HR_WRITE(sc, TL_HOST_CMD,
        TL_HR_READ(sc, TL_HOST_CMD) | HOST_CMD_Ad_Rst);
    DELAY(100000);
    /* Disable interrupts */
    TL_HR_WRITE(sc, TL_HOST_CMD, HOST_CMD_IntOff);
    /* setup aregs & hash: clear all address and hash registers */
    for (i = TL_INT_Areg0; i <= TL_INT_HASH2; i = i + 4)
        tl_intreg_write(sc, i, 0);
#ifdef TLDEBUG_ADDR
    printf("Areg & hash registers: \n");
    for (i = TL_INT_Areg0; i <= TL_INT_HASH2; i = i + 4)
        printf("    reg %x: %x\n", i, tl_intreg_read(sc, i));
#endif
    /* Setup NetConfig */
    tl_intreg_write(sc, TL_INT_NetConfig,
        TL_NETCONFIG_1F | TL_NETCONFIG_1chn | TL_NETCONFIG_PHY_EN);
    /* Bsize: accept default */
    /* TX commit in Acommit: accept default */
    /* Load Ld_tmr and Ld_thr */
    /* Ld_tmr = 3 */
    TL_HR_WRITE(sc, TL_HOST_CMD, 0x3 | HOST_CMD_LdTmr);
    /* Ld_thr = 0 */
    TL_HR_WRITE(sc, TL_HOST_CMD, 0x0 | HOST_CMD_LdThr);
    /* Unreset MII */
    netsio_set(sc, TL_NETSIO_NMRST);
    DELAY(100000);
    sc->tl_mii.mii_media_status &= ~IFM_ACTIVE;
}

/*
 * pmf shutdown handler: quiesce the interface so no DMA is in flight
 * across a reboot.
 */
static bool
tl_shutdown(device_t self, int howto)
{
    tl_softc_t *sc = device_private(self);
    struct ifnet *ifp = &sc->tl_if;

    tl_stop(ifp, 1);

    return true;
}

/*
 * if_stop handler: halt the Tx/Rx channels, harvest statistics, take
 * the MII down and release all Rx/Tx list resources allocated by
 * tl_init.  No-op if the interface is not running.
 */
static void
tl_stop(struct ifnet *ifp, int disable)
{
    tl_softc_t *sc = ifp->if_softc;
    struct Tx_list *Tx;
    int i;

    if ((ifp->if_flags & IFF_RUNNING) == 0)
        return;
    /* disable interrupts */
    TL_HR_WRITE(sc, TL_HOST_CMD,
        HOST_CMD_IntOff);
    /* stop TX and RX channels */
    TL_HR_WRITE(sc, TL_HOST_CMD,
        HOST_CMD_STOP | HOST_CMD_RT | HOST_CMD_Nes);
    TL_HR_WRITE(sc, TL_HOST_CMD, HOST_CMD_STOP);
    DELAY(100000);

    /* stop statistics reading loop, read stats */
    callout_stop(&sc->tl_tick_ch);
    tl_read_stats(sc);

    /* Down the MII. */
    mii_down(&sc->tl_mii);

    /*
     * deallocate memory allocations.
     * NOTE(review): the Tx teardown below is nested inside the
     * Rx_list check; Rx and Tx lists are allocated together in
     * tl_init, so both are expected to be NULL or non-NULL together.
     */
    if (sc->Rx_list) {
        for (i = 0; i < TL_NBUF; i++) {
            if (sc->Rx_list[i].m) {
                bus_dmamap_unload(sc->tl_dmatag,
                    sc->Rx_list[i].m_dmamap);
                m_freem(sc->Rx_list[i].m);
            }
            bus_dmamap_destroy(sc->tl_dmatag,
                sc->Rx_list[i].m_dmamap);
            sc->Rx_list[i].m = NULL;
        }
        free(sc->Rx_list, M_DEVBUF);
        sc->Rx_list = NULL;
        bus_dmamap_unload(sc->tl_dmatag, sc->Rx_dmamap);
        bus_dmamap_destroy(sc->tl_dmatag, sc->Rx_dmamap);
        sc->hw_Rx_list = NULL;
        /* drain pending transmits back onto the free list */
        while ((Tx = sc->active_Tx) != NULL) {
            Tx->hw_list->stat = 0;
            bus_dmamap_unload(sc->tl_dmatag, Tx->m_dmamap);
            bus_dmamap_destroy(sc->tl_dmatag, Tx->m_dmamap);
            m_freem(Tx->m);
            sc->active_Tx = Tx->next;
            Tx->next = sc->Free_Tx;
            sc->Free_Tx = Tx;
        }
        sc->last_Tx = NULL;
        free(sc->Tx_list, M_DEVBUF);
        sc->Tx_list = NULL;
        bus_dmamap_unload(sc->tl_dmatag, sc->Tx_dmamap);
        bus_dmamap_destroy(sc->tl_dmatag, sc->Tx_dmamap);
        sc->hw_Tx_list = NULL;
    }
    ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);
    ifp->if_timer = 0;
    sc->tl_mii.mii_media_status &= ~IFM_ACTIVE;
}

/* callout handler: re-run tl_init after a reset (v is the ifnet). */
static void
tl_restart(void *v)
{

    tl_init(v);
}

/*
 * if_init handler: (re)initialize the chip and bring the interface up.
 * Stops/resets the adapter, programs MAC address and filters, allocates
 * and chains the Rx/Tx descriptor lists with their DMA maps, selects
 * media and starts the Rx channel.  Returns 0 on success or an errno;
 * on failure the reason is printed and partially allocated resources
 * are left for the next tl_stop to reclaim.
 */
static int
tl_init(struct ifnet *ifp)
{
    tl_softc_t *sc = ifp->if_softc;
    int i, s, error;
    bus_size_t boundary;
    prop_number_t prop_boundary;
    const char *errstring;
    char *nullbuf;

    s = splnet();
    /* cancel any pending IO */
    tl_stop(ifp, 1);
    tl_reset(sc);
    if ((sc->tl_if.if_flags & IFF_UP) == 0) {
        splx(s);
        return 0;
    }
    /* Set various registers to reasonable values */
    /* setup NetCmd in promisc mode if needed */
    i = (ifp->if_flags & IFF_PROMISC) ? TL_NETCOMMAND_CAF : 0;
    tl_intreg_write_byte(sc, TL_INT_NET + TL_INT_NetCmd,
        TL_NETCOMMAND_NRESET | TL_NETCOMMAND_NWRAP | i);
    /* Max receive size : MCLBYTES, split across the low/high bytes */
    tl_intreg_write_byte(sc, TL_INT_MISC + TL_MISC_MaxRxL, MCLBYTES & 0xff);
    tl_intreg_write_byte(sc, TL_INT_MISC + TL_MISC_MaxRxH,
        (MCLBYTES >> 8) & 0xff);

    /* init MAC addr */
    for (i = 0; i < ETHER_ADDR_LEN; i++)
        tl_intreg_write_byte(sc, TL_INT_Areg0 + i, sc->tl_enaddr[i]);
    /* add multicast filters */
    tl_addr_filter(sc);
#ifdef TLDEBUG_ADDR
    printf("Wrote Mac addr, Areg & hash registers are now: \n");
    for (i = TL_INT_Areg0; i <= TL_INT_HASH2; i = i + 4)
        printf("    reg %x: %x\n", i, tl_intreg_read(sc, i));
#endif

    /* Pre-allocate receive mbufs, make the software Rx/Tx lists */
    sc->Rx_list = malloc(sizeof(struct Rx_list) * TL_NBUF, M_DEVBUF,
        M_NOWAIT | M_ZERO);
    sc->Tx_list = malloc(sizeof(struct Tx_list) * TL_NBUF, M_DEVBUF,
        M_NOWAIT | M_ZERO);
    if (sc->Rx_list == NULL || sc->Tx_list == NULL) {
        errstring = "out of memory for lists";
        error = ENOMEM;
        goto bad;
    }

    /*
     * Some boards (Set Engineering GFE) do not permit DMA transfers
     * across page boundaries.
     */
    prop_boundary = prop_dictionary_get(device_properties(sc->sc_dev),
        "tl-dma-page-boundary");
    if (prop_boundary != NULL) {
        KASSERT(prop_object_type(prop_boundary) == PROP_TYPE_NUMBER);
        boundary
            = (bus_size_t)prop_number_unsigned_value(prop_boundary);
    } else {
        boundary = 0;   /* no boundary restriction */
    }

    /* DMA maps for the hardware Rx list, Tx list and zero-pad buffer */
    error = bus_dmamap_create(sc->tl_dmatag,
        sizeof(struct tl_Rx_list) * TL_NBUF, 1,
        sizeof(struct tl_Rx_list) * TL_NBUF, 0, BUS_DMA_WAITOK,
        &sc->Rx_dmamap);
    if (error == 0)
        error = bus_dmamap_create(sc->tl_dmatag,
            sizeof(struct tl_Tx_list) * TL_NBUF, 1,
            sizeof(struct tl_Tx_list) * TL_NBUF, boundary,
            BUS_DMA_WAITOK, &sc->Tx_dmamap);
    if (error == 0)
        error = bus_dmamap_create(sc->tl_dmatag, ETHER_MIN_TX, 1,
            ETHER_MIN_TX, boundary, BUS_DMA_WAITOK,
            &sc->null_dmamap);
    if (error) {
        errstring = "can't allocate DMA maps for lists";
        goto bad;
    }
    /*
     * Carve the pre-allocated control page (sc->ctrl) into the hardware
     * Rx list, then the hardware Tx list, then the zero-pad buffer used
     * to pad short transmit frames to ETHER_MIN_TX.
     */
    memset(sc->ctrl, 0, PAGE_SIZE);
    sc->hw_Rx_list = (void *)sc->ctrl;
    sc->hw_Tx_list =
        (void *)(sc->ctrl + sizeof(struct tl_Rx_list) * TL_NBUF);
    nullbuf = sc->ctrl + sizeof(struct tl_Rx_list) * TL_NBUF +
        sizeof(struct tl_Tx_list) * TL_NBUF;
    error = bus_dmamap_load(sc->tl_dmatag, sc->Rx_dmamap,
        sc->hw_Rx_list, sizeof(struct tl_Rx_list) * TL_NBUF, NULL,
        BUS_DMA_WAITOK);
    if (error == 0)
        error = bus_dmamap_load(sc->tl_dmatag, sc->Tx_dmamap,
            sc->hw_Tx_list, sizeof(struct tl_Tx_list) * TL_NBUF, NULL,
            BUS_DMA_WAITOK);
    if (error == 0)
        error = bus_dmamap_load(sc->tl_dmatag, sc->null_dmamap,
            nullbuf, ETHER_MIN_TX, NULL, BUS_DMA_WAITOK);
    if (error) {
        errstring = "can't DMA map DMA memory for lists";
        goto bad;
    }
    /* per-buffer DMA maps, Rx mbufs, and hardware list chaining */
    for (i = 0; i < TL_NBUF; i++) {
        error = bus_dmamap_create(sc->tl_dmatag, MCLBYTES,
            1, MCLBYTES, boundary, BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
            &sc->Rx_list[i].m_dmamap);
        if (error == 0) {
            error = bus_dmamap_create(sc->tl_dmatag, MCLBYTES,
                TL_NSEG, MCLBYTES, boundary,
                BUS_DMA_WAITOK | BUS_DMA_ALLOCNOW,
                &sc->Tx_list[i].m_dmamap);
        }
        if (error) {
            errstring = "can't allocate DMA maps for mbufs";
            goto bad;
        }
        sc->Rx_list[i].hw_list = &sc->hw_Rx_list[i];
        sc->Rx_list[i].hw_listaddr = sc->Rx_dmamap->dm_segs[0].ds_addr
            + sizeof(struct tl_Rx_list) * i;
        sc->Tx_list[i].hw_list = &sc->hw_Tx_list[i];
        sc->Tx_list[i].hw_listaddr = sc->Tx_dmamap->dm_segs[0].ds_addr
            + sizeof(struct tl_Tx_list) * i;
        if (tl_add_RxBuff(sc, &sc->Rx_list[i], NULL) == 0) {
            errstring = "out of mbuf for receive list";
            error = ENOMEM;
            goto bad;
        }
        if (i > 0) { /* chain the list */
            sc->Rx_list[i - 1].next = &sc->Rx_list[i];
            sc->hw_Rx_list[i - 1].fwd =
                htole32(sc->Rx_list[i].hw_listaddr);
            sc->Tx_list[i - 1].next = &sc->Tx_list[i];
        }
    }
    /* terminate both chains */
    sc->hw_Rx_list[TL_NBUF - 1].fwd = 0;
    sc->Rx_list[TL_NBUF - 1].next = NULL;
    sc->hw_Tx_list[TL_NBUF - 1].fwd = 0;
    sc->Tx_list[TL_NBUF - 1].next = NULL;

    sc->active_Rx = &sc->Rx_list[0];
    sc->last_Rx = &sc->Rx_list[TL_NBUF - 1];
    sc->active_Tx = sc->last_Tx = NULL;
    sc->Free_Tx = &sc->Tx_list[0];
    /* hand the lists to the hardware */
    bus_dmamap_sync(sc->tl_dmatag, sc->Rx_dmamap, 0,
        sizeof(struct tl_Rx_list) * TL_NBUF,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    bus_dmamap_sync(sc->tl_dmatag, sc->Tx_dmamap, 0,
        sizeof(struct tl_Tx_list) * TL_NBUF,
        BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
    bus_dmamap_sync(sc->tl_dmatag, sc->null_dmamap, 0, ETHER_MIN_TX,
        BUS_DMASYNC_PREWRITE);

    /* set media; ENXIO (no PHY) is not fatal here */
    if ((error = mii_mediachg(&sc->tl_mii)) == ENXIO)
        error = 0;
    else if (error != 0) {
        errstring = "could not set media";
        goto bad;
    }

    /* start ticks calls */
    callout_reset(&sc->tl_tick_ch, hz, tl_ticks, sc);
    /* write address of Rx list and enable interrupts */
    TL_HR_WRITE(sc, TL_HOST_CH_PARM, sc->Rx_list[0].hw_listaddr);
    TL_HR_WRITE(sc, TL_HOST_CMD,
        HOST_CMD_GO |
HOST_CMD_RT | HOST_CMD_Nes | HOST_CMD_IntOn); 755 sc->tl_if.if_flags |= IFF_RUNNING; 756 sc->tl_if.if_flags &= ~IFF_OACTIVE; 757 splx(s); 758 return 0; 759 bad: 760 printf("%s: %s\n", device_xname(sc->sc_dev), errstring); 761 splx(s); 762 return error; 763 } 764 765 766 static uint32_t 767 tl_intreg_read(tl_softc_t *sc, uint32_t reg) 768 { 769 770 TL_HR_WRITE(sc, TL_HOST_INTR_DIOADR, reg & TL_HOST_DIOADR_MASK); 771 return TL_HR_READ(sc, TL_HOST_DIO_DATA); 772 } 773 774 static uint8_t 775 tl_intreg_read_byte(tl_softc_t *sc, uint32_t reg) 776 { 777 778 TL_HR_WRITE(sc, TL_HOST_INTR_DIOADR, 779 (reg & (~0x07)) & TL_HOST_DIOADR_MASK); 780 return TL_HR_READ_BYTE(sc, TL_HOST_DIO_DATA + (reg & 0x07)); 781 } 782 783 static void 784 tl_intreg_write(tl_softc_t *sc, uint32_t reg, uint32_t val) 785 { 786 787 TL_HR_WRITE(sc, TL_HOST_INTR_DIOADR, reg & TL_HOST_DIOADR_MASK); 788 TL_HR_WRITE(sc, TL_HOST_DIO_DATA, val); 789 } 790 791 static void 792 tl_intreg_write_byte(tl_softc_t *sc, uint32_t reg, uint8_t val) 793 { 794 795 TL_HR_WRITE(sc, TL_HOST_INTR_DIOADR, 796 (reg & (~0x03)) & TL_HOST_DIOADR_MASK); 797 TL_HR_WRITE_BYTE(sc, TL_HOST_DIO_DATA + (reg & 0x03), val); 798 } 799 800 void 801 tl_mii_sync(struct tl_softc *sc) 802 { 803 int i; 804 805 netsio_clr(sc, TL_NETSIO_MTXEN); 806 for (i = 0; i < 32; i++) { 807 netsio_clr(sc, TL_NETSIO_MCLK); 808 netsio_set(sc, TL_NETSIO_MCLK); 809 } 810 } 811 812 void 813 tl_mii_sendbits(struct tl_softc *sc, uint32_t data, int nbits) 814 { 815 int i; 816 817 netsio_set(sc, TL_NETSIO_MTXEN); 818 for (i = 1 << (nbits - 1); i; i = i >> 1) { 819 netsio_clr(sc, TL_NETSIO_MCLK); 820 netsio_read(sc, TL_NETSIO_MCLK); 821 if (data & i) 822 netsio_set(sc, TL_NETSIO_MDATA); 823 else 824 netsio_clr(sc, TL_NETSIO_MDATA); 825 netsio_set(sc, TL_NETSIO_MCLK); 826 netsio_read(sc, TL_NETSIO_MCLK); 827 } 828 } 829 830 int 831 tl_mii_read(device_t self, int phy, int reg, uint16_t *val) 832 { 833 struct tl_softc *sc = device_private(self); 834 uint16_t data = 0; 835 
    int i, err;

    /*
     * Read the PHY register by manually driving the MII control lines.
     */

    tl_mii_sync(sc);
    tl_mii_sendbits(sc, MII_COMMAND_START, 2);
    tl_mii_sendbits(sc, MII_COMMAND_READ, 2);
    tl_mii_sendbits(sc, phy, 5);
    tl_mii_sendbits(sc, reg, 5);

    /* turnaround: release the data line and idle one clock */
    netsio_clr(sc, TL_NETSIO_MTXEN);
    netsio_clr(sc, TL_NETSIO_MCLK);
    netsio_set(sc, TL_NETSIO_MCLK);
    netsio_clr(sc, TL_NETSIO_MCLK);

    /* the PHY drives MDATA low here on success */
    err = netsio_read(sc, TL_NETSIO_MDATA);
    netsio_set(sc, TL_NETSIO_MCLK);

    /* Even if an error occurs, must still clock out the cycle. */
    for (i = 0; i < 16; i++) {
        data <<= 1;
        netsio_clr(sc, TL_NETSIO_MCLK);
        if (err == 0 && netsio_read(sc, TL_NETSIO_MDATA))
            data |= 1;
        netsio_set(sc, TL_NETSIO_MCLK);
    }
    /* trailing idle clock */
    netsio_clr(sc, TL_NETSIO_MCLK);
    netsio_set(sc, TL_NETSIO_MCLK);

    *val = data;
    return err;
}

/*
 * mii_writereg hook: write 'val' to PHY register 'reg' of PHY 'phy'
 * by bit-banging an MII write frame.  Always returns 0.
 */
int
tl_mii_write(device_t self, int phy, int reg, uint16_t val)
{
    struct tl_softc *sc = device_private(self);

    /*
     * Write the PHY register by manually driving the MII control lines.
     */

    tl_mii_sync(sc);
    tl_mii_sendbits(sc, MII_COMMAND_START, 2);
    tl_mii_sendbits(sc, MII_COMMAND_WRITE, 2);
    tl_mii_sendbits(sc, phy, 5);
    tl_mii_sendbits(sc, reg, 5);
    tl_mii_sendbits(sc, MII_COMMAND_ACK, 2);
    tl_mii_sendbits(sc, val, 16);

    /* trailing idle clock */
    netsio_clr(sc, TL_NETSIO_MCLK);
    netsio_set(sc, TL_NETSIO_MCLK);

    return 0;
}

/*
 * mii_statchg hook: mirror the PHY's duplex setting into the chip's
 * NetCmd register whenever the media status changes.
 */
void
tl_statchg(struct ifnet *ifp)
{
    tl_softc_t *sc = ifp->if_softc;
    uint32_t reg;

#ifdef TLDEBUG
    printf("%s: media %x\n", __func__, sc->tl_mii.mii_media.ifm_media);
#endif

    /*
     * We must keep the ThunderLAN and the PHY in sync as
     * to the status of full-duplex!
     */
    reg = tl_intreg_read_byte(sc, TL_INT_NET + TL_INT_NetCmd);
    if (sc->tl_mii.mii_media_active & IFM_FDX)
        reg |= TL_NETCOMMAND_DUPLEX;
    else
        reg &= ~TL_NETCOMMAND_DUPLEX;
    tl_intreg_write_byte(sc, TL_INT_NET + TL_INT_NetCmd, reg);
}

/********** I2C glue **********/
/*
 * i2c controller hooks: all operations are delegated to the generic
 * i2c_bitbang code, parameterized by tl_i2cbb_ops (NetSio bit layout).
 */

static int
tl_i2c_send_start(void *cookie, int flags)
{

    return i2c_bitbang_send_start(cookie, flags, &tl_i2cbb_ops);
}

static int
tl_i2c_send_stop(void *cookie, int flags)
{

    return i2c_bitbang_send_stop(cookie, flags, &tl_i2cbb_ops);
}

static int
tl_i2c_initiate_xfer(void *cookie, i2c_addr_t addr, int flags)
{

    return i2c_bitbang_initiate_xfer(cookie, addr, flags, &tl_i2cbb_ops);
}

static int
tl_i2c_read_byte(void *cookie, uint8_t *valp, int flags)
{

    return i2c_bitbang_read_byte(cookie, valp, flags, &tl_i2cbb_ops);
}

static int
tl_i2c_write_byte(void *cookie, uint8_t val, int flags)
{

    return i2c_bitbang_write_byte(cookie, val, flags, &tl_i2cbb_ops);
}

/********** I2C bit-bang glue **********/
/*
 * Low-level callbacks for i2c_bitbang: drive/read the SDA and SCL
 * lines, which live in the NetSio register.
 */

/* Set SDA/SCL output levels, leaving the other NetSio bits intact. */
static void
tl_i2cbb_set_bits(void *cookie, uint32_t bits)
{
    struct tl_softc *sc = cookie;
    uint8_t reg;

    reg = tl_intreg_read_byte(sc, TL_INT_NET + TL_INT_NetSio);
    reg = (reg & ~(TL_NETSIO_EDATA | TL_NETSIO_ECLOCK)) | bits;
    tl_intreg_write_byte(sc, TL_INT_NET + TL_INT_NetSio, reg);
}

/* Set the SDA line direction (ETXEN bit = SDA is output). */
static void
tl_i2cbb_set_dir(void *cookie, uint32_t bits)
{
    struct tl_softc *sc = cookie;
    uint8_t reg;

    reg = tl_intreg_read_byte(sc, TL_INT_NET + TL_INT_NetSio);
    reg = (reg & ~TL_NETSIO_ETXEN) | bits;
    tl_intreg_write_byte(sc, TL_INT_NET + TL_INT_NetSio, reg);
}

/* Return the raw NetSio register so the caller can sample SDA/SCL. */
static uint32_t
tl_i2cbb_read(void *cookie)
{

    return tl_intreg_read_byte(cookie, TL_INT_NET + TL_INT_NetSio);
}

/********** End of I2C stuff **********/

static int
tl_intr(void *v) 987 { 988 tl_softc_t *sc = v; 989 struct ifnet *ifp = &sc->tl_if; 990 struct Rx_list *Rx; 991 struct Tx_list *Tx; 992 struct mbuf *m; 993 uint32_t int_type, int_reg; 994 int ack = 0; 995 int size; 996 997 int_reg = TL_HR_READ(sc, TL_HOST_INTR_DIOADR); 998 int_type = int_reg & TL_INTR_MASK; 999 if (int_type == 0) 1000 return 0; 1001 #if defined(TLDEBUG_RX) || defined(TLDEBUG_TX) 1002 printf("%s: interrupt type %x, intr_reg %x\n", 1003 device_xname(sc->sc_dev), int_type, int_reg); 1004 #endif 1005 /* disable interrupts */ 1006 TL_HR_WRITE(sc, TL_HOST_CMD, HOST_CMD_IntOff); 1007 switch (int_type & TL_INTR_MASK) { 1008 case TL_INTR_RxEOF: 1009 bus_dmamap_sync(sc->tl_dmatag, sc->Rx_dmamap, 0, 1010 sizeof(struct tl_Rx_list) * TL_NBUF, 1011 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1012 while (le32toh(sc->active_Rx->hw_list->stat) & 1013 TL_RX_CSTAT_CPLT) { 1014 /* dequeue and requeue at end of list */ 1015 ack++; 1016 Rx = sc->active_Rx; 1017 sc->active_Rx = Rx->next; 1018 bus_dmamap_sync(sc->tl_dmatag, Rx->m_dmamap, 0, 1019 Rx->m_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); 1020 bus_dmamap_unload(sc->tl_dmatag, Rx->m_dmamap); 1021 m = Rx->m; 1022 size = le32toh(Rx->hw_list->stat) >> 16; 1023 #ifdef TLDEBUG_RX 1024 printf("%s: RX list complete, Rx %p, size=%d\n", 1025 __func__, Rx, size); 1026 #endif 1027 if (tl_add_RxBuff(sc, Rx, m) == 0) { 1028 /* 1029 * No new mbuf, reuse the same. 
This means 1030 * that this packet 1031 * is lost 1032 */ 1033 m = NULL; 1034 #ifdef TL_PRIV_STATS 1035 sc->ierr_nomem++; 1036 #endif 1037 #ifdef TLDEBUG 1038 printf("%s: out of mbuf, lost input packet\n", 1039 device_xname(sc->sc_dev)); 1040 #endif 1041 } 1042 Rx->next = NULL; 1043 Rx->hw_list->fwd = 0; 1044 sc->last_Rx->hw_list->fwd = htole32(Rx->hw_listaddr); 1045 sc->last_Rx->next = Rx; 1046 sc->last_Rx = Rx; 1047 1048 /* deliver packet */ 1049 if (m) { 1050 if (size < sizeof(struct ether_header)) { 1051 m_freem(m); 1052 continue; 1053 } 1054 m_set_rcvif(m, ifp); 1055 m->m_pkthdr.len = m->m_len = size; 1056 #ifdef TLDEBUG_RX 1057 { 1058 struct ether_header *eh = 1059 mtod(m, struct ether_header *); 1060 printf("%s: Rx packet:\n", __func__); 1061 ether_printheader(eh); 1062 } 1063 #endif 1064 if_percpuq_enqueue(ifp->if_percpuq, m); 1065 } 1066 } 1067 bus_dmamap_sync(sc->tl_dmatag, sc->Rx_dmamap, 0, 1068 sizeof(struct tl_Rx_list) * TL_NBUF, 1069 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1070 #ifdef TLDEBUG_RX 1071 printf("TL_INTR_RxEOF: ack %d\n", ack); 1072 #else 1073 if (ack == 0) { 1074 printf("%s: EOF intr without anything to read !\n", 1075 device_xname(sc->sc_dev)); 1076 tl_reset(sc); 1077 /* schedule reinit of the board */ 1078 callout_reset(&sc->tl_restart_ch, 1, tl_restart, ifp); 1079 return 1; 1080 } 1081 #endif 1082 break; 1083 case TL_INTR_RxEOC: 1084 ack++; 1085 bus_dmamap_sync(sc->tl_dmatag, sc->Rx_dmamap, 0, 1086 sizeof(struct tl_Rx_list) * TL_NBUF, 1087 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1088 #ifdef TLDEBUG_RX 1089 printf("TL_INTR_RxEOC: ack %d\n", ack); 1090 #endif 1091 #ifdef DIAGNOSTIC 1092 if (le32toh(sc->active_Rx->hw_list->stat) & TL_RX_CSTAT_CPLT) { 1093 printf("%s: Rx EOC interrupt and active Tx list not " 1094 "cleared\n", device_xname(sc->sc_dev)); 1095 return 0; 1096 } else 1097 #endif 1098 { 1099 /* 1100 * write address of Rx list and send Rx GO command, ack 1101 * interrupt and enable interrupts in one command 1102 */ 
1103 TL_HR_WRITE(sc, TL_HOST_CH_PARM, sc->active_Rx->hw_listaddr); 1104 TL_HR_WRITE(sc, TL_HOST_CMD, 1105 HOST_CMD_GO | HOST_CMD_RT | HOST_CMD_Nes | ack | int_type | 1106 HOST_CMD_ACK | HOST_CMD_IntOn); 1107 return 1; 1108 } 1109 case TL_INTR_TxEOF: 1110 case TL_INTR_TxEOC: 1111 bus_dmamap_sync(sc->tl_dmatag, sc->Tx_dmamap, 0, 1112 sizeof(struct tl_Tx_list) * TL_NBUF, 1113 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 1114 while ((Tx = sc->active_Tx) != NULL) { 1115 if ((le32toh(Tx->hw_list->stat) & TL_TX_CSTAT_CPLT) 1116 == 0) 1117 break; 1118 ack++; 1119 #ifdef TLDEBUG_TX 1120 printf("TL_INTR_TxEOC: list 0x%x done\n", 1121 (int)Tx->hw_listaddr); 1122 #endif 1123 Tx->hw_list->stat = 0; 1124 bus_dmamap_sync(sc->tl_dmatag, Tx->m_dmamap, 0, 1125 Tx->m_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1126 bus_dmamap_unload(sc->tl_dmatag, Tx->m_dmamap); 1127 m_freem(Tx->m); 1128 Tx->m = NULL; 1129 sc->active_Tx = Tx->next; 1130 if (sc->active_Tx == NULL) 1131 sc->last_Tx = NULL; 1132 Tx->next = sc->Free_Tx; 1133 sc->Free_Tx = Tx; 1134 } 1135 bus_dmamap_sync(sc->tl_dmatag, sc->Tx_dmamap, 0, 1136 sizeof(struct tl_Tx_list) * TL_NBUF, 1137 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1138 /* if this was an EOC, ACK immediatly */ 1139 if (ack) 1140 sc->tl_if.if_flags &= ~IFF_OACTIVE; 1141 if (int_type == TL_INTR_TxEOC) { 1142 #ifdef TLDEBUG_TX 1143 printf("TL_INTR_TxEOC: ack %d (will be set to 1)\n", 1144 ack); 1145 #endif 1146 TL_HR_WRITE(sc, TL_HOST_CMD, 1 | int_type | 1147 HOST_CMD_ACK | HOST_CMD_IntOn); 1148 if (sc->active_Tx != NULL) { 1149 /* needs a Tx go command */ 1150 TL_HR_WRITE(sc, TL_HOST_CH_PARM, 1151 sc->active_Tx->hw_listaddr); 1152 TL_HR_WRITE(sc, TL_HOST_CMD, HOST_CMD_GO); 1153 } 1154 sc->tl_if.if_timer = 0; 1155 if_schedule_deferred_start(&sc->tl_if); 1156 return 1; 1157 } 1158 #ifdef TLDEBUG 1159 else { 1160 printf("TL_INTR_TxEOF: ack %d\n", ack); 1161 } 1162 #endif 1163 sc->tl_if.if_timer = 0; 1164 if_schedule_deferred_start(&sc->tl_if); 1165 break; 1166 
case TL_INTR_Stat: 1167 ack++; 1168 #ifdef TLDEBUG 1169 printf("TL_INTR_Stat: ack %d\n", ack); 1170 #endif 1171 tl_read_stats(sc); 1172 break; 1173 case TL_INTR_Adc: 1174 if (int_reg & TL_INTVec_MASK) { 1175 /* adapter check conditions */ 1176 printf("%s: check condition, intvect=0x%x, " 1177 "ch_param=0x%x\n", device_xname(sc->sc_dev), 1178 int_reg & TL_INTVec_MASK, 1179 TL_HR_READ(sc, TL_HOST_CH_PARM)); 1180 tl_reset(sc); 1181 /* schedule reinit of the board */ 1182 callout_reset(&sc->tl_restart_ch, 1, tl_restart, ifp); 1183 return 1; 1184 } else { 1185 uint8_t netstat; 1186 /* Network status */ 1187 netstat = 1188 tl_intreg_read_byte(sc, TL_INT_NET+TL_INT_NetSts); 1189 printf("%s: network status, NetSts=%x\n", 1190 device_xname(sc->sc_dev), netstat); 1191 /* Ack interrupts */ 1192 tl_intreg_write_byte(sc, TL_INT_NET+TL_INT_NetSts, 1193 netstat); 1194 ack++; 1195 } 1196 break; 1197 default: 1198 printf("%s: unhandled interrupt code %x!\n", 1199 device_xname(sc->sc_dev), int_type); 1200 ack++; 1201 } 1202 1203 if (ack) { 1204 /* Ack the interrupt and enable interrupts */ 1205 TL_HR_WRITE(sc, TL_HOST_CMD, ack | int_type | HOST_CMD_ACK | 1206 HOST_CMD_IntOn); 1207 rnd_add_uint32(&sc->rnd_source, int_reg); 1208 return 1; 1209 } 1210 /* ack = 0 ; interrupt was perhaps not our. 
Just enable interrupts */ 1211 TL_HR_WRITE(sc, TL_HOST_CMD, HOST_CMD_IntOn); 1212 return 0; 1213 } 1214 1215 static int 1216 tl_ifioctl(struct ifnet *ifp, unsigned long cmd, void *data) 1217 { 1218 struct tl_softc *sc = ifp->if_softc; 1219 int s, error; 1220 1221 s = splnet(); 1222 error = ether_ioctl(ifp, cmd, data); 1223 if (error == ENETRESET) { 1224 if (ifp->if_flags & IFF_RUNNING) 1225 tl_addr_filter(sc); 1226 error = 0; 1227 } 1228 splx(s); 1229 return error; 1230 } 1231 1232 static void 1233 tl_ifstart(struct ifnet *ifp) 1234 { 1235 tl_softc_t *sc = ifp->if_softc; 1236 struct mbuf *mb_head; 1237 struct Tx_list *Tx; 1238 int segment, size; 1239 int again, error; 1240 1241 if ((sc->tl_if.if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 1242 return; 1243 txloop: 1244 /* If we don't have more space ... */ 1245 if (sc->Free_Tx == NULL) { 1246 #ifdef TLDEBUG 1247 printf("%s: No free TX list\n", __func__); 1248 #endif 1249 sc->tl_if.if_flags |= IFF_OACTIVE; 1250 return; 1251 } 1252 /* Grab a paquet for output */ 1253 IFQ_DEQUEUE(&ifp->if_snd, mb_head); 1254 if (mb_head == NULL) { 1255 #ifdef TLDEBUG_TX 1256 printf("%s: nothing to send\n", __func__); 1257 #endif 1258 return; 1259 } 1260 Tx = sc->Free_Tx; 1261 sc->Free_Tx = Tx->next; 1262 Tx->next = NULL; 1263 again = 0; 1264 /* 1265 * Go through each of the mbufs in the chain and initialize 1266 * the transmit list descriptors with the physical address 1267 * and size of the mbuf. 1268 */ 1269 tbdinit: 1270 memset(Tx->hw_list, 0, sizeof(struct tl_Tx_list)); 1271 Tx->m = mb_head; 1272 size = mb_head->m_pkthdr.len; 1273 if ((error = bus_dmamap_load_mbuf(sc->tl_dmatag, Tx->m_dmamap, mb_head, 1274 BUS_DMA_NOWAIT)) || (size < ETHER_MIN_TX && 1275 Tx->m_dmamap->dm_nsegs == TL_NSEG)) { 1276 struct mbuf *mn; 1277 /* 1278 * We ran out of segments, or we will. We have to recopy this 1279 * mbuf chain first. 
1280 */ 1281 if (error == 0) 1282 bus_dmamap_unload(sc->tl_dmatag, Tx->m_dmamap); 1283 if (again) { 1284 /* already copyed, can't do much more */ 1285 m_freem(mb_head); 1286 goto bad; 1287 } 1288 again = 1; 1289 #ifdef TLDEBUG_TX 1290 printf("%s: need to copy mbuf\n", __func__); 1291 #endif 1292 #ifdef TL_PRIV_STATS 1293 sc->oerr_mcopy++; 1294 #endif 1295 MGETHDR(mn, M_DONTWAIT, MT_DATA); 1296 if (mn == NULL) { 1297 m_freem(mb_head); 1298 goto bad; 1299 } 1300 if (mb_head->m_pkthdr.len > MHLEN) { 1301 MCLGET(mn, M_DONTWAIT); 1302 if ((mn->m_flags & M_EXT) == 0) { 1303 m_freem(mn); 1304 m_freem(mb_head); 1305 goto bad; 1306 } 1307 } 1308 m_copydata(mb_head, 0, mb_head->m_pkthdr.len, 1309 mtod(mn, void *)); 1310 mn->m_pkthdr.len = mn->m_len = mb_head->m_pkthdr.len; 1311 m_freem(mb_head); 1312 mb_head = mn; 1313 goto tbdinit; 1314 } 1315 for (segment = 0; segment < Tx->m_dmamap->dm_nsegs; segment++) { 1316 Tx->hw_list->seg[segment].data_addr = 1317 htole32(Tx->m_dmamap->dm_segs[segment].ds_addr); 1318 Tx->hw_list->seg[segment].data_count = 1319 htole32(Tx->m_dmamap->dm_segs[segment].ds_len); 1320 } 1321 bus_dmamap_sync(sc->tl_dmatag, Tx->m_dmamap, 0, 1322 Tx->m_dmamap->dm_mapsize, 1323 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1324 /* We are at end of mbuf chain. 
check the size and 1325 * see if it needs to be extended 1326 */ 1327 if (size < ETHER_MIN_TX) { 1328 #ifdef DIAGNOSTIC 1329 if (segment >= TL_NSEG) { 1330 panic("%s: to much segmets (%d)", __func__, segment); 1331 } 1332 #endif 1333 /* 1334 * add the nullbuf in the seg 1335 */ 1336 Tx->hw_list->seg[segment].data_count = 1337 htole32(ETHER_MIN_TX - size); 1338 Tx->hw_list->seg[segment].data_addr = 1339 htole32(sc->null_dmamap->dm_segs[0].ds_addr); 1340 size = ETHER_MIN_TX; 1341 segment++; 1342 } 1343 /* The list is done, finish the list init */ 1344 Tx->hw_list->seg[segment - 1].data_count |= 1345 htole32(TL_LAST_SEG); 1346 Tx->hw_list->stat = htole32((size << 16) | 0x3000); 1347 #ifdef TLDEBUG_TX 1348 printf("%s: sending, Tx : stat = 0x%x\n", device_xname(sc->sc_dev), 1349 le32toh(Tx->hw_list->stat)); 1350 #if 0 1351 for (segment = 0; segment < TL_NSEG; segment++) { 1352 printf(" seg %d addr 0x%x len 0x%x\n", 1353 segment, 1354 le32toh(Tx->hw_list->seg[segment].data_addr), 1355 le32toh(Tx->hw_list->seg[segment].data_count)); 1356 } 1357 #endif 1358 #endif 1359 if (sc->active_Tx == NULL) { 1360 sc->active_Tx = sc->last_Tx = Tx; 1361 #ifdef TLDEBUG_TX 1362 printf("%s: Tx GO, addr=0x%ux\n", device_xname(sc->sc_dev), 1363 (int)Tx->hw_listaddr); 1364 #endif 1365 bus_dmamap_sync(sc->tl_dmatag, sc->Tx_dmamap, 0, 1366 sizeof(struct tl_Tx_list) * TL_NBUF, 1367 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1368 TL_HR_WRITE(sc, TL_HOST_CH_PARM, Tx->hw_listaddr); 1369 TL_HR_WRITE(sc, TL_HOST_CMD, HOST_CMD_GO); 1370 } else { 1371 #ifdef TLDEBUG_TX 1372 printf("%s: Tx addr=0x%ux queued\n", device_xname(sc->sc_dev), 1373 (int)Tx->hw_listaddr); 1374 #endif 1375 sc->last_Tx->hw_list->fwd = htole32(Tx->hw_listaddr); 1376 bus_dmamap_sync(sc->tl_dmatag, sc->Tx_dmamap, 0, 1377 sizeof(struct tl_Tx_list) * TL_NBUF, 1378 BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE); 1379 sc->last_Tx->next = Tx; 1380 sc->last_Tx = Tx; 1381 #ifdef DIAGNOSTIC 1382 if (sc->last_Tx->hw_list->fwd & 0x7) 1383 
printf("%s: physical addr 0x%x of list not properly " 1384 "aligned\n", 1385 device_xname(sc->sc_dev), 1386 sc->last_Rx->hw_list->fwd); 1387 #endif 1388 } 1389 /* Pass packet to bpf if there is a listener */ 1390 bpf_mtap(ifp, mb_head, BPF_D_OUT); 1391 /* 1392 * Set a 5 second timer just in case we don't hear from the card again. 1393 */ 1394 ifp->if_timer = 5; 1395 goto txloop; 1396 bad: 1397 #ifdef TLDEBUG 1398 printf("%s: Out of mbuf, Tx pkt lost\n", __func__); 1399 #endif 1400 Tx->next = sc->Free_Tx; 1401 sc->Free_Tx = Tx; 1402 } 1403 1404 static void 1405 tl_ifwatchdog(struct ifnet *ifp) 1406 { 1407 tl_softc_t *sc = ifp->if_softc; 1408 1409 if ((ifp->if_flags & IFF_RUNNING) == 0) 1410 return; 1411 printf("%s: device timeout\n", device_xname(sc->sc_dev)); 1412 if_statinc(ifp, if_oerrors); 1413 tl_init(ifp); 1414 } 1415 1416 static int 1417 tl_add_RxBuff(tl_softc_t *sc, struct Rx_list *Rx, struct mbuf *oldm) 1418 { 1419 struct mbuf *m; 1420 int error; 1421 1422 MGETHDR(m, M_DONTWAIT, MT_DATA); 1423 if (m != NULL) { 1424 MCLGET(m, M_DONTWAIT); 1425 if ((m->m_flags & M_EXT) == 0) { 1426 m_freem(m); 1427 if (oldm == NULL) 1428 return 0; 1429 m = oldm; 1430 m->m_data = m->m_ext.ext_buf; 1431 } 1432 } else { 1433 if (oldm == NULL) 1434 return 0; 1435 m = oldm; 1436 m->m_data = m->m_ext.ext_buf; 1437 } 1438 1439 /* (re)init the Rx_list struct */ 1440 1441 Rx->m = m; 1442 if ((error = bus_dmamap_load(sc->tl_dmatag, Rx->m_dmamap, 1443 m->m_ext.ext_buf, m->m_ext.ext_size, NULL, BUS_DMA_NOWAIT)) != 0) { 1444 printf("%s: bus_dmamap_load() failed (error %d) for " 1445 "tl_add_RxBuff ", device_xname(sc->sc_dev), error); 1446 printf("size %d (%d)\n", m->m_pkthdr.len, MCLBYTES); 1447 m_freem(m); 1448 Rx->m = NULL; 1449 return 0; 1450 } 1451 bus_dmamap_sync(sc->tl_dmatag, Rx->m_dmamap, 0, 1452 Rx->m_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 1453 /* 1454 * Move the data pointer up so that the incoming data packet 1455 * will be 32-bit aligned. 
1456 */ 1457 m->m_data += 2; 1458 1459 Rx->hw_list->stat = 1460 htole32(((Rx->m_dmamap->dm_segs[0].ds_len - 2) << 16) | 0x3000); 1461 Rx->hw_list->seg.data_count = 1462 htole32(Rx->m_dmamap->dm_segs[0].ds_len - 2); 1463 Rx->hw_list->seg.data_addr = 1464 htole32(Rx->m_dmamap->dm_segs[0].ds_addr + 2); 1465 return (m != oldm); 1466 } 1467 1468 static void 1469 tl_ticks(void *v) 1470 { 1471 tl_softc_t *sc = v; 1472 1473 tl_read_stats(sc); 1474 1475 /* Tick the MII. */ 1476 mii_tick(&sc->tl_mii); 1477 1478 /* read statistics every seconds */ 1479 callout_reset(&sc->tl_tick_ch, hz, tl_ticks, sc); 1480 } 1481 1482 static void 1483 tl_read_stats(tl_softc_t *sc) 1484 { 1485 uint32_t reg; 1486 int ierr_overr; 1487 int ierr_code; 1488 int ierr_crc; 1489 int oerr_underr; 1490 int oerr_deferred; 1491 int oerr_coll; 1492 int oerr_multicoll; 1493 int oerr_exesscoll; 1494 int oerr_latecoll; 1495 int oerr_carrloss; 1496 struct ifnet *ifp = &sc->tl_if; 1497 1498 net_stat_ref_t nsr = IF_STAT_GETREF(ifp); 1499 1500 reg = tl_intreg_read(sc, TL_INT_STATS_TX); 1501 if_statadd_ref(nsr, if_opackets, reg & 0x00ffffff); 1502 oerr_underr = reg >> 24; 1503 1504 reg = tl_intreg_read(sc, TL_INT_STATS_RX); 1505 ierr_overr = reg >> 24; 1506 1507 reg = tl_intreg_read(sc, TL_INT_STATS_FERR); 1508 ierr_crc = (reg & TL_FERR_CRC) >> 16; 1509 ierr_code = (reg & TL_FERR_CODE) >> 24; 1510 oerr_deferred = (reg & TL_FERR_DEF); 1511 1512 reg = tl_intreg_read(sc, TL_INT_STATS_COLL); 1513 oerr_multicoll = (reg & TL_COL_MULTI); 1514 oerr_coll = (reg & TL_COL_SINGLE) >> 16; 1515 1516 reg = tl_intreg_read(sc, TL_INT_LERR); 1517 oerr_exesscoll = (reg & TL_LERR_ECOLL); 1518 oerr_latecoll = (reg & TL_LERR_LCOLL) >> 8; 1519 oerr_carrloss = (reg & TL_LERR_CL) >> 16; 1520 1521 if_statadd_ref(nsr, if_oerrors, 1522 oerr_underr + oerr_exesscoll + oerr_latecoll + oerr_carrloss); 1523 if_statadd_ref(nsr, if_collisions, oerr_coll + oerr_multicoll); 1524 if_statadd_ref(nsr, if_ierrors, ierr_overr + ierr_code + ierr_crc); 1525 
IF_STAT_PUTREF(ifp); 1526 1527 if (ierr_overr) 1528 printf("%s: receiver ring buffer overrun\n", 1529 device_xname(sc->sc_dev)); 1530 if (oerr_underr) 1531 printf("%s: transmit buffer underrun\n", 1532 device_xname(sc->sc_dev)); 1533 #ifdef TL_PRIV_STATS 1534 sc->ierr_overr += ierr_overr; 1535 sc->ierr_code += ierr_code; 1536 sc->ierr_crc += ierr_crc; 1537 sc->oerr_underr += oerr_underr; 1538 sc->oerr_deferred += oerr_deferred; 1539 sc->oerr_coll += oerr_coll; 1540 sc->oerr_multicoll += oerr_multicoll; 1541 sc->oerr_exesscoll += oerr_exesscoll; 1542 sc->oerr_latecoll += oerr_latecoll; 1543 sc->oerr_carrloss += oerr_carrloss; 1544 #endif 1545 } 1546 1547 static void 1548 tl_addr_filter(tl_softc_t *sc) 1549 { 1550 struct ethercom *ec = &sc->tl_ec; 1551 struct ether_multistep step; 1552 struct ether_multi *enm; 1553 uint32_t hash[2] = {0, 0}; 1554 int i; 1555 1556 sc->tl_if.if_flags &= ~IFF_ALLMULTI; 1557 ETHER_LOCK(ec); 1558 ETHER_FIRST_MULTI(step, ec, enm); 1559 while (enm != NULL) { 1560 #ifdef TLDEBUG 1561 printf("%s: addrs %s %s\n", __func__, 1562 ether_sprintf(enm->enm_addrlo), 1563 ether_sprintf(enm->enm_addrhi)); 1564 #endif 1565 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, 6) == 0) { 1566 i = tl_multicast_hash(enm->enm_addrlo); 1567 hash[i / 32] |= 1 << (i%32); 1568 } else { 1569 hash[0] = hash[1] = 0xffffffff; 1570 sc->tl_if.if_flags |= IFF_ALLMULTI; 1571 break; 1572 } 1573 ETHER_NEXT_MULTI(step, enm); 1574 } 1575 ETHER_UNLOCK(ec); 1576 #ifdef TLDEBUG 1577 printf("%s: hash1 %x has2 %x\n", __func__, hash[0], hash[1]); 1578 #endif 1579 tl_intreg_write(sc, TL_INT_HASH1, hash[0]); 1580 tl_intreg_write(sc, TL_INT_HASH2, hash[1]); 1581 } 1582 1583 static int 1584 tl_multicast_hash(uint8_t *a) 1585 { 1586 int hash; 1587 1588 #define DA(addr, bit) (addr[5 - (bit / 8)] & (1 << (bit % 8))) 1589 #define xor8(a, b, c, d, e, f, g, h) \ 1590 (((a != 0) + (b != 0) + (c != 0) + (d != 0) + \ 1591 (e != 0) + (f != 0) + (g != 0) + (h != 0)) & 1) 1592 1593 hash = xor8(DA(a,0), 
DA(a, 6), DA(a,12), DA(a,18), DA(a,24), DA(a,30), 1594 DA(a,36), DA(a,42)); 1595 hash |= xor8(DA(a,1), DA(a, 7), DA(a,13), DA(a,19), DA(a,25), DA(a,31), 1596 DA(a,37), DA(a,43)) << 1; 1597 hash |= xor8(DA(a,2), DA(a, 8), DA(a,14), DA(a,20), DA(a,26), DA(a,32), 1598 DA(a,38), DA(a,44)) << 2; 1599 hash |= xor8(DA(a,3), DA(a, 9), DA(a,15), DA(a,21), DA(a,27), DA(a,33), 1600 DA(a,39), DA(a,45)) << 3; 1601 hash |= xor8(DA(a,4), DA(a,10), DA(a,16), DA(a,22), DA(a,28), DA(a,34), 1602 DA(a,40), DA(a,46)) << 4; 1603 hash |= xor8(DA(a,5), DA(a,11), DA(a,17), DA(a,23), DA(a,29), DA(a,35), 1604 DA(a,41), DA(a,47)) << 5; 1605 1606 return hash; 1607 } 1608 1609 #if defined(TLDEBUG_RX) 1610 void 1611 ether_printheader(struct ether_header *eh) 1612 { 1613 uint8_t *c = (uint8_t *)eh; 1614 int i; 1615 1616 for (i = 0; i < sizeof(struct ether_header); i++) 1617 printf("%02x ", (u_int)c[i]); 1618 printf("\n"); 1619 } 1620 #endif 1621