1 /* $NetBSD: if_ti.c,v 1.17 2000/12/28 22:59:13 sommerfeld Exp $ */ 2 3 /* 4 * Copyright (c) 1997, 1998, 1999 5 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Bill Paul. 18 * 4. Neither the name of the author nor the names of any co-contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 33 * 34 * FreeBSD Id: if_ti.c,v 1.15 1999/08/14 15:45:03 wpaul Exp 35 */ 36 37 /* 38 * Alteon Networks Tigon PCI gigabit ethernet driver for FreeBSD. 39 * Manuals, sample driver and firmware source kits are available 40 * from http://www.alteon.com/support/openkits. 41 * 42 * Written by Bill Paul <wpaul@ctr.columbia.edu> 43 * Electrical Engineering Department 44 * Columbia University, New York City 45 */ 46 47 /* 48 * The Alteon Networks Tigon chip contains an embedded R4000 CPU, 49 * gigabit MAC, dual DMA channels and a PCI interface unit. NICs 50 * using the Tigon may have anywhere from 512K to 2MB of SRAM. The 51 * Tigon supports hardware IP, TCP and UCP checksumming, multicast 52 * filtering and jumbo (9014 byte) frames. The hardware is largely 53 * controlled by firmware, which must be loaded into the NIC during 54 * initialization. 55 * 56 * The Tigon 2 contains 2 R4000 CPUs and requires a newer firmware 57 * revision, which supports new features such as extended commands, 58 * extended jumbo receive ring desciptors and a mini receive ring. 59 * 60 * Alteon Networks is to be commended for releasing such a vast amount 61 * of development material for the Tigon NIC without requiring an NDA 62 * (although they really should have done it a long time ago). With 63 * any luck, the other vendors will finally wise up and follow Alteon's 64 * stellar example. 65 * 66 * The firmware for the Tigon 1 and 2 NICs is compiled directly into 67 * this driver by #including it as a C header file. This bloats the 68 * driver somewhat, but it's the easiest method considering that the 69 * driver code and firmware code need to be kept in sync. 
The source 70 * for the firmware is not provided with the FreeBSD distribution since 71 * compiling it requires a GNU toolchain targeted for mips-sgi-irix5.3. 72 * 73 * The following people deserve special thanks: 74 * - Terry Murphy of 3Com, for providing a 3c985 Tigon 1 board 75 * for testing 76 * - Raymond Lee of Netgear, for providing a pair of Netgear 77 * GA620 Tigon 2 boards for testing 78 * - Ulf Zimmermann, for bringing the GA620 to my attention and 79 * convincing me to write this driver. 80 * - Andrew Gallatin for providing FreeBSD/Alpha support. 81 */ 82 83 #include "bpfilter.h" 84 #include "opt_inet.h" 85 #include "opt_ns.h" 86 87 #include <sys/param.h> 88 #include <sys/systm.h> 89 #include <sys/sockio.h> 90 #include <sys/mbuf.h> 91 #include <sys/malloc.h> 92 #include <sys/kernel.h> 93 #include <sys/socket.h> 94 #include <sys/queue.h> 95 #include <sys/device.h> 96 #include <sys/reboot.h> 97 98 #include <uvm/uvm_extern.h> 99 100 #include <net/if.h> 101 #include <net/if_arp.h> 102 #include <net/if_ether.h> 103 #include <net/if_dl.h> 104 #include <net/if_media.h> 105 106 #if NBPFILTER > 0 107 #include <net/bpf.h> 108 #endif 109 110 #ifdef INET 111 #include <netinet/in.h> 112 #include <netinet/if_inarp.h> 113 #endif 114 115 #ifdef NS 116 #include <netns/ns.h> 117 #include <netns/ns_if.h> 118 #endif 119 120 #include <machine/bus.h> 121 122 #include <dev/pci/pcireg.h> 123 #include <dev/pci/pcivar.h> 124 #include <dev/pci/pcidevs.h> 125 126 #include <dev/pci/if_tireg.h> 127 #include <dev/pci/ti_fw.h> 128 #include <dev/pci/ti_fw2.h> 129 130 #ifdef M_HWCKSUM 131 /*#define TI_CSUM_OFFLOAD*/ 132 #endif 133 134 /* 135 * Various supported device vendors/types and their names. 136 */ 137 138 static struct ti_type ti_devs[] = { 139 { PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_ACENIC, 140 "Alteon AceNIC 1000baseSX Gigabit Ethernet" }, 141 { PCI_VENDOR_ALTEON, PCI_PRODUCT_ALTEON_ACENIC_COPPER, 142 "Alteon AceNIC 1000baseT Gigabit Ethernet" }, 143 { PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C985, 144 "3Com 3c985-SX Gigabit Ethernet" }, 145 { PCI_VENDOR_NETGEAR, PCI_PRODUCT_NETGEAR_GA620, 146 "Netgear GA620 1000baseSX Gigabit Ethernet" }, 147 { PCI_VENDOR_NETGEAR, PCI_PRODUCT_NETGEAR_GA620T, 148 "Netgear GA620 1000baseT Gigabit Ethernet" }, 149 { PCI_VENDOR_SGI, PCI_PRODUCT_SGI_TIGON, 150 "Silicon Graphics Gigabit Ethernet" }, 151 { 0, 0, NULL } 152 }; 153 154 static struct ti_type *ti_type_match __P((struct pci_attach_args *)); 155 static int ti_probe __P((struct device *, struct cfdata *, void *)); 156 static void ti_attach __P((struct device *, struct device *, void *)); 157 static void ti_shutdown __P((void *)); 158 static void ti_txeof __P((struct ti_softc *)); 159 static void ti_rxeof __P((struct ti_softc *)); 160 161 static void ti_stats_update __P((struct ti_softc *)); 162 static int ti_encap __P((struct ti_softc *, struct mbuf *, 163 u_int32_t *)); 164 165 static int ti_intr __P((void *)); 166 static void ti_start __P((struct ifnet *)); 167 static int ti_ioctl __P((struct ifnet *, u_long, caddr_t)); 168 static void ti_init __P((void *)); 169 static void ti_init2 __P((struct ti_softc *)); 170 static void ti_stop __P((struct ti_softc *)); 171 static void ti_watchdog __P((struct ifnet *)); 172 static int ti_ifmedia_upd __P((struct ifnet *)); 173 static void ti_ifmedia_sts __P((struct ifnet *, struct ifmediareq *)); 174 175 static u_int32_t ti_eeprom_putbyte __P((struct ti_softc *, int)); 176 static u_int8_t ti_eeprom_getbyte __P((struct ti_softc *, 177 int, u_int8_t *)); 178 static int ti_read_eeprom 
__P((struct ti_softc *, caddr_t, int, int)); 179 180 static void ti_add_mcast __P((struct ti_softc *, struct ether_addr *)); 181 static void ti_del_mcast __P((struct ti_softc *, struct ether_addr *)); 182 static void ti_setmulti __P((struct ti_softc *)); 183 184 static void ti_mem __P((struct ti_softc *, u_int32_t, 185 u_int32_t, caddr_t)); 186 static void ti_loadfw __P((struct ti_softc *)); 187 static void ti_cmd __P((struct ti_softc *, struct ti_cmd_desc *)); 188 static void ti_cmd_ext __P((struct ti_softc *, struct ti_cmd_desc *, 189 caddr_t, int)); 190 static void ti_handle_events __P((struct ti_softc *)); 191 static int ti_alloc_jumbo_mem __P((struct ti_softc *)); 192 static void *ti_jalloc __P((struct ti_softc *)); 193 static void ti_jfree __P((caddr_t, u_int, void *)); 194 static int ti_newbuf_std __P((struct ti_softc *, int, struct mbuf *, bus_dmamap_t)); 195 static int ti_newbuf_mini __P((struct ti_softc *, int, struct mbuf *, bus_dmamap_t)); 196 static int ti_newbuf_jumbo __P((struct ti_softc *, int, struct mbuf *)); 197 static int ti_init_rx_ring_std __P((struct ti_softc *)); 198 static void ti_free_rx_ring_std __P((struct ti_softc *)); 199 static int ti_init_rx_ring_jumbo __P((struct ti_softc *)); 200 static void ti_free_rx_ring_jumbo __P((struct ti_softc *)); 201 static int ti_init_rx_ring_mini __P((struct ti_softc *)); 202 static void ti_free_rx_ring_mini __P((struct ti_softc *)); 203 static void ti_free_tx_ring __P((struct ti_softc *)); 204 static int ti_init_tx_ring __P((struct ti_softc *)); 205 206 static int ti_64bitslot_war __P((struct ti_softc *)); 207 static int ti_chipinit __P((struct ti_softc *)); 208 static int ti_gibinit __P((struct ti_softc *)); 209 210 static int ti_ether_ioctl __P((struct ifnet *, u_long, caddr_t)); 211 212 struct cfattach ti_ca = { 213 sizeof(struct ti_softc), ti_probe, ti_attach 214 }; 215 216 /* 217 * Send an instruction or address to the EEPROM, check for ACK. 218 */ 219 static u_int32_t ti_eeprom_putbyte(sc, byte) 220 struct ti_softc *sc; 221 int byte; 222 { 223 int i, ack = 0; 224 225 /* 226 * Make sure we're in TX mode. 227 */ 228 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN); 229 230 /* 231 * Feed in each bit and stobe the clock. 232 */ 233 for (i = 0x80; i; i >>= 1) { 234 if (byte & i) { 235 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_DOUT); 236 } else { 237 TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_DOUT); 238 } 239 DELAY(1); 240 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK); 241 DELAY(1); 242 TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK); 243 } 244 245 /* 246 * Turn off TX mode. 247 */ 248 TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN); 249 250 /* 251 * Check for ack. 252 */ 253 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK); 254 ack = CSR_READ_4(sc, TI_MISC_LOCAL_CTL) & TI_MLC_EE_DIN; 255 TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK); 256 257 return(ack); 258 } 259 260 /* 261 * Read a byte of data stored in the EEPROM at address 'addr.' 262 * We have to send two address bytes since the EEPROM can hold 263 * more than 256 bytes of data. 264 */ 265 static u_int8_t ti_eeprom_getbyte(sc, addr, dest) 266 struct ti_softc *sc; 267 int addr; 268 u_int8_t *dest; 269 { 270 int i; 271 u_int8_t byte = 0; 272 273 EEPROM_START; 274 275 /* 276 * Send write control code to EEPROM. 
277 */ 278 if (ti_eeprom_putbyte(sc, EEPROM_CTL_WRITE)) { 279 printf("%s: failed to send write command, status: %x\n", 280 sc->sc_dev.dv_xname, CSR_READ_4(sc, TI_MISC_LOCAL_CTL)); 281 return(1); 282 } 283 284 /* 285 * Send first byte of address of byte we want to read. 286 */ 287 if (ti_eeprom_putbyte(sc, (addr >> 8) & 0xFF)) { 288 printf("%s: failed to send address, status: %x\n", 289 sc->sc_dev.dv_xname, CSR_READ_4(sc, TI_MISC_LOCAL_CTL)); 290 return(1); 291 } 292 /* 293 * Send second byte address of byte we want to read. 294 */ 295 if (ti_eeprom_putbyte(sc, addr & 0xFF)) { 296 printf("%s: failed to send address, status: %x\n", 297 sc->sc_dev.dv_xname, CSR_READ_4(sc, TI_MISC_LOCAL_CTL)); 298 return(1); 299 } 300 301 EEPROM_STOP; 302 EEPROM_START; 303 /* 304 * Send read control code to EEPROM. 305 */ 306 if (ti_eeprom_putbyte(sc, EEPROM_CTL_READ)) { 307 printf("%s: failed to send read command, status: %x\n", 308 sc->sc_dev.dv_xname, CSR_READ_4(sc, TI_MISC_LOCAL_CTL)); 309 return(1); 310 } 311 312 /* 313 * Start reading bits from EEPROM. 314 */ 315 TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN); 316 for (i = 0x80; i; i >>= 1) { 317 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK); 318 DELAY(1); 319 if (CSR_READ_4(sc, TI_MISC_LOCAL_CTL) & TI_MLC_EE_DIN) 320 byte |= i; 321 TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK); 322 DELAY(1); 323 } 324 325 EEPROM_STOP; 326 327 /* 328 * No ACK generated for read, so just return byte. 329 */ 330 331 *dest = byte; 332 333 return(0); 334 } 335 336 /* 337 * Read a sequence of bytes from the EEPROM. 338 */ 339 static int ti_read_eeprom(sc, dest, off, cnt) 340 struct ti_softc *sc; 341 caddr_t dest; 342 int off; 343 int cnt; 344 { 345 int err = 0, i; 346 u_int8_t byte = 0; 347 348 for (i = 0; i < cnt; i++) { 349 err = ti_eeprom_getbyte(sc, off + i, &byte); 350 if (err) 351 break; 352 *(dest + i) = byte; 353 } 354 355 return(err ? 1 : 0); 356 } 357 358 /* 359 * NIC memory access function. Can be used to either clear a section 360 * of NIC local memory or (if buf is non-NULL) copy data into it. 361 */ 362 static void ti_mem(sc, addr, len, buf) 363 struct ti_softc *sc; 364 u_int32_t addr, len; 365 caddr_t buf; 366 { 367 int segptr, segsize, cnt; 368 caddr_t ptr; 369 370 segptr = addr; 371 cnt = len; 372 ptr = buf; 373 374 while(cnt) { 375 if (cnt < TI_WINLEN) 376 segsize = cnt; 377 else 378 segsize = TI_WINLEN - (segptr % TI_WINLEN); 379 CSR_WRITE_4(sc, TI_WINBASE, (segptr & ~(TI_WINLEN - 1))); 380 if (buf == NULL) { 381 bus_space_set_region_4(sc->ti_btag, sc->ti_bhandle, 382 TI_WINDOW + (segptr & (TI_WINLEN - 1)), 0, 383 segsize / 4); 384 } else { 385 bus_space_write_region_4(sc->ti_btag, sc->ti_bhandle, 386 TI_WINDOW + (segptr & (TI_WINLEN - 1)), 387 (u_int32_t *)ptr, segsize / 4); 388 ptr += segsize; 389 } 390 segptr += segsize; 391 cnt -= segsize; 392 } 393 394 return; 395 } 396 397 /* 398 * Load firmware image into the NIC. Check that the firmware revision 399 * is acceptable and see if we want the firmware for the Tigon 1 or 400 * Tigon 2. 
401 */ 402 static void ti_loadfw(sc) 403 struct ti_softc *sc; 404 { 405 switch(sc->ti_hwrev) { 406 case TI_HWREV_TIGON: 407 if (tigonFwReleaseMajor != TI_FIRMWARE_MAJOR || 408 tigonFwReleaseMinor != TI_FIRMWARE_MINOR || 409 tigonFwReleaseFix != TI_FIRMWARE_FIX) { 410 printf("%s: firmware revision mismatch; want " 411 "%d.%d.%d, got %d.%d.%d\n", sc->sc_dev.dv_xname, 412 TI_FIRMWARE_MAJOR, TI_FIRMWARE_MINOR, 413 TI_FIRMWARE_FIX, tigonFwReleaseMajor, 414 tigonFwReleaseMinor, tigonFwReleaseFix); 415 return; 416 } 417 ti_mem(sc, tigonFwTextAddr, tigonFwTextLen, 418 (caddr_t)tigonFwText); 419 ti_mem(sc, tigonFwDataAddr, tigonFwDataLen, 420 (caddr_t)tigonFwData); 421 ti_mem(sc, tigonFwRodataAddr, tigonFwRodataLen, 422 (caddr_t)tigonFwRodata); 423 ti_mem(sc, tigonFwBssAddr, tigonFwBssLen, NULL); 424 ti_mem(sc, tigonFwSbssAddr, tigonFwSbssLen, NULL); 425 CSR_WRITE_4(sc, TI_CPU_PROGRAM_COUNTER, tigonFwStartAddr); 426 break; 427 case TI_HWREV_TIGON_II: 428 if (tigon2FwReleaseMajor != TI_FIRMWARE_MAJOR || 429 tigon2FwReleaseMinor != TI_FIRMWARE_MINOR || 430 tigon2FwReleaseFix != TI_FIRMWARE_FIX) { 431 printf("%s: firmware revision mismatch; want " 432 "%d.%d.%d, got %d.%d.%d\n", sc->sc_dev.dv_xname, 433 TI_FIRMWARE_MAJOR, TI_FIRMWARE_MINOR, 434 TI_FIRMWARE_FIX, tigon2FwReleaseMajor, 435 tigon2FwReleaseMinor, tigon2FwReleaseFix); 436 return; 437 } 438 ti_mem(sc, tigon2FwTextAddr, tigon2FwTextLen, 439 (caddr_t)tigon2FwText); 440 ti_mem(sc, tigon2FwDataAddr, tigon2FwDataLen, 441 (caddr_t)tigon2FwData); 442 ti_mem(sc, tigon2FwRodataAddr, tigon2FwRodataLen, 443 (caddr_t)tigon2FwRodata); 444 ti_mem(sc, tigon2FwBssAddr, tigon2FwBssLen, NULL); 445 ti_mem(sc, tigon2FwSbssAddr, tigon2FwSbssLen, NULL); 446 CSR_WRITE_4(sc, TI_CPU_PROGRAM_COUNTER, tigon2FwStartAddr); 447 break; 448 default: 449 printf("%s: can't load firmware: unknown hardware rev\n", 450 sc->sc_dev.dv_xname); 451 break; 452 } 453 454 return; 455 } 456 457 /* 458 * Send the NIC a command via the command ring. 459 */ 460 static void ti_cmd(sc, cmd) 461 struct ti_softc *sc; 462 struct ti_cmd_desc *cmd; 463 { 464 u_int32_t index; 465 466 index = sc->ti_cmd_saved_prodidx; 467 CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4), *(u_int32_t *)(cmd)); 468 TI_INC(index, TI_CMD_RING_CNT); 469 CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, index); 470 sc->ti_cmd_saved_prodidx = index; 471 472 return; 473 } 474 475 /* 476 * Send the NIC an extended command. The 'len' parameter specifies the 477 * number of command slots to include after the initial command. 478 */ 479 static void ti_cmd_ext(sc, cmd, arg, len) 480 struct ti_softc *sc; 481 struct ti_cmd_desc *cmd; 482 caddr_t arg; 483 int len; 484 { 485 u_int32_t index; 486 int i; 487 488 index = sc->ti_cmd_saved_prodidx; 489 CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4), *(u_int32_t *)(cmd)); 490 TI_INC(index, TI_CMD_RING_CNT); 491 for (i = 0; i < len; i++) { 492 CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4), 493 *(u_int32_t *)(&arg[i * 4])); 494 TI_INC(index, TI_CMD_RING_CNT); 495 } 496 CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, index); 497 sc->ti_cmd_saved_prodidx = index; 498 499 return; 500 } 501 502 /* 503 * Handle events that have triggered interrupts. 
504 */ 505 static void ti_handle_events(sc) 506 struct ti_softc *sc; 507 { 508 struct ti_event_desc *e; 509 510 if (sc->ti_rdata->ti_event_ring == NULL) 511 return; 512 513 while (sc->ti_ev_saved_considx != sc->ti_ev_prodidx.ti_idx) { 514 e = &sc->ti_rdata->ti_event_ring[sc->ti_ev_saved_considx]; 515 switch(e->ti_event) { 516 case TI_EV_LINKSTAT_CHANGED: 517 sc->ti_linkstat = e->ti_code; 518 if (e->ti_code == TI_EV_CODE_LINK_UP) 519 printf("%s: 10/100 link up\n", 520 sc->sc_dev.dv_xname); 521 else if (e->ti_code == TI_EV_CODE_GIG_LINK_UP) 522 printf("%s: gigabit link up\n", 523 sc->sc_dev.dv_xname); 524 else if (e->ti_code == TI_EV_CODE_LINK_DOWN) 525 printf("%s: link down\n", 526 sc->sc_dev.dv_xname); 527 break; 528 case TI_EV_ERROR: 529 if (e->ti_code == TI_EV_CODE_ERR_INVAL_CMD) 530 printf("%s: invalid command\n", 531 sc->sc_dev.dv_xname); 532 else if (e->ti_code == TI_EV_CODE_ERR_UNIMP_CMD) 533 printf("%s: unknown command\n", 534 sc->sc_dev.dv_xname); 535 else if (e->ti_code == TI_EV_CODE_ERR_BADCFG) 536 printf("%s: bad config data\n", 537 sc->sc_dev.dv_xname); 538 break; 539 case TI_EV_FIRMWARE_UP: 540 ti_init2(sc); 541 break; 542 case TI_EV_STATS_UPDATED: 543 ti_stats_update(sc); 544 break; 545 case TI_EV_RESET_JUMBO_RING: 546 case TI_EV_MCAST_UPDATED: 547 /* Who cares. */ 548 break; 549 default: 550 printf("%s: unknown event: %d\n", 551 sc->sc_dev.dv_xname, e->ti_event); 552 break; 553 } 554 /* Advance the consumer index. */ 555 TI_INC(sc->ti_ev_saved_considx, TI_EVENT_RING_CNT); 556 CSR_WRITE_4(sc, TI_GCR_EVENTCONS_IDX, sc->ti_ev_saved_considx); 557 } 558 559 return; 560 } 561 562 /* 563 * Memory management for the jumbo receive ring is a pain in the 564 * butt. We need to allocate at least 9018 bytes of space per frame, 565 * _and_ it has to be contiguous (unless you use the extended 566 * jumbo descriptor format). Using malloc() all the time won't 567 * work: malloc() allocates memory in powers of two, which means we 568 * would end up wasting a considerable amount of space by allocating 569 * 9K chunks. We don't have a jumbo mbuf cluster pool. Thus, we have 570 * to do our own memory management. 571 * 572 * The driver needs to allocate a contiguous chunk of memory at boot 573 * time. We then chop this up ourselves into 9K pieces and use them 574 * as external mbuf storage. 575 * 576 * One issue here is how much memory to allocate. The jumbo ring has 577 * 256 slots in it, but at 9K per slot than can consume over 2MB of 578 * RAM. This is a bit much, especially considering we also need 579 * RAM for the standard ring and mini ring (on the Tigon 2). To 580 * save space, we only actually allocate enough memory for 64 slots 581 * by default, which works out to between 500 and 600K. This can 582 * be tuned by changing a #define in if_tireg.h. 583 */ 584 585 static int ti_alloc_jumbo_mem(sc) 586 struct ti_softc *sc; 587 { 588 caddr_t ptr; 589 int i; 590 struct ti_jpool_entry *entry; 591 bus_dma_segment_t dmaseg; 592 int error, dmanseg; 593 594 /* Grab a big chunk o' storage. 
*/ 595 if ((error = bus_dmamem_alloc(sc->sc_dmat, 596 TI_JMEM, PAGE_SIZE, 0, &dmaseg, 1, &dmanseg, 597 BUS_DMA_NOWAIT)) != 0) { 598 printf("%s: can't allocate jumbo buffer, error = %d\n", 599 sc->sc_dev.dv_xname, error); 600 return (error); 601 } 602 603 if ((error = bus_dmamem_map(sc->sc_dmat, &dmaseg, dmanseg, 604 TI_JMEM, (caddr_t *)&sc->ti_cdata.ti_jumbo_buf, 605 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) { 606 printf("%s: can't map jumbo buffer, error = %d\n", 607 sc->sc_dev.dv_xname, error); 608 return (error); 609 } 610 611 if ((error = bus_dmamap_create(sc->sc_dmat, 612 TI_JMEM, 1, 613 TI_JMEM, 0, BUS_DMA_NOWAIT, 614 &sc->jumbo_dmamap)) != 0) { 615 printf("%s: can't create jumbo buffer DMA map, error = %d\n", 616 sc->sc_dev.dv_xname, error); 617 return (error); 618 } 619 620 if ((error = bus_dmamap_load(sc->sc_dmat, sc->jumbo_dmamap, 621 sc->ti_cdata.ti_jumbo_buf, TI_JMEM, NULL, 622 BUS_DMA_NOWAIT)) != 0) { 623 printf("%s: can't load jumbo buffer DMA map, error = %d\n", 624 sc->sc_dev.dv_xname, error); 625 return (error); 626 } 627 sc->jumbo_dmaaddr = sc->jumbo_dmamap->dm_segs[0].ds_addr; 628 629 SIMPLEQ_INIT(&sc->ti_jfree_listhead); 630 SIMPLEQ_INIT(&sc->ti_jinuse_listhead); 631 632 /* 633 * Now divide it up into 9K pieces and save the addresses 634 * in an array. 635 */ 636 ptr = sc->ti_cdata.ti_jumbo_buf; 637 for (i = 0; i < TI_JSLOTS; i++) { 638 sc->ti_cdata.ti_jslots[i] = ptr; 639 ptr += TI_JLEN; 640 entry = malloc(sizeof(struct ti_jpool_entry), 641 M_DEVBUF, M_NOWAIT); 642 if (entry == NULL) { 643 free(sc->ti_cdata.ti_jumbo_buf, M_DEVBUF); 644 sc->ti_cdata.ti_jumbo_buf = NULL; 645 printf("%s: no memory for jumbo " 646 "buffer queue!\n", sc->sc_dev.dv_xname); 647 return(ENOBUFS); 648 } 649 entry->slot = i; 650 SIMPLEQ_INSERT_HEAD(&sc->ti_jfree_listhead, entry, 651 jpool_entries); 652 } 653 654 return(0); 655 } 656 657 /* 658 * Allocate a jumbo buffer. 659 */ 660 static void *ti_jalloc(sc) 661 struct ti_softc *sc; 662 { 663 struct ti_jpool_entry *entry; 664 665 entry = SIMPLEQ_FIRST(&sc->ti_jfree_listhead); 666 667 if (entry == NULL) { 668 printf("%s: no free jumbo buffers\n", sc->sc_dev.dv_xname); 669 return(NULL); 670 } 671 672 SIMPLEQ_REMOVE_HEAD(&sc->ti_jfree_listhead, entry, jpool_entries); 673 SIMPLEQ_INSERT_HEAD(&sc->ti_jinuse_listhead, entry, jpool_entries); 674 return(sc->ti_cdata.ti_jslots[entry->slot]); 675 } 676 677 /* 678 * Release a jumbo buffer. 679 */ 680 static void ti_jfree(buf, size, arg) 681 caddr_t buf; 682 u_int size; 683 void *arg; 684 { 685 struct ti_softc *sc; 686 int i; 687 struct ti_jpool_entry *entry; 688 689 /* Extract the softc struct pointer. */ 690 sc = (struct ti_softc *)arg; 691 692 if (sc == NULL) 693 panic("ti_jfree: didn't get softc pointer!"); 694 695 /* calculate the slot this buffer belongs to */ 696 697 i = ((caddr_t)buf 698 - (caddr_t)sc->ti_cdata.ti_jumbo_buf) / TI_JLEN; 699 700 if ((i < 0) || (i >= TI_JSLOTS)) 701 panic("ti_jfree: asked to free buffer that we don't manage!"); 702 entry = SIMPLEQ_FIRST(&sc->ti_jinuse_listhead); 703 if (entry == NULL) 704 panic("ti_jfree: buffer not in use!"); 705 entry->slot = i; 706 SIMPLEQ_REMOVE_HEAD(&sc->ti_jinuse_listhead, 707 entry, jpool_entries); 708 SIMPLEQ_INSERT_HEAD(&sc->ti_jfree_listhead, 709 entry, jpool_entries); 710 711 return; 712 } 713 714 715 /* 716 * Intialize a standard receive ring descriptor. 
717 */ 718 static int ti_newbuf_std(sc, i, m, dmamap) 719 struct ti_softc *sc; 720 int i; 721 struct mbuf *m; 722 bus_dmamap_t dmamap; /* required if (m != NULL) */ 723 { 724 struct mbuf *m_new = NULL; 725 struct ti_rx_desc *r; 726 int error; 727 728 if (dmamap == NULL) { 729 /* if (m) panic() */ 730 731 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, 732 MCLBYTES, 0, BUS_DMA_NOWAIT, 733 &dmamap)) != 0) { 734 printf("%s: can't create recv map, error = %d\n", 735 sc->sc_dev.dv_xname, error); 736 return(ENOMEM); 737 } 738 } 739 sc->std_dmamap[i] = dmamap; 740 741 if (m == NULL) { 742 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 743 if (m_new == NULL) { 744 printf("%s: mbuf allocation failed " 745 "-- packet dropped!\n", sc->sc_dev.dv_xname); 746 return(ENOBUFS); 747 } 748 749 MCLGET(m_new, M_DONTWAIT); 750 if (!(m_new->m_flags & M_EXT)) { 751 printf("%s: cluster allocation failed " 752 "-- packet dropped!\n", sc->sc_dev.dv_xname); 753 m_freem(m_new); 754 return(ENOBUFS); 755 } 756 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 757 m_adj(m_new, ETHER_ALIGN); 758 759 if ((error = bus_dmamap_load(sc->sc_dmat, dmamap, 760 mtod(m_new, caddr_t), m_new->m_len, NULL, 761 BUS_DMA_NOWAIT)) != 0) { 762 printf("%s: can't load recv map, error = %d\n", 763 sc->sc_dev.dv_xname, error); 764 return (ENOMEM); 765 } 766 } else { 767 m_new = m; 768 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 769 m_new->m_data = m_new->m_ext.ext_buf; 770 m_adj(m_new, ETHER_ALIGN); 771 772 /* reuse the dmamap */ 773 } 774 775 sc->ti_cdata.ti_rx_std_chain[i] = m_new; 776 r = &sc->ti_rdata->ti_rx_std_ring[i]; 777 TI_HOSTADDR(r->ti_addr) = dmamap->dm_segs[0].ds_addr; 778 r->ti_type = TI_BDTYPE_RECV_BD; 779 #ifdef TI_CSUM_OFFLOAD 780 r->ti_flags = TI_BDFLAG_TCP_UDP_CKSUM|TI_BDFLAG_IP_CKSUM; 781 #else 782 r->ti_flags = 0; 783 #endif 784 r->ti_len = m_new->m_len; /* == ds_len */ 785 r->ti_idx = i; 786 787 return(0); 788 } 789 790 /* 791 * Intialize a mini receive ring descriptor. This only applies to 792 * the Tigon 2. 
793 */ 794 static int ti_newbuf_mini(sc, i, m, dmamap) 795 struct ti_softc *sc; 796 int i; 797 struct mbuf *m; 798 bus_dmamap_t dmamap; /* required if (m != NULL) */ 799 { 800 struct mbuf *m_new = NULL; 801 struct ti_rx_desc *r; 802 int error; 803 804 if (dmamap == NULL) { 805 /* if (m) panic() */ 806 807 if ((error = bus_dmamap_create(sc->sc_dmat, MHLEN, 1, 808 MHLEN, 0, BUS_DMA_NOWAIT, 809 &dmamap)) != 0) { 810 printf("%s: can't create recv map, error = %d\n", 811 sc->sc_dev.dv_xname, error); 812 return(ENOMEM); 813 } 814 } 815 sc->mini_dmamap[i] = dmamap; 816 817 if (m == NULL) { 818 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 819 if (m_new == NULL) { 820 printf("%s: mbuf allocation failed " 821 "-- packet dropped!\n", sc->sc_dev.dv_xname); 822 return(ENOBUFS); 823 } 824 m_new->m_len = m_new->m_pkthdr.len = MHLEN; 825 m_adj(m_new, ETHER_ALIGN); 826 827 if ((error = bus_dmamap_load(sc->sc_dmat, dmamap, 828 mtod(m_new, caddr_t), m_new->m_len, NULL, 829 BUS_DMA_NOWAIT)) != 0) { 830 printf("%s: can't load recv map, error = %d\n", 831 sc->sc_dev.dv_xname, error); 832 return (ENOMEM); 833 } 834 } else { 835 m_new = m; 836 m_new->m_data = m_new->m_pktdat; 837 m_new->m_len = m_new->m_pkthdr.len = MHLEN; 838 m_adj(m_new, ETHER_ALIGN); 839 840 /* reuse the dmamap */ 841 } 842 843 r = &sc->ti_rdata->ti_rx_mini_ring[i]; 844 sc->ti_cdata.ti_rx_mini_chain[i] = m_new; 845 TI_HOSTADDR(r->ti_addr) = dmamap->dm_segs[0].ds_addr; 846 r->ti_type = TI_BDTYPE_RECV_BD; 847 r->ti_flags = TI_BDFLAG_MINI_RING; 848 #ifdef TI_CSUM_OFFLOAD 849 r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM|TI_BDFLAG_IP_CKSUM; 850 #endif 851 r->ti_len = m_new->m_len; /* == ds_len */ 852 r->ti_idx = i; 853 854 return(0); 855 } 856 857 /* 858 * Initialize a jumbo receive ring descriptor. This allocates 859 * a jumbo buffer from the pool managed internally by the driver. 860 */ 861 static int ti_newbuf_jumbo(sc, i, m) 862 struct ti_softc *sc; 863 int i; 864 struct mbuf *m; 865 { 866 struct mbuf *m_new = NULL; 867 struct ti_rx_desc *r; 868 869 if (m == NULL) { 870 caddr_t *buf = NULL; 871 872 /* Allocate the mbuf. */ 873 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 874 if (m_new == NULL) { 875 printf("%s: mbuf allocation failed " 876 "-- packet dropped!\n", sc->sc_dev.dv_xname); 877 return(ENOBUFS); 878 } 879 880 /* Allocate the jumbo buffer */ 881 buf = ti_jalloc(sc); 882 if (buf == NULL) { 883 m_freem(m_new); 884 printf("%s: jumbo allocation failed " 885 "-- packet dropped!\n", sc->sc_dev.dv_xname); 886 return(ENOBUFS); 887 } 888 889 /* Attach the buffer to the mbuf. */ 890 m_new->m_data = m_new->m_ext.ext_buf = (void *)buf; 891 m_new->m_flags |= M_EXT; 892 m_new->m_len = m_new->m_pkthdr.len = 893 m_new->m_ext.ext_size = TI_JUMBO_FRAMELEN; 894 m_new->m_ext.ext_free = ti_jfree; 895 m_new->m_ext.ext_arg = sc; 896 MCLINITREFERENCE(m_new); 897 } else { 898 m_new = m; 899 m_new->m_data = m_new->m_ext.ext_buf; 900 m_new->m_ext.ext_size = TI_JUMBO_FRAMELEN; 901 } 902 903 m_adj(m_new, ETHER_ALIGN); 904 /* Set up the descriptor. */ 905 r = &sc->ti_rdata->ti_rx_jumbo_ring[i]; 906 sc->ti_cdata.ti_rx_jumbo_chain[i] = m_new; 907 TI_HOSTADDR(r->ti_addr) = sc->jumbo_dmaaddr + 908 ((caddr_t)mtod(m_new, caddr_t) 909 - (caddr_t)sc->ti_cdata.ti_jumbo_buf); 910 r->ti_type = TI_BDTYPE_RECV_JUMBO_BD; 911 r->ti_flags = TI_BDFLAG_JUMBO_RING; 912 #ifdef TI_CSUM_OFFLOAD 913 r->ti_flags |= TI_BDFLAG_TCP_UDP_CKSUM|TI_BDFLAG_IP_CKSUM; 914 #endif 915 r->ti_len = m_new->m_len; 916 r->ti_idx = i; 917 918 return(0); 919 } 920 921 /* 922 * The standard receive ring has 512 entries in it. 
At 2K per mbuf cluster, 923 * that's 1MB or memory, which is a lot. For now, we fill only the first 924 * 256 ring entries and hope that our CPU is fast enough to keep up with 925 * the NIC. 926 */ 927 static int ti_init_rx_ring_std(sc) 928 struct ti_softc *sc; 929 { 930 int i; 931 struct ti_cmd_desc cmd; 932 933 for (i = 0; i < TI_SSLOTS; i++) { 934 if (ti_newbuf_std(sc, i, NULL, 0) == ENOBUFS) 935 return(ENOBUFS); 936 }; 937 938 TI_UPDATE_STDPROD(sc, i - 1); 939 sc->ti_std = i - 1; 940 941 return(0); 942 } 943 944 static void ti_free_rx_ring_std(sc) 945 struct ti_softc *sc; 946 { 947 int i; 948 949 for (i = 0; i < TI_STD_RX_RING_CNT; i++) { 950 if (sc->ti_cdata.ti_rx_std_chain[i] != NULL) { 951 m_freem(sc->ti_cdata.ti_rx_std_chain[i]); 952 sc->ti_cdata.ti_rx_std_chain[i] = NULL; 953 954 /* if (sc->std_dmamap[i] == 0) panic() */ 955 bus_dmamap_destroy(sc->sc_dmat, sc->std_dmamap[i]); 956 sc->std_dmamap[i] = 0; 957 } 958 bzero((char *)&sc->ti_rdata->ti_rx_std_ring[i], 959 sizeof(struct ti_rx_desc)); 960 } 961 962 return; 963 } 964 965 static int ti_init_rx_ring_jumbo(sc) 966 struct ti_softc *sc; 967 { 968 int i; 969 struct ti_cmd_desc cmd; 970 971 for (i = 0; i < (TI_JSLOTS - 20); i++) { 972 if (ti_newbuf_jumbo(sc, i, NULL) == ENOBUFS) 973 return(ENOBUFS); 974 }; 975 976 TI_UPDATE_JUMBOPROD(sc, i - 1); 977 sc->ti_jumbo = i - 1; 978 979 return(0); 980 } 981 982 static void ti_free_rx_ring_jumbo(sc) 983 struct ti_softc *sc; 984 { 985 int i; 986 987 for (i = 0; i < TI_JUMBO_RX_RING_CNT; i++) { 988 if (sc->ti_cdata.ti_rx_jumbo_chain[i] != NULL) { 989 m_freem(sc->ti_cdata.ti_rx_jumbo_chain[i]); 990 sc->ti_cdata.ti_rx_jumbo_chain[i] = NULL; 991 } 992 bzero((char *)&sc->ti_rdata->ti_rx_jumbo_ring[i], 993 sizeof(struct ti_rx_desc)); 994 } 995 996 return; 997 } 998 999 static int ti_init_rx_ring_mini(sc) 1000 struct ti_softc *sc; 1001 { 1002 int i; 1003 1004 for (i = 0; i < TI_MSLOTS; i++) { 1005 if (ti_newbuf_mini(sc, i, NULL, 0) == ENOBUFS) 1006 return(ENOBUFS); 1007 }; 1008 1009 TI_UPDATE_MINIPROD(sc, i - 1); 1010 sc->ti_mini = i - 1; 1011 1012 return(0); 1013 } 1014 1015 static void ti_free_rx_ring_mini(sc) 1016 struct ti_softc *sc; 1017 { 1018 int i; 1019 1020 for (i = 0; i < TI_MINI_RX_RING_CNT; i++) { 1021 if (sc->ti_cdata.ti_rx_mini_chain[i] != NULL) { 1022 m_freem(sc->ti_cdata.ti_rx_mini_chain[i]); 1023 sc->ti_cdata.ti_rx_mini_chain[i] = NULL; 1024 1025 /* if (sc->mini_dmamap[i] == 0) panic() */ 1026 bus_dmamap_destroy(sc->sc_dmat, sc->mini_dmamap[i]); 1027 sc->mini_dmamap[i] = 0; 1028 } 1029 bzero((char *)&sc->ti_rdata->ti_rx_mini_ring[i], 1030 sizeof(struct ti_rx_desc)); 1031 } 1032 1033 return; 1034 } 1035 1036 static void ti_free_tx_ring(sc) 1037 struct ti_softc *sc; 1038 { 1039 int i; 1040 struct txdmamap_pool_entry *dma; 1041 1042 if (sc->ti_rdata->ti_tx_ring == NULL) 1043 return; 1044 1045 for (i = 0; i < TI_TX_RING_CNT; i++) { 1046 if (sc->ti_cdata.ti_tx_chain[i] != NULL) { 1047 m_freem(sc->ti_cdata.ti_tx_chain[i]); 1048 sc->ti_cdata.ti_tx_chain[i] = NULL; 1049 1050 /* if (sc->txdma[i] == 0) panic() */ 1051 SIMPLEQ_INSERT_HEAD(&sc->txdma_list, sc->txdma[i], 1052 link); 1053 sc->txdma[i] = 0; 1054 } 1055 bzero((char *)&sc->ti_rdata->ti_tx_ring[i], 1056 sizeof(struct ti_tx_desc)); 1057 } 1058 1059 while ((dma = SIMPLEQ_FIRST(&sc->txdma_list))) { 1060 SIMPLEQ_REMOVE_HEAD(&sc->txdma_list, dma, link); 1061 bus_dmamap_destroy(sc->sc_dmat, dma->dmamap); 1062 free(dma, M_DEVBUF); 1063 } 1064 1065 return; 1066 } 1067 1068 static int ti_init_tx_ring(sc) 1069 struct ti_softc *sc; 1070 { 1071 
int i, error; 1072 bus_dmamap_t dmamap; 1073 struct txdmamap_pool_entry *dma; 1074 1075 sc->ti_txcnt = 0; 1076 sc->ti_tx_saved_considx = 0; 1077 CSR_WRITE_4(sc, TI_MB_SENDPROD_IDX, 0); 1078 1079 SIMPLEQ_INIT(&sc->txdma_list); 1080 for (i = 0; i < TI_RSLOTS; i++) { 1081 /* I've seen mbufs with 30 fragments. */ 1082 if ((error = bus_dmamap_create(sc->sc_dmat, TI_JUMBO_FRAMELEN, 1083 40, TI_JUMBO_FRAMELEN, 0, 1084 BUS_DMA_NOWAIT, &dmamap)) != 0) { 1085 printf("%s: can't create tx map, error = %d\n", 1086 sc->sc_dev.dv_xname, error); 1087 return(ENOMEM); 1088 } 1089 dma = malloc(sizeof(*dma), M_DEVBUF, M_NOWAIT); 1090 if (!dma) { 1091 printf("%s: can't alloc txdmamap_pool_entry\n", 1092 sc->sc_dev.dv_xname); 1093 bus_dmamap_destroy(sc->sc_dmat, dmamap); 1094 return (ENOMEM); 1095 } 1096 dma->dmamap = dmamap; 1097 SIMPLEQ_INSERT_HEAD(&sc->txdma_list, dma, link); 1098 } 1099 1100 return(0); 1101 } 1102 1103 /* 1104 * The Tigon 2 firmware has a new way to add/delete multicast addresses, 1105 * but we have to support the old way too so that Tigon 1 cards will 1106 * work. 1107 */ 1108 void ti_add_mcast(sc, addr) 1109 struct ti_softc *sc; 1110 struct ether_addr *addr; 1111 { 1112 struct ti_cmd_desc cmd; 1113 u_int16_t *m; 1114 u_int32_t ext[2] = {0, 0}; 1115 1116 m = (u_int16_t *)&addr->ether_addr_octet[0]; /* XXX */ 1117 1118 switch(sc->ti_hwrev) { 1119 case TI_HWREV_TIGON: 1120 CSR_WRITE_4(sc, TI_GCR_MAR0, htons(m[0])); 1121 CSR_WRITE_4(sc, TI_GCR_MAR1, (htons(m[1]) << 16) | htons(m[2])); 1122 TI_DO_CMD(TI_CMD_ADD_MCAST_ADDR, 0, 0); 1123 break; 1124 case TI_HWREV_TIGON_II: 1125 ext[0] = htons(m[0]); 1126 ext[1] = (htons(m[1]) << 16) | htons(m[2]); 1127 TI_DO_CMD_EXT(TI_CMD_EXT_ADD_MCAST, 0, 0, (caddr_t)&ext, 2); 1128 break; 1129 default: 1130 printf("%s: unknown hwrev\n", sc->sc_dev.dv_xname); 1131 break; 1132 } 1133 1134 return; 1135 } 1136 1137 void ti_del_mcast(sc, addr) 1138 struct ti_softc *sc; 1139 struct ether_addr *addr; 1140 { 1141 struct ti_cmd_desc cmd; 1142 u_int16_t *m; 1143 u_int32_t ext[2] = {0, 0}; 1144 1145 m = (u_int16_t *)&addr->ether_addr_octet[0]; /* XXX */ 1146 1147 switch(sc->ti_hwrev) { 1148 case TI_HWREV_TIGON: 1149 CSR_WRITE_4(sc, TI_GCR_MAR0, htons(m[0])); 1150 CSR_WRITE_4(sc, TI_GCR_MAR1, (htons(m[1]) << 16) | htons(m[2])); 1151 TI_DO_CMD(TI_CMD_DEL_MCAST_ADDR, 0, 0); 1152 break; 1153 case TI_HWREV_TIGON_II: 1154 ext[0] = htons(m[0]); 1155 ext[1] = (htons(m[1]) << 16) | htons(m[2]); 1156 TI_DO_CMD_EXT(TI_CMD_EXT_DEL_MCAST, 0, 0, (caddr_t)&ext, 2); 1157 break; 1158 default: 1159 printf("%s: unknown hwrev\n", sc->sc_dev.dv_xname); 1160 break; 1161 } 1162 1163 return; 1164 } 1165 1166 /* 1167 * Configure the Tigon's multicast address filter. 1168 * 1169 * The actual multicast table management is a bit of a pain, thanks to 1170 * slight brain damage on the part of both Alteon and us. With our 1171 * multicast code, we are only alerted when the multicast address table 1172 * changes and at that point we only have the current list of addresses: 1173 * we only know the current state, not the previous state, so we don't 1174 * actually know what addresses were removed or added. The firmware has 1175 * state, but we can't get our grubby mits on it, and there is no 'delete 1176 * all multicast addresses' command. Hence, we have to maintain our own 1177 * state so we know what addresses have been programmed into the NIC at 1178 * any given time. 
1179 */ 1180 static void ti_setmulti(sc) 1181 struct ti_softc *sc; 1182 { 1183 struct ifnet *ifp; 1184 struct ti_cmd_desc cmd; 1185 struct ti_mc_entry *mc; 1186 u_int32_t intrs; 1187 struct ether_multi *enm; 1188 struct ether_multistep step; 1189 1190 ifp = &sc->ethercom.ec_if; 1191 1192 if (ifp->if_flags & IFF_ALLMULTI) { 1193 TI_DO_CMD(TI_CMD_SET_ALLMULTI, TI_CMD_CODE_ALLMULTI_ENB, 0); 1194 return; 1195 } else { 1196 TI_DO_CMD(TI_CMD_SET_ALLMULTI, TI_CMD_CODE_ALLMULTI_DIS, 0); 1197 } 1198 1199 /* Disable interrupts. */ 1200 intrs = CSR_READ_4(sc, TI_MB_HOSTINTR); 1201 CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1); 1202 1203 /* First, zot all the existing filters. */ 1204 while (SIMPLEQ_FIRST(&sc->ti_mc_listhead) != NULL) { 1205 mc = SIMPLEQ_FIRST(&sc->ti_mc_listhead); 1206 ti_del_mcast(sc, &mc->mc_addr); 1207 SIMPLEQ_REMOVE_HEAD(&sc->ti_mc_listhead, mc, mc_entries); 1208 free(mc, M_DEVBUF); 1209 } 1210 1211 /* Now program new ones. */ 1212 ETHER_FIRST_MULTI(step, &sc->ethercom, enm); 1213 while (enm != NULL) { 1214 mc = malloc(sizeof(struct ti_mc_entry), M_DEVBUF, M_NOWAIT); 1215 bcopy(enm->enm_addrlo, 1216 (char *)&mc->mc_addr, ETHER_ADDR_LEN); 1217 SIMPLEQ_INSERT_HEAD(&sc->ti_mc_listhead, mc, mc_entries); 1218 ti_add_mcast(sc, &mc->mc_addr); 1219 ETHER_NEXT_MULTI(step, enm); 1220 } 1221 1222 /* Re-enable interrupts. */ 1223 CSR_WRITE_4(sc, TI_MB_HOSTINTR, intrs); 1224 1225 return; 1226 } 1227 1228 /* 1229 * Check to see if the BIOS has configured us for a 64 bit slot when 1230 * we aren't actually in one. If we detect this condition, we can work 1231 * around it on the Tigon 2 by setting a bit in the PCI state register, 1232 * but for the Tigon 1 we must give up and abort the interface attach. 1233 */ 1234 static int ti_64bitslot_war(sc) 1235 struct ti_softc *sc; 1236 { 1237 if (!(CSR_READ_4(sc, TI_PCI_STATE) & TI_PCISTATE_32BIT_BUS)) { 1238 CSR_WRITE_4(sc, 0x600, 0); 1239 CSR_WRITE_4(sc, 0x604, 0); 1240 CSR_WRITE_4(sc, 0x600, 0x5555AAAA); 1241 if (CSR_READ_4(sc, 0x604) == 0x5555AAAA) { 1242 if (sc->ti_hwrev == TI_HWREV_TIGON) 1243 return(EINVAL); 1244 else { 1245 TI_SETBIT(sc, TI_PCI_STATE, 1246 TI_PCISTATE_32BIT_BUS); 1247 return(0); 1248 } 1249 } 1250 } 1251 1252 return(0); 1253 } 1254 1255 /* 1256 * Do endian, PCI and DMA initialization. Also check the on-board ROM 1257 * self-test results. 1258 */ 1259 static int ti_chipinit(sc) 1260 struct ti_softc *sc; 1261 { 1262 u_int32_t cacheline; 1263 u_int32_t pci_writemax = 0; 1264 1265 /* Initialize link to down state. */ 1266 sc->ti_linkstat = TI_EV_CODE_LINK_DOWN; 1267 1268 /* Set endianness before we access any non-PCI registers. */ 1269 #if BYTE_ORDER == BIG_ENDIAN 1270 CSR_WRITE_4(sc, TI_MISC_HOST_CTL, 1271 TI_MHC_BIGENDIAN_INIT | (TI_MHC_BIGENDIAN_INIT << 24)); 1272 #else 1273 CSR_WRITE_4(sc, TI_MISC_HOST_CTL, 1274 TI_MHC_LITTLEENDIAN_INIT | (TI_MHC_LITTLEENDIAN_INIT << 24)); 1275 #endif 1276 1277 /* Check the ROM failed bit to see if self-tests passed. */ 1278 if (CSR_READ_4(sc, TI_CPU_STATE) & TI_CPUSTATE_ROMFAIL) { 1279 printf("%s: board self-diagnostics failed!\n", 1280 sc->sc_dev.dv_xname); 1281 return(ENODEV); 1282 } 1283 1284 /* Halt the CPU. */ 1285 TI_SETBIT(sc, TI_CPU_STATE, TI_CPUSTATE_HALT); 1286 1287 /* Figure out the hardware revision. 
*/ 1288 switch(CSR_READ_4(sc, TI_MISC_HOST_CTL) & TI_MHC_CHIP_REV_MASK) { 1289 case TI_REV_TIGON_I: 1290 sc->ti_hwrev = TI_HWREV_TIGON; 1291 break; 1292 case TI_REV_TIGON_II: 1293 sc->ti_hwrev = TI_HWREV_TIGON_II; 1294 break; 1295 default: 1296 printf("%s: unsupported chip revision\n", sc->sc_dev.dv_xname); 1297 return(ENODEV); 1298 } 1299 1300 /* Do special setup for Tigon 2. */ 1301 if (sc->ti_hwrev == TI_HWREV_TIGON_II) { 1302 TI_SETBIT(sc, TI_CPU_CTL_B, TI_CPUSTATE_HALT); 1303 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_SRAM_BANK_256K); 1304 TI_SETBIT(sc, TI_MISC_CONF, TI_MCR_SRAM_SYNCHRONOUS); 1305 } 1306 1307 /* Set up the PCI state register. */ 1308 CSR_WRITE_4(sc, TI_PCI_STATE, TI_PCI_READ_CMD|TI_PCI_WRITE_CMD); 1309 if (sc->ti_hwrev == TI_HWREV_TIGON_II) { 1310 TI_SETBIT(sc, TI_PCI_STATE, TI_PCISTATE_USE_MEM_RD_MULT); 1311 } 1312 1313 /* Clear the read/write max DMA parameters. */ 1314 TI_CLRBIT(sc, TI_PCI_STATE, (TI_PCISTATE_WRITE_MAXDMA| 1315 TI_PCISTATE_READ_MAXDMA)); 1316 1317 /* Get cache line size. */ 1318 cacheline = PCI_CACHELINE(CSR_READ_4(sc, PCI_BHLC_REG)); 1319 1320 /* 1321 * If the system has set enabled the PCI memory write 1322 * and invalidate command in the command register, set 1323 * the write max parameter accordingly. This is necessary 1324 * to use MWI with the Tigon 2. 1325 */ 1326 if (CSR_READ_4(sc, PCI_COMMAND_STATUS_REG) 1327 & PCI_COMMAND_INVALIDATE_ENABLE) { 1328 switch(cacheline) { 1329 case 1: 1330 case 4: 1331 case 8: 1332 case 16: 1333 case 32: 1334 case 64: 1335 break; 1336 default: 1337 /* Disable PCI memory write and invalidate. */ 1338 if (bootverbose) 1339 printf("%s: cache line size %d not " 1340 "supported; disabling PCI MWI\n", 1341 sc->sc_dev.dv_xname, cacheline); 1342 CSR_WRITE_4(sc, PCI_COMMAND_STATUS_REG, 1343 CSR_READ_4(sc, PCI_COMMAND_STATUS_REG) 1344 & ~PCI_COMMAND_INVALIDATE_ENABLE); 1345 break; 1346 } 1347 } 1348 1349 #ifdef __brokenalpha__ 1350 /* 1351 * From the Alteon sample driver: 1352 * Must insure that we do not cross an 8K (bytes) boundary 1353 * for DMA reads. Our highest limit is 1K bytes. This is a 1354 * restriction on some ALPHA platforms with early revision 1355 * 21174 PCI chipsets, such as the AlphaPC 164lx 1356 */ 1357 TI_SETBIT(sc, TI_PCI_STATE, pci_writemax|TI_PCI_READMAX_1024); 1358 #else 1359 TI_SETBIT(sc, TI_PCI_STATE, pci_writemax); 1360 #endif 1361 1362 /* This sets the min dma param all the way up (0xff). */ 1363 TI_SETBIT(sc, TI_PCI_STATE, TI_PCISTATE_MINDMA); 1364 1365 /* Configure DMA variables. */ 1366 #if BYTE_ORDER == BIG_ENDIAN 1367 CSR_WRITE_4(sc, TI_GCR_OPMODE, TI_OPMODE_BYTESWAP_BD | 1368 TI_OPMODE_BYTESWAP_DATA | TI_OPMODE_WORDSWAP_BD | 1369 TI_OPMODE_WARN_ENB | TI_OPMODE_FATAL_ENB | 1370 TI_OPMODE_DONT_FRAG_JUMBO); 1371 #else 1372 CSR_WRITE_4(sc, TI_GCR_OPMODE, TI_OPMODE_BYTESWAP_DATA| 1373 TI_OPMODE_WORDSWAP_BD|TI_OPMODE_DONT_FRAG_JUMBO| 1374 TI_OPMODE_WARN_ENB|TI_OPMODE_FATAL_ENB); 1375 #endif 1376 1377 /* 1378 * Only allow 1 DMA channel to be active at a time. 1379 * I don't think this is a good idea, but without it 1380 * the firmware racks up lots of nicDmaReadRingFull 1381 * errors. 1382 */ 1383 #ifndef TI_CSUM_OFFLOAD 1384 TI_SETBIT(sc, TI_GCR_OPMODE, TI_OPMODE_1_DMA_ACTIVE); 1385 #endif 1386 1387 /* Recommended settings from Tigon manual. 
*/ 1388 CSR_WRITE_4(sc, TI_GCR_DMA_WRITECFG, TI_DMA_STATE_THRESH_8W); 1389 CSR_WRITE_4(sc, TI_GCR_DMA_READCFG, TI_DMA_STATE_THRESH_8W); 1390 1391 if (ti_64bitslot_war(sc)) { 1392 printf("%s: bios thinks we're in a 64 bit slot, " 1393 "but we aren't", sc->sc_dev.dv_xname); 1394 return(EINVAL); 1395 } 1396 1397 return(0); 1398 } 1399 1400 /* 1401 * Initialize the general information block and firmware, and 1402 * start the CPU(s) running. 1403 */ 1404 static int ti_gibinit(sc) 1405 struct ti_softc *sc; 1406 { 1407 struct ti_rcb *rcb; 1408 int i; 1409 struct ifnet *ifp; 1410 1411 ifp = &sc->ethercom.ec_if; 1412 1413 /* Disable interrupts for now. */ 1414 CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1); 1415 1416 /* Tell the chip where to find the general information block. */ 1417 CSR_WRITE_4(sc, TI_GCR_GENINFO_HI, 0); 1418 CSR_WRITE_4(sc, TI_GCR_GENINFO_LO, sc->info_dmaaddr + 1419 ((caddr_t)&sc->ti_rdata->ti_info - (caddr_t)sc->ti_rdata)); 1420 1421 /* Load the firmware into SRAM. */ 1422 ti_loadfw(sc); 1423 1424 /* Set up the contents of the general info and ring control blocks. */ 1425 1426 /* Set up the event ring and producer pointer. */ 1427 rcb = &sc->ti_rdata->ti_info.ti_ev_rcb; 1428 1429 TI_HOSTADDR(rcb->ti_hostaddr) = sc->info_dmaaddr + 1430 ((caddr_t)&sc->ti_rdata->ti_event_ring - (caddr_t)sc->ti_rdata); 1431 rcb->ti_flags = 0; 1432 TI_HOSTADDR(sc->ti_rdata->ti_info.ti_ev_prodidx_ptr) = 1433 sc->info_dmaaddr + ((caddr_t)&sc->ti_rdata->ti_ev_prodidx_r 1434 - (caddr_t)sc->ti_rdata); 1435 sc->ti_ev_prodidx.ti_idx = 0; 1436 CSR_WRITE_4(sc, TI_GCR_EVENTCONS_IDX, 0); 1437 sc->ti_ev_saved_considx = 0; 1438 1439 /* Set up the command ring and producer mailbox. */ 1440 rcb = &sc->ti_rdata->ti_info.ti_cmd_rcb; 1441 1442 TI_HOSTADDR(rcb->ti_hostaddr) = TI_GCR_NIC_ADDR(TI_GCR_CMDRING); 1443 rcb->ti_flags = 0; 1444 rcb->ti_max_len = 0; 1445 for (i = 0; i < TI_CMD_RING_CNT; i++) { 1446 CSR_WRITE_4(sc, TI_GCR_CMDRING + (i * 4), 0); 1447 } 1448 CSR_WRITE_4(sc, TI_GCR_CMDCONS_IDX, 0); 1449 CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, 0); 1450 sc->ti_cmd_saved_prodidx = 0; 1451 1452 /* 1453 * Assign the address of the stats refresh buffer. 1454 * We re-use the current stats buffer for this to 1455 * conserve memory. 1456 */ 1457 TI_HOSTADDR(sc->ti_rdata->ti_info.ti_refresh_stats_ptr) = 1458 sc->info_dmaaddr + ((caddr_t)&sc->ti_rdata->ti_info.ti_stats 1459 - (caddr_t)sc->ti_rdata); 1460 1461 /* Set up the standard receive ring. */ 1462 rcb = &sc->ti_rdata->ti_info.ti_std_rx_rcb; 1463 TI_HOSTADDR(rcb->ti_hostaddr) = sc->info_dmaaddr + 1464 ((caddr_t)&sc->ti_rdata->ti_rx_std_ring 1465 - (caddr_t)sc->ti_rdata); 1466 rcb->ti_max_len = TI_FRAMELEN; 1467 rcb->ti_flags = 0; 1468 #ifdef TI_CSUM_OFFLOAD 1469 rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM|TI_RCB_FLAG_IP_CKSUM; 1470 #endif 1471 rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST; 1472 1473 /* Set up the jumbo receive ring. */ 1474 rcb = &sc->ti_rdata->ti_info.ti_jumbo_rx_rcb; 1475 TI_HOSTADDR(rcb->ti_hostaddr) = sc->info_dmaaddr + 1476 ((caddr_t)&sc->ti_rdata->ti_rx_jumbo_ring - (caddr_t)sc->ti_rdata); 1477 rcb->ti_max_len = TI_JUMBO_FRAMELEN; 1478 rcb->ti_flags = 0; 1479 #ifdef TI_CSUM_OFFLOAD 1480 rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM|TI_RCB_FLAG_IP_CKSUM; 1481 #endif 1482 rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST; 1483 1484 /* 1485 * Set up the mini ring. Only activated on the 1486 * Tigon 2 but the slot in the config block is 1487 * still there on the Tigon 1. 
1488 */ 1489 rcb = &sc->ti_rdata->ti_info.ti_mini_rx_rcb; 1490 TI_HOSTADDR(rcb->ti_hostaddr) = sc->info_dmaaddr + 1491 ((caddr_t)&sc->ti_rdata->ti_rx_mini_ring - (caddr_t)sc->ti_rdata); 1492 rcb->ti_max_len = MHLEN - ETHER_ALIGN; 1493 if (sc->ti_hwrev == TI_HWREV_TIGON) 1494 rcb->ti_flags = TI_RCB_FLAG_RING_DISABLED; 1495 else 1496 rcb->ti_flags = 0; 1497 #ifdef TI_CSUM_OFFLOAD 1498 rcb->ti_flags |= TI_RCB_FLAG_TCP_UDP_CKSUM|TI_RCB_FLAG_IP_CKSUM; 1499 #endif 1500 rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST; 1501 1502 /* 1503 * Set up the receive return ring. 1504 */ 1505 rcb = &sc->ti_rdata->ti_info.ti_return_rcb; 1506 TI_HOSTADDR(rcb->ti_hostaddr) = sc->info_dmaaddr + 1507 ((caddr_t)&sc->ti_rdata->ti_rx_return_ring - (caddr_t)sc->ti_rdata); 1508 rcb->ti_flags = 0; 1509 rcb->ti_max_len = TI_RETURN_RING_CNT; 1510 TI_HOSTADDR(sc->ti_rdata->ti_info.ti_return_prodidx_ptr) = 1511 sc->info_dmaaddr + ((caddr_t)&sc->ti_rdata->ti_return_prodidx_r 1512 - (caddr_t)sc->ti_rdata); 1513 1514 /* 1515 * Set up the tx ring. Note: for the Tigon 2, we have the option 1516 * of putting the transmit ring in the host's address space and 1517 * letting the chip DMA it instead of leaving the ring in the NIC's 1518 * memory and accessing it through the shared memory region. We 1519 * do this for the Tigon 2, but it doesn't work on the Tigon 1, 1520 * so we have to revert to the shared memory scheme if we detect 1521 * a Tigon 1 chip. 1522 */ 1523 CSR_WRITE_4(sc, TI_WINBASE, TI_TX_RING_BASE); 1524 if (sc->ti_hwrev == TI_HWREV_TIGON) { 1525 sc->ti_rdata->ti_tx_ring_nic = 1526 (struct ti_tx_desc *)(sc->ti_vhandle + TI_WINDOW); 1527 } 1528 bzero((char *)sc->ti_rdata->ti_tx_ring, 1529 TI_TX_RING_CNT * sizeof(struct ti_tx_desc)); 1530 rcb = &sc->ti_rdata->ti_info.ti_tx_rcb; 1531 if (sc->ti_hwrev == TI_HWREV_TIGON) 1532 rcb->ti_flags = 0; 1533 else 1534 rcb->ti_flags = TI_RCB_FLAG_HOST_RING; 1535 rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST; 1536 rcb->ti_max_len = TI_TX_RING_CNT; 1537 if (sc->ti_hwrev == TI_HWREV_TIGON) 1538 TI_HOSTADDR(rcb->ti_hostaddr) = TI_TX_RING_BASE; 1539 else 1540 TI_HOSTADDR(rcb->ti_hostaddr) = sc->info_dmaaddr + 1541 ((caddr_t)&sc->ti_rdata->ti_tx_ring 1542 - (caddr_t)sc->ti_rdata); 1543 TI_HOSTADDR(sc->ti_rdata->ti_info.ti_tx_considx_ptr) = 1544 sc->info_dmaaddr + ((caddr_t)&sc->ti_rdata->ti_tx_considx_r 1545 - (caddr_t)sc->ti_rdata); 1546 1547 /* Set up tuneables */ 1548 if (ifp->if_mtu > (ETHERMTU + ETHER_HDR_LEN + ETHER_CRC_LEN) || 1549 (sc->ethercom.ec_capenable & ETHERCAP_VLAN_MTU)) 1550 CSR_WRITE_4(sc, TI_GCR_RX_COAL_TICKS, 1551 (sc->ti_rx_coal_ticks / 10)); 1552 else 1553 CSR_WRITE_4(sc, TI_GCR_RX_COAL_TICKS, sc->ti_rx_coal_ticks); 1554 CSR_WRITE_4(sc, TI_GCR_TX_COAL_TICKS, sc->ti_tx_coal_ticks); 1555 CSR_WRITE_4(sc, TI_GCR_STAT_TICKS, sc->ti_stat_ticks); 1556 CSR_WRITE_4(sc, TI_GCR_RX_MAX_COAL_BD, sc->ti_rx_max_coal_bds); 1557 CSR_WRITE_4(sc, TI_GCR_TX_MAX_COAL_BD, sc->ti_tx_max_coal_bds); 1558 CSR_WRITE_4(sc, TI_GCR_TX_BUFFER_RATIO, sc->ti_tx_buf_ratio); 1559 1560 /* Turn interrupts on. */ 1561 CSR_WRITE_4(sc, TI_GCR_MASK_INTRS, 0); 1562 CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0); 1563 1564 /* Start CPU. 
*/ 1565 TI_CLRBIT(sc, TI_CPU_STATE, (TI_CPUSTATE_HALT|TI_CPUSTATE_STEP)); 1566 1567 return(0); 1568 } 1569 1570 /* 1571 * look for id in the device list, returning the first match 1572 */ 1573 static struct ti_type * ti_type_match(pa) 1574 struct pci_attach_args *pa; 1575 { 1576 struct ti_type *t; 1577 1578 t = ti_devs; 1579 while(t->ti_name != NULL) { 1580 if ((PCI_VENDOR(pa->pa_id) == t->ti_vid) && 1581 (PCI_PRODUCT(pa->pa_id) == t->ti_did)) { 1582 return (t); 1583 } 1584 t++; 1585 } 1586 1587 return(NULL); 1588 } 1589 1590 /* 1591 * Probe for a Tigon chip. Check the PCI vendor and device IDs 1592 * against our list and return its name if we find a match. 1593 */ 1594 static int ti_probe(parent, match, aux) 1595 struct device *parent; 1596 struct cfdata *match; 1597 void *aux; 1598 { 1599 struct pci_attach_args *pa = aux; 1600 struct ti_type *t; 1601 1602 t = ti_type_match(pa); 1603 1604 return((t == NULL) ? 0 : 1); 1605 } 1606 1607 static void ti_attach(parent, self, aux) 1608 struct device *parent, *self; 1609 void *aux; 1610 { 1611 u_int32_t command; 1612 struct ifnet *ifp; 1613 struct ti_softc *sc; 1614 u_char eaddr[ETHER_ADDR_LEN]; 1615 struct pci_attach_args *pa = aux; 1616 pci_chipset_tag_t pc = pa->pa_pc; 1617 pci_intr_handle_t ih; 1618 const char *intrstr = NULL; 1619 bus_dma_segment_t dmaseg; 1620 int error, dmanseg, nolinear; 1621 struct ti_type *t; 1622 1623 t = ti_type_match(pa); 1624 if (t == NULL) { 1625 printf("ti_attach: were did the card go ?\n"); 1626 return; 1627 } 1628 1629 printf(": %s (rev. 0x%02x)\n", t->ti_name, PCI_REVISION(pa->pa_class)); 1630 1631 sc = (struct ti_softc *)self; 1632 1633 /* 1634 * Map control/status registers. 1635 */ 1636 nolinear = 0; 1637 if (pci_mapreg_map(pa, 0x10, 1638 PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 1639 BUS_SPACE_MAP_LINEAR , &sc->ti_btag, &sc->ti_bhandle, 1640 NULL, NULL)) { 1641 nolinear = 1; 1642 if (pci_mapreg_map(pa, 0x10, 1643 PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 1644 0 , &sc->ti_btag, &sc->ti_bhandle, NULL, NULL)) { 1645 printf(": can't map memory space\n"); 1646 return; 1647 } 1648 } 1649 if (nolinear == 0) 1650 sc->ti_vhandle = (void *)(sc->ti_bhandle); /* XXX XXX XXX */ 1651 else 1652 sc->ti_vhandle = NULL; 1653 1654 command = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 1655 command |= PCI_COMMAND_MASTER_ENABLE; 1656 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, command); 1657 1658 /* Allocate interrupt */ 1659 if (pci_intr_map(pa, &ih)) { 1660 printf("%s: couldn't map interrupt\n", sc->sc_dev.dv_xname); 1661 return;; 1662 } 1663 intrstr = pci_intr_string(pc, ih); 1664 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, ti_intr, sc); 1665 if (sc->sc_ih == NULL) { 1666 printf("%s: couldn't establish interrupt", 1667 sc->sc_dev.dv_xname); 1668 if (intrstr != NULL) 1669 printf(" at %s", intrstr); 1670 printf("\n"); 1671 return;; 1672 } 1673 printf("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr); 1674 /* 1675 * Add shutdown hook so that DMA is disabled prior to reboot. Not 1676 * doing do could allow DMA to corrupt kernel memory during the 1677 * reboot before the driver initializes. 1678 */ 1679 (void) shutdownhook_establish(ti_shutdown, sc); 1680 1681 if (ti_chipinit(sc)) { 1682 printf("%s: chip initialization failed\n", self->dv_xname); 1683 goto fail2; 1684 } 1685 if (sc->ti_hwrev == TI_HWREV_TIGON && nolinear == 1) { 1686 printf("%s: memory space not mapped linear\n", self->dv_xname); 1687 } 1688 1689 /* Zero out the NIC's on-board SRAM. 
*/ 1690 ti_mem(sc, 0x2000, 0x100000 - 0x2000, NULL); 1691 1692 /* Init again -- zeroing memory may have clobbered some registers. */ 1693 if (ti_chipinit(sc)) { 1694 printf("%s: chip initialization failed\n", self->dv_xname); 1695 goto fail2; 1696 } 1697 1698 /* 1699 * Get station address from the EEPROM. Note: the manual states 1700 * that the MAC address is at offset 0x8c, however the data is 1701 * stored as two longwords (since that's how it's loaded into 1702 * the NIC). This means the MAC address is actually preceeded 1703 * by two zero bytes. We need to skip over those. 1704 */ 1705 if (ti_read_eeprom(sc, (caddr_t)&eaddr, 1706 TI_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) { 1707 printf("%s: failed to read station address\n", self->dv_xname); 1708 goto fail2; 1709 } 1710 1711 /* 1712 * A Tigon chip was detected. Inform the world. 1713 */ 1714 printf("%s: Ethernet address: %s\n", self->dv_xname, 1715 ether_sprintf(eaddr)); 1716 1717 sc->sc_dmat = pa->pa_dmat; 1718 1719 /* Allocate the general information block and ring buffers. */ 1720 if ((error = bus_dmamem_alloc(sc->sc_dmat, 1721 sizeof(struct ti_ring_data), PAGE_SIZE, 0, &dmaseg, 1, &dmanseg, 1722 BUS_DMA_NOWAIT)) != 0) { 1723 printf("%s: can't allocate ring buffer, error = %d\n", 1724 sc->sc_dev.dv_xname, error); 1725 goto fail2; 1726 } 1727 1728 if ((error = bus_dmamem_map(sc->sc_dmat, &dmaseg, dmanseg, 1729 sizeof(struct ti_ring_data), (caddr_t *)&sc->ti_rdata, 1730 BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) { 1731 printf("%s: can't map ring buffer, error = %d\n", 1732 sc->sc_dev.dv_xname, error); 1733 goto fail2; 1734 } 1735 1736 if ((error = bus_dmamap_create(sc->sc_dmat, 1737 sizeof(struct ti_ring_data), 1, 1738 sizeof(struct ti_ring_data), 0, BUS_DMA_NOWAIT, 1739 &sc->info_dmamap)) != 0) { 1740 printf("%s: can't create ring buffer DMA map, error = %d\n", 1741 sc->sc_dev.dv_xname, error); 1742 goto fail2; 1743 } 1744 1745 if ((error = bus_dmamap_load(sc->sc_dmat, sc->info_dmamap, 1746 sc->ti_rdata, sizeof(struct ti_ring_data), NULL, 1747 BUS_DMA_NOWAIT)) != 0) { 1748 printf("%s: can't load ring buffer DMA map, error = %d\n", 1749 sc->sc_dev.dv_xname, error); 1750 goto fail2; 1751 } 1752 1753 sc->info_dmaaddr = sc->info_dmamap->dm_segs[0].ds_addr; 1754 1755 bzero(sc->ti_rdata, sizeof(struct ti_ring_data)); 1756 1757 /* Try to allocate memory for jumbo buffers. */ 1758 if (ti_alloc_jumbo_mem(sc)) { 1759 printf("%s: jumbo buffer allocation failed\n", self->dv_xname); 1760 goto fail2; 1761 } 1762 1763 /* 1764 * We really need a better way to tell a 1000baseTX card 1765 * from a 1000baseSX one, since in theory there could be 1766 * OEMed 1000baseTX cards from lame vendors who aren't 1767 * clever enough to change the PCI ID. For the moment 1768 * though, the AceNIC is the only copper card available. 1769 */ 1770 if ((PCI_VENDOR(pa->pa_id) == PCI_VENDOR_ALTEON && 1771 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_ALTEON_ACENIC_COPPER) || 1772 (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_NETGEAR && 1773 PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_NETGEAR_GA620T)) 1774 sc->ti_copper = 1; 1775 else 1776 sc->ti_copper = 0; 1777 1778 /* Set default tuneable values. 
*/ 1779 sc->ti_stat_ticks = 2 * TI_TICKS_PER_SEC; 1780 sc->ti_rx_coal_ticks = TI_TICKS_PER_SEC / 5000; 1781 sc->ti_tx_coal_ticks = TI_TICKS_PER_SEC / 500; 1782 sc->ti_rx_max_coal_bds = 64; 1783 sc->ti_tx_max_coal_bds = 128; 1784 sc->ti_tx_buf_ratio = 21; 1785 1786 /* Set up ifnet structure */ 1787 ifp = &sc->ethercom.ec_if; 1788 ifp->if_softc = sc; 1789 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ); 1790 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1791 ifp->if_ioctl = ti_ioctl; 1792 ifp->if_start = ti_start; 1793 ifp->if_watchdog = ti_watchdog; 1794 IFQ_SET_READY(&ifp->if_snd); 1795 1796 #if 0 1797 /* 1798 * XXX This is not really correct -- we don't necessarily 1799 * XXX want to queue up as many as we can transmit at the 1800 * XXX upper layer like that. Someone with a board should 1801 * XXX check to see how this affects performance. 1802 */ 1803 ifp->if_snd.ifq_maxlen = TI_TX_RING_CNT - 1; 1804 #endif 1805 1806 /* 1807 * We can support 802.1Q VLAN-sized frames. 1808 */ 1809 sc->ethercom.ec_capabilities |= 1810 ETHERCAP_VLAN_MTU | ETHERCAP_VLAN_HWTAGGING; 1811 1812 /* Set up ifmedia support. */ 1813 ifmedia_init(&sc->ifmedia, IFM_IMASK, ti_ifmedia_upd, ti_ifmedia_sts); 1814 if (sc->ti_copper) { 1815 /* 1816 * Copper cards allow manual 10/100 mode selection, 1817 * but not manual 1000baseTX mode selection. Why? 1818 * Becuase currently there's no way to specify the 1819 * master/slave setting through the firmware interface, 1820 * so Alteon decided to just bag it and handle it 1821 * via autonegotiation. 1822 */ 1823 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL); 1824 ifmedia_add(&sc->ifmedia, 1825 IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL); 1826 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_TX, 0, NULL); 1827 ifmedia_add(&sc->ifmedia, 1828 IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL); 1829 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_1000_TX, 0, NULL); 1830 ifmedia_add(&sc->ifmedia, 1831 IFM_ETHER|IFM_1000_TX|IFM_FDX, 0, NULL); 1832 } else { 1833 /* Fiber cards don't support 10/100 modes. */ 1834 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL); 1835 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL); 1836 } 1837 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL); 1838 ifmedia_set(&sc->ifmedia, IFM_ETHER|IFM_AUTO); 1839 1840 /* 1841 * Call MI attach routines. 1842 */ 1843 if_attach(ifp); 1844 ether_ifattach(ifp, eaddr); 1845 1846 return; 1847 fail2: 1848 pci_intr_disestablish(pc, sc->sc_ih); 1849 return; 1850 } 1851 1852 /* 1853 * Frame reception handling. This is called if there's a frame 1854 * on the receive return list. 

/*
 * Frame reception handling. This is called if there's a frame
 * on the receive return list.
 *
 * Note: we have to be able to handle three possibilities here:
 * 1) the frame is from the mini receive ring (can only happen
 *    on Tigon 2 boards)
 * 2) the frame is from the jumbo receive ring
 * 3) the frame is from the standard receive ring
 */

static void ti_rxeof(sc)
	struct ti_softc *sc;
{
	struct ifnet *ifp;
	struct ti_cmd_desc cmd;

	ifp = &sc->ethercom.ec_if;

	while (sc->ti_rx_saved_considx != sc->ti_return_prodidx.ti_idx) {
		struct ti_rx_desc *cur_rx;
		u_int32_t rxidx;
		struct mbuf *m = NULL;
		u_int16_t vlan_tag = 0;
		int have_tag = 0;
#ifdef TI_CSUM_OFFLOAD
		struct ip *ip;
#endif
		bus_dmamap_t dmamap;

		cur_rx =
		    &sc->ti_rdata->ti_rx_return_ring[sc->ti_rx_saved_considx];
		rxidx = cur_rx->ti_idx;
		TI_INC(sc->ti_rx_saved_considx, TI_RETURN_RING_CNT);

		if (cur_rx->ti_flags & TI_BDFLAG_VLAN_TAG) {
			have_tag = 1;
			vlan_tag = cur_rx->ti_vlan_tag;
		}

		if (cur_rx->ti_flags & TI_BDFLAG_JUMBO_RING) {
			TI_INC(sc->ti_jumbo, TI_JUMBO_RX_RING_CNT);
			m = sc->ti_cdata.ti_rx_jumbo_chain[rxidx];
			sc->ti_cdata.ti_rx_jumbo_chain[rxidx] = NULL;
			if (cur_rx->ti_flags & TI_BDFLAG_ERROR) {
				ifp->if_ierrors++;
				ti_newbuf_jumbo(sc, sc->ti_jumbo, m);
				continue;
			}
			if (ti_newbuf_jumbo(sc, sc->ti_jumbo, NULL)
			    == ENOBUFS) {
				ifp->if_ierrors++;
				ti_newbuf_jumbo(sc, sc->ti_jumbo, m);
				continue;
			}
		} else if (cur_rx->ti_flags & TI_BDFLAG_MINI_RING) {
			TI_INC(sc->ti_mini, TI_MINI_RX_RING_CNT);
			m = sc->ti_cdata.ti_rx_mini_chain[rxidx];
			sc->ti_cdata.ti_rx_mini_chain[rxidx] = NULL;
			dmamap = sc->mini_dmamap[rxidx];
			sc->mini_dmamap[rxidx] = 0;
			if (cur_rx->ti_flags & TI_BDFLAG_ERROR) {
				ifp->if_ierrors++;
				ti_newbuf_mini(sc, sc->ti_mini, m, dmamap);
				continue;
			}
			if (ti_newbuf_mini(sc, sc->ti_mini, NULL, dmamap)
			    == ENOBUFS) {
				ifp->if_ierrors++;
				ti_newbuf_mini(sc, sc->ti_mini, m, dmamap);
				continue;
			}
		} else {
			TI_INC(sc->ti_std, TI_STD_RX_RING_CNT);
			m = sc->ti_cdata.ti_rx_std_chain[rxidx];
			sc->ti_cdata.ti_rx_std_chain[rxidx] = NULL;
			dmamap = sc->std_dmamap[rxidx];
			sc->std_dmamap[rxidx] = 0;
			if (cur_rx->ti_flags & TI_BDFLAG_ERROR) {
				ifp->if_ierrors++;
				ti_newbuf_std(sc, sc->ti_std, m, dmamap);
				continue;
			}
			if (ti_newbuf_std(sc, sc->ti_std, NULL, dmamap)
			    == ENOBUFS) {
				ifp->if_ierrors++;
				ti_newbuf_std(sc, sc->ti_std, m, dmamap);
				continue;
			}
		}

		m->m_pkthdr.len = m->m_len = cur_rx->ti_len;
		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;

#if NBPFILTER > 0
		/*
		 * Handle BPF listeners.  Let the BPF user see the packet, but
		 * don't pass it up to the ether_input() layer unless it's
		 * a broadcast packet, multicast packet, matches our ethernet
		 * address or the interface is in promiscuous mode.
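		 * (The packet is handed up via ifp->if_input below; any
		 * hardware-extracted VLAN tag travels with it as an AF_LINK
		 * mbuf aux record attached just before that call.)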
1953 */ 1954 if (ifp->if_bpf) 1955 bpf_mtap(ifp->if_bpf, m); 1956 #endif 1957 1958 #ifdef TI_CSUM_OFFLOAD /* XXX NetBSD: broken because m points to ether pkt */ 1959 ip = mtod(m, struct ip *); 1960 if (!(cur_rx->ti_tcp_udp_cksum ^ 0xFFFF) && 1961 !(ip->ip_off & htons(IP_MF | IP_OFFMASK | IP_RF))) 1962 m->m_flags |= M_HWCKSUM; 1963 #endif 1964 1965 if (have_tag) { 1966 struct mbuf *n; 1967 n = m_aux_add(m, AF_LINK, ETHERTYPE_VLAN); 1968 if (n) { 1969 *mtod(n, int *) = vlan_tag; 1970 n->m_len = sizeof(int); 1971 } else { 1972 printf("%s: no mbuf for tag\n", ifp->if_xname); 1973 m_freem(m); 1974 continue; 1975 } 1976 have_tag = vlan_tag = 0; 1977 } 1978 (*ifp->if_input)(ifp, m); 1979 } 1980 1981 /* Only necessary on the Tigon 1. */ 1982 if (sc->ti_hwrev == TI_HWREV_TIGON) 1983 CSR_WRITE_4(sc, TI_GCR_RXRETURNCONS_IDX, 1984 sc->ti_rx_saved_considx); 1985 1986 TI_UPDATE_STDPROD(sc, sc->ti_std); 1987 TI_UPDATE_MINIPROD(sc, sc->ti_mini); 1988 TI_UPDATE_JUMBOPROD(sc, sc->ti_jumbo); 1989 1990 return; 1991 } 1992 1993 static void ti_txeof(sc) 1994 struct ti_softc *sc; 1995 { 1996 struct ti_tx_desc *cur_tx = NULL; 1997 struct ifnet *ifp; 1998 1999 ifp = &sc->ethercom.ec_if; 2000 2001 /* 2002 * Go through our tx ring and free mbufs for those 2003 * frames that have been sent. 2004 */ 2005 while (sc->ti_tx_saved_considx != sc->ti_tx_considx.ti_idx) { 2006 u_int32_t idx = 0; 2007 2008 idx = sc->ti_tx_saved_considx; 2009 if (sc->ti_hwrev == TI_HWREV_TIGON) { 2010 if (idx > 383) 2011 CSR_WRITE_4(sc, TI_WINBASE, 2012 TI_TX_RING_BASE + 6144); 2013 else if (idx > 255) 2014 CSR_WRITE_4(sc, TI_WINBASE, 2015 TI_TX_RING_BASE + 4096); 2016 else if (idx > 127) 2017 CSR_WRITE_4(sc, TI_WINBASE, 2018 TI_TX_RING_BASE + 2048); 2019 else 2020 CSR_WRITE_4(sc, TI_WINBASE, 2021 TI_TX_RING_BASE); 2022 cur_tx = &sc->ti_rdata->ti_tx_ring_nic[idx % 128]; 2023 } else 2024 cur_tx = &sc->ti_rdata->ti_tx_ring[idx]; 2025 if (cur_tx->ti_flags & TI_BDFLAG_END) 2026 ifp->if_opackets++; 2027 if (sc->ti_cdata.ti_tx_chain[idx] != NULL) { 2028 m_freem(sc->ti_cdata.ti_tx_chain[idx]); 2029 sc->ti_cdata.ti_tx_chain[idx] = NULL; 2030 2031 /* if (sc->txdma[idx] == 0) panic() */ 2032 SIMPLEQ_INSERT_HEAD(&sc->txdma_list, sc->txdma[idx], 2033 link); 2034 sc->txdma[idx] = 0; 2035 } 2036 sc->ti_txcnt--; 2037 TI_INC(sc->ti_tx_saved_considx, TI_TX_RING_CNT); 2038 ifp->if_timer = 0; 2039 } 2040 2041 if (cur_tx != NULL) 2042 ifp->if_flags &= ~IFF_OACTIVE; 2043 2044 return; 2045 } 2046 2047 static int ti_intr(xsc) 2048 void *xsc; 2049 { 2050 struct ti_softc *sc; 2051 struct ifnet *ifp; 2052 2053 sc = xsc; 2054 ifp = &sc->ethercom.ec_if; 2055 2056 #ifdef notdef 2057 /* Avoid this for now -- checking this register is expensive. */ 2058 /* Make sure this is really our interrupt. */ 2059 if (!(CSR_READ_4(sc, TI_MISC_HOST_CTL) & TI_MHC_INTSTATE)) 2060 return (0); 2061 #endif 2062 2063 /* Ack interrupt and stop others from occuring. */ 2064 CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1); 2065 2066 if (ifp->if_flags & IFF_RUNNING) { 2067 /* Check RX return ring producer/consumer */ 2068 ti_rxeof(sc); 2069 2070 /* Check TX ring producer/consumer */ 2071 ti_txeof(sc); 2072 } 2073 2074 ti_handle_events(sc); 2075 2076 /* Re-enable interrupts. 
*/ 2077 CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0); 2078 2079 if ((ifp->if_flags & IFF_RUNNING) != 0 && 2080 IFQ_IS_EMPTY(&ifp->if_snd) == 0) 2081 ti_start(ifp); 2082 2083 return (1); 2084 } 2085 2086 static void ti_stats_update(sc) 2087 struct ti_softc *sc; 2088 { 2089 struct ifnet *ifp; 2090 2091 ifp = &sc->ethercom.ec_if; 2092 2093 ifp->if_collisions += 2094 (sc->ti_rdata->ti_info.ti_stats.dot3StatsSingleCollisionFrames + 2095 sc->ti_rdata->ti_info.ti_stats.dot3StatsMultipleCollisionFrames + 2096 sc->ti_rdata->ti_info.ti_stats.dot3StatsExcessiveCollisions + 2097 sc->ti_rdata->ti_info.ti_stats.dot3StatsLateCollisions) - 2098 ifp->if_collisions; 2099 2100 return; 2101 } 2102 2103 /* 2104 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data 2105 * pointers to descriptors. 2106 */ 2107 static int ti_encap(sc, m_head, txidx) 2108 struct ti_softc *sc; 2109 struct mbuf *m_head; 2110 u_int32_t *txidx; 2111 { 2112 struct ti_tx_desc *f = NULL; 2113 u_int32_t frag, cur, cnt = 0; 2114 struct txdmamap_pool_entry *dma; 2115 bus_dmamap_t dmamap; 2116 int error, i; 2117 struct mbuf *n; 2118 2119 dma = SIMPLEQ_FIRST(&sc->txdma_list); 2120 if (dma == NULL) { 2121 return ENOMEM; 2122 } 2123 dmamap = dma->dmamap; 2124 2125 error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m_head, 0); 2126 if (error) { 2127 struct mbuf *m; 2128 int i = 0; 2129 for (m = m_head; m; m = m->m_next) 2130 i++; 2131 printf("ti_encap: bus_dmamap_load_mbuf (len %d, %d frags) " 2132 "error %d\n", m_head->m_pkthdr.len, i, error); 2133 return (ENOMEM); 2134 } 2135 2136 cur = frag = *txidx; 2137 2138 /* 2139 * Start packing the mbufs in this chain into 2140 * the fragment pointers. Stop when we run out 2141 * of fragments or hit the end of the mbuf chain. 2142 */ 2143 for (i = 0; i < dmamap->dm_nsegs; i++) { 2144 if (sc->ti_hwrev == TI_HWREV_TIGON) { 2145 if (frag > 383) 2146 CSR_WRITE_4(sc, TI_WINBASE, 2147 TI_TX_RING_BASE + 6144); 2148 else if (frag > 255) 2149 CSR_WRITE_4(sc, TI_WINBASE, 2150 TI_TX_RING_BASE + 4096); 2151 else if (frag > 127) 2152 CSR_WRITE_4(sc, TI_WINBASE, 2153 TI_TX_RING_BASE + 2048); 2154 else 2155 CSR_WRITE_4(sc, TI_WINBASE, 2156 TI_TX_RING_BASE); 2157 f = &sc->ti_rdata->ti_tx_ring_nic[frag % 128]; 2158 } else 2159 f = &sc->ti_rdata->ti_tx_ring[frag]; 2160 if (sc->ti_cdata.ti_tx_chain[frag] != NULL) 2161 break; 2162 TI_HOSTADDR(f->ti_addr) = dmamap->dm_segs[i].ds_addr; 2163 f->ti_len = dmamap->dm_segs[i].ds_len; 2164 f->ti_flags = 0; 2165 n = m_aux_find(m_head, AF_LINK, ETHERTYPE_VLAN); 2166 if (n) { 2167 f->ti_flags |= TI_BDFLAG_VLAN_TAG; 2168 f->ti_vlan_tag = *mtod(n, int *); 2169 } else { 2170 f->ti_vlan_tag = 0; 2171 } 2172 /* 2173 * Sanity check: avoid coming within 16 descriptors 2174 * of the end of the ring. 2175 */ 2176 if ((TI_TX_RING_CNT - (sc->ti_txcnt + cnt)) < 16) 2177 return(ENOBUFS); 2178 cur = frag; 2179 TI_INC(frag, TI_TX_RING_CNT); 2180 cnt++; 2181 } 2182 2183 if (i < dmamap->dm_nsegs) 2184 return(ENOBUFS); 2185 2186 if (frag == sc->ti_tx_saved_considx) 2187 return(ENOBUFS); 2188 2189 if (sc->ti_hwrev == TI_HWREV_TIGON) 2190 sc->ti_rdata->ti_tx_ring_nic[cur % 128].ti_flags |= 2191 TI_BDFLAG_END; 2192 else 2193 sc->ti_rdata->ti_tx_ring[cur].ti_flags |= TI_BDFLAG_END; 2194 sc->ti_cdata.ti_tx_chain[cur] = m_head; 2195 SIMPLEQ_REMOVE_HEAD(&sc->txdma_list, dma, link); 2196 sc->txdma[cur] = dma; 2197 sc->ti_txcnt += cnt; 2198 2199 *txidx = frag; 2200 2201 return(0); 2202 } 2203 2204 /* 2205 * Main transmit routine. 
To avoid having to do mbuf copies, we put pointers 2206 * to the mbuf data regions directly in the transmit descriptors. 2207 */ 2208 static void ti_start(ifp) 2209 struct ifnet *ifp; 2210 { 2211 struct ti_softc *sc; 2212 struct mbuf *m_head = NULL; 2213 u_int32_t prodidx = 0; 2214 2215 sc = ifp->if_softc; 2216 2217 prodidx = CSR_READ_4(sc, TI_MB_SENDPROD_IDX); 2218 2219 while (sc->ti_cdata.ti_tx_chain[prodidx] == NULL) { 2220 IFQ_POLL(&ifp->if_snd, m_head); 2221 if (m_head == NULL) 2222 break; 2223 2224 /* 2225 * Pack the data into the transmit ring. If we 2226 * don't have room, set the OACTIVE flag and wait 2227 * for the NIC to drain the ring. 2228 */ 2229 if (ti_encap(sc, m_head, &prodidx)) { 2230 ifp->if_flags |= IFF_OACTIVE; 2231 break; 2232 } 2233 2234 IFQ_DEQUEUE(&ifp->if_snd, m_head); 2235 2236 /* 2237 * If there's a BPF listener, bounce a copy of this frame 2238 * to him. 2239 */ 2240 #if NBPFILTER > 0 2241 if (ifp->if_bpf) 2242 bpf_mtap(ifp->if_bpf, m_head); 2243 #endif 2244 } 2245 2246 /* Transmit */ 2247 CSR_WRITE_4(sc, TI_MB_SENDPROD_IDX, prodidx); 2248 2249 /* 2250 * Set a timeout in case the chip goes out to lunch. 2251 */ 2252 ifp->if_timer = 5; 2253 2254 return; 2255 } 2256 2257 static void ti_init(xsc) 2258 void *xsc; 2259 { 2260 struct ti_softc *sc = xsc; 2261 int s; 2262 2263 s = splimp(); 2264 2265 /* Cancel pending I/O and flush buffers. */ 2266 ti_stop(sc); 2267 2268 /* Init the gen info block, ring control blocks and firmware. */ 2269 if (ti_gibinit(sc)) { 2270 printf("%s: initialization failure\n", sc->sc_dev.dv_xname); 2271 splx(s); 2272 return; 2273 } 2274 2275 splx(s); 2276 2277 return; 2278 } 2279 2280 static void ti_init2(sc) 2281 struct ti_softc *sc; 2282 { 2283 struct ti_cmd_desc cmd; 2284 struct ifnet *ifp; 2285 u_int8_t *m; 2286 struct ifmedia *ifm; 2287 int tmp; 2288 2289 ifp = &sc->ethercom.ec_if; 2290 2291 /* Specify MTU and interface index. */ 2292 CSR_WRITE_4(sc, TI_GCR_IFINDEX, sc->sc_dev.dv_unit); /* ??? */ 2293 if ((sc->ethercom.ec_capenable & ETHERCAP_VLAN_MTU) && 2294 ifp->if_mtu < ETHERMTU + ETHER_VLAN_ENCAP_LEN) 2295 CSR_WRITE_4(sc, TI_GCR_IFMTU, ETHER_MAX_LEN + 2296 ETHER_VLAN_ENCAP_LEN); 2297 else 2298 CSR_WRITE_4(sc, TI_GCR_IFMTU, ifp->if_mtu + 2299 ETHER_HDR_LEN + ETHER_CRC_LEN); 2300 TI_DO_CMD(TI_CMD_UPDATE_GENCOM, 0, 0); 2301 2302 /* Load our MAC address. */ 2303 m = (u_int8_t *)LLADDR(ifp->if_sadl); 2304 CSR_WRITE_4(sc, TI_GCR_PAR0, (m[0] << 8) | m[1]); 2305 CSR_WRITE_4(sc, TI_GCR_PAR1, (m[2] << 24) | (m[3] << 16) 2306 | (m[4] << 8) | m[5]); 2307 TI_DO_CMD(TI_CMD_SET_MAC_ADDR, 0, 0); 2308 2309 /* Enable or disable promiscuous mode as needed. */ 2310 if (ifp->if_flags & IFF_PROMISC) { 2311 TI_DO_CMD(TI_CMD_SET_PROMISC_MODE, TI_CMD_CODE_PROMISC_ENB, 0); 2312 } else { 2313 TI_DO_CMD(TI_CMD_SET_PROMISC_MODE, TI_CMD_CODE_PROMISC_DIS, 0); 2314 } 2315 2316 /* Program multicast filter. */ 2317 ti_setmulti(sc); 2318 2319 /* 2320 * If this is a Tigon 1, we should tell the 2321 * firmware to use software packet filtering. 2322 */ 2323 if (sc->ti_hwrev == TI_HWREV_TIGON) { 2324 TI_DO_CMD(TI_CMD_FDR_FILTERING, TI_CMD_CODE_FILT_ENB, 0); 2325 } 2326 2327 /* Init RX ring. */ 2328 ti_init_rx_ring_std(sc); 2329 2330 /* Init jumbo RX ring. */ 2331 if (ifp->if_mtu > (MCLBYTES - ETHER_HDR_LEN - ETHER_CRC_LEN)) 2332 ti_init_rx_ring_jumbo(sc); 2333 2334 /* 2335 * If this is a Tigon 2, we can also configure the 2336 * mini ring. 
2337 */ 2338 if (sc->ti_hwrev == TI_HWREV_TIGON_II) 2339 ti_init_rx_ring_mini(sc); 2340 2341 CSR_WRITE_4(sc, TI_GCR_RXRETURNCONS_IDX, 0); 2342 sc->ti_rx_saved_considx = 0; 2343 2344 /* Init TX ring. */ 2345 ti_init_tx_ring(sc); 2346 2347 /* Tell firmware we're alive. */ 2348 TI_DO_CMD(TI_CMD_HOST_STATE, TI_CMD_CODE_STACK_UP, 0); 2349 2350 /* Enable host interrupts. */ 2351 CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0); 2352 2353 ifp->if_flags |= IFF_RUNNING; 2354 ifp->if_flags &= ~IFF_OACTIVE; 2355 2356 /* 2357 * Make sure to set media properly. We have to do this 2358 * here since we have to issue commands in order to set 2359 * the link negotiation and we can't issue commands until 2360 * the firmware is running. 2361 */ 2362 ifm = &sc->ifmedia; 2363 tmp = ifm->ifm_media; 2364 ifm->ifm_media = ifm->ifm_cur->ifm_media; 2365 ti_ifmedia_upd(ifp); 2366 ifm->ifm_media = tmp; 2367 2368 return; 2369 } 2370 2371 /* 2372 * Set media options. 2373 */ 2374 static int ti_ifmedia_upd(ifp) 2375 struct ifnet *ifp; 2376 { 2377 struct ti_softc *sc; 2378 struct ifmedia *ifm; 2379 struct ti_cmd_desc cmd; 2380 2381 sc = ifp->if_softc; 2382 ifm = &sc->ifmedia; 2383 2384 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 2385 return(EINVAL); 2386 2387 switch(IFM_SUBTYPE(ifm->ifm_media)) { 2388 case IFM_AUTO: 2389 CSR_WRITE_4(sc, TI_GCR_GLINK, TI_GLNK_PREF|TI_GLNK_1000MB| 2390 TI_GLNK_FULL_DUPLEX|TI_GLNK_RX_FLOWCTL_Y| 2391 TI_GLNK_AUTONEGENB|TI_GLNK_ENB); 2392 CSR_WRITE_4(sc, TI_GCR_LINK, TI_LNK_100MB|TI_LNK_10MB| 2393 TI_LNK_FULL_DUPLEX|TI_LNK_HALF_DUPLEX| 2394 TI_LNK_AUTONEGENB|TI_LNK_ENB); 2395 TI_DO_CMD(TI_CMD_LINK_NEGOTIATION, 2396 TI_CMD_CODE_NEGOTIATE_BOTH, 0); 2397 break; 2398 case IFM_1000_SX: 2399 case IFM_1000_TX: 2400 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) { 2401 CSR_WRITE_4(sc, TI_GCR_GLINK, 2402 TI_GLNK_PREF|TI_GLNK_1000MB|TI_GLNK_FULL_DUPLEX| 2403 TI_GLNK_RX_FLOWCTL_Y|TI_GLNK_ENB); 2404 } else { 2405 CSR_WRITE_4(sc, TI_GCR_GLINK, 2406 TI_GLNK_PREF|TI_GLNK_1000MB| 2407 TI_GLNK_RX_FLOWCTL_Y|TI_GLNK_ENB); 2408 } 2409 CSR_WRITE_4(sc, TI_GCR_LINK, 0); 2410 TI_DO_CMD(TI_CMD_LINK_NEGOTIATION, 2411 TI_CMD_CODE_NEGOTIATE_GIGABIT, 0); 2412 break; 2413 case IFM_100_FX: 2414 case IFM_10_FL: 2415 case IFM_100_TX: 2416 case IFM_10_T: 2417 CSR_WRITE_4(sc, TI_GCR_GLINK, 0); 2418 CSR_WRITE_4(sc, TI_GCR_LINK, TI_LNK_ENB|TI_LNK_PREF); 2419 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_FX || 2420 IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) { 2421 TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_100MB); 2422 } else { 2423 TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_10MB); 2424 } 2425 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) { 2426 TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_FULL_DUPLEX); 2427 } else { 2428 TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_HALF_DUPLEX); 2429 } 2430 TI_DO_CMD(TI_CMD_LINK_NEGOTIATION, 2431 TI_CMD_CODE_NEGOTIATE_10_100, 0); 2432 break; 2433 } 2434 2435 sc->ethercom.ec_if.if_baudrate = 2436 ifmedia_baudrate(ifm->ifm_media); 2437 2438 return(0); 2439 } 2440 2441 /* 2442 * Report current media status. 
2443 */ 2444 static void ti_ifmedia_sts(ifp, ifmr) 2445 struct ifnet *ifp; 2446 struct ifmediareq *ifmr; 2447 { 2448 struct ti_softc *sc; 2449 u_int32_t media = 0; 2450 2451 sc = ifp->if_softc; 2452 2453 ifmr->ifm_status = IFM_AVALID; 2454 ifmr->ifm_active = IFM_ETHER; 2455 2456 if (sc->ti_linkstat == TI_EV_CODE_LINK_DOWN) 2457 return; 2458 2459 ifmr->ifm_status |= IFM_ACTIVE; 2460 2461 if (sc->ti_linkstat == TI_EV_CODE_GIG_LINK_UP) { 2462 media = CSR_READ_4(sc, TI_GCR_GLINK_STAT); 2463 if (sc->ti_copper) 2464 ifmr->ifm_active |= IFM_1000_TX; 2465 else 2466 ifmr->ifm_active |= IFM_1000_SX; 2467 if (media & TI_GLNK_FULL_DUPLEX) 2468 ifmr->ifm_active |= IFM_FDX; 2469 else 2470 ifmr->ifm_active |= IFM_HDX; 2471 } else if (sc->ti_linkstat == TI_EV_CODE_LINK_UP) { 2472 media = CSR_READ_4(sc, TI_GCR_LINK_STAT); 2473 if (sc->ti_copper) { 2474 if (media & TI_LNK_100MB) 2475 ifmr->ifm_active |= IFM_100_TX; 2476 if (media & TI_LNK_10MB) 2477 ifmr->ifm_active |= IFM_10_T; 2478 } else { 2479 if (media & TI_LNK_100MB) 2480 ifmr->ifm_active |= IFM_100_FX; 2481 if (media & TI_LNK_10MB) 2482 ifmr->ifm_active |= IFM_10_FL; 2483 } 2484 if (media & TI_LNK_FULL_DUPLEX) 2485 ifmr->ifm_active |= IFM_FDX; 2486 if (media & TI_LNK_HALF_DUPLEX) 2487 ifmr->ifm_active |= IFM_HDX; 2488 } 2489 2490 sc->ethercom.ec_if.if_baudrate = 2491 ifmedia_baudrate(sc->ifmedia.ifm_media); 2492 2493 return; 2494 } 2495 2496 static int 2497 ti_ether_ioctl(ifp, cmd, data) 2498 struct ifnet *ifp; 2499 u_long cmd; 2500 caddr_t data; 2501 { 2502 struct ifaddr *ifa = (struct ifaddr *) data; 2503 struct ti_softc *sc = ifp->if_softc; 2504 2505 switch (cmd) { 2506 case SIOCSIFADDR: 2507 ifp->if_flags |= IFF_UP; 2508 2509 switch (ifa->ifa_addr->sa_family) { 2510 #ifdef INET 2511 case AF_INET: 2512 ti_init(sc); 2513 arp_ifinit(ifp, ifa); 2514 break; 2515 #endif 2516 #ifdef NS 2517 case AF_NS: 2518 { 2519 struct ns_addr *ina = &IA_SNS(ifa)->sns_addr; 2520 2521 if (ns_nullhost(*ina)) 2522 ina->x_host = *(union ns_host *) 2523 LLADDR(ifp->if_sadl); 2524 else 2525 bcopy(ina->x_host.c_host, LLADDR(ifp->if_sadl), 2526 ifp->if_addrlen); 2527 /* Set new address. */ 2528 ti_init(sc); 2529 break; 2530 } 2531 #endif 2532 default: 2533 ti_init(sc); 2534 break; 2535 } 2536 break; 2537 2538 default: 2539 return (EINVAL); 2540 } 2541 2542 return (0); 2543 } 2544 2545 static int ti_ioctl(ifp, command, data) 2546 struct ifnet *ifp; 2547 u_long command; 2548 caddr_t data; 2549 { 2550 struct ti_softc *sc = ifp->if_softc; 2551 struct ifreq *ifr = (struct ifreq *) data; 2552 int s, error = 0; 2553 struct ti_cmd_desc cmd; 2554 2555 s = splimp(); 2556 2557 switch(command) { 2558 case SIOCSIFADDR: 2559 case SIOCGIFADDR: 2560 error = ti_ether_ioctl(ifp, command, data); 2561 break; 2562 case SIOCSIFMTU: 2563 if (ifr->ifr_mtu > TI_JUMBO_MTU) 2564 error = EINVAL; 2565 else { 2566 ifp->if_mtu = ifr->ifr_mtu; 2567 ti_init(sc); 2568 } 2569 break; 2570 case SIOCSIFFLAGS: 2571 if (ifp->if_flags & IFF_UP) { 2572 /* 2573 * If only the state of the PROMISC flag changed, 2574 * then just use the 'set promisc mode' command 2575 * instead of reinitializing the entire NIC. Doing 2576 * a full re-init means reloading the firmware and 2577 * waiting for it to start up, which may take a 2578 * second or two. 
2579 */ 2580 if (ifp->if_flags & IFF_RUNNING && 2581 ifp->if_flags & IFF_PROMISC && 2582 !(sc->ti_if_flags & IFF_PROMISC)) { 2583 TI_DO_CMD(TI_CMD_SET_PROMISC_MODE, 2584 TI_CMD_CODE_PROMISC_ENB, 0); 2585 } else if (ifp->if_flags & IFF_RUNNING && 2586 !(ifp->if_flags & IFF_PROMISC) && 2587 sc->ti_if_flags & IFF_PROMISC) { 2588 TI_DO_CMD(TI_CMD_SET_PROMISC_MODE, 2589 TI_CMD_CODE_PROMISC_DIS, 0); 2590 } else 2591 ti_init(sc); 2592 } else { 2593 if (ifp->if_flags & IFF_RUNNING) { 2594 ti_stop(sc); 2595 } 2596 } 2597 sc->ti_if_flags = ifp->if_flags; 2598 error = 0; 2599 break; 2600 case SIOCADDMULTI: 2601 case SIOCDELMULTI: 2602 if (command == SIOCADDMULTI) 2603 ether_addmulti(ifr, &sc->ethercom); 2604 else 2605 ether_delmulti(ifr, &sc->ethercom); 2606 if (ifp->if_flags & IFF_RUNNING) { 2607 ti_setmulti(sc); 2608 error = 0; 2609 } 2610 break; 2611 case SIOCSIFMEDIA: 2612 case SIOCGIFMEDIA: 2613 error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command); 2614 break; 2615 default: 2616 error = EINVAL; 2617 break; 2618 } 2619 2620 (void)splx(s); 2621 2622 return(error); 2623 } 2624 2625 static void ti_watchdog(ifp) 2626 struct ifnet *ifp; 2627 { 2628 struct ti_softc *sc; 2629 2630 sc = ifp->if_softc; 2631 2632 printf("%s: watchdog timeout -- resetting\n", sc->sc_dev.dv_xname); 2633 ti_stop(sc); 2634 ti_init(sc); 2635 2636 ifp->if_oerrors++; 2637 2638 return; 2639 } 2640 2641 /* 2642 * Stop the adapter and free any mbufs allocated to the 2643 * RX and TX lists. 2644 */ 2645 static void ti_stop(sc) 2646 struct ti_softc *sc; 2647 { 2648 struct ifnet *ifp; 2649 struct ti_cmd_desc cmd; 2650 2651 ifp = &sc->ethercom.ec_if; 2652 2653 /* Disable host interrupts. */ 2654 CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1); 2655 /* 2656 * Tell firmware we're shutting down. 2657 */ 2658 TI_DO_CMD(TI_CMD_HOST_STATE, TI_CMD_CODE_STACK_DOWN, 0); 2659 2660 /* Halt and reinitialize. */ 2661 ti_chipinit(sc); 2662 ti_mem(sc, 0x2000, 0x100000 - 0x2000, NULL); 2663 ti_chipinit(sc); 2664 2665 /* Free the RX lists. */ 2666 ti_free_rx_ring_std(sc); 2667 2668 /* Free jumbo RX list. */ 2669 ti_free_rx_ring_jumbo(sc); 2670 2671 /* Free mini RX list. */ 2672 ti_free_rx_ring_mini(sc); 2673 2674 /* Free TX buffers. */ 2675 ti_free_tx_ring(sc); 2676 2677 sc->ti_ev_prodidx.ti_idx = 0; 2678 sc->ti_return_prodidx.ti_idx = 0; 2679 sc->ti_tx_considx.ti_idx = 0; 2680 sc->ti_tx_saved_considx = TI_TXCONS_UNSET; 2681 2682 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 2683 2684 return; 2685 } 2686 2687 /* 2688 * Stop all chip I/O so that the kernel's probe routines don't 2689 * get confused by errant DMAs when rebooting. 2690 */ 2691 static void ti_shutdown(v) 2692 void *v; 2693 { 2694 struct ti_softc *sc = v; 2695 2696 ti_chipinit(sc); 2697 2698 return; 2699 } 2700