1 /* $OpenBSD: ti.c,v 1.4 2011/06/21 16:52:45 tedu Exp $ */ 2 3 /* 4 * Copyright (c) 1997, 1998, 1999 5 * Bill Paul <wpaul@ctr.columbia.edu>. All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 3. All advertising materials mentioning features or use of this software 16 * must display the following acknowledgement: 17 * This product includes software developed by Bill Paul. 18 * 4. Neither the name of the author nor the names of any co-contributors 19 * may be used to endorse or promote products derived from this software 20 * without specific prior written permission. 21 * 22 * THIS SOFTWARE IS PROVIDED BY Bill Paul AND CONTRIBUTORS ``AS IS'' AND 23 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 24 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 25 * ARE DISCLAIMED. IN NO EVENT SHALL Bill Paul OR THE VOICES IN HIS HEAD 26 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 32 * THE POSSIBILITY OF SUCH DAMAGE. 
33 * 34 * $FreeBSD: src/sys/pci/if_ti.c,v 1.25 2000/01/18 00:26:29 wpaul Exp $ 35 */ 36 37 /* 38 * Alteon Networks Tigon PCI gigabit ethernet driver for OpenBSD. 39 * 40 * Written by Bill Paul <wpaul@ctr.columbia.edu> 41 * Electrical Engineering Department 42 * Columbia University, New York City 43 */ 44 45 /* 46 * The Alteon Networks Tigon chip contains an embedded R4000 CPU, 47 * gigabit MAC, dual DMA channels and a PCI interface unit. NICs 48 * using the Tigon may have anywhere from 512K to 2MB of SRAM. The 49 * Tigon supports hardware IP, TCP and UCP checksumming, multicast 50 * filtering and jumbo (9014 byte) frames. The hardware is largely 51 * controlled by firmware, which must be loaded into the NIC during 52 * initialization. 53 * 54 * The Tigon 2 contains 2 R4000 CPUs and requires a newer firmware 55 * revision, which supports new features such as extended commands, 56 * extended jumbo receive ring desciptors and a mini receive ring. 57 * 58 * Alteon Networks is to be commended for releasing such a vast amount 59 * of development material for the Tigon NIC without requiring an NDA 60 * (although they really should have done it a long time ago). With 61 * any luck, the other vendors will finally wise up and follow Alteon's 62 * stellar example. 63 * 64 * The following people deserve special thanks: 65 * - Terry Murphy of 3Com, for providing a 3c985 Tigon 1 board 66 * for testing 67 * - Raymond Lee of Netgear, for providing a pair of Netgear 68 * GA620 Tigon 2 boards for testing 69 * - Ulf Zimmermann, for bringing the GA260 to my attention and 70 * convincing me to write this driver. 71 * - Andrew Gallatin for providing FreeBSD/Alpha support. 
72 */ 73 74 #include "bpfilter.h" 75 #include "vlan.h" 76 77 #include <sys/param.h> 78 #include <sys/systm.h> 79 #include <sys/sockio.h> 80 #include <sys/mbuf.h> 81 #include <sys/malloc.h> 82 #include <sys/kernel.h> 83 #include <sys/socket.h> 84 #include <sys/device.h> 85 #include <sys/queue.h> 86 87 #include <net/if.h> 88 #include <net/if_dl.h> 89 #include <net/if_types.h> 90 91 #ifdef INET 92 #include <netinet/in.h> 93 #include <netinet/in_systm.h> 94 #include <netinet/in_var.h> 95 #include <netinet/ip.h> 96 #include <netinet/if_ether.h> 97 #endif 98 99 #include <net/if_media.h> 100 101 #if NBPFILTER > 0 102 #include <net/bpf.h> 103 #endif 104 105 #if NVLAN > 0 106 #include <net/if_types.h> 107 #include <net/if_vlan_var.h> 108 #endif 109 110 #include <machine/bus.h> 111 112 #include <dev/ic/tireg.h> 113 #include <dev/ic/tivar.h> 114 #include <dev/pci/pcireg.h> 115 116 struct cfdriver ti_cd = { 117 NULL, "ti", DV_IFNET 118 }; 119 120 void ti_txeof_tigon1(struct ti_softc *); 121 void ti_txeof_tigon2(struct ti_softc *); 122 void ti_rxeof(struct ti_softc *); 123 124 void ti_stats_update(struct ti_softc *); 125 int ti_encap_tigon1(struct ti_softc *, struct mbuf *, u_int32_t *); 126 int ti_encap_tigon2(struct ti_softc *, struct mbuf *, u_int32_t *); 127 128 int ti_intr(void *); 129 void ti_start(struct ifnet *); 130 int ti_ioctl(struct ifnet *, u_long, caddr_t); 131 void ti_init(void *); 132 void ti_init2(struct ti_softc *); 133 void ti_stop(struct ti_softc *); 134 void ti_watchdog(struct ifnet *); 135 int ti_ifmedia_upd(struct ifnet *); 136 void ti_ifmedia_sts(struct ifnet *, struct ifmediareq *); 137 138 u_int32_t ti_eeprom_putbyte(struct ti_softc *, int); 139 u_int8_t ti_eeprom_getbyte(struct ti_softc *, int, u_int8_t *); 140 int ti_read_eeprom(struct ti_softc *, caddr_t, int, int); 141 142 void ti_add_mcast(struct ti_softc *, struct ether_addr *); 143 void ti_del_mcast(struct ti_softc *, struct ether_addr *); 144 void ti_iff(struct ti_softc *); 145 146 void 
ti_mem_read(struct ti_softc *, u_int32_t, u_int32_t, void *); 147 void ti_mem_write(struct ti_softc *, u_int32_t, u_int32_t, const void*); 148 void ti_mem_set(struct ti_softc *, u_int32_t, u_int32_t); 149 void ti_loadfw(struct ti_softc *); 150 void ti_cmd(struct ti_softc *, struct ti_cmd_desc *); 151 void ti_cmd_ext(struct ti_softc *, struct ti_cmd_desc *, 152 caddr_t, int); 153 void ti_handle_events(struct ti_softc *); 154 int ti_alloc_jumbo_mem(struct ti_softc *); 155 void *ti_jalloc(struct ti_softc *); 156 void ti_jfree(caddr_t, u_int, void *); 157 int ti_newbuf_std(struct ti_softc *, int, struct mbuf *, bus_dmamap_t); 158 int ti_newbuf_mini(struct ti_softc *, int, struct mbuf *, bus_dmamap_t); 159 int ti_newbuf_jumbo(struct ti_softc *, int, struct mbuf *); 160 int ti_init_rx_ring_std(struct ti_softc *); 161 void ti_free_rx_ring_std(struct ti_softc *); 162 int ti_init_rx_ring_jumbo(struct ti_softc *); 163 void ti_free_rx_ring_jumbo(struct ti_softc *); 164 int ti_init_rx_ring_mini(struct ti_softc *); 165 void ti_free_rx_ring_mini(struct ti_softc *); 166 void ti_free_tx_ring(struct ti_softc *); 167 int ti_init_tx_ring(struct ti_softc *); 168 169 int ti_64bitslot_war(struct ti_softc *); 170 int ti_chipinit(struct ti_softc *); 171 void ti_chipinit_pci(struct ti_softc *); 172 void ti_chipinit_sbus(struct ti_softc *); 173 int ti_gibinit(struct ti_softc *); 174 175 /* 176 * Send an instruction or address to the EEPROM, check for ACK. 177 */ 178 u_int32_t 179 ti_eeprom_putbyte(struct ti_softc *sc, int byte) 180 { 181 int i, ack = 0; 182 183 /* 184 * Make sure we're in TX mode. 185 */ 186 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN); 187 188 /* 189 * Feed in each bit and strobe the clock. 
190 */ 191 for (i = 0x80; i; i >>= 1) { 192 if (byte & i) 193 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_DOUT); 194 else 195 TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_DOUT); 196 DELAY(1); 197 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK); 198 DELAY(1); 199 TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK); 200 } 201 202 /* 203 * Turn off TX mode. 204 */ 205 TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN); 206 207 /* 208 * Check for ack. 209 */ 210 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK); 211 ack = CSR_READ_4(sc, TI_MISC_LOCAL_CTL) & TI_MLC_EE_DIN; 212 TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK); 213 214 return (ack); 215 } 216 217 /* 218 * Read a byte of data stored in the EEPROM at address 'addr.' 219 * We have to send two address bytes since the EEPROM can hold 220 * more than 256 bytes of data. 221 */ 222 u_int8_t 223 ti_eeprom_getbyte(struct ti_softc *sc, int addr, u_int8_t *dest) 224 { 225 int i; 226 u_int8_t byte = 0; 227 228 EEPROM_START; 229 230 /* 231 * Send write control code to EEPROM. 232 */ 233 if (ti_eeprom_putbyte(sc, EEPROM_CTL_WRITE)) { 234 printf("%s: failed to send write command, status: %x\n", 235 sc->sc_dv.dv_xname, CSR_READ_4(sc, TI_MISC_LOCAL_CTL)); 236 return (1); 237 } 238 239 /* 240 * Send first byte of address of byte we want to read. 241 */ 242 if (ti_eeprom_putbyte(sc, (addr >> 8) & 0xFF)) { 243 printf("%s: failed to send address, status: %x\n", 244 sc->sc_dv.dv_xname, CSR_READ_4(sc, TI_MISC_LOCAL_CTL)); 245 return (1); 246 } 247 /* 248 * Send second byte address of byte we want to read. 249 */ 250 if (ti_eeprom_putbyte(sc, addr & 0xFF)) { 251 printf("%s: failed to send address, status: %x\n", 252 sc->sc_dv.dv_xname, CSR_READ_4(sc, TI_MISC_LOCAL_CTL)); 253 return (1); 254 } 255 256 EEPROM_STOP; 257 EEPROM_START; 258 /* 259 * Send read control code to EEPROM. 
260 */ 261 if (ti_eeprom_putbyte(sc, EEPROM_CTL_READ)) { 262 printf("%s: failed to send read command, status: %x\n", 263 sc->sc_dv.dv_xname, CSR_READ_4(sc, TI_MISC_LOCAL_CTL)); 264 return (1); 265 } 266 267 /* 268 * Start reading bits from EEPROM. 269 */ 270 TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_TXEN); 271 for (i = 0x80; i; i >>= 1) { 272 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK); 273 DELAY(1); 274 if (CSR_READ_4(sc, TI_MISC_LOCAL_CTL) & TI_MLC_EE_DIN) 275 byte |= i; 276 TI_CLRBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_EE_CLK); 277 DELAY(1); 278 } 279 280 EEPROM_STOP; 281 282 /* 283 * No ACK generated for read, so just return byte. 284 */ 285 286 *dest = byte; 287 288 return (0); 289 } 290 291 /* 292 * Read a sequence of bytes from the EEPROM. 293 */ 294 int 295 ti_read_eeprom(struct ti_softc *sc, caddr_t dest, int off, int cnt) 296 { 297 int err = 0, i; 298 u_int8_t byte = 0; 299 300 for (i = 0; i < cnt; i++) { 301 err = ti_eeprom_getbyte(sc, off + i, &byte); 302 if (err) 303 break; 304 *(dest + i) = byte; 305 } 306 307 return (err ? 1 : 0); 308 } 309 310 /* 311 * NIC memory read function. 312 * Can be used to copy data from NIC local memory. 313 */ 314 void 315 ti_mem_read(struct ti_softc *sc, u_int32_t addr, u_int32_t len, void *buf) 316 { 317 int segptr, segsize, cnt; 318 caddr_t ptr; 319 320 segptr = addr; 321 cnt = len; 322 ptr = buf; 323 324 while(cnt) { 325 if (cnt < TI_WINLEN) 326 segsize = cnt; 327 else 328 segsize = TI_WINLEN - (segptr % TI_WINLEN); 329 CSR_WRITE_4(sc, TI_WINBASE, (segptr & ~(TI_WINLEN - 1))); 330 bus_space_read_region_4(sc->ti_btag, sc->ti_bhandle, 331 TI_WINDOW + (segptr & (TI_WINLEN - 1)), (u_int32_t *)ptr, 332 segsize / 4); 333 ptr += segsize; 334 segptr += segsize; 335 cnt -= segsize; 336 } 337 } 338 339 /* 340 * NIC memory write function. 341 * Can be used to copy data into NIC local memory. 
342 */ 343 void 344 ti_mem_write(struct ti_softc *sc, u_int32_t addr, u_int32_t len, 345 const void *buf) 346 { 347 int segptr, segsize, cnt; 348 const char *ptr; 349 350 segptr = addr; 351 cnt = len; 352 ptr = buf; 353 354 while(cnt) { 355 if (cnt < TI_WINLEN) 356 segsize = cnt; 357 else 358 segsize = TI_WINLEN - (segptr % TI_WINLEN); 359 CSR_WRITE_4(sc, TI_WINBASE, (segptr & ~(TI_WINLEN - 1))); 360 bus_space_write_region_4(sc->ti_btag, sc->ti_bhandle, 361 TI_WINDOW + (segptr & (TI_WINLEN - 1)), (u_int32_t *)ptr, 362 segsize / 4); 363 ptr += segsize; 364 segptr += segsize; 365 cnt -= segsize; 366 } 367 } 368 369 /* 370 * NIC memory write function. 371 * Can be used to clear a section of NIC local memory. 372 */ 373 void 374 ti_mem_set(struct ti_softc *sc, u_int32_t addr, u_int32_t len) 375 { 376 int segptr, segsize, cnt; 377 378 segptr = addr; 379 cnt = len; 380 381 while(cnt) { 382 if (cnt < TI_WINLEN) 383 segsize = cnt; 384 else 385 segsize = TI_WINLEN - (segptr % TI_WINLEN); 386 CSR_WRITE_4(sc, TI_WINBASE, (segptr & ~(TI_WINLEN - 1))); 387 bus_space_set_region_4(sc->ti_btag, sc->ti_bhandle, 388 TI_WINDOW + (segptr & (TI_WINLEN - 1)), 0, segsize / 4); 389 segptr += segsize; 390 cnt -= segsize; 391 } 392 } 393 394 /* 395 * Load firmware image into the NIC. Check that the firmware revision 396 * is acceptable and see if we want the firmware for the Tigon 1 or 397 * Tigon 2. 
398 */ 399 void 400 ti_loadfw(struct ti_softc *sc) 401 { 402 struct tigon_firmware *tf; 403 u_char *buf = NULL; 404 u_int32_t *b; 405 size_t buflen, i, cnt; 406 char *name; 407 int error; 408 409 switch(sc->ti_hwrev) { 410 case TI_HWREV_TIGON: 411 name = "tigon1"; 412 break; 413 case TI_HWREV_TIGON_II: 414 name = "tigon2"; 415 break; 416 default: 417 printf("%s: can't load firmware: unknown hardware rev\n", 418 sc->sc_dv.dv_xname); 419 return; 420 } 421 422 error = loadfirmware(name, &buf, &buflen); 423 if (error) 424 return; 425 /* convert firmware to host byte order */ 426 b = (u_int32_t *)buf; 427 cnt = buflen / sizeof(u_int32_t); 428 for (i = 0; i < cnt; i++) 429 b[i] = letoh32(b[i]); 430 431 tf = (struct tigon_firmware *)buf; 432 if (tf->FwReleaseMajor != TI_FIRMWARE_MAJOR || 433 tf->FwReleaseMinor != TI_FIRMWARE_MINOR || 434 tf->FwReleaseFix != TI_FIRMWARE_FIX) { 435 printf("%s: firmware revision mismatch; want " 436 "%d.%d.%d, got %d.%d.%d\n", sc->sc_dv.dv_xname, 437 TI_FIRMWARE_MAJOR, TI_FIRMWARE_MINOR, 438 TI_FIRMWARE_FIX, tf->FwReleaseMajor, 439 tf->FwReleaseMinor, tf->FwReleaseFix); 440 free(buf, M_DEVBUF); 441 return; 442 } 443 ti_mem_write(sc, tf->FwTextAddr, tf->FwTextLen, 444 (caddr_t)&tf->data[tf->FwTextOffset]); 445 ti_mem_write(sc, tf->FwRodataAddr, tf->FwRodataLen, 446 (caddr_t)&tf->data[tf->FwRodataOffset]); 447 ti_mem_write(sc, tf->FwDataAddr, tf->FwDataLen, 448 (caddr_t)&tf->data[tf->FwDataOffset]); 449 ti_mem_set(sc, tf->FwBssAddr, tf->FwBssLen); 450 ti_mem_set(sc, tf->FwSbssAddr, tf->FwSbssLen); 451 CSR_WRITE_4(sc, TI_CPU_PROGRAM_COUNTER, tf->FwStartAddr); 452 free(buf, M_DEVBUF); 453 } 454 455 /* 456 * Send the NIC a command via the command ring. 
 */
void
ti_cmd(struct ti_softc *sc, struct ti_cmd_desc *cmd)
{
	u_int32_t index;

	index = sc->ti_cmd_saved_prodidx;
	/* A command descriptor is exactly one 32-bit word. */
	CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4), *(u_int32_t *)(cmd));
	TI_INC(index, TI_CMD_RING_CNT);
	/* Advancing the mailbox producer index hands the command to the NIC. */
	CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, index);
	sc->ti_cmd_saved_prodidx = index;
}

/*
 * Send the NIC an extended command. The 'len' parameter specifies the
 * number of command slots to include after the initial command.  Each
 * extra slot carries one 32-bit word taken from 'arg'.
 */
void
ti_cmd_ext(struct ti_softc *sc, struct ti_cmd_desc *cmd, caddr_t arg,
    int len)
{
	u_int32_t index;
	int i;

	index = sc->ti_cmd_saved_prodidx;
	CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4), *(u_int32_t *)(cmd));
	TI_INC(index, TI_CMD_RING_CNT);
	/* Copy the argument words into the slots following the command. */
	for (i = 0; i < len; i++) {
		CSR_WRITE_4(sc, TI_GCR_CMDRING + (index * 4),
		    *(u_int32_t *)(&arg[i * 4]));
		TI_INC(index, TI_CMD_RING_CNT);
	}
	CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, index);
	sc->ti_cmd_saved_prodidx = index;
}

/*
 * Handle events that have triggered interrupts.  Drains the event
 * ring from our saved consumer index up to the NIC's producer index,
 * acknowledging each consumed event by writing the consumer index
 * back to the NIC.
 */
void
ti_handle_events(struct ti_softc *sc)
{
	struct ti_event_desc *e;
	struct ifnet *ifp = &sc->arpcom.ac_if;

	if (sc->ti_rdata->ti_event_ring == NULL)
		return;

	while (sc->ti_ev_saved_considx != sc->ti_ev_prodidx.ti_idx) {
		e = &sc->ti_rdata->ti_event_ring[sc->ti_ev_saved_considx];
		switch (TI_EVENT_EVENT(e)) {
		case TI_EV_LINKSTAT_CHANGED:
			sc->ti_linkstat = TI_EVENT_CODE(e);
			switch (sc->ti_linkstat) {
			case TI_EV_CODE_LINK_UP:
			case TI_EV_CODE_GIG_LINK_UP:
			    {
				struct ifmediareq ifmr;

				/* Query current media to learn duplex. */
				bzero(&ifmr, sizeof(ifmr));
				ti_ifmedia_sts(ifp, &ifmr);
				if (ifmr.ifm_active & IFM_FDX) {
					ifp->if_link_state =
					    LINK_STATE_FULL_DUPLEX;
				} else {
					ifp->if_link_state =
					    LINK_STATE_HALF_DUPLEX;
				}
				if_link_state_change(ifp);
				ifp->if_baudrate =
				    ifmedia_baudrate(ifmr.ifm_active);
				break;
			    }
			case TI_EV_CODE_LINK_DOWN:
				ifp->if_link_state = LINK_STATE_DOWN;
				if_link_state_change(ifp);
				ifp->if_baudrate = 0;
				break;
			default:
				printf("%s: unknown link state code %d\n",
				    sc->sc_dv.dv_xname, sc->ti_linkstat);
			}
			break;
		case TI_EV_ERROR:
			if (TI_EVENT_CODE(e) == TI_EV_CODE_ERR_INVAL_CMD)
				printf("%s: invalid command\n",
				    sc->sc_dv.dv_xname);
			else if (TI_EVENT_CODE(e) == TI_EV_CODE_ERR_UNIMP_CMD)
				printf("%s: unknown command\n",
				    sc->sc_dv.dv_xname);
			else if (TI_EVENT_CODE(e) == TI_EV_CODE_ERR_BADCFG)
				printf("%s: bad config data\n",
				    sc->sc_dv.dv_xname);
			break;
		case TI_EV_FIRMWARE_UP:
			/* Firmware finished booting; complete device init. */
			ti_init2(sc);
			break;
		case TI_EV_STATS_UPDATED:
			ti_stats_update(sc);
			break;
		case TI_EV_RESET_JUMBO_RING:
		case TI_EV_MCAST_UPDATED:
			/* Who cares. */
			break;
		default:
			printf("%s: unknown event: %d\n", sc->sc_dv.dv_xname,
			    TI_EVENT_EVENT(e));
			break;
		}
		/* Advance the consumer index. */
		TI_INC(sc->ti_ev_saved_considx, TI_EVENT_RING_CNT);
		CSR_WRITE_4(sc, TI_GCR_EVENTCONS_IDX, sc->ti_ev_saved_considx);
	}
}

/*
 * Memory management for the jumbo receive ring is a pain in the
 * butt. We need to allocate at least 9018 bytes of space per frame,
 * _and_ it has to be contiguous (unless you use the extended
 * jumbo descriptor format). Using malloc() all the time won't
 * work: malloc() allocates memory in powers of two, which means we
 * would end up wasting a considerable amount of space by allocating
 * 9K chunks. We don't have a jumbo mbuf cluster pool. Thus, we have
 * to do our own memory management.
 *
 * The driver needs to allocate a contiguous chunk of memory at boot
 * time. We then chop this up ourselves into 9K pieces and use them
 * as external mbuf storage.
 *
 * One issue here is how much memory to allocate. The jumbo ring has
 * 256 slots in it, but at 9K per slot than can consume over 2MB of
 * RAM. This is a bit much, especially considering we also need
 * RAM for the standard ring and mini ring (on the Tigon 2). To
 * save space, we only actually allocate enough memory for 64 slots
 * by default, which works out to between 500 and 600K. This can
 * be tuned by changing a #define in if_tireg.h.
 */

/*
 * Allocate and DMA-map the jumbo buffer pool, then carve it into
 * TI_JSLOTS slots of TI_JLEN bytes tracked by a free list.
 * Returns 0 on success or ENOBUFS, unwinding the bus_dma resources
 * via the 'state' counter on failure.
 *
 * NOTE(review): if malloc() of a ti_jpool_entry fails partway
 * through the carve-up loop, the entries already placed on
 * ti_jfree_listhead are not freed here — presumably cleaned up (or
 * leaked) by the caller failing attach; verify.
 */
int
ti_alloc_jumbo_mem(struct ti_softc *sc)
{
	caddr_t ptr, kva;
	bus_dma_segment_t seg;
	int i, rseg, state, error;
	struct ti_jpool_entry *entry;

	state = error = 0;

	/* Grab a big chunk o' storage. */
	if (bus_dmamem_alloc(sc->sc_dmatag, TI_JMEM, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		printf("%s: can't alloc rx buffers\n", sc->sc_dv.dv_xname);
		return (ENOBUFS);
	}

	state = 1;
	if (bus_dmamem_map(sc->sc_dmatag, &seg, rseg, TI_JMEM, &kva,
	    BUS_DMA_NOWAIT)) {
		printf("%s: can't map dma buffers (%d bytes)\n",
		    sc->sc_dv.dv_xname, TI_JMEM);
		error = ENOBUFS;
		goto out;
	}

	state = 2;
	if (bus_dmamap_create(sc->sc_dmatag, TI_JMEM, 1, TI_JMEM, 0,
	    BUS_DMA_NOWAIT, &sc->ti_cdata.ti_rx_jumbo_map)) {
		printf("%s: can't create dma map\n", sc->sc_dv.dv_xname);
		error = ENOBUFS;
		goto out;
	}

	state = 3;
	if (bus_dmamap_load(sc->sc_dmatag, sc->ti_cdata.ti_rx_jumbo_map, kva,
	    TI_JMEM, NULL, BUS_DMA_NOWAIT)) {
		printf("%s: can't load dma map\n", sc->sc_dv.dv_xname);
		error = ENOBUFS;
		goto out;
	}

	state = 4;
	sc->ti_cdata.ti_jumbo_buf = (caddr_t)kva;

	SLIST_INIT(&sc->ti_jfree_listhead);
	SLIST_INIT(&sc->ti_jinuse_listhead);

	/*
	 * Now divide it up into 9K pieces and save the addresses
	 * in an array.
	 */
	ptr = sc->ti_cdata.ti_jumbo_buf;
	for (i = 0; i < TI_JSLOTS; i++) {
		sc->ti_cdata.ti_jslots[i].ti_buf = ptr;
		sc->ti_cdata.ti_jslots[i].ti_inuse = 0;
		ptr += TI_JLEN;
		entry = malloc(sizeof(struct ti_jpool_entry),
		    M_DEVBUF, M_NOWAIT);
		if (entry == NULL) {
			sc->ti_cdata.ti_jumbo_buf = NULL;
			printf("%s: no memory for jumbo buffer queue\n",
			    sc->sc_dv.dv_xname);
			error = ENOBUFS;
			goto out;
		}
		entry->slot = i;
		SLIST_INSERT_HEAD(&sc->ti_jfree_listhead, entry, jpool_entries);
	}
out:
	if (error != 0) {
		/* Unwind in reverse order of acquisition. */
		switch (state) {
		case 4:
			bus_dmamap_unload(sc->sc_dmatag,
			    sc->ti_cdata.ti_rx_jumbo_map);
			/* FALLTHROUGH */
		case 3:
			bus_dmamap_destroy(sc->sc_dmatag,
			    sc->ti_cdata.ti_rx_jumbo_map);
			/* FALLTHROUGH */
		case 2:
			bus_dmamem_unmap(sc->sc_dmatag, kva, TI_JMEM);
			/* FALLTHROUGH */
		case 1:
			bus_dmamem_free(sc->sc_dmatag, &seg, rseg);
			break;
		default:
			break;
		}
	}

	return (error);
}

/*
 * Allocate a jumbo buffer: pop a slot off the free list, mark it
 * in use, and return its KVA (NULL when the pool is exhausted).
 */
void *
ti_jalloc(struct ti_softc *sc)
{
	struct ti_jpool_entry *entry;

	entry = SLIST_FIRST(&sc->ti_jfree_listhead);

	if (entry == NULL)
		return (NULL);

	SLIST_REMOVE_HEAD(&sc->ti_jfree_listhead, jpool_entries);
	SLIST_INSERT_HEAD(&sc->ti_jinuse_listhead, entry, jpool_entries);
	sc->ti_cdata.ti_jslots[entry->slot].ti_inuse = 1;
	return (sc->ti_cdata.ti_jslots[entry->slot].ti_buf);
}

/*
 * Release a jumbo buffer.  Used as the external-storage free routine
 * for jumbo mbufs (see MEXTADD in ti_newbuf_jumbo()); 'arg' carries
 * the softc pointer registered at attach time.
 */
void
ti_jfree(caddr_t buf, u_int size, void *arg)
{
	struct ti_softc *sc;
	int i;
	struct ti_jpool_entry *entry;

	/* Extract the softc struct pointer.
*/ 716 sc = (struct ti_softc *)arg; 717 718 if (sc == NULL) 719 panic("ti_jfree: can't find softc pointer!"); 720 721 /* calculate the slot this buffer belongs to */ 722 i = ((vaddr_t)buf - (vaddr_t)sc->ti_cdata.ti_jumbo_buf) / TI_JLEN; 723 724 if ((i < 0) || (i >= TI_JSLOTS)) 725 panic("ti_jfree: asked to free buffer that we don't manage!"); 726 else if (sc->ti_cdata.ti_jslots[i].ti_inuse == 0) 727 panic("ti_jfree: buffer already free!"); 728 729 sc->ti_cdata.ti_jslots[i].ti_inuse--; 730 if(sc->ti_cdata.ti_jslots[i].ti_inuse == 0) { 731 entry = SLIST_FIRST(&sc->ti_jinuse_listhead); 732 if (entry == NULL) 733 panic("ti_jfree: buffer not in use!"); 734 entry->slot = i; 735 SLIST_REMOVE_HEAD(&sc->ti_jinuse_listhead, jpool_entries); 736 SLIST_INSERT_HEAD(&sc->ti_jfree_listhead, 737 entry, jpool_entries); 738 } 739 } 740 741 /* 742 * Intialize a standard receive ring descriptor. 743 */ 744 int 745 ti_newbuf_std(struct ti_softc *sc, int i, struct mbuf *m, 746 bus_dmamap_t dmamap) 747 { 748 struct mbuf *m_new = NULL; 749 struct ti_rx_desc *r; 750 751 if (dmamap == NULL) { 752 /* if (m) panic() */ 753 754 if (bus_dmamap_create(sc->sc_dmatag, MCLBYTES, 1, MCLBYTES, 755 0, BUS_DMA_NOWAIT, &dmamap)) { 756 printf("%s: can't create recv map\n", 757 sc->sc_dv.dv_xname); 758 return (ENOMEM); 759 } 760 } else if (m == NULL) 761 bus_dmamap_unload(sc->sc_dmatag, dmamap); 762 763 sc->ti_cdata.ti_rx_std_map[i] = dmamap; 764 765 if (m == NULL) { 766 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 767 if (m_new == NULL) 768 return (ENOBUFS); 769 770 MCLGET(m_new, M_DONTWAIT); 771 if (!(m_new->m_flags & M_EXT)) { 772 m_freem(m_new); 773 return (ENOBUFS); 774 } 775 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 776 777 m_adj(m_new, ETHER_ALIGN); 778 779 if (bus_dmamap_load_mbuf(sc->sc_dmatag, dmamap, m_new, 780 BUS_DMA_NOWAIT)) 781 return (ENOBUFS); 782 783 } else { 784 /* 785 * We're re-using a previously allocated mbuf; 786 * be sure to re-init pointers and lengths to 787 * default values. 
788 */ 789 m_new = m; 790 m_new->m_len = m_new->m_pkthdr.len = MCLBYTES; 791 m_new->m_data = m_new->m_ext.ext_buf; 792 m_adj(m_new, ETHER_ALIGN); 793 } 794 795 sc->ti_cdata.ti_rx_std_chain[i] = m_new; 796 r = &sc->ti_rdata->ti_rx_std_ring[i]; 797 TI_HOSTADDR(r->ti_addr) = dmamap->dm_segs[0].ds_addr; 798 r->ti_type = TI_BDTYPE_RECV_BD; 799 r->ti_flags = TI_BDFLAG_IP_CKSUM; 800 r->ti_len = dmamap->dm_segs[0].ds_len; 801 r->ti_idx = i; 802 803 if ((dmamap->dm_segs[0].ds_addr & ~(MCLBYTES - 1)) != 804 ((dmamap->dm_segs[0].ds_addr + dmamap->dm_segs[0].ds_len - 1) & 805 ~(MCLBYTES - 1))) 806 panic("%s: overwritten!!!", sc->sc_dv.dv_xname); 807 808 return (0); 809 } 810 811 /* 812 * Intialize a mini receive ring descriptor. This only applies to 813 * the Tigon 2. 814 */ 815 int 816 ti_newbuf_mini(struct ti_softc *sc, int i, struct mbuf *m, 817 bus_dmamap_t dmamap) 818 { 819 struct mbuf *m_new = NULL; 820 struct ti_rx_desc *r; 821 822 if (dmamap == NULL) { 823 /* if (m) panic() */ 824 825 if (bus_dmamap_create(sc->sc_dmatag, MHLEN, 1, MHLEN, 826 0, BUS_DMA_NOWAIT, &dmamap)) { 827 printf("%s: can't create recv map\n", 828 sc->sc_dv.dv_xname); 829 return (ENOMEM); 830 } 831 } else if (m == NULL) 832 bus_dmamap_unload(sc->sc_dmatag, dmamap); 833 834 sc->ti_cdata.ti_rx_mini_map[i] = dmamap; 835 836 if (m == NULL) { 837 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 838 if (m_new == NULL) 839 return (ENOBUFS); 840 m_new->m_len = m_new->m_pkthdr.len = MHLEN; 841 m_adj(m_new, ETHER_ALIGN); 842 843 if (bus_dmamap_load_mbuf(sc->sc_dmatag, dmamap, m_new, 844 BUS_DMA_NOWAIT)) 845 return (ENOBUFS); 846 847 } else { 848 /* 849 * We're re-using a previously allocated mbuf; 850 * be sure to re-init pointers and lengths to 851 * default values. 
852 */ 853 m_new = m; 854 m_new->m_data = m_new->m_pktdat; 855 m_new->m_len = m_new->m_pkthdr.len = MHLEN; 856 } 857 858 r = &sc->ti_rdata->ti_rx_mini_ring[i]; 859 sc->ti_cdata.ti_rx_mini_chain[i] = m_new; 860 TI_HOSTADDR(r->ti_addr) = dmamap->dm_segs[0].ds_addr; 861 r->ti_type = TI_BDTYPE_RECV_BD; 862 r->ti_flags = TI_BDFLAG_MINI_RING | TI_BDFLAG_IP_CKSUM; 863 r->ti_len = dmamap->dm_segs[0].ds_len; 864 r->ti_idx = i; 865 866 return (0); 867 } 868 869 /* 870 * Initialize a jumbo receive ring descriptor. This allocates 871 * a jumbo buffer from the pool managed internally by the driver. 872 */ 873 int 874 ti_newbuf_jumbo(struct ti_softc *sc, int i, struct mbuf *m) 875 { 876 struct mbuf *m_new = NULL; 877 struct ti_rx_desc *r; 878 879 if (m == NULL) { 880 caddr_t buf = NULL; 881 882 /* Allocate the mbuf. */ 883 MGETHDR(m_new, M_DONTWAIT, MT_DATA); 884 if (m_new == NULL) 885 return (ENOBUFS); 886 887 /* Allocate the jumbo buffer */ 888 buf = ti_jalloc(sc); 889 if (buf == NULL) { 890 m_freem(m_new); 891 return (ENOBUFS); 892 } 893 894 /* Attach the buffer to the mbuf. */ 895 m_new->m_len = m_new->m_pkthdr.len = TI_JUMBO_FRAMELEN; 896 MEXTADD(m_new, buf, TI_JUMBO_FRAMELEN, 0, ti_jfree, sc); 897 } else { 898 /* 899 * We're re-using a previously allocated mbuf; 900 * be sure to re-init pointers and lengths to 901 * default values. 902 */ 903 m_new = m; 904 m_new->m_data = m_new->m_ext.ext_buf; 905 m_new->m_ext.ext_size = TI_JUMBO_FRAMELEN; 906 } 907 908 m_adj(m_new, ETHER_ALIGN); 909 /* Set up the descriptor. */ 910 r = &sc->ti_rdata->ti_rx_jumbo_ring[i]; 911 sc->ti_cdata.ti_rx_jumbo_chain[i] = m_new; 912 TI_HOSTADDR(r->ti_addr) = TI_JUMBO_DMA_ADDR(sc, m_new); 913 r->ti_type = TI_BDTYPE_RECV_JUMBO_BD; 914 r->ti_flags = TI_BDFLAG_JUMBO_RING | TI_BDFLAG_IP_CKSUM; 915 r->ti_len = m_new->m_len; 916 r->ti_idx = i; 917 918 return (0); 919 } 920 921 /* 922 * The standard receive ring has 512 entries in it. At 2K per mbuf cluster, 923 * that's 1MB of memory, which is a lot. 
For now, we fill only the first 924 * 256 ring entries and hope that our CPU is fast enough to keep up with 925 * the NIC. 926 */ 927 int 928 ti_init_rx_ring_std(struct ti_softc *sc) 929 { 930 int i; 931 struct ti_cmd_desc cmd; 932 933 for (i = 0; i < TI_SSLOTS; i++) { 934 if (ti_newbuf_std(sc, i, NULL, 0) == ENOBUFS) 935 return (ENOBUFS); 936 } 937 938 TI_UPDATE_STDPROD(sc, i - 1); 939 sc->ti_std = i - 1; 940 941 return (0); 942 } 943 944 void 945 ti_free_rx_ring_std(struct ti_softc *sc) 946 { 947 int i; 948 949 for (i = 0; i < TI_STD_RX_RING_CNT; i++) { 950 if (sc->ti_cdata.ti_rx_std_chain[i] != NULL) { 951 m_freem(sc->ti_cdata.ti_rx_std_chain[i]); 952 sc->ti_cdata.ti_rx_std_chain[i] = NULL; 953 bus_dmamap_destroy(sc->sc_dmatag, 954 sc->ti_cdata.ti_rx_std_map[i]); 955 sc->ti_cdata.ti_rx_std_map[i] = 0; 956 } 957 bzero(&sc->ti_rdata->ti_rx_std_ring[i], 958 sizeof(struct ti_rx_desc)); 959 } 960 } 961 962 int 963 ti_init_rx_ring_jumbo(struct ti_softc *sc) 964 { 965 int i; 966 struct ti_cmd_desc cmd; 967 968 for (i = 0; i < TI_JUMBO_RX_RING_CNT; i++) { 969 if (ti_newbuf_jumbo(sc, i, NULL) == ENOBUFS) 970 return (ENOBUFS); 971 }; 972 973 TI_UPDATE_JUMBOPROD(sc, i - 1); 974 sc->ti_jumbo = i - 1; 975 976 return (0); 977 } 978 979 void 980 ti_free_rx_ring_jumbo(struct ti_softc *sc) 981 { 982 int i; 983 984 for (i = 0; i < TI_JUMBO_RX_RING_CNT; i++) { 985 if (sc->ti_cdata.ti_rx_jumbo_chain[i] != NULL) { 986 m_freem(sc->ti_cdata.ti_rx_jumbo_chain[i]); 987 sc->ti_cdata.ti_rx_jumbo_chain[i] = NULL; 988 } 989 bzero(&sc->ti_rdata->ti_rx_jumbo_ring[i], 990 sizeof(struct ti_rx_desc)); 991 } 992 } 993 994 int 995 ti_init_rx_ring_mini(struct ti_softc *sc) 996 { 997 int i; 998 999 for (i = 0; i < TI_MSLOTS; i++) { 1000 if (ti_newbuf_mini(sc, i, NULL, 0) == ENOBUFS) 1001 return (ENOBUFS); 1002 }; 1003 1004 TI_UPDATE_MINIPROD(sc, i - 1); 1005 sc->ti_mini = i - 1; 1006 1007 return (0); 1008 } 1009 1010 void 1011 ti_free_rx_ring_mini(struct ti_softc *sc) 1012 { 1013 int i; 1014 1015 
for (i = 0; i < TI_MINI_RX_RING_CNT; i++) { 1016 if (sc->ti_cdata.ti_rx_mini_chain[i] != NULL) { 1017 m_freem(sc->ti_cdata.ti_rx_mini_chain[i]); 1018 sc->ti_cdata.ti_rx_mini_chain[i] = NULL; 1019 bus_dmamap_destroy(sc->sc_dmatag, 1020 sc->ti_cdata.ti_rx_mini_map[i]); 1021 sc->ti_cdata.ti_rx_mini_map[i] = 0; 1022 } 1023 bzero(&sc->ti_rdata->ti_rx_mini_ring[i], 1024 sizeof(struct ti_rx_desc)); 1025 } 1026 } 1027 1028 void 1029 ti_free_tx_ring(struct ti_softc *sc) 1030 { 1031 int i; 1032 struct ti_txmap_entry *entry; 1033 1034 if (sc->ti_rdata->ti_tx_ring == NULL) 1035 return; 1036 1037 for (i = 0; i < TI_TX_RING_CNT; i++) { 1038 if (sc->ti_cdata.ti_tx_chain[i] != NULL) { 1039 m_freem(sc->ti_cdata.ti_tx_chain[i]); 1040 sc->ti_cdata.ti_tx_chain[i] = NULL; 1041 SLIST_INSERT_HEAD(&sc->ti_tx_map_listhead, 1042 sc->ti_cdata.ti_tx_map[i], link); 1043 sc->ti_cdata.ti_tx_map[i] = 0; 1044 } 1045 bzero(&sc->ti_rdata->ti_tx_ring[i], 1046 sizeof(struct ti_tx_desc)); 1047 } 1048 1049 while ((entry = SLIST_FIRST(&sc->ti_tx_map_listhead))) { 1050 SLIST_REMOVE_HEAD(&sc->ti_tx_map_listhead, link); 1051 bus_dmamap_destroy(sc->sc_dmatag, entry->dmamap); 1052 free(entry, M_DEVBUF); 1053 } 1054 } 1055 1056 int 1057 ti_init_tx_ring(struct ti_softc *sc) 1058 { 1059 int i; 1060 bus_dmamap_t dmamap; 1061 struct ti_txmap_entry *entry; 1062 1063 sc->ti_txcnt = 0; 1064 sc->ti_tx_saved_considx = 0; 1065 sc->ti_tx_saved_prodidx = 0; 1066 CSR_WRITE_4(sc, TI_MB_SENDPROD_IDX, 0); 1067 1068 SLIST_INIT(&sc->ti_tx_map_listhead); 1069 for (i = 0; i < TI_TX_RING_CNT; i++) { 1070 if (bus_dmamap_create(sc->sc_dmatag, TI_JUMBO_FRAMELEN, 1071 TI_NTXSEG, MCLBYTES, 0, BUS_DMA_NOWAIT, &dmamap)) 1072 return (ENOBUFS); 1073 1074 entry = malloc(sizeof(*entry), M_DEVBUF, M_NOWAIT); 1075 if (!entry) { 1076 bus_dmamap_destroy(sc->sc_dmatag, dmamap); 1077 return (ENOBUFS); 1078 } 1079 entry->dmamap = dmamap; 1080 SLIST_INSERT_HEAD(&sc->ti_tx_map_listhead, entry, link); 1081 } 1082 1083 return (0); 1084 } 1085 1086 /* 
1087 * The Tigon 2 firmware has a new way to add/delete multicast addresses, 1088 * but we have to support the old way too so that Tigon 1 cards will 1089 * work. 1090 */ 1091 void 1092 ti_add_mcast(struct ti_softc *sc, struct ether_addr *addr) 1093 { 1094 struct ti_cmd_desc cmd; 1095 u_int16_t *m; 1096 u_int32_t ext[2] = {0, 0}; 1097 1098 m = (u_int16_t *)&addr->ether_addr_octet[0]; 1099 1100 switch(sc->ti_hwrev) { 1101 case TI_HWREV_TIGON: 1102 CSR_WRITE_4(sc, TI_GCR_MAR0, htons(m[0])); 1103 CSR_WRITE_4(sc, TI_GCR_MAR1, (htons(m[1]) << 16) | htons(m[2])); 1104 TI_DO_CMD(TI_CMD_ADD_MCAST_ADDR, 0, 0); 1105 break; 1106 case TI_HWREV_TIGON_II: 1107 ext[0] = htons(m[0]); 1108 ext[1] = (htons(m[1]) << 16) | htons(m[2]); 1109 TI_DO_CMD_EXT(TI_CMD_EXT_ADD_MCAST, 0, 0, (caddr_t)&ext, 2); 1110 break; 1111 default: 1112 printf("%s: unknown hwrev\n", sc->sc_dv.dv_xname); 1113 break; 1114 } 1115 } 1116 1117 void 1118 ti_del_mcast(struct ti_softc *sc, struct ether_addr *addr) 1119 { 1120 struct ti_cmd_desc cmd; 1121 u_int16_t *m; 1122 u_int32_t ext[2] = {0, 0}; 1123 1124 m = (u_int16_t *)&addr->ether_addr_octet[0]; 1125 1126 switch(sc->ti_hwrev) { 1127 case TI_HWREV_TIGON: 1128 CSR_WRITE_4(sc, TI_GCR_MAR0, htons(m[0])); 1129 CSR_WRITE_4(sc, TI_GCR_MAR1, (htons(m[1]) << 16) | htons(m[2])); 1130 TI_DO_CMD(TI_CMD_DEL_MCAST_ADDR, 0, 0); 1131 break; 1132 case TI_HWREV_TIGON_II: 1133 ext[0] = htons(m[0]); 1134 ext[1] = (htons(m[1]) << 16) | htons(m[2]); 1135 TI_DO_CMD_EXT(TI_CMD_EXT_DEL_MCAST, 0, 0, (caddr_t)&ext, 2); 1136 break; 1137 default: 1138 printf("%s: unknown hwrev\n", sc->sc_dv.dv_xname); 1139 break; 1140 } 1141 } 1142 1143 /* 1144 * Configure the Tigon's multicast address filter. 1145 * 1146 * The actual multicast table management is a bit of a pain, thanks to 1147 * slight brain damage on the part of both Alteon and us. 
 * With our
 * multicast code, we are only alerted when the multicast address table
 * changes and at that point we only have the current list of addresses:
 * we only know the current state, not the previous state, so we don't
 * actually know what addresses were removed or added.  The firmware has
 * state, but we can't get our grubby mits on it, and there is no 'delete
 * all multicast addresses' command.  Hence, we have to maintain our own
 * state so we know what addresses have been programmed into the NIC at
 * any given time.
 */
void
ti_iff(struct ti_softc *sc)
{
	struct ifnet *ifp = &sc->arpcom.ac_if;
	struct arpcom *ac = &sc->arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	struct ti_cmd_desc cmd;
	struct ti_mc_entry *mc;
	u_int32_t intrs;

	/* Start from a known state: no allmulti, no promisc. */
	TI_DO_CMD(TI_CMD_SET_ALLMULTI, TI_CMD_CODE_ALLMULTI_DIS, 0);
	TI_DO_CMD(TI_CMD_SET_PROMISC_MODE, TI_CMD_CODE_PROMISC_DIS, 0);
	ifp->if_flags &= ~IFF_ALLMULTI;

	if (ifp->if_flags & IFF_PROMISC || ac->ac_multirangecnt > 0) {
		/* Address ranges can't be filtered exactly; go allmulti. */
		ifp->if_flags |= IFF_ALLMULTI;
		if (ifp->if_flags & IFF_PROMISC) {
			TI_DO_CMD(TI_CMD_SET_PROMISC_MODE,
			    TI_CMD_CODE_PROMISC_ENB, 0);
		} else {
			TI_DO_CMD(TI_CMD_SET_ALLMULTI,
			    TI_CMD_CODE_ALLMULTI_ENB, 0);
		}
	} else {
		/* Disable interrupts. */
		intrs = CSR_READ_4(sc, TI_MB_HOSTINTR);
		CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1);

		/* First, zot all the existing filters. */
		while (SLIST_FIRST(&sc->ti_mc_listhead) != NULL) {
			mc = SLIST_FIRST(&sc->ti_mc_listhead);
			ti_del_mcast(sc, &mc->mc_addr);
			SLIST_REMOVE_HEAD(&sc->ti_mc_listhead, mc_entries);
			free(mc, M_DEVBUF);
		}

		/* Now program new ones. */
		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			mc = malloc(sizeof(struct ti_mc_entry), M_DEVBUF,
			    M_NOWAIT);
			if (mc == NULL)
				panic("ti_iff");

			bcopy(enm->enm_addrlo, &mc->mc_addr,
			    ETHER_ADDR_LEN);
			SLIST_INSERT_HEAD(&sc->ti_mc_listhead, mc,
			    mc_entries);
			ti_add_mcast(sc, &mc->mc_addr);

			ETHER_NEXT_MULTI(step, enm);
		}

		/* Re-enable interrupts. */
		CSR_WRITE_4(sc, TI_MB_HOSTINTR, intrs);
	}
}

/*
 * Check to see if the BIOS has configured us for a 64 bit slot when
 * we aren't actually in one.  If we detect this condition, we can work
 * around it on the Tigon 2 by setting a bit in the PCI state register,
 * but for the Tigon 1 we must give up and abort the interface attach.
 */
int
ti_64bitslot_war(struct ti_softc *sc)
{
	if (!(CSR_READ_4(sc, TI_PCI_STATE) & TI_PCISTATE_32BIT_BUS)) {
		/*
		 * Probe scratch locations: if a write to 0x600 shows up
		 * at 0x604, the upper 32 data lines aren't connected and
		 * we are really in a 32-bit slot.
		 */
		CSR_WRITE_4(sc, 0x600, 0);
		CSR_WRITE_4(sc, 0x604, 0);
		CSR_WRITE_4(sc, 0x600, 0x5555AAAA);
		if (CSR_READ_4(sc, 0x604) == 0x5555AAAA) {
			if (sc->ti_hwrev == TI_HWREV_TIGON)
				return (EINVAL);
			else {
				TI_SETBIT(sc, TI_PCI_STATE,
				    TI_PCISTATE_32BIT_BUS);
				return (0);
			}
		}
	}

	return (0);
}

/*
 * Do endian, PCI and DMA initialization.  Also check the on-board ROM
 * self-test results.
 */
int
ti_chipinit(struct ti_softc *sc)
{
	u_int32_t chip_rev;

	/* Initialize link to down state. */
	sc->ti_linkstat = TI_EV_CODE_LINK_DOWN;

	/* Set endianness before we access any non-PCI registers. */
	CSR_WRITE_4(sc, TI_MISC_HOST_CTL,
	    TI_MHC_LITTLEENDIAN_INIT | (TI_MHC_LITTLEENDIAN_INIT << 24));

	/* Check the ROM failed bit to see if self-tests passed.
*/ 1260 if (CSR_READ_4(sc, TI_CPU_STATE) & TI_CPUSTATE_ROMFAIL) { 1261 printf("%s: board self-diagnostics failed!\n", 1262 sc->sc_dv.dv_xname); 1263 return (ENODEV); 1264 } 1265 1266 /* Halt the CPU. */ 1267 TI_SETBIT(sc, TI_CPU_STATE, TI_CPUSTATE_HALT); 1268 1269 /* Figure out the hardware revision. */ 1270 chip_rev = CSR_READ_4(sc, TI_MISC_HOST_CTL) & TI_MHC_CHIP_REV_MASK; 1271 switch(chip_rev) { 1272 case TI_REV_TIGON_I: 1273 sc->ti_hwrev = TI_HWREV_TIGON; 1274 break; 1275 case TI_REV_TIGON_II: 1276 sc->ti_hwrev = TI_HWREV_TIGON_II; 1277 break; 1278 default: 1279 printf("\n"); 1280 printf("%s: unsupported chip revision: %x\n", 1281 sc->sc_dv.dv_xname, chip_rev); 1282 return (ENODEV); 1283 } 1284 1285 /* Do special setup for Tigon 2. */ 1286 if (sc->ti_hwrev == TI_HWREV_TIGON_II) { 1287 TI_SETBIT(sc, TI_CPU_CTL_B, TI_CPUSTATE_HALT); 1288 TI_SETBIT(sc, TI_MISC_LOCAL_CTL, TI_MLC_SRAM_BANK_512K); 1289 TI_SETBIT(sc, TI_MISC_CONF, TI_MCR_SRAM_SYNCHRONOUS); 1290 } 1291 1292 if (sc->ti_sbus) 1293 ti_chipinit_sbus(sc); 1294 else 1295 ti_chipinit_pci(sc); 1296 1297 /* Recommended settings from Tigon manual. */ 1298 CSR_WRITE_4(sc, TI_GCR_DMA_WRITECFG, TI_DMA_STATE_THRESH_8W); 1299 CSR_WRITE_4(sc, TI_GCR_DMA_READCFG, TI_DMA_STATE_THRESH_8W); 1300 1301 if (ti_64bitslot_war(sc)) { 1302 printf("%s: bios thinks we're in a 64 bit slot, " 1303 "but we aren't", sc->sc_dv.dv_xname); 1304 return (EINVAL); 1305 } 1306 1307 return (0); 1308 } 1309 1310 void 1311 ti_chipinit_pci(struct ti_softc *sc) 1312 { 1313 u_int32_t cacheline; 1314 u_int32_t pci_writemax = 0; 1315 1316 /* Set up the PCI state register. */ 1317 CSR_WRITE_4(sc, TI_PCI_STATE, TI_PCI_READ_CMD | TI_PCI_WRITE_CMD); 1318 if (sc->ti_hwrev == TI_HWREV_TIGON_II) 1319 TI_SETBIT(sc, TI_PCI_STATE, TI_PCISTATE_USE_MEM_RD_MULT); 1320 1321 /* Clear the read/write max DMA parameters. */ 1322 TI_CLRBIT(sc, TI_PCI_STATE, (TI_PCISTATE_WRITE_MAXDMA| 1323 TI_PCISTATE_READ_MAXDMA)); 1324 1325 /* Get cache line size. 
*/ 1326 cacheline = CSR_READ_4(sc, TI_PCI_BIST) & 0xFF; 1327 1328 /* 1329 * If the system has set enabled the PCI memory write 1330 * and invalidate command in the command register, set 1331 * the write max parameter accordingly. This is necessary 1332 * to use MWI with the Tigon 2. 1333 */ 1334 if (CSR_READ_4(sc, TI_PCI_CMDSTAT) & PCI_COMMAND_INVALIDATE_ENABLE) { 1335 switch(cacheline) { 1336 case 1: 1337 case 4: 1338 case 8: 1339 case 16: 1340 case 32: 1341 case 64: 1342 break; 1343 default: 1344 /* Disable PCI memory write and invalidate. */ 1345 CSR_WRITE_4(sc, TI_PCI_CMDSTAT, CSR_READ_4(sc, 1346 TI_PCI_CMDSTAT) & ~PCI_COMMAND_INVALIDATE_ENABLE); 1347 break; 1348 } 1349 } 1350 1351 #ifdef __brokenalpha__ 1352 /* 1353 * From the Alteon sample driver: 1354 * Must insure that we do not cross an 8K (bytes) boundary 1355 * for DMA reads. Our highest limit is 1K bytes. This is a 1356 * restriction on some ALPHA platforms with early revision 1357 * 21174 PCI chipsets, such as the AlphaPC 164lx 1358 */ 1359 TI_SETBIT(sc, TI_PCI_STATE, pci_writemax|TI_PCI_READMAX_1024); 1360 #else 1361 TI_SETBIT(sc, TI_PCI_STATE, pci_writemax); 1362 #endif 1363 1364 /* This sets the min dma param all the way up (0xff). */ 1365 TI_SETBIT(sc, TI_PCI_STATE, TI_PCISTATE_MINDMA); 1366 1367 /* Configure DMA variables. */ 1368 CSR_WRITE_4(sc, TI_GCR_OPMODE, TI_DMA_SWAP_OPTIONS | 1369 TI_OPMODE_WARN_ENB | TI_OPMODE_FATAL_ENB | 1370 TI_OPMODE_DONT_FRAG_JUMBO); 1371 } 1372 1373 void 1374 ti_chipinit_sbus(struct ti_softc *sc) 1375 { 1376 /* Set up the PCI state register. */ 1377 CSR_WRITE_4(sc, TI_PCI_STATE, TI_PCI_READ_CMD | TI_PCI_WRITE_CMD | 1378 TI_PCISTATE_NO_SWAP_READ_DMA | TI_PCISTATE_NO_SWAP_WRITE_DMA | 1379 TI_PCI_WRITEMAX_64 | TI_PCI_READMAX_64 | 1380 TI_PCISTATE_PROVIDE_LEN); 1381 1382 /* Configure DMA variables. 
	 */
	CSR_WRITE_4(sc, TI_GCR_OPMODE, TI_OPMODE_WORDSWAP_BD |
	    TI_OPMODE_1_DMA_ACTIVE | TI_OPMODE_SBUS |
	    TI_OPMODE_WARN_ENB | TI_OPMODE_FATAL_ENB |
	    TI_OPMODE_DONT_FRAG_JUMBO);
}

/*
 * Initialize the general information block and firmware, and
 * start the CPU(s) running.
 */
int
ti_gibinit(struct ti_softc *sc)
{
	struct ti_rcb *rcb;
	int i;
	struct ifnet *ifp;

	ifp = &sc->arpcom.ac_if;

	/* Disable interrupts for now. */
	CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1);

	/*
	 * Tell the chip where to find the general information block.
	 * While this struct could go into >4GB memory, we allocate it in a
	 * single slab with the other descriptors, and those don't seem to
	 * support being located in a 64-bit region.
	 */
	CSR_WRITE_4(sc, TI_GCR_GENINFO_HI, 0);
	CSR_WRITE_4(sc, TI_GCR_GENINFO_LO,
	    TI_RING_DMA_ADDR(sc, ti_info) & 0xffffffff);

	/* Load the firmware into SRAM. */
	ti_loadfw(sc);

	/* Set up the contents of the general info and ring control blocks. */

	/* Set up the event ring and producer pointer. */
	rcb = &sc->ti_rdata->ti_info.ti_ev_rcb;

	TI_HOSTADDR(rcb->ti_hostaddr) = TI_RING_DMA_ADDR(sc, ti_event_ring);
	rcb->ti_flags = 0;
	TI_HOSTADDR(sc->ti_rdata->ti_info.ti_ev_prodidx_ptr) =
	    TI_RING_DMA_ADDR(sc, ti_ev_prodidx_r);
	sc->ti_ev_prodidx.ti_idx = 0;
	CSR_WRITE_4(sc, TI_GCR_EVENTCONS_IDX, 0);
	sc->ti_ev_saved_considx = 0;

	/* Set up the command ring and producer mailbox. */
	rcb = &sc->ti_rdata->ti_info.ti_cmd_rcb;

	/* The command ring lives in NIC memory, not host memory. */
	TI_HOSTADDR(rcb->ti_hostaddr) = TI_GCR_NIC_ADDR(TI_GCR_CMDRING);
	rcb->ti_flags = 0;
	rcb->ti_max_len = 0;
	for (i = 0; i < TI_CMD_RING_CNT; i++) {
		CSR_WRITE_4(sc, TI_GCR_CMDRING + (i * 4), 0);
	}
	CSR_WRITE_4(sc, TI_GCR_CMDCONS_IDX, 0);
	CSR_WRITE_4(sc, TI_MB_CMDPROD_IDX, 0);
	sc->ti_cmd_saved_prodidx = 0;

	/*
	 * Assign the address of the stats refresh buffer.
	 * We re-use the current stats buffer for this to
	 * conserve memory.
	 */
	TI_HOSTADDR(sc->ti_rdata->ti_info.ti_refresh_stats_ptr) =
	    TI_RING_DMA_ADDR(sc, ti_info.ti_stats);

	/* Set up the standard receive ring. */
	rcb = &sc->ti_rdata->ti_info.ti_std_rx_rcb;
	TI_HOSTADDR(rcb->ti_hostaddr) =
	    TI_RING_DMA_ADDR(sc, ti_rx_std_ring);
	rcb->ti_max_len = ETHER_MAX_LEN;
	rcb->ti_flags = 0;
	rcb->ti_flags |= TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM;
#if NVLAN > 0
	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST;
#endif

	/* Set up the jumbo receive ring. */
	rcb = &sc->ti_rdata->ti_info.ti_jumbo_rx_rcb;
	TI_HOSTADDR(rcb->ti_hostaddr) = TI_RING_DMA_ADDR(sc, ti_rx_jumbo_ring);
	rcb->ti_max_len = TI_JUMBO_FRAMELEN;
	rcb->ti_flags = 0;
	rcb->ti_flags |= TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM;
#if NVLAN > 0
	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST;
#endif

	/*
	 * Set up the mini ring.  Only activated on the
	 * Tigon 2 but the slot in the config block is
	 * still there on the Tigon 1.
	 */
	rcb = &sc->ti_rdata->ti_info.ti_mini_rx_rcb;
	TI_HOSTADDR(rcb->ti_hostaddr) = TI_RING_DMA_ADDR(sc, ti_rx_mini_ring);
	rcb->ti_max_len = MHLEN - ETHER_ALIGN;
	if (sc->ti_hwrev == TI_HWREV_TIGON)
		rcb->ti_flags = TI_RCB_FLAG_RING_DISABLED;
	else
		rcb->ti_flags = 0;
	rcb->ti_flags |= TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM;
#if NVLAN > 0
	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST;
#endif

	/*
	 * Set up the receive return ring.
	 */
	rcb = &sc->ti_rdata->ti_info.ti_return_rcb;
	TI_HOSTADDR(rcb->ti_hostaddr) = TI_RING_DMA_ADDR(sc,ti_rx_return_ring);
	rcb->ti_flags = 0;
	rcb->ti_max_len = TI_RETURN_RING_CNT;
	TI_HOSTADDR(sc->ti_rdata->ti_info.ti_return_prodidx_ptr) =
	    TI_RING_DMA_ADDR(sc, ti_return_prodidx_r);

	/*
	 * Set up the tx ring.  Note: for the Tigon 2, we have the option
	 * of putting the transmit ring in the host's address space and
	 * letting the chip DMA it instead of leaving the ring in the NIC's
	 * memory and accessing it through the shared memory region.  We
	 * do this for the Tigon 2, but it doesn't work on the Tigon 1,
	 * so we have to revert to the shared memory scheme if we detect
	 * a Tigon 1 chip.
	 */
	CSR_WRITE_4(sc, TI_WINBASE, TI_TX_RING_BASE);
	bzero(sc->ti_rdata->ti_tx_ring,
	    TI_TX_RING_CNT * sizeof(struct ti_tx_desc));
	rcb = &sc->ti_rdata->ti_info.ti_tx_rcb;
	if (sc->ti_hwrev == TI_HWREV_TIGON)
		rcb->ti_flags = 0;
	else
		rcb->ti_flags = TI_RCB_FLAG_HOST_RING;
	rcb->ti_flags |= TI_RCB_FLAG_IP_CKSUM | TI_RCB_FLAG_NO_PHDR_CKSUM;
#if NVLAN > 0
	if (ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)
		rcb->ti_flags |= TI_RCB_FLAG_VLAN_ASSIST;
#endif
	rcb->ti_max_len = TI_TX_RING_CNT;
	if (sc->ti_hwrev == TI_HWREV_TIGON)
		TI_HOSTADDR(rcb->ti_hostaddr) = TI_TX_RING_BASE;
	else
		TI_HOSTADDR(rcb->ti_hostaddr) =
		    TI_RING_DMA_ADDR(sc, ti_tx_ring);
	TI_HOSTADDR(sc->ti_rdata->ti_info.ti_tx_considx_ptr) =
	    TI_RING_DMA_ADDR(sc, ti_tx_considx_r);

	/* Push the whole info block out before the chip reads it. */
	TI_RING_DMASYNC(sc, ti_info, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

	/* Set up tuneables */
	CSR_WRITE_4(sc, TI_GCR_RX_COAL_TICKS, (sc->ti_rx_coal_ticks / 10));
	CSR_WRITE_4(sc, TI_GCR_TX_COAL_TICKS, sc->ti_tx_coal_ticks);
	CSR_WRITE_4(sc, TI_GCR_STAT_TICKS, sc->ti_stat_ticks);
	CSR_WRITE_4(sc, TI_GCR_RX_MAX_COAL_BD, sc->ti_rx_max_coal_bds);
	CSR_WRITE_4(sc, TI_GCR_TX_MAX_COAL_BD, sc->ti_tx_max_coal_bds);
	CSR_WRITE_4(sc, TI_GCR_TX_BUFFER_RATIO, sc->ti_tx_buf_ratio);

	/* Turn interrupts on. */
	CSR_WRITE_4(sc, TI_GCR_MASK_INTRS, 0);
	CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0);

	/* Start CPU. */
	TI_CLRBIT(sc, TI_CPU_STATE, (TI_CPUSTATE_HALT|TI_CPUSTATE_STEP));

	return (0);
}

/*
 * Device attach: initialize the chip, read the station address from
 * EEPROM, allocate DMA memory for the ring data, and register the
 * network interface.  Returns 0 on success, 1 on failure.
 */
int
ti_attach(struct ti_softc *sc)
{
	bus_dma_segment_t seg;
	int rseg;
	struct ifnet *ifp;
	caddr_t kva;

	if (ti_chipinit(sc)) {
		printf("%s: chip initialization failed\n", sc->sc_dv.dv_xname);
		return (1);
	}

	/* Zero out the NIC's on-board SRAM.
*/ 1568 ti_mem_set(sc, 0x2000, 0x100000 - 0x2000); 1569 1570 /* Init again -- zeroing memory may have clobbered some registers. */ 1571 if (ti_chipinit(sc)) { 1572 printf("%s: chip initialization failed\n", sc->sc_dv.dv_xname); 1573 return (1); 1574 } 1575 1576 /* 1577 * Get station address from the EEPROM. Note: the manual states 1578 * that the MAC address is at offset 0x8c, however the data is 1579 * stored as two longwords (since that's how it's loaded into 1580 * the NIC). This means the MAC address is actually preceded 1581 * by two zero bytes. We need to skip over those. 1582 */ 1583 if (ti_read_eeprom(sc, (caddr_t)&sc->arpcom.ac_enaddr, 1584 TI_EE_MAC_OFFSET + 2, ETHER_ADDR_LEN)) { 1585 printf("%s: failed to read station address\n", 1586 sc->sc_dv.dv_xname); 1587 return (1); 1588 } 1589 1590 /* 1591 * A Tigon chip was detected. Inform the world. 1592 */ 1593 printf(", address %s\n", ether_sprintf(sc->arpcom.ac_enaddr)); 1594 1595 /* Allocate the general information block and ring buffers. 
*/ 1596 if (bus_dmamem_alloc(sc->sc_dmatag, sizeof(struct ti_ring_data), 1597 PAGE_SIZE, 0, &seg, 1, &rseg, BUS_DMA_NOWAIT)) { 1598 printf("%s: can't alloc rx buffers\n", sc->sc_dv.dv_xname); 1599 return (1); 1600 } 1601 if (bus_dmamem_map(sc->sc_dmatag, &seg, rseg, 1602 sizeof(struct ti_ring_data), &kva, BUS_DMA_NOWAIT)) { 1603 printf("%s: can't map dma buffers (%d bytes)\n", 1604 sc->sc_dv.dv_xname, sizeof(struct ti_ring_data)); 1605 goto fail_1; 1606 } 1607 if (bus_dmamap_create(sc->sc_dmatag, sizeof(struct ti_ring_data), 1, 1608 sizeof(struct ti_ring_data), 0, BUS_DMA_NOWAIT, 1609 &sc->ti_ring_map)) { 1610 printf("%s: can't create dma map\n", sc->sc_dv.dv_xname); 1611 goto fail_2; 1612 } 1613 if (bus_dmamap_load(sc->sc_dmatag, sc->ti_ring_map, kva, 1614 sizeof(struct ti_ring_data), NULL, BUS_DMA_NOWAIT)) { 1615 goto fail_3; 1616 } 1617 sc->ti_rdata = (struct ti_ring_data *)kva; 1618 bzero(sc->ti_rdata, sizeof(struct ti_ring_data)); 1619 1620 /* Try to allocate memory for jumbo buffers. */ 1621 if (ti_alloc_jumbo_mem(sc)) { 1622 printf("%s: jumbo buffer allocation failed\n", 1623 sc->sc_dv.dv_xname); 1624 goto fail_3; 1625 } 1626 1627 /* Set default tuneable values. 
*/ 1628 sc->ti_stat_ticks = 2 * TI_TICKS_PER_SEC; 1629 sc->ti_rx_coal_ticks = TI_TICKS_PER_SEC / 5000; 1630 sc->ti_tx_coal_ticks = TI_TICKS_PER_SEC / 500; 1631 sc->ti_rx_max_coal_bds = 64; 1632 sc->ti_tx_max_coal_bds = 128; 1633 sc->ti_tx_buf_ratio = 21; 1634 1635 /* Set up ifnet structure */ 1636 ifp = &sc->arpcom.ac_if; 1637 ifp->if_softc = sc; 1638 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1639 ifp->if_ioctl = ti_ioctl; 1640 ifp->if_start = ti_start; 1641 ifp->if_watchdog = ti_watchdog; 1642 ifp->if_hardmtu = TI_JUMBO_FRAMELEN - ETHER_HDR_LEN; 1643 IFQ_SET_MAXLEN(&ifp->if_snd, TI_TX_RING_CNT - 1); 1644 IFQ_SET_READY(&ifp->if_snd); 1645 bcopy(sc->sc_dv.dv_xname, ifp->if_xname, IFNAMSIZ); 1646 1647 ifp->if_capabilities = IFCAP_VLAN_MTU; 1648 1649 #if NVLAN > 0 1650 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING; 1651 #endif 1652 1653 /* Set up ifmedia support. */ 1654 ifmedia_init(&sc->ifmedia, IFM_IMASK, ti_ifmedia_upd, ti_ifmedia_sts); 1655 if (sc->ti_copper) { 1656 /* 1657 * Copper cards allow manual 10/100 mode selection, 1658 * but not manual 1000baseTX mode selection. Why? 1659 * Because currently there's no way to specify the 1660 * master/slave setting through the firmware interface, 1661 * so Alteon decided to just bag it and handle it 1662 * via autonegotiation. 1663 */ 1664 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_10_T, 0, NULL); 1665 ifmedia_add(&sc->ifmedia, 1666 IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL); 1667 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_100_TX, 0, NULL); 1668 ifmedia_add(&sc->ifmedia, 1669 IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL); 1670 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_1000_T, 0, NULL); 1671 ifmedia_add(&sc->ifmedia, 1672 IFM_ETHER|IFM_1000_T|IFM_FDX, 0, NULL); 1673 } else { 1674 /* Fiber cards don't support 10/100 modes. 
*/ 1675 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_1000_SX, 0, NULL); 1676 ifmedia_add(&sc->ifmedia, 1677 IFM_ETHER|IFM_1000_SX|IFM_FDX, 0, NULL); 1678 } 1679 ifmedia_add(&sc->ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL); 1680 ifmedia_set(&sc->ifmedia, IFM_ETHER|IFM_AUTO); 1681 1682 /* 1683 * Call MI attach routines. 1684 */ 1685 if_attach(ifp); 1686 ether_ifattach(ifp); 1687 1688 return (0); 1689 1690 fail_3: 1691 bus_dmamap_destroy(sc->sc_dmatag, sc->ti_ring_map); 1692 1693 fail_2: 1694 bus_dmamem_unmap(sc->sc_dmatag, kva, 1695 sizeof(struct ti_ring_data)); 1696 1697 fail_1: 1698 bus_dmamem_free(sc->sc_dmatag, &seg, rseg); 1699 1700 return (1); 1701 } 1702 1703 /* 1704 * Frame reception handling. This is called if there's a frame 1705 * on the receive return list. 1706 * 1707 * Note: we have to be able to handle three possibilities here: 1708 * 1) the frame is from the mini receive ring (can only happen) 1709 * on Tigon 2 boards) 1710 * 2) the frame is from the jumbo receive ring 1711 * 3) the frame is from the standard receive ring 1712 */ 1713 1714 void 1715 ti_rxeof(struct ti_softc *sc) 1716 { 1717 struct ifnet *ifp; 1718 struct ti_cmd_desc cmd; 1719 1720 ifp = &sc->arpcom.ac_if; 1721 1722 while(sc->ti_rx_saved_considx != sc->ti_return_prodidx.ti_idx) { 1723 struct ti_rx_desc *cur_rx; 1724 u_int32_t rxidx; 1725 struct mbuf *m = NULL; 1726 bus_dmamap_t dmamap; 1727 1728 cur_rx = 1729 &sc->ti_rdata->ti_rx_return_ring[sc->ti_rx_saved_considx]; 1730 rxidx = cur_rx->ti_idx; 1731 TI_INC(sc->ti_rx_saved_considx, TI_RETURN_RING_CNT); 1732 1733 if (cur_rx->ti_flags & TI_BDFLAG_JUMBO_RING) { 1734 TI_INC(sc->ti_jumbo, TI_JUMBO_RX_RING_CNT); 1735 m = sc->ti_cdata.ti_rx_jumbo_chain[rxidx]; 1736 sc->ti_cdata.ti_rx_jumbo_chain[rxidx] = NULL; 1737 if (cur_rx->ti_flags & TI_BDFLAG_ERROR) { 1738 ifp->if_ierrors++; 1739 ti_newbuf_jumbo(sc, sc->ti_jumbo, m); 1740 continue; 1741 } 1742 if (ti_newbuf_jumbo(sc, sc->ti_jumbo, NULL) 1743 == ENOBUFS) { 1744 struct mbuf *m0; 1745 m0 = 
				    m_devget(mtod(m, char *), cur_rx->ti_len,
				    ETHER_ALIGN, ifp, NULL);
				ti_newbuf_jumbo(sc, sc->ti_jumbo, m);
				if (m0 == NULL) {
					ifp->if_ierrors++;
					continue;
				}
				m = m0;
			}
		} else if (cur_rx->ti_flags & TI_BDFLAG_MINI_RING) {
			TI_INC(sc->ti_mini, TI_MINI_RX_RING_CNT);
			m = sc->ti_cdata.ti_rx_mini_chain[rxidx];
			sc->ti_cdata.ti_rx_mini_chain[rxidx] = NULL;
			dmamap = sc->ti_cdata.ti_rx_mini_map[rxidx];
			sc->ti_cdata.ti_rx_mini_map[rxidx] = 0;
			if (cur_rx->ti_flags & TI_BDFLAG_ERROR) {
				ifp->if_ierrors++;
				/* Recycle the old mbuf back into the ring. */
				ti_newbuf_mini(sc, sc->ti_mini, m, dmamap);
				continue;
			}
			if (ti_newbuf_mini(sc, sc->ti_mini, NULL, dmamap)
			    == ENOBUFS) {
				ifp->if_ierrors++;
				ti_newbuf_mini(sc, sc->ti_mini, m, dmamap);
				continue;
			}
		} else {
			TI_INC(sc->ti_std, TI_STD_RX_RING_CNT);
			m = sc->ti_cdata.ti_rx_std_chain[rxidx];
			sc->ti_cdata.ti_rx_std_chain[rxidx] = NULL;
			dmamap = sc->ti_cdata.ti_rx_std_map[rxidx];
			sc->ti_cdata.ti_rx_std_map[rxidx] = 0;
			if (cur_rx->ti_flags & TI_BDFLAG_ERROR) {
				ifp->if_ierrors++;
				ti_newbuf_std(sc, sc->ti_std, m, dmamap);
				continue;
			}
			if (ti_newbuf_std(sc, sc->ti_std, NULL, dmamap)
			    == ENOBUFS) {
				ifp->if_ierrors++;
				ti_newbuf_std(sc, sc->ti_std, m, dmamap);
				continue;
			}
		}

		if (m == NULL)
			panic("%s: couldn't get mbuf", sc->sc_dv.dv_xname);

		m->m_pkthdr.len = m->m_len = cur_rx->ti_len;
		ifp->if_ipackets++;
		m->m_pkthdr.rcvif = ifp;

#if NVLAN > 0
		if (cur_rx->ti_flags & TI_BDFLAG_VLAN_TAG) {
			m->m_pkthdr.ether_vtag = cur_rx->ti_vlan_tag;
			m->m_flags |= M_VLANTAG;
		}
#endif

#if NBPFILTER > 0
		/*
		 * Handle BPF listeners. Let the BPF user see the packet.
		 */
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif

		/* Hardware IP checksum: ~0 complement means good. */
		if ((cur_rx->ti_ip_cksum ^ 0xffff) == 0)
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;

		ether_input_mbuf(ifp, m);
	}

	/* Only necessary on the Tigon 1. */
	if (sc->ti_hwrev == TI_HWREV_TIGON)
		CSR_WRITE_4(sc, TI_GCR_RXRETURNCONS_IDX,
		    sc->ti_rx_saved_considx);

	TI_UPDATE_STDPROD(sc, sc->ti_std);
	TI_UPDATE_MINIPROD(sc, sc->ti_mini);
	TI_UPDATE_JUMBOPROD(sc, sc->ti_jumbo);
}

/*
 * TX completion for the Tigon 1: the ring lives in NIC memory, so each
 * descriptor must be read back through the shared memory window.
 */
void
ti_txeof_tigon1(struct ti_softc *sc)
{
	struct ifnet *ifp;
	struct ti_txmap_entry *entry;
	int active = 1;

	ifp = &sc->arpcom.ac_if;

	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	while (sc->ti_tx_saved_considx != sc->ti_tx_considx.ti_idx) {
		u_int32_t idx = 0;
		struct ti_tx_desc txdesc;

		idx = sc->ti_tx_saved_considx;
		ti_mem_read(sc, TI_TX_RING_BASE + idx * sizeof(txdesc),
		    sizeof(txdesc), (caddr_t)&txdesc);

		if (txdesc.ti_flags & TI_BDFLAG_END)
			ifp->if_opackets++;

		if (sc->ti_cdata.ti_tx_chain[idx] != NULL) {
			m_freem(sc->ti_cdata.ti_tx_chain[idx]);
			sc->ti_cdata.ti_tx_chain[idx] = NULL;

			entry = sc->ti_cdata.ti_tx_map[idx];
			bus_dmamap_sync(sc->sc_dmatag, entry->dmamap, 0,
			    entry->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);

			bus_dmamap_unload(sc->sc_dmatag, entry->dmamap);
			SLIST_INSERT_HEAD(&sc->ti_tx_map_listhead, entry,
			    link);
			sc->ti_cdata.ti_tx_map[idx] = NULL;

		}
		sc->ti_txcnt--;
		TI_INC(sc->ti_tx_saved_considx, TI_TX_RING_CNT);
		ifp->if_timer = 0;

		active = 0;
	}

	if (!active)
		ifp->if_flags &= ~IFF_OACTIVE;
}

/*
 * TX completion for the Tigon 2: the ring lives in host memory and can
 * be read directly.
 */
void
ti_txeof_tigon2(struct ti_softc *sc)
{
	struct ti_tx_desc *cur_tx = NULL;
	struct ifnet *ifp;
	struct ti_txmap_entry *entry;

	ifp = &sc->arpcom.ac_if;

	/*
	 * Go through our tx ring and free mbufs for those
	 * frames that have been sent.
	 */
	while (sc->ti_tx_saved_considx != sc->ti_tx_considx.ti_idx) {
		u_int32_t idx = 0;

		idx = sc->ti_tx_saved_considx;
		cur_tx = &sc->ti_rdata->ti_tx_ring[idx];

		if (cur_tx->ti_flags & TI_BDFLAG_END)
			ifp->if_opackets++;
		if (sc->ti_cdata.ti_tx_chain[idx] != NULL) {
			m_freem(sc->ti_cdata.ti_tx_chain[idx]);
			sc->ti_cdata.ti_tx_chain[idx] = NULL;

			entry = sc->ti_cdata.ti_tx_map[idx];
			bus_dmamap_sync(sc->sc_dmatag, entry->dmamap, 0,
			    entry->dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);

			bus_dmamap_unload(sc->sc_dmatag, entry->dmamap);
			SLIST_INSERT_HEAD(&sc->ti_tx_map_listhead, entry,
			    link);
			sc->ti_cdata.ti_tx_map[idx] = NULL;

		}
		sc->ti_txcnt--;
		TI_INC(sc->ti_tx_saved_considx, TI_TX_RING_CNT);
		ifp->if_timer = 0;
	}

	if (cur_tx != NULL)
		ifp->if_flags &= ~IFF_OACTIVE;
}

/*
 * Interrupt handler.  Returns 1 if the interrupt was ours, 0 otherwise.
 */
int
ti_intr(void *xsc)
{
	struct ti_softc *sc;
	struct ifnet *ifp;

	sc = xsc;
	ifp = &sc->arpcom.ac_if;

	/* XXX checking this register is expensive. */
	/* Make sure this is really our interrupt. */
	if (!(CSR_READ_4(sc, TI_MISC_HOST_CTL) & TI_MHC_INTSTATE))
		return (0);

	/* Ack interrupt and stop others from occurring. */
	CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1);

	if (ifp->if_flags & IFF_RUNNING) {
		/* Check RX return ring producer/consumer */
		ti_rxeof(sc);

		/* Check TX ring producer/consumer */
		if (sc->ti_hwrev == TI_HWREV_TIGON)
			ti_txeof_tigon1(sc);
		else
			ti_txeof_tigon2(sc);
	}

	ti_handle_events(sc);

	/* Re-enable interrupts.
	 */
	CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0);

	if (ifp->if_flags & IFF_RUNNING && !IFQ_IS_EMPTY(&ifp->if_snd))
		ti_start(ifp);

	return (1);
}

/*
 * Fold the NIC-maintained collision counters into if_collisions.
 * The stats block is DMA'd by the chip; sync before reading.
 */
void
ti_stats_update(struct ti_softc *sc)
{
	struct ifnet *ifp;
	struct ti_stats *stats = &sc->ti_rdata->ti_info.ti_stats;

	ifp = &sc->arpcom.ac_if;

	TI_RING_DMASYNC(sc, ti_info.ti_stats, BUS_DMASYNC_POSTREAD);

	/* Counters are cumulative; subtract what we already accounted. */
	ifp->if_collisions += stats->dot3StatsSingleCollisionFrames +
	    stats->dot3StatsMultipleCollisionFrames +
	    stats->dot3StatsExcessiveCollisions +
	    stats->dot3StatsLateCollisions -
	    ifp->if_collisions;

	TI_RING_DMASYNC(sc, ti_info.ti_stats, BUS_DMASYNC_PREREAD);
}

/*
 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
 * pointers to descriptors.
 */
int
ti_encap_tigon1(struct ti_softc *sc, struct mbuf *m_head, u_int32_t *txidx)
{
	u_int32_t frag, cur;
	struct ti_txmap_entry *entry;
	bus_dmamap_t txmap;
	struct ti_tx_desc txdesc;
	int i = 0;

	entry = SLIST_FIRST(&sc->ti_tx_map_listhead);
	if (entry == NULL)
		return (ENOBUFS);
	txmap = entry->dmamap;

	cur = frag = *txidx;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	if (bus_dmamap_load_mbuf(sc->sc_dmatag, txmap, m_head,
	    BUS_DMA_NOWAIT))
		return (ENOBUFS);

	/*
	 * Sanity check: avoid coming within 16 descriptors
	 * of the end of the ring.
	 */
	if (txmap->dm_nsegs > (TI_TX_RING_CNT - sc->ti_txcnt - 16))
		goto fail_unload;

	for (i = 0; i < txmap->dm_nsegs; i++) {
		if (sc->ti_cdata.ti_tx_chain[frag] != NULL)
			break;

		memset(&txdesc, 0, sizeof(txdesc));

		TI_HOSTADDR(txdesc.ti_addr) = txmap->dm_segs[i].ds_addr;
		txdesc.ti_len = txmap->dm_segs[i].ds_len & 0xffff;
		txdesc.ti_flags = 0;
		txdesc.ti_vlan_tag = 0;

#if NVLAN > 0
		if (m_head->m_flags & M_VLANTAG) {
			txdesc.ti_flags |= TI_BDFLAG_VLAN_TAG;
			txdesc.ti_vlan_tag = m_head->m_pkthdr.ether_vtag;
		}
#endif

		/* Tigon 1 ring is in NIC memory: copy descriptor over. */
		ti_mem_write(sc, TI_TX_RING_BASE + frag * sizeof(txdesc),
		    sizeof(txdesc), (caddr_t)&txdesc);

		cur = frag;
		TI_INC(frag, TI_TX_RING_CNT);
	}

	if (frag == sc->ti_tx_saved_considx)
		goto fail_unload;

	/* Mark the last descriptor and rewrite it in NIC memory. */
	txdesc.ti_flags |= TI_BDFLAG_END;
	ti_mem_write(sc, TI_TX_RING_BASE + cur * sizeof(txdesc),
	    sizeof(txdesc), (caddr_t)&txdesc);

	bus_dmamap_sync(sc->sc_dmatag, txmap, 0, txmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	sc->ti_cdata.ti_tx_chain[cur] = m_head;
	SLIST_REMOVE_HEAD(&sc->ti_tx_map_listhead, link);
	sc->ti_cdata.ti_tx_map[cur] = entry;
	sc->ti_txcnt += txmap->dm_nsegs;

	*txidx = frag;

	return (0);

fail_unload:
	bus_dmamap_unload(sc->sc_dmatag, txmap);

	return (ENOBUFS);
}

/*
 * Encapsulate an mbuf chain in the tx ring by coupling the mbuf data
 * pointers to descriptors.
 */
int
ti_encap_tigon2(struct ti_softc *sc, struct mbuf *m_head, u_int32_t *txidx)
{
	struct ti_tx_desc *f = NULL;
	u_int32_t frag, cur;
	struct ti_txmap_entry *entry;
	bus_dmamap_t txmap;
	int i = 0;

	entry = SLIST_FIRST(&sc->ti_tx_map_listhead);
	if (entry == NULL)
		return (ENOBUFS);
	txmap = entry->dmamap;

	cur = frag = *txidx;

	/*
	 * Start packing the mbufs in this chain into
	 * the fragment pointers. Stop when we run out
	 * of fragments or hit the end of the mbuf chain.
	 */
	if (bus_dmamap_load_mbuf(sc->sc_dmatag, txmap, m_head,
	    BUS_DMA_NOWAIT))
		return (ENOBUFS);

	/*
	 * Sanity check: avoid coming within 16 descriptors
	 * of the end of the ring.
	 */
	if (txmap->dm_nsegs > (TI_TX_RING_CNT - sc->ti_txcnt - 16))
		goto fail_unload;

	for (i = 0; i < txmap->dm_nsegs; i++) {
		/* Tigon 2 ring is in host memory: write in place. */
		f = &sc->ti_rdata->ti_tx_ring[frag];

		if (sc->ti_cdata.ti_tx_chain[frag] != NULL)
			break;

		TI_HOSTADDR(f->ti_addr) = txmap->dm_segs[i].ds_addr;
		f->ti_len = txmap->dm_segs[i].ds_len & 0xffff;
		f->ti_flags = 0;
		f->ti_vlan_tag = 0;

#if NVLAN > 0
		if (m_head->m_flags & M_VLANTAG) {
			f->ti_flags |= TI_BDFLAG_VLAN_TAG;
			f->ti_vlan_tag = m_head->m_pkthdr.ether_vtag;
		}
#endif

		cur = frag;
		TI_INC(frag, TI_TX_RING_CNT);
	}

	if (frag == sc->ti_tx_saved_considx)
		goto fail_unload;

	sc->ti_rdata->ti_tx_ring[cur].ti_flags |= TI_BDFLAG_END;

	bus_dmamap_sync(sc->sc_dmatag, txmap, 0, txmap->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);

	TI_RING_DMASYNC(sc, ti_tx_ring[cur], BUS_DMASYNC_POSTREAD);

	sc->ti_cdata.ti_tx_chain[cur] = m_head;
	SLIST_REMOVE_HEAD(&sc->ti_tx_map_listhead, link);
	sc->ti_cdata.ti_tx_map[cur] = entry;
	sc->ti_txcnt += txmap->dm_nsegs;

	*txidx = frag;

	return (0);

fail_unload:
	bus_dmamap_unload(sc->sc_dmatag, txmap);

	return (ENOBUFS);
}

/*
 * Main transmit routine. To avoid having to do mbuf copies, we put pointers
 * to the mbuf data regions directly in the transmit descriptors.
 */
void
ti_start(struct ifnet *ifp)
{
	struct ti_softc *sc;
	struct mbuf *m_head = NULL;
	u_int32_t prodidx;
	int pkts = 0, error;

	sc = ifp->if_softc;

	prodidx = sc->ti_tx_saved_prodidx;

	while(sc->ti_cdata.ti_tx_chain[prodidx] == NULL) {
		IFQ_POLL(&ifp->if_snd, m_head);
		if (m_head == NULL)
			break;

		/*
		 * Pack the data into the transmit ring. If we
		 * don't have room, set the OACTIVE flag and wait
		 * for the NIC to drain the ring.
		 */
		if (sc->ti_hwrev == TI_HWREV_TIGON)
			error = ti_encap_tigon1(sc, m_head, &prodidx);
		else
			error = ti_encap_tigon2(sc, m_head, &prodidx);

		if (error) {
			ifp->if_flags |= IFF_OACTIVE;
			break;
		}

		/* now we are committed to transmit the packet */
		IFQ_DEQUEUE(&ifp->if_snd, m_head);
		pkts++;

		/*
		 * If there's a BPF listener, bounce a copy of this frame
		 * to him.
		 */
#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m_head, BPF_DIRECTION_OUT);
#endif
	}
	if (pkts == 0)
		return;

	/* Transmit */
	sc->ti_tx_saved_prodidx = prodidx;
	CSR_WRITE_4(sc, TI_MB_SENDPROD_IDX, prodidx);

	/*
	 * Set a timeout in case the chip goes out to lunch.
	 */
	ifp->if_timer = 5;
}

/*
 * First-stage initialization: stop the chip and reload the general
 * information block and firmware.  Second stage happens in ti_init2()
 * once the firmware is up.
 */
void
ti_init(void *xsc)
{
	struct ti_softc *sc = xsc;
	int s;

	s = splnet();

	/* Cancel pending I/O and flush buffers. */
	ti_stop(sc);

	/* Init the gen info block, ring control blocks and firmware.
*/ 2222 if (ti_gibinit(sc)) { 2223 printf("%s: initialization failure\n", sc->sc_dv.dv_xname); 2224 splx(s); 2225 return; 2226 } 2227 2228 splx(s); 2229 } 2230 2231 void 2232 ti_init2(struct ti_softc *sc) 2233 { 2234 struct ti_cmd_desc cmd; 2235 struct ifnet *ifp; 2236 u_int16_t *m; 2237 struct ifmedia *ifm; 2238 int tmp; 2239 2240 ifp = &sc->arpcom.ac_if; 2241 2242 /* Specify MTU and interface index. */ 2243 CSR_WRITE_4(sc, TI_GCR_IFINDEX, sc->sc_dv.dv_unit); 2244 CSR_WRITE_4(sc, TI_GCR_IFMTU, 2245 TI_JUMBO_FRAMELEN + ETHER_VLAN_ENCAP_LEN); 2246 TI_DO_CMD(TI_CMD_UPDATE_GENCOM, 0, 0); 2247 2248 /* Load our MAC address. */ 2249 m = (u_int16_t *)&sc->arpcom.ac_enaddr[0]; 2250 CSR_WRITE_4(sc, TI_GCR_PAR0, htons(m[0])); 2251 CSR_WRITE_4(sc, TI_GCR_PAR1, (htons(m[1]) << 16) | htons(m[2])); 2252 TI_DO_CMD(TI_CMD_SET_MAC_ADDR, 0, 0); 2253 2254 /* Program promiscuous mode and multicast filters. */ 2255 ti_iff(sc); 2256 2257 /* 2258 * If this is a Tigon 1, we should tell the 2259 * firmware to use software packet filtering. 2260 */ 2261 if (sc->ti_hwrev == TI_HWREV_TIGON) 2262 TI_DO_CMD(TI_CMD_FDR_FILTERING, TI_CMD_CODE_FILT_ENB, 0); 2263 2264 /* Init RX ring. */ 2265 if (ti_init_rx_ring_std(sc) == ENOBUFS) 2266 panic("not enough mbufs for rx ring"); 2267 2268 /* Init jumbo RX ring. */ 2269 ti_init_rx_ring_jumbo(sc); 2270 2271 /* 2272 * If this is a Tigon 2, we can also configure the 2273 * mini ring. 2274 */ 2275 if (sc->ti_hwrev == TI_HWREV_TIGON_II) 2276 ti_init_rx_ring_mini(sc); 2277 2278 CSR_WRITE_4(sc, TI_GCR_RXRETURNCONS_IDX, 0); 2279 sc->ti_rx_saved_considx = 0; 2280 2281 /* Init TX ring. */ 2282 ti_init_tx_ring(sc); 2283 2284 /* Tell firmware we're alive. */ 2285 TI_DO_CMD(TI_CMD_HOST_STATE, TI_CMD_CODE_STACK_UP, 0); 2286 2287 /* Enable host interrupts. */ 2288 CSR_WRITE_4(sc, TI_MB_HOSTINTR, 0); 2289 2290 ifp->if_flags |= IFF_RUNNING; 2291 ifp->if_flags &= ~IFF_OACTIVE; 2292 2293 /* 2294 * Make sure to set media properly. 
We have to do this 2295 * here since we have to issue commands in order to set 2296 * the link negotiation and we can't issue commands until 2297 * the firmware is running. 2298 */ 2299 ifm = &sc->ifmedia; 2300 tmp = ifm->ifm_media; 2301 ifm->ifm_media = ifm->ifm_cur->ifm_media; 2302 ti_ifmedia_upd(ifp); 2303 ifm->ifm_media = tmp; 2304 } 2305 2306 /* 2307 * Set media options. 2308 */ 2309 int 2310 ti_ifmedia_upd(struct ifnet *ifp) 2311 { 2312 struct ti_softc *sc; 2313 struct ifmedia *ifm; 2314 struct ti_cmd_desc cmd; 2315 2316 sc = ifp->if_softc; 2317 ifm = &sc->ifmedia; 2318 2319 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 2320 return(EINVAL); 2321 2322 switch(IFM_SUBTYPE(ifm->ifm_media)) { 2323 case IFM_AUTO: 2324 CSR_WRITE_4(sc, TI_GCR_GLINK, TI_GLNK_PREF|TI_GLNK_1000MB| 2325 TI_GLNK_FULL_DUPLEX|TI_GLNK_RX_FLOWCTL_Y| 2326 TI_GLNK_AUTONEGENB|TI_GLNK_ENB); 2327 CSR_WRITE_4(sc, TI_GCR_LINK, TI_LNK_100MB|TI_LNK_10MB| 2328 TI_LNK_FULL_DUPLEX|TI_LNK_HALF_DUPLEX| 2329 TI_LNK_AUTONEGENB|TI_LNK_ENB); 2330 TI_DO_CMD(TI_CMD_LINK_NEGOTIATION, 2331 TI_CMD_CODE_NEGOTIATE_BOTH, 0); 2332 break; 2333 case IFM_1000_SX: 2334 case IFM_1000_T: 2335 CSR_WRITE_4(sc, TI_GCR_GLINK, TI_GLNK_PREF|TI_GLNK_1000MB| 2336 TI_GLNK_RX_FLOWCTL_Y|TI_GLNK_ENB); 2337 CSR_WRITE_4(sc, TI_GCR_LINK, 0); 2338 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) { 2339 TI_SETBIT(sc, TI_GCR_GLINK, TI_GLNK_FULL_DUPLEX); 2340 } 2341 TI_DO_CMD(TI_CMD_LINK_NEGOTIATION, 2342 TI_CMD_CODE_NEGOTIATE_GIGABIT, 0); 2343 break; 2344 case IFM_100_FX: 2345 case IFM_10_FL: 2346 case IFM_100_TX: 2347 case IFM_10_T: 2348 CSR_WRITE_4(sc, TI_GCR_GLINK, 0); 2349 CSR_WRITE_4(sc, TI_GCR_LINK, TI_LNK_ENB|TI_LNK_PREF); 2350 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_FX || 2351 IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) { 2352 TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_100MB); 2353 } else { 2354 TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_10MB); 2355 } 2356 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) { 2357 TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_FULL_DUPLEX); 
2358 } else { 2359 TI_SETBIT(sc, TI_GCR_LINK, TI_LNK_HALF_DUPLEX); 2360 } 2361 TI_DO_CMD(TI_CMD_LINK_NEGOTIATION, 2362 TI_CMD_CODE_NEGOTIATE_10_100, 0); 2363 break; 2364 } 2365 2366 return (0); 2367 } 2368 2369 /* 2370 * Report current media status. 2371 */ 2372 void 2373 ti_ifmedia_sts(struct ifnet *ifp, struct ifmediareq *ifmr) 2374 { 2375 struct ti_softc *sc; 2376 u_int32_t media = 0; 2377 2378 sc = ifp->if_softc; 2379 2380 ifmr->ifm_status = IFM_AVALID; 2381 ifmr->ifm_active = IFM_ETHER; 2382 2383 if (sc->ti_linkstat == TI_EV_CODE_LINK_DOWN) { 2384 ifmr->ifm_active |= IFM_NONE; 2385 return; 2386 } 2387 2388 ifmr->ifm_status |= IFM_ACTIVE; 2389 2390 if (sc->ti_linkstat == TI_EV_CODE_GIG_LINK_UP) { 2391 media = CSR_READ_4(sc, TI_GCR_GLINK_STAT); 2392 if (sc->ti_copper) 2393 ifmr->ifm_active |= IFM_1000_T; 2394 else 2395 ifmr->ifm_active |= IFM_1000_SX; 2396 if (media & TI_GLNK_FULL_DUPLEX) 2397 ifmr->ifm_active |= IFM_FDX; 2398 else 2399 ifmr->ifm_active |= IFM_HDX; 2400 } else if (sc->ti_linkstat == TI_EV_CODE_LINK_UP) { 2401 media = CSR_READ_4(sc, TI_GCR_LINK_STAT); 2402 if (sc->ti_copper) { 2403 if (media & TI_LNK_100MB) 2404 ifmr->ifm_active |= IFM_100_TX; 2405 if (media & TI_LNK_10MB) 2406 ifmr->ifm_active |= IFM_10_T; 2407 } else { 2408 if (media & TI_LNK_100MB) 2409 ifmr->ifm_active |= IFM_100_FX; 2410 if (media & TI_LNK_10MB) 2411 ifmr->ifm_active |= IFM_10_FL; 2412 } 2413 if (media & TI_LNK_FULL_DUPLEX) 2414 ifmr->ifm_active |= IFM_FDX; 2415 if (media & TI_LNK_HALF_DUPLEX) 2416 ifmr->ifm_active |= IFM_HDX; 2417 } 2418 } 2419 2420 int 2421 ti_ioctl(struct ifnet *ifp, u_long command, caddr_t data) 2422 { 2423 struct ti_softc *sc = ifp->if_softc; 2424 struct ifaddr *ifa = (struct ifaddr *)data; 2425 struct ifreq *ifr = (struct ifreq *)data; 2426 int s, error = 0; 2427 2428 s = splnet(); 2429 2430 switch(command) { 2431 case SIOCSIFADDR: 2432 ifp->if_flags |= IFF_UP; 2433 if ((ifp->if_flags & IFF_RUNNING) == 0) 2434 ti_init(sc); 2435 #ifdef INET 2436 if 
(ifa->ifa_addr->sa_family == AF_INET) 2437 arp_ifinit(&sc->arpcom, ifa); 2438 #endif 2439 break; 2440 2441 case SIOCSIFFLAGS: 2442 if (ifp->if_flags & IFF_UP) { 2443 if (ifp->if_flags & IFF_RUNNING) 2444 error = ENETRESET; 2445 else 2446 ti_init(sc); 2447 } else { 2448 if (ifp->if_flags & IFF_RUNNING) 2449 ti_stop(sc); 2450 } 2451 break; 2452 2453 case SIOCSIFMEDIA: 2454 case SIOCGIFMEDIA: 2455 error = ifmedia_ioctl(ifp, ifr, &sc->ifmedia, command); 2456 break; 2457 2458 default: 2459 error = ether_ioctl(ifp, &sc->arpcom, command, data); 2460 } 2461 2462 if (error == ENETRESET) { 2463 if (ifp->if_flags & IFF_RUNNING) 2464 ti_iff(sc); 2465 error = 0; 2466 } 2467 2468 splx(s); 2469 return (error); 2470 } 2471 2472 void 2473 ti_watchdog(struct ifnet *ifp) 2474 { 2475 struct ti_softc *sc; 2476 2477 sc = ifp->if_softc; 2478 2479 printf("%s: watchdog timeout -- resetting\n", sc->sc_dv.dv_xname); 2480 ti_stop(sc); 2481 ti_init(sc); 2482 2483 ifp->if_oerrors++; 2484 } 2485 2486 /* 2487 * Stop the adapter and free any mbufs allocated to the 2488 * RX and TX lists. 2489 */ 2490 void 2491 ti_stop(struct ti_softc *sc) 2492 { 2493 struct ifnet *ifp; 2494 struct ti_cmd_desc cmd; 2495 2496 ifp = &sc->arpcom.ac_if; 2497 2498 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 2499 2500 /* Disable host interrupts. */ 2501 CSR_WRITE_4(sc, TI_MB_HOSTINTR, 1); 2502 /* 2503 * Tell firmware we're shutting down. 2504 */ 2505 TI_DO_CMD(TI_CMD_HOST_STATE, TI_CMD_CODE_STACK_DOWN, 0); 2506 2507 /* Halt and reinitialize. */ 2508 ti_chipinit(sc); 2509 ti_mem_set(sc, 0x2000, 0x100000 - 0x2000); 2510 ti_chipinit(sc); 2511 2512 /* Free the RX lists. */ 2513 ti_free_rx_ring_std(sc); 2514 2515 /* Free jumbo RX list. */ 2516 ti_free_rx_ring_jumbo(sc); 2517 2518 /* Free mini RX list. */ 2519 ti_free_rx_ring_mini(sc); 2520 2521 /* Free TX buffers. 
*/ 2522 ti_free_tx_ring(sc); 2523 2524 sc->ti_ev_prodidx.ti_idx = 0; 2525 sc->ti_return_prodidx.ti_idx = 0; 2526 sc->ti_tx_considx.ti_idx = 0; 2527 sc->ti_tx_saved_considx = TI_TXCONS_UNSET; 2528 } 2529