/*	$NetBSD: if_txp.c,v 1.8 2004/10/30 18:09:22 thorpej Exp $ */

/*
 * Copyright (c) 2001
 *	Jason L. Wright <jason@thought.net>, Theo de Raadt, and
 *	Aaron Campbell <aaron@monkey.org>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR THE VOICES IN THEIR HEADS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Driver for the 3c990 (Typhoon) Ethernet ASIC.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_txp.c,v 1.8 2004/10/30 18:09:22 thorpej Exp $");

#include "bpfilter.h"
#include "opt_inet.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/callout.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_types.h>
#include <net/if_ether.h>
#include <net/if_arp.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_inarp.h>
#endif

#include <net/if_media.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <uvm/uvm_extern.h>	/* for vtophys */
#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_txpreg.h>

#include <dev/microcode/typhoon/3c990img.h>

/*
 * These currently break the 3c990 firmware; hopefully this will be
 * resolved at some point.
 */
#undef TRY_TX_UDP_CSUM
#undef TRY_TX_TCP_CSUM

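/*
 * Rough overview: the Typhoon runs its own firmware, downloaded by
 * txp_download_fw().  Host and card then communicate through a boot
 * record of DMA addresses set up in txp_alloc_rings(): two TX rings
 * (high/low priority), two RX rings, a free receive-buffer ring, a
 * command ring, a response ring, and a "hostvar" block through which
 * the card exports its ring read/write indices.  Commands are posted
 * on the command ring by txp_command()/txp_command2() and matched to
 * response descriptors by id and sequence number in txp_response().
 */
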
int txp_probe(struct device *, struct cfdata *, void *);
void txp_attach(struct device *, struct device *, void *);
int txp_intr(void *);
void txp_tick(void *);
void txp_shutdown(void *);
int txp_ioctl(struct ifnet *, u_long, caddr_t);
void txp_start(struct ifnet *);
void txp_stop(struct txp_softc *);
void txp_init(struct txp_softc *);
void txp_watchdog(struct ifnet *);

int txp_chip_init(struct txp_softc *);
int txp_reset_adapter(struct txp_softc *);
int txp_download_fw(struct txp_softc *);
int txp_download_fw_wait(struct txp_softc *);
int txp_download_fw_section(struct txp_softc *,
    struct txp_fw_section_header *, int);
int txp_alloc_rings(struct txp_softc *);
void txp_dma_free(struct txp_softc *, struct txp_dma_alloc *);
int txp_dma_malloc(struct txp_softc *, bus_size_t, struct txp_dma_alloc *, int);
void txp_set_filter(struct txp_softc *);

int txp_cmd_desc_numfree(struct txp_softc *);
int txp_command(struct txp_softc *, u_int16_t, u_int16_t, u_int32_t,
    u_int32_t, u_int16_t *, u_int32_t *, u_int32_t *, int);
int txp_command2(struct txp_softc *, u_int16_t, u_int16_t,
    u_int32_t, u_int32_t, struct txp_ext_desc *, u_int8_t,
    struct txp_rsp_desc **, int);
int txp_response(struct txp_softc *, u_int32_t, u_int16_t, u_int16_t,
    struct txp_rsp_desc **);
void txp_rsp_fixup(struct txp_softc *, struct txp_rsp_desc *,
    struct txp_rsp_desc *);
void txp_capabilities(struct txp_softc *);

void txp_ifmedia_sts(struct ifnet *, struct ifmediareq *);
int txp_ifmedia_upd(struct ifnet *);
void txp_show_descriptor(void *);
void txp_tx_reclaim(struct txp_softc *, struct txp_tx_ring *,
    struct txp_dma_alloc *);
void txp_rxbuf_reclaim(struct txp_softc *);
void txp_rx_reclaim(struct txp_softc *, struct txp_rx_ring *,
    struct txp_dma_alloc *);

CFATTACH_DECL(txp, sizeof(struct txp_softc), txp_probe, txp_attach,
    NULL, NULL);

const struct txp_pci_match {
	int vid, did, flags;
} txp_devices[] = {
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990, 0 },
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990TX95, 0 },
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990TX97, 0 },
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990SVR95, TXP_SERVERVERSION },
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990SVR97, TXP_SERVERVERSION },
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C990B, TXP_USESUBSYSTEM },
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C990BSVR, TXP_SERVERVERSION },
	{ PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990FX, TXP_USESUBSYSTEM },
};

static const struct txp_pci_match *txp_pcilookup(pcireg_t);

static const struct {
	u_int16_t mask, value;
	int flags;
} txp_subsysinfo[] = {
	{0xf000, 0x2000, TXP_SERVERVERSION},
	{0x0100, 0x0100, TXP_FIBER},
#if 0 /* information from 3com header, unused */
	{0x0010, 0x0010, /* secured firmware */},
	{0x0003, 0x0000, /* variable DES */},
	{0x0003, 0x0001, /* single DES - "95" */},
	{0x0003, 0x0002, /* triple DES - "97" */},
#endif
};

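/*
 * For example, a subsystem product id of the form 0x2xxx matches
 * {0xf000, 0x2000} above and marks a server ("SVR") card, and bit
 * 0x0100 marks a fiber card; txp_attach() folds these flags in for
 * TXP_USESUBSYSTEM devices.
 */
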
static const struct txp_pci_match *
txp_pcilookup(id)
	pcireg_t id;
{
	int i;

	for (i = 0; i < sizeof(txp_devices) / sizeof(txp_devices[0]); i++)
		if ((PCI_VENDOR(id) == txp_devices[i].vid) &&
		    (PCI_PRODUCT(id) == txp_devices[i].did))
			return (&txp_devices[i]);
	return (0);
}

int
txp_probe(parent, match, aux)
	struct device *parent;
	struct cfdata *match;
	void *aux;
{
	struct pci_attach_args *pa = aux;

	if (txp_pcilookup(pa->pa_id))
		return (1);
	return (0);
}

void
txp_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct txp_softc *sc = (struct txp_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	struct ifnet *ifp = &sc->sc_arpcom.ec_if;
	u_int32_t command;
	u_int16_t p1;
	u_int32_t p2;
	u_char enaddr[6];
	const struct txp_pci_match *pcimatch;
	u_int16_t subsys;
	int i, flags;
	char devinfo[256];

	sc->sc_cold = 1;

	pcimatch = txp_pcilookup(pa->pa_id);
	flags = pcimatch->flags;
	if (pcimatch->flags & TXP_USESUBSYSTEM) {
		subsys = PCI_PRODUCT(pci_conf_read(pc, pa->pa_tag,
		    PCI_SUBSYS_ID_REG));
		for (i = 0;
		     i < sizeof(txp_subsysinfo)/sizeof(txp_subsysinfo[0]);
		     i++)
			if ((subsys & txp_subsysinfo[i].mask) ==
			    txp_subsysinfo[i].value)
				flags |= txp_subsysinfo[i].flags;
	}
	sc->sc_flags = flags;

	pci_devinfo(pa->pa_id, 0, 0, devinfo, sizeof(devinfo));
#define TXP_EXTRAINFO ((flags & (TXP_USESUBSYSTEM|TXP_SERVERVERSION)) == \
    (TXP_USESUBSYSTEM|TXP_SERVERVERSION) ? " (SVR)" : "")
	printf(": %s%s\n%s", devinfo, TXP_EXTRAINFO, sc->sc_dev.dv_xname);

	command = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);

	if (!(command & PCI_COMMAND_MASTER_ENABLE)) {
		printf(": failed to enable bus mastering\n");
		return;
	}

	if (!(command & PCI_COMMAND_MEM_ENABLE)) {
		printf(": failed to enable memory mapping\n");
		return;
	}
	if (pci_mapreg_map(pa, TXP_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_bt, &sc->sc_bh, NULL, NULL)) {
		printf(": can't map mem space\n");
		return;
	}

	sc->sc_dmat = pa->pa_dmat;

	/*
	 * Allocate our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, txp_intr, sc);
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": interrupting at %s\n", intrstr);

	if (txp_chip_init(sc))
		goto cleanupintr;

	if (txp_download_fw(sc))
		goto cleanupintr;

	if (txp_alloc_rings(sc))
		goto cleanupintr;

	if (txp_command(sc, TXP_CMD_MAX_PKT_SIZE_WRITE, TXP_MAX_PKTLEN, 0, 0,
	    NULL, NULL, NULL, 1))
		goto cleanupintr;

	if (txp_command(sc, TXP_CMD_STATION_ADDRESS_READ, 0, 0, 0,
	    &p1, &p2, NULL, 1))
		goto cleanupintr;

	txp_set_filter(sc);

	p1 = htole16(p1);
	enaddr[0] = ((u_int8_t *)&p1)[1];
	enaddr[1] = ((u_int8_t *)&p1)[0];
	p2 = htole32(p2);
	enaddr[2] = ((u_int8_t *)&p2)[3];
	enaddr[3] = ((u_int8_t *)&p2)[2];
	enaddr[4] = ((u_int8_t *)&p2)[1];
	enaddr[5] = ((u_int8_t *)&p2)[0];

	printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname,
	    ether_sprintf(enaddr));
	sc->sc_cold = 0;

	ifmedia_init(&sc->sc_ifmedia, 0, txp_ifmedia_upd, txp_ifmedia_sts);
	if (flags & TXP_FIBER) {
		ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_FX,
		    0, NULL);
		ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_FX|IFM_HDX,
		    0, NULL);
		ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_FX|IFM_FDX,
		    0, NULL);
	} else {
		ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_10_T,
		    0, NULL);
		ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX,
		    0, NULL);
		ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_10_T|IFM_FDX,
		    0, NULL);
		ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_TX,
		    0, NULL);
		ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_TX|IFM_HDX,
		    0, NULL);
		ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_TX|IFM_FDX,
		    0, NULL);
	}
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);

	sc->sc_xcvr = TXP_XCVR_AUTO;
	txp_command(sc, TXP_CMD_XCVR_SELECT, TXP_XCVR_AUTO, 0, 0,
	    NULL, NULL, NULL, 0);
	ifmedia_set(&sc->sc_ifmedia, IFM_ETHER|IFM_AUTO);

	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = txp_ioctl;
	ifp->if_start = txp_start;
	ifp->if_watchdog = txp_watchdog;
	ifp->if_baudrate = 10000000;
	IFQ_SET_MAXLEN(&ifp->if_snd, TX_ENTRIES);
	IFQ_SET_READY(&ifp->if_snd);
	ifp->if_capabilities = 0;
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	txp_capabilities(sc);

	callout_init(&sc->sc_tick);
	callout_setfunc(&sc->sc_tick, txp_tick, sc);

	/*
	 * Attach us everywhere
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	shutdownhook_establish(txp_shutdown, sc);

	return;

cleanupintr:
	pci_intr_disestablish(pc, sc->sc_ih);

	return;
}

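/*
 * Note that txp_chip_init() masks and acks every interrupt source
 * twice: once before the soft reset and once after, presumably since
 * the reset puts the IER/IMR/ISR back into their power-on state.
 */
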
int
txp_chip_init(sc)
	struct txp_softc *sc;
{
	/* disable interrupts */
	WRITE_REG(sc, TXP_IER, 0);
	WRITE_REG(sc, TXP_IMR,
	    TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_LATCH);

	/* ack all interrupts */
	WRITE_REG(sc, TXP_ISR, TXP_INT_RESERVED | TXP_INT_LATCH |
	    TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 |
	    TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_A2H_3 | TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0);

	if (txp_reset_adapter(sc))
		return (-1);

	/* disable interrupts */
	WRITE_REG(sc, TXP_IER, 0);
	WRITE_REG(sc, TXP_IMR,
	    TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_LATCH);

	/* ack all interrupts */
	WRITE_REG(sc, TXP_ISR, TXP_INT_RESERVED | TXP_INT_LATCH |
	    TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 |
	    TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_A2H_3 | TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0);

	return (0);
}

int
txp_reset_adapter(sc)
	struct txp_softc *sc;
{
	u_int32_t r;
	int i;

	WRITE_REG(sc, TXP_SRR, TXP_SRR_ALL);
	DELAY(1000);
	WRITE_REG(sc, TXP_SRR, 0);

	/* Should wait max 6 seconds */
	for (i = 0; i < 6000; i++) {
		r = READ_REG(sc, TXP_A2H_0);
		if (r == STAT_WAITING_FOR_HOST_REQUEST)
			break;
		DELAY(1000);
	}

	if (r != STAT_WAITING_FOR_HOST_REQUEST) {
		printf("%s: reset hung\n", TXP_DEVNAME(sc));
		return (-1);
	}

	return (0);
}

int
txp_download_fw(sc)
	struct txp_softc *sc;
{
	struct txp_fw_file_header *fileheader;
	struct txp_fw_section_header *secthead;
	int sect;
	u_int32_t r, i, ier, imr;

	ier = READ_REG(sc, TXP_IER);
	WRITE_REG(sc, TXP_IER, ier | TXP_INT_A2H_0);

	imr = READ_REG(sc, TXP_IMR);
	WRITE_REG(sc, TXP_IMR, imr | TXP_INT_A2H_0);

	for (i = 0; i < 10000; i++) {
		r = READ_REG(sc, TXP_A2H_0);
		if (r == STAT_WAITING_FOR_HOST_REQUEST)
			break;
		DELAY(50);
	}
	if (r != STAT_WAITING_FOR_HOST_REQUEST) {
		printf(": not waiting for host request\n");
		return (-1);
	}

	/* Ack the status */
	WRITE_REG(sc, TXP_ISR, TXP_INT_A2H_0);

	fileheader = (struct txp_fw_file_header *)tc990image;
	if (bcmp("TYPHOON", fileheader->magicid, sizeof(fileheader->magicid))) {
		printf(": fw invalid magic\n");
		return (-1);
	}

	/* Tell boot firmware to get ready for image */
	WRITE_REG(sc, TXP_H2A_1, le32toh(fileheader->addr));
	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_RUNTIME_IMAGE);

	if (txp_download_fw_wait(sc)) {
		printf("%s: fw wait failed, initial\n", sc->sc_dev.dv_xname);
		return (-1);
	}

	secthead = (struct txp_fw_section_header *)(((u_int8_t *)tc990image) +
	    sizeof(struct txp_fw_file_header));

	for (sect = 0; sect < le32toh(fileheader->nsections); sect++) {
		if (txp_download_fw_section(sc, secthead, sect))
			return (-1);
		secthead = (struct txp_fw_section_header *)
		    (((u_int8_t *)secthead) + le32toh(secthead->nbytes) +
		    sizeof(*secthead));
	}

	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_DOWNLOAD_COMPLETE);

	for (i = 0; i < 10000; i++) {
		r = READ_REG(sc, TXP_A2H_0);
		if (r == STAT_WAITING_FOR_BOOT)
			break;
		DELAY(50);
	}
	if (r != STAT_WAITING_FOR_BOOT) {
		printf(": not waiting for boot\n");
		return (-1);
	}

	WRITE_REG(sc, TXP_IER, ier);
	WRITE_REG(sc, TXP_IMR, imr);

	return (0);
}

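/*
 * The download handshake, used above and in txp_download_fw_section():
 * the host writes a boot command to TXP_H2A_0 (arguments, if any, in
 * H2A_1..H2A_5), then waits for the card to raise TXP_INT_A2H_0 in the
 * ISR and post its new state in TXP_A2H_0.  txp_download_fw_wait()
 * polls for that acknowledgement and checks that the firmware is
 * asking for the next segment.
 */
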
int
txp_download_fw_wait(sc)
	struct txp_softc *sc;
{
	u_int32_t i, r;

	for (i = 0; i < 10000; i++) {
		r = READ_REG(sc, TXP_ISR);
		if (r & TXP_INT_A2H_0)
			break;
		DELAY(50);
	}

	if (!(r & TXP_INT_A2H_0)) {
		printf(": fw wait failed comm0\n");
		return (-1);
	}

	WRITE_REG(sc, TXP_ISR, TXP_INT_A2H_0);

	r = READ_REG(sc, TXP_A2H_0);
	if (r != STAT_WAITING_FOR_SEGMENT) {
		printf(": fw not waiting for segment\n");
		return (-1);
	}
	return (0);
}

int
txp_download_fw_section(sc, sect, sectnum)
	struct txp_softc *sc;
	struct txp_fw_section_header *sect;
	int sectnum;
{
	struct txp_dma_alloc dma;
	int rseg, err = 0;
	struct mbuf m;
	u_int16_t csum;

	/* Skip zero length sections */
	if (sect->nbytes == 0)
		return (0);

	/* Make sure we aren't past the end of the image */
	rseg = ((u_int8_t *)sect) - ((u_int8_t *)tc990image);
	if (rseg >= sizeof(tc990image)) {
		printf(": fw invalid section address, section %d\n", sectnum);
		return (-1);
	}

	/* Make sure this section doesn't go past the end */
	rseg += le32toh(sect->nbytes);
	if (rseg >= sizeof(tc990image)) {
		printf(": fw truncated section %d\n", sectnum);
		return (-1);
	}

	/* map a buffer, copy segment to it, get physaddr */
	if (txp_dma_malloc(sc, le32toh(sect->nbytes), &dma, 0)) {
		printf(": fw dma malloc failed, section %d\n", sectnum);
		return (-1);
	}

	bcopy(((u_int8_t *)sect) + sizeof(*sect), dma.dma_vaddr,
	    le32toh(sect->nbytes));

	/*
	 * dummy up mbuf and verify section checksum
	 */
	m.m_type = MT_DATA;
	m.m_next = m.m_nextpkt = NULL;
	m.m_len = le32toh(sect->nbytes);
	m.m_data = dma.dma_vaddr;
	m.m_flags = 0;
	csum = in_cksum(&m, le32toh(sect->nbytes));
	if (csum != sect->cksum) {
		printf(": fw section %d, bad cksum (expected 0x%x got 0x%x)\n",
		    sectnum, sect->cksum, csum);
		err = -1;
		goto bail;
	}

	bus_dmamap_sync(sc->sc_dmat, dma.dma_map, 0,
	    dma.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	WRITE_REG(sc, TXP_H2A_1, le32toh(sect->nbytes));
	WRITE_REG(sc, TXP_H2A_2, le32toh(sect->cksum));
	WRITE_REG(sc, TXP_H2A_3, le32toh(sect->addr));
	WRITE_REG(sc, TXP_H2A_4, dma.dma_paddr >> 32);
	WRITE_REG(sc, TXP_H2A_5, dma.dma_paddr & 0xffffffff);
	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_SEGMENT_AVAILABLE);

	if (txp_download_fw_wait(sc)) {
		printf("%s: fw wait failed, section %d\n",
		    sc->sc_dev.dv_xname, sectnum);
		err = -1;
	}

	bus_dmamap_sync(sc->sc_dmat, dma.dma_map, 0,
	    dma.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);

bail:
	txp_dma_free(sc, &dma);

	return (err);
}

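/*
 * Interrupt service: mask everything, then ack and re-read TXP_ISR
 * until it reads back zero, reclaiming whatever the hostvar indices
 * (DMAed up by the card) say is ready: received descriptors, empty
 * receive-buffer slots, and completed transmit descriptors.  On the
 * way out the IMR is rewritten to mask only TXP_INT_A2H_3, which this
 * driver apparently does not use, re-enabling everything else.
 */
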
int
txp_intr(vsc)
	void *vsc;
{
	struct txp_softc *sc = vsc;
	struct txp_hostvar *hv = sc->sc_hostvar;
	u_int32_t isr;
	int claimed = 0;

	/* mask all interrupts */
	WRITE_REG(sc, TXP_IMR, TXP_INT_RESERVED | TXP_INT_SELF |
	    TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 |
	    TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0 |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT | TXP_INT_LATCH);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_host_dma.dma_map, 0,
	    sizeof(struct txp_hostvar),
	    BUS_DMASYNC_POSTWRITE|BUS_DMASYNC_POSTREAD);

	isr = READ_REG(sc, TXP_ISR);
	while (isr) {
		claimed = 1;
		WRITE_REG(sc, TXP_ISR, isr);

		if ((*sc->sc_rxhir.r_roff) != (*sc->sc_rxhir.r_woff))
			txp_rx_reclaim(sc, &sc->sc_rxhir, &sc->sc_rxhiring_dma);
		if ((*sc->sc_rxlor.r_roff) != (*sc->sc_rxlor.r_woff))
			txp_rx_reclaim(sc, &sc->sc_rxlor, &sc->sc_rxloring_dma);

		if (hv->hv_rx_buf_write_idx == hv->hv_rx_buf_read_idx)
			txp_rxbuf_reclaim(sc);

		if (sc->sc_txhir.r_cnt && (sc->sc_txhir.r_cons !=
		    TXP_OFFSET2IDX(le32toh(*(sc->sc_txhir.r_off)))))
			txp_tx_reclaim(sc, &sc->sc_txhir, &sc->sc_txhiring_dma);

		if (sc->sc_txlor.r_cnt && (sc->sc_txlor.r_cons !=
		    TXP_OFFSET2IDX(le32toh(*(sc->sc_txlor.r_off)))))
			txp_tx_reclaim(sc, &sc->sc_txlor, &sc->sc_txloring_dma);

		isr = READ_REG(sc, TXP_ISR);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_host_dma.dma_map, 0,
	    sizeof(struct txp_hostvar),
	    BUS_DMASYNC_POSTWRITE|BUS_DMASYNC_POSTREAD);

	/* unmask all interrupts */
	WRITE_REG(sc, TXP_IMR, TXP_INT_A2H_3);

	txp_start(&sc->sc_arpcom.ec_if);

	return (claimed);
}

void
txp_rx_reclaim(sc, r, dma)
	struct txp_softc *sc;
	struct txp_rx_ring *r;
	struct txp_dma_alloc *dma;
{
	struct ifnet *ifp = &sc->sc_arpcom.ec_if;
	struct txp_rx_desc *rxd;
	struct mbuf *m;
	struct txp_swdesc *sd;
	u_int32_t roff, woff;
	int sumflags = 0;
	int idx;

	roff = le32toh(*r->r_roff);
	woff = le32toh(*r->r_woff);
	idx = roff / sizeof(struct txp_rx_desc);
	rxd = r->r_desc + idx;

	while (roff != woff) {

		bus_dmamap_sync(sc->sc_dmat, dma->dma_map,
		    idx * sizeof(struct txp_rx_desc),
		    sizeof(struct txp_rx_desc), BUS_DMASYNC_POSTREAD);

		if (rxd->rx_flags & RX_FLAGS_ERROR) {
			printf("%s: error 0x%x\n", sc->sc_dev.dv_xname,
			    le32toh(rxd->rx_stat));
			ifp->if_ierrors++;
			goto next;
		}

		/* retrieve stashed pointer */
		bcopy((u_long *)&rxd->rx_vaddrlo, &sd, sizeof(sd));

		bus_dmamap_sync(sc->sc_dmat, sd->sd_map, 0,
		    sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, sd->sd_map);
		bus_dmamap_destroy(sc->sc_dmat, sd->sd_map);
		m = sd->sd_mbuf;
		free(sd, M_DEVBUF);
		m->m_pkthdr.len = m->m_len = le16toh(rxd->rx_len);

#ifdef __STRICT_ALIGNMENT
		{
			/*
			 * XXX Nice chip, except it won't accept "off by 2"
			 * buffers, so we're forced to copy.  Supposedly
			 * this will be fixed in a newer firmware rev
			 * and this will be temporary.
			 */
			struct mbuf *mnew;

			MGETHDR(mnew, M_DONTWAIT, MT_DATA);
			if (mnew == NULL) {
				m_freem(m);
				goto next;
			}
			if (m->m_len > (MHLEN - 2)) {
				MCLGET(mnew, M_DONTWAIT);
				if (!(mnew->m_flags & M_EXT)) {
					m_freem(mnew);
					m_freem(m);
					goto next;
				}
			}
			mnew->m_pkthdr.rcvif = ifp;
			mnew->m_pkthdr.len = mnew->m_len = m->m_len;
			mnew->m_data += 2;
			bcopy(m->m_data, mnew->m_data, m->m_len);
			m_freem(m);
			m = mnew;
		}
#endif

#if NBPFILTER > 0
		/*
		 * Handle BPF listeners.  Let the BPF user see the packet.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif

		if (rxd->rx_stat & htole32(RX_STAT_IPCKSUMBAD))
			sumflags |= (M_CSUM_IPv4|M_CSUM_IPv4_BAD);
		else if (rxd->rx_stat & htole32(RX_STAT_IPCKSUMGOOD))
			sumflags |= M_CSUM_IPv4;

		if (rxd->rx_stat & htole32(RX_STAT_TCPCKSUMBAD))
			sumflags |= (M_CSUM_TCPv4|M_CSUM_TCP_UDP_BAD);
		else if (rxd->rx_stat & htole32(RX_STAT_TCPCKSUMGOOD))
			sumflags |= M_CSUM_TCPv4;

		if (rxd->rx_stat & htole32(RX_STAT_UDPCKSUMBAD))
			sumflags |= (M_CSUM_UDPv4|M_CSUM_TCP_UDP_BAD);
		else if (rxd->rx_stat & htole32(RX_STAT_UDPCKSUMGOOD))
			sumflags |= M_CSUM_UDPv4;

		m->m_pkthdr.csum_flags = sumflags;

		if (rxd->rx_stat & htole32(RX_STAT_VLAN)) {
			struct m_tag *mtag;

			mtag = m_tag_get(PACKET_TAG_VLAN, sizeof(u_int),
			    M_NOWAIT);
			if (mtag == NULL) {
				printf("%s: no mbuf for tag\n",
				    sc->sc_dev.dv_xname);
				m_freem(m);
				goto next;
			}
			*(u_int *)(mtag + 1) = htons(rxd->rx_vlan >> 16);
			m_tag_prepend(m, mtag);
		}

		(*ifp->if_input)(ifp, m);

next:
		bus_dmamap_sync(sc->sc_dmat, dma->dma_map,
		    idx * sizeof(struct txp_rx_desc),
		    sizeof(struct txp_rx_desc), BUS_DMASYNC_PREREAD);

		roff += sizeof(struct txp_rx_desc);
		if (roff == (RX_ENTRIES * sizeof(struct txp_rx_desc))) {
			idx = 0;
			roff = 0;
			rxd = r->r_desc;
		} else {
			idx++;
			rxd++;
		}
		woff = le32toh(*r->r_woff);
	}

	*r->r_roff = htole32(woff);
}

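/*
 * Note the trick used by txp_rxbuf_reclaim() below (and unwound in
 * txp_rx_reclaim() above): the pointer to the host-side software
 * descriptor is stashed, via bcopy(), in the rb_vaddrlo/rb_vaddrhi
 * fields of the buffer descriptor handed to the card, so the mbuf and
 * its DMA map can be recovered when the packet comes back.
 */
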
void
txp_rxbuf_reclaim(sc)
	struct txp_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ec_if;
	struct txp_hostvar *hv = sc->sc_hostvar;
	struct txp_rxbuf_desc *rbd;
	struct txp_swdesc *sd;
	u_int32_t i, end;

	end = TXP_OFFSET2IDX(le32toh(hv->hv_rx_buf_read_idx));
	i = TXP_OFFSET2IDX(le32toh(hv->hv_rx_buf_write_idx));

	if (++i == RXBUF_ENTRIES)
		i = 0;

	rbd = sc->sc_rxbufs + i;

	while (i != end) {
		sd = (struct txp_swdesc *)malloc(sizeof(struct txp_swdesc),
		    M_DEVBUF, M_NOWAIT);
		if (sd == NULL)
			break;

		MGETHDR(sd->sd_mbuf, M_DONTWAIT, MT_DATA);
		if (sd->sd_mbuf == NULL)
			goto err_sd;

		MCLGET(sd->sd_mbuf, M_DONTWAIT);
		if ((sd->sd_mbuf->m_flags & M_EXT) == 0)
			goto err_mbuf;
		sd->sd_mbuf->m_pkthdr.rcvif = ifp;
		sd->sd_mbuf->m_pkthdr.len = sd->sd_mbuf->m_len = MCLBYTES;
		if (bus_dmamap_create(sc->sc_dmat, TXP_MAX_PKTLEN, 1,
		    TXP_MAX_PKTLEN, 0, BUS_DMA_NOWAIT, &sd->sd_map))
			goto err_mbuf;
		if (bus_dmamap_load_mbuf(sc->sc_dmat, sd->sd_map, sd->sd_mbuf,
		    BUS_DMA_NOWAIT)) {
			bus_dmamap_destroy(sc->sc_dmat, sd->sd_map);
			goto err_mbuf;
		}

		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxbufring_dma.dma_map,
		    i * sizeof(struct txp_rxbuf_desc),
		    sizeof(struct txp_rxbuf_desc), BUS_DMASYNC_POSTWRITE);

		/* stash away pointer */
		bcopy(&sd, (u_long *)&rbd->rb_vaddrlo, sizeof(sd));

		rbd->rb_paddrlo = ((u_int64_t)sd->sd_map->dm_segs[0].ds_addr)
		    & 0xffffffff;
		rbd->rb_paddrhi = ((u_int64_t)sd->sd_map->dm_segs[0].ds_addr)
		    >> 32;

		bus_dmamap_sync(sc->sc_dmat, sd->sd_map, 0,
		    sd->sd_map->dm_mapsize, BUS_DMASYNC_PREREAD);

		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxbufring_dma.dma_map,
		    i * sizeof(struct txp_rxbuf_desc),
		    sizeof(struct txp_rxbuf_desc), BUS_DMASYNC_PREWRITE);

		hv->hv_rx_buf_write_idx = htole32(TXP_IDX2OFFSET(i));

		if (++i == RXBUF_ENTRIES) {
			i = 0;
			rbd = sc->sc_rxbufs;
		} else
			rbd++;
	}
	return;

err_mbuf:
	m_freem(sd->sd_mbuf);
err_sd:
	free(sd, M_DEVBUF);
}

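/*
 * Ring positions shared with the card are byte offsets, not element
 * indices; TXP_OFFSET2IDX() and TXP_IDX2OFFSET() (see if_txpreg.h)
 * convert between the two.  E.g., assuming 16-byte descriptors, the
 * third descriptor of a ring sits at offset 32, index 2.
 */
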
/*
 * Reclaim mbufs and entries from a transmit ring.
 */
void
txp_tx_reclaim(sc, r, dma)
	struct txp_softc *sc;
	struct txp_tx_ring *r;
	struct txp_dma_alloc *dma;
{
	struct ifnet *ifp = &sc->sc_arpcom.ec_if;
	u_int32_t idx = TXP_OFFSET2IDX(le32toh(*(r->r_off)));
	u_int32_t cons = r->r_cons, cnt = r->r_cnt;
	struct txp_tx_desc *txd = r->r_desc + cons;
	struct txp_swdesc *sd = sc->sc_txd + cons;
	struct mbuf *m;

	while (cons != idx) {
		if (cnt == 0)
			break;

		bus_dmamap_sync(sc->sc_dmat, dma->dma_map,
		    cons * sizeof(struct txp_tx_desc),
		    sizeof(struct txp_tx_desc),
		    BUS_DMASYNC_POSTWRITE);

		if ((txd->tx_flags & TX_FLAGS_TYPE_M) ==
		    TX_FLAGS_TYPE_DATA) {
			bus_dmamap_sync(sc->sc_dmat, sd->sd_map, 0,
			    sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, sd->sd_map);
			m = sd->sd_mbuf;
			if (m != NULL) {
				m_freem(m);
				txd->tx_addrlo = 0;
				txd->tx_addrhi = 0;
				ifp->if_opackets++;
			}
		}
		ifp->if_flags &= ~IFF_OACTIVE;

		if (++cons == TX_ENTRIES) {
			txd = r->r_desc;
			cons = 0;
			sd = sc->sc_txd;
		} else {
			txd++;
			sd++;
		}

		cnt--;
	}

	r->r_cons = cons;
	r->r_cnt = cnt;
	if (cnt == 0)
		ifp->if_timer = 0;
}

void
txp_shutdown(vsc)
	void *vsc;
{
	struct txp_softc *sc = (struct txp_softc *)vsc;

	/* mask all interrupts */
	WRITE_REG(sc, TXP_IMR,
	    TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_LATCH);

	txp_command(sc, TXP_CMD_TX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 0);
	txp_command(sc, TXP_CMD_RX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 0);
	txp_command(sc, TXP_CMD_HALT, 0, 0, 0, NULL, NULL, NULL, 0);
}

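/*
 * txp_alloc_rings() carves out all of the shared DMA areas, records
 * their physical addresses in the boot record, and finally hands the
 * boot record itself to the firmware with
 * TXP_BOOTCMD_REGISTER_BOOT_RECORD.  Any failure unwinds the earlier
 * allocations through the bail chain at the bottom.
 */
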
int
txp_alloc_rings(sc)
	struct txp_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ec_if;
	struct txp_boot_record *boot;
	struct txp_swdesc *sd;
	u_int32_t r;
	int i, j;

	/* boot record */
	if (txp_dma_malloc(sc, sizeof(struct txp_boot_record),
	    &sc->sc_boot_dma, BUS_DMA_COHERENT)) {
		printf(": can't allocate boot record\n");
		return (-1);
	}
	boot = (struct txp_boot_record *)sc->sc_boot_dma.dma_vaddr;
	bzero(boot, sizeof(*boot));
	sc->sc_boot = boot;

	/* host variables */
	if (txp_dma_malloc(sc, sizeof(struct txp_hostvar), &sc->sc_host_dma,
	    BUS_DMA_COHERENT)) {
		printf(": can't allocate host ring\n");
		goto bail_boot;
	}
	bzero(sc->sc_host_dma.dma_vaddr, sizeof(struct txp_hostvar));
	boot->br_hostvar_lo = htole32(sc->sc_host_dma.dma_paddr & 0xffffffff);
	boot->br_hostvar_hi = htole32(sc->sc_host_dma.dma_paddr >> 32);
	sc->sc_hostvar = (struct txp_hostvar *)sc->sc_host_dma.dma_vaddr;

	/* high priority tx ring */
	if (txp_dma_malloc(sc, sizeof(struct txp_tx_desc) * TX_ENTRIES,
	    &sc->sc_txhiring_dma, BUS_DMA_COHERENT)) {
		printf(": can't allocate high tx ring\n");
		goto bail_host;
	}
	bzero(sc->sc_txhiring_dma.dma_vaddr,
	    sizeof(struct txp_tx_desc) * TX_ENTRIES);
	boot->br_txhipri_lo = htole32(sc->sc_txhiring_dma.dma_paddr & 0xffffffff);
	boot->br_txhipri_hi = htole32(sc->sc_txhiring_dma.dma_paddr >> 32);
	boot->br_txhipri_siz = htole32(TX_ENTRIES * sizeof(struct txp_tx_desc));
	sc->sc_txhir.r_reg = TXP_H2A_1;
	sc->sc_txhir.r_desc = (struct txp_tx_desc *)sc->sc_txhiring_dma.dma_vaddr;
	sc->sc_txhir.r_cons = sc->sc_txhir.r_prod = sc->sc_txhir.r_cnt = 0;
	sc->sc_txhir.r_off = &sc->sc_hostvar->hv_tx_hi_desc_read_idx;
	for (i = 0; i < TX_ENTRIES; i++) {
		if (bus_dmamap_create(sc->sc_dmat, TXP_MAX_PKTLEN,
		    TX_ENTRIES - 4, TXP_MAX_SEGLEN, 0,
		    BUS_DMA_NOWAIT, &sc->sc_txd[i].sd_map) != 0) {
			for (j = 0; j < i; j++) {
				bus_dmamap_destroy(sc->sc_dmat,
				    sc->sc_txd[j].sd_map);
				sc->sc_txd[j].sd_map = NULL;
			}
			goto bail_txhiring;
		}
	}

	/* low priority tx ring */
	if (txp_dma_malloc(sc, sizeof(struct txp_tx_desc) * TX_ENTRIES,
	    &sc->sc_txloring_dma, BUS_DMA_COHERENT)) {
		printf(": can't allocate low tx ring\n");
		goto bail_txhiring;
	}
	bzero(sc->sc_txloring_dma.dma_vaddr,
	    sizeof(struct txp_tx_desc) * TX_ENTRIES);
	boot->br_txlopri_lo = htole32(sc->sc_txloring_dma.dma_paddr & 0xffffffff);
	boot->br_txlopri_hi = htole32(sc->sc_txloring_dma.dma_paddr >> 32);
	boot->br_txlopri_siz = htole32(TX_ENTRIES * sizeof(struct txp_tx_desc));
	sc->sc_txlor.r_reg = TXP_H2A_3;
	sc->sc_txlor.r_desc = (struct txp_tx_desc *)sc->sc_txloring_dma.dma_vaddr;
	sc->sc_txlor.r_cons = sc->sc_txlor.r_prod = sc->sc_txlor.r_cnt = 0;
	sc->sc_txlor.r_off = &sc->sc_hostvar->hv_tx_lo_desc_read_idx;

	/* high priority rx ring */
	if (txp_dma_malloc(sc, sizeof(struct txp_rx_desc) * RX_ENTRIES,
	    &sc->sc_rxhiring_dma, BUS_DMA_COHERENT)) {
		printf(": can't allocate high rx ring\n");
		goto bail_txloring;
	}
	bzero(sc->sc_rxhiring_dma.dma_vaddr,
	    sizeof(struct txp_rx_desc) * RX_ENTRIES);
	boot->br_rxhipri_lo = htole32(sc->sc_rxhiring_dma.dma_paddr & 0xffffffff);
	boot->br_rxhipri_hi = htole32(sc->sc_rxhiring_dma.dma_paddr >> 32);
	boot->br_rxhipri_siz = htole32(RX_ENTRIES * sizeof(struct txp_rx_desc));
	sc->sc_rxhir.r_desc =
	    (struct txp_rx_desc *)sc->sc_rxhiring_dma.dma_vaddr;
	sc->sc_rxhir.r_roff = &sc->sc_hostvar->hv_rx_hi_read_idx;
	sc->sc_rxhir.r_woff = &sc->sc_hostvar->hv_rx_hi_write_idx;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxhiring_dma.dma_map,
	    0, sc->sc_rxhiring_dma.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD);

	/* low priority rx ring */
	if (txp_dma_malloc(sc, sizeof(struct txp_rx_desc) * RX_ENTRIES,
	    &sc->sc_rxloring_dma, BUS_DMA_COHERENT)) {
		printf(": can't allocate low rx ring\n");
		goto bail_rxhiring;
	}
	bzero(sc->sc_rxloring_dma.dma_vaddr,
	    sizeof(struct txp_rx_desc) * RX_ENTRIES);
	boot->br_rxlopri_lo = htole32(sc->sc_rxloring_dma.dma_paddr & 0xffffffff);
	boot->br_rxlopri_hi = htole32(sc->sc_rxloring_dma.dma_paddr >> 32);
	boot->br_rxlopri_siz = htole32(RX_ENTRIES * sizeof(struct txp_rx_desc));
	sc->sc_rxlor.r_desc =
	    (struct txp_rx_desc *)sc->sc_rxloring_dma.dma_vaddr;
	sc->sc_rxlor.r_roff = &sc->sc_hostvar->hv_rx_lo_read_idx;
	sc->sc_rxlor.r_woff = &sc->sc_hostvar->hv_rx_lo_write_idx;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxloring_dma.dma_map,
	    0, sc->sc_rxloring_dma.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD);

	/* command ring */
	if (txp_dma_malloc(sc, sizeof(struct txp_cmd_desc) * CMD_ENTRIES,
	    &sc->sc_cmdring_dma, BUS_DMA_COHERENT)) {
		printf(": can't allocate command ring\n");
		goto bail_rxloring;
	}
	bzero(sc->sc_cmdring_dma.dma_vaddr,
	    sizeof(struct txp_cmd_desc) * CMD_ENTRIES);
	boot->br_cmd_lo = htole32(sc->sc_cmdring_dma.dma_paddr & 0xffffffff);
	boot->br_cmd_hi = htole32(sc->sc_cmdring_dma.dma_paddr >> 32);
	boot->br_cmd_siz = htole32(CMD_ENTRIES * sizeof(struct txp_cmd_desc));
	sc->sc_cmdring.base = (struct txp_cmd_desc *)sc->sc_cmdring_dma.dma_vaddr;
	sc->sc_cmdring.size = CMD_ENTRIES * sizeof(struct txp_cmd_desc);
	sc->sc_cmdring.lastwrite = 0;

	/* response ring */
	if (txp_dma_malloc(sc, sizeof(struct txp_rsp_desc) * RSP_ENTRIES,
	    &sc->sc_rspring_dma, BUS_DMA_COHERENT)) {
		printf(": can't allocate response ring\n");
		goto bail_cmdring;
	}
	bzero(sc->sc_rspring_dma.dma_vaddr,
	    sizeof(struct txp_rsp_desc) * RSP_ENTRIES);
	boot->br_resp_lo = htole32(sc->sc_rspring_dma.dma_paddr & 0xffffffff);
	boot->br_resp_hi = htole32(sc->sc_rspring_dma.dma_paddr >> 32);
	boot->br_resp_siz = htole32(RSP_ENTRIES * sizeof(struct txp_rsp_desc));
	sc->sc_rspring.base = (struct txp_rsp_desc *)sc->sc_rspring_dma.dma_vaddr;
	sc->sc_rspring.size = RSP_ENTRIES * sizeof(struct txp_rsp_desc);
	sc->sc_rspring.lastwrite = 0;

	/* receive buffer ring */
	if (txp_dma_malloc(sc, sizeof(struct txp_rxbuf_desc) * RXBUF_ENTRIES,
	    &sc->sc_rxbufring_dma, BUS_DMA_COHERENT)) {
		printf(": can't allocate rx buffer ring\n");
		goto bail_rspring;
	}
	bzero(sc->sc_rxbufring_dma.dma_vaddr,
	    sizeof(struct txp_rxbuf_desc) * RXBUF_ENTRIES);
	boot->br_rxbuf_lo = htole32(sc->sc_rxbufring_dma.dma_paddr & 0xffffffff);
	boot->br_rxbuf_hi = htole32(sc->sc_rxbufring_dma.dma_paddr >> 32);
	boot->br_rxbuf_siz = htole32(RXBUF_ENTRIES * sizeof(struct txp_rxbuf_desc));
	sc->sc_rxbufs = (struct txp_rxbuf_desc *)sc->sc_rxbufring_dma.dma_vaddr;
	for (i = 0; i < RXBUF_ENTRIES; i++) {
		sd = (struct txp_swdesc *)malloc(sizeof(struct txp_swdesc),
		    M_DEVBUF, M_NOWAIT);
		if (sd == NULL)
			break;

		MGETHDR(sd->sd_mbuf, M_DONTWAIT, MT_DATA);
		if (sd->sd_mbuf == NULL) {
			goto bail_rxbufring;
		}

		MCLGET(sd->sd_mbuf, M_DONTWAIT);
		if ((sd->sd_mbuf->m_flags & M_EXT) == 0) {
			goto bail_rxbufring;
		}
		sd->sd_mbuf->m_pkthdr.len = sd->sd_mbuf->m_len = MCLBYTES;
		sd->sd_mbuf->m_pkthdr.rcvif = ifp;
		if (bus_dmamap_create(sc->sc_dmat, TXP_MAX_PKTLEN, 1,
		    TXP_MAX_PKTLEN, 0, BUS_DMA_NOWAIT, &sd->sd_map)) {
			goto bail_rxbufring;
		}
		if (bus_dmamap_load_mbuf(sc->sc_dmat, sd->sd_map, sd->sd_mbuf,
		    BUS_DMA_NOWAIT)) {
			bus_dmamap_destroy(sc->sc_dmat, sd->sd_map);
			goto bail_rxbufring;
		}
		bus_dmamap_sync(sc->sc_dmat, sd->sd_map, 0,
		    sd->sd_map->dm_mapsize, BUS_DMASYNC_PREREAD);

		/* stash away pointer */
		bcopy(&sd, (u_long *)&sc->sc_rxbufs[i].rb_vaddrlo, sizeof(sd));

		sc->sc_rxbufs[i].rb_paddrlo =
		    ((u_int64_t)sd->sd_map->dm_segs[0].ds_addr) & 0xffffffff;
		sc->sc_rxbufs[i].rb_paddrhi =
		    ((u_int64_t)sd->sd_map->dm_segs[0].ds_addr) >> 32;
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxbufring_dma.dma_map,
	    0, sc->sc_rxbufring_dma.dma_map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);
	sc->sc_hostvar->hv_rx_buf_write_idx = htole32((RXBUF_ENTRIES - 1) *
	    sizeof(struct txp_rxbuf_desc));

	/* zero dma */
	if (txp_dma_malloc(sc, sizeof(u_int32_t), &sc->sc_zero_dma,
	    BUS_DMA_COHERENT)) {
		printf(": can't allocate zero dma memory\n");
		goto bail_rxbufring;
	}
	bzero(sc->sc_zero_dma.dma_vaddr, sizeof(u_int32_t));
	boot->br_zero_lo = htole32(sc->sc_zero_dma.dma_paddr & 0xffffffff);
	boot->br_zero_hi = htole32(sc->sc_zero_dma.dma_paddr >> 32);

	/* See if it's waiting for boot, and try to boot it */
	for (i = 0; i < 10000; i++) {
		r = READ_REG(sc, TXP_A2H_0);
		if (r == STAT_WAITING_FOR_BOOT)
			break;
		DELAY(50);
	}
	if (r != STAT_WAITING_FOR_BOOT) {
		printf(": not waiting for boot\n");
		goto bail;
	}
	WRITE_REG(sc, TXP_H2A_2, sc->sc_boot_dma.dma_paddr >> 32);
	WRITE_REG(sc, TXP_H2A_1, sc->sc_boot_dma.dma_paddr & 0xffffffff);
	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_REGISTER_BOOT_RECORD);

	/* See if it booted */
	for (i = 0; i < 10000; i++) {
		r = READ_REG(sc, TXP_A2H_0);
		if (r == STAT_RUNNING)
			break;
		DELAY(50);
	}
	if (r != STAT_RUNNING) {
		printf(": fw not running\n");
		goto bail;
	}

	/* Clear TX and CMD ring write registers */
	WRITE_REG(sc, TXP_H2A_1, TXP_BOOTCMD_NULL);
	WRITE_REG(sc, TXP_H2A_2, TXP_BOOTCMD_NULL);
	WRITE_REG(sc, TXP_H2A_3, TXP_BOOTCMD_NULL);
	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_NULL);

	return (0);

bail:
	txp_dma_free(sc, &sc->sc_zero_dma);
bail_rxbufring:
	txp_dma_free(sc, &sc->sc_rxbufring_dma);
bail_rspring:
	txp_dma_free(sc, &sc->sc_rspring_dma);
bail_cmdring:
	txp_dma_free(sc, &sc->sc_cmdring_dma);
bail_rxloring:
	txp_dma_free(sc, &sc->sc_rxloring_dma);
bail_rxhiring:
	txp_dma_free(sc, &sc->sc_rxhiring_dma);
bail_txloring:
	txp_dma_free(sc, &sc->sc_txloring_dma);
bail_txhiring:
	txp_dma_free(sc, &sc->sc_txhiring_dma);
bail_host:
	txp_dma_free(sc, &sc->sc_host_dma);
bail_boot:
	txp_dma_free(sc, &sc->sc_boot_dma);
	return (-1);
}

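/*
 * txp_dma_malloc() is the usual four-step bus_dma sequence --
 * bus_dmamem_alloc, bus_dmamem_map, bus_dmamap_create,
 * bus_dmamap_load -- with each failure path unwinding the steps that
 * preceded it; txp_dma_free() releases the same resources in reverse.
 */
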
int
txp_dma_malloc(sc, size, dma, mapflags)
	struct txp_softc *sc;
	bus_size_t size;
	struct txp_dma_alloc *dma;
	int mapflags;
{
	int r;

	if ((r = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0,
	    &dma->dma_seg, 1, &dma->dma_nseg, 0)) != 0)
		goto fail_0;

	if ((r = bus_dmamem_map(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg,
	    size, &dma->dma_vaddr, mapflags | BUS_DMA_NOWAIT)) != 0)
		goto fail_1;

	if ((r = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT, &dma->dma_map)) != 0)
		goto fail_2;

	if ((r = bus_dmamap_load(sc->sc_dmat, dma->dma_map, dma->dma_vaddr,
	    size, NULL, BUS_DMA_NOWAIT)) != 0)
		goto fail_3;

	dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr;
	return (0);

fail_3:
	bus_dmamap_destroy(sc->sc_dmat, dma->dma_map);
fail_2:
	bus_dmamem_unmap(sc->sc_dmat, dma->dma_vaddr, size);
fail_1:
	bus_dmamem_free(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg);
fail_0:
	return (r);
}

void
txp_dma_free(sc, dma)
	struct txp_softc *sc;
	struct txp_dma_alloc *dma;
{
	bus_dmamap_unload(sc->sc_dmat, dma->dma_map);
	bus_dmamem_unmap(sc->sc_dmat, dma->dma_vaddr, dma->dma_map->dm_mapsize);
	bus_dmamem_free(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg);
	bus_dmamap_destroy(sc->sc_dmat, dma->dma_map);
}

int
txp_ioctl(ifp, command, data)
	struct ifnet *ifp;
	u_long command;
	caddr_t data;
{
	struct txp_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

#if 0
	if ((error = ether_ioctl(ifp, &sc->sc_arpcom, command, data)) > 0) {
		splx(s);
		return error;
	}
#endif

	switch (command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			txp_init(sc);
			arp_ifinit(ifp, ifa);
			break;
#endif /* INET */
		default:
			txp_init(sc);
			break;
		}
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			txp_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				txp_stop(sc);
		}
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (command == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_arpcom) :
		    ether_delmulti(ifr, &sc->sc_arpcom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware
			 * filter accordingly.
			 */
			if (ifp->if_flags & IFF_RUNNING)
				txp_set_filter(sc);
			error = 0;
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_ifmedia, command);
		break;
	default:
		error = EINVAL;
		break;
	}

	splx(s);

	return (error);
}

void
txp_init(sc)
	struct txp_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ec_if;
	int s;

	txp_stop(sc);

	s = splnet();

	txp_set_filter(sc);

	txp_command(sc, TXP_CMD_TX_ENABLE, 0, 0, 0, NULL, NULL, NULL, 1);
	txp_command(sc, TXP_CMD_RX_ENABLE, 0, 0, 0, NULL, NULL, NULL, 1);

	WRITE_REG(sc, TXP_IER, TXP_INT_RESERVED | TXP_INT_SELF |
	    TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 |
	    TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0 |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT | TXP_INT_LATCH);
	WRITE_REG(sc, TXP_IMR, TXP_INT_A2H_3);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	ifp->if_timer = 0;

	if (!callout_pending(&sc->sc_tick))
		callout_schedule(&sc->sc_tick, hz);

	splx(s);
}

void
txp_tick(vsc)
	void *vsc;
{
	struct txp_softc *sc = vsc;
	struct ifnet *ifp = &sc->sc_arpcom.ec_if;
	struct txp_rsp_desc *rsp = NULL;
	struct txp_ext_desc *ext;
	int s;

	s = splnet();
	txp_rxbuf_reclaim(sc);

	if (txp_command2(sc, TXP_CMD_READ_STATISTICS, 0, 0, 0, NULL, 0,
	    &rsp, 1))
		goto out;
	if (rsp->rsp_numdesc != 6)
		goto out;
	if (txp_command(sc, TXP_CMD_CLEAR_STATISTICS, 0, 0, 0,
	    NULL, NULL, NULL, 1))
		goto out;
	ext = (struct txp_ext_desc *)(rsp + 1);

	ifp->if_ierrors += ext[3].ext_2 + ext[3].ext_3 + ext[3].ext_4 +
	    ext[4].ext_1 + ext[4].ext_4;
	ifp->if_oerrors += ext[0].ext_1 + ext[1].ext_1 + ext[1].ext_4 +
	    ext[2].ext_1;
	ifp->if_collisions += ext[0].ext_2 + ext[0].ext_3 + ext[1].ext_2 +
	    ext[1].ext_3;
	ifp->if_opackets += rsp->rsp_par2;
	ifp->if_ipackets += ext[2].ext_3;

out:
	if (rsp != NULL)
		free(rsp, M_DEVBUF);

	splx(s);
	callout_schedule(&sc->sc_tick, hz);
}

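/*
 * Transmit layout, as built by txp_start() below: each packet occupies
 * one txp_tx_desc followed by one txp_frag_desc per DMA segment, all
 * in the same ring.  A sketch of a two-fragment packet:
 *
 *	[ tx_desc   TYPE_DATA, numdesc = 2, totlen = pkthdr.len ]
 *	[ frag_desc addr/len of segment 0 ]
 *	[ frag_desc addr/len of segment 1 ]
 *
 * The "- 4" slack in the free-space tests appears to keep the producer
 * from running into descriptors the card may still own, and the
 * tx_desc is only marked TX_FLAGS_VALID after its fragments have been
 * written and synced.
 */
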
void
txp_start(ifp)
	struct ifnet *ifp;
{
	struct txp_softc *sc = ifp->if_softc;
	struct txp_tx_ring *r = &sc->sc_txhir;
	struct txp_tx_desc *txd;
	int txdidx;
	struct txp_frag_desc *fxd;
	struct mbuf *m, *mnew;
	struct txp_swdesc *sd;
	u_int32_t firstprod, firstcnt, prod, cnt, i;
	struct m_tag *mtag;

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	prod = r->r_prod;
	cnt = r->r_cnt;

	while (1) {
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;
		mnew = NULL;

		firstprod = prod;
		firstcnt = cnt;

		sd = sc->sc_txd + prod;
		sd->sd_mbuf = m;

		if (bus_dmamap_load_mbuf(sc->sc_dmat, sd->sd_map, m,
		    BUS_DMA_NOWAIT)) {
			MGETHDR(mnew, M_DONTWAIT, MT_DATA);
			if (mnew == NULL)
				goto oactive1;
			if (m->m_pkthdr.len > MHLEN) {
				MCLGET(mnew, M_DONTWAIT);
				if ((mnew->m_flags & M_EXT) == 0) {
					m_freem(mnew);
					goto oactive1;
				}
			}
			m_copydata(m, 0, m->m_pkthdr.len, mtod(mnew, caddr_t));
			mnew->m_pkthdr.len = mnew->m_len = m->m_pkthdr.len;
			IFQ_DEQUEUE(&ifp->if_snd, m);
			m_freem(m);
			m = mnew;
			/* record the replacement mbuf for txp_tx_reclaim() */
			sd->sd_mbuf = m;
			if (bus_dmamap_load_mbuf(sc->sc_dmat, sd->sd_map, m,
			    BUS_DMA_NOWAIT))
				goto oactive1;
		}

		if ((TX_ENTRIES - cnt) < 4)
			goto oactive;

		txd = r->r_desc + prod;
		txdidx = prod;
		txd->tx_flags = TX_FLAGS_TYPE_DATA;
		txd->tx_numdesc = 0;
		txd->tx_addrlo = 0;
		txd->tx_addrhi = 0;
		txd->tx_totlen = m->m_pkthdr.len;
		txd->tx_pflags = 0;
		txd->tx_numdesc = sd->sd_map->dm_nsegs;

		if (++prod == TX_ENTRIES)
			prod = 0;

		if (++cnt >= (TX_ENTRIES - 4))
			goto oactive;

		mtag = m_tag_find(m, PACKET_TAG_VLAN, NULL);
		if (mtag)
			txd->tx_pflags = TX_PFLAGS_VLAN |
			    (htons(*(u_int *)(mtag + 1)) << TX_PFLAGS_VLANTAG_S);

		if (m->m_pkthdr.csum_flags & M_CSUM_IPv4)
			txd->tx_pflags |= TX_PFLAGS_IPCKSUM;
#ifdef TRY_TX_TCP_CSUM
		if (m->m_pkthdr.csum_flags & M_CSUM_TCPv4)
			txd->tx_pflags |= TX_PFLAGS_TCPCKSUM;
#endif
#ifdef TRY_TX_UDP_CSUM
		if (m->m_pkthdr.csum_flags & M_CSUM_UDPv4)
			txd->tx_pflags |= TX_PFLAGS_UDPCKSUM;
#endif

		bus_dmamap_sync(sc->sc_dmat, sd->sd_map, 0,
		    sd->sd_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

		fxd = (struct txp_frag_desc *)(r->r_desc + prod);
		for (i = 0; i < sd->sd_map->dm_nsegs; i++) {
			if (++cnt >= (TX_ENTRIES - 4)) {
				bus_dmamap_sync(sc->sc_dmat, sd->sd_map,
				    0, sd->sd_map->dm_mapsize,
				    BUS_DMASYNC_POSTWRITE);
				goto oactive;
			}

			fxd->frag_flags = FRAG_FLAGS_TYPE_FRAG |
			    FRAG_FLAGS_VALID;
			fxd->frag_rsvd1 = 0;
			fxd->frag_len = sd->sd_map->dm_segs[i].ds_len;
			fxd->frag_addrlo =
			    ((u_int64_t)sd->sd_map->dm_segs[i].ds_addr) &
			    0xffffffff;
			fxd->frag_addrhi =
			    ((u_int64_t)sd->sd_map->dm_segs[i].ds_addr) >>
			    32;
			fxd->frag_rsvd2 = 0;

			bus_dmamap_sync(sc->sc_dmat,
			    sc->sc_txhiring_dma.dma_map,
			    prod * sizeof(struct txp_frag_desc),
			    sizeof(struct txp_frag_desc), BUS_DMASYNC_PREWRITE);

			if (++prod == TX_ENTRIES) {
				fxd = (struct txp_frag_desc *)r->r_desc;
				prod = 0;
			} else
				fxd++;
		}

		/*
		 * if mnew isn't NULL, we already dequeued and copied
		 * the packet.
		 */
		if (mnew == NULL)
			IFQ_DEQUEUE(&ifp->if_snd, m);

		ifp->if_timer = 5;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif

		txd->tx_flags |= TX_FLAGS_VALID;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_txhiring_dma.dma_map,
		    txdidx * sizeof(struct txp_tx_desc),
		    sizeof(struct txp_tx_desc), BUS_DMASYNC_PREWRITE);

#if 0
		{
			struct mbuf *mx;
			int i;

			printf("txd: flags 0x%x ndesc %d totlen %d pflags 0x%x\n",
			    txd->tx_flags, txd->tx_numdesc, txd->tx_totlen,
			    txd->tx_pflags);
			for (mx = m; mx != NULL; mx = mx->m_next) {
				for (i = 0; i < mx->m_len; i++) {
					printf(":%02x",
					    (u_int8_t)mx->m_data[i]);
				}
			}
			printf("\n");
		}
#endif

		WRITE_REG(sc, r->r_reg, TXP_IDX2OFFSET(prod));
	}

	r->r_prod = prod;
	r->r_cnt = cnt;
	return;

oactive:
	bus_dmamap_unload(sc->sc_dmat, sd->sd_map);
oactive1:
	ifp->if_flags |= IFF_OACTIVE;
	r->r_prod = firstprod;
	r->r_cnt = firstcnt;
}

/*
 * Handle simple commands sent to the typhoon
 */
int
txp_command(sc, id, in1, in2, in3, out1, out2, out3, wait)
	struct txp_softc *sc;
	u_int16_t id, in1, *out1;
	u_int32_t in2, in3, *out2, *out3;
	int wait;
{
	struct txp_rsp_desc *rsp = NULL;

	if (txp_command2(sc, id, in1, in2, in3, NULL, 0, &rsp, wait))
		return (-1);

	if (!wait)
		return (0);

	if (out1 != NULL)
		*out1 = le16toh(rsp->rsp_par1);
	if (out2 != NULL)
		*out2 = le32toh(rsp->rsp_par2);
	if (out3 != NULL)
		*out3 = le32toh(rsp->rsp_par3);
	free(rsp, M_DEVBUF);
	return (0);
}

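/*
 * txp_command2() posts a command descriptor (plus optional extension
 * descriptors) on the command ring, kicks the card via TXP_H2A_2 and,
 * if a response was requested, polls the response ring until
 * txp_response() finds a descriptor with matching id and sequence
 * number.  A typical caller, from txp_attach():
 *
 *	txp_command(sc, TXP_CMD_STATION_ADDRESS_READ, 0, 0, 0,
 *	    &p1, &p2, NULL, 1);
 */
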
int
txp_command2(sc, id, in1, in2, in3, in_extp, in_extn, rspp, wait)
	struct txp_softc *sc;
	u_int16_t id, in1;
	u_int32_t in2, in3;
	struct txp_ext_desc *in_extp;
	u_int8_t in_extn;
	struct txp_rsp_desc **rspp;
	int wait;
{
	struct txp_hostvar *hv = sc->sc_hostvar;
	struct txp_cmd_desc *cmd;
	struct txp_ext_desc *ext;
	u_int32_t idx, i;
	u_int16_t seq;

	if (txp_cmd_desc_numfree(sc) < (in_extn + 1)) {
		printf("%s: no free cmd descriptors\n", TXP_DEVNAME(sc));
		return (-1);
	}

	idx = sc->sc_cmdring.lastwrite;
	cmd = (struct txp_cmd_desc *)(((u_int8_t *)sc->sc_cmdring.base) + idx);
	bzero(cmd, sizeof(*cmd));

	cmd->cmd_numdesc = in_extn;
	seq = sc->sc_seq++;
	cmd->cmd_seq = htole16(seq);
	cmd->cmd_id = htole16(id);
	cmd->cmd_par1 = htole16(in1);
	cmd->cmd_par2 = htole32(in2);
	cmd->cmd_par3 = htole32(in3);
	cmd->cmd_flags = CMD_FLAGS_TYPE_CMD |
	    (wait ? CMD_FLAGS_RESP : 0) | CMD_FLAGS_VALID;

	idx += sizeof(struct txp_cmd_desc);
	if (idx == sc->sc_cmdring.size)
		idx = 0;

	for (i = 0; i < in_extn; i++) {
		ext = (struct txp_ext_desc *)(((u_int8_t *)sc->sc_cmdring.base) + idx);
		bcopy(in_extp, ext, sizeof(struct txp_ext_desc));
		in_extp++;
		idx += sizeof(struct txp_cmd_desc);
		if (idx == sc->sc_cmdring.size)
			idx = 0;
	}

	sc->sc_cmdring.lastwrite = idx;

	WRITE_REG(sc, TXP_H2A_2, sc->sc_cmdring.lastwrite);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_host_dma.dma_map, 0,
	    sizeof(struct txp_hostvar), BUS_DMASYNC_PREREAD);

	if (!wait)
		return (0);

	for (i = 0; i < 10000; i++) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_host_dma.dma_map, 0,
		    sizeof(struct txp_hostvar), BUS_DMASYNC_POSTREAD);
		idx = le32toh(hv->hv_resp_read_idx);
		if (idx != le32toh(hv->hv_resp_write_idx)) {
			*rspp = NULL;
			if (txp_response(sc, idx, id, seq, rspp))
				return (-1);
			if (*rspp != NULL)
				break;
		}
		bus_dmamap_sync(sc->sc_dmat, sc->sc_host_dma.dma_map, 0,
		    sizeof(struct txp_hostvar), BUS_DMASYNC_PREREAD);
		DELAY(50);
	}
	if (i == 10000 || (*rspp) == NULL) {
		printf("%s: 0x%x command failed\n", TXP_DEVNAME(sc), id);
		return (-1);
	}

	return (0);
}

int
txp_response(sc, ridx, id, seq, rspp)
	struct txp_softc *sc;
	u_int32_t ridx;
	u_int16_t id;
	u_int16_t seq;
	struct txp_rsp_desc **rspp;
{
	struct txp_hostvar *hv = sc->sc_hostvar;
	struct txp_rsp_desc *rsp;

	while (ridx != le32toh(hv->hv_resp_write_idx)) {
		rsp = (struct txp_rsp_desc *)(((u_int8_t *)sc->sc_rspring.base) + ridx);

		if (id == le16toh(rsp->rsp_id) && le16toh(rsp->rsp_seq) == seq) {
			*rspp = (struct txp_rsp_desc *)malloc(
			    sizeof(struct txp_rsp_desc) * (rsp->rsp_numdesc + 1),
			    M_DEVBUF, M_NOWAIT);
			if ((*rspp) == NULL)
				return (-1);
			txp_rsp_fixup(sc, rsp, *rspp);
			return (0);
		}

		if (rsp->rsp_flags & RSP_FLAGS_ERROR) {
			printf("%s: response error: id 0x%x\n",
			    TXP_DEVNAME(sc), le16toh(rsp->rsp_id));
			txp_rsp_fixup(sc, rsp, NULL);
			ridx = le32toh(hv->hv_resp_read_idx);
			continue;
		}

		switch (le16toh(rsp->rsp_id)) {
		case TXP_CMD_CYCLE_STATISTICS:
		case TXP_CMD_MEDIA_STATUS_READ:
			break;
		case TXP_CMD_HELLO_RESPONSE:
			printf("%s: hello\n", TXP_DEVNAME(sc));
			break;
		default:
			printf("%s: unknown id(0x%x)\n", TXP_DEVNAME(sc),
			    le16toh(rsp->rsp_id));
		}

		txp_rsp_fixup(sc, rsp, NULL);
		ridx = le32toh(hv->hv_resp_read_idx);
		hv->hv_resp_read_idx = htole32(ridx);
	}

	return (0);
}

void
txp_rsp_fixup(sc, rsp, dst)
	struct txp_softc *sc;
	struct txp_rsp_desc *rsp, *dst;
{
	struct txp_rsp_desc *src = rsp;
	struct txp_hostvar *hv = sc->sc_hostvar;
	u_int32_t i, ridx;

	ridx = le32toh(hv->hv_resp_read_idx);

	for (i = 0; i < rsp->rsp_numdesc + 1; i++) {
		if (dst != NULL)
			bcopy(src, dst++, sizeof(struct txp_rsp_desc));
		ridx += sizeof(struct txp_rsp_desc);
		if (ridx == sc->sc_rspring.size) {
			src = sc->sc_rspring.base;
			ridx = 0;
		} else
			src++;
		sc->sc_rspring.lastwrite = ridx;
		hv->hv_resp_read_idx = htole32(ridx);
	}

	hv->hv_resp_read_idx = htole32(ridx);
}

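/*
 * Command-ring free space, worked through for a hypothetical 64-entry
 * ring (the real size comes from br_cmd_siz): if the write index leads
 * the read index by 10 descriptors, 64 - 10 - 1 = 53 entries are
 * considered free; one entry is always sacrificed so that a full ring
 * remains distinguishable from an empty one.
 */
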
int
txp_cmd_desc_numfree(sc)
	struct txp_softc *sc;
{
	struct txp_hostvar *hv = sc->sc_hostvar;
	struct txp_boot_record *br = sc->sc_boot;
	u_int32_t widx, ridx, nfree;

	widx = sc->sc_cmdring.lastwrite;
	ridx = le32toh(hv->hv_cmd_read_idx);

	if (widx == ridx) {
		/* Ring is completely free */
		nfree = le32toh(br->br_cmd_siz) - sizeof(struct txp_cmd_desc);
	} else {
		if (widx > ridx)
			nfree = le32toh(br->br_cmd_siz) -
			    (widx - ridx + sizeof(struct txp_cmd_desc));
		else
			nfree = ridx - widx - sizeof(struct txp_cmd_desc);
	}

	return (nfree / sizeof(struct txp_cmd_desc));
}

void
txp_stop(sc)
	struct txp_softc *sc;
{
	txp_command(sc, TXP_CMD_TX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 1);
	txp_command(sc, TXP_CMD_RX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 1);

	if (callout_pending(&sc->sc_tick))
		callout_stop(&sc->sc_tick);
}

void
txp_watchdog(ifp)
	struct ifnet *ifp;
{
}

int
txp_ifmedia_upd(ifp)
	struct ifnet *ifp;
{
	struct txp_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_ifmedia;
	u_int16_t new_xcvr;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	if (IFM_SUBTYPE(ifm->ifm_media) == IFM_10_T) {
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			new_xcvr = TXP_XCVR_10_FDX;
		else
			new_xcvr = TXP_XCVR_10_HDX;
	} else if ((IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) ||
	    (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_FX)) {
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			new_xcvr = TXP_XCVR_100_FDX;
		else
			new_xcvr = TXP_XCVR_100_HDX;
	} else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
		new_xcvr = TXP_XCVR_AUTO;
	} else
		return (EINVAL);

	/* nothing to do */
	if (sc->sc_xcvr == new_xcvr)
		return (0);

	txp_command(sc, TXP_CMD_XCVR_SELECT, new_xcvr, 0, 0,
	    NULL, NULL, NULL, 0);
	sc->sc_xcvr = new_xcvr;

	return (0);
}

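/*
 * txp_ifmedia_sts() reads BMSR twice on purpose: the MII link status
 * bit is latched low, so the first read clears any stale link-down
 * indication and the second read reports the current state.
 */
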
void
txp_ifmedia_sts(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct txp_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_ifmedia;
	u_int16_t bmsr, bmcr, anlpar;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMSR, 0,
	    &bmsr, NULL, NULL, 1))
		goto bail;
	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMSR, 0,
	    &bmsr, NULL, NULL, 1))
		goto bail;

	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMCR, 0,
	    &bmcr, NULL, NULL, 1))
		goto bail;

	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_ANLPAR, 0,
	    &anlpar, NULL, NULL, 1))
		goto bail;

	if (bmsr & BMSR_LINK)
		ifmr->ifm_status |= IFM_ACTIVE;

	if (bmcr & BMCR_ISO) {
		ifmr->ifm_active |= IFM_NONE;
		ifmr->ifm_status = 0;
		return;
	}

	if (bmcr & BMCR_LOOP)
		ifmr->ifm_active |= IFM_LOOP;

	if (!(sc->sc_flags & TXP_FIBER) && (bmcr & BMCR_AUTOEN)) {
		if ((bmsr & BMSR_ACOMP) == 0) {
			ifmr->ifm_active |= IFM_NONE;
			return;
		}

		if (anlpar & ANLPAR_T4)
			ifmr->ifm_active |= IFM_100_T4;
		else if (anlpar & ANLPAR_TX_FD)
			ifmr->ifm_active |= IFM_100_TX|IFM_FDX;
		else if (anlpar & ANLPAR_TX)
			ifmr->ifm_active |= IFM_100_TX;
		else if (anlpar & ANLPAR_10_FD)
			ifmr->ifm_active |= IFM_10_T|IFM_FDX;
		else if (anlpar & ANLPAR_10)
			ifmr->ifm_active |= IFM_10_T;
		else
			ifmr->ifm_active |= IFM_NONE;
	} else
		ifmr->ifm_active = ifm->ifm_cur->ifm_media;
	return;

bail:
	ifmr->ifm_active |= IFM_NONE;
	ifmr->ifm_status &= ~IFM_AVALID;
}

void
txp_show_descriptor(d)
	void *d;
{
	struct txp_cmd_desc *cmd = d;
	struct txp_rsp_desc *rsp = d;
	struct txp_tx_desc *txd = d;
	struct txp_frag_desc *frgd = d;

	switch (cmd->cmd_flags & CMD_FLAGS_TYPE_M) {
	case CMD_FLAGS_TYPE_CMD:
		/* command descriptor */
		printf("[cmd flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
		    cmd->cmd_flags, cmd->cmd_numdesc, le16toh(cmd->cmd_id),
		    le16toh(cmd->cmd_seq), le16toh(cmd->cmd_par1),
		    le32toh(cmd->cmd_par2), le32toh(cmd->cmd_par3));
		break;
	case CMD_FLAGS_TYPE_RESP:
		/* response descriptor */
		printf("[rsp flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
		    rsp->rsp_flags, rsp->rsp_numdesc, le16toh(rsp->rsp_id),
		    le16toh(rsp->rsp_seq), le16toh(rsp->rsp_par1),
		    le32toh(rsp->rsp_par2), le32toh(rsp->rsp_par3));
		break;
	case CMD_FLAGS_TYPE_DATA:
		/* data header (assuming tx for now) */
		printf("[data flags 0x%x num %d totlen %d addr 0x%x/0x%x pflags 0x%x]",
		    txd->tx_flags, txd->tx_numdesc, txd->tx_totlen,
		    txd->tx_addrlo, txd->tx_addrhi, txd->tx_pflags);
		break;
	case CMD_FLAGS_TYPE_FRAG:
		/* fragment descriptor */
		printf("[frag flags 0x%x rsvd1 0x%x len %d addr 0x%x/0x%x rsvd2 0x%x]",
		    frgd->frag_flags, frgd->frag_rsvd1, frgd->frag_len,
		    frgd->frag_addrlo, frgd->frag_addrhi, frgd->frag_rsvd2);
		break;
	default:
		printf("[unknown(%x) flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
		    cmd->cmd_flags & CMD_FLAGS_TYPE_M,
		    cmd->cmd_flags, cmd->cmd_numdesc, le16toh(cmd->cmd_id),
		    le16toh(cmd->cmd_seq), le16toh(cmd->cmd_par1),
		    le32toh(cmd->cmd_par2), le32toh(cmd->cmd_par3));
		break;
	}
}

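/*
 * The multicast filter below computes the Ethernet CRC-32 bit-serially
 * (LSB first) over each multicast address and uses the low 6 bits of
 * the result to pick one of 64 hash bits, split across the two 32-bit
 * words handed to TXP_CMD_MCAST_HASH_MASK_WRITE.
 */
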
void
txp_set_filter(sc)
	struct txp_softc *sc;
{
	struct ethercom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &sc->sc_arpcom.ec_if;
	u_int32_t crc, carry, hashbit, hash[2];
	u_int16_t filter;
	u_int8_t octet;
	int i, j, mcnt = 0;
	struct ether_multi *enm;
	struct ether_multistep step;

	if (ifp->if_flags & IFF_PROMISC) {
		filter = TXP_RXFILT_PROMISC;
		goto setit;
	}

again:
	filter = TXP_RXFILT_DIRECT;

	if (ifp->if_flags & IFF_BROADCAST)
		filter |= TXP_RXFILT_BROADCAST;

	if (ifp->if_flags & IFF_ALLMULTI)
		filter |= TXP_RXFILT_ALLMULTI;
	else {
		hash[0] = hash[1] = 0;

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			if (bcmp(enm->enm_addrlo, enm->enm_addrhi,
			    ETHER_ADDR_LEN)) {
				/*
				 * We must listen to a range of multicast
				 * addresses.  For now, just accept all
				 * multicasts, rather than trying to set only
				 * those filter bits needed to match the range.
				 * (At this time, the only use of address
				 * ranges is for IP multicast routing, for
				 * which the range is big enough to require
				 * all bits set.)
				 */
				ifp->if_flags |= IFF_ALLMULTI;
				goto again;
			}

			mcnt++;
			crc = 0xffffffff;

			for (i = 0; i < ETHER_ADDR_LEN; i++) {
				octet = enm->enm_addrlo[i];
				for (j = 0; j < 8; j++) {
					carry = ((crc & 0x80000000) ? 1 : 0) ^
					    (octet & 1);
					crc <<= 1;
					octet >>= 1;
					if (carry)
						crc = (crc ^ TXP_POLYNOMIAL) |
						    carry;
				}
			}
			hashbit = (u_int16_t)(crc & (64 - 1));
			hash[hashbit / 32] |= (1 << hashbit % 32);
			ETHER_NEXT_MULTI(step, enm);
		}

		if (mcnt > 0) {
			filter |= TXP_RXFILT_HASHMULTI;
			txp_command(sc, TXP_CMD_MCAST_HASH_MASK_WRITE,
			    2, hash[0], hash[1], NULL, NULL, NULL, 0);
		}
	}

setit:
	txp_command(sc, TXP_CMD_RX_FILTER_WRITE, filter, 0, 0,
	    NULL, NULL, NULL, 1);
}

void
txp_capabilities(sc)
	struct txp_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ec_if;
	struct txp_rsp_desc *rsp = NULL;
	struct txp_ext_desc *ext;

	if (txp_command2(sc, TXP_CMD_OFFLOAD_READ, 0, 0, 0, NULL, 0, &rsp, 1))
		goto out;

	if (rsp->rsp_numdesc != 1)
		goto out;
	ext = (struct txp_ext_desc *)(rsp + 1);

	sc->sc_tx_capability = ext->ext_1 & OFFLOAD_MASK;
	sc->sc_rx_capability = ext->ext_2 & OFFLOAD_MASK;

	sc->sc_arpcom.ec_capabilities |= ETHERCAP_VLAN_MTU;
	if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_VLAN) {
		sc->sc_tx_capability |= OFFLOAD_VLAN;
		sc->sc_rx_capability |= OFFLOAD_VLAN;
		sc->sc_arpcom.ec_capabilities |= ETHERCAP_VLAN_HWTAGGING;
	}

#if 0
	/* not ready yet */
	if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_IPSEC) {
		sc->sc_tx_capability |= OFFLOAD_IPSEC;
		sc->sc_rx_capability |= OFFLOAD_IPSEC;
		ifp->if_capabilities |= IFCAP_IPSEC;
	}
#endif

	if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_IPCKSUM) {
		sc->sc_tx_capability |= OFFLOAD_IPCKSUM;
		sc->sc_rx_capability |= OFFLOAD_IPCKSUM;
		ifp->if_capabilities |= IFCAP_CSUM_IPv4;
	}

	if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_TCPCKSUM) {
		sc->sc_rx_capability |= OFFLOAD_TCPCKSUM;
#ifdef TRY_TX_TCP_CSUM
		sc->sc_tx_capability |= OFFLOAD_TCPCKSUM;
		ifp->if_capabilities |= IFCAP_CSUM_TCPv4;
#endif
	}

	if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_UDPCKSUM) {
		sc->sc_rx_capability |= OFFLOAD_UDPCKSUM;
#ifdef TRY_TX_UDP_CSUM
		sc->sc_tx_capability |= OFFLOAD_UDPCKSUM;
		ifp->if_capabilities |= IFCAP_CSUM_UDPv4;
#endif
	}

	if (txp_command(sc, TXP_CMD_OFFLOAD_WRITE, 0,
	    sc->sc_tx_capability, sc->sc_rx_capability, NULL, NULL, NULL, 1))
		goto out;

out:
	if (rsp != NULL)
		free(rsp, M_DEVBUF);
}