/*	$OpenBSD: if_txp.c,v 1.59 2001/11/06 19:53:19 miod Exp $	*/

/*
 * Copyright (c) 2001
 *	Jason L. Wright <jason@thought.net>, Theo de Raadt, and
 *	Aaron Campbell <aaron@monkey.org>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Jason L. Wright,
 *	Theo de Raadt and Aaron Campbell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
 * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Driver for 3c990 (Typhoon) Ethernet ASIC
 */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/timeout.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_types.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#include <net/if_media.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#if NVLAN > 0
#include <net/if_vlan_var.h>
#endif

#include <uvm/uvm_extern.h>		/* for vtophys */
#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_txpreg.h>

#include <dev/microcode/typhoon/3c990img.h>

/*
 * These currently break the 3c990 firmware, hopefully will be resolved
 * at some point.
 */
#undef TRY_TX_UDP_CSUM
#undef TRY_TX_TCP_CSUM

int txp_probe __P((struct device *, void *, void *));
void txp_attach __P((struct device *, struct device *, void *));
int txp_intr __P((void *));
void txp_tick __P((void *));
void txp_shutdown __P((void *));
int txp_ioctl __P((struct ifnet *, u_long, caddr_t));
void txp_start __P((struct ifnet *));
void txp_stop __P((struct txp_softc *));
void txp_init __P((struct txp_softc *));
void txp_watchdog __P((struct ifnet *));

int txp_chip_init __P((struct txp_softc *));
int txp_reset_adapter __P((struct txp_softc *));
int txp_download_fw __P((struct txp_softc *));
int txp_download_fw_wait __P((struct txp_softc *));
int txp_download_fw_section __P((struct txp_softc *,
    struct txp_fw_section_header *, int));
int txp_alloc_rings __P((struct txp_softc *));
void txp_dma_free __P((struct txp_softc *, struct txp_dma_alloc *));
int txp_dma_malloc __P((struct txp_softc *, bus_size_t, struct txp_dma_alloc *, int));
void txp_set_filter __P((struct txp_softc *));

int txp_cmd_desc_numfree __P((struct txp_softc *));
int txp_command __P((struct txp_softc *, u_int16_t, u_int16_t, u_int32_t,
    u_int32_t, u_int16_t *, u_int32_t *, u_int32_t *, int));
int txp_command2 __P((struct txp_softc *, u_int16_t, u_int16_t,
    u_int32_t, u_int32_t, struct txp_ext_desc *, u_int8_t,
    struct txp_rsp_desc **, int));
int txp_response __P((struct txp_softc *, u_int32_t, u_int16_t, u_int16_t,
    struct txp_rsp_desc **));
void txp_rsp_fixup __P((struct txp_softc *, struct txp_rsp_desc *,
    struct txp_rsp_desc *));
void txp_capabilities __P((struct txp_softc *));

void txp_ifmedia_sts __P((struct ifnet *, struct ifmediareq *));
int txp_ifmedia_upd __P((struct ifnet *));
void txp_show_descriptor __P((void *));
void txp_tx_reclaim __P((struct txp_softc *, struct txp_tx_ring *,
    struct txp_dma_alloc *));
void txp_rxbuf_reclaim __P((struct txp_softc *));
void txp_rx_reclaim __P((struct txp_softc *, struct txp_rx_ring *,
    struct txp_dma_alloc *));

struct cfattach txp_ca = {
	sizeof(struct txp_softc), txp_probe, txp_attach,
};

struct cfdriver txp_cd = {
	0, "txp", DV_IFNET
};

int
txp_probe(parent, match, aux)
	struct device *parent;
	void *match, *aux;
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;

	if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_3COM)
		return (0);

	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_3COM_3CR990TX95:
	case PCI_PRODUCT_3COM_3CR990TX97:
	case PCI_PRODUCT_3COM_3CR990SVR95:
	case PCI_PRODUCT_3COM_3CR990SVR97:
	case PCI_PRODUCT_3COM_3C990BTXM:
	case PCI_PRODUCT_3COM_3C990BSVR:
		return (1);
	}

	return (0);
}

void
txp_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct txp_softc *sc = (struct txp_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_size_t iosize;
	u_int32_t command;
	u_int16_t p1;
	u_int32_t p2;

	sc->sc_cold = 1;

	command = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);

	if (!(command & PCI_COMMAND_MASTER_ENABLE)) {
		printf(": failed to enable bus mastering\n");
		return;
	}

	if (!(command & PCI_COMMAND_MEM_ENABLE)) {
		printf(": failed to enable memory mapping\n");
		return;
	}
	if (pci_mapreg_map(pa, TXP_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_bt, &sc->sc_bh, NULL, &iosize, 0)) {
		printf(": can't map mem space %d\n", 0);
		return;
	}

	sc->sc_dmat = pa->pa_dmat;

	/*
	 * Allocate our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, txp_intr, sc,
	    self->dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	if (txp_chip_init(sc))
		return;

	if (txp_download_fw(sc))
		return;

	if (txp_alloc_rings(sc))
		return;

	if (txp_command(sc, TXP_CMD_MAX_PKT_SIZE_WRITE, TXP_MAX_PKTLEN, 0, 0,
	    NULL, NULL, NULL, 1))
		return;

	if (txp_command(sc, TXP_CMD_STATION_ADDRESS_READ, 0, 0, 0,
	    &p1, &p2, NULL, 1))
		return;

	txp_set_filter(sc);

	sc->sc_arpcom.ac_enaddr[0] = ((u_int8_t *)&p1)[1];
	sc->sc_arpcom.ac_enaddr[1] = ((u_int8_t *)&p1)[0];
	sc->sc_arpcom.ac_enaddr[2] = ((u_int8_t *)&p2)[3];
	sc->sc_arpcom.ac_enaddr[3] = ((u_int8_t *)&p2)[2];
	sc->sc_arpcom.ac_enaddr[4] = ((u_int8_t *)&p2)[1];
	sc->sc_arpcom.ac_enaddr[5] = ((u_int8_t *)&p2)[0];

	printf(" address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));
	sc->sc_cold = 0;

	ifmedia_init(&sc->sc_ifmedia, 0, txp_ifmedia_upd, txp_ifmedia_sts);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_TX, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_TX|IFM_HDX, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);

	sc->sc_xcvr = TXP_XCVR_AUTO;
	txp_command(sc, TXP_CMD_XCVR_SELECT, TXP_XCVR_AUTO, 0, 0,
	    NULL, NULL, NULL, 0);
	ifmedia_set(&sc->sc_ifmedia, IFM_ETHER|IFM_AUTO);

	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = txp_ioctl;
	ifp->if_output = ether_output;
	ifp->if_start = txp_start;
	ifp->if_watchdog = txp_watchdog;
	ifp->if_baudrate = 10000000;
	IFQ_SET_MAXLEN(&ifp->if_snd, TX_ENTRIES);
	IFQ_SET_READY(&ifp->if_snd);
	ifp->if_capabilities = 0;
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	txp_capabilities(sc);

	timeout_set(&sc->sc_tick, txp_tick, sc);

	/*
	 * Attach us everywhere
	 */
	if_attach(ifp);
	ether_ifattach(ifp);

	shutdownhook_establish(txp_shutdown, sc);
}

int
txp_chip_init(sc)
	struct txp_softc *sc;
{
	/* disable interrupts */
	WRITE_REG(sc, TXP_IER, 0);
	WRITE_REG(sc, TXP_IMR,
	    TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_LATCH);

	/* ack all interrupts */
	WRITE_REG(sc, TXP_ISR, TXP_INT_RESERVED | TXP_INT_LATCH |
	    TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 |
	    TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_A2H_3 | TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0);

	if (txp_reset_adapter(sc))
		return (-1);

	/* disable interrupts */
	WRITE_REG(sc, TXP_IER, 0);
	WRITE_REG(sc, TXP_IMR,
	    TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_LATCH);

	/* ack all interrupts */
	WRITE_REG(sc, TXP_ISR, TXP_INT_RESERVED | TXP_INT_LATCH |
	    TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 |
	    TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_A2H_3 | TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0);

	return (0);
}

int
txp_reset_adapter(sc)
	struct txp_softc *sc;
{
	u_int32_t r;
	int i;

	WRITE_REG(sc, TXP_SRR, TXP_SRR_ALL);
	DELAY(1000);
	WRITE_REG(sc, TXP_SRR, 0);

	/* Should wait max 6 seconds */
	for (i = 0; i < 6000; i++) {
		r = READ_REG(sc, TXP_A2H_0);
		if (r == STAT_WAITING_FOR_HOST_REQUEST)
			break;
		DELAY(1000);
	}

	if (r != STAT_WAITING_FOR_HOST_REQUEST) {
		printf("%s: reset hung\n", TXP_DEVNAME(sc));
		return (-1);
	}

	return (0);
}

int
txp_download_fw(sc)
	struct txp_softc *sc;
{
	struct txp_fw_file_header *fileheader;
	struct txp_fw_section_header *secthead;
	int sect;
	u_int32_t r, i, ier, imr;

	ier = READ_REG(sc, TXP_IER);
	WRITE_REG(sc, TXP_IER, ier | TXP_INT_A2H_0);

	imr = READ_REG(sc, TXP_IMR);
	WRITE_REG(sc, TXP_IMR, imr | TXP_INT_A2H_0);

	for (i = 0; i < 10000; i++) {
		r = READ_REG(sc, TXP_A2H_0);
		if (r == STAT_WAITING_FOR_HOST_REQUEST)
			break;
		DELAY(50);
	}
	if (r != STAT_WAITING_FOR_HOST_REQUEST) {
		printf(": not waiting for host request\n");
		return (-1);
	}

	/* Ack the status */
	WRITE_REG(sc, TXP_ISR, TXP_INT_A2H_0);

	fileheader = (struct txp_fw_file_header *)tc990image;
	if (bcmp("TYPHOON", fileheader->magicid, sizeof(fileheader->magicid))) {
		printf(": fw invalid magic\n");
		return (-1);
	}

	/* Tell boot firmware to get ready for image */
	WRITE_REG(sc, TXP_H2A_1, fileheader->addr);
	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_RUNTIME_IMAGE);

	if (txp_download_fw_wait(sc)) {
		printf("%s: fw wait failed, initial\n", sc->sc_dev.dv_xname);
		return (-1);
	}

	secthead = (struct txp_fw_section_header *)(((u_int8_t *)tc990image) +
	    sizeof(struct txp_fw_file_header));

	for (sect = 0; sect < fileheader->nsections; sect++) {
		if (txp_download_fw_section(sc, secthead, sect))
			return (-1);
		secthead = (struct txp_fw_section_header *)
		    (((u_int8_t *)secthead) + secthead->nbytes + sizeof(*secthead));
	}

	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_DOWNLOAD_COMPLETE);

	for (i = 0; i < 10000; i++) {
		r = READ_REG(sc, TXP_A2H_0);
		if (r == STAT_WAITING_FOR_BOOT)
			break;
		DELAY(50);
	}
	if (r != STAT_WAITING_FOR_BOOT) {
		printf(": not waiting for boot\n");
		return (-1);
	}

	WRITE_REG(sc, TXP_IER, ier);
	WRITE_REG(sc, TXP_IMR, imr);

	return (0);
}
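
/*
 * The boot firmware and the host talk through the TXP_H2A_* (host to
 * ARM) and TXP_A2H_* (ARM to host) mailbox registers: the host posts
 * a command in H2A_0, with arguments in H2A_1 through H2A_5, and the
 * firmware replies with a STAT_* value in A2H_0, raising TXP_INT_A2H_0
 * in the ISR when a new reply is ready.  The helper below just spins
 * for that acknowledgement between image sections.
 */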
int
txp_download_fw_wait(sc)
	struct txp_softc *sc;
{
	u_int32_t i, r;

	for (i = 0; i < 10000; i++) {
		r = READ_REG(sc, TXP_ISR);
		if (r & TXP_INT_A2H_0)
			break;
		DELAY(50);
	}

	if (!(r & TXP_INT_A2H_0)) {
		printf(": fw wait failed comm0\n");
		return (-1);
	}

	WRITE_REG(sc, TXP_ISR, TXP_INT_A2H_0);

	r = READ_REG(sc, TXP_A2H_0);
	if (r != STAT_WAITING_FOR_SEGMENT) {
		printf(": fw not waiting for segment\n");
		return (-1);
	}
	return (0);
}

int
txp_download_fw_section(sc, sect, sectnum)
	struct txp_softc *sc;
	struct txp_fw_section_header *sect;
	int sectnum;
{
	struct txp_dma_alloc dma;
	int rseg, err = 0;
	struct mbuf m;
	u_int16_t csum;

	/* Skip zero length sections */
	if (sect->nbytes == 0)
		return (0);

	/* Make sure we aren't past the end of the image */
	rseg = ((u_int8_t *)sect) - ((u_int8_t *)tc990image);
	if (rseg >= sizeof(tc990image)) {
		printf(": fw invalid section address, section %d\n", sectnum);
		return (-1);
	}

	/* Make sure this section doesn't go past the end */
	rseg += sect->nbytes;
	if (rseg >= sizeof(tc990image)) {
		printf(": fw truncated section %d\n", sectnum);
		return (-1);
	}

	/* map a buffer, copy segment to it, get physaddr */
	if (txp_dma_malloc(sc, sect->nbytes, &dma, 0)) {
		printf(": fw dma malloc failed, section %d\n", sectnum);
		return (-1);
	}

	bcopy(((u_int8_t *)sect) + sizeof(*sect), dma.dma_vaddr, sect->nbytes);

	/*
	 * dummy up mbuf and verify section checksum
	 */
	m.m_type = MT_DATA;
	m.m_next = m.m_nextpkt = NULL;
	m.m_len = sect->nbytes;
	m.m_data = dma.dma_vaddr;
	m.m_flags = 0;
	csum = in_cksum(&m, sect->nbytes);
	if (csum != sect->cksum) {
		printf(": fw section %d, bad cksum (expected 0x%x got 0x%x)\n",
		    sectnum, sect->cksum, csum);
		err = -1;
		goto bail;
	}

	bus_dmamap_sync(sc->sc_dmat, dma.dma_map, 0,
	    dma.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	WRITE_REG(sc, TXP_H2A_1, sect->nbytes);
	WRITE_REG(sc, TXP_H2A_2, sect->cksum);
	WRITE_REG(sc, TXP_H2A_3, sect->addr);
	WRITE_REG(sc, TXP_H2A_4, dma.dma_paddr >> 32);
	WRITE_REG(sc, TXP_H2A_5, dma.dma_paddr & 0xffffffff);
	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_SEGMENT_AVAILABLE);

	if (txp_download_fw_wait(sc)) {
		printf("%s: fw wait failed, section %d\n",
		    sc->sc_dev.dv_xname, sectnum);
		err = -1;
	}

	bus_dmamap_sync(sc->sc_dmat, dma.dma_map, 0,
	    dma.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);

bail:
	txp_dma_free(sc, &dma);

	return (err);
}
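
/*
 * Interrupt handler: all interrupt sources are masked while the
 * handler runs, the ISR is acknowledged and re-read until it stays
 * clear, and the read/write offsets the firmware exports through the
 * hostvar structure determine which reclaim routines have work.
 * On exit, everything except TXP_INT_A2H_3 is unmasked again.
 */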
int
txp_intr(vsc)
	void *vsc;
{
	struct txp_softc *sc = vsc;
	struct txp_hostvar *hv = sc->sc_hostvar;
	u_int32_t isr;
	int claimed = 0;

	/* mask all interrupts */
	WRITE_REG(sc, TXP_IMR, TXP_INT_RESERVED | TXP_INT_SELF |
	    TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 |
	    TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0 |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT | TXP_INT_LATCH);

	bus_dmamap_sync(sc->sc_dmat, sc->sc_host_dma.dma_map, 0,
	    sizeof(struct txp_hostvar), BUS_DMASYNC_POSTWRITE|BUS_DMASYNC_POSTREAD);

	isr = READ_REG(sc, TXP_ISR);
	while (isr) {
		claimed = 1;
		WRITE_REG(sc, TXP_ISR, isr);

		if ((*sc->sc_rxhir.r_roff) != (*sc->sc_rxhir.r_woff))
			txp_rx_reclaim(sc, &sc->sc_rxhir, &sc->sc_rxhiring_dma);
		if ((*sc->sc_rxlor.r_roff) != (*sc->sc_rxlor.r_woff))
			txp_rx_reclaim(sc, &sc->sc_rxlor, &sc->sc_rxloring_dma);

		if (hv->hv_rx_buf_write_idx == hv->hv_rx_buf_read_idx)
			txp_rxbuf_reclaim(sc);

		if (sc->sc_txhir.r_cnt && (sc->sc_txhir.r_cons !=
		    TXP_OFFSET2IDX(*(sc->sc_txhir.r_off))))
			txp_tx_reclaim(sc, &sc->sc_txhir, &sc->sc_txhiring_dma);

		if (sc->sc_txlor.r_cnt && (sc->sc_txlor.r_cons !=
		    TXP_OFFSET2IDX(*(sc->sc_txlor.r_off))))
			txp_tx_reclaim(sc, &sc->sc_txlor, &sc->sc_txloring_dma);

		isr = READ_REG(sc, TXP_ISR);
	}

	bus_dmamap_sync(sc->sc_dmat, sc->sc_host_dma.dma_map, 0,
	    sizeof(struct txp_hostvar), BUS_DMASYNC_POSTWRITE|BUS_DMASYNC_POSTREAD);

	/* unmask all interrupts */
	WRITE_REG(sc, TXP_IMR, TXP_INT_A2H_3);

	txp_start(&sc->sc_arpcom.ac_if);

	return (claimed);
}

void
txp_rx_reclaim(sc, r, dma)
	struct txp_softc *sc;
	struct txp_rx_ring *r;
	struct txp_dma_alloc *dma;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct txp_rx_desc *rxd;
	struct mbuf *m;
	struct txp_swdesc *sd;
	u_int32_t roff, woff;
	int sumflags = 0, idx;

	roff = *r->r_roff;
	woff = *r->r_woff;
	idx = roff / sizeof(struct txp_rx_desc);
	rxd = r->r_desc + idx;

	while (roff != woff) {

		bus_dmamap_sync(sc->sc_dmat, dma->dma_map,
		    idx * sizeof(struct txp_rx_desc), sizeof(struct txp_rx_desc),
		    BUS_DMASYNC_POSTREAD);

		if (rxd->rx_flags & RX_FLAGS_ERROR) {
			printf("%s: error 0x%x\n", sc->sc_dev.dv_xname,
			    rxd->rx_stat);
			ifp->if_ierrors++;
			goto next;
		}

		/* retrieve stashed pointer */
		bcopy((u_long *)&rxd->rx_vaddrlo, &sd, sizeof(sd));

		bus_dmamap_sync(sc->sc_dmat, sd->sd_map, 0,
		    sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, sd->sd_map);
		bus_dmamap_destroy(sc->sc_dmat, sd->sd_map);
		m = sd->sd_mbuf;
		free(sd, M_DEVBUF);
		m->m_pkthdr.len = m->m_len = rxd->rx_len;

#ifdef __STRICT_ALIGNMENT
		{
			/*
			 * XXX Nice chip, except it won't accept "off by 2"
			 * buffers, so we're forced to copy.  Supposedly
			 * this will be fixed in a newer firmware rev
			 * and this will be temporary.
			 */
			struct mbuf *mnew;

			MGETHDR(mnew, M_DONTWAIT, MT_DATA);
			if (mnew == NULL) {
				m_freem(m);
				goto next;
			}
			if (m->m_len > (MHLEN - 2)) {
				MCLGET(mnew, M_DONTWAIT);
				if (!(mnew->m_flags & M_EXT)) {
					m_freem(mnew);
					m_freem(m);
					goto next;
				}
			}
			mnew->m_pkthdr.rcvif = ifp;
			mnew->m_pkthdr.len = mnew->m_len = m->m_len;
			mnew->m_data += 2;
			bcopy(m->m_data, mnew->m_data, m->m_len);
			m_freem(m);
			m = mnew;
		}
#endif

#if NBPFILTER > 0
		/*
		 * Handle BPF listeners.  Let the BPF user see the packet.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif

		if (rxd->rx_stat & RX_STAT_IPCKSUMBAD)
			sumflags |= M_IPV4_CSUM_IN_BAD;
		else if (rxd->rx_stat & RX_STAT_IPCKSUMGOOD)
			sumflags |= M_IPV4_CSUM_IN_OK;

		if (rxd->rx_stat & RX_STAT_TCPCKSUMBAD)
			sumflags |= M_TCP_CSUM_IN_BAD;
		else if (rxd->rx_stat & RX_STAT_TCPCKSUMGOOD)
			sumflags |= M_TCP_CSUM_IN_OK;

		if (rxd->rx_stat & RX_STAT_UDPCKSUMBAD)
			sumflags |= M_UDP_CSUM_IN_BAD;
		else if (rxd->rx_stat & RX_STAT_UDPCKSUMGOOD)
			sumflags |= M_UDP_CSUM_IN_OK;

		m->m_pkthdr.csum = sumflags;

#if NVLAN > 0
		if (rxd->rx_stat & RX_STAT_VLAN) {
			if (vlan_input_tag(m, htons(rxd->rx_vlan >> 16)) < 0)
				ifp->if_noproto++;
			goto next;
		}
#endif

		ether_input_mbuf(ifp, m);

next:
		bus_dmamap_sync(sc->sc_dmat, dma->dma_map,
		    idx * sizeof(struct txp_rx_desc), sizeof(struct txp_rx_desc),
		    BUS_DMASYNC_PREREAD);

		roff += sizeof(struct txp_rx_desc);
		if (roff == (RX_ENTRIES * sizeof(struct txp_rx_desc))) {
			idx = 0;
			roff = 0;
			rxd = r->r_desc;
		} else {
			idx++;
			rxd++;
		}
		woff = *r->r_woff;
	}

	*r->r_roff = woff;
}

void
txp_rxbuf_reclaim(sc)
	struct txp_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct txp_hostvar *hv = sc->sc_hostvar;
	struct txp_rxbuf_desc *rbd;
	struct txp_swdesc *sd;
	u_int32_t i, end;

	end = TXP_OFFSET2IDX(hv->hv_rx_buf_read_idx);
	i = TXP_OFFSET2IDX(hv->hv_rx_buf_write_idx);

	if (++i == RXBUF_ENTRIES)
		i = 0;

	rbd = sc->sc_rxbufs + i;

	while (i != end) {
		sd = (struct txp_swdesc *)malloc(sizeof(struct txp_swdesc),
		    M_DEVBUF, M_NOWAIT);
		if (sd == NULL)
			break;

		MGETHDR(sd->sd_mbuf, M_DONTWAIT, MT_DATA);
		if (sd->sd_mbuf == NULL)
			goto err_sd;

		MCLGET(sd->sd_mbuf, M_DONTWAIT);
		if ((sd->sd_mbuf->m_flags & M_EXT) == 0)
			goto err_mbuf;
		sd->sd_mbuf->m_pkthdr.rcvif = ifp;
		sd->sd_mbuf->m_pkthdr.len = sd->sd_mbuf->m_len = MCLBYTES;
		if (bus_dmamap_create(sc->sc_dmat, TXP_MAX_PKTLEN, 1,
		    TXP_MAX_PKTLEN, 0, BUS_DMA_NOWAIT, &sd->sd_map))
			goto err_mbuf;
		if (bus_dmamap_load_mbuf(sc->sc_dmat, sd->sd_map, sd->sd_mbuf,
		    BUS_DMA_NOWAIT)) {
			bus_dmamap_destroy(sc->sc_dmat, sd->sd_map);
			goto err_mbuf;
		}

		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxbufring_dma.dma_map,
		    i * sizeof(struct txp_rxbuf_desc),
		    sizeof(struct txp_rxbuf_desc), BUS_DMASYNC_POSTWRITE);

		/* stash away pointer */
		bcopy(&sd, (u_long *)&rbd->rb_vaddrlo, sizeof(sd));

		rbd->rb_paddrlo = ((u_int64_t)sd->sd_map->dm_segs[0].ds_addr)
		    & 0xffffffff;
		rbd->rb_paddrhi = ((u_int64_t)sd->sd_map->dm_segs[0].ds_addr)
		    >> 32;

		bus_dmamap_sync(sc->sc_dmat, sd->sd_map, 0,
		    sd->sd_map->dm_mapsize, BUS_DMASYNC_PREREAD);

		bus_dmamap_sync(sc->sc_dmat, sc->sc_rxbufring_dma.dma_map,
		    i * sizeof(struct txp_rxbuf_desc),
		    sizeof(struct txp_rxbuf_desc), BUS_DMASYNC_PREWRITE);

		hv->hv_rx_buf_write_idx = TXP_IDX2OFFSET(i);

		if (++i == RXBUF_ENTRIES) {
			i = 0;
			rbd = sc->sc_rxbufs;
		} else
			rbd++;
	}
	return;

err_mbuf:
	m_freem(sd->sd_mbuf);
err_sd:
	free(sd, M_DEVBUF);
}
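
/*
 * The firmware describes ring positions as byte offsets into a ring
 * rather than descriptor indices; TXP_OFFSET2IDX() and
 * TXP_IDX2OFFSET() convert between the two views.  txp_tx_reclaim()
 * below walks its consumer index forward until it catches up with
 * the firmware's read offset.
 */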
/*
 * Reclaim mbufs and entries from a transmit ring.
 */
void
txp_tx_reclaim(sc, r, dma)
	struct txp_softc *sc;
	struct txp_tx_ring *r;
	struct txp_dma_alloc *dma;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	u_int32_t idx = TXP_OFFSET2IDX(*(r->r_off));
	u_int32_t cons = r->r_cons, cnt = r->r_cnt;
	struct txp_tx_desc *txd = r->r_desc + cons;
	struct txp_swdesc *sd = sc->sc_txd + cons;
	struct mbuf *m;

	while (cons != idx) {
		if (cnt == 0)
			break;

		bus_dmamap_sync(sc->sc_dmat, dma->dma_map,
		    cons * sizeof(struct txp_tx_desc),
		    sizeof(struct txp_tx_desc),
		    BUS_DMASYNC_POSTWRITE);

		if ((txd->tx_flags & TX_FLAGS_TYPE_M) ==
		    TX_FLAGS_TYPE_DATA) {
			bus_dmamap_sync(sc->sc_dmat, sd->sd_map, 0,
			    sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, sd->sd_map);
			m = sd->sd_mbuf;
			if (m != NULL) {
				m_freem(m);
				txd->tx_addrlo = 0;
				txd->tx_addrhi = 0;
				ifp->if_opackets++;
			}
		}
		ifp->if_flags &= ~IFF_OACTIVE;

		if (++cons == TX_ENTRIES) {
			txd = r->r_desc;
			cons = 0;
			sd = sc->sc_txd;
		} else {
			txd++;
			sd++;
		}

		cnt--;
	}

	r->r_cons = cons;
	r->r_cnt = cnt;
	if (cnt == 0)
		ifp->if_timer = 0;
}

void
txp_shutdown(vsc)
	void *vsc;
{
	struct txp_softc *sc = (struct txp_softc *)vsc;

	/* mask all interrupts */
	WRITE_REG(sc, TXP_IMR,
	    TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_LATCH);

	txp_command(sc, TXP_CMD_TX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 0);
	txp_command(sc, TXP_CMD_RX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 0);
	txp_command(sc, TXP_CMD_HALT, 0, 0, 0, NULL, NULL, NULL, 0);
}
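
/*
 * Allocate all of the descriptor rings up front and describe them to
 * the firmware through a single boot record: each ring's physical
 * address and size is written into the txp_boot_record, and the
 * record's own physical address is handed to the chip with
 * TXP_BOOTCMD_REGISTER_BOOT_RECORD once it reports
 * STAT_WAITING_FOR_BOOT.
 */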
int
txp_alloc_rings(sc)
	struct txp_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct txp_boot_record *boot;
	struct txp_swdesc *sd;
	u_int32_t r;
	int i, j;

	/* boot record */
	if (txp_dma_malloc(sc, sizeof(struct txp_boot_record), &sc->sc_boot_dma,
	    BUS_DMA_COHERENT)) {
		printf(": can't allocate boot record\n");
		return (-1);
	}
	boot = (struct txp_boot_record *)sc->sc_boot_dma.dma_vaddr;
	bzero(boot, sizeof(*boot));
	sc->sc_boot = boot;

	/* host variables */
	if (txp_dma_malloc(sc, sizeof(struct txp_hostvar), &sc->sc_host_dma,
	    BUS_DMA_COHERENT)) {
		printf(": can't allocate host ring\n");
		goto bail_boot;
	}
	bzero(sc->sc_host_dma.dma_vaddr, sizeof(struct txp_hostvar));
	boot->br_hostvar_lo = sc->sc_host_dma.dma_paddr & 0xffffffff;
	boot->br_hostvar_hi = sc->sc_host_dma.dma_paddr >> 32;
	sc->sc_hostvar = (struct txp_hostvar *)sc->sc_host_dma.dma_vaddr;

	/* high priority tx ring */
	if (txp_dma_malloc(sc, sizeof(struct txp_tx_desc) * TX_ENTRIES,
	    &sc->sc_txhiring_dma, BUS_DMA_COHERENT)) {
		printf(": can't allocate high tx ring\n");
		goto bail_host;
	}
	bzero(sc->sc_txhiring_dma.dma_vaddr, sizeof(struct txp_tx_desc) * TX_ENTRIES);
	boot->br_txhipri_lo = sc->sc_txhiring_dma.dma_paddr & 0xffffffff;
	boot->br_txhipri_hi = sc->sc_txhiring_dma.dma_paddr >> 32;
	boot->br_txhipri_siz = TX_ENTRIES * sizeof(struct txp_tx_desc);
	sc->sc_txhir.r_reg = TXP_H2A_1;
	sc->sc_txhir.r_desc = (struct txp_tx_desc *)sc->sc_txhiring_dma.dma_vaddr;
	sc->sc_txhir.r_cons = sc->sc_txhir.r_prod = sc->sc_txhir.r_cnt = 0;
	sc->sc_txhir.r_off = &sc->sc_hostvar->hv_tx_hi_desc_read_idx;
	for (i = 0; i < TX_ENTRIES; i++) {
		if (bus_dmamap_create(sc->sc_dmat, TXP_MAX_PKTLEN,
		    TX_ENTRIES - 4, TXP_MAX_SEGLEN, 0,
		    BUS_DMA_NOWAIT, &sc->sc_txd[i].sd_map) != 0) {
			for (j = 0; j < i; j++) {
				bus_dmamap_destroy(sc->sc_dmat,
				    sc->sc_txd[j].sd_map);
				sc->sc_txd[j].sd_map = NULL;
			}
			goto bail_txhiring;
		}
	}

	/* low priority tx ring */
	if (txp_dma_malloc(sc, sizeof(struct txp_tx_desc) * TX_ENTRIES,
	    &sc->sc_txloring_dma, BUS_DMA_COHERENT)) {
		printf(": can't allocate low tx ring\n");
		goto bail_txhiring;
	}
	bzero(sc->sc_txloring_dma.dma_vaddr, sizeof(struct txp_tx_desc) * TX_ENTRIES);
	boot->br_txlopri_lo = sc->sc_txloring_dma.dma_paddr & 0xffffffff;
	boot->br_txlopri_hi = sc->sc_txloring_dma.dma_paddr >> 32;
	boot->br_txlopri_siz = TX_ENTRIES * sizeof(struct txp_tx_desc);
	sc->sc_txlor.r_reg = TXP_H2A_3;
	sc->sc_txlor.r_desc = (struct txp_tx_desc *)sc->sc_txloring_dma.dma_vaddr;
	sc->sc_txlor.r_cons = sc->sc_txlor.r_prod = sc->sc_txlor.r_cnt = 0;
	sc->sc_txlor.r_off = &sc->sc_hostvar->hv_tx_lo_desc_read_idx;

	/* high priority rx ring */
	if (txp_dma_malloc(sc, sizeof(struct txp_rx_desc) * RX_ENTRIES,
	    &sc->sc_rxhiring_dma, BUS_DMA_COHERENT)) {
		printf(": can't allocate high rx ring\n");
		goto bail_txloring;
	}
	bzero(sc->sc_rxhiring_dma.dma_vaddr, sizeof(struct txp_rx_desc) * RX_ENTRIES);
	boot->br_rxhipri_lo = sc->sc_rxhiring_dma.dma_paddr & 0xffffffff;
	boot->br_rxhipri_hi = sc->sc_rxhiring_dma.dma_paddr >> 32;
	boot->br_rxhipri_siz = RX_ENTRIES * sizeof(struct txp_rx_desc);
	sc->sc_rxhir.r_desc =
	    (struct txp_rx_desc *)sc->sc_rxhiring_dma.dma_vaddr;
	sc->sc_rxhir.r_roff = &sc->sc_hostvar->hv_rx_hi_read_idx;
	sc->sc_rxhir.r_woff = &sc->sc_hostvar->hv_rx_hi_write_idx;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxhiring_dma.dma_map,
	    0, sc->sc_rxhiring_dma.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD);

	/* low priority ring */
	if (txp_dma_malloc(sc, sizeof(struct txp_rx_desc) * RX_ENTRIES,
	    &sc->sc_rxloring_dma, BUS_DMA_COHERENT)) {
		printf(": can't allocate low rx ring\n");
		goto bail_rxhiring;
	}
	bzero(sc->sc_rxloring_dma.dma_vaddr, sizeof(struct txp_rx_desc) * RX_ENTRIES);
	boot->br_rxlopri_lo = sc->sc_rxloring_dma.dma_paddr & 0xffffffff;
	boot->br_rxlopri_hi = sc->sc_rxloring_dma.dma_paddr >> 32;
	boot->br_rxlopri_siz = RX_ENTRIES * sizeof(struct txp_rx_desc);
	sc->sc_rxlor.r_desc =
	    (struct txp_rx_desc *)sc->sc_rxloring_dma.dma_vaddr;
	sc->sc_rxlor.r_roff = &sc->sc_hostvar->hv_rx_lo_read_idx;
	sc->sc_rxlor.r_woff = &sc->sc_hostvar->hv_rx_lo_write_idx;
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxloring_dma.dma_map,
	    0, sc->sc_rxloring_dma.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD);

	/* command ring */
	if (txp_dma_malloc(sc, sizeof(struct txp_cmd_desc) * CMD_ENTRIES,
	    &sc->sc_cmdring_dma, BUS_DMA_COHERENT)) {
		printf(": can't allocate command ring\n");
		goto bail_rxloring;
	}
	bzero(sc->sc_cmdring_dma.dma_vaddr, sizeof(struct txp_cmd_desc) * CMD_ENTRIES);
	boot->br_cmd_lo = sc->sc_cmdring_dma.dma_paddr & 0xffffffff;
	boot->br_cmd_hi = sc->sc_cmdring_dma.dma_paddr >> 32;
	boot->br_cmd_siz = CMD_ENTRIES * sizeof(struct txp_cmd_desc);
	sc->sc_cmdring.base = (struct txp_cmd_desc *)sc->sc_cmdring_dma.dma_vaddr;
	sc->sc_cmdring.size = CMD_ENTRIES * sizeof(struct txp_cmd_desc);
	sc->sc_cmdring.lastwrite = 0;

	/* response ring */
	if (txp_dma_malloc(sc, sizeof(struct txp_rsp_desc) * RSP_ENTRIES,
	    &sc->sc_rspring_dma, BUS_DMA_COHERENT)) {
		printf(": can't allocate response ring\n");
		goto bail_cmdring;
	}
	bzero(sc->sc_rspring_dma.dma_vaddr, sizeof(struct txp_rsp_desc) * RSP_ENTRIES);
	boot->br_resp_lo = sc->sc_rspring_dma.dma_paddr & 0xffffffff;
	boot->br_resp_hi = sc->sc_rspring_dma.dma_paddr >> 32;
	boot->br_resp_siz = RSP_ENTRIES * sizeof(struct txp_rsp_desc);
	sc->sc_rspring.base = (struct txp_rsp_desc *)sc->sc_rspring_dma.dma_vaddr;
	sc->sc_rspring.size = RSP_ENTRIES * sizeof(struct txp_rsp_desc);
	sc->sc_rspring.lastwrite = 0;

	/* receive buffer ring */
	if (txp_dma_malloc(sc, sizeof(struct txp_rxbuf_desc) * RXBUF_ENTRIES,
	    &sc->sc_rxbufring_dma, BUS_DMA_COHERENT)) {
		printf(": can't allocate rx buffer ring\n");
		goto bail_rspring;
	}
	bzero(sc->sc_rxbufring_dma.dma_vaddr, sizeof(struct txp_rxbuf_desc) * RXBUF_ENTRIES);
	boot->br_rxbuf_lo = sc->sc_rxbufring_dma.dma_paddr & 0xffffffff;
	boot->br_rxbuf_hi = sc->sc_rxbufring_dma.dma_paddr >> 32;
	boot->br_rxbuf_siz = RXBUF_ENTRIES * sizeof(struct txp_rxbuf_desc);
	sc->sc_rxbufs = (struct txp_rxbuf_desc *)sc->sc_rxbufring_dma.dma_vaddr;
	for (i = 0; i < RXBUF_ENTRIES; i++) {
		sd = (struct txp_swdesc *)malloc(sizeof(struct txp_swdesc),
		    M_DEVBUF, M_NOWAIT);
		if (sd == NULL)
			break;

		MGETHDR(sd->sd_mbuf, M_DONTWAIT, MT_DATA);
		if (sd->sd_mbuf == NULL) {
			goto bail_rxbufring;
		}

		MCLGET(sd->sd_mbuf, M_DONTWAIT);
		if ((sd->sd_mbuf->m_flags & M_EXT) == 0) {
			goto bail_rxbufring;
		}
		sd->sd_mbuf->m_pkthdr.len = sd->sd_mbuf->m_len = MCLBYTES;
		sd->sd_mbuf->m_pkthdr.rcvif = ifp;
		if (bus_dmamap_create(sc->sc_dmat, TXP_MAX_PKTLEN, 1,
		    TXP_MAX_PKTLEN, 0, BUS_DMA_NOWAIT, &sd->sd_map)) {
			goto bail_rxbufring;
		}
		if (bus_dmamap_load_mbuf(sc->sc_dmat, sd->sd_map, sd->sd_mbuf,
		    BUS_DMA_NOWAIT)) {
			bus_dmamap_destroy(sc->sc_dmat, sd->sd_map);
			goto bail_rxbufring;
		}
		bus_dmamap_sync(sc->sc_dmat, sd->sd_map, 0,
		    sd->sd_map->dm_mapsize, BUS_DMASYNC_PREREAD);

		/* stash away pointer */
		bcopy(&sd, (u_long *)&sc->sc_rxbufs[i].rb_vaddrlo, sizeof(sd));

		sc->sc_rxbufs[i].rb_paddrlo =
		    ((u_int64_t)sd->sd_map->dm_segs[0].ds_addr) & 0xffffffff;
		sc->sc_rxbufs[i].rb_paddrhi =
		    ((u_int64_t)sd->sd_map->dm_segs[0].ds_addr) >> 32;
	}
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxbufring_dma.dma_map,
	    0, sc->sc_rxbufring_dma.dma_map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);
	sc->sc_hostvar->hv_rx_buf_write_idx = (RXBUF_ENTRIES - 1) *
	    sizeof(struct txp_rxbuf_desc);

	/* zero dma */
	if (txp_dma_malloc(sc, sizeof(u_int32_t), &sc->sc_zero_dma,
	    BUS_DMA_COHERENT)) {
		printf(": can't allocate zero dma\n");
		goto bail_rxbufring;
	}
	bzero(sc->sc_zero_dma.dma_vaddr, sizeof(u_int32_t));
	boot->br_zero_lo = sc->sc_zero_dma.dma_paddr & 0xffffffff;
	boot->br_zero_hi = sc->sc_zero_dma.dma_paddr >> 32;

	/* See if it's waiting for boot, and try to boot it */
	for (i = 0; i < 10000; i++) {
		r = READ_REG(sc, TXP_A2H_0);
		if (r == STAT_WAITING_FOR_BOOT)
			break;
		DELAY(50);
	}
	if (r != STAT_WAITING_FOR_BOOT) {
		printf(": not waiting for boot\n");
		goto bail;
	}
	WRITE_REG(sc, TXP_H2A_2, sc->sc_boot_dma.dma_paddr >> 32);
	WRITE_REG(sc, TXP_H2A_1, sc->sc_boot_dma.dma_paddr & 0xffffffff);
	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_REGISTER_BOOT_RECORD);

	/* See if it booted */
	for (i = 0; i < 10000; i++) {
		r = READ_REG(sc, TXP_A2H_0);
		if (r == STAT_RUNNING)
			break;
		DELAY(50);
	}
	if (r != STAT_RUNNING) {
		printf(": fw not running\n");
		goto bail;
	}

	/* Clear TX and CMD ring write registers */
	WRITE_REG(sc, TXP_H2A_1, TXP_BOOTCMD_NULL);
	WRITE_REG(sc, TXP_H2A_2, TXP_BOOTCMD_NULL);
	WRITE_REG(sc, TXP_H2A_3, TXP_BOOTCMD_NULL);
	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_NULL);

	return (0);

bail:
	txp_dma_free(sc, &sc->sc_zero_dma);
bail_rxbufring:
	txp_dma_free(sc, &sc->sc_rxbufring_dma);
bail_rspring:
	txp_dma_free(sc, &sc->sc_rspring_dma);
bail_cmdring:
	txp_dma_free(sc, &sc->sc_cmdring_dma);
bail_rxloring:
	txp_dma_free(sc, &sc->sc_rxloring_dma);
bail_rxhiring:
	txp_dma_free(sc, &sc->sc_rxhiring_dma);
bail_txloring:
	txp_dma_free(sc, &sc->sc_txloring_dma);
bail_txhiring:
	txp_dma_free(sc, &sc->sc_txhiring_dma);
bail_host:
	txp_dma_free(sc, &sc->sc_host_dma);
bail_boot:
	txp_dma_free(sc, &sc->sc_boot_dma);
	return (-1);
}

int
txp_dma_malloc(sc, size, dma, mapflags)
	struct txp_softc *sc;
	bus_size_t size;
	struct txp_dma_alloc *dma;
	int mapflags;
{
	int r;

	if ((r = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0,
	    &dma->dma_seg, 1, &dma->dma_nseg, 0)) != 0)
		goto fail_0;

	if ((r = bus_dmamem_map(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg,
	    size, &dma->dma_vaddr, mapflags | BUS_DMA_NOWAIT)) != 0)
		goto fail_1;

	if ((r = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT, &dma->dma_map)) != 0)
		goto fail_2;

	if ((r = bus_dmamap_load(sc->sc_dmat, dma->dma_map, dma->dma_vaddr,
	    size, NULL, BUS_DMA_NOWAIT)) != 0)
		goto fail_3;

	dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr;
	return (0);

fail_3:
	bus_dmamap_destroy(sc->sc_dmat, dma->dma_map);
fail_2:
	bus_dmamem_unmap(sc->sc_dmat, dma->dma_vaddr, size);
fail_1:
	bus_dmamem_free(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg);
fail_0:
	return (r);
}

void
txp_dma_free(sc, dma)
	struct txp_softc *sc;
	struct txp_dma_alloc *dma;
{
	bus_dmamap_unload(sc->sc_dmat, dma->dma_map);
	bus_dmamem_unmap(sc->sc_dmat, dma->dma_vaddr, dma->dma_map->dm_mapsize);
	bus_dmamem_free(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg);
	bus_dmamap_destroy(sc->sc_dmat, dma->dma_map);
}

int
txp_ioctl(ifp, command, data)
	struct ifnet *ifp;
	u_long command;
	caddr_t data;
{
	struct txp_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, &sc->sc_arpcom, command, data)) > 0) {
		splx(s);
		return (error);
	}

	switch(command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			txp_init(sc);
			arp_ifinit(&sc->sc_arpcom, ifa);
			break;
#endif /* INET */
		default:
			txp_init(sc);
			break;
		}
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			txp_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				txp_stop(sc);
		}
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (command == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_arpcom) :
		    ether_delmulti(ifr, &sc->sc_arpcom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware
			 * filter accordingly.
			 */
			txp_set_filter(sc);
			error = 0;
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_ifmedia, command);
		break;
	default:
		error = EINVAL;
		break;
	}

	(void)splx(s);

	return (error);
}

void
txp_init(sc)
	struct txp_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int s;

	txp_stop(sc);

	s = splnet();

	txp_set_filter(sc);

	txp_command(sc, TXP_CMD_TX_ENABLE, 0, 0, 0, NULL, NULL, NULL, 1);
	txp_command(sc, TXP_CMD_RX_ENABLE, 0, 0, 0, NULL, NULL, NULL, 1);

	WRITE_REG(sc, TXP_IER, TXP_INT_RESERVED | TXP_INT_SELF |
	    TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 |
	    TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0 |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT | TXP_INT_LATCH);
	WRITE_REG(sc, TXP_IMR, TXP_INT_A2H_3);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	ifp->if_timer = 0;

	if (!timeout_pending(&sc->sc_tick))
		timeout_add(&sc->sc_tick, hz);

	splx(s);
}

void
txp_tick(vsc)
	void *vsc;
{
	struct txp_softc *sc = vsc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct txp_rsp_desc *rsp = NULL;
	struct txp_ext_desc *ext;
	int s;

	s = splnet();
	txp_rxbuf_reclaim(sc);

	if (txp_command2(sc, TXP_CMD_READ_STATISTICS, 0, 0, 0, NULL, 0,
	    &rsp, 1))
		goto out;
	if (rsp->rsp_numdesc != 6)
		goto out;
	if (txp_command(sc, TXP_CMD_CLEAR_STATISTICS, 0, 0, 0,
	    NULL, NULL, NULL, 1))
		goto out;
	ext = (struct txp_ext_desc *)(rsp + 1);

	ifp->if_ierrors += ext[3].ext_2 + ext[3].ext_3 + ext[3].ext_4 +
	    ext[4].ext_1 + ext[4].ext_4;
	ifp->if_oerrors += ext[0].ext_1 + ext[1].ext_1 + ext[1].ext_4 +
	    ext[2].ext_1;
	ifp->if_collisions += ext[0].ext_2 + ext[0].ext_3 + ext[1].ext_2 +
	    ext[1].ext_3;
	ifp->if_opackets += rsp->rsp_par2;
	ifp->if_ipackets += ext[2].ext_3;

out:
	if (rsp != NULL)
		free(rsp, M_DEVBUF);

	splx(s);
	timeout_add(&sc->sc_tick, hz);
}
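
/*
 * Transmit.  Each packet goes onto the high priority ring as a
 * txp_tx_desc header followed by one txp_frag_desc per DMA segment;
 * the header carries the total length and the checksum/VLAN offload
 * flags, and is only marked TX_FLAGS_VALID once all of its fragments
 * have been written.  Four entries of slack are always kept so the
 * producer never quite catches the consumer.
 */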
void
txp_start(ifp)
	struct ifnet *ifp;
{
	struct txp_softc *sc = ifp->if_softc;
	struct txp_tx_ring *r = &sc->sc_txhir;
	struct txp_tx_desc *txd;
	int txdidx;
	struct txp_frag_desc *fxd;
	struct mbuf *m, *mnew;
	struct txp_swdesc *sd;
	u_int32_t firstprod, firstcnt, prod, cnt, i;
#if NVLAN > 0
	struct ifvlan *ifv;
#endif

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	prod = r->r_prod;
	cnt = r->r_cnt;

	while (1) {
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		firstprod = prod;
		firstcnt = cnt;

		sd = sc->sc_txd + prod;
		sd->sd_mbuf = m;

		if (bus_dmamap_load_mbuf(sc->sc_dmat, sd->sd_map, m,
		    BUS_DMA_NOWAIT)) {
			MGETHDR(mnew, M_DONTWAIT, MT_DATA);
			if (mnew == NULL)
				goto oactive1;
			if (m->m_pkthdr.len > MHLEN) {
				MCLGET(mnew, M_DONTWAIT);
				if ((mnew->m_flags & M_EXT) == 0) {
					m_freem(mnew);
					goto oactive1;
				}
			}
			m_copydata(m, 0, m->m_pkthdr.len, mtod(mnew, caddr_t));
			mnew->m_pkthdr.len = mnew->m_len = m->m_pkthdr.len;
			m_freem(m);
			m = mnew;
			if (bus_dmamap_load_mbuf(sc->sc_dmat, sd->sd_map, m,
			    BUS_DMA_NOWAIT))
				goto oactive1;
		}

		if ((TX_ENTRIES - cnt) < 4)
			goto oactive;

		txd = r->r_desc + prod;
		txdidx = prod;
		txd->tx_flags = TX_FLAGS_TYPE_DATA;
		txd->tx_numdesc = 0;
		txd->tx_addrlo = 0;
		txd->tx_addrhi = 0;
		txd->tx_totlen = m->m_pkthdr.len;
		txd->tx_pflags = 0;
		txd->tx_numdesc = sd->sd_map->dm_nsegs;

		if (++prod == TX_ENTRIES)
			prod = 0;

		if (++cnt >= (TX_ENTRIES - 4))
			goto oactive;

#if NVLAN > 0
		if ((m->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
		    m->m_pkthdr.rcvif != NULL) {
			ifv = m->m_pkthdr.rcvif->if_softc;
			txd->tx_pflags = TX_PFLAGS_VLAN |
			    (htons(ifv->ifv_tag) << TX_PFLAGS_VLANTAG_S);
		}
#endif

		if (m->m_pkthdr.csum & M_IPV4_CSUM_OUT)
			txd->tx_pflags |= TX_PFLAGS_IPCKSUM;
#ifdef TRY_TX_TCP_CSUM
		if (m->m_pkthdr.csum & M_TCPV4_CSUM_OUT)
			txd->tx_pflags |= TX_PFLAGS_TCPCKSUM;
#endif
#ifdef TRY_TX_UDP_CSUM
		if (m->m_pkthdr.csum & M_UDPV4_CSUM_OUT)
			txd->tx_pflags |= TX_PFLAGS_UDPCKSUM;
#endif

		bus_dmamap_sync(sc->sc_dmat, sd->sd_map, 0,
		    sd->sd_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

		fxd = (struct txp_frag_desc *)(r->r_desc + prod);
		for (i = 0; i < sd->sd_map->dm_nsegs; i++) {
			if (++cnt >= (TX_ENTRIES - 4)) {
				bus_dmamap_sync(sc->sc_dmat, sd->sd_map,
				    0, sd->sd_map->dm_mapsize,
				    BUS_DMASYNC_POSTWRITE);
				goto oactive;
			}

			fxd->frag_flags = FRAG_FLAGS_TYPE_FRAG |
			    FRAG_FLAGS_VALID;
			fxd->frag_rsvd1 = 0;
			fxd->frag_len = sd->sd_map->dm_segs[i].ds_len;
			fxd->frag_addrlo =
			    ((u_int64_t)sd->sd_map->dm_segs[i].ds_addr) &
			    0xffffffff;
			fxd->frag_addrhi =
			    ((u_int64_t)sd->sd_map->dm_segs[i].ds_addr) >>
			    32;
			fxd->frag_rsvd2 = 0;

			bus_dmamap_sync(sc->sc_dmat,
			    sc->sc_txhiring_dma.dma_map,
			    prod * sizeof(struct txp_frag_desc),
			    sizeof(struct txp_frag_desc), BUS_DMASYNC_PREWRITE);

			if (++prod == TX_ENTRIES) {
				fxd = (struct txp_frag_desc *)r->r_desc;
				prod = 0;
			} else
				fxd++;

		}

		ifp->if_timer = 5;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif

		txd->tx_flags |= TX_FLAGS_VALID;
		bus_dmamap_sync(sc->sc_dmat, sc->sc_txhiring_dma.dma_map,
		    txdidx * sizeof(struct txp_tx_desc),
		    sizeof(struct txp_tx_desc), BUS_DMASYNC_PREWRITE);

#if 0
		{
			struct mbuf *mx;
			int i;

			printf("txd: flags 0x%x ndesc %d totlen %d pflags 0x%x\n",
			    txd->tx_flags, txd->tx_numdesc, txd->tx_totlen,
			    txd->tx_pflags);
			for (mx = m; mx != NULL; mx = mx->m_next) {
				for (i = 0; i < mx->m_len; i++) {
					printf(":%02x",
					    (u_int8_t)mx->m_data[i]);
				}
			}
			printf("\n");
		}
#endif

		WRITE_REG(sc, r->r_reg, TXP_IDX2OFFSET(prod));
	}

	r->r_prod = prod;
	r->r_cnt = cnt;
	return;

oactive:
	bus_dmamap_unload(sc->sc_dmat, sd->sd_map);
oactive1:
	ifp->if_flags |= IFF_OACTIVE;
	r->r_prod = firstprod;
	r->r_cnt = firstcnt;
	IF_PREPEND(&ifp->if_snd, m);
}
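
/*
 * Command/response protocol: commands are placed in the command ring
 * and, when a response is requested, matched against the response
 * ring by command id and sequence number.  txp_command() covers the
 * simple fixed-argument case; txp_command2() also carries extension
 * descriptors and hands back the raw response, which the caller must
 * free.
 */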
/*
 * Handle simple commands sent to the typhoon
 */
int
txp_command(sc, id, in1, in2, in3, out1, out2, out3, wait)
	struct txp_softc *sc;
	u_int16_t id, in1, *out1;
	u_int32_t in2, in3, *out2, *out3;
	int wait;
{
	struct txp_rsp_desc *rsp = NULL;

	if (txp_command2(sc, id, in1, in2, in3, NULL, 0, &rsp, wait))
		return (-1);

	if (!wait)
		return (0);

	if (out1 != NULL)
		*out1 = rsp->rsp_par1;
	if (out2 != NULL)
		*out2 = rsp->rsp_par2;
	if (out3 != NULL)
		*out3 = rsp->rsp_par3;
	free(rsp, M_DEVBUF);
	return (0);
}

int
txp_command2(sc, id, in1, in2, in3, in_extp, in_extn, rspp, wait)
	struct txp_softc *sc;
	u_int16_t id, in1;
	u_int32_t in2, in3;
	struct txp_ext_desc *in_extp;
	u_int8_t in_extn;
	struct txp_rsp_desc **rspp;
	int wait;
{
	struct txp_hostvar *hv = sc->sc_hostvar;
	struct txp_cmd_desc *cmd;
	struct txp_ext_desc *ext;
	u_int32_t idx, i;
	u_int16_t seq;

	if (txp_cmd_desc_numfree(sc) < (in_extn + 1)) {
		printf("%s: no free cmd descriptors\n", TXP_DEVNAME(sc));
		return (-1);
	}

	idx = sc->sc_cmdring.lastwrite;
	cmd = (struct txp_cmd_desc *)(((u_int8_t *)sc->sc_cmdring.base) + idx);
	bzero(cmd, sizeof(*cmd));

	cmd->cmd_numdesc = in_extn;
	cmd->cmd_seq = seq = sc->sc_seq++;
	cmd->cmd_id = id;
	cmd->cmd_par1 = in1;
	cmd->cmd_par2 = in2;
	cmd->cmd_par3 = in3;
	cmd->cmd_flags = CMD_FLAGS_TYPE_CMD |
	    (wait ? CMD_FLAGS_RESP : 0) | CMD_FLAGS_VALID;

	idx += sizeof(struct txp_cmd_desc);
	if (idx == sc->sc_cmdring.size)
		idx = 0;

	for (i = 0; i < in_extn; i++) {
		ext = (struct txp_ext_desc *)(((u_int8_t *)sc->sc_cmdring.base) + idx);
		bcopy(in_extp, ext, sizeof(struct txp_ext_desc));
		in_extp++;
		idx += sizeof(struct txp_cmd_desc);
		if (idx == sc->sc_cmdring.size)
			idx = 0;
	}

	sc->sc_cmdring.lastwrite = idx;

	WRITE_REG(sc, TXP_H2A_2, sc->sc_cmdring.lastwrite);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_host_dma.dma_map, 0,
	    sizeof(struct txp_hostvar), BUS_DMASYNC_PREREAD);

	if (!wait)
		return (0);

	for (i = 0; i < 10000; i++) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_host_dma.dma_map, 0,
		    sizeof(struct txp_hostvar), BUS_DMASYNC_POSTREAD);
		idx = hv->hv_resp_read_idx;
		if (idx != hv->hv_resp_write_idx) {
			*rspp = NULL;
			if (txp_response(sc, idx, id, seq, rspp))
				return (-1);
			if (*rspp != NULL)
				break;
		}
		bus_dmamap_sync(sc->sc_dmat, sc->sc_host_dma.dma_map, 0,
		    sizeof(struct txp_hostvar), BUS_DMASYNC_PREREAD);
		DELAY(50);
	}
	if (i == 10000 || (*rspp) == NULL) {
		printf("%s: 0x%x command failed\n", TXP_DEVNAME(sc), id);
		return (-1);
	}

	return (0);
}

int
txp_response(sc, ridx, id, seq, rspp)
	struct txp_softc *sc;
	u_int32_t ridx;
	u_int16_t id;
	u_int16_t seq;
	struct txp_rsp_desc **rspp;
{
	struct txp_hostvar *hv = sc->sc_hostvar;
	struct txp_rsp_desc *rsp;

	while (ridx != hv->hv_resp_write_idx) {
		rsp = (struct txp_rsp_desc *)(((u_int8_t *)sc->sc_rspring.base) + ridx);

		if (id == rsp->rsp_id && rsp->rsp_seq == seq) {
			*rspp = (struct txp_rsp_desc *)malloc(
			    sizeof(struct txp_rsp_desc) * (rsp->rsp_numdesc + 1),
			    M_DEVBUF, M_NOWAIT);
			if ((*rspp) == NULL)
				return (-1);
			txp_rsp_fixup(sc, rsp, *rspp);
			return (0);
		}

		if (rsp->rsp_flags & RSP_FLAGS_ERROR) {
			printf("%s: response error: id 0x%x\n",
			    TXP_DEVNAME(sc), rsp->rsp_id);
			txp_rsp_fixup(sc, rsp, NULL);
			ridx = hv->hv_resp_read_idx;
			continue;
		}

		switch (rsp->rsp_id) {
		case TXP_CMD_CYCLE_STATISTICS:
		case TXP_CMD_MEDIA_STATUS_READ:
			break;
		case TXP_CMD_HELLO_RESPONSE:
			printf("%s: hello\n", TXP_DEVNAME(sc));
			break;
		default:
			printf("%s: unknown id(0x%x)\n", TXP_DEVNAME(sc),
			    rsp->rsp_id);
		}

		txp_rsp_fixup(sc, rsp, NULL);
		ridx = hv->hv_resp_read_idx;
		hv->hv_resp_read_idx = ridx;
	}

	return (0);
}

void
txp_rsp_fixup(sc, rsp, dst)
	struct txp_softc *sc;
	struct txp_rsp_desc *rsp, *dst;
{
	struct txp_rsp_desc *src = rsp;
	struct txp_hostvar *hv = sc->sc_hostvar;
	u_int32_t i, ridx;

	ridx = hv->hv_resp_read_idx;

	for (i = 0; i < rsp->rsp_numdesc + 1; i++) {
		if (dst != NULL)
			bcopy(src, dst++, sizeof(struct txp_rsp_desc));
		ridx += sizeof(struct txp_rsp_desc);
		if (ridx == sc->sc_rspring.size) {
			src = sc->sc_rspring.base;
			ridx = 0;
		} else
			src++;
		sc->sc_rspring.lastwrite = hv->hv_resp_read_idx = ridx;
	}

	hv->hv_resp_read_idx = ridx;
}

int
txp_cmd_desc_numfree(sc)
	struct txp_softc *sc;
{
	struct txp_hostvar *hv = sc->sc_hostvar;
	struct txp_boot_record *br = sc->sc_boot;
	u_int32_t widx, ridx, nfree;

	widx = sc->sc_cmdring.lastwrite;
	ridx = hv->hv_cmd_read_idx;

	if (widx == ridx) {
		/* Ring is completely free */
		nfree = br->br_cmd_siz - sizeof(struct txp_cmd_desc);
	} else {
		if (widx > ridx)
			nfree = br->br_cmd_siz -
			    (widx - ridx + sizeof(struct txp_cmd_desc));
		else
			nfree = ridx - widx - sizeof(struct txp_cmd_desc);
	}

	return (nfree / sizeof(struct txp_cmd_desc));
}

void
txp_stop(sc)
	struct txp_softc *sc;
{
	txp_command(sc, TXP_CMD_TX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 1);
	txp_command(sc, TXP_CMD_RX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 1);

	if (timeout_pending(&sc->sc_tick))
		timeout_del(&sc->sc_tick);
}

void
txp_watchdog(ifp)
	struct ifnet *ifp;
{
}

int
txp_ifmedia_upd(ifp)
	struct ifnet *ifp;
{
	struct txp_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_ifmedia;
	u_int16_t new_xcvr;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	if (IFM_SUBTYPE(ifm->ifm_media) == IFM_10_T) {
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			new_xcvr = TXP_XCVR_10_FDX;
		else
			new_xcvr = TXP_XCVR_10_HDX;
	} else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) {
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			new_xcvr = TXP_XCVR_100_FDX;
		else
			new_xcvr = TXP_XCVR_100_HDX;
	} else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
		new_xcvr = TXP_XCVR_AUTO;
	} else
		return (EINVAL);

	/* nothing to do */
	if (sc->sc_xcvr == new_xcvr)
		return (0);

	txp_command(sc, TXP_CMD_XCVR_SELECT, new_xcvr, 0, 0,
	    NULL, NULL, NULL, 0);
	sc->sc_xcvr = new_xcvr;

	return (0);
}
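
/*
 * Report media status.  The PHY registers are not read directly;
 * they are fetched from the firmware with TXP_CMD_PHY_MGMT_READ.
 * BMSR is read twice, presumably because its link-status bit is
 * latched low and the first read can return a stale link failure.
 */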
void
txp_ifmedia_sts(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct txp_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_ifmedia;
	u_int16_t bmsr, bmcr, anlpar;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMSR, 0,
	    &bmsr, NULL, NULL, 1))
		goto bail;
	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMSR, 0,
	    &bmsr, NULL, NULL, 1))
		goto bail;

	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMCR, 0,
	    &bmcr, NULL, NULL, 1))
		goto bail;

	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_ANLPAR, 0,
	    &anlpar, NULL, NULL, 1))
		goto bail;

	if (bmsr & BMSR_LINK)
		ifmr->ifm_status |= IFM_ACTIVE;

	if (bmcr & BMCR_ISO) {
		ifmr->ifm_active |= IFM_NONE;
		ifmr->ifm_status = 0;
		return;
	}

	if (bmcr & BMCR_LOOP)
		ifmr->ifm_active |= IFM_LOOP;

	if (bmcr & BMCR_AUTOEN) {
		if ((bmsr & BMSR_ACOMP) == 0) {
			ifmr->ifm_active |= IFM_NONE;
			return;
		}

		if (anlpar & ANLPAR_T4)
			ifmr->ifm_active |= IFM_100_T4;
		else if (anlpar & ANLPAR_TX_FD)
			ifmr->ifm_active |= IFM_100_TX|IFM_FDX;
		else if (anlpar & ANLPAR_TX)
			ifmr->ifm_active |= IFM_100_TX;
		else if (anlpar & ANLPAR_10_FD)
			ifmr->ifm_active |= IFM_10_T|IFM_FDX;
		else if (anlpar & ANLPAR_10)
			ifmr->ifm_active |= IFM_10_T;
		else
			ifmr->ifm_active |= IFM_NONE;
	} else
		ifmr->ifm_active = ifm->ifm_cur->ifm_media;
	return;

bail:
	ifmr->ifm_active |= IFM_NONE;
	ifmr->ifm_status &= ~IFM_AVALID;
}

void
txp_show_descriptor(d)
	void *d;
{
	struct txp_cmd_desc *cmd = d;
	struct txp_rsp_desc *rsp = d;
	struct txp_tx_desc *txd = d;
	struct txp_frag_desc *frgd = d;

	switch (cmd->cmd_flags & CMD_FLAGS_TYPE_M) {
	case CMD_FLAGS_TYPE_CMD:
		/* command descriptor */
		printf("[cmd flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
		    cmd->cmd_flags, cmd->cmd_numdesc, cmd->cmd_id, cmd->cmd_seq,
		    cmd->cmd_par1, cmd->cmd_par2, cmd->cmd_par3);
		break;
	case CMD_FLAGS_TYPE_RESP:
		/* response descriptor */
		printf("[rsp flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
		    rsp->rsp_flags, rsp->rsp_numdesc, rsp->rsp_id, rsp->rsp_seq,
		    rsp->rsp_par1, rsp->rsp_par2, rsp->rsp_par3);
		break;
	case CMD_FLAGS_TYPE_DATA:
		/* data header (assuming tx for now) */
		printf("[data flags 0x%x num %d totlen %d addr 0x%x/0x%x pflags 0x%x]",
		    txd->tx_flags, txd->tx_numdesc, txd->tx_totlen,
		    txd->tx_addrlo, txd->tx_addrhi, txd->tx_pflags);
		break;
	case CMD_FLAGS_TYPE_FRAG:
		/* fragment descriptor */
		printf("[frag flags 0x%x rsvd1 0x%x len %d addr 0x%x/0x%x rsvd2 0x%x]",
		    frgd->frag_flags, frgd->frag_rsvd1, frgd->frag_len,
		    frgd->frag_addrlo, frgd->frag_addrhi, frgd->frag_rsvd2);
		break;
	default:
		printf("[unknown(%x) flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
		    cmd->cmd_flags & CMD_FLAGS_TYPE_M,
		    cmd->cmd_flags, cmd->cmd_numdesc, cmd->cmd_id, cmd->cmd_seq,
		    cmd->cmd_par1, cmd->cmd_par2, cmd->cmd_par3);
		break;
	}
}
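
/*
 * Program the receive filter.  Each multicast address is run through
 * a bit-serial CRC (polynomial TXP_POLYNOMIAL), and the low 6 bits of
 * the result select one of the 64 bits in the two hash words handed
 * to the firmware with TXP_CMD_MCAST_HASH_MASK_WRITE.
 */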
void
txp_set_filter(sc)
	struct txp_softc *sc;
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	u_int32_t crc, carry, hashbit, hash[2];
	u_int16_t filter;
	u_int8_t octet;
	int i, j, mcnt = 0;
	struct ether_multi *enm;
	struct ether_multistep step;

	if (ifp->if_flags & IFF_PROMISC) {
		filter = TXP_RXFILT_PROMISC;
		goto setit;
	}

again:
	filter = TXP_RXFILT_DIRECT;

	if (ifp->if_flags & IFF_BROADCAST)
		filter |= TXP_RXFILT_BROADCAST;

	if (ifp->if_flags & IFF_ALLMULTI)
		filter |= TXP_RXFILT_ALLMULTI;
	else {
		hash[0] = hash[1] = 0;

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
				/*
				 * We must listen to a range of multicast
				 * addresses.  For now, just accept all
				 * multicasts, rather than trying to set only
				 * those filter bits needed to match the range.
				 * (At this time, the only use of address
				 * ranges is for IP multicast routing, for
				 * which the range is big enough to require
				 * all bits set.)
				 */
				ifp->if_flags |= IFF_ALLMULTI;
				goto again;
			}

			mcnt++;
			crc = 0xffffffff;

			for (i = 0; i < ETHER_ADDR_LEN; i++) {
				octet = enm->enm_addrlo[i];
				for (j = 0; j < 8; j++) {
					carry = ((crc & 0x80000000) ? 1 : 0) ^
					    (octet & 1);
					crc <<= 1;
					octet >>= 1;
					if (carry)
						crc = (crc ^ TXP_POLYNOMIAL) |
						    carry;
				}
			}
			hashbit = (u_int16_t)(crc & (64 - 1));
			hash[hashbit / 32] |= (1 << hashbit % 32);
			ETHER_NEXT_MULTI(step, enm);
		}

		if (mcnt > 0) {
			filter |= TXP_RXFILT_HASHMULTI;
			txp_command(sc, TXP_CMD_MCAST_HASH_MASK_WRITE,
			    2, hash[0], hash[1], NULL, NULL, NULL, 0);
		}
	}

setit:
	txp_command(sc, TXP_CMD_RX_FILTER_WRITE, filter, 0, 0,
	    NULL, NULL, NULL, 1);
}
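
/*
 * Negotiate offload features with the firmware: TXP_CMD_OFFLOAD_READ
 * reports the supported offloads, the driver keeps the subset it is
 * willing to use, and TXP_CMD_OFFLOAD_WRITE commits the result.
 */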
void
txp_capabilities(sc)
	struct txp_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct txp_rsp_desc *rsp = NULL;
	struct txp_ext_desc *ext;

	if (txp_command2(sc, TXP_CMD_OFFLOAD_READ, 0, 0, 0, NULL, 0, &rsp, 1))
		goto out;

	if (rsp->rsp_numdesc != 1)
		goto out;
	ext = (struct txp_ext_desc *)(rsp + 1);

	sc->sc_tx_capability = ext->ext_1 & OFFLOAD_MASK;
	sc->sc_rx_capability = ext->ext_2 & OFFLOAD_MASK;

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_VLAN) {
		sc->sc_tx_capability |= OFFLOAD_VLAN;
		sc->sc_rx_capability |= OFFLOAD_VLAN;
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
	}
#endif

#if 0
	/* not ready yet */
	if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_IPSEC) {
		sc->sc_tx_capability |= OFFLOAD_IPSEC;
		sc->sc_rx_capability |= OFFLOAD_IPSEC;
		ifp->if_capabilities |= IFCAP_IPSEC;
	}
#endif

	if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_IPCKSUM) {
		sc->sc_tx_capability |= OFFLOAD_IPCKSUM;
		sc->sc_rx_capability |= OFFLOAD_IPCKSUM;
		ifp->if_capabilities |= IFCAP_CSUM_IPv4;
	}

	if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_TCPCKSUM) {
		sc->sc_rx_capability |= OFFLOAD_TCPCKSUM;
#ifdef TRY_TX_TCP_CSUM
		sc->sc_tx_capability |= OFFLOAD_TCPCKSUM;
		ifp->if_capabilities |= IFCAP_CSUM_TCPv4;
#endif
	}

	if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_UDPCKSUM) {
		sc->sc_rx_capability |= OFFLOAD_UDPCKSUM;
#ifdef TRY_TX_UDP_CSUM
		sc->sc_tx_capability |= OFFLOAD_UDPCKSUM;
		ifp->if_capabilities |= IFCAP_CSUM_UDPv4;
#endif
	}

	if (txp_command(sc, TXP_CMD_OFFLOAD_WRITE, 0,
	    sc->sc_tx_capability, sc->sc_rx_capability, NULL, NULL, NULL, 1))
		goto out;

out:
	if (rsp != NULL)
		free(rsp, M_DEVBUF);
}