1 /* $OpenBSD: if_txp.c,v 1.69 2003/06/02 19:03:37 jason Exp $ */ 2 3 /* 4 * Copyright (c) 2001 5 * Jason L. Wright <jason@thought.net>, Theo de Raadt, and 6 * Aaron Campbell <aaron@monkey.org>. All rights reserved. 7 * 8 * Redistribution and use in source and binary forms, with or without 9 * modification, are permitted provided that the following conditions 10 * are met: 11 * 1. Redistributions of source code must retain the above copyright 12 * notice, this list of conditions and the following disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR 18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED 19 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR THE VOICES IN THEIR HEADS 21 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 22 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 23 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 24 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 25 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 26 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF 27 * THE POSSIBILITY OF SUCH DAMAGE. 28 */ 29 30 /* 31 * Driver for 3c990 (Typhoon) Ethernet ASIC 32 */ 33 34 #include "bpfilter.h" 35 #include "vlan.h" 36 37 #include <sys/param.h> 38 #include <sys/systm.h> 39 #include <sys/sockio.h> 40 #include <sys/mbuf.h> 41 #include <sys/malloc.h> 42 #include <sys/kernel.h> 43 #include <sys/socket.h> 44 #include <sys/device.h> 45 #include <sys/timeout.h> 46 47 #include <net/if.h> 48 #include <net/if_dl.h> 49 #include <net/if_types.h> 50 51 #ifdef INET 52 #include <netinet/in.h> 53 #include <netinet/in_systm.h> 54 #include <netinet/in_var.h> 55 #include <netinet/ip.h> 56 #include <netinet/if_ether.h> 57 #endif 58 59 #include <net/if_media.h> 60 61 #if NBPFILTER > 0 62 #include <net/bpf.h> 63 #endif 64 65 #if NVLAN > 0 66 #include <net/if_vlan_var.h> 67 #endif 68 69 #include <uvm/uvm_extern.h> /* for vtophys */ 70 #include <machine/bus.h> 71 72 #include <dev/mii/mii.h> 73 #include <dev/mii/miivar.h> 74 #include <dev/pci/pcireg.h> 75 #include <dev/pci/pcivar.h> 76 #include <dev/pci/pcidevs.h> 77 78 #include <dev/pci/if_txpreg.h> 79 80 #include <dev/microcode/typhoon/3c990img.h> 81 82 /* 83 * These currently break the 3c990 firmware, hopefully will be resolved 84 * at some point. 85 */ 86 #undef TRY_TX_UDP_CSUM 87 #undef TRY_TX_TCP_CSUM 88 89 int txp_probe(struct device *, void *, void *); 90 void txp_attach(struct device *, struct device *, void *); 91 int txp_intr(void *); 92 void txp_tick(void *); 93 void txp_shutdown(void *); 94 int txp_ioctl(struct ifnet *, u_long, caddr_t); 95 void txp_start(struct ifnet *); 96 void txp_stop(struct txp_softc *); 97 void txp_init(struct txp_softc *); 98 void txp_watchdog(struct ifnet *); 99 100 int txp_chip_init(struct txp_softc *); 101 int txp_reset_adapter(struct txp_softc *); 102 int txp_download_fw(struct txp_softc *); 103 int txp_download_fw_wait(struct txp_softc *); 104 int txp_download_fw_section(struct txp_softc *, 105 struct txp_fw_section_header *, int); 106 int txp_alloc_rings(struct txp_softc *); 107 void txp_dma_free(struct txp_softc *, struct txp_dma_alloc *); 108 int txp_dma_malloc(struct txp_softc *, bus_size_t, struct txp_dma_alloc *, int); 109 void txp_set_filter(struct txp_softc *); 110 111 int txp_cmd_desc_numfree(struct txp_softc *); 112 int txp_command(struct txp_softc *, u_int16_t, u_int16_t, u_int32_t, 113 u_int32_t, u_int16_t *, u_int32_t *, u_int32_t *, int); 114 int txp_command2(struct txp_softc *, u_int16_t, u_int16_t, 115 u_int32_t, u_int32_t, struct txp_ext_desc *, u_int8_t, 116 struct txp_rsp_desc **, int); 117 int txp_response(struct txp_softc *, u_int32_t, u_int16_t, u_int16_t, 118 struct txp_rsp_desc **); 119 void txp_rsp_fixup(struct txp_softc *, struct txp_rsp_desc *, 120 struct txp_rsp_desc *); 121 void txp_capabilities(struct txp_softc *); 122 123 void txp_ifmedia_sts(struct ifnet *, struct ifmediareq *); 124 int txp_ifmedia_upd(struct ifnet *); 125 void txp_show_descriptor(void *); 126 void txp_tx_reclaim(struct txp_softc *, struct txp_tx_ring *, 127 struct txp_dma_alloc *); 128 void txp_rxbuf_reclaim(struct txp_softc *); 129 void txp_rx_reclaim(struct txp_softc *, struct txp_rx_ring *, 130 struct txp_dma_alloc *); 131 132 struct cfattach txp_ca = { 133 sizeof(struct txp_softc), txp_probe, txp_attach, 134 }; 135 136 struct cfdriver txp_cd = { 137 0, "txp", DV_IFNET 138 }; 139 140 const struct pci_matchid txp_devices[] = { 141 { PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990 }, 142 { PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990TX95 }, 143 { PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990TX97 }, 144 { PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990SVR95 }, 145 { PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990SVR97 }, 146 { PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C990BTXM }, 147 { PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3C990BSVR }, 148 { PCI_VENDOR_3COM, PCI_PRODUCT_3COM_3CR990FX }, 149 }; 150 151 int 152 txp_probe(parent, match, aux) 153 struct device *parent; 154 void *match, *aux; 155 { 156 return (pci_matchbyid((struct pci_attach_args *)aux, txp_devices, 157 sizeof(txp_devices)/sizeof(txp_devices[0]))); 158 } 159 160 void 161 txp_attach(parent, self, aux) 162 struct device *parent, *self; 163 void *aux; 164 { 165 struct txp_softc *sc = (struct txp_softc *)self; 166 struct pci_attach_args *pa = aux; 167 pci_chipset_tag_t pc = pa->pa_pc; 168 pci_intr_handle_t ih; 169 const char *intrstr = NULL; 170 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 171 bus_size_t iosize; 172 u_int32_t command; 173 u_int16_t p1; 174 u_int32_t p2; 175 176 sc->sc_cold = 1; 177 178 command = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 179 180 if (!(command & PCI_COMMAND_MASTER_ENABLE)) { 181 printf(": failed to enable bus mastering\n"); 182 return; 183 } 184 185 if (!(command & PCI_COMMAND_MEM_ENABLE)) { 186 printf(": failed to enable memory mapping\n"); 187 return; 188 } 189 if (pci_mapreg_map(pa, TXP_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0, 190 &sc->sc_bt, &sc->sc_bh, NULL, &iosize, 0)) { 191 printf(": can't map mem space %d\n", 0); 192 return; 193 } 194 195 sc->sc_dmat = pa->pa_dmat; 196 197 /* 198 * Allocate our interrupt. 199 */ 200 if (pci_intr_map(pa, &ih)) { 201 printf(": couldn't map interrupt\n"); 202 return; 203 } 204 205 intrstr = pci_intr_string(pc, ih); 206 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, txp_intr, sc, 207 self->dv_xname); 208 if (sc->sc_ih == NULL) { 209 printf(": couldn't establish interrupt"); 210 if (intrstr != NULL) 211 printf(" at %s", intrstr); 212 printf("\n"); 213 return; 214 } 215 printf(": %s", intrstr); 216 217 if (txp_chip_init(sc)) 218 return; 219 220 if (txp_download_fw(sc)) 221 return; 222 223 if (txp_alloc_rings(sc)) 224 return; 225 226 if (txp_command(sc, TXP_CMD_MAX_PKT_SIZE_WRITE, TXP_MAX_PKTLEN, 0, 0, 227 NULL, NULL, NULL, 1)) 228 return; 229 230 if (txp_command(sc, TXP_CMD_STATION_ADDRESS_READ, 0, 0, 0, 231 &p1, &p2, NULL, 1)) 232 return; 233 234 txp_set_filter(sc); 235 236 p1 = htole16(p1); 237 sc->sc_arpcom.ac_enaddr[0] = ((u_int8_t *)&p1)[1]; 238 sc->sc_arpcom.ac_enaddr[1] = ((u_int8_t *)&p1)[0]; 239 p2 = htole32(p2); 240 sc->sc_arpcom.ac_enaddr[2] = ((u_int8_t *)&p2)[3]; 241 sc->sc_arpcom.ac_enaddr[3] = ((u_int8_t *)&p2)[2]; 242 sc->sc_arpcom.ac_enaddr[4] = ((u_int8_t *)&p2)[1]; 243 sc->sc_arpcom.ac_enaddr[5] = ((u_int8_t *)&p2)[0]; 244 245 printf(" address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr)); 246 sc->sc_cold = 0; 247 248 ifmedia_init(&sc->sc_ifmedia, 0, txp_ifmedia_upd, txp_ifmedia_sts); 249 ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_10_T, 0, NULL); 250 ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL); 251 ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL); 252 ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_TX, 0, NULL); 253 ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_TX|IFM_HDX, 0, NULL); 254 ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL); 255 ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL); 256 257 sc->sc_xcvr = TXP_XCVR_AUTO; 258 txp_command(sc, TXP_CMD_XCVR_SELECT, TXP_XCVR_AUTO, 0, 0, 259 NULL, NULL, NULL, 0); 260 ifmedia_set(&sc->sc_ifmedia, IFM_ETHER|IFM_AUTO); 261 262 ifp->if_softc = sc; 263 ifp->if_mtu = ETHERMTU; 264 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 265 ifp->if_ioctl = txp_ioctl; 266 ifp->if_output = ether_output; 267 ifp->if_start = txp_start; 268 ifp->if_watchdog = txp_watchdog; 269 ifp->if_baudrate = 10000000; 270 IFQ_SET_MAXLEN(&ifp->if_snd, TX_ENTRIES); 271 IFQ_SET_READY(&ifp->if_snd); 272 ifp->if_capabilities = 0; 273 bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ); 274 275 txp_capabilities(sc); 276 277 timeout_set(&sc->sc_tick, txp_tick, sc); 278 279 /* 280 * Attach us everywhere 281 */ 282 if_attach(ifp); 283 ether_ifattach(ifp); 284 285 shutdownhook_establish(txp_shutdown, sc); 286 } 287 288 int 289 txp_chip_init(sc) 290 struct txp_softc *sc; 291 { 292 /* disable interrupts */ 293 WRITE_REG(sc, TXP_IER, 0); 294 WRITE_REG(sc, TXP_IMR, 295 TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT | 296 TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 | 297 TXP_INT_LATCH); 298 299 /* ack all interrupts */ 300 WRITE_REG(sc, TXP_ISR, TXP_INT_RESERVED | TXP_INT_LATCH | 301 TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 | 302 TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT | 303 TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 | 304 TXP_INT_A2H_3 | TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0); 305 306 if (txp_reset_adapter(sc)) 307 return (-1); 308 309 /* disable interrupts */ 310 WRITE_REG(sc, TXP_IER, 0); 311 WRITE_REG(sc, TXP_IMR, 312 TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT | 313 TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 | 314 TXP_INT_LATCH); 315 316 /* ack all interrupts */ 317 WRITE_REG(sc, TXP_ISR, TXP_INT_RESERVED | TXP_INT_LATCH | 318 TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 | 319 TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT | 320 TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 | 321 TXP_INT_A2H_3 | TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0); 322 323 return (0); 324 } 325 326 int 327 txp_reset_adapter(sc) 328 struct txp_softc *sc; 329 { 330 u_int32_t r; 331 int i; 332 333 WRITE_REG(sc, TXP_SRR, TXP_SRR_ALL); 334 DELAY(1000); 335 WRITE_REG(sc, TXP_SRR, 0); 336 337 /* Should wait max 6 seconds */ 338 for (i = 0; i < 6000; i++) { 339 r = READ_REG(sc, TXP_A2H_0); 340 if (r == STAT_WAITING_FOR_HOST_REQUEST) 341 break; 342 DELAY(1000); 343 } 344 345 if (r != STAT_WAITING_FOR_HOST_REQUEST) { 346 printf("%s: reset hung\n", TXP_DEVNAME(sc)); 347 return (-1); 348 } 349 350 return (0); 351 } 352 353 int 354 txp_download_fw(sc) 355 struct txp_softc *sc; 356 { 357 struct txp_fw_file_header *fileheader; 358 struct txp_fw_section_header *secthead; 359 int sect; 360 u_int32_t r, i, ier, imr; 361 362 ier = READ_REG(sc, TXP_IER); 363 WRITE_REG(sc, TXP_IER, ier | TXP_INT_A2H_0); 364 365 imr = READ_REG(sc, TXP_IMR); 366 WRITE_REG(sc, TXP_IMR, imr | TXP_INT_A2H_0); 367 368 for (i = 0; i < 10000; i++) { 369 r = READ_REG(sc, TXP_A2H_0); 370 if (r == STAT_WAITING_FOR_HOST_REQUEST) 371 break; 372 DELAY(50); 373 } 374 if (r != STAT_WAITING_FOR_HOST_REQUEST) { 375 printf(": not waiting for host request\n"); 376 return (-1); 377 } 378 379 /* Ack the status */ 380 WRITE_REG(sc, TXP_ISR, TXP_INT_A2H_0); 381 382 fileheader = (struct txp_fw_file_header *)tc990image; 383 if (bcmp("TYPHOON", fileheader->magicid, sizeof(fileheader->magicid))) { 384 printf(": fw invalid magic\n"); 385 return (-1); 386 } 387 388 /* Tell boot firmware to get ready for image */ 389 WRITE_REG(sc, TXP_H2A_1, letoh32(fileheader->addr)); 390 WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_RUNTIME_IMAGE); 391 392 if (txp_download_fw_wait(sc)) { 393 printf("%s: fw wait failed, initial\n", sc->sc_dev.dv_xname); 394 return (-1); 395 } 396 397 secthead = (struct txp_fw_section_header *)(((u_int8_t *)tc990image) + 398 sizeof(struct txp_fw_file_header)); 399 400 for (sect = 0; sect < letoh32(fileheader->nsections); sect++) { 401 if (txp_download_fw_section(sc, secthead, sect)) 402 return (-1); 403 secthead = (struct txp_fw_section_header *) 404 (((u_int8_t *)secthead) + letoh32(secthead->nbytes) + 405 sizeof(*secthead)); 406 } 407 408 WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_DOWNLOAD_COMPLETE); 409 410 for (i = 0; i < 10000; i++) { 411 r = READ_REG(sc, TXP_A2H_0); 412 if (r == STAT_WAITING_FOR_BOOT) 413 break; 414 DELAY(50); 415 } 416 if (r != STAT_WAITING_FOR_BOOT) { 417 printf(": not waiting for boot\n"); 418 return (-1); 419 } 420 421 WRITE_REG(sc, TXP_IER, ier); 422 WRITE_REG(sc, TXP_IMR, imr); 423 424 return (0); 425 } 426 427 int 428 txp_download_fw_wait(sc) 429 struct txp_softc *sc; 430 { 431 u_int32_t i, r; 432 433 for (i = 0; i < 10000; i++) { 434 r = READ_REG(sc, TXP_ISR); 435 if (r & TXP_INT_A2H_0) 436 break; 437 DELAY(50); 438 } 439 440 if (!(r & TXP_INT_A2H_0)) { 441 printf(": fw wait failed comm0\n"); 442 return (-1); 443 } 444 445 WRITE_REG(sc, TXP_ISR, TXP_INT_A2H_0); 446 447 r = READ_REG(sc, TXP_A2H_0); 448 if (r != STAT_WAITING_FOR_SEGMENT) { 449 printf(": fw not waiting for segment\n"); 450 return (-1); 451 } 452 return (0); 453 } 454 455 int 456 txp_download_fw_section(sc, sect, sectnum) 457 struct txp_softc *sc; 458 struct txp_fw_section_header *sect; 459 int sectnum; 460 { 461 struct txp_dma_alloc dma; 462 int rseg, err = 0; 463 struct mbuf m; 464 u_int16_t csum; 465 466 /* Skip zero length sections */ 467 if (sect->nbytes == 0) 468 return (0); 469 470 /* Make sure we aren't past the end of the image */ 471 rseg = ((u_int8_t *)sect) - ((u_int8_t *)tc990image); 472 if (rseg >= sizeof(tc990image)) { 473 printf(": fw invalid section address, section %d\n", sectnum); 474 return (-1); 475 } 476 477 /* Make sure this section doesn't go past the end */ 478 rseg += letoh32(sect->nbytes); 479 if (rseg >= sizeof(tc990image)) { 480 printf(": fw truncated section %d\n", sectnum); 481 return (-1); 482 } 483 484 /* map a buffer, copy segment to it, get physaddr */ 485 if (txp_dma_malloc(sc, letoh32(sect->nbytes), &dma, 0)) { 486 printf(": fw dma malloc failed, section %d\n", sectnum); 487 return (-1); 488 } 489 490 bcopy(((u_int8_t *)sect) + sizeof(*sect), dma.dma_vaddr, 491 letoh32(sect->nbytes)); 492 493 /* 494 * dummy up mbuf and verify section checksum 495 */ 496 m.m_type = MT_DATA; 497 m.m_next = m.m_nextpkt = NULL; 498 m.m_len = letoh32(sect->nbytes); 499 m.m_data = dma.dma_vaddr; 500 m.m_flags = 0; 501 csum = in_cksum(&m, letoh32(sect->nbytes)); 502 if (csum != sect->cksum) { 503 printf(": fw section %d, bad cksum (expected 0x%x got 0x%x)\n", 504 sectnum, sect->cksum, csum); 505 err = -1; 506 goto bail; 507 } 508 509 bus_dmamap_sync(sc->sc_dmat, dma.dma_map, 0, 510 dma.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 511 512 WRITE_REG(sc, TXP_H2A_1, letoh32(sect->nbytes)); 513 WRITE_REG(sc, TXP_H2A_2, letoh16(sect->cksum)); 514 WRITE_REG(sc, TXP_H2A_3, letoh32(sect->addr)); 515 WRITE_REG(sc, TXP_H2A_4, dma.dma_paddr >> 32); 516 WRITE_REG(sc, TXP_H2A_5, dma.dma_paddr & 0xffffffff); 517 WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_SEGMENT_AVAILABLE); 518 519 if (txp_download_fw_wait(sc)) { 520 printf("%s: fw wait failed, section %d\n", 521 sc->sc_dev.dv_xname, sectnum); 522 err = -1; 523 } 524 525 bus_dmamap_sync(sc->sc_dmat, dma.dma_map, 0, 526 dma.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 527 528 bail: 529 txp_dma_free(sc, &dma); 530 531 return (err); 532 } 533 534 int 535 txp_intr(vsc) 536 void *vsc; 537 { 538 struct txp_softc *sc = vsc; 539 struct txp_hostvar *hv = sc->sc_hostvar; 540 u_int32_t isr; 541 int claimed = 0; 542 543 /* mask all interrupts */ 544 WRITE_REG(sc, TXP_IMR, TXP_INT_RESERVED | TXP_INT_SELF | 545 TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 | 546 TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0 | 547 TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 | 548 TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT | TXP_INT_LATCH); 549 550 bus_dmamap_sync(sc->sc_dmat, sc->sc_host_dma.dma_map, 0, 551 sizeof(struct txp_hostvar), BUS_DMASYNC_POSTWRITE|BUS_DMASYNC_POSTREAD); 552 553 isr = READ_REG(sc, TXP_ISR); 554 while (isr) { 555 claimed = 1; 556 WRITE_REG(sc, TXP_ISR, isr); 557 558 if ((*sc->sc_rxhir.r_roff) != (*sc->sc_rxhir.r_woff)) 559 txp_rx_reclaim(sc, &sc->sc_rxhir, &sc->sc_rxhiring_dma); 560 if ((*sc->sc_rxlor.r_roff) != (*sc->sc_rxlor.r_woff)) 561 txp_rx_reclaim(sc, &sc->sc_rxlor, &sc->sc_rxloring_dma); 562 563 if (hv->hv_rx_buf_write_idx == hv->hv_rx_buf_read_idx) 564 txp_rxbuf_reclaim(sc); 565 566 if (sc->sc_txhir.r_cnt && (sc->sc_txhir.r_cons != 567 TXP_OFFSET2IDX(letoh32(*(sc->sc_txhir.r_off))))) 568 txp_tx_reclaim(sc, &sc->sc_txhir, &sc->sc_txhiring_dma); 569 570 if (sc->sc_txlor.r_cnt && (sc->sc_txlor.r_cons != 571 TXP_OFFSET2IDX(letoh32(*(sc->sc_txlor.r_off))))) 572 txp_tx_reclaim(sc, &sc->sc_txlor, &sc->sc_txloring_dma); 573 574 isr = READ_REG(sc, TXP_ISR); 575 } 576 577 bus_dmamap_sync(sc->sc_dmat, sc->sc_host_dma.dma_map, 0, 578 sizeof(struct txp_hostvar), BUS_DMASYNC_POSTWRITE|BUS_DMASYNC_POSTREAD); 579 580 /* unmask all interrupts */ 581 WRITE_REG(sc, TXP_IMR, TXP_INT_A2H_3); 582 583 txp_start(&sc->sc_arpcom.ac_if); 584 585 return (claimed); 586 } 587 588 void 589 txp_rx_reclaim(sc, r, dma) 590 struct txp_softc *sc; 591 struct txp_rx_ring *r; 592 struct txp_dma_alloc *dma; 593 { 594 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 595 struct txp_rx_desc *rxd; 596 struct mbuf *m; 597 struct txp_swdesc *sd; 598 u_int32_t roff, woff; 599 int sumflags = 0, idx; 600 601 roff = letoh32(*r->r_roff); 602 woff = letoh32(*r->r_woff); 603 idx = roff / sizeof(struct txp_rx_desc); 604 rxd = r->r_desc + idx; 605 606 while (roff != woff) { 607 608 bus_dmamap_sync(sc->sc_dmat, dma->dma_map, 609 idx * sizeof(struct txp_rx_desc), sizeof(struct txp_rx_desc), 610 BUS_DMASYNC_POSTREAD); 611 612 if (rxd->rx_flags & RX_FLAGS_ERROR) { 613 printf("%s: error 0x%x\n", sc->sc_dev.dv_xname, 614 letoh32(rxd->rx_stat)); 615 ifp->if_ierrors++; 616 goto next; 617 } 618 619 /* retrieve stashed pointer */ 620 bcopy((u_long *)&rxd->rx_vaddrlo, &sd, sizeof(sd)); 621 622 bus_dmamap_sync(sc->sc_dmat, sd->sd_map, 0, 623 sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTREAD); 624 bus_dmamap_unload(sc->sc_dmat, sd->sd_map); 625 bus_dmamap_destroy(sc->sc_dmat, sd->sd_map); 626 m = sd->sd_mbuf; 627 free(sd, M_DEVBUF); 628 m->m_pkthdr.len = m->m_len = letoh16(rxd->rx_len); 629 630 #ifdef __STRICT_ALIGNMENT 631 { 632 /* 633 * XXX Nice chip, except it won't accept "off by 2" 634 * buffers, so we're force to copy. Supposedly 635 * this will be fixed in a newer firmware rev 636 * and this will be temporary. 637 */ 638 struct mbuf *mnew; 639 640 MGETHDR(mnew, M_DONTWAIT, MT_DATA); 641 if (mnew == NULL) { 642 m_freem(m); 643 goto next; 644 } 645 if (m->m_len > (MHLEN - 2)) { 646 MCLGET(mnew, M_DONTWAIT); 647 if (!(mnew->m_flags & M_EXT)) { 648 m_freem(mnew); 649 m_freem(m); 650 goto next; 651 } 652 } 653 mnew->m_pkthdr.rcvif = ifp; 654 mnew->m_pkthdr.len = mnew->m_len = m->m_len; 655 mnew->m_data += 2; 656 bcopy(m->m_data, mnew->m_data, m->m_len); 657 m_freem(m); 658 m = mnew; 659 } 660 #endif 661 662 #if NBPFILTER > 0 663 /* 664 * Handle BPF listeners. Let the BPF user see the packet. 665 */ 666 if (ifp->if_bpf) 667 bpf_mtap(ifp->if_bpf, m); 668 #endif 669 670 if (rxd->rx_stat & htole32(RX_STAT_IPCKSUMBAD)) 671 sumflags |= M_IPV4_CSUM_IN_BAD; 672 else if (rxd->rx_stat & htole32(RX_STAT_IPCKSUMGOOD)) 673 sumflags |= M_IPV4_CSUM_IN_OK; 674 675 if (rxd->rx_stat & htole32(RX_STAT_TCPCKSUMBAD)) 676 sumflags |= M_TCP_CSUM_IN_BAD; 677 else if (rxd->rx_stat & htole32(RX_STAT_TCPCKSUMGOOD)) 678 sumflags |= M_TCP_CSUM_IN_OK; 679 680 if (rxd->rx_stat & htole32(RX_STAT_UDPCKSUMBAD)) 681 sumflags |= M_UDP_CSUM_IN_BAD; 682 else if (rxd->rx_stat & htole32(RX_STAT_UDPCKSUMGOOD)) 683 sumflags |= M_UDP_CSUM_IN_OK; 684 685 m->m_pkthdr.csum = sumflags; 686 687 #if NVLAN > 0 688 if (rxd->rx_stat & htole32(RX_STAT_VLAN)) { 689 if (vlan_input_tag(m, htons(rxd->rx_vlan >> 16)) < 0) 690 ifp->if_noproto++; 691 goto next; 692 } 693 #endif 694 695 ether_input_mbuf(ifp, m); 696 697 next: 698 bus_dmamap_sync(sc->sc_dmat, dma->dma_map, 699 idx * sizeof(struct txp_rx_desc), sizeof(struct txp_rx_desc), 700 BUS_DMASYNC_PREREAD); 701 702 roff += sizeof(struct txp_rx_desc); 703 if (roff == (RX_ENTRIES * sizeof(struct txp_rx_desc))) { 704 idx = 0; 705 roff = 0; 706 rxd = r->r_desc; 707 } else { 708 idx++; 709 rxd++; 710 } 711 woff = letoh32(*r->r_woff); 712 } 713 714 *r->r_roff = htole32(woff); 715 } 716 717 void 718 txp_rxbuf_reclaim(sc) 719 struct txp_softc *sc; 720 { 721 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 722 struct txp_hostvar *hv = sc->sc_hostvar; 723 struct txp_rxbuf_desc *rbd; 724 struct txp_swdesc *sd; 725 u_int32_t i, end; 726 727 end = TXP_OFFSET2IDX(letoh32(hv->hv_rx_buf_read_idx)); 728 i = TXP_OFFSET2IDX(letoh32(hv->hv_rx_buf_write_idx)); 729 730 if (++i == RXBUF_ENTRIES) 731 i = 0; 732 733 rbd = sc->sc_rxbufs + i; 734 735 while (i != end) { 736 sd = (struct txp_swdesc *)malloc(sizeof(struct txp_swdesc), 737 M_DEVBUF, M_NOWAIT); 738 if (sd == NULL) 739 break; 740 741 MGETHDR(sd->sd_mbuf, M_DONTWAIT, MT_DATA); 742 if (sd->sd_mbuf == NULL) 743 goto err_sd; 744 745 MCLGET(sd->sd_mbuf, M_DONTWAIT); 746 if ((sd->sd_mbuf->m_flags & M_EXT) == 0) 747 goto err_mbuf; 748 sd->sd_mbuf->m_pkthdr.rcvif = ifp; 749 sd->sd_mbuf->m_pkthdr.len = sd->sd_mbuf->m_len = MCLBYTES; 750 if (bus_dmamap_create(sc->sc_dmat, TXP_MAX_PKTLEN, 1, 751 TXP_MAX_PKTLEN, 0, BUS_DMA_NOWAIT, &sd->sd_map)) 752 goto err_mbuf; 753 if (bus_dmamap_load_mbuf(sc->sc_dmat, sd->sd_map, sd->sd_mbuf, 754 BUS_DMA_NOWAIT)) { 755 bus_dmamap_destroy(sc->sc_dmat, sd->sd_map); 756 goto err_mbuf; 757 } 758 759 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxbufring_dma.dma_map, 760 i * sizeof(struct txp_rxbuf_desc), 761 sizeof(struct txp_rxbuf_desc), BUS_DMASYNC_POSTWRITE); 762 763 /* stash away pointer */ 764 bcopy(&sd, (u_long *)&rbd->rb_vaddrlo, sizeof(sd)); 765 766 rbd->rb_paddrlo = ((u_int64_t)sd->sd_map->dm_segs[0].ds_addr) 767 & 0xffffffff; 768 rbd->rb_paddrhi = ((u_int64_t)sd->sd_map->dm_segs[0].ds_addr) 769 >> 32; 770 771 bus_dmamap_sync(sc->sc_dmat, sd->sd_map, 0, 772 sd->sd_map->dm_mapsize, BUS_DMASYNC_PREREAD); 773 774 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxbufring_dma.dma_map, 775 i * sizeof(struct txp_rxbuf_desc), 776 sizeof(struct txp_rxbuf_desc), BUS_DMASYNC_PREWRITE); 777 778 hv->hv_rx_buf_write_idx = htole32(TXP_IDX2OFFSET(i)); 779 780 if (++i == RXBUF_ENTRIES) { 781 i = 0; 782 rbd = sc->sc_rxbufs; 783 } else 784 rbd++; 785 } 786 return; 787 788 err_mbuf: 789 m_freem(sd->sd_mbuf); 790 err_sd: 791 free(sd, M_DEVBUF); 792 } 793 794 /* 795 * Reclaim mbufs and entries from a transmit ring. 796 */ 797 void 798 txp_tx_reclaim(sc, r, dma) 799 struct txp_softc *sc; 800 struct txp_tx_ring *r; 801 struct txp_dma_alloc *dma; 802 { 803 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 804 u_int32_t idx = TXP_OFFSET2IDX(letoh32(*(r->r_off))); 805 u_int32_t cons = r->r_cons, cnt = r->r_cnt; 806 struct txp_tx_desc *txd = r->r_desc + cons; 807 struct txp_swdesc *sd = sc->sc_txd + cons; 808 struct mbuf *m; 809 810 while (cons != idx) { 811 if (cnt == 0) 812 break; 813 814 bus_dmamap_sync(sc->sc_dmat, dma->dma_map, 815 cons * sizeof(struct txp_tx_desc), 816 sizeof(struct txp_tx_desc), 817 BUS_DMASYNC_POSTWRITE); 818 819 if ((txd->tx_flags & TX_FLAGS_TYPE_M) == 820 TX_FLAGS_TYPE_DATA) { 821 bus_dmamap_sync(sc->sc_dmat, sd->sd_map, 0, 822 sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE); 823 bus_dmamap_unload(sc->sc_dmat, sd->sd_map); 824 m = sd->sd_mbuf; 825 if (m != NULL) { 826 m_freem(m); 827 txd->tx_addrlo = 0; 828 txd->tx_addrhi = 0; 829 ifp->if_opackets++; 830 } 831 } 832 ifp->if_flags &= ~IFF_OACTIVE; 833 834 if (++cons == TX_ENTRIES) { 835 txd = r->r_desc; 836 cons = 0; 837 sd = sc->sc_txd; 838 } else { 839 txd++; 840 sd++; 841 } 842 843 cnt--; 844 } 845 846 r->r_cons = cons; 847 r->r_cnt = cnt; 848 if (cnt == 0) 849 ifp->if_timer = 0; 850 } 851 852 void 853 txp_shutdown(vsc) 854 void *vsc; 855 { 856 struct txp_softc *sc = (struct txp_softc *)vsc; 857 858 /* mask all interrupts */ 859 WRITE_REG(sc, TXP_IMR, 860 TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT | 861 TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 | 862 TXP_INT_LATCH); 863 864 txp_command(sc, TXP_CMD_TX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 0); 865 txp_command(sc, TXP_CMD_RX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 0); 866 txp_command(sc, TXP_CMD_HALT, 0, 0, 0, NULL, NULL, NULL, 0); 867 } 868 869 int 870 txp_alloc_rings(sc) 871 struct txp_softc *sc; 872 { 873 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 874 struct txp_boot_record *boot; 875 struct txp_swdesc *sd; 876 u_int32_t r; 877 int i, j; 878 879 /* boot record */ 880 if (txp_dma_malloc(sc, sizeof(struct txp_boot_record), &sc->sc_boot_dma, 881 BUS_DMA_COHERENT)) { 882 printf(": can't allocate boot record\n"); 883 return (-1); 884 } 885 boot = (struct txp_boot_record *)sc->sc_boot_dma.dma_vaddr; 886 bzero(boot, sizeof(*boot)); 887 sc->sc_boot = boot; 888 889 /* host variables */ 890 if (txp_dma_malloc(sc, sizeof(struct txp_hostvar), &sc->sc_host_dma, 891 BUS_DMA_COHERENT)) { 892 printf(": can't allocate host ring\n"); 893 goto bail_boot; 894 } 895 bzero(sc->sc_host_dma.dma_vaddr, sizeof(struct txp_hostvar)); 896 boot->br_hostvar_lo = htole32(sc->sc_host_dma.dma_paddr & 0xffffffff); 897 boot->br_hostvar_hi = htole32(sc->sc_host_dma.dma_paddr >> 32); 898 sc->sc_hostvar = (struct txp_hostvar *)sc->sc_host_dma.dma_vaddr; 899 900 /* high priority tx ring */ 901 if (txp_dma_malloc(sc, sizeof(struct txp_tx_desc) * TX_ENTRIES, 902 &sc->sc_txhiring_dma, BUS_DMA_COHERENT)) { 903 printf(": can't allocate high tx ring\n"); 904 goto bail_host; 905 } 906 bzero(sc->sc_txhiring_dma.dma_vaddr, sizeof(struct txp_tx_desc) * TX_ENTRIES); 907 boot->br_txhipri_lo = htole32(sc->sc_txhiring_dma.dma_paddr & 0xffffffff); 908 boot->br_txhipri_hi = htole32(sc->sc_txhiring_dma.dma_paddr >> 32); 909 boot->br_txhipri_siz = htole32(TX_ENTRIES * sizeof(struct txp_tx_desc)); 910 sc->sc_txhir.r_reg = TXP_H2A_1; 911 sc->sc_txhir.r_desc = (struct txp_tx_desc *)sc->sc_txhiring_dma.dma_vaddr; 912 sc->sc_txhir.r_cons = sc->sc_txhir.r_prod = sc->sc_txhir.r_cnt = 0; 913 sc->sc_txhir.r_off = &sc->sc_hostvar->hv_tx_hi_desc_read_idx; 914 for (i = 0; i < TX_ENTRIES; i++) { 915 if (bus_dmamap_create(sc->sc_dmat, TXP_MAX_PKTLEN, 916 TX_ENTRIES - 4, TXP_MAX_SEGLEN, 0, 917 BUS_DMA_NOWAIT, &sc->sc_txd[i].sd_map) != 0) { 918 for (j = 0; j < i; j++) { 919 bus_dmamap_destroy(sc->sc_dmat, 920 sc->sc_txd[j].sd_map); 921 sc->sc_txd[j].sd_map = NULL; 922 } 923 goto bail_txhiring; 924 } 925 } 926 927 /* low priority tx ring */ 928 if (txp_dma_malloc(sc, sizeof(struct txp_tx_desc) * TX_ENTRIES, 929 &sc->sc_txloring_dma, BUS_DMA_COHERENT)) { 930 printf(": can't allocate low tx ring\n"); 931 goto bail_txhiring; 932 } 933 bzero(sc->sc_txloring_dma.dma_vaddr, sizeof(struct txp_tx_desc) * TX_ENTRIES); 934 boot->br_txlopri_lo = htole32(sc->sc_txloring_dma.dma_paddr & 0xffffffff); 935 boot->br_txlopri_hi = htole32(sc->sc_txloring_dma.dma_paddr >> 32); 936 boot->br_txlopri_siz = htole32(TX_ENTRIES * sizeof(struct txp_tx_desc)); 937 sc->sc_txlor.r_reg = TXP_H2A_3; 938 sc->sc_txlor.r_desc = (struct txp_tx_desc *)sc->sc_txloring_dma.dma_vaddr; 939 sc->sc_txlor.r_cons = sc->sc_txlor.r_prod = sc->sc_txlor.r_cnt = 0; 940 sc->sc_txlor.r_off = &sc->sc_hostvar->hv_tx_lo_desc_read_idx; 941 942 /* high priority rx ring */ 943 if (txp_dma_malloc(sc, sizeof(struct txp_rx_desc) * RX_ENTRIES, 944 &sc->sc_rxhiring_dma, BUS_DMA_COHERENT)) { 945 printf(": can't allocate high rx ring\n"); 946 goto bail_txloring; 947 } 948 bzero(sc->sc_rxhiring_dma.dma_vaddr, sizeof(struct txp_rx_desc) * RX_ENTRIES); 949 boot->br_rxhipri_lo = htole32(sc->sc_rxhiring_dma.dma_paddr & 0xffffffff); 950 boot->br_rxhipri_hi = htole32(sc->sc_rxhiring_dma.dma_paddr >> 32); 951 boot->br_rxhipri_siz = htole32(RX_ENTRIES * sizeof(struct txp_rx_desc)); 952 sc->sc_rxhir.r_desc = 953 (struct txp_rx_desc *)sc->sc_rxhiring_dma.dma_vaddr; 954 sc->sc_rxhir.r_roff = &sc->sc_hostvar->hv_rx_hi_read_idx; 955 sc->sc_rxhir.r_woff = &sc->sc_hostvar->hv_rx_hi_write_idx; 956 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxhiring_dma.dma_map, 957 0, sc->sc_rxhiring_dma.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD); 958 959 /* low priority ring */ 960 if (txp_dma_malloc(sc, sizeof(struct txp_rx_desc) * RX_ENTRIES, 961 &sc->sc_rxloring_dma, BUS_DMA_COHERENT)) { 962 printf(": can't allocate low rx ring\n"); 963 goto bail_rxhiring; 964 } 965 bzero(sc->sc_rxloring_dma.dma_vaddr, sizeof(struct txp_rx_desc) * RX_ENTRIES); 966 boot->br_rxlopri_lo = htole32(sc->sc_rxloring_dma.dma_paddr & 0xffffffff); 967 boot->br_rxlopri_hi = htole32(sc->sc_rxloring_dma.dma_paddr >> 32); 968 boot->br_rxlopri_siz = htole32(RX_ENTRIES * sizeof(struct txp_rx_desc)); 969 sc->sc_rxlor.r_desc = 970 (struct txp_rx_desc *)sc->sc_rxloring_dma.dma_vaddr; 971 sc->sc_rxlor.r_roff = &sc->sc_hostvar->hv_rx_lo_read_idx; 972 sc->sc_rxlor.r_woff = &sc->sc_hostvar->hv_rx_lo_write_idx; 973 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxloring_dma.dma_map, 974 0, sc->sc_rxloring_dma.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD); 975 976 /* command ring */ 977 if (txp_dma_malloc(sc, sizeof(struct txp_cmd_desc) * CMD_ENTRIES, 978 &sc->sc_cmdring_dma, BUS_DMA_COHERENT)) { 979 printf(": can't allocate command ring\n"); 980 goto bail_rxloring; 981 } 982 bzero(sc->sc_cmdring_dma.dma_vaddr, sizeof(struct txp_cmd_desc) * CMD_ENTRIES); 983 boot->br_cmd_lo = htole32(sc->sc_cmdring_dma.dma_paddr & 0xffffffff); 984 boot->br_cmd_hi = htole32(sc->sc_cmdring_dma.dma_paddr >> 32); 985 boot->br_cmd_siz = htole32(CMD_ENTRIES * sizeof(struct txp_cmd_desc)); 986 sc->sc_cmdring.base = (struct txp_cmd_desc *)sc->sc_cmdring_dma.dma_vaddr; 987 sc->sc_cmdring.size = CMD_ENTRIES * sizeof(struct txp_cmd_desc); 988 sc->sc_cmdring.lastwrite = 0; 989 990 /* response ring */ 991 if (txp_dma_malloc(sc, sizeof(struct txp_rsp_desc) * RSP_ENTRIES, 992 &sc->sc_rspring_dma, BUS_DMA_COHERENT)) { 993 printf(": can't allocate response ring\n"); 994 goto bail_cmdring; 995 } 996 bzero(sc->sc_rspring_dma.dma_vaddr, sizeof(struct txp_rsp_desc) * RSP_ENTRIES); 997 boot->br_resp_lo = htole32(sc->sc_rspring_dma.dma_paddr & 0xffffffff); 998 boot->br_resp_hi = htole32(sc->sc_rspring_dma.dma_paddr >> 32); 999 boot->br_resp_siz = htole32(CMD_ENTRIES * sizeof(struct txp_rsp_desc)); 1000 sc->sc_rspring.base = (struct txp_rsp_desc *)sc->sc_rspring_dma.dma_vaddr; 1001 sc->sc_rspring.size = RSP_ENTRIES * sizeof(struct txp_rsp_desc); 1002 sc->sc_rspring.lastwrite = 0; 1003 1004 /* receive buffer ring */ 1005 if (txp_dma_malloc(sc, sizeof(struct txp_rxbuf_desc) * RXBUF_ENTRIES, 1006 &sc->sc_rxbufring_dma, BUS_DMA_COHERENT)) { 1007 printf(": can't allocate rx buffer ring\n"); 1008 goto bail_rspring; 1009 } 1010 bzero(sc->sc_rxbufring_dma.dma_vaddr, sizeof(struct txp_rxbuf_desc) * RXBUF_ENTRIES); 1011 boot->br_rxbuf_lo = htole32(sc->sc_rxbufring_dma.dma_paddr & 0xffffffff); 1012 boot->br_rxbuf_hi = htole32(sc->sc_rxbufring_dma.dma_paddr >> 32); 1013 boot->br_rxbuf_siz = htole32(RXBUF_ENTRIES * sizeof(struct txp_rxbuf_desc)); 1014 sc->sc_rxbufs = (struct txp_rxbuf_desc *)sc->sc_rxbufring_dma.dma_vaddr; 1015 for (i = 0; i < RXBUF_ENTRIES; i++) { 1016 sd = (struct txp_swdesc *)malloc(sizeof(struct txp_swdesc), 1017 M_DEVBUF, M_NOWAIT); 1018 if (sd == NULL) 1019 break; 1020 1021 MGETHDR(sd->sd_mbuf, M_DONTWAIT, MT_DATA); 1022 if (sd->sd_mbuf == NULL) { 1023 goto bail_rxbufring; 1024 } 1025 1026 MCLGET(sd->sd_mbuf, M_DONTWAIT); 1027 if ((sd->sd_mbuf->m_flags & M_EXT) == 0) { 1028 goto bail_rxbufring; 1029 } 1030 sd->sd_mbuf->m_pkthdr.len = sd->sd_mbuf->m_len = MCLBYTES; 1031 sd->sd_mbuf->m_pkthdr.rcvif = ifp; 1032 if (bus_dmamap_create(sc->sc_dmat, TXP_MAX_PKTLEN, 1, 1033 TXP_MAX_PKTLEN, 0, BUS_DMA_NOWAIT, &sd->sd_map)) { 1034 goto bail_rxbufring; 1035 } 1036 if (bus_dmamap_load_mbuf(sc->sc_dmat, sd->sd_map, sd->sd_mbuf, 1037 BUS_DMA_NOWAIT)) { 1038 bus_dmamap_destroy(sc->sc_dmat, sd->sd_map); 1039 goto bail_rxbufring; 1040 } 1041 bus_dmamap_sync(sc->sc_dmat, sd->sd_map, 0, 1042 sd->sd_map->dm_mapsize, BUS_DMASYNC_PREREAD); 1043 1044 /* stash away pointer */ 1045 bcopy(&sd, (u_long *)&sc->sc_rxbufs[i].rb_vaddrlo, sizeof(sd)); 1046 1047 sc->sc_rxbufs[i].rb_paddrlo = 1048 ((u_int64_t)sd->sd_map->dm_segs[0].ds_addr) & 0xffffffff; 1049 sc->sc_rxbufs[i].rb_paddrhi = 1050 ((u_int64_t)sd->sd_map->dm_segs[0].ds_addr) >> 32; 1051 } 1052 bus_dmamap_sync(sc->sc_dmat, sc->sc_rxbufring_dma.dma_map, 1053 0, sc->sc_rxbufring_dma.dma_map->dm_mapsize, 1054 BUS_DMASYNC_PREWRITE); 1055 sc->sc_hostvar->hv_rx_buf_write_idx = htole32((RXBUF_ENTRIES - 1) * 1056 sizeof(struct txp_rxbuf_desc)); 1057 1058 /* zero dma */ 1059 if (txp_dma_malloc(sc, sizeof(u_int32_t), &sc->sc_zero_dma, 1060 BUS_DMA_COHERENT)) { 1061 printf(": can't allocate response ring\n"); 1062 goto bail_rxbufring; 1063 } 1064 bzero(sc->sc_zero_dma.dma_vaddr, sizeof(u_int32_t)); 1065 boot->br_zero_lo = htole32(sc->sc_zero_dma.dma_paddr & 0xffffffff); 1066 boot->br_zero_hi = htole32(sc->sc_zero_dma.dma_paddr >> 32); 1067 1068 /* See if it's waiting for boot, and try to boot it */ 1069 for (i = 0; i < 10000; i++) { 1070 r = READ_REG(sc, TXP_A2H_0); 1071 if (r == STAT_WAITING_FOR_BOOT) 1072 break; 1073 DELAY(50); 1074 } 1075 if (r != STAT_WAITING_FOR_BOOT) { 1076 printf(": not waiting for boot\n"); 1077 goto bail; 1078 } 1079 WRITE_REG(sc, TXP_H2A_2, sc->sc_boot_dma.dma_paddr >> 32); 1080 WRITE_REG(sc, TXP_H2A_1, sc->sc_boot_dma.dma_paddr & 0xffffffff); 1081 WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_REGISTER_BOOT_RECORD); 1082 1083 /* See if it booted */ 1084 for (i = 0; i < 10000; i++) { 1085 r = READ_REG(sc, TXP_A2H_0); 1086 if (r == STAT_RUNNING) 1087 break; 1088 DELAY(50); 1089 } 1090 if (r != STAT_RUNNING) { 1091 printf(": fw not running\n"); 1092 goto bail; 1093 } 1094 1095 /* Clear TX and CMD ring write registers */ 1096 WRITE_REG(sc, TXP_H2A_1, TXP_BOOTCMD_NULL); 1097 WRITE_REG(sc, TXP_H2A_2, TXP_BOOTCMD_NULL); 1098 WRITE_REG(sc, TXP_H2A_3, TXP_BOOTCMD_NULL); 1099 WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_NULL); 1100 1101 return (0); 1102 1103 bail: 1104 txp_dma_free(sc, &sc->sc_zero_dma); 1105 bail_rxbufring: 1106 txp_dma_free(sc, &sc->sc_rxbufring_dma); 1107 bail_rspring: 1108 txp_dma_free(sc, &sc->sc_rspring_dma); 1109 bail_cmdring: 1110 txp_dma_free(sc, &sc->sc_cmdring_dma); 1111 bail_rxloring: 1112 txp_dma_free(sc, &sc->sc_rxloring_dma); 1113 bail_rxhiring: 1114 txp_dma_free(sc, &sc->sc_rxhiring_dma); 1115 bail_txloring: 1116 txp_dma_free(sc, &sc->sc_txloring_dma); 1117 bail_txhiring: 1118 txp_dma_free(sc, &sc->sc_txhiring_dma); 1119 bail_host: 1120 txp_dma_free(sc, &sc->sc_host_dma); 1121 bail_boot: 1122 txp_dma_free(sc, &sc->sc_boot_dma); 1123 return (-1); 1124 } 1125 1126 int 1127 txp_dma_malloc(sc, size, dma, mapflags) 1128 struct txp_softc *sc; 1129 bus_size_t size; 1130 struct txp_dma_alloc *dma; 1131 int mapflags; 1132 { 1133 int r; 1134 1135 if ((r = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, 1136 &dma->dma_seg, 1, &dma->dma_nseg, 0)) != 0) 1137 goto fail_0; 1138 1139 if ((r = bus_dmamem_map(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg, 1140 size, &dma->dma_vaddr, mapflags | BUS_DMA_NOWAIT)) != 0) 1141 goto fail_1; 1142 1143 if ((r = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0, 1144 BUS_DMA_NOWAIT, &dma->dma_map)) != 0) 1145 goto fail_2; 1146 1147 if ((r = bus_dmamap_load(sc->sc_dmat, dma->dma_map, dma->dma_vaddr, 1148 size, NULL, BUS_DMA_NOWAIT)) != 0) 1149 goto fail_3; 1150 1151 dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr; 1152 return (0); 1153 1154 fail_3: 1155 bus_dmamap_destroy(sc->sc_dmat, dma->dma_map); 1156 fail_2: 1157 bus_dmamem_unmap(sc->sc_dmat, dma->dma_vaddr, size); 1158 fail_1: 1159 bus_dmamem_free(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg); 1160 fail_0: 1161 return (r); 1162 } 1163 1164 void 1165 txp_dma_free(sc, dma) 1166 struct txp_softc *sc; 1167 struct txp_dma_alloc *dma; 1168 { 1169 bus_dmamap_unload(sc->sc_dmat, dma->dma_map); 1170 bus_dmamem_unmap(sc->sc_dmat, dma->dma_vaddr, dma->dma_map->dm_mapsize); 1171 bus_dmamem_free(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg); 1172 bus_dmamap_destroy(sc->sc_dmat, dma->dma_map); 1173 } 1174 1175 int 1176 txp_ioctl(ifp, command, data) 1177 struct ifnet *ifp; 1178 u_long command; 1179 caddr_t data; 1180 { 1181 struct txp_softc *sc = ifp->if_softc; 1182 struct ifreq *ifr = (struct ifreq *)data; 1183 struct ifaddr *ifa = (struct ifaddr *)data; 1184 int s, error = 0; 1185 1186 s = splnet(); 1187 1188 if ((error = ether_ioctl(ifp, &sc->sc_arpcom, command, data)) > 0) { 1189 splx(s); 1190 return error; 1191 } 1192 1193 switch(command) { 1194 case SIOCSIFADDR: 1195 ifp->if_flags |= IFF_UP; 1196 switch (ifa->ifa_addr->sa_family) { 1197 #ifdef INET 1198 case AF_INET: 1199 txp_init(sc); 1200 arp_ifinit(&sc->sc_arpcom, ifa); 1201 break; 1202 #endif /* INET */ 1203 default: 1204 txp_init(sc); 1205 break; 1206 } 1207 break; 1208 case SIOCSIFFLAGS: 1209 if (ifp->if_flags & IFF_UP) { 1210 txp_init(sc); 1211 } else { 1212 if (ifp->if_flags & IFF_RUNNING) 1213 txp_stop(sc); 1214 } 1215 break; 1216 case SIOCADDMULTI: 1217 case SIOCDELMULTI: 1218 error = (command == SIOCADDMULTI) ? 1219 ether_addmulti(ifr, &sc->sc_arpcom) : 1220 ether_delmulti(ifr, &sc->sc_arpcom); 1221 1222 if (error == ENETRESET) { 1223 /* 1224 * Multicast list has changed; set the hardware 1225 * filter accordingly. 1226 */ 1227 txp_set_filter(sc); 1228 error = 0; 1229 } 1230 break; 1231 case SIOCGIFMEDIA: 1232 case SIOCSIFMEDIA: 1233 error = ifmedia_ioctl(ifp, ifr, &sc->sc_ifmedia, command); 1234 break; 1235 default: 1236 error = EINVAL; 1237 break; 1238 } 1239 1240 splx(s); 1241 1242 return(error); 1243 } 1244 1245 void 1246 txp_init(sc) 1247 struct txp_softc *sc; 1248 { 1249 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1250 int s; 1251 1252 txp_stop(sc); 1253 1254 s = splnet(); 1255 1256 txp_set_filter(sc); 1257 1258 txp_command(sc, TXP_CMD_TX_ENABLE, 0, 0, 0, NULL, NULL, NULL, 1); 1259 txp_command(sc, TXP_CMD_RX_ENABLE, 0, 0, 0, NULL, NULL, NULL, 1); 1260 1261 WRITE_REG(sc, TXP_IER, TXP_INT_RESERVED | TXP_INT_SELF | 1262 TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 | 1263 TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0 | 1264 TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 | 1265 TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT | TXP_INT_LATCH); 1266 WRITE_REG(sc, TXP_IMR, TXP_INT_A2H_3); 1267 1268 ifp->if_flags |= IFF_RUNNING; 1269 ifp->if_flags &= ~IFF_OACTIVE; 1270 ifp->if_timer = 0; 1271 1272 if (!timeout_pending(&sc->sc_tick)) 1273 timeout_add(&sc->sc_tick, hz); 1274 1275 splx(s); 1276 } 1277 1278 void 1279 txp_tick(vsc) 1280 void *vsc; 1281 { 1282 struct txp_softc *sc = vsc; 1283 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1284 struct txp_rsp_desc *rsp = NULL; 1285 struct txp_ext_desc *ext; 1286 int s; 1287 1288 s = splnet(); 1289 txp_rxbuf_reclaim(sc); 1290 1291 if (txp_command2(sc, TXP_CMD_READ_STATISTICS, 0, 0, 0, NULL, 0, 1292 &rsp, 1)) 1293 goto out; 1294 if (rsp->rsp_numdesc != 6) 1295 goto out; 1296 if (txp_command(sc, TXP_CMD_CLEAR_STATISTICS, 0, 0, 0, 1297 NULL, NULL, NULL, 1)) 1298 goto out; 1299 ext = (struct txp_ext_desc *)(rsp + 1); 1300 1301 ifp->if_ierrors += ext[3].ext_2 + ext[3].ext_3 + ext[3].ext_4 + 1302 ext[4].ext_1 + ext[4].ext_4; 1303 ifp->if_oerrors += ext[0].ext_1 + ext[1].ext_1 + ext[1].ext_4 + 1304 ext[2].ext_1; 1305 ifp->if_collisions += ext[0].ext_2 + ext[0].ext_3 + ext[1].ext_2 + 1306 ext[1].ext_3; 1307 ifp->if_opackets += rsp->rsp_par2; 1308 ifp->if_ipackets += ext[2].ext_3; 1309 1310 out: 1311 if (rsp != NULL) 1312 free(rsp, M_DEVBUF); 1313 1314 splx(s); 1315 timeout_add(&sc->sc_tick, hz); 1316 } 1317 1318 void 1319 txp_start(ifp) 1320 struct ifnet *ifp; 1321 { 1322 struct txp_softc *sc = ifp->if_softc; 1323 struct txp_tx_ring *r = &sc->sc_txhir; 1324 struct txp_tx_desc *txd; 1325 int txdidx; 1326 struct txp_frag_desc *fxd; 1327 struct mbuf *m, *mnew; 1328 struct txp_swdesc *sd; 1329 u_int32_t firstprod, firstcnt, prod, cnt, i; 1330 #if NVLAN > 0 1331 struct ifvlan *ifv; 1332 #endif 1333 1334 if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING) 1335 return; 1336 1337 prod = r->r_prod; 1338 cnt = r->r_cnt; 1339 1340 while (1) { 1341 IFQ_POLL(&ifp->if_snd, m); 1342 if (m == NULL) 1343 break; 1344 mnew = NULL; 1345 1346 firstprod = prod; 1347 firstcnt = cnt; 1348 1349 sd = sc->sc_txd + prod; 1350 sd->sd_mbuf = m; 1351 1352 if (bus_dmamap_load_mbuf(sc->sc_dmat, sd->sd_map, m, 1353 BUS_DMA_NOWAIT)) { 1354 MGETHDR(mnew, M_DONTWAIT, MT_DATA); 1355 if (mnew == NULL) 1356 goto oactive1; 1357 if (m->m_pkthdr.len > MHLEN) { 1358 MCLGET(mnew, M_DONTWAIT); 1359 if ((mnew->m_flags & M_EXT) == 0) { 1360 m_freem(mnew); 1361 goto oactive1; 1362 } 1363 } 1364 m_copydata(m, 0, m->m_pkthdr.len, mtod(mnew, caddr_t)); 1365 mnew->m_pkthdr.len = mnew->m_len = m->m_pkthdr.len; 1366 IFQ_DEQUEUE(&ifp->if_snd, m); 1367 m_freem(m); 1368 m = mnew; 1369 if (bus_dmamap_load_mbuf(sc->sc_dmat, sd->sd_map, m, 1370 BUS_DMA_NOWAIT)) 1371 goto oactive1; 1372 } 1373 1374 if ((TX_ENTRIES - cnt) < 4) 1375 goto oactive; 1376 1377 txd = r->r_desc + prod; 1378 txdidx = prod; 1379 txd->tx_flags = TX_FLAGS_TYPE_DATA; 1380 txd->tx_numdesc = 0; 1381 txd->tx_addrlo = 0; 1382 txd->tx_addrhi = 0; 1383 txd->tx_totlen = m->m_pkthdr.len; 1384 txd->tx_pflags = 0; 1385 txd->tx_numdesc = sd->sd_map->dm_nsegs; 1386 1387 if (++prod == TX_ENTRIES) 1388 prod = 0; 1389 1390 if (++cnt >= (TX_ENTRIES - 4)) 1391 goto oactive; 1392 1393 #if NVLAN > 0 1394 if ((m->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) && 1395 m->m_pkthdr.rcvif != NULL) { 1396 ifv = m->m_pkthdr.rcvif->if_softc; 1397 txd->tx_pflags = TX_PFLAGS_VLAN | 1398 (htons(ifv->ifv_tag) << TX_PFLAGS_VLANTAG_S); 1399 } 1400 #endif 1401 1402 if (m->m_pkthdr.csum & M_IPV4_CSUM_OUT) 1403 txd->tx_pflags |= TX_PFLAGS_IPCKSUM; 1404 #ifdef TRY_TX_TCP_CSUM 1405 if (m->m_pkthdr.csum & M_TCPV4_CSUM_OUT) 1406 txd->tx_pflags |= TX_PFLAGS_TCPCKSUM; 1407 #endif 1408 #ifdef TRY_TX_UDP_CSUM 1409 if (m->m_pkthdr.csum & M_UDPV4_CSUM_OUT) 1410 txd->tx_pflags |= TX_PFLAGS_UDPCKSUM; 1411 #endif 1412 1413 bus_dmamap_sync(sc->sc_dmat, sd->sd_map, 0, 1414 sd->sd_map->dm_mapsize, BUS_DMASYNC_PREWRITE); 1415 1416 fxd = (struct txp_frag_desc *)(r->r_desc + prod); 1417 for (i = 0; i < sd->sd_map->dm_nsegs; i++) { 1418 if (++cnt >= (TX_ENTRIES - 4)) { 1419 bus_dmamap_sync(sc->sc_dmat, sd->sd_map, 1420 0, sd->sd_map->dm_mapsize, 1421 BUS_DMASYNC_POSTWRITE); 1422 goto oactive; 1423 } 1424 1425 fxd->frag_flags = FRAG_FLAGS_TYPE_FRAG | 1426 FRAG_FLAGS_VALID; 1427 fxd->frag_rsvd1 = 0; 1428 fxd->frag_len = sd->sd_map->dm_segs[i].ds_len; 1429 fxd->frag_addrlo = 1430 ((u_int64_t)sd->sd_map->dm_segs[i].ds_addr) & 1431 0xffffffff; 1432 fxd->frag_addrhi = 1433 ((u_int64_t)sd->sd_map->dm_segs[i].ds_addr) >> 1434 32; 1435 fxd->frag_rsvd2 = 0; 1436 1437 bus_dmamap_sync(sc->sc_dmat, 1438 sc->sc_txhiring_dma.dma_map, 1439 prod * sizeof(struct txp_frag_desc), 1440 sizeof(struct txp_frag_desc), BUS_DMASYNC_PREWRITE); 1441 1442 if (++prod == TX_ENTRIES) { 1443 fxd = (struct txp_frag_desc *)r->r_desc; 1444 prod = 0; 1445 } else 1446 fxd++; 1447 1448 } 1449 1450 /* 1451 * if mnew isn't NULL, we already dequeued and copied 1452 * the packet. 1453 */ 1454 if (mnew == NULL) 1455 IFQ_DEQUEUE(&ifp->if_snd, m); 1456 1457 ifp->if_timer = 5; 1458 1459 #if NBPFILTER > 0 1460 if (ifp->if_bpf) 1461 bpf_mtap(ifp->if_bpf, m); 1462 #endif 1463 1464 txd->tx_flags |= TX_FLAGS_VALID; 1465 bus_dmamap_sync(sc->sc_dmat, sc->sc_txhiring_dma.dma_map, 1466 txdidx * sizeof(struct txp_tx_desc), 1467 sizeof(struct txp_tx_desc), BUS_DMASYNC_PREWRITE); 1468 1469 #if 0 1470 { 1471 struct mbuf *mx; 1472 int i; 1473 1474 printf("txd: flags 0x%x ndesc %d totlen %d pflags 0x%x\n", 1475 txd->tx_flags, txd->tx_numdesc, txd->tx_totlen, 1476 txd->tx_pflags); 1477 for (mx = m; mx != NULL; mx = mx->m_next) { 1478 for (i = 0; i < mx->m_len; i++) { 1479 printf(":%02x", 1480 (u_int8_t)m->m_data[i]); 1481 } 1482 } 1483 printf("\n"); 1484 } 1485 #endif 1486 1487 WRITE_REG(sc, r->r_reg, TXP_IDX2OFFSET(prod)); 1488 } 1489 1490 r->r_prod = prod; 1491 r->r_cnt = cnt; 1492 return; 1493 1494 oactive: 1495 bus_dmamap_unload(sc->sc_dmat, sd->sd_map); 1496 oactive1: 1497 ifp->if_flags |= IFF_OACTIVE; 1498 r->r_prod = firstprod; 1499 r->r_cnt = firstcnt; 1500 } 1501 1502 /* 1503 * Handle simple commands sent to the typhoon 1504 */ 1505 int 1506 txp_command(sc, id, in1, in2, in3, out1, out2, out3, wait) 1507 struct txp_softc *sc; 1508 u_int16_t id, in1, *out1; 1509 u_int32_t in2, in3, *out2, *out3; 1510 int wait; 1511 { 1512 struct txp_rsp_desc *rsp = NULL; 1513 1514 if (txp_command2(sc, id, in1, in2, in3, NULL, 0, &rsp, wait)) 1515 return (-1); 1516 1517 if (!wait) 1518 return (0); 1519 1520 if (out1 != NULL) 1521 *out1 = letoh16(rsp->rsp_par1); 1522 if (out2 != NULL) 1523 *out2 = letoh32(rsp->rsp_par2); 1524 if (out3 != NULL) 1525 *out3 = letoh32(rsp->rsp_par3); 1526 free(rsp, M_DEVBUF); 1527 return (0); 1528 } 1529 1530 int 1531 txp_command2(sc, id, in1, in2, in3, in_extp, in_extn, rspp, wait) 1532 struct txp_softc *sc; 1533 u_int16_t id, in1; 1534 u_int32_t in2, in3; 1535 struct txp_ext_desc *in_extp; 1536 u_int8_t in_extn; 1537 struct txp_rsp_desc **rspp; 1538 int wait; 1539 { 1540 struct txp_hostvar *hv = sc->sc_hostvar; 1541 struct txp_cmd_desc *cmd; 1542 struct txp_ext_desc *ext; 1543 u_int32_t idx, i; 1544 u_int16_t seq; 1545 1546 if (txp_cmd_desc_numfree(sc) < (in_extn + 1)) { 1547 printf("%s: no free cmd descriptors\n", TXP_DEVNAME(sc)); 1548 return (-1); 1549 } 1550 1551 idx = sc->sc_cmdring.lastwrite; 1552 cmd = (struct txp_cmd_desc *)(((u_int8_t *)sc->sc_cmdring.base) + idx); 1553 bzero(cmd, sizeof(*cmd)); 1554 1555 cmd->cmd_numdesc = in_extn; 1556 seq = sc->sc_seq++; 1557 cmd->cmd_seq = htole16(seq); 1558 cmd->cmd_id = htole16(id); 1559 cmd->cmd_par1 = htole16(in1); 1560 cmd->cmd_par2 = htole32(in2); 1561 cmd->cmd_par3 = htole32(in3); 1562 cmd->cmd_flags = CMD_FLAGS_TYPE_CMD | 1563 (wait ? CMD_FLAGS_RESP : 0) | CMD_FLAGS_VALID; 1564 1565 idx += sizeof(struct txp_cmd_desc); 1566 if (idx == sc->sc_cmdring.size) 1567 idx = 0; 1568 1569 for (i = 0; i < in_extn; i++) { 1570 ext = (struct txp_ext_desc *)(((u_int8_t *)sc->sc_cmdring.base) + idx); 1571 bcopy(in_extp, ext, sizeof(struct txp_ext_desc)); 1572 in_extp++; 1573 idx += sizeof(struct txp_cmd_desc); 1574 if (idx == sc->sc_cmdring.size) 1575 idx = 0; 1576 } 1577 1578 sc->sc_cmdring.lastwrite = idx; 1579 1580 WRITE_REG(sc, TXP_H2A_2, sc->sc_cmdring.lastwrite); 1581 bus_dmamap_sync(sc->sc_dmat, sc->sc_host_dma.dma_map, 0, 1582 sizeof(struct txp_hostvar), BUS_DMASYNC_PREREAD); 1583 1584 if (!wait) 1585 return (0); 1586 1587 for (i = 0; i < 10000; i++) { 1588 bus_dmamap_sync(sc->sc_dmat, sc->sc_host_dma.dma_map, 0, 1589 sizeof(struct txp_hostvar), BUS_DMASYNC_POSTREAD); 1590 idx = letoh32(hv->hv_resp_read_idx); 1591 if (idx != letoh32(hv->hv_resp_write_idx)) { 1592 *rspp = NULL; 1593 if (txp_response(sc, idx, id, seq, rspp)) 1594 return (-1); 1595 if (*rspp != NULL) 1596 break; 1597 } 1598 bus_dmamap_sync(sc->sc_dmat, sc->sc_host_dma.dma_map, 0, 1599 sizeof(struct txp_hostvar), BUS_DMASYNC_PREREAD); 1600 DELAY(50); 1601 } 1602 if (i == 1000 || (*rspp) == NULL) { 1603 printf("%s: 0x%x command failed\n", TXP_DEVNAME(sc), id); 1604 return (-1); 1605 } 1606 1607 return (0); 1608 } 1609 1610 int 1611 txp_response(sc, ridx, id, seq, rspp) 1612 struct txp_softc *sc; 1613 u_int32_t ridx; 1614 u_int16_t id; 1615 u_int16_t seq; 1616 struct txp_rsp_desc **rspp; 1617 { 1618 struct txp_hostvar *hv = sc->sc_hostvar; 1619 struct txp_rsp_desc *rsp; 1620 1621 while (ridx != letoh32(hv->hv_resp_write_idx)) { 1622 rsp = (struct txp_rsp_desc *)(((u_int8_t *)sc->sc_rspring.base) + ridx); 1623 1624 if (id == letoh16(rsp->rsp_id) && letoh16(rsp->rsp_seq) == seq) { 1625 *rspp = (struct txp_rsp_desc *)malloc( 1626 sizeof(struct txp_rsp_desc) * (rsp->rsp_numdesc + 1), 1627 M_DEVBUF, M_NOWAIT); 1628 if ((*rspp) == NULL) 1629 return (-1); 1630 txp_rsp_fixup(sc, rsp, *rspp); 1631 return (0); 1632 } 1633 1634 if (rsp->rsp_flags & RSP_FLAGS_ERROR) { 1635 printf("%s: response error: id 0x%x\n", 1636 TXP_DEVNAME(sc), letoh16(rsp->rsp_id)); 1637 txp_rsp_fixup(sc, rsp, NULL); 1638 ridx = letoh32(hv->hv_resp_read_idx); 1639 continue; 1640 } 1641 1642 switch (letoh16(rsp->rsp_id)) { 1643 case TXP_CMD_CYCLE_STATISTICS: 1644 case TXP_CMD_MEDIA_STATUS_READ: 1645 break; 1646 case TXP_CMD_HELLO_RESPONSE: 1647 printf("%s: hello\n", TXP_DEVNAME(sc)); 1648 break; 1649 default: 1650 printf("%s: unknown id(0x%x)\n", TXP_DEVNAME(sc), 1651 letoh16(rsp->rsp_id)); 1652 } 1653 1654 txp_rsp_fixup(sc, rsp, NULL); 1655 ridx = letoh32(hv->hv_resp_read_idx); 1656 hv->hv_resp_read_idx = letoh32(ridx); 1657 } 1658 1659 return (0); 1660 } 1661 1662 void 1663 txp_rsp_fixup(sc, rsp, dst) 1664 struct txp_softc *sc; 1665 struct txp_rsp_desc *rsp, *dst; 1666 { 1667 struct txp_rsp_desc *src = rsp; 1668 struct txp_hostvar *hv = sc->sc_hostvar; 1669 u_int32_t i, ridx; 1670 1671 ridx = letoh32(hv->hv_resp_read_idx); 1672 1673 for (i = 0; i < rsp->rsp_numdesc + 1; i++) { 1674 if (dst != NULL) 1675 bcopy(src, dst++, sizeof(struct txp_rsp_desc)); 1676 ridx += sizeof(struct txp_rsp_desc); 1677 if (ridx == sc->sc_rspring.size) { 1678 src = sc->sc_rspring.base; 1679 ridx = 0; 1680 } else 1681 src++; 1682 sc->sc_rspring.lastwrite = ridx; 1683 hv->hv_resp_read_idx = htole32(ridx); 1684 } 1685 1686 hv->hv_resp_read_idx = htole32(ridx); 1687 } 1688 1689 int 1690 txp_cmd_desc_numfree(sc) 1691 struct txp_softc *sc; 1692 { 1693 struct txp_hostvar *hv = sc->sc_hostvar; 1694 struct txp_boot_record *br = sc->sc_boot; 1695 u_int32_t widx, ridx, nfree; 1696 1697 widx = sc->sc_cmdring.lastwrite; 1698 ridx = letoh32(hv->hv_cmd_read_idx); 1699 1700 if (widx == ridx) { 1701 /* Ring is completely free */ 1702 nfree = letoh32(br->br_cmd_siz) - sizeof(struct txp_cmd_desc); 1703 } else { 1704 if (widx > ridx) 1705 nfree = letoh32(br->br_cmd_siz) - 1706 (widx - ridx + sizeof(struct txp_cmd_desc)); 1707 else 1708 nfree = ridx - widx - sizeof(struct txp_cmd_desc); 1709 } 1710 1711 return (nfree / sizeof(struct txp_cmd_desc)); 1712 } 1713 1714 void 1715 txp_stop(sc) 1716 struct txp_softc *sc; 1717 { 1718 txp_command(sc, TXP_CMD_TX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 1); 1719 txp_command(sc, TXP_CMD_RX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 1); 1720 1721 if (timeout_pending(&sc->sc_tick)) 1722 timeout_del(&sc->sc_tick); 1723 } 1724 1725 void 1726 txp_watchdog(ifp) 1727 struct ifnet *ifp; 1728 { 1729 } 1730 1731 int 1732 txp_ifmedia_upd(ifp) 1733 struct ifnet *ifp; 1734 { 1735 struct txp_softc *sc = ifp->if_softc; 1736 struct ifmedia *ifm = &sc->sc_ifmedia; 1737 u_int16_t new_xcvr; 1738 1739 if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER) 1740 return (EINVAL); 1741 1742 if (IFM_SUBTYPE(ifm->ifm_media) == IFM_10_T) { 1743 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) 1744 new_xcvr = TXP_XCVR_10_FDX; 1745 else 1746 new_xcvr = TXP_XCVR_10_HDX; 1747 } else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) { 1748 if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX) 1749 new_xcvr = TXP_XCVR_100_FDX; 1750 else 1751 new_xcvr = TXP_XCVR_100_HDX; 1752 } else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) { 1753 new_xcvr = TXP_XCVR_AUTO; 1754 } else 1755 return (EINVAL); 1756 1757 /* nothing to do */ 1758 if (sc->sc_xcvr == new_xcvr) 1759 return (0); 1760 1761 txp_command(sc, TXP_CMD_XCVR_SELECT, new_xcvr, 0, 0, 1762 NULL, NULL, NULL, 0); 1763 sc->sc_xcvr = new_xcvr; 1764 1765 return (0); 1766 } 1767 1768 void 1769 txp_ifmedia_sts(ifp, ifmr) 1770 struct ifnet *ifp; 1771 struct ifmediareq *ifmr; 1772 { 1773 struct txp_softc *sc = ifp->if_softc; 1774 struct ifmedia *ifm = &sc->sc_ifmedia; 1775 u_int16_t bmsr, bmcr, anlpar; 1776 1777 ifmr->ifm_status = IFM_AVALID; 1778 ifmr->ifm_active = IFM_ETHER; 1779 1780 if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMSR, 0, 1781 &bmsr, NULL, NULL, 1)) 1782 goto bail; 1783 if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMSR, 0, 1784 &bmsr, NULL, NULL, 1)) 1785 goto bail; 1786 1787 if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMCR, 0, 1788 &bmcr, NULL, NULL, 1)) 1789 goto bail; 1790 1791 if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_ANLPAR, 0, 1792 &anlpar, NULL, NULL, 1)) 1793 goto bail; 1794 1795 if (bmsr & BMSR_LINK) 1796 ifmr->ifm_status |= IFM_ACTIVE; 1797 1798 if (bmcr & BMCR_ISO) { 1799 ifmr->ifm_active |= IFM_NONE; 1800 ifmr->ifm_status = 0; 1801 return; 1802 } 1803 1804 if (bmcr & BMCR_LOOP) 1805 ifmr->ifm_active |= IFM_LOOP; 1806 1807 if (bmcr & BMCR_AUTOEN) { 1808 if ((bmsr & BMSR_ACOMP) == 0) { 1809 ifmr->ifm_active |= IFM_NONE; 1810 return; 1811 } 1812 1813 if (anlpar & ANLPAR_T4) 1814 ifmr->ifm_active |= IFM_100_T4; 1815 else if (anlpar & ANLPAR_TX_FD) 1816 ifmr->ifm_active |= IFM_100_TX|IFM_FDX; 1817 else if (anlpar & ANLPAR_TX) 1818 ifmr->ifm_active |= IFM_100_TX; 1819 else if (anlpar & ANLPAR_10_FD) 1820 ifmr->ifm_active |= IFM_10_T|IFM_FDX; 1821 else if (anlpar & ANLPAR_10) 1822 ifmr->ifm_active |= IFM_10_T; 1823 else 1824 ifmr->ifm_active |= IFM_NONE; 1825 } else 1826 ifmr->ifm_active = ifm->ifm_cur->ifm_media; 1827 return; 1828 1829 bail: 1830 ifmr->ifm_active |= IFM_NONE; 1831 ifmr->ifm_status &= ~IFM_AVALID; 1832 } 1833 1834 void 1835 txp_show_descriptor(d) 1836 void *d; 1837 { 1838 struct txp_cmd_desc *cmd = d; 1839 struct txp_rsp_desc *rsp = d; 1840 struct txp_tx_desc *txd = d; 1841 struct txp_frag_desc *frgd = d; 1842 1843 switch (cmd->cmd_flags & CMD_FLAGS_TYPE_M) { 1844 case CMD_FLAGS_TYPE_CMD: 1845 /* command descriptor */ 1846 printf("[cmd flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n", 1847 cmd->cmd_flags, cmd->cmd_numdesc, letoh16(cmd->cmd_id), 1848 letoh16(cmd->cmd_seq), letoh16(cmd->cmd_par1), 1849 letoh32(cmd->cmd_par2), letoh32(cmd->cmd_par3)); 1850 break; 1851 case CMD_FLAGS_TYPE_RESP: 1852 /* response descriptor */ 1853 printf("[rsp flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n", 1854 rsp->rsp_flags, rsp->rsp_numdesc, letoh16(rsp->rsp_id), 1855 letoh16(rsp->rsp_seq), letoh16(rsp->rsp_par1), 1856 letoh32(rsp->rsp_par2), letoh32(rsp->rsp_par3)); 1857 break; 1858 case CMD_FLAGS_TYPE_DATA: 1859 /* data header (assuming tx for now) */ 1860 printf("[data flags 0x%x num %d totlen %d addr 0x%x/0x%x pflags 0x%x]", 1861 txd->tx_flags, txd->tx_numdesc, txd->tx_totlen, 1862 txd->tx_addrlo, txd->tx_addrhi, txd->tx_pflags); 1863 break; 1864 case CMD_FLAGS_TYPE_FRAG: 1865 /* fragment descriptor */ 1866 printf("[frag flags 0x%x rsvd1 0x%x len %d addr 0x%x/0x%x rsvd2 0x%x]", 1867 frgd->frag_flags, frgd->frag_rsvd1, frgd->frag_len, 1868 frgd->frag_addrlo, frgd->frag_addrhi, frgd->frag_rsvd2); 1869 break; 1870 default: 1871 printf("[unknown(%x) flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n", 1872 cmd->cmd_flags & CMD_FLAGS_TYPE_M, 1873 cmd->cmd_flags, cmd->cmd_numdesc, letoh16(cmd->cmd_id), 1874 letoh16(cmd->cmd_seq), letoh16(cmd->cmd_par1), 1875 letoh32(cmd->cmd_par2), letoh32(cmd->cmd_par3)); 1876 break; 1877 } 1878 } 1879 1880 void 1881 txp_set_filter(sc) 1882 struct txp_softc *sc; 1883 { 1884 struct arpcom *ac = &sc->sc_arpcom; 1885 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1886 u_int32_t crc, carry, hashbit, hash[2]; 1887 u_int16_t filter; 1888 u_int8_t octet; 1889 int i, j, mcnt = 0; 1890 struct ether_multi *enm; 1891 struct ether_multistep step; 1892 1893 if (ifp->if_flags & IFF_PROMISC) { 1894 filter = TXP_RXFILT_PROMISC; 1895 goto setit; 1896 } 1897 1898 again: 1899 filter = TXP_RXFILT_DIRECT; 1900 1901 if (ifp->if_flags & IFF_BROADCAST) 1902 filter |= TXP_RXFILT_BROADCAST; 1903 1904 if (ifp->if_flags & IFF_ALLMULTI) 1905 filter |= TXP_RXFILT_ALLMULTI; 1906 else { 1907 hash[0] = hash[1] = 0; 1908 1909 ETHER_FIRST_MULTI(step, ac, enm); 1910 while (enm != NULL) { 1911 if (bcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 1912 /* 1913 * We must listen to a range of multicast 1914 * addresses. For now, just accept all 1915 * multicasts, rather than trying to set only 1916 * those filter bits needed to match the range. 1917 * (At this time, the only use of address 1918 * ranges is for IP multicast routing, for 1919 * which the range is big enough to require 1920 * all bits set.) 1921 */ 1922 ifp->if_flags |= IFF_ALLMULTI; 1923 goto again; 1924 } 1925 1926 mcnt++; 1927 crc = 0xffffffff; 1928 1929 for (i = 0; i < ETHER_ADDR_LEN; i++) { 1930 octet = enm->enm_addrlo[i]; 1931 for (j = 0; j < 8; j++) { 1932 carry = ((crc & 0x80000000) ? 1 : 0) ^ 1933 (octet & 1); 1934 crc <<= 1; 1935 octet >>= 1; 1936 if (carry) 1937 crc = (crc ^ TXP_POLYNOMIAL) | 1938 carry; 1939 } 1940 } 1941 hashbit = (u_int16_t)(crc & (64 - 1)); 1942 hash[hashbit / 32] |= (1 << hashbit % 32); 1943 ETHER_NEXT_MULTI(step, enm); 1944 } 1945 1946 if (mcnt > 0) { 1947 filter |= TXP_RXFILT_HASHMULTI; 1948 txp_command(sc, TXP_CMD_MCAST_HASH_MASK_WRITE, 1949 2, hash[0], hash[1], NULL, NULL, NULL, 0); 1950 } 1951 } 1952 1953 setit: 1954 txp_command(sc, TXP_CMD_RX_FILTER_WRITE, filter, 0, 0, 1955 NULL, NULL, NULL, 1); 1956 } 1957 1958 void 1959 txp_capabilities(sc) 1960 struct txp_softc *sc; 1961 { 1962 struct ifnet *ifp = &sc->sc_arpcom.ac_if; 1963 struct txp_rsp_desc *rsp = NULL; 1964 struct txp_ext_desc *ext; 1965 1966 if (txp_command2(sc, TXP_CMD_OFFLOAD_READ, 0, 0, 0, NULL, 0, &rsp, 1)) 1967 goto out; 1968 1969 if (rsp->rsp_numdesc != 1) 1970 goto out; 1971 ext = (struct txp_ext_desc *)(rsp + 1); 1972 1973 sc->sc_tx_capability = ext->ext_1 & OFFLOAD_MASK; 1974 sc->sc_rx_capability = ext->ext_2 & OFFLOAD_MASK; 1975 1976 #if NVLAN > 0 1977 ifp->if_capabilities |= IFCAP_VLAN_MTU; 1978 if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_VLAN) { 1979 sc->sc_tx_capability |= OFFLOAD_VLAN; 1980 sc->sc_rx_capability |= OFFLOAD_VLAN; 1981 ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING; 1982 } 1983 #endif 1984 1985 #if 0 1986 /* not ready yet */ 1987 if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_IPSEC) { 1988 sc->sc_tx_capability |= OFFLOAD_IPSEC; 1989 sc->sc_rx_capability |= OFFLOAD_IPSEC; 1990 ifp->if_capabilities |= IFCAP_IPSEC; 1991 } 1992 #endif 1993 1994 if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_IPCKSUM) { 1995 sc->sc_tx_capability |= OFFLOAD_IPCKSUM; 1996 sc->sc_rx_capability |= OFFLOAD_IPCKSUM; 1997 ifp->if_capabilities |= IFCAP_CSUM_IPv4; 1998 } 1999 2000 if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_TCPCKSUM) { 2001 sc->sc_rx_capability |= OFFLOAD_TCPCKSUM; 2002 #ifdef TRY_TX_TCP_CSUM 2003 sc->sc_tx_capability |= OFFLOAD_TCPCKSUM; 2004 ifp->if_capabilities |= IFCAP_CSUM_TCPv4; 2005 #endif 2006 } 2007 2008 if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_UDPCKSUM) { 2009 sc->sc_rx_capability |= OFFLOAD_UDPCKSUM; 2010 #ifdef TRY_TX_UDP_CSUM 2011 sc->sc_tx_capability |= OFFLOAD_UDPCKSUM; 2012 ifp->if_capabilities |= IFCAP_CSUM_UDPv4; 2013 #endif 2014 } 2015 2016 if (txp_command(sc, TXP_CMD_OFFLOAD_WRITE, 0, 2017 sc->sc_tx_capability, sc->sc_rx_capability, NULL, NULL, NULL, 1)) 2018 goto out; 2019 2020 out: 2021 if (rsp != NULL) 2022 free(rsp, M_DEVBUF); 2023 } 2024