/*	$OpenBSD: if_txp.c,v 1.55 2001/09/11 20:05:25 miod Exp $	*/

/*
 * Copyright (c) 2001
 *	Jason L. Wright <jason@thought.net>, Theo de Raadt, and
 *	Aaron Campbell <aaron@monkey.org>.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Jason L. Wright,
 *	Theo de Raadt and Aaron Campbell.
 * 4. Neither the name of the author nor the names of any co-contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Driver for 3c990 (Typhoon) Ethernet ASIC
 */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>
#include <sys/timeout.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_types.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#endif

#include <net/if_media.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#if NVLAN > 0
#include <net/if_vlan_var.h>
#endif

#include <vm/vm.h>		/* for vtophys */
#include <machine/bus.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_txpreg.h>

#include <dev/microcode/typhoon/3c990img.h>

int txp_probe __P((struct device *, void *, void *));
void txp_attach __P((struct device *, struct device *, void *));
int txp_intr __P((void *));
void txp_tick __P((void *));
void txp_shutdown __P((void *));
int txp_ioctl __P((struct ifnet *, u_long, caddr_t));
void txp_start __P((struct ifnet *));
void txp_stop __P((struct txp_softc *));
void txp_init __P((struct txp_softc *));
void txp_watchdog __P((struct ifnet *));

int txp_chip_init __P((struct txp_softc *));
int txp_reset_adapter __P((struct txp_softc *));
int txp_download_fw __P((struct txp_softc *));
int txp_download_fw_wait __P((struct txp_softc *));
int txp_download_fw_section __P((struct txp_softc *,
    struct txp_fw_section_header *, int));
int txp_alloc_rings __P((struct txp_softc *));
void txp_dma_free __P((struct txp_softc *, struct txp_dma_alloc *));
int txp_dma_malloc __P((struct txp_softc *, bus_size_t,
    struct txp_dma_alloc *, int));
void txp_set_filter __P((struct txp_softc *));

int txp_cmd_desc_numfree __P((struct txp_softc *));
int txp_command __P((struct txp_softc *, u_int16_t, u_int16_t, u_int32_t,
    u_int32_t, u_int16_t *, u_int32_t *, u_int32_t *, int));
int txp_command2 __P((struct txp_softc *, u_int16_t, u_int16_t,
    u_int32_t, u_int32_t, struct txp_ext_desc *, u_int8_t,
    struct txp_rsp_desc **, int));
int txp_response __P((struct txp_softc *, u_int32_t, u_int16_t, u_int16_t,
    struct txp_rsp_desc **));
void txp_rsp_fixup __P((struct txp_softc *, struct txp_rsp_desc *,
    struct txp_rsp_desc *));
void txp_capabilities __P((struct txp_softc *));

void txp_ifmedia_sts __P((struct ifnet *, struct ifmediareq *));
int txp_ifmedia_upd __P((struct ifnet *));
void txp_show_descriptor __P((void *));
void txp_tx_reclaim __P((struct txp_softc *, struct txp_tx_ring *,
    struct txp_dma_alloc *));
void txp_rxbuf_reclaim __P((struct txp_softc *));
void txp_rx_reclaim __P((struct txp_softc *, struct txp_rx_ring *,
    struct txp_dma_alloc *));

struct cfattach txp_ca = {
	sizeof(struct txp_softc), txp_probe, txp_attach,
};

struct cfdriver txp_cd = {
	0, "txp", DV_IFNET
};

int
txp_probe(parent, match, aux)
	struct device *parent;
	void *match, *aux;
{
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;

	if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_3COM)
		return (0);

	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_3COM_3CR990TX95:
	case PCI_PRODUCT_3COM_3CR990TX97:
	case PCI_PRODUCT_3COM_3CR990SVR95:
	case PCI_PRODUCT_3COM_3CR990SVR97:
	case PCI_PRODUCT_3COM_3C990BTXM:
	case PCI_PRODUCT_3COM_3C990BSVR:
		return (1);
	}

	return (0);
}
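
/*
 * Attach-time bring-up, in order: verify that bus mastering and
 * memory access are enabled, map the register window, hook the
 * interrupt, reset the chip, download the runtime firmware, and
 * allocate the shared rings before reading the station address
 * from the adapter.
 */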
void
txp_attach(parent, self, aux)
	struct device *parent, *self;
	void *aux;
{
	struct txp_softc *sc = (struct txp_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_size_t iosize;
	u_int32_t command;
	u_int16_t p1;
	u_int32_t p2;

	sc->sc_cold = 1;

	command = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);

	if (!(command & PCI_COMMAND_MASTER_ENABLE)) {
		printf(": failed to enable bus mastering\n");
		return;
	}

	if (!(command & PCI_COMMAND_MEM_ENABLE)) {
		printf(": failed to enable memory mapping\n");
		return;
	}
	if (pci_mapreg_map(pa, TXP_PCI_LOMEM, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_bt, &sc->sc_bh, NULL, &iosize, 0)) {
		printf(": can't map mem space %d\n", 0);
		return;
	}

	sc->sc_dmat = pa->pa_dmat;

	/*
	 * Allocate our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		return;
	}

	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, txp_intr, sc,
	    self->dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf(": %s", intrstr);

	if (txp_chip_init(sc))
		return;

	if (txp_download_fw(sc))
		return;

	if (txp_alloc_rings(sc))
		return;

	if (txp_command(sc, TXP_CMD_MAX_PKT_SIZE_WRITE, TXP_MAX_PKTLEN, 0, 0,
	    NULL, NULL, NULL, 1))
		return;

	if (txp_command(sc, TXP_CMD_STATION_ADDRESS_READ, 0, 0, 0,
	    &p1, &p2, NULL, 1))
		return;

	txp_set_filter(sc);
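
	/*
	 * The station address arrives packed into two register words
	 * (p1: octets 0-1, p2: octets 2-5); the casts below unpack it
	 * a byte at a time into wire order.
	 */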
	sc->sc_arpcom.ac_enaddr[0] = ((u_int8_t *)&p1)[1];
	sc->sc_arpcom.ac_enaddr[1] = ((u_int8_t *)&p1)[0];
	sc->sc_arpcom.ac_enaddr[2] = ((u_int8_t *)&p2)[3];
	sc->sc_arpcom.ac_enaddr[3] = ((u_int8_t *)&p2)[2];
	sc->sc_arpcom.ac_enaddr[4] = ((u_int8_t *)&p2)[1];
	sc->sc_arpcom.ac_enaddr[5] = ((u_int8_t *)&p2)[0];

	printf(" address %s\n", ether_sprintf(sc->sc_arpcom.ac_enaddr));
	sc->sc_cold = 0;

	ifmedia_init(&sc->sc_ifmedia, 0, txp_ifmedia_upd, txp_ifmedia_sts);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_10_T, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_10_T|IFM_HDX, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_10_T|IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_TX, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_TX|IFM_HDX, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_100_TX|IFM_FDX, 0, NULL);
	ifmedia_add(&sc->sc_ifmedia, IFM_ETHER|IFM_AUTO, 0, NULL);

	sc->sc_xcvr = TXP_XCVR_AUTO;
	txp_command(sc, TXP_CMD_XCVR_SELECT, TXP_XCVR_AUTO, 0, 0,
	    NULL, NULL, NULL, 0);
	ifmedia_set(&sc->sc_ifmedia, IFM_ETHER|IFM_AUTO);

	ifp->if_softc = sc;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = txp_ioctl;
	ifp->if_output = ether_output;
	ifp->if_start = txp_start;
	ifp->if_watchdog = txp_watchdog;
	ifp->if_baudrate = 10000000;
	IFQ_SET_MAXLEN(&ifp->if_snd, TX_ENTRIES);
	IFQ_SET_READY(&ifp->if_snd);
	ifp->if_capabilities = 0;
	bcopy(sc->sc_dev.dv_xname, ifp->if_xname, IFNAMSIZ);

	txp_capabilities(sc);

	timeout_set(&sc->sc_tick, txp_tick, sc);

	/*
	 * Attach us everywhere
	 */
	if_attach(ifp);
	ether_ifattach(ifp);

	shutdownhook_establish(txp_shutdown, sc);
}

int
txp_chip_init(sc)
	struct txp_softc *sc;
{
	/* disable interrupts */
	WRITE_REG(sc, TXP_IER, 0);
	WRITE_REG(sc, TXP_IMR,
	    TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_LATCH);

	/* ack all interrupts */
	WRITE_REG(sc, TXP_ISR, TXP_INT_RESERVED | TXP_INT_LATCH |
	    TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 |
	    TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_A2H_3 | TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0);

	if (txp_reset_adapter(sc))
		return (-1);

	/* disable interrupts */
	WRITE_REG(sc, TXP_IER, 0);
	WRITE_REG(sc, TXP_IMR,
	    TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_LATCH);

	/* ack all interrupts */
	WRITE_REG(sc, TXP_ISR, TXP_INT_RESERVED | TXP_INT_LATCH |
	    TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 |
	    TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_A2H_3 | TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0);

	return (0);
}

int
txp_reset_adapter(sc)
	struct txp_softc *sc;
{
	u_int32_t r;
	int i;

	WRITE_REG(sc, TXP_SRR, TXP_SRR_ALL);
	DELAY(1000);
	WRITE_REG(sc, TXP_SRR, 0);

	/* Should wait max 6 seconds */
	for (i = 0; i < 6000; i++) {
		r = READ_REG(sc, TXP_A2H_0);
		if (r == STAT_WAITING_FOR_HOST_REQUEST)
			break;
		DELAY(1000);
	}

	if (r != STAT_WAITING_FOR_HOST_REQUEST) {
		printf("%s: reset hung\n", TXP_DEVNAME(sc));
		return (-1);
	}

	return (0);
}
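
/*
 * Firmware download uses the H2A/A2H mailbox registers: the host
 * posts a boot command in TXP_H2A_0 (parameters in TXP_H2A_1..5)
 * and the adapter reports its state back through TXP_A2H_0,
 * raising TXP_INT_A2H_0 when it is ready for the next section.
 */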
int
txp_download_fw(sc)
	struct txp_softc *sc;
{
	struct txp_fw_file_header *fileheader;
	struct txp_fw_section_header *secthead;
	int sect;
	u_int32_t r, i, ier, imr;

	ier = READ_REG(sc, TXP_IER);
	WRITE_REG(sc, TXP_IER, ier | TXP_INT_A2H_0);

	imr = READ_REG(sc, TXP_IMR);
	WRITE_REG(sc, TXP_IMR, imr | TXP_INT_A2H_0);

	for (i = 0; i < 10000; i++) {
		r = READ_REG(sc, TXP_A2H_0);
		if (r == STAT_WAITING_FOR_HOST_REQUEST)
			break;
		DELAY(50);
	}
	if (r != STAT_WAITING_FOR_HOST_REQUEST) {
		printf(": not waiting for host request\n");
		return (-1);
	}

	/* Ack the status */
	WRITE_REG(sc, TXP_ISR, TXP_INT_A2H_0);

	fileheader = (struct txp_fw_file_header *)tc990image;
	if (bcmp("TYPHOON", fileheader->magicid, sizeof(fileheader->magicid))) {
		printf(": fw invalid magic\n");
		return (-1);
	}

	/* Tell boot firmware to get ready for image */
	WRITE_REG(sc, TXP_H2A_1, fileheader->addr);
	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_RUNTIME_IMAGE);

	if (txp_download_fw_wait(sc)) {
		printf(": fw wait failed, initial\n");
		return (-1);
	}

	secthead = (struct txp_fw_section_header *)(((u_int8_t *)tc990image) +
	    sizeof(struct txp_fw_file_header));

	for (sect = 0; sect < fileheader->nsections; sect++) {
		if (txp_download_fw_section(sc, secthead, sect))
			return (-1);
		secthead = (struct txp_fw_section_header *)
		    (((u_int8_t *)secthead) + secthead->nbytes +
		    sizeof(*secthead));
	}

	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_DOWNLOAD_COMPLETE);

	for (i = 0; i < 10000; i++) {
		r = READ_REG(sc, TXP_A2H_0);
		if (r == STAT_WAITING_FOR_BOOT)
			break;
		DELAY(50);
	}
	if (r != STAT_WAITING_FOR_BOOT) {
		printf(": not waiting for boot\n");
		return (-1);
	}

	WRITE_REG(sc, TXP_IER, ier);
	WRITE_REG(sc, TXP_IMR, imr);

	return (0);
}

int
txp_download_fw_wait(sc)
	struct txp_softc *sc;
{
	u_int32_t i, r;

	for (i = 0; i < 10000; i++) {
		r = READ_REG(sc, TXP_ISR);
		if (r & TXP_INT_A2H_0)
			break;
		DELAY(50);
	}

	if (!(r & TXP_INT_A2H_0)) {
		printf(": fw wait failed comm0\n");
		return (-1);
	}

	WRITE_REG(sc, TXP_ISR, TXP_INT_A2H_0);

	r = READ_REG(sc, TXP_A2H_0);
	if (r != STAT_WAITING_FOR_SEGMENT) {
		printf(": fw not waiting for segment\n");
		return (-1);
	}
	return (0);
}

int
txp_download_fw_section(sc, sect, sectnum)
	struct txp_softc *sc;
	struct txp_fw_section_header *sect;
	int sectnum;
{
	struct txp_dma_alloc dma;
	int rseg, err = 0;
	struct mbuf m;
	u_int16_t csum;

	/* Skip zero length sections */
	if (sect->nbytes == 0)
		return (0);

	/* Make sure we aren't past the end of the image */
	rseg = ((u_int8_t *)sect) - ((u_int8_t *)tc990image);
	if (rseg >= sizeof(tc990image)) {
		printf(": fw invalid section address, section %d\n", sectnum);
		return (-1);
	}

	/* Make sure this section doesn't go past the end */
	rseg += sect->nbytes;
	if (rseg >= sizeof(tc990image)) {
		printf(": fw truncated section %d\n", sectnum);
		return (-1);
	}

	/* map a buffer, copy segment to it, get physaddr */
	if (txp_dma_malloc(sc, sect->nbytes, &dma, 0)) {
		printf(": fw dma malloc failed, section %d\n", sectnum);
		return (-1);
	}

	bcopy(((u_int8_t *)sect) + sizeof(*sect), dma.dma_vaddr,
	    sect->nbytes);

	/*
	 * dummy up mbuf and verify section checksum
	 */
	m.m_type = MT_DATA;
	m.m_next = m.m_nextpkt = NULL;
	m.m_len = sect->nbytes;
	m.m_data = dma.dma_vaddr;
	m.m_flags = 0;
	csum = in_cksum(&m, sect->nbytes);
	if (csum != sect->cksum) {
		printf(": fw section %d, bad cksum (expected 0x%x got 0x%x)\n",
		    sectnum, sect->cksum, csum);
		err = -1;
		goto bail;
	}

	txp_bus_dmamap_sync(sc->sc_dmat, dma.dma_map, 0,
	    dma.dma_map->dm_mapsize, BUS_DMASYNC_PREWRITE);

	WRITE_REG(sc, TXP_H2A_1, sect->nbytes);
	WRITE_REG(sc, TXP_H2A_2, sect->cksum);
	WRITE_REG(sc, TXP_H2A_3, sect->addr);
	WRITE_REG(sc, TXP_H2A_4, dma.dma_paddr >> 32);
	WRITE_REG(sc, TXP_H2A_5, dma.dma_paddr & 0xffffffff);
	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_SEGMENT_AVAILABLE);

	if (txp_download_fw_wait(sc)) {
		printf(": fw wait failed, section %d\n", sectnum);
		err = -1;
	}

	txp_bus_dmamap_sync(sc->sc_dmat, dma.dma_map, 0,
	    dma.dma_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);

bail:
	txp_dma_free(sc, &dma);

	return (err);
}
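
/*
 * Interrupt service: mask everything, then ack-and-loop on the ISR,
 * draining the rx rings, refilling rx buffers and reclaiming
 * completed tx descriptors until the ISR reads back zero.
 * Everything except TXP_INT_A2H_3 is unmasked again on the way out.
 */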
int
txp_intr(vsc)
	void *vsc;
{
	struct txp_softc *sc = vsc;
	struct txp_hostvar *hv = sc->sc_hostvar;
	u_int32_t isr;
	int claimed = 0;

	/* mask all interrupts */
	WRITE_REG(sc, TXP_IMR, TXP_INT_RESERVED | TXP_INT_SELF |
	    TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 |
	    TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0 |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT | TXP_INT_LATCH);

	txp_bus_dmamap_sync(sc->sc_dmat, sc->sc_host_dma.dma_map, 0,
	    sizeof(struct txp_hostvar),
	    BUS_DMASYNC_POSTWRITE|BUS_DMASYNC_POSTREAD);

	isr = READ_REG(sc, TXP_ISR);
	while (isr) {
		claimed = 1;
		WRITE_REG(sc, TXP_ISR, isr);

		if ((*sc->sc_rxhir.r_roff) != (*sc->sc_rxhir.r_woff))
			txp_rx_reclaim(sc, &sc->sc_rxhir,
			    &sc->sc_rxhiring_dma);
		if ((*sc->sc_rxlor.r_roff) != (*sc->sc_rxlor.r_woff))
			txp_rx_reclaim(sc, &sc->sc_rxlor,
			    &sc->sc_rxloring_dma);

		if (hv->hv_rx_buf_write_idx == hv->hv_rx_buf_read_idx)
			txp_rxbuf_reclaim(sc);

		if (sc->sc_txhir.r_cnt && (sc->sc_txhir.r_cons !=
		    TXP_OFFSET2IDX(*(sc->sc_txhir.r_off))))
			txp_tx_reclaim(sc, &sc->sc_txhir,
			    &sc->sc_txhiring_dma);

		if (sc->sc_txlor.r_cnt && (sc->sc_txlor.r_cons !=
		    TXP_OFFSET2IDX(*(sc->sc_txlor.r_off))))
			txp_tx_reclaim(sc, &sc->sc_txlor,
			    &sc->sc_txloring_dma);

		isr = READ_REG(sc, TXP_ISR);
	}

	txp_bus_dmamap_sync(sc->sc_dmat, sc->sc_host_dma.dma_map, 0,
	    sizeof(struct txp_hostvar),
	    BUS_DMASYNC_POSTWRITE|BUS_DMASYNC_POSTREAD);

	/* unmask all interrupts */
	WRITE_REG(sc, TXP_IMR, TXP_INT_A2H_3);

	txp_start(&sc->sc_arpcom.ac_if);

	return (claimed);
}
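
/*
 * Drain one rx ring: descriptors between the read and write offsets
 * are complete.  The software descriptor pointer was stashed in
 * rx_vaddrlo/rx_vaddrhi when the buffer was posted and is recovered
 * here to find the mbuf and its dma map.
 */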
void
txp_rx_reclaim(sc, r, dma)
	struct txp_softc *sc;
	struct txp_rx_ring *r;
	struct txp_dma_alloc *dma;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct txp_rx_desc *rxd;
	struct mbuf *m;
	struct txp_swdesc *sd;
	u_int32_t roff, woff;
	int sumflags = 0, idx;

	roff = *r->r_roff;
	woff = *r->r_woff;
	idx = roff / sizeof(struct txp_rx_desc);
	rxd = r->r_desc + idx;

	while (roff != woff) {

		txp_bus_dmamap_sync(sc->sc_dmat, dma->dma_map,
		    idx * sizeof(struct txp_rx_desc),
		    sizeof(struct txp_rx_desc), BUS_DMASYNC_POSTREAD);

		if (rxd->rx_flags & RX_FLAGS_ERROR) {
			printf("%s: error 0x%x\n", sc->sc_dev.dv_xname,
			    rxd->rx_stat);
			ifp->if_ierrors++;
			goto next;
		}

		/* retrieve stashed pointer */
		bcopy((u_long *)&rxd->rx_vaddrlo, &sd, sizeof(sd));

		txp_bus_dmamap_sync(sc->sc_dmat, sd->sd_map, 0,
		    sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTREAD);
		bus_dmamap_unload(sc->sc_dmat, sd->sd_map);
		bus_dmamap_destroy(sc->sc_dmat, sd->sd_map);
		m = sd->sd_mbuf;
		free(sd, M_DEVBUF);
		m->m_pkthdr.len = m->m_len = rxd->rx_len;

#ifdef __STRICT_ALIGNMENT
		{
			/*
			 * XXX Nice chip, except it won't accept "off by 2"
			 * buffers, so we're forced to copy.  Supposedly
			 * this will be fixed in a newer firmware rev
			 * and this will be temporary.
			 */
			struct mbuf *mnew;

			MGETHDR(mnew, M_DONTWAIT, MT_DATA);
			if (mnew == NULL) {
				m_freem(m);
				goto next;
			}
			if (m->m_len > (MHLEN - 2)) {
				MCLGET(mnew, M_DONTWAIT);
				if (!(mnew->m_flags & M_EXT)) {
					m_freem(mnew);
					m_freem(m);
					goto next;
				}
			}
			mnew->m_pkthdr.rcvif = ifp;
			mnew->m_pkthdr.len = mnew->m_len = m->m_len;
			mnew->m_data += 2;
			bcopy(m->m_data, mnew->m_data, m->m_len);
			m_freem(m);
			m = mnew;
		}
#endif

#if NBPFILTER > 0
		/*
		 * Handle BPF listeners.  Let the BPF user see the packet.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif

		if (rxd->rx_stat & RX_STAT_IPCKSUMBAD)
			sumflags |= M_IPV4_CSUM_IN_BAD;
		else if (rxd->rx_stat & RX_STAT_IPCKSUMGOOD)
			sumflags |= M_IPV4_CSUM_IN_OK;

		if (rxd->rx_stat & RX_STAT_TCPCKSUMBAD)
			sumflags |= M_TCP_CSUM_IN_BAD;
		else if (rxd->rx_stat & RX_STAT_TCPCKSUMGOOD)
			sumflags |= M_TCP_CSUM_IN_OK;

		if (rxd->rx_stat & RX_STAT_UDPCKSUMBAD)
			sumflags |= M_UDP_CSUM_IN_BAD;
		else if (rxd->rx_stat & RX_STAT_UDPCKSUMGOOD)
			sumflags |= M_UDP_CSUM_IN_OK;

		m->m_pkthdr.csum = sumflags;

#if NVLAN > 0
		if (rxd->rx_stat & RX_STAT_VLAN) {
			if (vlan_input_tag(m, htons(rxd->rx_vlan >> 16)) < 0)
				ifp->if_noproto++;
			goto next;
		}
#endif

		ether_input_mbuf(ifp, m);

next:
		txp_bus_dmamap_sync(sc->sc_dmat, dma->dma_map,
		    idx * sizeof(struct txp_rx_desc),
		    sizeof(struct txp_rx_desc), BUS_DMASYNC_PREREAD);

		roff += sizeof(struct txp_rx_desc);
		if (roff == (RX_ENTRIES * sizeof(struct txp_rx_desc))) {
			idx = 0;
			roff = 0;
			rxd = r->r_desc;
		} else {
			idx++;
			rxd++;
		}
		woff = *r->r_woff;
	}

	*r->r_roff = woff;
}
void
txp_rxbuf_reclaim(sc)
	struct txp_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct txp_hostvar *hv = sc->sc_hostvar;
	struct txp_rxbuf_desc *rbd;
	struct txp_swdesc *sd;
	u_int32_t i, end;

	end = TXP_OFFSET2IDX(hv->hv_rx_buf_read_idx);
	i = TXP_OFFSET2IDX(hv->hv_rx_buf_write_idx);

	if (++i == RXBUF_ENTRIES)
		i = 0;

	rbd = sc->sc_rxbufs + i;

	while (i != end) {
		sd = (struct txp_swdesc *)malloc(sizeof(struct txp_swdesc),
		    M_DEVBUF, M_NOWAIT);
		if (sd == NULL)
			break;

		MGETHDR(sd->sd_mbuf, M_DONTWAIT, MT_DATA);
		if (sd->sd_mbuf == NULL)
			goto err_sd;

		MCLGET(sd->sd_mbuf, M_DONTWAIT);
		if ((sd->sd_mbuf->m_flags & M_EXT) == 0)
			goto err_mbuf;
		sd->sd_mbuf->m_pkthdr.rcvif = ifp;
		sd->sd_mbuf->m_pkthdr.len = sd->sd_mbuf->m_len = MCLBYTES;
		if (bus_dmamap_create(sc->sc_dmat, TXP_MAX_PKTLEN, 1,
		    TXP_MAX_PKTLEN, 0, BUS_DMA_NOWAIT, &sd->sd_map))
			goto err_mbuf;
		if (bus_dmamap_load_mbuf(sc->sc_dmat, sd->sd_map, sd->sd_mbuf,
		    BUS_DMA_NOWAIT)) {
			bus_dmamap_destroy(sc->sc_dmat, sd->sd_map);
			goto err_mbuf;
		}

		txp_bus_dmamap_sync(sc->sc_dmat, sc->sc_rxbufring_dma.dma_map,
		    i * sizeof(struct txp_rxbuf_desc),
		    sizeof(struct txp_rxbuf_desc), BUS_DMASYNC_POSTWRITE);

		/* stash away pointer */
		bcopy(&sd, (u_long *)&rbd->rb_vaddrlo, sizeof(sd));

		rbd->rb_paddrlo = ((u_int64_t)sd->sd_map->dm_segs[0].ds_addr)
		    & 0xffffffff;
		rbd->rb_paddrhi = ((u_int64_t)sd->sd_map->dm_segs[0].ds_addr)
		    >> 32;

		txp_bus_dmamap_sync(sc->sc_dmat, sd->sd_map, 0,
		    sd->sd_map->dm_mapsize, BUS_DMASYNC_PREREAD);

		txp_bus_dmamap_sync(sc->sc_dmat, sc->sc_rxbufring_dma.dma_map,
		    i * sizeof(struct txp_rxbuf_desc),
		    sizeof(struct txp_rxbuf_desc), BUS_DMASYNC_PREWRITE);

		hv->hv_rx_buf_write_idx = TXP_IDX2OFFSET(i);

		if (++i == RXBUF_ENTRIES) {
			i = 0;
			rbd = sc->sc_rxbufs;
		} else
			rbd++;
	}
	return;

err_mbuf:
	m_freem(sd->sd_mbuf);
err_sd:
	free(sd, M_DEVBUF);
}

/*
 * Reclaim mbufs and entries from a transmit ring.
 */
void
txp_tx_reclaim(sc, r, dma)
	struct txp_softc *sc;
	struct txp_tx_ring *r;
	struct txp_dma_alloc *dma;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	u_int32_t idx = TXP_OFFSET2IDX(*(r->r_off));
	u_int32_t cons = r->r_cons, cnt = r->r_cnt;
	struct txp_tx_desc *txd = r->r_desc + cons;
	struct txp_swdesc *sd = sc->sc_txd + cons;
	struct mbuf *m;

	while (cons != idx) {
		if (cnt == 0)
			break;

		txp_bus_dmamap_sync(sc->sc_dmat, dma->dma_map,
		    cons * sizeof(struct txp_tx_desc),
		    sizeof(struct txp_tx_desc),
		    BUS_DMASYNC_POSTWRITE);

		if ((txd->tx_flags & TX_FLAGS_TYPE_M) ==
		    TX_FLAGS_TYPE_DATA) {
			txp_bus_dmamap_sync(sc->sc_dmat, sd->sd_map, 0,
			    sd->sd_map->dm_mapsize, BUS_DMASYNC_POSTWRITE);
			bus_dmamap_unload(sc->sc_dmat, sd->sd_map);
			m = sd->sd_mbuf;
			if (m != NULL) {
				m_freem(m);
				txd->tx_addrlo = 0;
				txd->tx_addrhi = 0;
				ifp->if_opackets++;
			}
		}
		ifp->if_flags &= ~IFF_OACTIVE;

		if (++cons == TX_ENTRIES) {
			txd = r->r_desc;
			cons = 0;
			sd = sc->sc_txd;
		} else {
			txd++;
			sd++;
		}

		cnt--;
	}

	r->r_cons = cons;
	r->r_cnt = cnt;
	if (cnt == 0)
		ifp->if_timer = 0;
}

void
txp_shutdown(vsc)
	void *vsc;
{
	struct txp_softc *sc = (struct txp_softc *)vsc;

	/* mask all interrupts */
	WRITE_REG(sc, TXP_IMR,
	    TXP_INT_SELF | TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_LATCH);

	txp_command(sc, TXP_CMD_TX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 0);
	txp_command(sc, TXP_CMD_RX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 0);
	txp_command(sc, TXP_CMD_HALT, 0, 0, 0, NULL, NULL, NULL, 0);
}
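
/*
 * All shared structures hang off the boot record: each ring's bus
 * address and size is filled in here, and the record's own address
 * is later handed to the firmware with TXP_BOOTCMD_REGISTER_BOOT_RECORD.
 */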
int
txp_alloc_rings(sc)
	struct txp_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct txp_boot_record *boot;
	struct txp_swdesc *sd;
	u_int32_t r;
	int i, j;

	/* boot record */
	if (txp_dma_malloc(sc, sizeof(struct txp_boot_record),
	    &sc->sc_boot_dma, BUS_DMA_COHERENT)) {
		printf(": can't allocate boot record\n");
		return (-1);
	}
	boot = (struct txp_boot_record *)sc->sc_boot_dma.dma_vaddr;
	bzero(boot, sizeof(*boot));
	sc->sc_boot = boot;

	/* host variables */
	if (txp_dma_malloc(sc, sizeof(struct txp_hostvar), &sc->sc_host_dma,
	    BUS_DMA_COHERENT)) {
		printf(": can't allocate host ring\n");
		goto bail_boot;
	}
	bzero(sc->sc_host_dma.dma_vaddr, sizeof(struct txp_hostvar));
	boot->br_hostvar_lo = sc->sc_host_dma.dma_paddr & 0xffffffff;
	boot->br_hostvar_hi = sc->sc_host_dma.dma_paddr >> 32;
	sc->sc_hostvar = (struct txp_hostvar *)sc->sc_host_dma.dma_vaddr;

	/* high priority tx ring */
	if (txp_dma_malloc(sc, sizeof(struct txp_tx_desc) * TX_ENTRIES,
	    &sc->sc_txhiring_dma, BUS_DMA_COHERENT)) {
		printf(": can't allocate high tx ring\n");
		goto bail_host;
	}
	bzero(sc->sc_txhiring_dma.dma_vaddr,
	    sizeof(struct txp_tx_desc) * TX_ENTRIES);
	boot->br_txhipri_lo = sc->sc_txhiring_dma.dma_paddr & 0xffffffff;
	boot->br_txhipri_hi = sc->sc_txhiring_dma.dma_paddr >> 32;
	boot->br_txhipri_siz = TX_ENTRIES * sizeof(struct txp_tx_desc);
	sc->sc_txhir.r_reg = TXP_H2A_1;
	sc->sc_txhir.r_desc =
	    (struct txp_tx_desc *)sc->sc_txhiring_dma.dma_vaddr;
	sc->sc_txhir.r_cons = sc->sc_txhir.r_prod = sc->sc_txhir.r_cnt = 0;
	sc->sc_txhir.r_off = &sc->sc_hostvar->hv_tx_hi_desc_read_idx;
	for (i = 0; i < TX_ENTRIES; i++) {
		if (bus_dmamap_create(sc->sc_dmat, TXP_MAX_PKTLEN,
		    TX_ENTRIES - 4, TXP_MAX_SEGLEN, 0,
		    BUS_DMA_NOWAIT, &sc->sc_txd[i].sd_map) != 0) {
			for (j = 0; j < i; j++) {
				bus_dmamap_destroy(sc->sc_dmat,
				    sc->sc_txd[j].sd_map);
				sc->sc_txd[j].sd_map = NULL;
			}
			goto bail_txhiring;
		}
	}

	/* low priority tx ring */
	if (txp_dma_malloc(sc, sizeof(struct txp_tx_desc) * TX_ENTRIES,
	    &sc->sc_txloring_dma, BUS_DMA_COHERENT)) {
		printf(": can't allocate low tx ring\n");
		goto bail_txhiring;
	}
	bzero(sc->sc_txloring_dma.dma_vaddr,
	    sizeof(struct txp_tx_desc) * TX_ENTRIES);
	boot->br_txlopri_lo = sc->sc_txloring_dma.dma_paddr & 0xffffffff;
	boot->br_txlopri_hi = sc->sc_txloring_dma.dma_paddr >> 32;
	boot->br_txlopri_siz = TX_ENTRIES * sizeof(struct txp_tx_desc);
	sc->sc_txlor.r_reg = TXP_H2A_3;
	sc->sc_txlor.r_desc =
	    (struct txp_tx_desc *)sc->sc_txloring_dma.dma_vaddr;
	sc->sc_txlor.r_cons = sc->sc_txlor.r_prod = sc->sc_txlor.r_cnt = 0;
	sc->sc_txlor.r_off = &sc->sc_hostvar->hv_tx_lo_desc_read_idx;

	/* high priority rx ring */
	if (txp_dma_malloc(sc, sizeof(struct txp_rx_desc) * RX_ENTRIES,
	    &sc->sc_rxhiring_dma, BUS_DMA_COHERENT)) {
		printf(": can't allocate high rx ring\n");
		goto bail_txloring;
	}
	bzero(sc->sc_rxhiring_dma.dma_vaddr,
	    sizeof(struct txp_rx_desc) * RX_ENTRIES);
	boot->br_rxhipri_lo = sc->sc_rxhiring_dma.dma_paddr & 0xffffffff;
	boot->br_rxhipri_hi = sc->sc_rxhiring_dma.dma_paddr >> 32;
	boot->br_rxhipri_siz = RX_ENTRIES * sizeof(struct txp_rx_desc);
	sc->sc_rxhir.r_desc =
	    (struct txp_rx_desc *)sc->sc_rxhiring_dma.dma_vaddr;
	sc->sc_rxhir.r_roff = &sc->sc_hostvar->hv_rx_hi_read_idx;
	sc->sc_rxhir.r_woff = &sc->sc_hostvar->hv_rx_hi_write_idx;
	txp_bus_dmamap_sync(sc->sc_dmat, sc->sc_rxhiring_dma.dma_map,
	    0, sc->sc_rxhiring_dma.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD);

	/* low priority ring */
	if (txp_dma_malloc(sc, sizeof(struct txp_rx_desc) * RX_ENTRIES,
	    &sc->sc_rxloring_dma, BUS_DMA_COHERENT)) {
		printf(": can't allocate low rx ring\n");
		goto bail_rxhiring;
	}
	bzero(sc->sc_rxloring_dma.dma_vaddr,
	    sizeof(struct txp_rx_desc) * RX_ENTRIES);
	boot->br_rxlopri_lo = sc->sc_rxloring_dma.dma_paddr & 0xffffffff;
	boot->br_rxlopri_hi = sc->sc_rxloring_dma.dma_paddr >> 32;
	boot->br_rxlopri_siz = RX_ENTRIES * sizeof(struct txp_rx_desc);
	sc->sc_rxlor.r_desc =
	    (struct txp_rx_desc *)sc->sc_rxloring_dma.dma_vaddr;
	sc->sc_rxlor.r_roff = &sc->sc_hostvar->hv_rx_lo_read_idx;
	sc->sc_rxlor.r_woff = &sc->sc_hostvar->hv_rx_lo_write_idx;
	txp_bus_dmamap_sync(sc->sc_dmat, sc->sc_rxloring_dma.dma_map,
	    0, sc->sc_rxloring_dma.dma_map->dm_mapsize, BUS_DMASYNC_PREREAD);

	/* command ring */
	if (txp_dma_malloc(sc, sizeof(struct txp_cmd_desc) * CMD_ENTRIES,
	    &sc->sc_cmdring_dma, BUS_DMA_COHERENT)) {
		printf(": can't allocate command ring\n");
		goto bail_rxloring;
	}
	bzero(sc->sc_cmdring_dma.dma_vaddr,
	    sizeof(struct txp_cmd_desc) * CMD_ENTRIES);
	boot->br_cmd_lo = sc->sc_cmdring_dma.dma_paddr & 0xffffffff;
	boot->br_cmd_hi = sc->sc_cmdring_dma.dma_paddr >> 32;
	boot->br_cmd_siz = CMD_ENTRIES * sizeof(struct txp_cmd_desc);
	sc->sc_cmdring.base =
	    (struct txp_cmd_desc *)sc->sc_cmdring_dma.dma_vaddr;
	sc->sc_cmdring.size = CMD_ENTRIES * sizeof(struct txp_cmd_desc);
	sc->sc_cmdring.lastwrite = 0;

	/* response ring */
	if (txp_dma_malloc(sc, sizeof(struct txp_rsp_desc) * RSP_ENTRIES,
	    &sc->sc_rspring_dma, BUS_DMA_COHERENT)) {
		printf(": can't allocate response ring\n");
		goto bail_cmdring;
	}
	bzero(sc->sc_rspring_dma.dma_vaddr,
	    sizeof(struct txp_rsp_desc) * RSP_ENTRIES);
	boot->br_resp_lo = sc->sc_rspring_dma.dma_paddr & 0xffffffff;
	boot->br_resp_hi = sc->sc_rspring_dma.dma_paddr >> 32;
	boot->br_resp_siz = RSP_ENTRIES * sizeof(struct txp_rsp_desc);
	sc->sc_rspring.base =
	    (struct txp_rsp_desc *)sc->sc_rspring_dma.dma_vaddr;
	sc->sc_rspring.size = RSP_ENTRIES * sizeof(struct txp_rsp_desc);
	sc->sc_rspring.lastwrite = 0;

	/* receive buffer ring */
	if (txp_dma_malloc(sc, sizeof(struct txp_rxbuf_desc) * RXBUF_ENTRIES,
	    &sc->sc_rxbufring_dma, BUS_DMA_COHERENT)) {
		printf(": can't allocate rx buffer ring\n");
		goto bail_rspring;
	}
	bzero(sc->sc_rxbufring_dma.dma_vaddr,
	    sizeof(struct txp_rxbuf_desc) * RXBUF_ENTRIES);
	boot->br_rxbuf_lo = sc->sc_rxbufring_dma.dma_paddr & 0xffffffff;
	boot->br_rxbuf_hi = sc->sc_rxbufring_dma.dma_paddr >> 32;
	boot->br_rxbuf_siz = RXBUF_ENTRIES * sizeof(struct txp_rxbuf_desc);
	sc->sc_rxbufs = (struct txp_rxbuf_desc *)sc->sc_rxbufring_dma.dma_vaddr;
	for (i = 0; i < RXBUF_ENTRIES; i++) {
		sd = (struct txp_swdesc *)malloc(sizeof(struct txp_swdesc),
		    M_DEVBUF, M_NOWAIT);
		if (sd == NULL)
			break;

		MGETHDR(sd->sd_mbuf, M_DONTWAIT, MT_DATA);
		if (sd->sd_mbuf == NULL)
			goto bail_rxbufring;

		MCLGET(sd->sd_mbuf, M_DONTWAIT);
		if ((sd->sd_mbuf->m_flags & M_EXT) == 0)
			goto bail_rxbufring;
		sd->sd_mbuf->m_pkthdr.len = sd->sd_mbuf->m_len = MCLBYTES;
		sd->sd_mbuf->m_pkthdr.rcvif = ifp;
		if (bus_dmamap_create(sc->sc_dmat, TXP_MAX_PKTLEN, 1,
		    TXP_MAX_PKTLEN, 0, BUS_DMA_NOWAIT, &sd->sd_map))
			goto bail_rxbufring;
		if (bus_dmamap_load_mbuf(sc->sc_dmat, sd->sd_map, sd->sd_mbuf,
		    BUS_DMA_NOWAIT)) {
			bus_dmamap_destroy(sc->sc_dmat, sd->sd_map);
			goto bail_rxbufring;
		}
		txp_bus_dmamap_sync(sc->sc_dmat, sd->sd_map, 0,
		    sd->sd_map->dm_mapsize, BUS_DMASYNC_PREREAD);

		/* stash away pointer */
		bcopy(&sd, (u_long *)&sc->sc_rxbufs[i].rb_vaddrlo, sizeof(sd));

		sc->sc_rxbufs[i].rb_paddrlo =
		    ((u_int64_t)sd->sd_map->dm_segs[0].ds_addr) & 0xffffffff;
		sc->sc_rxbufs[i].rb_paddrhi =
		    ((u_int64_t)sd->sd_map->dm_segs[0].ds_addr) >> 32;
	}
	txp_bus_dmamap_sync(sc->sc_dmat, sc->sc_rxbufring_dma.dma_map,
	    0, sc->sc_rxbufring_dma.dma_map->dm_mapsize,
	    BUS_DMASYNC_PREWRITE);
	sc->sc_hostvar->hv_rx_buf_write_idx = (RXBUF_ENTRIES - 1) *
	    sizeof(struct txp_rxbuf_desc);

	/* zero dma */
	if (txp_dma_malloc(sc, sizeof(u_int32_t), &sc->sc_zero_dma,
	    BUS_DMA_COHERENT)) {
		printf(": can't allocate zero dma\n");
		goto bail_rxbufring;
	}
	bzero(sc->sc_zero_dma.dma_vaddr, sizeof(u_int32_t));
	boot->br_zero_lo = sc->sc_zero_dma.dma_paddr & 0xffffffff;
	boot->br_zero_hi = sc->sc_zero_dma.dma_paddr >> 32;

	/* See if it's waiting for boot, and try to boot it */
	for (i = 0; i < 10000; i++) {
		r = READ_REG(sc, TXP_A2H_0);
		if (r == STAT_WAITING_FOR_BOOT)
			break;
		DELAY(50);
	}
	if (r != STAT_WAITING_FOR_BOOT) {
		printf(": not waiting for boot\n");
		goto bail;
	}
	WRITE_REG(sc, TXP_H2A_2, sc->sc_boot_dma.dma_paddr >> 32);
	WRITE_REG(sc, TXP_H2A_1, sc->sc_boot_dma.dma_paddr & 0xffffffff);
	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_REGISTER_BOOT_RECORD);

	/* See if it booted */
	for (i = 0; i < 10000; i++) {
		r = READ_REG(sc, TXP_A2H_0);
		if (r == STAT_RUNNING)
			break;
		DELAY(50);
	}
	if (r != STAT_RUNNING) {
		printf(": fw not running\n");
		goto bail;
	}

	/* Clear TX and CMD ring write registers */
	WRITE_REG(sc, TXP_H2A_1, TXP_BOOTCMD_NULL);
	WRITE_REG(sc, TXP_H2A_2, TXP_BOOTCMD_NULL);
	WRITE_REG(sc, TXP_H2A_3, TXP_BOOTCMD_NULL);
	WRITE_REG(sc, TXP_H2A_0, TXP_BOOTCMD_NULL);

	return (0);

bail:
	txp_dma_free(sc, &sc->sc_zero_dma);
bail_rxbufring:
	txp_dma_free(sc, &sc->sc_rxbufring_dma);
bail_rspring:
	txp_dma_free(sc, &sc->sc_rspring_dma);
bail_cmdring:
	txp_dma_free(sc, &sc->sc_cmdring_dma);
bail_rxloring:
	txp_dma_free(sc, &sc->sc_rxloring_dma);
bail_rxhiring:
	txp_dma_free(sc, &sc->sc_rxhiring_dma);
bail_txloring:
	txp_dma_free(sc, &sc->sc_txloring_dma);
bail_txhiring:
	txp_dma_free(sc, &sc->sc_txhiring_dma);
bail_host:
	txp_dma_free(sc, &sc->sc_host_dma);
bail_boot:
	txp_dma_free(sc, &sc->sc_boot_dma);
	return (-1);
}
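
/*
 * Standard four-step bus_dma allocation: allocate the memory, map it
 * into kernel virtual space, create a dma map, and load the map to
 * learn the bus address; each failure unwinds the steps before it.
 */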
int
txp_dma_malloc(sc, size, dma, mapflags)
	struct txp_softc *sc;
	bus_size_t size;
	struct txp_dma_alloc *dma;
	int mapflags;
{
	int r;

	if ((r = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0,
	    &dma->dma_seg, 1, &dma->dma_nseg, 0)) != 0)
		goto fail_0;

	if ((r = bus_dmamem_map(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg,
	    size, &dma->dma_vaddr, mapflags | BUS_DMA_NOWAIT)) != 0)
		goto fail_1;

	if ((r = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT, &dma->dma_map)) != 0)
		goto fail_2;

	if ((r = bus_dmamap_load(sc->sc_dmat, dma->dma_map, dma->dma_vaddr,
	    size, NULL, BUS_DMA_NOWAIT)) != 0)
		goto fail_3;

	dma->dma_paddr = dma->dma_map->dm_segs[0].ds_addr;
	return (0);

fail_3:
	bus_dmamap_destroy(sc->sc_dmat, dma->dma_map);
fail_2:
	bus_dmamem_unmap(sc->sc_dmat, dma->dma_vaddr, size);
fail_1:
	bus_dmamem_free(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg);
fail_0:
	return (r);
}

void
txp_dma_free(sc, dma)
	struct txp_softc *sc;
	struct txp_dma_alloc *dma;
{
	bus_dmamap_unload(sc->sc_dmat, dma->dma_map);
	bus_dmamem_unmap(sc->sc_dmat, dma->dma_vaddr,
	    dma->dma_map->dm_mapsize);
	bus_dmamem_free(sc->sc_dmat, &dma->dma_seg, dma->dma_nseg);
	bus_dmamap_destroy(sc->sc_dmat, dma->dma_map);
}

int
txp_ioctl(ifp, command, data)
	struct ifnet *ifp;
	u_long command;
	caddr_t data;
{
	struct txp_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	struct ifaddr *ifa = (struct ifaddr *)data;
	int s, error = 0;

	s = splnet();

	if ((error = ether_ioctl(ifp, &sc->sc_arpcom, command, data)) > 0) {
		splx(s);
		return (error);
	}

	switch (command) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		switch (ifa->ifa_addr->sa_family) {
#ifdef INET
		case AF_INET:
			txp_init(sc);
			arp_ifinit(&sc->sc_arpcom, ifa);
			break;
#endif /* INET */
		default:
			txp_init(sc);
			break;
		}
		break;
	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			txp_init(sc);
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				txp_stop(sc);
		}
		break;
	case SIOCADDMULTI:
	case SIOCDELMULTI:
		error = (command == SIOCADDMULTI) ?
		    ether_addmulti(ifr, &sc->sc_arpcom) :
		    ether_delmulti(ifr, &sc->sc_arpcom);

		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware
			 * filter accordingly.
			 */
			txp_set_filter(sc);
			error = 0;
		}
		break;
	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_ifmedia, command);
		break;
	default:
		error = EINVAL;
		break;
	}

	(void)splx(s);

	return (error);
}

void
txp_init(sc)
	struct txp_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	int s;

	txp_stop(sc);

	s = splnet();

	txp_set_filter(sc);

	txp_command(sc, TXP_CMD_TX_ENABLE, 0, 0, 0, NULL, NULL, NULL, 1);
	txp_command(sc, TXP_CMD_RX_ENABLE, 0, 0, 0, NULL, NULL, NULL, 1);

	WRITE_REG(sc, TXP_IER, TXP_INT_RESERVED | TXP_INT_SELF |
	    TXP_INT_A2H_7 | TXP_INT_A2H_6 | TXP_INT_A2H_5 | TXP_INT_A2H_4 |
	    TXP_INT_A2H_2 | TXP_INT_A2H_1 | TXP_INT_A2H_0 |
	    TXP_INT_DMA3 | TXP_INT_DMA2 | TXP_INT_DMA1 | TXP_INT_DMA0 |
	    TXP_INT_PCI_TABORT | TXP_INT_PCI_MABORT | TXP_INT_LATCH);
	WRITE_REG(sc, TXP_IMR, TXP_INT_A2H_3);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	ifp->if_timer = 0;

	if (!timeout_pending(&sc->sc_tick))
		timeout_add(&sc->sc_tick, hz);

	splx(s);
}

void
txp_tick(vsc)
	void *vsc;
{
	struct txp_softc *sc = vsc;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct txp_rsp_desc *rsp = NULL;
	struct txp_ext_desc *ext;
	int s;

	s = splnet();
	txp_rxbuf_reclaim(sc);

	if (txp_command2(sc, TXP_CMD_READ_STATISTICS, 0, 0, 0, NULL, 0,
	    &rsp, 1))
		goto out;
	if (rsp->rsp_numdesc != 6)
		goto out;
	if (txp_command(sc, TXP_CMD_CLEAR_STATISTICS, 0, 0, 0,
	    NULL, NULL, NULL, 1))
		goto out;
	ext = (struct txp_ext_desc *)(rsp + 1);

	ifp->if_ierrors += ext[3].ext_2 + ext[3].ext_3 + ext[3].ext_4 +
	    ext[4].ext_1 + ext[4].ext_4;
	ifp->if_oerrors += ext[0].ext_1 + ext[1].ext_1 + ext[1].ext_4 +
	    ext[2].ext_1;
	ifp->if_collisions += ext[0].ext_2 + ext[0].ext_3 + ext[1].ext_2 +
	    ext[1].ext_3;
	ifp->if_opackets += rsp->rsp_par2;
	ifp->if_ipackets += ext[2].ext_3;

out:
	if (rsp != NULL)
		free(rsp, M_DEVBUF);

	splx(s);
	timeout_add(&sc->sc_tick, hz);
}

void
txp_start(ifp)
	struct ifnet *ifp;
{
	struct txp_softc *sc = ifp->if_softc;
	struct txp_tx_ring *r = &sc->sc_txhir;
	struct txp_tx_desc *txd;
	struct txp_frag_desc *fxd;
	struct mbuf *m, *mnew;
	struct txp_swdesc *sd;
	u_int32_t firstprod, firstcnt, prod, cnt, i;
#if NVLAN > 0
	struct ifvlan *ifv;
#endif

	if ((ifp->if_flags & (IFF_RUNNING | IFF_OACTIVE)) != IFF_RUNNING)
		return;

	prod = r->r_prod;
	cnt = r->r_cnt;
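
	/*
	 * Each packet takes one tx descriptor followed by one fragment
	 * descriptor per dma segment; four slots are always kept in
	 * reserve.  If the mbuf chain can't be loaded directly, it is
	 * copied into a single mbuf (or cluster) first.
	 */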
	while (1) {
		IFQ_DEQUEUE(&ifp->if_snd, m);
		if (m == NULL)
			break;

		firstprod = prod;
		firstcnt = cnt;

		sd = sc->sc_txd + prod;
		sd->sd_mbuf = m;

		if (bus_dmamap_load_mbuf(sc->sc_dmat, sd->sd_map, m,
		    BUS_DMA_NOWAIT)) {
			MGETHDR(mnew, M_DONTWAIT, MT_DATA);
			if (mnew == NULL)
				goto oactive1;
			if (m->m_pkthdr.len > MHLEN) {
				MCLGET(mnew, M_DONTWAIT);
				if ((mnew->m_flags & M_EXT) == 0) {
					m_freem(mnew);
					goto oactive1;
				}
			}
			m_copydata(m, 0, m->m_pkthdr.len, mtod(mnew, caddr_t));
			mnew->m_pkthdr.len = mnew->m_len = m->m_pkthdr.len;
			m_freem(m);
			m = mnew;
			if (bus_dmamap_load_mbuf(sc->sc_dmat, sd->sd_map, m,
			    BUS_DMA_NOWAIT))
				goto oactive1;
		}

		if ((TX_ENTRIES - cnt) < 4)
			goto oactive;

		txd = r->r_desc + prod;

		txd->tx_flags = TX_FLAGS_TYPE_DATA;
		txd->tx_numdesc = 0;
		txd->tx_addrlo = 0;
		txd->tx_addrhi = 0;
		txd->tx_totlen = 0;
		txd->tx_pflags = 0;
		txd->tx_numdesc = sd->sd_map->dm_nsegs;

		if (++prod == TX_ENTRIES)
			prod = 0;

		if (++cnt >= (TX_ENTRIES - 4))
			goto oactive;

#if NVLAN > 0
		if ((m->m_flags & (M_PROTO1|M_PKTHDR)) == (M_PROTO1|M_PKTHDR) &&
		    m->m_pkthdr.rcvif != NULL) {
			ifv = m->m_pkthdr.rcvif->if_softc;
			txd->tx_pflags = TX_PFLAGS_VLAN |
			    (htons(ifv->ifv_tag) << TX_PFLAGS_VLANTAG_S);
		}
#endif

		if (m->m_pkthdr.csum & M_IPV4_CSUM_OUT)
			txd->tx_pflags |= TX_PFLAGS_IPCKSUM;
#if 0
		if (m->m_pkthdr.csum & M_TCPV4_CSUM_OUT)
			txd->tx_pflags |= TX_PFLAGS_TCPCKSUM;
		if (m->m_pkthdr.csum & M_UDPV4_CSUM_OUT)
			txd->tx_pflags |= TX_PFLAGS_UDPCKSUM;
#endif

		txp_bus_dmamap_sync(sc->sc_dmat, sc->sc_txhiring_dma.dma_map,
		    prod * sizeof(struct txp_tx_desc),
		    sizeof(struct txp_tx_desc), BUS_DMASYNC_PREWRITE);

		fxd = (struct txp_frag_desc *)(r->r_desc + prod);
		for (i = 0; i < sd->sd_map->dm_nsegs; i++) {
			if (++cnt >= (TX_ENTRIES - 4))
				goto oactive;

			fxd->frag_flags = FRAG_FLAGS_TYPE_FRAG;
			fxd->frag_rsvd1 = 0;
			fxd->frag_len = sd->sd_map->dm_segs[i].ds_len;
			fxd->frag_addrlo =
			    ((u_int64_t)sd->sd_map->dm_segs[i].ds_addr) &
			    0xffffffff;
			fxd->frag_addrhi =
			    ((u_int64_t)sd->sd_map->dm_segs[i].ds_addr) >>
			    32;
			fxd->frag_rsvd2 = 0;

			txp_bus_dmamap_sync(sc->sc_dmat,
			    sc->sc_txhiring_dma.dma_map,
			    prod * sizeof(struct txp_frag_desc),
			    sizeof(struct txp_frag_desc),
			    BUS_DMASYNC_PREWRITE);

			if (++prod == TX_ENTRIES) {
				fxd = (struct txp_frag_desc *)r->r_desc;
				prod = 0;
			} else
				fxd++;

		}

		ifp->if_timer = 5;

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif

		txp_bus_dmamap_sync(sc->sc_dmat, sd->sd_map, 0,
		    sd->sd_map->dm_mapsize, BUS_DMASYNC_PREWRITE);
		WRITE_REG(sc, r->r_reg, TXP_IDX2OFFSET(prod));
	}

	r->r_prod = prod;
	r->r_cnt = cnt;
	return;

oactive:
	bus_dmamap_unload(sc->sc_dmat, sd->sd_map);
oactive1:
	ifp->if_flags |= IFF_OACTIVE;
	r->r_prod = firstprod;
	r->r_cnt = firstcnt;
	IF_PREPEND(&ifp->if_snd, m);
}

/*
 * Handle simple commands sent to the typhoon
 */
int
txp_command(sc, id, in1, in2, in3, out1, out2, out3, wait)
	struct txp_softc *sc;
	u_int16_t id, in1, *out1;
	u_int32_t in2, in3, *out2, *out3;
	int wait;
{
	struct txp_rsp_desc *rsp = NULL;

	if (txp_command2(sc, id, in1, in2, in3, NULL, 0, &rsp, wait))
		return (-1);

	if (!wait)
		return (0);

	if (out1 != NULL)
		*out1 = rsp->rsp_par1;
	if (out2 != NULL)
		*out2 = rsp->rsp_par2;
	if (out3 != NULL)
		*out3 = rsp->rsp_par3;
	free(rsp, M_DEVBUF);
	return (0);
}
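
/*
 * Queue a command (with optional extension descriptors) on the
 * command ring.  When a response is requested, poll the response
 * ring for a descriptor matching this command's id and sequence
 * number; the caller frees the returned copy.
 */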
int
txp_command2(sc, id, in1, in2, in3, in_extp, in_extn, rspp, wait)
	struct txp_softc *sc;
	u_int16_t id, in1;
	u_int32_t in2, in3;
	struct txp_ext_desc *in_extp;
	u_int8_t in_extn;
	struct txp_rsp_desc **rspp;
	int wait;
{
	struct txp_hostvar *hv = sc->sc_hostvar;
	struct txp_cmd_desc *cmd;
	struct txp_ext_desc *ext;
	u_int32_t idx, i;
	u_int16_t seq;

	if (txp_cmd_desc_numfree(sc) < (in_extn + 1)) {
		printf("%s: no free cmd descriptors\n", TXP_DEVNAME(sc));
		return (-1);
	}

	idx = sc->sc_cmdring.lastwrite;
	cmd = (struct txp_cmd_desc *)(((u_int8_t *)sc->sc_cmdring.base) + idx);
	bzero(cmd, sizeof(*cmd));

	cmd->cmd_numdesc = in_extn;
	cmd->cmd_seq = seq = sc->sc_seq++;
	cmd->cmd_id = id;
	cmd->cmd_par1 = in1;
	cmd->cmd_par2 = in2;
	cmd->cmd_par3 = in3;
	cmd->cmd_flags = CMD_FLAGS_TYPE_CMD |
	    (wait ? CMD_FLAGS_RESP : 0) | CMD_FLAGS_VALID;

	idx += sizeof(struct txp_cmd_desc);
	if (idx == sc->sc_cmdring.size)
		idx = 0;

	for (i = 0; i < in_extn; i++) {
		ext = (struct txp_ext_desc *)
		    (((u_int8_t *)sc->sc_cmdring.base) + idx);
		bcopy(in_extp, ext, sizeof(struct txp_ext_desc));
		in_extp++;
		idx += sizeof(struct txp_cmd_desc);
		if (idx == sc->sc_cmdring.size)
			idx = 0;
	}

	sc->sc_cmdring.lastwrite = idx;

	WRITE_REG(sc, TXP_H2A_2, sc->sc_cmdring.lastwrite);
	txp_bus_dmamap_sync(sc->sc_dmat, sc->sc_host_dma.dma_map, 0,
	    sizeof(struct txp_hostvar), BUS_DMASYNC_PREREAD);

	if (!wait)
		return (0);

	for (i = 0; i < 10000; i++) {
		txp_bus_dmamap_sync(sc->sc_dmat, sc->sc_host_dma.dma_map, 0,
		    sizeof(struct txp_hostvar), BUS_DMASYNC_POSTREAD);
		idx = hv->hv_resp_read_idx;
		if (idx != hv->hv_resp_write_idx) {
			*rspp = NULL;
			if (txp_response(sc, idx, id, seq, rspp))
				return (-1);
			if (*rspp != NULL)
				break;
		}
		txp_bus_dmamap_sync(sc->sc_dmat, sc->sc_host_dma.dma_map, 0,
		    sizeof(struct txp_hostvar), BUS_DMASYNC_PREREAD);
		DELAY(50);
	}
	if (i == 10000 || (*rspp) == NULL) {
		printf("%s: 0x%x command failed\n", TXP_DEVNAME(sc), id);
		return (-1);
	}

	return (0);
}

int
txp_response(sc, ridx, id, seq, rspp)
	struct txp_softc *sc;
	u_int32_t ridx;
	u_int16_t id;
	u_int16_t seq;
	struct txp_rsp_desc **rspp;
{
	struct txp_hostvar *hv = sc->sc_hostvar;
	struct txp_rsp_desc *rsp;

	while (ridx != hv->hv_resp_write_idx) {
		rsp = (struct txp_rsp_desc *)
		    (((u_int8_t *)sc->sc_rspring.base) + ridx);

		if (id == rsp->rsp_id && rsp->rsp_seq == seq) {
			*rspp = (struct txp_rsp_desc *)malloc(
			    sizeof(struct txp_rsp_desc) *
			    (rsp->rsp_numdesc + 1),
			    M_DEVBUF, M_NOWAIT);
			if ((*rspp) == NULL)
				return (-1);
			txp_rsp_fixup(sc, rsp, *rspp);
			return (0);
		}

		if (rsp->rsp_flags & RSP_FLAGS_ERROR) {
			printf("%s: response error!\n", TXP_DEVNAME(sc));
			txp_rsp_fixup(sc, rsp, NULL);
			ridx = hv->hv_resp_read_idx;
			continue;
		}

		switch (rsp->rsp_id) {
		case TXP_CMD_CYCLE_STATISTICS:
		case TXP_CMD_MEDIA_STATUS_READ:
			break;
		case TXP_CMD_HELLO_RESPONSE:
			printf("%s: hello\n", TXP_DEVNAME(sc));
			break;
		default:
			printf("%s: unknown id(0x%x)\n", TXP_DEVNAME(sc),
			    rsp->rsp_id);
		}

		txp_rsp_fixup(sc, rsp, NULL);
		ridx = hv->hv_resp_read_idx;
		hv->hv_resp_read_idx = ridx;
	}

	return (0);
}
void
txp_rsp_fixup(sc, rsp, dst)
	struct txp_softc *sc;
	struct txp_rsp_desc *rsp, *dst;
{
	struct txp_rsp_desc *src = rsp;
	struct txp_hostvar *hv = sc->sc_hostvar;
	u_int32_t i, ridx;

	ridx = hv->hv_resp_read_idx;

	for (i = 0; i < rsp->rsp_numdesc + 1; i++) {
		if (dst != NULL)
			bcopy(src, dst++, sizeof(struct txp_rsp_desc));
		ridx += sizeof(struct txp_rsp_desc);
		if (ridx == sc->sc_rspring.size) {
			src = sc->sc_rspring.base;
			ridx = 0;
		} else
			src++;
		sc->sc_rspring.lastwrite = hv->hv_resp_read_idx = ridx;
	}

	hv->hv_resp_read_idx = ridx;
}

int
txp_cmd_desc_numfree(sc)
	struct txp_softc *sc;
{
	struct txp_hostvar *hv = sc->sc_hostvar;
	struct txp_boot_record *br = sc->sc_boot;
	u_int32_t widx, ridx, nfree;

	widx = sc->sc_cmdring.lastwrite;
	ridx = hv->hv_cmd_read_idx;

	if (widx == ridx) {
		/* Ring is completely free */
		nfree = br->br_cmd_siz - sizeof(struct txp_cmd_desc);
	} else {
		if (widx > ridx)
			nfree = br->br_cmd_siz -
			    (widx - ridx + sizeof(struct txp_cmd_desc));
		else
			nfree = ridx - widx - sizeof(struct txp_cmd_desc);
	}

	return (nfree / sizeof(struct txp_cmd_desc));
}

void
txp_stop(sc)
	struct txp_softc *sc;
{
	txp_command(sc, TXP_CMD_TX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 1);
	txp_command(sc, TXP_CMD_RX_DISABLE, 0, 0, 0, NULL, NULL, NULL, 1);

	if (timeout_pending(&sc->sc_tick))
		timeout_del(&sc->sc_tick);
}

void
txp_watchdog(ifp)
	struct ifnet *ifp;
{
}

int
txp_ifmedia_upd(ifp)
	struct ifnet *ifp;
{
	struct txp_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_ifmedia;
	u_int16_t new_xcvr;

	if (IFM_TYPE(ifm->ifm_media) != IFM_ETHER)
		return (EINVAL);

	if (IFM_SUBTYPE(ifm->ifm_media) == IFM_10_T) {
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			new_xcvr = TXP_XCVR_10_FDX;
		else
			new_xcvr = TXP_XCVR_10_HDX;
	} else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_100_TX) {
		if ((ifm->ifm_media & IFM_GMASK) == IFM_FDX)
			new_xcvr = TXP_XCVR_100_FDX;
		else
			new_xcvr = TXP_XCVR_100_HDX;
	} else if (IFM_SUBTYPE(ifm->ifm_media) == IFM_AUTO) {
		new_xcvr = TXP_XCVR_AUTO;
	} else
		return (EINVAL);

	/* nothing to do */
	if (sc->sc_xcvr == new_xcvr)
		return (0);

	txp_command(sc, TXP_CMD_XCVR_SELECT, new_xcvr, 0, 0,
	    NULL, NULL, NULL, 0);
	sc->sc_xcvr = new_xcvr;

	return (0);
}
void
txp_ifmedia_sts(ifp, ifmr)
	struct ifnet *ifp;
	struct ifmediareq *ifmr;
{
	struct txp_softc *sc = ifp->if_softc;
	struct ifmedia *ifm = &sc->sc_ifmedia;
	u_int16_t bmsr, bmcr, anlpar;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER;

	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMSR, 0,
	    &bmsr, NULL, NULL, 1))
		goto bail;
	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMSR, 0,
	    &bmsr, NULL, NULL, 1))
		goto bail;

	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_BMCR, 0,
	    &bmcr, NULL, NULL, 1))
		goto bail;

	if (txp_command(sc, TXP_CMD_PHY_MGMT_READ, 0, MII_ANLPAR, 0,
	    &anlpar, NULL, NULL, 1))
		goto bail;

	if (bmsr & BMSR_LINK)
		ifmr->ifm_status |= IFM_ACTIVE;

	if (bmcr & BMCR_ISO) {
		ifmr->ifm_active |= IFM_NONE;
		ifmr->ifm_status = 0;
		return;
	}

	if (bmcr & BMCR_LOOP)
		ifmr->ifm_active |= IFM_LOOP;

	if (bmcr & BMCR_AUTOEN) {
		if ((bmsr & BMSR_ACOMP) == 0) {
			ifmr->ifm_active |= IFM_NONE;
			return;
		}

		if (anlpar & ANLPAR_T4)
			ifmr->ifm_active |= IFM_100_T4;
		else if (anlpar & ANLPAR_TX_FD)
			ifmr->ifm_active |= IFM_100_TX|IFM_FDX;
		else if (anlpar & ANLPAR_TX)
			ifmr->ifm_active |= IFM_100_TX;
		else if (anlpar & ANLPAR_10_FD)
			ifmr->ifm_active |= IFM_10_T|IFM_FDX;
		else if (anlpar & ANLPAR_10)
			ifmr->ifm_active |= IFM_10_T;
		else
			ifmr->ifm_active |= IFM_NONE;
	} else
		ifmr->ifm_active = ifm->ifm_cur->ifm_media;
	return;

bail:
	ifmr->ifm_active |= IFM_NONE;
	ifmr->ifm_status &= ~IFM_AVALID;
}

void
txp_show_descriptor(d)
	void *d;
{
	struct txp_cmd_desc *cmd = d;
	struct txp_rsp_desc *rsp = d;
	struct txp_tx_desc *txd = d;
	struct txp_frag_desc *frgd = d;

	switch (cmd->cmd_flags & CMD_FLAGS_TYPE_M) {
	case CMD_FLAGS_TYPE_CMD:
		/* command descriptor */
		printf("[cmd flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
		    cmd->cmd_flags, cmd->cmd_numdesc, cmd->cmd_id,
		    cmd->cmd_seq, cmd->cmd_par1, cmd->cmd_par2,
		    cmd->cmd_par3);
		break;
	case CMD_FLAGS_TYPE_RESP:
		/* response descriptor */
		printf("[rsp flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
		    rsp->rsp_flags, rsp->rsp_numdesc, rsp->rsp_id,
		    rsp->rsp_seq, rsp->rsp_par1, rsp->rsp_par2,
		    rsp->rsp_par3);
		break;
	case CMD_FLAGS_TYPE_DATA:
		/* data header (assuming tx for now) */
		printf("[data flags 0x%x num %d totlen %d addr 0x%x/0x%x pflags 0x%x]",
		    txd->tx_flags, txd->tx_numdesc, txd->tx_totlen,
		    txd->tx_addrlo, txd->tx_addrhi, txd->tx_pflags);
		break;
	case CMD_FLAGS_TYPE_FRAG:
		/* fragment descriptor */
		printf("[frag flags 0x%x rsvd1 0x%x len %d addr 0x%x/0x%x rsvd2 0x%x]",
		    frgd->frag_flags, frgd->frag_rsvd1, frgd->frag_len,
		    frgd->frag_addrlo, frgd->frag_addrhi, frgd->frag_rsvd2);
		break;
	default:
		printf("[unknown(%x) flags 0x%x num %d id %d seq %d par1 0x%x par2 0x%x par3 0x%x]\n",
		    cmd->cmd_flags & CMD_FLAGS_TYPE_M,
		    cmd->cmd_flags, cmd->cmd_numdesc, cmd->cmd_id,
		    cmd->cmd_seq, cmd->cmd_par1, cmd->cmd_par2,
		    cmd->cmd_par3);
		break;
	}
}

void
txp_set_filter(sc)
	struct txp_softc *sc;
{
	struct arpcom *ac = &sc->sc_arpcom;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	u_int32_t crc, carry, hashbit, hash[2];
	u_int16_t filter;
	u_int8_t octet;
	int i, j, mcnt = 0;
	struct ether_multi *enm;
	struct ether_multistep step;

	if (ifp->if_flags & IFF_PROMISC) {
		filter = TXP_RXFILT_PROMISC;
		goto setit;
	}

again:
	filter = TXP_RXFILT_DIRECT;

	if (ifp->if_flags & IFF_BROADCAST)
		filter |= TXP_RXFILT_BROADCAST;
	if (ifp->if_flags & IFF_ALLMULTI)
		filter |= TXP_RXFILT_ALLMULTI;
	else {
		hash[0] = hash[1] = 0;

		ETHER_FIRST_MULTI(step, ac, enm);
		while (enm != NULL) {
			if (bcmp(enm->enm_addrlo, enm->enm_addrhi,
			    ETHER_ADDR_LEN)) {
				/*
				 * We must listen to a range of multicast
				 * addresses.  For now, just accept all
				 * multicasts, rather than trying to set only
				 * those filter bits needed to match the range.
				 * (At this time, the only use of address
				 * ranges is for IP multicast routing, for
				 * which the range is big enough to require
				 * all bits set.)
				 */
				ifp->if_flags |= IFF_ALLMULTI;
				goto again;
			}

			mcnt++;
			crc = 0xffffffff;

			for (i = 0; i < ETHER_ADDR_LEN; i++) {
				octet = enm->enm_addrlo[i];
				for (j = 0; j < 8; j++) {
					carry = ((crc & 0x80000000) ? 1 : 0) ^
					    (octet & 1);
					crc <<= 1;
					octet >>= 1;
					if (carry)
						crc = (crc ^ TXP_POLYNOMIAL) |
						    carry;
				}
			}
			hashbit = (u_int16_t)(crc & (64 - 1));
			hash[hashbit / 32] |= (1 << hashbit % 32);
			ETHER_NEXT_MULTI(step, enm);
		}

		if (mcnt > 0) {
			filter |= TXP_RXFILT_HASHMULTI;
			txp_command(sc, TXP_CMD_MCAST_HASH_MASK_WRITE,
			    2, hash[0], hash[1], NULL, NULL, NULL, 0);
		}
	}

setit:
	txp_command(sc, TXP_CMD_RX_FILTER_WRITE, filter, 0, 0,
	    NULL, NULL, NULL, 1);
}

void
txp_capabilities(sc)
	struct txp_softc *sc;
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct txp_rsp_desc *rsp = NULL;
	struct txp_ext_desc *ext;

	if (txp_command2(sc, TXP_CMD_OFFLOAD_READ, 0, 0, 0, NULL, 0, &rsp, 1))
		goto out;

	if (rsp->rsp_numdesc != 1)
		goto out;
	ext = (struct txp_ext_desc *)(rsp + 1);

	sc->sc_tx_capability = ext->ext_1 & OFFLOAD_MASK;
	sc->sc_rx_capability = ext->ext_2 & OFFLOAD_MASK;

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_MTU;
	if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_VLAN) {
		sc->sc_tx_capability |= OFFLOAD_VLAN;
		sc->sc_rx_capability |= OFFLOAD_VLAN;
		ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
	}
#endif

#if 0
	/* not ready yet */
	if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_IPSEC) {
		sc->sc_tx_capability |= OFFLOAD_IPSEC;
		sc->sc_rx_capability |= OFFLOAD_IPSEC;
		ifp->if_capabilities |= IFCAP_IPSEC;
	}
#endif

	if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_IPCKSUM) {
		sc->sc_tx_capability |= OFFLOAD_IPCKSUM;
		sc->sc_rx_capability |= OFFLOAD_IPCKSUM;
		ifp->if_capabilities |= IFCAP_CSUM_IPv4;
	}

	if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_TCPCKSUM) {
#if 0
		sc->sc_tx_capability |= OFFLOAD_TCPCKSUM;
#endif
		sc->sc_rx_capability |= OFFLOAD_TCPCKSUM;
#if 0
		ifp->if_capabilities |= IFCAP_CSUM_TCPv4;
#endif
	}

	if (rsp->rsp_par2 & rsp->rsp_par3 & OFFLOAD_UDPCKSUM) {
#if 0
		sc->sc_tx_capability |= OFFLOAD_UDPCKSUM;
#endif
		sc->sc_rx_capability |= OFFLOAD_UDPCKSUM;
#if 0
		ifp->if_capabilities |= IFCAP_CSUM_UDPv4;
#endif
	}

	if (txp_command(sc, TXP_CMD_OFFLOAD_WRITE, 0,
	    sc->sc_tx_capability, sc->sc_rx_capability, NULL, NULL, NULL, 1))
		goto out;

out:
	if (rsp != NULL)
		free(rsp, M_DEVBUF);
}