/*	$OpenBSD: if_xge.c,v 1.49 2008/11/28 02:44:18 brad Exp $	*/
/*	$NetBSD: if_xge.c,v 1.1 2005/09/09 10:30:27 ragge Exp $	*/

/*
 * Copyright (c) 2004, SUNET, Swedish University Computer Network.
 * All rights reserved.
 *
 * Written by Anders Magnusson for SUNET, Swedish University Computer Network.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      SUNET, Swedish University Computer Network.
 * 4. The name of SUNET may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY SUNET ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL SUNET
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Driver for the Neterion Xframe Ten Gigabit Ethernet controller.
 */

#include "bpfilter.h"
#include "vlan.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sockio.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/device.h>

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>

#ifdef INET
#include <netinet/in.h>
#include <netinet/in_systm.h>
#include <netinet/in_var.h>
#include <netinet/ip.h>
#include <netinet/if_ether.h>
#include <netinet/tcp.h>
#include <netinet/udp.h>
#endif

#if NVLAN > 0
#include <net/if_types.h>
#include <net/if_vlan_var.h>
#endif

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcidevs.h>

#include <sys/lock.h>
#include <sys/proc.h>

#include <dev/pci/if_xgereg.h>

/* Xframe chipset revisions */
#define XGE_TYPE_XENA		1	/* Xframe */
#define XGE_TYPE_HERC		2	/* Xframe-II */

#define XGE_PCISIZE_XENA	26
#define XGE_PCISIZE_HERC	64
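/*
 * Bytes of PCI config space saved in sc_pciregs[] and restored around
 * the chip reset in xge_attach(); only the Xena value is used by the
 * save/restore loops there.
 */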

/*
 * Some tunable constants, tune with care!
 */
#define RX_MODE		RX_MODE_1 /* Receive mode (buffer usage, see below) */
#define NRXDESCS	1016	/* # of receive descriptors (requested) */
#define NTXDESCS	2048	/* Number of transmit descriptors */
#define NTXFRAGS	100	/* Max fragments per packet */

/*
 * Receive buffer modes; 1, 3 or 5 buffers.
 */
#define RX_MODE_1	1
#define RX_MODE_3	3
#define RX_MODE_5	5

/*
 * Use clever macros to avoid a bunch of #ifdef's.
 */
#define XCONCAT3(x,y,z)	x ## y ## z
#define CONCAT3(x,y,z)	XCONCAT3(x,y,z)
#define NDESC_BUFMODE	CONCAT3(NDESC_,RX_MODE,BUFMODE)
#define rxd_4k		CONCAT3(rxd,RX_MODE,_4k)
/* XXX */
#if 0
#define rxdesc		___CONCAT(rxd,RX_MODE)
#endif
#define rxdesc		rxd1

#define NEXTTX(x)	(((x)+1) % NTXDESCS)
#define NRXFRAGS	RX_MODE /* hardware imposed frags */
#define NRXPAGES	((NRXDESCS/NDESC_BUFMODE)+1)
#define NRXREAL		(NRXPAGES*NDESC_BUFMODE)
#define RXMAPSZ		(NRXPAGES*PAGE_SIZE)
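/*
 * For example, with RX_MODE set to RX_MODE_1 the CONCAT3 macros above
 * expand NDESC_BUFMODE to NDESC_1BUFMODE and rxd_4k to rxd1_4k,
 * selecting the one-buffer descriptor layout from if_xgereg.h without
 * a bunch of #ifdef's.
 */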

/*
 * Magic to fix a bug when the MAC address cannot be read correctly.
 * This came from the Linux driver.
 */
static uint64_t fix_mac[] = {
	0x0060000000000000ULL, 0x0060600000000000ULL,
	0x0040600000000000ULL, 0x0000600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0060600000000000ULL,
	0x0020600000000000ULL, 0x0000600000000000ULL,
	0x0040600000000000ULL, 0x0060600000000000ULL,
};

/*
 * Constants to be programmed into Hercules's registers, to configure
 * the XGXS transceiver.
 */
#define END_SIGN	0x0
static uint64_t herc_dtx_cfg[] = {
	0x8000051536750000ULL, 0x80000515367500E0ULL,
	0x8000051536750004ULL, 0x80000515367500E4ULL,

	0x80010515003F0000ULL, 0x80010515003F00E0ULL,
	0x80010515003F0004ULL, 0x80010515003F00E4ULL,

	0x801205150D440000ULL, 0x801205150D4400E0ULL,
	0x801205150D440004ULL, 0x801205150D4400E4ULL,

	0x80020515F2100000ULL, 0x80020515F21000E0ULL,
	0x80020515F2100004ULL, 0x80020515F21000E4ULL,

	END_SIGN
};

struct xge_softc {
	struct device sc_dev;
	struct arpcom sc_arpcom;
	struct ifmedia xena_media;

	void *sc_ih;
	void *sc_shutdownhook;

	bus_dma_tag_t sc_dmat;
	bus_space_tag_t sc_st;
	bus_space_handle_t sc_sh;
	bus_space_tag_t sc_txt;
	bus_space_handle_t sc_txh;

	pcireg_t sc_pciregs[16];

	int xge_type;			/* chip type */
	int xge_if_flags;

	/* Transmit structures */
	struct txd *sc_txd[NTXDESCS];	/* transmit frags array */
	bus_addr_t sc_txdp[NTXDESCS];	/* dva of transmit frags */
	bus_dmamap_t sc_txm[NTXDESCS];	/* transmit frags map */
	struct mbuf *sc_txb[NTXDESCS];	/* transmit mbuf pointer */
	int sc_nexttx, sc_lasttx;
	bus_dmamap_t sc_txmap;		/* transmit descriptor map */

	/* Receive data */
	bus_dmamap_t sc_rxmap;		/* receive descriptor map */
	struct rxd_4k *sc_rxd_4k[NRXPAGES]; /* receive desc pages */
	bus_dmamap_t sc_rxm[NRXREAL];	/* receive buffer map */
	struct mbuf *sc_rxb[NRXREAL];	/* mbufs on rx descriptors */
	int sc_nextrx;			/* next descriptor to check */
};

#ifdef XGE_DEBUG
#define DPRINTF(x)	do { if (xgedebug) printf x ; } while (0)
#define DPRINTFN(n,x)	do { if (xgedebug >= (n)) printf x ; } while (0)
int xgedebug = 0;
#else
#define DPRINTF(x)
#define DPRINTFN(n,x)
#endif

int xge_match(struct device *, void *, void *);
void xge_attach(struct device *, struct device *, void *);
int xge_alloc_txmem(struct xge_softc *);
int xge_alloc_rxmem(struct xge_softc *);
void xge_start(struct ifnet *);
void xge_stop(struct ifnet *, int);
void xge_shutdown(void *);
int xge_add_rxbuf(struct xge_softc *, int);
void xge_setmulti(struct xge_softc *);
void xge_setpromisc(struct xge_softc *);
int xge_setup_xgxs_xena(struct xge_softc *);
int xge_setup_xgxs_herc(struct xge_softc *);
int xge_ioctl(struct ifnet *, u_long, caddr_t);
int xge_init(struct ifnet *);
void xge_ifmedia_status(struct ifnet *, struct ifmediareq *);
int xge_xgmii_mediachange(struct ifnet *);
void xge_enable(struct xge_softc *);
int xge_intr(void *);

/*
 * Helpers to address registers.
 */
#define PIF_WCSR(csr, val)	pif_wcsr(sc, csr, val)
#define PIF_RCSR(csr)		pif_rcsr(sc, csr)
#define TXP_WCSR(csr, val)	txp_wcsr(sc, csr, val)
#define PIF_WKEY(csr, val)	pif_wkey(sc, csr, val)

static inline void
pif_wcsr(struct xge_softc *sc, bus_size_t csr, uint64_t val)
{
	uint32_t lval, hval;

	lval = val&0xffffffff;
	hval = val>>32;

	bus_space_write_4(sc->sc_st, sc->sc_sh, csr, lval);
	bus_space_write_4(sc->sc_st, sc->sc_sh, csr+4, hval);
}

static inline uint64_t
pif_rcsr(struct xge_softc *sc, bus_size_t csr)
{
	uint64_t val, val2;

	val = bus_space_read_4(sc->sc_st, sc->sc_sh, csr);
	val2 = bus_space_read_4(sc->sc_st, sc->sc_sh, csr+4);
	val |= (val2 << 32);
	return (val);
}

static inline void
txp_wcsr(struct xge_softc *sc, bus_size_t csr, uint64_t val)
{
	uint32_t lval, hval;

	lval = val&0xffffffff;
	hval = val>>32;

	bus_space_write_4(sc->sc_txt, sc->sc_txh, csr, lval);
	bus_space_write_4(sc->sc_txt, sc->sc_txh, csr+4, hval);
}

static inline void
pif_wkey(struct xge_softc *sc, bus_size_t csr, uint64_t val)
{
	uint32_t lval, hval;

	lval = val&0xffffffff;
	hval = val>>32;

	if (sc->xge_type == XGE_TYPE_XENA)
		PIF_WCSR(RMAC_CFG_KEY, RMAC_KEY_VALUE);

	bus_space_write_4(sc->sc_st, sc->sc_sh, csr, lval);

	if (sc->xge_type == XGE_TYPE_XENA)
		PIF_WCSR(RMAC_CFG_KEY, RMAC_KEY_VALUE);

	bus_space_write_4(sc->sc_st, sc->sc_sh, csr+4, hval);
}

struct cfattach xge_ca = {
	sizeof(struct xge_softc), xge_match, xge_attach
};

struct cfdriver xge_cd = {
	0, "xge", DV_IFNET
};

#define XNAME	sc->sc_dev.dv_xname

#define XGE_RXSYNC(desc, what) \
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxmap, \
	(desc/NDESC_BUFMODE) * XGE_PAGE + sizeof(struct rxdesc) * \
	(desc%NDESC_BUFMODE), sizeof(struct rxdesc), what)
#define XGE_RXD(desc)	&sc->sc_rxd_4k[desc/NDESC_BUFMODE]-> \
	r4_rxd[desc%NDESC_BUFMODE]
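/*
 * Receive descriptors are packed NDESC_BUFMODE to a 4k page (struct
 * rxd_4k), so descriptor i lives on page i/NDESC_BUFMODE at slot
 * i%NDESC_BUFMODE; XGE_RXSYNC computes the byte offset the same way.
 */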

/*
 * Non-tunable constants.
 */
#define XGE_MAX_FRAMELEN	9622
#define XGE_MAX_MTU		(XGE_MAX_FRAMELEN - ETHER_HDR_LEN - \
				 ETHER_CRC_LEN - ETHER_VLAN_ENCAP_LEN)

const struct pci_matchid xge_devices[] = {
	{ PCI_VENDOR_NETERION, PCI_PRODUCT_NETERION_XFRAME },
	{ PCI_VENDOR_NETERION, PCI_PRODUCT_NETERION_XFRAME_2 }
};

int
xge_match(struct device *parent, void *match, void *aux)
{
	return (pci_matchbyid((struct pci_attach_args *)aux, xge_devices,
	    sizeof(xge_devices)/sizeof(xge_devices[0])));
}

void
xge_attach(struct device *parent, struct device *self, void *aux)
{
	struct pci_attach_args *pa = aux;
	struct xge_softc *sc;
	struct ifnet *ifp;
	pcireg_t memtype;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	pci_chipset_tag_t pc = pa->pa_pc;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint64_t val;
	int i;

	sc = (struct xge_softc *)self;

	sc->sc_dmat = pa->pa_dmat;

	if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_NETERION_XFRAME)
		sc->xge_type = XGE_TYPE_XENA;
	else
		sc->xge_type = XGE_TYPE_HERC;

	/* Get BAR0 address */
	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, XGE_PIF_BAR);
	if (pci_mapreg_map(pa, XGE_PIF_BAR, memtype, 0,
	    &sc->sc_st, &sc->sc_sh, 0, 0, 0)) {
		printf(": unable to map PIF BAR registers\n");
		return;
	}

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, XGE_TXP_BAR);
	if (pci_mapreg_map(pa, XGE_TXP_BAR, memtype, 0,
	    &sc->sc_txt, &sc->sc_txh, 0, 0, 0)) {
		printf(": unable to map TXP BAR registers\n");
		return;
	}

	if (sc->xge_type == XGE_TYPE_XENA) {
		/* Save PCI config space */
		for (i = 0; i < XGE_PCISIZE_XENA; i += 4)
			sc->sc_pciregs[i/4] = pci_conf_read(pa->pa_pc, pa->pa_tag, i);
	}

#if BYTE_ORDER == LITTLE_ENDIAN
	val = (uint64_t)0xFFFFFFFFFFFFFFFFULL;
	val &= ~(TxF_R_SE|RxF_W_SE);
	PIF_WCSR(SWAPPER_CTRL, val);
	PIF_WCSR(SWAPPER_CTRL, val);
#endif

	if ((val = PIF_RCSR(PIF_RD_SWAPPER_Fb)) != SWAPPER_MAGIC) {
		printf(": failed configuring endian, %llx != %llx!\n",
		    (unsigned long long)val, SWAPPER_MAGIC);
		return;
	}

	/*
	 * Fix for all "FFs" MAC address problems observed on
	 * Alpha platforms. Not needed for Herc.
	 */
	if (sc->xge_type == XGE_TYPE_XENA) {
		/*
		 * The MAC addr may be all FF's, which is not good.
		 * Resolve it by writing some magics to GPIO_CONTROL and
		 * force a chip reset to read in the serial eeprom again.
		 */
		for (i = 0; i < sizeof(fix_mac)/sizeof(fix_mac[0]); i++) {
			PIF_WCSR(GPIO_CONTROL, fix_mac[i]);
			PIF_RCSR(GPIO_CONTROL);
		}

		/*
		 * Reset the chip and restore the PCI registers.
		 */
		PIF_WCSR(SW_RESET, 0xa5a5a50000000000ULL);
		DELAY(500000);
		for (i = 0; i < XGE_PCISIZE_XENA; i += 4)
			pci_conf_write(pa->pa_pc, pa->pa_tag, i, sc->sc_pciregs[i/4]);

		/*
		 * Restore the byte order registers.
		 */
#if BYTE_ORDER == LITTLE_ENDIAN
		val = (uint64_t)0xFFFFFFFFFFFFFFFFULL;
		val &= ~(TxF_R_SE|RxF_W_SE);
		PIF_WCSR(SWAPPER_CTRL, val);
		PIF_WCSR(SWAPPER_CTRL, val);
#endif

		if ((val = PIF_RCSR(PIF_RD_SWAPPER_Fb)) != SWAPPER_MAGIC) {
			printf(": failed configuring endian2, %llx != %llx!\n",
			    (unsigned long long)val, SWAPPER_MAGIC);
			return;
		}
	}
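
	/*
	 * The numbered step comments below (29, 30, 33, 9 and 10, ...)
	 * presumably track the initialization checklist in the Xframe
	 * user's guide.
	 */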

	/*
	 * XGXS initialization.
	 */

	/*
	 * For Herc, bring EOI out of reset before XGXS.
	 */
	if (sc->xge_type == XGE_TYPE_HERC) {
		val = PIF_RCSR(SW_RESET);
		val &= 0xffff00ffffffffffULL;
		PIF_WCSR(SW_RESET, val);
		delay(1000*1000);	/* wait for 1 sec */
	}

	/* 29, Bring adapter out of reset */
	val = PIF_RCSR(SW_RESET);
	val &= 0xffffff00ffffffffULL;
	PIF_WCSR(SW_RESET, val);
	DELAY(500000);

	/* Ensure that it's safe to access registers by checking
	 * RIC_RUNNING bit is reset. Check is valid only for XframeII.
	 */
	if (sc->xge_type == XGE_TYPE_HERC) {
		for (i = 0; i < 50; i++) {
			val = PIF_RCSR(ADAPTER_STATUS);
			if (!(val & RIC_RUNNING))
				break;
			delay(20*1000);
		}

		if (i == 50) {
			printf(": not safe to access registers\n");
			return;
		}
	}

	/* 30, configure XGXS transceiver */
	if (sc->xge_type == XGE_TYPE_XENA)
		xge_setup_xgxs_xena(sc);
	else if (sc->xge_type == XGE_TYPE_HERC)
		xge_setup_xgxs_herc(sc);

	/* 33, program MAC address (not needed here) */
	/* Get ethernet address */
	PIF_WCSR(RMAC_ADDR_CMD_MEM,
	    RMAC_ADDR_CMD_MEM_STR|RMAC_ADDR_CMD_MEM_OFF(0));
	while (PIF_RCSR(RMAC_ADDR_CMD_MEM) & RMAC_ADDR_CMD_MEM_STR)
		;
	val = PIF_RCSR(RMAC_ADDR_DATA0_MEM);
	for (i = 0; i < ETHER_ADDR_LEN; i++)
		enaddr[i] = (uint8_t)(val >> (56 - (8*i)));

	/*
	 * Get memory for transmit descriptor lists.
	 */
	if (xge_alloc_txmem(sc)) {
		printf(": failed allocating txmem.\n");
		return;
	}

	/* 9 and 10 - set FIFO number/prio */
	PIF_WCSR(TX_FIFO_P0, TX_FIFO_LEN0(NTXDESCS));
	PIF_WCSR(TX_FIFO_P1, 0ULL);
	PIF_WCSR(TX_FIFO_P2, 0ULL);
	PIF_WCSR(TX_FIFO_P3, 0ULL);

	/* 11, XXX set round-robin prio? */

	/* 12, enable transmit FIFO */
	val = PIF_RCSR(TX_FIFO_P0);
	val |= TX_FIFO_ENABLE;
	PIF_WCSR(TX_FIFO_P0, val);

	/* 13, disable some error checks */
	PIF_WCSR(TX_PA_CFG,
	    TX_PA_CFG_IFR|TX_PA_CFG_ISO|TX_PA_CFG_ILC|TX_PA_CFG_ILE);

	/* Create transmit DMA maps */
	for (i = 0; i < NTXDESCS; i++) {
		if (bus_dmamap_create(sc->sc_dmat, XGE_MAX_FRAMELEN,
		    NTXFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_txm[i])) {
			printf(": cannot create TX DMA maps\n");
			return;
		}
	}

	sc->sc_lasttx = NTXDESCS-1;
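
	/*
	 * The TX ring is empty when NEXTTX(sc_lasttx) == sc_nexttx;
	 * starting sc_lasttx at NTXDESCS-1 with sc_nexttx at 0 (the
	 * softc is zeroed at attach) establishes that invariant before
	 * any packet is queued.
	 */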

	/*
	 * RxDMA initialization.
	 * Only use one out of 8 possible receive queues.
	 */
	/* allocate rx descriptor memory */
	if (xge_alloc_rxmem(sc)) {
		printf(": failed allocating rxmem\n");
		return;
	}

	/* Create receive buffer DMA maps */
	for (i = 0; i < NRXREAL; i++) {
		if (bus_dmamap_create(sc->sc_dmat, XGE_MAX_FRAMELEN,
		    NRXFRAGS, MCLBYTES, 0, BUS_DMA_NOWAIT, &sc->sc_rxm[i])) {
			printf(": cannot create RX DMA maps\n");
			return;
		}
	}

	/* allocate mbufs to receive descriptors */
	for (i = 0; i < NRXREAL; i++)
		if (xge_add_rxbuf(sc, i))
			panic("out of mbufs too early");

	/* 14, setup receive ring priority */
	PIF_WCSR(RX_QUEUE_PRIORITY, 0ULL);	/* only use one ring */

	/* 15, setup receive ring round-robin calendar */
	PIF_WCSR(RX_W_ROUND_ROBIN_0, 0ULL);	/* only use one ring */
	PIF_WCSR(RX_W_ROUND_ROBIN_1, 0ULL);
	PIF_WCSR(RX_W_ROUND_ROBIN_2, 0ULL);
	PIF_WCSR(RX_W_ROUND_ROBIN_3, 0ULL);
	PIF_WCSR(RX_W_ROUND_ROBIN_4, 0ULL);

	/* 16, write receive ring start address */
	PIF_WCSR(PRC_RXD0_0, (uint64_t)sc->sc_rxmap->dm_segs[0].ds_addr);
	/* PRC_RXD0_[1-7] are not used */

	/* 17, Setup alarm registers */
	PIF_WCSR(PRC_ALARM_ACTION, 0ULL);	/* Default everything to retry */

	/* 18, init receive ring controller */
#if RX_MODE == RX_MODE_1
	val = RING_MODE_1;
#elif RX_MODE == RX_MODE_3
	val = RING_MODE_3;
#else /* RX_MODE == RX_MODE_5 */
	val = RING_MODE_5;
#endif
	PIF_WCSR(PRC_CTRL_0, RC_IN_SVC|val);
	/* leave 1-7 disabled */
	/* XXXX snoop configuration? */

	/* 19, set chip memory assigned to the queue */
	if (sc->xge_type == XGE_TYPE_XENA) {
		/* all 64M to queue 0 */
		PIF_WCSR(RX_QUEUE_CFG, MC_QUEUE(0, 64));
	} else {
		/* all 32M to queue 0 */
		PIF_WCSR(RX_QUEUE_CFG, MC_QUEUE(0, 32));
	}

	/* 20, setup RLDRAM parameters */
	/* do not touch it for now */

	/* 21, setup pause frame thresholds */
	/* do not touch the defaults */
	/* XXX - must 0xff be written as stated in the manual? */

	/* 22, configure RED */
	/* we do not want to drop packets, so ignore */

	/* 23, initiate RLDRAM */
	val = PIF_RCSR(MC_RLDRAM_MRS);
	val |= MC_QUEUE_SIZE_ENABLE|MC_RLDRAM_MRS_ENABLE;
	PIF_WCSR(MC_RLDRAM_MRS, val);
	DELAY(1000);

	/*
	 * Setup interrupt policies.
	 */
	/* 40, Transmit interrupts */
	PIF_WCSR(TTI_DATA1_MEM, TX_TIMER_VAL(0x1ff) | TX_TIMER_AC |
	    TX_URNG_A(5) | TX_URNG_B(20) | TX_URNG_C(48));
	PIF_WCSR(TTI_DATA2_MEM,
	    TX_UFC_A(25) | TX_UFC_B(64) | TX_UFC_C(128) | TX_UFC_D(512));
	PIF_WCSR(TTI_COMMAND_MEM, TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE);
	while (PIF_RCSR(TTI_COMMAND_MEM) & TTI_CMD_MEM_STROBE)
		;

	/* 41, Receive interrupts */
	PIF_WCSR(RTI_DATA1_MEM, RX_TIMER_VAL(0x800) | RX_TIMER_AC |
	    RX_URNG_A(5) | RX_URNG_B(20) | RX_URNG_C(50));
	PIF_WCSR(RTI_DATA2_MEM,
	    RX_UFC_A(64) | RX_UFC_B(128) | RX_UFC_C(256) | RX_UFC_D(512));
	PIF_WCSR(RTI_COMMAND_MEM, RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE);
	while (PIF_RCSR(RTI_COMMAND_MEM) & RTI_CMD_MEM_STROBE)
		;
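
	/*
	 * The TTI/RTI values above set interrupt moderation: an event
	 * timer plus utilization ranges (URNG) with per-range frame
	 * count thresholds (UFC); the encodings are described in the
	 * user's guide.
	 */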

	/*
	 * Setup media stuff.
	 */
	ifmedia_init(&sc->xena_media, IFM_IMASK, xge_xgmii_mediachange,
	    xge_ifmedia_status);
	ifmedia_add(&sc->xena_media, IFM_ETHER|IFM_10G_SR, 0, NULL);
	ifmedia_set(&sc->xena_media, IFM_ETHER|IFM_10G_SR);

	ifp = &sc->sc_arpcom.ac_if;
	strlcpy(ifp->if_xname, XNAME, IFNAMSIZ);
	memcpy(sc->sc_arpcom.ac_enaddr, enaddr, ETHER_ADDR_LEN);
	ifp->if_baudrate = IF_Gbps(10);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = xge_ioctl;
	ifp->if_start = xge_start;
#ifdef XGE_JUMBO
	ifp->if_hardmtu = XGE_MAX_MTU;
#endif
	IFQ_SET_MAXLEN(&ifp->if_snd, NTXDESCS - 1);
	IFQ_SET_READY(&ifp->if_snd);

	ifp->if_capabilities = IFCAP_VLAN_MTU | IFCAP_CSUM_IPv4 |
	    IFCAP_CSUM_TCPv4 | IFCAP_CSUM_UDPv4;

#if NVLAN > 0
	ifp->if_capabilities |= IFCAP_VLAN_HWTAGGING;
#endif

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp);

	sc->sc_shutdownhook = shutdownhook_establish(xge_shutdown, sc);

	/*
	 * Setup interrupt vector before initializing.
	 */
	if (pci_intr_map(pa, &ih)) {
		printf(": unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	if ((sc->sc_ih =
	    pci_intr_establish(pc, ih, IPL_NET, xge_intr, sc, XNAME)) == NULL) {
		printf(": unable to establish interrupt at %s\n",
		    intrstr ? intrstr : "<unknown>");
		return;
	}
	printf(": %s, address %s\n", intrstr, ether_sprintf(enaddr));
}

void
xge_ifmedia_status(struct ifnet *ifp, struct ifmediareq *ifmr)
{
	struct xge_softc *sc = ifp->if_softc;
	uint64_t reg;

	ifmr->ifm_status = IFM_AVALID;
	ifmr->ifm_active = IFM_ETHER|IFM_10G_SR;

	reg = PIF_RCSR(ADAPTER_STATUS);
	if ((reg & (RMAC_REMOTE_FAULT|RMAC_LOCAL_FAULT)) == 0)
		ifmr->ifm_status |= IFM_ACTIVE;
}

int
xge_xgmii_mediachange(struct ifnet *ifp)
{
	return (0);
}

void
xge_enable(struct xge_softc *sc)
{
	uint64_t val;

	/* 2, enable adapter */
	val = PIF_RCSR(ADAPTER_CONTROL);
	val |= ADAPTER_EN;
	PIF_WCSR(ADAPTER_CONTROL, val);

	/* 3, light the card enable led */
	val = PIF_RCSR(ADAPTER_CONTROL);
	val |= LED_ON;
	PIF_WCSR(ADAPTER_CONTROL, val);
#ifdef XGE_DEBUG
	printf("%s: link up\n", XNAME);
#endif
}

int
xge_init(struct ifnet *ifp)
{
	struct xge_softc *sc = ifp->if_softc;
	uint64_t val;
	int s;

	s = splnet();

	/*
	 * Cancel any pending I/O
	 */
	xge_stop(ifp, 0);

	/* 31+32, setup MAC config */
	PIF_WKEY(MAC_CFG, TMAC_EN|RMAC_EN|TMAC_APPEND_PAD|RMAC_STRIP_FCS|
	    RMAC_BCAST_EN|RMAC_DISCARD_PFRM);

	DELAY(1000);

	/* 54, ensure that the adapter is 'quiescent' */
	val = PIF_RCSR(ADAPTER_STATUS);
	if ((val & QUIESCENT) != QUIESCENT) {
#if 0
		char buf[200];
#endif
		printf("%s: adapter not quiescent, aborting\n", XNAME);
		val = (val & QUIESCENT) ^ QUIESCENT;
#if 0
		bitmask_snprintf(val, QUIESCENT_BMSK, buf, sizeof buf);
		printf("%s: ADAPTER_STATUS missing bits %s\n", XNAME, buf);
#endif
		splx(s);
		return (1);
	}

	if (!(ifp->if_capabilities & IFCAP_VLAN_HWTAGGING)) {
		/* disable VLAN tag stripping */
		val = PIF_RCSR(RX_PA_CFG);
		val &= ~STRIP_VLAN_TAG;
		PIF_WCSR(RX_PA_CFG, val);
	}

	/* set MRU */
#ifdef XGE_JUMBO
	PIF_WCSR(RMAC_MAX_PYLD_LEN, RMAC_PYLD_LEN(XGE_MAX_FRAMELEN));
#else
	PIF_WCSR(RMAC_MAX_PYLD_LEN,
	    RMAC_PYLD_LEN(ETHER_MAX_LEN + ETHER_VLAN_ENCAP_LEN));
#endif

	/* 56, enable the transmit laser */
	val = PIF_RCSR(ADAPTER_CONTROL);
	val |= EOI_TX_ON;
	PIF_WCSR(ADAPTER_CONTROL, val);

	xge_enable(sc);

	/*
	 * Enable all interrupts
	 */
	PIF_WCSR(TX_TRAFFIC_MASK, 0);
	PIF_WCSR(RX_TRAFFIC_MASK, 0);
	PIF_WCSR(GENERAL_INT_MASK, 0);
	PIF_WCSR(TXPIC_INT_MASK, 0);
	PIF_WCSR(RXPIC_INT_MASK, 0);

	PIF_WCSR(MAC_INT_MASK, MAC_TMAC_INT); /* only from RMAC */
	PIF_WCSR(MAC_RMAC_ERR_MASK, ~RMAC_LINK_STATE_CHANGE_INT);

	xge_setpromisc(sc);

	xge_setmulti(sc);

	/* Done... */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	splx(s);

	return (0);
}

void
xge_stop(struct ifnet *ifp, int disable)
{
	struct xge_softc *sc = ifp->if_softc;
	uint64_t val;

	ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE);

	val = PIF_RCSR(ADAPTER_CONTROL);
	val &= ~ADAPTER_EN;
	PIF_WCSR(ADAPTER_CONTROL, val);

	while ((PIF_RCSR(ADAPTER_STATUS) & QUIESCENT) != QUIESCENT)
		;
}

void
xge_shutdown(void *pv)
{
	struct xge_softc *sc = (struct xge_softc *)pv;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;

	xge_stop(ifp, 1);
}

int
xge_intr(void *pv)
{
	struct xge_softc *sc = pv;
	struct txd *txd;
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	bus_dmamap_t dmp;
	uint64_t val;
	int i, lasttx, plen;

	val = PIF_RCSR(GENERAL_INT_STATUS);
	if (val == 0)
		return (0); /* no interrupt here */

	PIF_WCSR(GENERAL_INT_STATUS, val);

	if ((val = PIF_RCSR(MAC_RMAC_ERR_REG)) & RMAC_LINK_STATE_CHANGE_INT) {
		/* Wait for quiescence */
#ifdef XGE_DEBUG
		printf("%s: link down\n", XNAME);
#endif
		while ((PIF_RCSR(ADAPTER_STATUS) & QUIESCENT) != QUIESCENT)
			;
		PIF_WCSR(MAC_RMAC_ERR_REG, RMAC_LINK_STATE_CHANGE_INT);

		val = PIF_RCSR(ADAPTER_STATUS);
		if ((val & (RMAC_REMOTE_FAULT|RMAC_LOCAL_FAULT)) == 0)
			xge_enable(sc); /* Only if link restored */
	}

	if ((val = PIF_RCSR(TX_TRAFFIC_INT)))
		PIF_WCSR(TX_TRAFFIC_INT, val); /* clear interrupt bits */

	/*
	 * Collect sent packets.
	 */
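	/*
	 * Descriptor lists whose TXD_CTL1_OWN bit the hardware has
	 * cleared are done; walk forward from the last collected
	 * descriptor until one still owned by the chip is found.
	 */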
	lasttx = sc->sc_lasttx;
	while ((i = NEXTTX(sc->sc_lasttx)) != sc->sc_nexttx) {
		txd = sc->sc_txd[i];
		dmp = sc->sc_txm[i];

		bus_dmamap_sync(sc->sc_dmat, dmp, 0,
		    dmp->dm_mapsize,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		if (txd->txd_control1 & TXD_CTL1_OWN) {
			bus_dmamap_sync(sc->sc_dmat, dmp, 0,
			    dmp->dm_mapsize, BUS_DMASYNC_PREREAD);
			break;
		}
		bus_dmamap_unload(sc->sc_dmat, dmp);
		m_freem(sc->sc_txb[i]);
		ifp->if_opackets++;
		sc->sc_lasttx = i;
	}

	if (sc->sc_lasttx != lasttx)
		ifp->if_flags &= ~IFF_OACTIVE;

	/* Try to get more packets on the wire */
	xge_start(ifp);

	/* clear interrupt bits */
	if ((val = PIF_RCSR(RX_TRAFFIC_INT)))
		PIF_WCSR(RX_TRAFFIC_INT, val);

	for (;;) {
		struct rxdesc *rxd;
		struct mbuf *m;

		XGE_RXSYNC(sc->sc_nextrx,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		rxd = XGE_RXD(sc->sc_nextrx);
		if (rxd->rxd_control1 & RXD_CTL1_OWN) {
			XGE_RXSYNC(sc->sc_nextrx, BUS_DMASYNC_PREREAD);
			break;
		}

		/* got a packet */
		m = sc->sc_rxb[sc->sc_nextrx];
#if RX_MODE == RX_MODE_1
		plen = m->m_len = RXD_CTL2_BUF0SIZ(rxd->rxd_control2);
#elif RX_MODE == RX_MODE_3
#error Fix rxmodes in xge_intr
#elif RX_MODE == RX_MODE_5
		plen = m->m_len = RXD_CTL2_BUF0SIZ(rxd->rxd_control2);
		plen += m->m_next->m_len = RXD_CTL2_BUF1SIZ(rxd->rxd_control2);
		plen += m->m_next->m_next->m_len =
		    RXD_CTL2_BUF2SIZ(rxd->rxd_control2);
		plen += m->m_next->m_next->m_next->m_len =
		    RXD_CTL3_BUF3SIZ(rxd->rxd_control3);
		plen += m->m_next->m_next->m_next->m_next->m_len =
		    RXD_CTL3_BUF4SIZ(rxd->rxd_control3);
#endif
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = plen;

		val = rxd->rxd_control1;

		if (xge_add_rxbuf(sc, sc->sc_nextrx)) {
			/* Failed, recycle this mbuf */
#if RX_MODE == RX_MODE_1
			rxd->rxd_control2 = RXD_MKCTL2(MCLBYTES, 0, 0);
			rxd->rxd_control1 = RXD_CTL1_OWN;
#elif RX_MODE == RX_MODE_3
#elif RX_MODE == RX_MODE_5
#endif
			XGE_RXSYNC(sc->sc_nextrx,
			    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
			ifp->if_ierrors++;
			break;
		}

		ifp->if_ipackets++;

		if (RXD_CTL1_PROTOS(val) & RXD_CTL1_P_IPv4)
			m->m_pkthdr.csum_flags |= M_IPV4_CSUM_IN_OK;
		if (RXD_CTL1_PROTOS(val) & RXD_CTL1_P_TCP)
			m->m_pkthdr.csum_flags |= M_TCP_CSUM_IN_OK;
		if (RXD_CTL1_PROTOS(val) & RXD_CTL1_P_UDP)
			m->m_pkthdr.csum_flags |= M_UDP_CSUM_IN_OK;

#if NVLAN > 0
		if (RXD_CTL1_PROTOS(val) & RXD_CTL1_P_VLAN) {
			m->m_pkthdr.ether_vtag =
			    RXD_CTL2_VLANTAG(rxd->rxd_control2);
			m->m_flags |= M_VLANTAG;
		}
#endif

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_IN);
#endif /* NBPFILTER > 0 */

		ether_input_mbuf(ifp, m);

		if (++sc->sc_nextrx == NRXREAL)
			sc->sc_nextrx = 0;
	}

	return (1);
}

int
xge_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct xge_softc *sc = ifp->if_softc;
	struct ifaddr *ifa = (struct ifaddr *) data;
	struct ifreq *ifr = (struct ifreq *) data;
	int s, error = 0;

	s = splnet();

	switch (cmd) {
	case SIOCSIFADDR:
		ifp->if_flags |= IFF_UP;
		if (!(ifp->if_flags & IFF_RUNNING))
			xge_init(ifp);
#ifdef INET
		if (ifa->ifa_addr->sa_family == AF_INET)
			arp_ifinit(&sc->sc_arpcom, ifa);
#endif /* INET */
		break;

	case SIOCSIFFLAGS:
		if (ifp->if_flags & IFF_UP) {
			if (ifp->if_flags & IFF_RUNNING &&
			    (ifp->if_flags ^ sc->xge_if_flags) &
			     IFF_PROMISC) {
				xge_setpromisc(sc);
			} else {
				if (!(ifp->if_flags & IFF_RUNNING))
					xge_init(ifp);
			}
		} else {
			if (ifp->if_flags & IFF_RUNNING)
				xge_stop(ifp, 1);
		}
		sc->xge_if_flags = ifp->if_flags;
		break;

	case SIOCGIFMEDIA:
	case SIOCSIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->xena_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, &sc->sc_arpcom, cmd, data);
	}

	if (error == ENETRESET) {
		if (ifp->if_flags & IFF_RUNNING)
			xge_setmulti(sc);
		error = 0;
	}

	splx(s);
	return (error);
}

void
xge_setmulti(struct xge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	struct arpcom *ac = &sc->sc_arpcom;
	struct ether_multi *enm;
	struct ether_multistep step;
	int i, numaddr = 1; /* first slot used for card unicast address */
	uint64_t val;

	ETHER_FIRST_MULTI(step, ac, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/* Skip ranges */
			goto allmulti;
		}
		if (numaddr == MAX_MCAST_ADDR)
			goto allmulti;
		for (val = 0, i = 0; i < ETHER_ADDR_LEN; i++) {
			val <<= 8;
			val |= enm->enm_addrlo[i];
		}
		PIF_WCSR(RMAC_ADDR_DATA0_MEM, val << 16);
		PIF_WCSR(RMAC_ADDR_DATA1_MEM, 0xFFFFFFFFFFFFFFFFULL);
		PIF_WCSR(RMAC_ADDR_CMD_MEM, RMAC_ADDR_CMD_MEM_WE|
		    RMAC_ADDR_CMD_MEM_STR|RMAC_ADDR_CMD_MEM_OFF(numaddr));
		while (PIF_RCSR(RMAC_ADDR_CMD_MEM) & RMAC_ADDR_CMD_MEM_STR)
			;
		numaddr++;
		ETHER_NEXT_MULTI(step, enm);
	}
	/* set the remaining entries to the broadcast address */
	for (i = numaddr; i < MAX_MCAST_ADDR; i++) {
		PIF_WCSR(RMAC_ADDR_DATA0_MEM, 0xffffffffffff0000ULL);
		PIF_WCSR(RMAC_ADDR_DATA1_MEM, 0xFFFFFFFFFFFFFFFFULL);
		PIF_WCSR(RMAC_ADDR_CMD_MEM, RMAC_ADDR_CMD_MEM_WE|
		    RMAC_ADDR_CMD_MEM_STR|RMAC_ADDR_CMD_MEM_OFF(i));
		while (PIF_RCSR(RMAC_ADDR_CMD_MEM) & RMAC_ADDR_CMD_MEM_STR)
			;
	}
	ifp->if_flags &= ~IFF_ALLMULTI;
	return;

allmulti:
	/* Just receive everything with the multicast bit set */
	ifp->if_flags |= IFF_ALLMULTI;
	PIF_WCSR(RMAC_ADDR_DATA0_MEM, 0x8000000000000000ULL);
	PIF_WCSR(RMAC_ADDR_DATA1_MEM, 0xF000000000000000ULL);
	PIF_WCSR(RMAC_ADDR_CMD_MEM, RMAC_ADDR_CMD_MEM_WE|
	    RMAC_ADDR_CMD_MEM_STR|RMAC_ADDR_CMD_MEM_OFF(1));
	while (PIF_RCSR(RMAC_ADDR_CMD_MEM) & RMAC_ADDR_CMD_MEM_STR)
		;
}

void
xge_setpromisc(struct xge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_arpcom.ac_if;
	uint64_t val;

	val = PIF_RCSR(MAC_CFG);

	if (ifp->if_flags & IFF_PROMISC)
		val |= RMAC_PROM_EN;
	else
		val &= ~RMAC_PROM_EN;

	PIF_WCSR(MAC_CFG, val);
}

void
xge_start(struct ifnet *ifp)
{
	struct xge_softc *sc = ifp->if_softc;
	struct txd *txd = NULL; /* XXX - gcc */
	bus_dmamap_t dmp;
	struct mbuf *m;
	uint64_t par, lcr;
	int nexttx = 0, ntxd, error, i;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	par = lcr = 0;
	for (;;) {
		IFQ_POLL(&ifp->if_snd, m);
		if (m == NULL)
			break;	/* out of packets */

		if (sc->sc_nexttx == sc->sc_lasttx)
			break;	/* No more space */

		nexttx = sc->sc_nexttx;
		dmp = sc->sc_txm[nexttx];
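
		/*
		 * Map the mbuf chain for DMA; each segment of the
		 * resulting map is turned into one fragment descriptor
		 * below.
		 */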
		if ((error = bus_dmamap_load_mbuf(sc->sc_dmat, dmp, m,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT)) != 0) {
			printf("%s: bus_dmamap_load_mbuf error %d\n",
			    XNAME, error);
			break;
		}
		IFQ_DEQUEUE(&ifp->if_snd, m);

		bus_dmamap_sync(sc->sc_dmat, dmp, 0, dmp->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		txd = sc->sc_txd[nexttx];
		sc->sc_txb[nexttx] = m;
		for (i = 0; i < dmp->dm_nsegs; i++) {
			if (dmp->dm_segs[i].ds_len == 0)
				continue;
			txd->txd_control1 = dmp->dm_segs[i].ds_len;
			txd->txd_control2 = 0;
			txd->txd_bufaddr = dmp->dm_segs[i].ds_addr;
			txd++;
		}
		ntxd = txd - sc->sc_txd[nexttx] - 1;
		txd = sc->sc_txd[nexttx];
		txd->txd_control1 |= TXD_CTL1_OWN|TXD_CTL1_GCF;
		txd->txd_control2 = TXD_CTL2_UTIL;

#if NVLAN > 0
		if (m->m_flags & M_VLANTAG) {
			txd->txd_control2 |= TXD_CTL2_VLANE;
			txd->txd_control2 |=
			    TXD_CTL2_VLANT(m->m_pkthdr.ether_vtag);
		}
#endif

		if (m->m_pkthdr.csum_flags & M_IPV4_CSUM_OUT)
			txd->txd_control2 |= TXD_CTL2_CIPv4;
		if (m->m_pkthdr.csum_flags & M_TCPV4_CSUM_OUT)
			txd->txd_control2 |= TXD_CTL2_CTCP;
		if (m->m_pkthdr.csum_flags & M_UDPV4_CSUM_OUT)
			txd->txd_control2 |= TXD_CTL2_CUDP;

		txd[ntxd].txd_control1 |= TXD_CTL1_GCL;

		bus_dmamap_sync(sc->sc_dmat, dmp, 0, dmp->dm_mapsize,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		par = sc->sc_txdp[nexttx];
		lcr = TXDL_NUMTXD(ntxd) | TXDL_LGC_FIRST | TXDL_LGC_LAST;
		TXP_WCSR(TXDL_PAR, par);
		TXP_WCSR(TXDL_LCR, lcr);

#if NBPFILTER > 0
		if (ifp->if_bpf)
			bpf_mtap_ether(ifp->if_bpf, m, BPF_DIRECTION_OUT);
#endif /* NBPFILTER > 0 */

		sc->sc_nexttx = NEXTTX(nexttx);
	}
}

/*
 * Allocate DMA memory for transmit descriptor fragments.
 * Only one map is used for all descriptors.
 */
int
xge_alloc_txmem(struct xge_softc *sc)
{
	struct txd *txp;
	bus_dma_segment_t seg;
	bus_addr_t txdp;
	caddr_t kva;
	int i, rseg, state;

#define TXMAPSZ (NTXDESCS*NTXFRAGS*sizeof(struct txd))
	state = 0;
	if (bus_dmamem_alloc(sc->sc_dmat, TXMAPSZ, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT))
		goto err;
	state++;
	if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, TXMAPSZ, &kva,
	    BUS_DMA_NOWAIT))
		goto err;

	state++;
	if (bus_dmamap_create(sc->sc_dmat, TXMAPSZ, 1, TXMAPSZ, 0,
	    BUS_DMA_NOWAIT, &sc->sc_txmap))
		goto err;
	state++;
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_txmap,
	    kva, TXMAPSZ, NULL, BUS_DMA_NOWAIT))
		goto err;

	/* setup transmit array pointers */
	txp = (struct txd *)kva;
	txdp = seg.ds_addr;
	for (i = 0; i < NTXDESCS; i++) {
		sc->sc_txd[i] = txp;
		sc->sc_txdp[i] = txdp;
		txp += NTXFRAGS;
		txdp += (NTXFRAGS * sizeof(struct txd));
	}

	return (0);

err:
	if (state > 2)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_txmap);
	if (state > 1)
		bus_dmamem_unmap(sc->sc_dmat, kva, TXMAPSZ);
	if (state > 0)
		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
	return (ENOBUFS);
}
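
/*
 * Each of the NTXDESCS descriptor lists thus gets room for NTXFRAGS
 * fragment descriptors in one contiguous DMA allocation, and
 * sc_txdp[i] holds the device address that xge_start() hands to
 * TXDL_PAR.
 */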

/*
 * Allocate DMA memory for receive descriptors;
 * only one map is used for all descriptors.
 * Link receive descriptor pages together.
 */
int
xge_alloc_rxmem(struct xge_softc *sc)
{
	struct rxd_4k *rxpp;
	bus_dma_segment_t seg;
	caddr_t kva;
	int i, rseg, state;

	/* sanity check */
	if (sizeof(struct rxd_4k) != XGE_PAGE) {
		printf("bad compiler struct alignment, %d != %d\n",
		    (int)sizeof(struct rxd_4k), XGE_PAGE);
		return (EINVAL);
	}

	state = 0;
	if (bus_dmamem_alloc(sc->sc_dmat, RXMAPSZ, PAGE_SIZE, 0,
	    &seg, 1, &rseg, BUS_DMA_NOWAIT))
		goto err;
	state++;
	if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, RXMAPSZ, &kva,
	    BUS_DMA_NOWAIT))
		goto err;

	state++;
	if (bus_dmamap_create(sc->sc_dmat, RXMAPSZ, 1, RXMAPSZ, 0,
	    BUS_DMA_NOWAIT, &sc->sc_rxmap))
		goto err;
	state++;
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_rxmap,
	    kva, RXMAPSZ, NULL, BUS_DMA_NOWAIT))
		goto err;

	/* setup receive page link pointers */
	for (rxpp = (struct rxd_4k *)kva, i = 0; i < NRXPAGES; i++, rxpp++) {
		sc->sc_rxd_4k[i] = rxpp;
		rxpp->r4_next = (uint64_t)sc->sc_rxmap->dm_segs[0].ds_addr +
		    (i*sizeof(struct rxd_4k)) + sizeof(struct rxd_4k);
	}
	sc->sc_rxd_4k[NRXPAGES-1]->r4_next =
	    (uint64_t)sc->sc_rxmap->dm_segs[0].ds_addr;

	return (0);

err:
	if (state > 2)
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_rxmap);
	if (state > 1)
		bus_dmamem_unmap(sc->sc_dmat, kva, RXMAPSZ);
	if (state > 0)
		bus_dmamem_free(sc->sc_dmat, &seg, rseg);
	return (ENOBUFS);
}
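
/*
 * The r4_next links above turn the descriptor pages into a circular
 * list the chip can walk on its own; the last page points back to
 * the first.
 */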

/*
 * Add a new mbuf chain to descriptor id.
 */
int
xge_add_rxbuf(struct xge_softc *sc, int id)
{
	struct rxdesc *rxd;
	struct mbuf *m[5];
	int page, desc, error;
#if RX_MODE == RX_MODE_5
	int i;
#endif

	page = id/NDESC_BUFMODE;
	desc = id%NDESC_BUFMODE;

	rxd = &sc->sc_rxd_4k[page]->r4_rxd[desc];

	/*
	 * Allocate mbufs.
	 * Currently five mbufs and two clusters are used,
	 * the hardware will put (ethernet, ip, tcp/udp) headers in
	 * their own buffer and the clusters are only used for data.
	 */
#if RX_MODE == RX_MODE_1
	MGETHDR(m[0], M_DONTWAIT, MT_DATA);
	if (m[0] == NULL)
		return (ENOBUFS);
	MCLGET(m[0], M_DONTWAIT);
	if ((m[0]->m_flags & M_EXT) == 0) {
		m_freem(m[0]);
		return (ENOBUFS);
	}
	m[0]->m_len = m[0]->m_pkthdr.len = m[0]->m_ext.ext_size;
#elif RX_MODE == RX_MODE_3
#error missing rxmode 3.
#elif RX_MODE == RX_MODE_5
	MGETHDR(m[0], M_DONTWAIT, MT_DATA);
	for (i = 1; i < 5; i++) {
		MGET(m[i], M_DONTWAIT, MT_DATA);
	}
	if (m[3])
		MCLGET(m[3], M_DONTWAIT);
	if (m[4])
		MCLGET(m[4], M_DONTWAIT);
	if (!m[0] || !m[1] || !m[2] || !m[3] || !m[4] ||
	    ((m[3]->m_flags & M_EXT) == 0) || ((m[4]->m_flags & M_EXT) == 0)) {
		/* Out of something */
		for (i = 0; i < 5; i++)
			if (m[i] != NULL)
				m_free(m[i]);
		return (ENOBUFS);
	}
	/* Link'em together */
	m[0]->m_next = m[1];
	m[1]->m_next = m[2];
	m[2]->m_next = m[3];
	m[3]->m_next = m[4];
#else
#error bad mode RX_MODE
#endif

	if (sc->sc_rxb[id])
		bus_dmamap_unload(sc->sc_dmat, sc->sc_rxm[id]);
	sc->sc_rxb[id] = m[0];

	error = bus_dmamap_load_mbuf(sc->sc_dmat, sc->sc_rxm[id], m[0],
	    BUS_DMA_READ|BUS_DMA_NOWAIT);
	if (error)
		return (error);
	bus_dmamap_sync(sc->sc_dmat, sc->sc_rxm[id], 0,
	    sc->sc_rxm[id]->dm_mapsize, BUS_DMASYNC_PREREAD);

#if RX_MODE == RX_MODE_1
	rxd->rxd_control2 = RXD_MKCTL2(m[0]->m_len, 0, 0);
	rxd->rxd_buf0 = (uint64_t)sc->sc_rxm[id]->dm_segs[0].ds_addr;
	rxd->rxd_control1 = RXD_CTL1_OWN;
#elif RX_MODE == RX_MODE_3
#elif RX_MODE == RX_MODE_5
	rxd->rxd_control3 = RXD_MKCTL3(0, m[3]->m_len, m[4]->m_len);
	rxd->rxd_control2 = RXD_MKCTL2(m[0]->m_len, m[1]->m_len, m[2]->m_len);
	rxd->rxd_buf0 = (uint64_t)sc->sc_rxm[id]->dm_segs[0].ds_addr;
	rxd->rxd_buf1 = (uint64_t)sc->sc_rxm[id]->dm_segs[1].ds_addr;
	rxd->rxd_buf2 = (uint64_t)sc->sc_rxm[id]->dm_segs[2].ds_addr;
	rxd->rxd_buf3 = (uint64_t)sc->sc_rxm[id]->dm_segs[3].ds_addr;
	rxd->rxd_buf4 = (uint64_t)sc->sc_rxm[id]->dm_segs[4].ds_addr;
	rxd->rxd_control1 = RXD_CTL1_OWN;
#endif

	XGE_RXSYNC(id, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	return (0);
}

/*
 * This magic comes from the FreeBSD driver.
 */
int
xge_setup_xgxs_xena(struct xge_softc *sc)
{
	/* The magic numbers are described in the users guide */

	/* Writing to MDIO 0x8000 (Global Config 0) */
	PIF_WCSR(DTX_CONTROL, 0x8000051500000000ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80000515000000E0ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80000515D93500E4ULL); DELAY(50);

	/* Writing to MDIO 0x8000 (Global Config 1) */
	PIF_WCSR(DTX_CONTROL, 0x8001051500000000ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80010515000000e0ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80010515001e00e4ULL); DELAY(50);

	/* Reset the Gigablaze */
	PIF_WCSR(DTX_CONTROL, 0x8002051500000000ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80020515000000E0ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80020515F21000E4ULL); DELAY(50);

	/* read the pole settings */
	PIF_WCSR(DTX_CONTROL, 0x8000051500000000ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80000515000000e0ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80000515000000ecULL); DELAY(50);

	PIF_WCSR(DTX_CONTROL, 0x8001051500000000ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80010515000000e0ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80010515000000ecULL); DELAY(50);

	PIF_WCSR(DTX_CONTROL, 0x8002051500000000ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80020515000000e0ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x80020515000000ecULL); DELAY(50);

	/*
	 * Workaround for TX Lane XAUI initialization error.
	 * Read Xpak PHY register 24 for XAUI lane status.
	 */
	PIF_WCSR(DTX_CONTROL, 0x0018040000000000ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x00180400000000e0ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x00180400000000ecULL); DELAY(50);

	/*
	 * Reading the MDIO control with value 0x1804001c0F001c
	 * means the TxLanes were already in sync.
	 * Reading the MDIO control with value 0x1804000c0x001c
	 * means some TxLanes are not in sync, where x is a 4-bit
	 * value representing each lane.
	 */
#if 0
	val = PIF_RCSR(MDIO_CONTROL);
	if (val != 0x1804001c0F001cULL) {
		printf("%s: MDIO_CONTROL: %llx != %llx\n",
		    XNAME, val, 0x1804001c0F001cULL);
		return (1);
	}
#endif

	/* Set and remove the DTE XS INTLoopBackN */
	PIF_WCSR(DTX_CONTROL, 0x0000051500000000ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x00000515604000e0ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x00000515604000e4ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x00000515204000e4ULL); DELAY(50);
	PIF_WCSR(DTX_CONTROL, 0x00000515204000ecULL); DELAY(50);

#if 0
	/* Reading the DTX control register should be 0x5152040001c */
	val = PIF_RCSR(DTX_CONTROL);
	if (val != 0x5152040001cULL) {
		printf("%s: DTX_CONTROL: %llx != %llx\n",
		    XNAME, val, 0x5152040001cULL);
		return (1);
	}
#endif

	PIF_WCSR(MDIO_CONTROL, 0x0018040000000000ULL); DELAY(50);
	PIF_WCSR(MDIO_CONTROL, 0x00180400000000e0ULL); DELAY(50);
	PIF_WCSR(MDIO_CONTROL, 0x00180400000000ecULL); DELAY(50);

#if 0
	/* Reading the MDIO control should be 0x1804001c0f001c */
	val = PIF_RCSR(MDIO_CONTROL);
	if (val != 0x1804001c0f001cULL) {
		printf("%s: MDIO_CONTROL2: %llx != %llx\n",
		    XNAME, val, 0x1804001c0f001cULL);
		return (1);
	}
#endif
	return (0);
}

int
xge_setup_xgxs_herc(struct xge_softc *sc)
{
	int dtx_cnt = 0;

	while (herc_dtx_cfg[dtx_cnt] != END_SIGN) {
		PIF_WCSR(DTX_CONTROL, herc_dtx_cfg[dtx_cnt]);
		DELAY(100);
		dtx_cnt++;
	}

	return (0);
}