/*	$NetBSD: if_dge.c,v 1.28 2009/11/26 15:17:09 njoly Exp $ */

/*
 * Copyright (c) 2004, SUNET, Swedish University Computer Network.
 * All rights reserved.
 *
 * Written by Anders Magnusson for SUNET, Swedish University Computer Network.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      SUNET, Swedish University Computer Network.
 * 4. The name of SUNET may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY SUNET ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Copyright (c) 2001, 2002, 2003 Wasabi Systems, Inc.
 * All rights reserved.
 *
 * Written by Jason R. Thorpe for Wasabi Systems, Inc.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *      This product includes software developed for the NetBSD Project by
 *      Wasabi Systems, Inc.
 * 4. The name of Wasabi Systems, Inc. may not be used to endorse
 *    or promote products derived from this software without specific prior
 *    written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY WASABI SYSTEMS, INC. ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL WASABI SYSTEMS, INC
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Intel 82597EX Ten Gigabit Ethernet controller.
 *
 * TODO (in no specific order):
 *	HW VLAN support.
 *	TSE offloading (needs kernel changes...)
 *	RAIDC (receive interrupt delay adaptation)
 *	Use memory > 4GB.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_dge.c,v 1.28 2009/11/26 15:17:09 njoly Exp $");

#include "bpfilter.h"
#include "rnd.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#if NRND > 0
#include <sys/rnd.h>
#endif

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <netinet/in.h>			/* XXX for struct ip */
#include <netinet/in_systm.h>		/* XXX for struct ip */
#include <netinet/ip.h>			/* XXX for struct ip */
#include <netinet/tcp.h>		/* XXX for struct tcphdr */

#include <sys/bus.h>
#include <sys/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#include <dev/mii/mii_bitbang.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_dgereg.h>

/*
 * The receive engine may sometimes become off-by-one when writing back
 * chained descriptors.  Avoid this by allocating a large chunk of
 * memory and using it instead, so that descriptors never need to be
 * chained.  The problem only occurs with chained descriptors under
 * heavy load.
 */
#define DGE_OFFBYONE_RXBUG

#define DGE_EVENT_COUNTERS
#define DGE_DEBUG

#ifdef DGE_DEBUG
#define DGE_DEBUG_LINK		0x01
#define DGE_DEBUG_TX		0x02
#define DGE_DEBUG_RX		0x04
#define DGE_DEBUG_CKSUM		0x08
int	dge_debug = 0;

#define DPRINTF(x, y)	if (dge_debug & (x)) printf y
#else
#define DPRINTF(x, y)	/* nothing */
#endif /* DGE_DEBUG */

/*
 * Transmit descriptor list size.  We allow up to 100 DMA segments per
 * packet (Intel reports jumbo frame packets with as many as 80 DMA
 * segments when using 16k buffers).
 */
#define DGE_NTXSEGS		100
#define DGE_IFQUEUELEN		20000
#define DGE_TXQUEUELEN		2048
#define DGE_TXQUEUELEN_MASK	(DGE_TXQUEUELEN - 1)
#define DGE_TXQUEUE_GC		(DGE_TXQUEUELEN / 8)
#define DGE_NTXDESC		1024
#define DGE_NTXDESC_MASK	(DGE_NTXDESC - 1)
#define DGE_NEXTTX(x)		(((x) + 1) & DGE_NTXDESC_MASK)
#define DGE_NEXTTXS(x)		(((x) + 1) & DGE_TXQUEUELEN_MASK)

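/*
 * Note on the transmit bookkeeping above: a transmit "job" (struct
 * dge_txsoft, DGE_TXQUEUELEN of them) tracks one packet, while each packet
 * may consume several of the DGE_NTXDESC hardware descriptors.  When fewer
 * than DGE_TXQUEUE_GC jobs remain free, dge_start() calls dge_txintr() to
 * reclaim completed jobs before deciding to stall.
 */
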
/*
 * Receive descriptor list size.
 * Each buffer is MCLBYTES in size; for jumbo packets buffers may
 * be chained.  Due to the nature of the card (high-speed), keep this
 * ring large.  With 2k buffers the ring can store 400 jumbo packets,
 * which at full speed will be received in just under 3ms.
 */
#define DGE_NRXDESC		2048
#define DGE_NRXDESC_MASK	(DGE_NRXDESC - 1)
#define DGE_NEXTRX(x)		(((x) + 1) & DGE_NRXDESC_MASK)
/*
 * # of descriptors between head and written descriptors.
 * This works around two errata.
 */
#define DGE_RXSPACE		10
#define DGE_PREVRX(x)		(((x) - DGE_RXSPACE) & DGE_NRXDESC_MASK)
/*
 * Receive descriptor fetch thresholds.  These are values recommended
 * by Intel; do not touch them unless you know what you are doing.
 */
#define RXDCTL_PTHRESH_VAL	128
#define RXDCTL_HTHRESH_VAL	16
#define RXDCTL_WTHRESH_VAL	16


/*
 * Tweakable parameters; default values.
 */
#define FCRTH	0x30000		/* Send XOFF water mark */
#define FCRTL	0x28000		/* Send XON water mark */
#define RDTR	0x20		/* Interrupt delay after receive, .8192us units */
#define TIDV	0x20		/* Interrupt delay after send, .8192us units */

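/*
 * For reference: with the 0.8192 us granularity noted above, the default
 * RDTR/TIDV value of 0x20 (32) corresponds to roughly 32 * 0.8192 us,
 * i.e. about 26 us of interrupt delay.
 */
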
/*
 * Control structures are DMA'd to the i82597 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct dge_control_data {
	/*
	 * The transmit descriptors.
	 */
	struct dge_tdes wcd_txdescs[DGE_NTXDESC];

	/*
	 * The receive descriptors.
	 */
	struct dge_rdes wcd_rxdescs[DGE_NRXDESC];
};

#define DGE_CDOFF(x)	offsetof(struct dge_control_data, x)
#define DGE_CDTXOFF(x)	DGE_CDOFF(wcd_txdescs[(x)])
#define DGE_CDRXOFF(x)	DGE_CDOFF(wcd_rxdescs[(x)])

/*
 * The DGE interface has a higher max MTU size than normal jumbo frames.
 */
#define DGE_MAX_MTU	16288	/* Max MTU size for this interface */

/*
 * Software state for transmit jobs.
 */
struct dge_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	int txs_ndesc;			/* # of descriptors used */
};

/*
 * Software state for receive buffers.  Each descriptor gets a
 * 2k (MCLBYTES) buffer and a DMA map.  For packets which fill
 * more than one buffer, we chain them together.
 */
struct dge_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

/*
 * Software state per device.
 */
struct dge_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */

	int sc_flags;			/* flags; see below */
	int sc_bus_speed;		/* PCI/PCIX bus speed */
	int sc_pcix_offset;		/* PCIX capability register offset */

	pci_chipset_tag_t sc_pc;
	pcitag_t sc_pt;
	int sc_mmrbc;			/* Max PCIX memory read byte count */

	void *sc_ih;			/* interrupt cookie */

	struct ifmedia sc_media;

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	int sc_align_tweak;

	/*
	 * Software state for the transmit and receive descriptors.
	 */
	struct dge_txsoft sc_txsoft[DGE_TXQUEUELEN];
	struct dge_rxsoft sc_rxsoft[DGE_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct dge_control_data *sc_control_data;
#define sc_txdescs	sc_control_data->wcd_txdescs
#define sc_rxdescs	sc_control_data->wcd_rxdescs

#ifdef DGE_EVENT_COUNTERS
	/* Event counters. */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txforceintr;	/* Tx interrupts forced */
	struct evcnt sc_ev_txdw;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txqe;	/* Tx queue empty interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_linkintr;	/* Link interrupts */

	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtusum;	/* TCP/UDP cksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtusum;	/* TCP/UDP cksums comp. out-bound */

	struct evcnt sc_ev_txctx_init;	/* Tx cksum context cache initialized */
	struct evcnt sc_ev_txctx_hit;	/* Tx cksum context cache hit */
	struct evcnt sc_ev_txctx_miss;	/* Tx cksum context cache miss */

	struct evcnt sc_ev_txseg[DGE_NTXSEGS]; /* Tx packets w/ N segments */
	struct evcnt sc_ev_txdrop;	/* Tx packets dropped (too many segs) */
#endif /* DGE_EVENT_COUNTERS */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */

	int sc_txsfree;			/* number of free Tx jobs */
	int sc_txsnext;			/* next free Tx job */
	int sc_txsdirty;		/* dirty Tx jobs */

	uint32_t sc_txctx_ipcs;		/* cached Tx IP cksum ctx */
	uint32_t sc_txctx_tucs;		/* cached Tx TCP/UDP cksum ctx */

	int sc_rxptr;			/* next ready Rx descriptor/queue ent */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	uint32_t sc_ctrl0;		/* prototype CTRL0 register */
	uint32_t sc_icr;		/* prototype interrupt bits */
	uint32_t sc_tctl;		/* prototype TCTL register */
	uint32_t sc_rctl;		/* prototype RCTL register */

	int sc_mchash_type;		/* multicast filter offset */

	uint16_t sc_eeprom[EEPROM_SIZE];

#if NRND > 0
	rndsource_element_t rnd_source;	/* random source */
#endif
#ifdef DGE_OFFBYONE_RXBUG
	void *sc_bugbuf;
	SLIST_HEAD(, rxbugentry) sc_buglist;
	bus_dmamap_t sc_bugmap;
	struct rxbugentry *sc_entry;
#endif
};

#define DGE_RXCHAIN_RESET(sc)						\
do {									\
	(sc)->sc_rxtailp = &(sc)->sc_rxhead;				\
	*(sc)->sc_rxtailp = NULL;					\
	(sc)->sc_rxlen = 0;						\
} while (/*CONSTCOND*/0)

#define DGE_RXCHAIN_LINK(sc, m)						\
do {									\
	*(sc)->sc_rxtailp = (sc)->sc_rxtail = (m);			\
	(sc)->sc_rxtailp = &(m)->m_next;				\
} while (/*CONSTCOND*/0)

/* sc_flags */
#define DGE_F_BUS64		0x20	/* bus is 64-bit */
#define DGE_F_PCIX		0x40	/* bus is PCI-X */

#ifdef DGE_EVENT_COUNTERS
#define DGE_EVCNT_INCR(ev)	(ev)->ev_count++
#else
#define DGE_EVCNT_INCR(ev)	/* nothing */
#endif

#define CSR_READ(sc, reg)						\
	bus_space_read_4((sc)->sc_st, (sc)->sc_sh, (reg))
#define CSR_WRITE(sc, reg, val)						\
	bus_space_write_4((sc)->sc_st, (sc)->sc_sh, (reg), (val))

#define DGE_CDTXADDR(sc, x)	((sc)->sc_cddma + DGE_CDTXOFF((x)))
#define DGE_CDRXADDR(sc, x)	((sc)->sc_cddma + DGE_CDRXOFF((x)))

#define DGE_CDTXSYNC(sc, x, n, ops)					\
do {									\
	int __x, __n;							\
									\
	__x = (x);							\
	__n = (n);							\
									\
	/* If it will wrap around, sync to the end of the ring. */	\
	if ((__x + __n) > DGE_NTXDESC) {				\
		bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,	\
		    DGE_CDTXOFF(__x), sizeof(struct dge_tdes) *		\
		    (DGE_NTXDESC - __x), (ops));			\
		__n -= (DGE_NTXDESC - __x);				\
		__x = 0;						\
	}								\
									\
	/* Now sync whatever is left. */				\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    DGE_CDTXOFF(__x), sizeof(struct dge_tdes) * __n, (ops));	\
} while (/*CONSTCOND*/0)

#define DGE_CDRXSYNC(sc, x, ops)					\
do {									\
	bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap,		\
	    DGE_CDRXOFF((x)), sizeof(struct dge_rdes), (ops));		\
} while (/*CONSTCOND*/0)

#ifdef DGE_OFFBYONE_RXBUG
#define DGE_INIT_RXDESC(sc, x)						\
do {									\
	struct dge_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	struct dge_rdes *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	__rxd->dr_baddrl = htole32(sc->sc_bugmap->dm_segs[0].ds_addr +	\
	    (mtod((__m), char *) - (char *)sc->sc_bugbuf));		\
	__rxd->dr_baddrh = 0;						\
	__rxd->dr_len = 0;						\
	__rxd->dr_cksum = 0;						\
	__rxd->dr_status = 0;						\
	__rxd->dr_errors = 0;						\
	__rxd->dr_special = 0;						\
	DGE_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), DGE_RDT, (x));					\
} while (/*CONSTCOND*/0)
#else
#define DGE_INIT_RXDESC(sc, x)						\
do {									\
	struct dge_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)];		\
	struct dge_rdes *__rxd = &(sc)->sc_rxdescs[(x)];		\
	struct mbuf *__m = __rxs->rxs_mbuf;				\
									\
	/*								\
	 * Note: We scoot the packet forward 2 bytes in the buffer	\
	 * so that the payload after the Ethernet header is aligned	\
	 * to a 4-byte boundary.					\
	 *								\
	 * XXX BRAINDAMAGE ALERT!					\
	 * The stupid chip uses the same size for every buffer, which	\
	 * is set in the Receive Control register.  We are using the 2K \
	 * size option, but what we REALLY want is (2K - 2)!  For this	\
	 * reason, we can't "scoot" packets longer than the standard	\
	 * Ethernet MTU.  On strict-alignment platforms, if the total	\
	 * size exceeds (2K - 2) we set align_tweak to 0 and let	\
	 * the upper layer copy the headers.				\
	 */								\
	__m->m_data = __m->m_ext.ext_buf + (sc)->sc_align_tweak;	\
									\
	__rxd->dr_baddrl =						\
	    htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr +		\
		(sc)->sc_align_tweak);					\
	__rxd->dr_baddrh = 0;						\
	__rxd->dr_len = 0;						\
	__rxd->dr_cksum = 0;						\
	__rxd->dr_status = 0;						\
	__rxd->dr_errors = 0;						\
	__rxd->dr_special = 0;						\
	DGE_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \
									\
	CSR_WRITE((sc), DGE_RDT, (x));					\
} while (/*CONSTCOND*/0)
#endif

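/*
 * The two DGE_INIT_RXDESC() variants above differ in where the receive
 * buffer lives: with DGE_OFFBYONE_RXBUG the descriptor is pointed into
 * the single large sc_bugbuf allocation (its bus address is computed as
 * an offset into sc_bugmap), while the normal variant uses the
 * per-descriptor MCLBYTES DMA map and nudges the buffer start by
 * sc_align_tweak as described in the comment above.
 */
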
#ifdef DGE_OFFBYONE_RXBUG
/*
 * Allocation constants.  Much memory may be used for this.
 */
#ifndef DGE_BUFFER_SIZE
#define DGE_BUFFER_SIZE DGE_MAX_MTU
#endif
#define DGE_NBUFFERS	(4*DGE_NRXDESC)
#define DGE_RXMEM	(DGE_NBUFFERS*DGE_BUFFER_SIZE)

struct rxbugentry {
	SLIST_ENTRY(rxbugentry) rb_entry;
	int rb_slot;
};

static int
dge_alloc_rcvmem(struct dge_softc *sc)
{
	char *ptr, *kva;
	bus_dma_segment_t seg;
	int i, rseg, state, error;
	struct rxbugentry *entry;

	state = error = 0;

	if (bus_dmamem_alloc(sc->sc_dmat, DGE_RXMEM, PAGE_SIZE, 0,
	     &seg, 1, &rseg, BUS_DMA_NOWAIT)) {
		aprint_error_dev(&sc->sc_dev, "can't alloc rx buffers\n");
		return ENOBUFS;
	}

	state = 1;
	if (bus_dmamem_map(sc->sc_dmat, &seg, rseg, DGE_RXMEM, (void **)&kva,
	    BUS_DMA_NOWAIT)) {
		aprint_error_dev(&sc->sc_dev, "can't map DMA buffers (%d bytes)\n",
		    (int)DGE_RXMEM);
		error = ENOBUFS;
		goto out;
	}

	state = 2;
	if (bus_dmamap_create(sc->sc_dmat, DGE_RXMEM, 1, DGE_RXMEM, 0,
	    BUS_DMA_NOWAIT, &sc->sc_bugmap)) {
		aprint_error_dev(&sc->sc_dev, "can't create DMA map\n");
		error = ENOBUFS;
		goto out;
	}

	state = 3;
	if (bus_dmamap_load(sc->sc_dmat, sc->sc_bugmap,
	    kva, DGE_RXMEM, NULL, BUS_DMA_NOWAIT)) {
		aprint_error_dev(&sc->sc_dev, "can't load DMA map\n");
		error = ENOBUFS;
		goto out;
	}

	state = 4;
	sc->sc_bugbuf = (void *)kva;
	SLIST_INIT(&sc->sc_buglist);

	/*
	 * Now divide it up into DGE_BUFFER_SIZE pieces and save the addresses
	 * in an array.
	 */
	ptr = sc->sc_bugbuf;
	if ((entry = malloc(sizeof(*entry) * DGE_NBUFFERS,
	    M_DEVBUF, M_NOWAIT)) == NULL) {
		error = ENOBUFS;
		goto out;
	}
	sc->sc_entry = entry;
	for (i = 0; i < DGE_NBUFFERS; i++) {
		entry[i].rb_slot = i;
		SLIST_INSERT_HEAD(&sc->sc_buglist, &entry[i], rb_entry);
	}
out:
	if (error != 0) {
		/* Unwind whatever got set up, falling through each state. */
		switch (state) {
		case 4:
			bus_dmamap_unload(sc->sc_dmat, sc->sc_bugmap);
			/* FALLTHROUGH */
		case 3:
			bus_dmamap_destroy(sc->sc_dmat, sc->sc_bugmap);
			/* FALLTHROUGH */
		case 2:
			bus_dmamem_unmap(sc->sc_dmat, kva, DGE_RXMEM);
			/* FALLTHROUGH */
		case 1:
			bus_dmamem_free(sc->sc_dmat, &seg, rseg);
			break;
		default:
			break;
		}
	}

	return error;
}

/*
 * Allocate a jumbo buffer.
 */
static void *
dge_getbuf(struct dge_softc *sc)
{
	struct rxbugentry *entry;

	entry = SLIST_FIRST(&sc->sc_buglist);

	if (entry == NULL) {
		printf("%s: no free RX buffers\n", device_xname(&sc->sc_dev));
		return (NULL);
	}

	SLIST_REMOVE_HEAD(&sc->sc_buglist, rb_entry);
	return (char *)sc->sc_bugbuf + entry->rb_slot * DGE_BUFFER_SIZE;
}

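#if 0
/*
 * Illustrative sketch only (not compiled in): how a buffer returned by
 * dge_getbuf() is typically attached to an mbuf as external storage, with
 * dge_freebuf() below as the free callback that returns the slot to
 * sc_buglist.  This assumes NetBSD's MEXTADD() macro, and the helper name
 * here is purely hypothetical; the driver's own receive-buffer setup is
 * done in dge_add_rxbuf().
 */
static void
dge_getbuf_example(struct dge_softc *sc, struct mbuf *m)
{
	void *buf;

	if ((buf = dge_getbuf(sc)) != NULL)
		MEXTADD(m, buf, DGE_BUFFER_SIZE, M_DEVBUF, dge_freebuf, sc);
}
#endif
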
/*
 * Release a jumbo buffer.
 */
static void
dge_freebuf(struct mbuf *m, void *buf, size_t size, void *arg)
{
	struct rxbugentry *entry;
	struct dge_softc *sc;
	int i, s;

	/* Extract the softc struct pointer. */
	sc = (struct dge_softc *)arg;

	if (sc == NULL)
		panic("dge_freebuf: can't find softc pointer!");

	/* Calculate the slot this buffer belongs to. */

	i = ((char *)buf - (char *)sc->sc_bugbuf) / DGE_BUFFER_SIZE;

	if ((i < 0) || (i >= DGE_NBUFFERS))
		panic("dge_freebuf: asked to free buffer %d!", i);

	s = splvm();
	entry = sc->sc_entry + i;
	SLIST_INSERT_HEAD(&sc->sc_buglist, entry, rb_entry);

	if (__predict_true(m != NULL))
		pool_cache_put(mb_cache, m);
	splx(s);
}
#endif

static void	dge_start(struct ifnet *);
static void	dge_watchdog(struct ifnet *);
static int	dge_ioctl(struct ifnet *, u_long, void *);
static int	dge_init(struct ifnet *);
static void	dge_stop(struct ifnet *, int);

static bool	dge_shutdown(device_t, int);

static void	dge_reset(struct dge_softc *);
static void	dge_rxdrain(struct dge_softc *);
static int	dge_add_rxbuf(struct dge_softc *, int);

static void	dge_set_filter(struct dge_softc *);

static int	dge_intr(void *);
static void	dge_txintr(struct dge_softc *);
static void	dge_rxintr(struct dge_softc *);
static void	dge_linkintr(struct dge_softc *, uint32_t);

static int	dge_match(device_t, cfdata_t, void *);
static void	dge_attach(device_t, device_t, void *);

static int	dge_read_eeprom(struct dge_softc *sc);
static int	dge_eeprom_clockin(struct dge_softc *sc);
static void	dge_eeprom_clockout(struct dge_softc *sc, int bit);
static uint16_t	dge_eeprom_word(struct dge_softc *sc, int addr);
static int	dge_xgmii_mediachange(struct ifnet *);
static void	dge_xgmii_mediastatus(struct ifnet *, struct ifmediareq *);
static void	dge_xgmii_reset(struct dge_softc *);
static void	dge_xgmii_writereg(device_t, int, int, int);


CFATTACH_DECL(dge, sizeof(struct dge_softc),
    dge_match, dge_attach, NULL, NULL);

#ifdef DGE_EVENT_COUNTERS
#if DGE_NTXSEGS > 100
#error Update dge_txseg_evcnt_names
#endif
static char (*dge_txseg_evcnt_names)[DGE_NTXSEGS][8 /* "txseg00" + \0 */];
#endif /* DGE_EVENT_COUNTERS */

static int
dge_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_INTEL &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_INTEL_82597EX)
		return (1);

	return (0);
}

static void
dge_attach(device_t parent, device_t self, void *aux)
{
	struct dge_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	bus_dma_segment_t seg;
	int i, rseg, error;
	uint8_t enaddr[ETHER_ADDR_LEN];
	pcireg_t preg, memtype;
	uint32_t reg;

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pc = pa->pa_pc;
	sc->sc_pt = pa->pa_tag;

	preg = PCI_REVISION(pci_conf_read(pc, pa->pa_tag, PCI_CLASS_REG));
	aprint_naive(": Ethernet controller\n");
	aprint_normal(": Intel i82597EX 10GbE-LR Ethernet, rev. %d\n", preg);

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, DGE_PCI_BAR);
	if (pci_mapreg_map(pa, DGE_PCI_BAR, memtype, 0,
	    &sc->sc_st, &sc->sc_sh, NULL, NULL)) {
		aprint_error_dev(&sc->sc_dev, "unable to map device registers\n");
		return;
	}

	/* Enable bus mastering */
	preg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	preg |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, preg);

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(&sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, dge_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(&sc->sc_dev, "unable to establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(&sc->sc_dev, "interrupting at %s\n", intrstr);

	/*
	 * Determine a few things about the bus we're connected to.
	 */
	reg = CSR_READ(sc, DGE_STATUS);
	if (reg & STATUS_BUS64)
		sc->sc_flags |= DGE_F_BUS64;

	sc->sc_flags |= DGE_F_PCIX;
	if (pci_get_capability(pa->pa_pc, pa->pa_tag,
	    PCI_CAP_PCIX,
	    &sc->sc_pcix_offset, NULL) == 0)
		aprint_error_dev(&sc->sc_dev, "unable to find PCIX "
		    "capability\n");

	if (sc->sc_flags & DGE_F_PCIX) {
		switch (reg & STATUS_PCIX_MSK) {
		case STATUS_PCIX_66:
			sc->sc_bus_speed = 66;
			break;
		case STATUS_PCIX_100:
			sc->sc_bus_speed = 100;
			break;
		case STATUS_PCIX_133:
			sc->sc_bus_speed = 133;
			break;
		default:
			aprint_error_dev(&sc->sc_dev,
			    "unknown PCIXSPD %d; assuming 66MHz\n",
			    reg & STATUS_PCIX_MSK);
			sc->sc_bus_speed = 66;
		}
	} else
		sc->sc_bus_speed = (reg & STATUS_BUS64) ? 66 : 33;
	aprint_verbose_dev(&sc->sc_dev, "%d-bit %dMHz %s bus\n",
	    (sc->sc_flags & DGE_F_BUS64) ? 64 : 32, sc->sc_bus_speed,
	    (sc->sc_flags & DGE_F_PCIX) ? "PCIX" : "PCI");

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct dge_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		aprint_error_dev(&sc->sc_dev,
		    "unable to allocate control data, error = %d\n",
		    error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct dge_control_data), (void **)&sc->sc_control_data,
	    0)) != 0) {
		aprint_error_dev(&sc->sc_dev, "unable to map control data, error = %d\n",
		    error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct dge_control_data), 1,
	    sizeof(struct dge_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(&sc->sc_dev, "unable to create control data DMA map, "
		    "error = %d\n", error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct dge_control_data), NULL,
	    0)) != 0) {
		aprint_error_dev(&sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

#ifdef DGE_OFFBYONE_RXBUG
	if (dge_alloc_rcvmem(sc) != 0)
		return; /* Already complained */
#endif
	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < DGE_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, DGE_MAX_MTU,
		    DGE_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			aprint_error_dev(&sc->sc_dev, "unable to create Tx DMA map %d, "
			    "error = %d\n", i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < DGE_NRXDESC; i++) {
#ifdef DGE_OFFBYONE_RXBUG
		if ((error = bus_dmamap_create(sc->sc_dmat, DGE_BUFFER_SIZE, 1,
		    DGE_BUFFER_SIZE, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
#else
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
#endif
			aprint_error_dev(&sc->sc_dev, "unable to create Rx DMA map %d, "
			    "error = %d\n", i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Set bits in ctrl0 register.
	 * Should get the software defined pins out of EEPROM?
	 */
	sc->sc_ctrl0 |= CTRL0_RPE | CTRL0_TPE; /* XON/XOFF */
	sc->sc_ctrl0 |= CTRL0_SDP3_DIR | CTRL0_SDP2_DIR | CTRL0_SDP1_DIR |
	    CTRL0_SDP0_DIR | CTRL0_SDP3 | CTRL0_SDP2 | CTRL0_SDP0;

	/*
	 * Reset the chip to a known state.
	 */
	dge_reset(sc);

	/*
	 * Reset the PHY.
	 */
	dge_xgmii_reset(sc);

	/*
	 * Read in EEPROM data.
	 */
	if (dge_read_eeprom(sc)) {
		aprint_error_dev(&sc->sc_dev, "couldn't read EEPROM\n");
		return;
	}

	/*
	 * Get the ethernet address.
	 */
	enaddr[0] = sc->sc_eeprom[EE_ADDR01] & 0377;
	enaddr[1] = sc->sc_eeprom[EE_ADDR01] >> 8;
	enaddr[2] = sc->sc_eeprom[EE_ADDR23] & 0377;
	enaddr[3] = sc->sc_eeprom[EE_ADDR23] >> 8;
	enaddr[4] = sc->sc_eeprom[EE_ADDR45] & 0377;
	enaddr[5] = sc->sc_eeprom[EE_ADDR45] >> 8;

	aprint_normal_dev(&sc->sc_dev, "Ethernet address %s\n",
	    ether_sprintf(enaddr));

	/*
	 * Setup media stuff.
	 */
	ifmedia_init(&sc->sc_media, IFM_IMASK, dge_xgmii_mediachange,
	    dge_xgmii_mediastatus);
	ifmedia_add(&sc->sc_media, IFM_ETHER|IFM_10G_LR, 0, NULL);
	ifmedia_set(&sc->sc_media, IFM_ETHER|IFM_10G_LR);

	ifp = &sc->sc_ethercom.ec_if;
	strlcpy(ifp->if_xname, device_xname(&sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = dge_ioctl;
	ifp->if_start = dge_start;
	ifp->if_watchdog = dge_watchdog;
	ifp->if_init = dge_init;
	ifp->if_stop = dge_stop;
	IFQ_SET_MAXLEN(&ifp->if_snd, max(DGE_IFQUEUELEN, IFQ_MAXLEN));
	IFQ_SET_READY(&ifp->if_snd);

	sc->sc_ethercom.ec_capabilities |=
	    ETHERCAP_JUMBO_MTU | ETHERCAP_VLAN_MTU;

	/*
	 * We can perform IPv4, TCPv4 and UDPv4 checksums in-bound
	 * and out-bound.
	 */
	ifp->if_capabilities |=
	    IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx |
	    IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx |
	    IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
#if NRND > 0
	rnd_attach_source(&sc->rnd_source, device_xname(&sc->sc_dev),
	    RND_TYPE_NET, 0);
#endif

#ifdef DGE_EVENT_COUNTERS
	/* Fix segment event naming */
	if (dge_txseg_evcnt_names == NULL) {
		dge_txseg_evcnt_names =
		    malloc(sizeof(*dge_txseg_evcnt_names), M_DEVBUF, M_WAITOK);
		for (i = 0; i < DGE_NTXSEGS; i++)
			snprintf((*dge_txseg_evcnt_names)[i],
			    sizeof((*dge_txseg_evcnt_names)[i]), "txseg%d", i);
	}

	/* Attach event counters. */
	evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC,
	    NULL, device_xname(&sc->sc_dev), "txsstall");
	evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC,
	    NULL, device_xname(&sc->sc_dev), "txdstall");
	evcnt_attach_dynamic(&sc->sc_ev_txforceintr, EVCNT_TYPE_MISC,
	    NULL, device_xname(&sc->sc_dev), "txforceintr");
	evcnt_attach_dynamic(&sc->sc_ev_txdw, EVCNT_TYPE_INTR,
	    NULL, device_xname(&sc->sc_dev), "txdw");
	evcnt_attach_dynamic(&sc->sc_ev_txqe, EVCNT_TYPE_INTR,
	    NULL, device_xname(&sc->sc_dev), "txqe");
	evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR,
	    NULL, device_xname(&sc->sc_dev), "rxintr");
	evcnt_attach_dynamic(&sc->sc_ev_linkintr, EVCNT_TYPE_INTR,
	    NULL, device_xname(&sc->sc_dev), "linkintr");

	evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC,
	    NULL, device_xname(&sc->sc_dev), "rxipsum");
	evcnt_attach_dynamic(&sc->sc_ev_rxtusum, EVCNT_TYPE_MISC,
	    NULL, device_xname(&sc->sc_dev), "rxtusum");
	evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC,
	    NULL, device_xname(&sc->sc_dev), "txipsum");
	evcnt_attach_dynamic(&sc->sc_ev_txtusum, EVCNT_TYPE_MISC,
	    NULL, device_xname(&sc->sc_dev), "txtusum");

	evcnt_attach_dynamic(&sc->sc_ev_txctx_init, EVCNT_TYPE_MISC,
	    NULL, device_xname(&sc->sc_dev), "txctx init");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_hit, EVCNT_TYPE_MISC,
	    NULL, device_xname(&sc->sc_dev), "txctx hit");
	evcnt_attach_dynamic(&sc->sc_ev_txctx_miss, EVCNT_TYPE_MISC,
	    NULL, device_xname(&sc->sc_dev), "txctx miss");

	for (i = 0; i < DGE_NTXSEGS; i++)
		evcnt_attach_dynamic(&sc->sc_ev_txseg[i], EVCNT_TYPE_MISC,
		    NULL, device_xname(&sc->sc_dev), (*dge_txseg_evcnt_names)[i]);

	evcnt_attach_dynamic(&sc->sc_ev_txdrop, EVCNT_TYPE_MISC,
	    NULL, device_xname(&sc->sc_dev), "txdrop");

#endif /* DGE_EVENT_COUNTERS */

	/*
	 * Make sure the interface is shut down during reboot.
	 */
	if (pmf_device_register1(self, NULL, NULL, dge_shutdown))
		pmf_class_network_register(self, ifp);
	else
		aprint_error_dev(self, "couldn't establish power handler\n");

	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < DGE_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_4:
	for (i = 0; i < DGE_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    sizeof(struct dge_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}

/*
 * dge_shutdown:
 *
 *	Make sure the interface is stopped at reboot time.
 */
static bool
dge_shutdown(device_t self, int howto)
{
	struct dge_softc *sc;

	sc = device_private(self);
	dge_stop(&sc->sc_ethercom.ec_if, 1);

	return true;
}

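/*
 * A note on dge_tx_cksum() below: the chip needs a "context descriptor"
 * describing where the IP and TCP/UDP checksum fields live before it can
 * offload checksumming.  The most recently loaded context is cached in
 * sc_txctx_ipcs/sc_txctx_tucs, and a new context descriptor (which costs
 * one extra Tx descriptor, hence the txs_ndesc++) is only queued when the
 * required context differs from the cached one.  dge_init() invalidates
 * the cache by setting both values to 0xffffffff.
 */
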
/*
 * dge_tx_cksum:
 *
 *	Set up TCP/IP checksumming parameters for the
 *	specified packet.
 */
static int
dge_tx_cksum(struct dge_softc *sc, struct dge_txsoft *txs, uint8_t *fieldsp)
{
	struct mbuf *m0 = txs->txs_mbuf;
	struct dge_ctdes *t;
	uint32_t ipcs, tucs;
	struct ether_header *eh;
	int offset, iphl;
	uint8_t fields = 0;

	/*
	 * XXX It would be nice if the mbuf pkthdr had offset
	 * fields for the protocol headers.
	 */

	eh = mtod(m0, struct ether_header *);
	switch (htons(eh->ether_type)) {
	case ETHERTYPE_IP:
		offset = ETHER_HDR_LEN;
		break;

	case ETHERTYPE_VLAN:
		offset = ETHER_HDR_LEN + ETHER_VLAN_ENCAP_LEN;
		break;

	default:
		/*
		 * Don't support this protocol or encapsulation.
		 */
		*fieldsp = 0;
		return (0);
	}

	iphl = M_CSUM_DATA_IPv4_IPHL(m0->m_pkthdr.csum_data);

	/*
	 * NOTE: Even if we're not using the IP or TCP/UDP checksum
	 * offload feature, if we load the context descriptor, we
	 * MUST provide valid values for IPCSS and TUCSS fields.
	 */

	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
		DGE_EVCNT_INCR(&sc->sc_ev_txipsum);
		fields |= TDESC_POPTS_IXSM;
		ipcs = DGE_TCPIP_IPCSS(offset) |
		    DGE_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    DGE_TCPIP_IPCSE(offset + iphl - 1);
	} else if (__predict_true(sc->sc_txctx_ipcs != 0xffffffff)) {
		/* Use the cached value. */
		ipcs = sc->sc_txctx_ipcs;
	} else {
		/* Just initialize it to the likely value anyway. */
		ipcs = DGE_TCPIP_IPCSS(offset) |
		    DGE_TCPIP_IPCSO(offset + offsetof(struct ip, ip_sum)) |
		    DGE_TCPIP_IPCSE(offset + iphl - 1);
	}
	DPRINTF(DGE_DEBUG_CKSUM,
	    ("%s: CKSUM: offset %d ipcs 0x%x\n",
	    device_xname(&sc->sc_dev), offset, ipcs));

	offset += iphl;

	if (m0->m_pkthdr.csum_flags & (M_CSUM_TCPv4|M_CSUM_UDPv4)) {
		DGE_EVCNT_INCR(&sc->sc_ev_txtusum);
		fields |= TDESC_POPTS_TXSM;
		tucs = DGE_TCPIP_TUCSS(offset) |
		    DGE_TCPIP_TUCSO(offset + M_CSUM_DATA_IPv4_OFFSET(m0->m_pkthdr.csum_data)) |
		    DGE_TCPIP_TUCSE(0) /* rest of packet */;
	} else if (__predict_true(sc->sc_txctx_tucs != 0xffffffff)) {
		/* Use the cached value. */
		tucs = sc->sc_txctx_tucs;
	} else {
		/* Just initialize it to a valid TCP context. */
		tucs = DGE_TCPIP_TUCSS(offset) |
		    DGE_TCPIP_TUCSO(offset + offsetof(struct tcphdr, th_sum)) |
		    DGE_TCPIP_TUCSE(0) /* rest of packet */;
	}

	DPRINTF(DGE_DEBUG_CKSUM,
	    ("%s: CKSUM: offset %d tucs 0x%x\n",
	    device_xname(&sc->sc_dev), offset, tucs));

	if (sc->sc_txctx_ipcs == ipcs &&
	    sc->sc_txctx_tucs == tucs) {
		/* Cached context is fine. */
		DGE_EVCNT_INCR(&sc->sc_ev_txctx_hit);
	} else {
		/* Fill in the context descriptor. */
#ifdef DGE_EVENT_COUNTERS
		if (sc->sc_txctx_ipcs == 0xffffffff &&
		    sc->sc_txctx_tucs == 0xffffffff)
			DGE_EVCNT_INCR(&sc->sc_ev_txctx_init);
		else
			DGE_EVCNT_INCR(&sc->sc_ev_txctx_miss);
#endif
		t = (struct dge_ctdes *)&sc->sc_txdescs[sc->sc_txnext];
		t->dc_tcpip_ipcs = htole32(ipcs);
		t->dc_tcpip_tucs = htole32(tucs);
		t->dc_tcpip_cmdlen = htole32(TDESC_DTYP_CTD);
		t->dc_tcpip_seg = 0;
		DGE_CDTXSYNC(sc, sc->sc_txnext, 1, BUS_DMASYNC_PREWRITE);

		sc->sc_txctx_ipcs = ipcs;
		sc->sc_txctx_tucs = tucs;

		sc->sc_txnext = DGE_NEXTTX(sc->sc_txnext);
		txs->txs_ndesc++;
	}

	*fieldsp = fields;

	return (0);
}

/*
 * dge_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
static void
dge_start(struct ifnet *ifp)
{
	struct dge_softc *sc = ifp->if_softc;
	struct mbuf *m0;
	struct dge_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx = -1, ofree, seg;
	uint32_t cksumcmd;
	uint8_t cksumfields;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors.
	 */
	ofree = sc->sc_txfree;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Grab a packet off the queue. */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;

		DPRINTF(DGE_DEBUG_TX,
		    ("%s: TX: have packet to transmit: %p\n",
		    device_xname(&sc->sc_dev), m0));

		/* Get a work queue entry. */
		if (sc->sc_txsfree < DGE_TXQUEUE_GC) {
			dge_txintr(sc);
			if (sc->sc_txsfree == 0) {
				DPRINTF(DGE_DEBUG_TX,
				    ("%s: TX: no free job descriptors\n",
				    device_xname(&sc->sc_dev)));
				DGE_EVCNT_INCR(&sc->sc_ev_txsstall);
				break;
			}
		}

		txs = &sc->sc_txsoft[sc->sc_txsnext];
		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  For the too-many-segments
		 * case, we simply report an error and drop the packet,
		 * since we can't sanely copy a jumbo packet to a single
		 * buffer.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				DGE_EVCNT_INCR(&sc->sc_ev_txdrop);
				printf("%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    device_xname(&sc->sc_dev));
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				m_freem(m0);
				continue;
			}
			/*
			 * Short on resources, just stop for now.
			 */
			DPRINTF(DGE_DEBUG_TX,
			    ("%s: TX: dmamap load failed: %d\n",
			    device_xname(&sc->sc_dev), error));
			break;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring due to the semantics of the
		 * TDT register, plus one more in the event we need
		 * to re-load checksum offload context.
		 */
		if (dmamap->dm_nsegs > (sc->sc_txfree - 2)) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 */
			DPRINTF(DGE_DEBUG_TX,
			    ("%s: TX: need %d descriptors, have %d\n",
			    device_xname(&sc->sc_dev), dmamap->dm_nsegs,
			    sc->sc_txfree - 1));
			ifp->if_flags |= IFF_OACTIVE;
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			DGE_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		DPRINTF(DGE_DEBUG_TX,
		    ("%s: TX: packet has %d DMA segments\n",
		    device_xname(&sc->sc_dev), dmamap->dm_nsegs));

		DGE_EVCNT_INCR(&sc->sc_ev_txseg[dmamap->dm_nsegs - 1]);

		/*
		 * Store a pointer to the packet so that we can free it
		 * later.
		 *
		 * Initially, we consider the number of descriptors the
		 * packet uses to be the number of DMA segments.  This
		 * may be incremented by one if we do checksum offload
		 * (a descriptor is used to set the checksum context).
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_ndesc = dmamap->dm_nsegs;

		/*
		 * Set up checksum offload parameters for
		 * this packet.
		 */
		if (m0->m_pkthdr.csum_flags &
		    (M_CSUM_IPv4|M_CSUM_TCPv4|M_CSUM_UDPv4)) {
			if (dge_tx_cksum(sc, txs, &cksumfields) != 0) {
				/* Error message already displayed. */
				bus_dmamap_unload(sc->sc_dmat, dmamap);
				continue;
			}
		} else {
			cksumfields = 0;
		}

		cksumcmd = TDESC_DCMD_IDE | TDESC_DTYP_DATA;

		/*
		 * Initialize the transmit descriptor.
		 */
		for (nexttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = DGE_NEXTTX(nexttx)) {
			/*
			 * Note: we currently only use 32-bit DMA
			 * addresses.
			 */
			sc->sc_txdescs[nexttx].dt_baddrh = 0;
			sc->sc_txdescs[nexttx].dt_baddrl =
			    htole32(dmamap->dm_segs[seg].ds_addr);
			sc->sc_txdescs[nexttx].dt_ctl =
			    htole32(cksumcmd | dmamap->dm_segs[seg].ds_len);
			sc->sc_txdescs[nexttx].dt_status = 0;
			sc->sc_txdescs[nexttx].dt_popts = cksumfields;
			sc->sc_txdescs[nexttx].dt_vlan = 0;
			lasttx = nexttx;

			DPRINTF(DGE_DEBUG_TX,
			    ("%s: TX: desc %d: low 0x%08lx, len 0x%04lx\n",
			    device_xname(&sc->sc_dev), nexttx,
			    (unsigned long)le32toh(dmamap->dm_segs[seg].ds_addr),
			    (unsigned long)le32toh(dmamap->dm_segs[seg].ds_len)));
		}

		KASSERT(lasttx != -1);

		/*
		 * Set up the command byte on the last descriptor of
		 * the packet.  If we're in the interrupt delay window,
		 * delay the interrupt.
		 */
		sc->sc_txdescs[lasttx].dt_ctl |=
		    htole32(TDESC_DCMD_EOP | TDESC_DCMD_RS);

		txs->txs_lastdesc = lasttx;

		DPRINTF(DGE_DEBUG_TX,
		    ("%s: TX: desc %d: cmdlen 0x%08x\n", device_xname(&sc->sc_dev),
		    lasttx, le32toh(sc->sc_txdescs[lasttx].dt_ctl)));

		/* Sync the descriptors we're using. */
		DGE_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/* Give the packet to the chip. */
		CSR_WRITE(sc, DGE_TDT, nexttx);

		DPRINTF(DGE_DEBUG_TX,
		    ("%s: TX: TDT -> %d\n", device_xname(&sc->sc_dev), nexttx));

		DPRINTF(DGE_DEBUG_TX,
		    ("%s: TX: finished transmitting packet, job %d\n",
		    device_xname(&sc->sc_dev), sc->sc_txsnext));

		/* Advance the tx pointer. */
		sc->sc_txfree -= txs->txs_ndesc;
		sc->sc_txnext = nexttx;

		sc->sc_txsfree--;
		sc->sc_txsnext = DGE_NEXTTXS(sc->sc_txsnext);

#if NBPFILTER > 0
		/* Pass the packet to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	if (sc->sc_txsfree == 0 || sc->sc_txfree <= 2) {
		/* No more slots; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txfree != ofree) {
		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * dge_watchdog:	[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
static void
dge_watchdog(struct ifnet *ifp)
{
	struct dge_softc *sc = ifp->if_softc;

	/*
	 * Since we're using delayed interrupts, sweep up
	 * before we report an error.
	 */
	dge_txintr(sc);

	if (sc->sc_txfree != DGE_NTXDESC) {
		printf("%s: device timeout (txfree %d txsfree %d txnext %d)\n",
		    device_xname(&sc->sc_dev), sc->sc_txfree, sc->sc_txsfree,
		    sc->sc_txnext);
		ifp->if_oerrors++;

		/* Reset the interface. */
		(void) dge_init(ifp);
	}

	/* Try to get more packets going. */
	dge_start(ifp);
}

/*
 * dge_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
static int
dge_ioctl(struct ifnet *ifp, u_long cmd, void *data)
{
	struct dge_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *) data;
	pcireg_t preg;
	int s, error, mmrbc;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_media, cmd);
		break;

	case SIOCSIFMTU:
		if (ifr->ifr_mtu < ETHERMIN || ifr->ifr_mtu > DGE_MAX_MTU)
			error = EINVAL;
		else if ((error = ifioctl_common(ifp, cmd, data)) != ENETRESET)
			break;
		else if (ifp->if_flags & IFF_UP)
			error = (*ifp->if_init)(ifp);
		else
			error = 0;
		break;

	case SIOCSIFFLAGS:
		if ((error = ifioctl_common(ifp, cmd, data)) != 0)
			break;
		/* extract link flags */
		if ((ifp->if_flags & IFF_LINK0) == 0 &&
		    (ifp->if_flags & IFF_LINK1) == 0)
			mmrbc = PCIX_MMRBC_512;
		else if ((ifp->if_flags & IFF_LINK0) == 0 &&
		    (ifp->if_flags & IFF_LINK1) != 0)
			mmrbc = PCIX_MMRBC_1024;
		else if ((ifp->if_flags & IFF_LINK0) != 0 &&
		    (ifp->if_flags & IFF_LINK1) == 0)
			mmrbc = PCIX_MMRBC_2048;
		else
			mmrbc = PCIX_MMRBC_4096;
		if (mmrbc != sc->sc_mmrbc) {
			preg = pci_conf_read(sc->sc_pc, sc->sc_pt, DGE_PCIX_CMD);
			preg &= ~PCIX_MMRBC_MSK;
			preg |= mmrbc;
			pci_conf_write(sc->sc_pc, sc->sc_pt, DGE_PCIX_CMD, preg);
			sc->sc_mmrbc = mmrbc;
		}
		/* FALLTHROUGH */
	default:
		if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET)
			break;

		error = 0;

		if (cmd == SIOCSIFCAP)
			error = (*ifp->if_init)(ifp);
		else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI)
			;
		else if (ifp->if_flags & IFF_RUNNING) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
			 */
			dge_set_filter(sc);
		}
		break;
	}

	/* Try to get more packets going. */
	dge_start(ifp);

	splx(s);
	return (error);
}

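/*
 * Note on the SIOCSIFFLAGS handling above: the IFF_LINK0/IFF_LINK1 pair
 * selects the PCI-X maximum memory read byte count (MMRBC):
 *
 *	neither flag set	 512 bytes
 *	link1 only		1024 bytes
 *	link0 only		2048 bytes
 *	link0 and link1		4096 bytes
 *
 * For example, "ifconfig dge0 link0" selects 2048-byte reads.
 */
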
/*
 * dge_intr:
 *
 *	Interrupt service routine.
 */
static int
dge_intr(void *arg)
{
	struct dge_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t icr;
	int wantinit, handled = 0;

	for (wantinit = 0; wantinit == 0;) {
		icr = CSR_READ(sc, DGE_ICR);
		if ((icr & sc->sc_icr) == 0)
			break;

#if 0 /*NRND > 0*/
		if (RND_ENABLED(&sc->rnd_source))
			rnd_add_uint32(&sc->rnd_source, icr);
#endif

		handled = 1;

#if defined(DGE_DEBUG) || defined(DGE_EVENT_COUNTERS)
		if (icr & (ICR_RXDMT0|ICR_RXT0)) {
			DPRINTF(DGE_DEBUG_RX,
			    ("%s: RX: got Rx intr 0x%08x\n",
			    device_xname(&sc->sc_dev),
			    icr & (ICR_RXDMT0|ICR_RXT0)));
			DGE_EVCNT_INCR(&sc->sc_ev_rxintr);
		}
#endif
		dge_rxintr(sc);

#if defined(DGE_DEBUG) || defined(DGE_EVENT_COUNTERS)
		if (icr & ICR_TXDW) {
			DPRINTF(DGE_DEBUG_TX,
			    ("%s: TX: got TXDW interrupt\n",
			    device_xname(&sc->sc_dev)));
			DGE_EVCNT_INCR(&sc->sc_ev_txdw);
		}
		if (icr & ICR_TXQE)
			DGE_EVCNT_INCR(&sc->sc_ev_txqe);
#endif
		dge_txintr(sc);

		if (icr & (ICR_LSC|ICR_RXSEQ)) {
			DGE_EVCNT_INCR(&sc->sc_ev_linkintr);
			dge_linkintr(sc, icr);
		}

		if (icr & ICR_RXO) {
			printf("%s: Receive overrun\n", device_xname(&sc->sc_dev));
			wantinit = 1;
		}
	}

	if (handled) {
		if (wantinit)
			dge_init(ifp);

		/* Try to get more packets going. */
		dge_start(ifp);
	}

	return (handled);
}

/*
 * dge_txintr:
 *
 *	Helper; handle transmit interrupts.
 */
static void
dge_txintr(struct dge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct dge_txsoft *txs;
	uint8_t status;
	int i;

	ifp->if_flags &= ~IFF_OACTIVE;

	/*
	 * Go through the Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	for (i = sc->sc_txsdirty; sc->sc_txsfree != DGE_TXQUEUELEN;
	     i = DGE_NEXTTXS(i), sc->sc_txsfree++) {
		txs = &sc->sc_txsoft[i];

		DPRINTF(DGE_DEBUG_TX,
		    ("%s: TX: checking job %d\n", device_xname(&sc->sc_dev), i));

		DGE_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
		    BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status =
		    sc->sc_txdescs[txs->txs_lastdesc].dt_status;
		if ((status & TDESC_STA_DD) == 0) {
			DGE_CDTXSYNC(sc, txs->txs_lastdesc, 1,
			    BUS_DMASYNC_PREREAD);
			break;
		}

		DPRINTF(DGE_DEBUG_TX,
		    ("%s: TX: job %d done: descs %d..%d\n",
		    device_xname(&sc->sc_dev), i, txs->txs_firstdesc,
		    txs->txs_lastdesc));

		ifp->if_opackets++;
		sc->sc_txfree += txs->txs_ndesc;
		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
	}

	/* Update the dirty transmit buffer pointer. */
	sc->sc_txsdirty = i;
	DPRINTF(DGE_DEBUG_TX,
	    ("%s: TX: txsdirty -> %d\n", device_xname(&sc->sc_dev), i));

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (sc->sc_txsfree == DGE_TXQUEUELEN)
		ifp->if_timer = 0;
}

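/*
 * A note on dge_rxintr() below: after a descriptor is processed and
 * refilled, the descriptor actually handed back to the chip is the one
 * DGE_RXSPACE entries behind it (DGE_INIT_RXDESC on DGE_PREVRX(i)), so
 * the write pointer the chip sees always trails the current position by
 * DGE_RXSPACE entries, preserving the errata gap described at the
 * DGE_RXSPACE definition.
 */
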
/*
 * dge_rxintr:
 *
 *	Helper; handle receive interrupts.
 */
static void
dge_rxintr(struct dge_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct dge_rxsoft *rxs;
	struct mbuf *m;
	int i, len;
	uint8_t status, errors;

	for (i = sc->sc_rxptr;; i = DGE_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		DPRINTF(DGE_DEBUG_RX,
		    ("%s: RX: checking descriptor %d\n",
		    device_xname(&sc->sc_dev), i));

		DGE_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		status = sc->sc_rxdescs[i].dr_status;
		errors = sc->sc_rxdescs[i].dr_errors;
		len = le16toh(sc->sc_rxdescs[i].dr_len);

		if ((status & RDESC_STS_DD) == 0) {
			/*
			 * We have processed all of the receive descriptors.
			 */
			DGE_CDRXSYNC(sc, i, BUS_DMASYNC_PREREAD);
			break;
		}

		if (__predict_false(sc->sc_rxdiscard)) {
			DPRINTF(DGE_DEBUG_RX,
			    ("%s: RX: discarding contents of descriptor %d\n",
			    device_xname(&sc->sc_dev), i));
			DGE_INIT_RXDESC(sc, i);
			if (status & RDESC_STS_EOP) {
				/* Reset our state. */
				DPRINTF(DGE_DEBUG_RX,
				    ("%s: RX: resetting rxdiscard -> 0\n",
				    device_xname(&sc->sc_dev)));
				sc->sc_rxdiscard = 0;
			}
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		m = rxs->rxs_mbuf;

		/*
		 * Add a new receive buffer to the ring.
		 */
		if (dge_add_rxbuf(sc, i) != 0) {
			/*
			 * Failed, throw away what we've done so
			 * far, and discard the rest of the packet.
			 */
			ifp->if_ierrors++;
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			DGE_INIT_RXDESC(sc, i);
			if ((status & RDESC_STS_EOP) == 0)
				sc->sc_rxdiscard = 1;
			if (sc->sc_rxhead != NULL)
				m_freem(sc->sc_rxhead);
			DGE_RXCHAIN_RESET(sc);
			DPRINTF(DGE_DEBUG_RX,
			    ("%s: RX: Rx buffer allocation failed, "
			    "dropping packet%s\n", device_xname(&sc->sc_dev),
			    sc->sc_rxdiscard ? " (discard)" : ""));
			continue;
		}
		DGE_INIT_RXDESC(sc, DGE_PREVRX(i)); /* Write the descriptor */

		DGE_RXCHAIN_LINK(sc, m);

		m->m_len = len;

		DPRINTF(DGE_DEBUG_RX,
		    ("%s: RX: buffer at %p len %d\n",
		    device_xname(&sc->sc_dev), m->m_data, len));

		/*
		 * If this is not the end of the packet, keep
		 * looking.
		 */
		if ((status & RDESC_STS_EOP) == 0) {
			sc->sc_rxlen += len;
			DPRINTF(DGE_DEBUG_RX,
			    ("%s: RX: not yet EOP, rxlen -> %d\n",
			    device_xname(&sc->sc_dev), sc->sc_rxlen));
			continue;
		}

		/*
		 * Okay, we have the entire packet now...
		 */
		*sc->sc_rxtailp = NULL;
		m = sc->sc_rxhead;
		len += sc->sc_rxlen;

		DGE_RXCHAIN_RESET(sc);

		DPRINTF(DGE_DEBUG_RX,
		    ("%s: RX: have entire packet, len -> %d\n",
		    device_xname(&sc->sc_dev), len));

		/*
		 * If an error occurred, update stats and drop the packet.
		 */
		if (errors &
		    (RDESC_ERR_CE|RDESC_ERR_SE|RDESC_ERR_P|RDESC_ERR_RXE)) {
			ifp->if_ierrors++;
			if (errors & RDESC_ERR_SE)
				printf("%s: symbol error\n",
				    device_xname(&sc->sc_dev));
			else if (errors & RDESC_ERR_P)
				printf("%s: parity error\n",
				    device_xname(&sc->sc_dev));
			else if (errors & RDESC_ERR_CE)
				printf("%s: CRC error\n",
				    device_xname(&sc->sc_dev));
			m_freem(m);
			continue;
		}

		/*
		 * No errors.  Receive the packet.
		 */
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;

		/*
		 * Set up checksum info for this packet.
		 */
		if (status & RDESC_STS_IPCS) {
			DGE_EVCNT_INCR(&sc->sc_ev_rxipsum);
			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			if (errors & RDESC_ERR_IPE)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
		}
		if (status & RDESC_STS_TCPCS) {
			/*
			 * Note: we don't know if this was TCP or UDP,
			 * so we just set both bits, and expect the
			 * upper layers to deal.
			 */
			DGE_EVCNT_INCR(&sc->sc_ev_rxtusum);
			m->m_pkthdr.csum_flags |= M_CSUM_TCPv4|M_CSUM_UDPv4;
			if (errors & RDESC_ERR_TCPE)
				m->m_pkthdr.csum_flags |= M_CSUM_TCP_UDP_BAD;
		}

		ifp->if_ipackets++;

#if NBPFILTER > 0
		/* Pass this up to any BPF listeners. */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;

	DPRINTF(DGE_DEBUG_RX,
	    ("%s: RX: rxptr -> %d\n", device_xname(&sc->sc_dev), i));
}

/*
 * dge_linkintr:
 *
 *	Helper; handle link interrupts.
 */
static void
dge_linkintr(struct dge_softc *sc, uint32_t icr)
{
	uint32_t status;

	if (icr & ICR_LSC) {
		status = CSR_READ(sc, DGE_STATUS);
		if (status & STATUS_LINKUP) {
			DPRINTF(DGE_DEBUG_LINK, ("%s: LINK: LSC -> up\n",
			    device_xname(&sc->sc_dev)));
		} else {
			DPRINTF(DGE_DEBUG_LINK, ("%s: LINK: LSC -> down\n",
			    device_xname(&sc->sc_dev)));
		}
	} else if (icr & ICR_RXSEQ) {
		DPRINTF(DGE_DEBUG_LINK,
		    ("%s: LINK: Receive sequence error\n",
		    device_xname(&sc->sc_dev)));
	}
	/* XXX - fix errata */
}

/*
 * dge_reset:
 *
 *	Reset the i82597 chip.
 */
static void
dge_reset(struct dge_softc *sc)
{
	int i;

	/*
	 * Do a chip reset.
	 */
	CSR_WRITE(sc, DGE_CTRL0, CTRL0_RST | sc->sc_ctrl0);

	delay(10000);

	for (i = 0; i < 1000; i++) {
		if ((CSR_READ(sc, DGE_CTRL0) & CTRL0_RST) == 0)
			break;
		delay(20);
	}

	if (CSR_READ(sc, DGE_CTRL0) & CTRL0_RST)
		printf("%s: WARNING: reset failed to complete\n",
		    device_xname(&sc->sc_dev));
	/*
	 * Reset the EEPROM logic.
	 * This will cause the chip to reread its default values,
	 * which doesn't happen otherwise (errata).
	 */
	CSR_WRITE(sc, DGE_CTRL1, CTRL1_EE_RST);
	delay(10000);
}

/*
 * dge_init:		[ifnet interface function]
 *
 *	Initialize the interface.  Must be called at splnet().
 */
static int
dge_init(struct ifnet *ifp)
{
	struct dge_softc *sc = ifp->if_softc;
	struct dge_rxsoft *rxs;
	int i, error = 0;
	uint32_t reg;

	/*
	 * *_HDR_ALIGNED_P is constant 1 if __NO_STRICT_ALIGNMENT is set.
	 * There is a small but measurable benefit to avoiding the adjustment
	 * of the descriptor so that the headers are aligned, for normal mtu,
	 * on such platforms.  One possibility is that the DMA itself is
	 * slightly more efficient if the front of the entire packet (instead
	 * of the front of the headers) is aligned.
	 *
	 * Note we must always set align_tweak to 0 if we are using
	 * jumbo frames.
1881 */ 1882 #ifdef __NO_STRICT_ALIGNMENT 1883 sc->sc_align_tweak = 0; 1884 #else 1885 if ((ifp->if_mtu + ETHER_HDR_LEN + ETHER_CRC_LEN) > (MCLBYTES - 2)) 1886 sc->sc_align_tweak = 0; 1887 else 1888 sc->sc_align_tweak = 2; 1889 #endif /* __NO_STRICT_ALIGNMENT */ 1890 1891 /* Cancel any pending I/O. */ 1892 dge_stop(ifp, 0); 1893 1894 /* Reset the chip to a known state. */ 1895 dge_reset(sc); 1896 1897 /* Initialize the transmit descriptor ring. */ 1898 memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs)); 1899 DGE_CDTXSYNC(sc, 0, DGE_NTXDESC, 1900 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1901 sc->sc_txfree = DGE_NTXDESC; 1902 sc->sc_txnext = 0; 1903 1904 sc->sc_txctx_ipcs = 0xffffffff; 1905 sc->sc_txctx_tucs = 0xffffffff; 1906 1907 CSR_WRITE(sc, DGE_TDBAH, 0); 1908 CSR_WRITE(sc, DGE_TDBAL, DGE_CDTXADDR(sc, 0)); 1909 CSR_WRITE(sc, DGE_TDLEN, sizeof(sc->sc_txdescs)); 1910 CSR_WRITE(sc, DGE_TDH, 0); 1911 CSR_WRITE(sc, DGE_TDT, 0); 1912 CSR_WRITE(sc, DGE_TIDV, TIDV); 1913 1914 #if 0 1915 CSR_WRITE(sc, DGE_TXDCTL, TXDCTL_PTHRESH(0) | 1916 TXDCTL_HTHRESH(0) | TXDCTL_WTHRESH(0)); 1917 #endif 1918 CSR_WRITE(sc, DGE_RXDCTL, 1919 RXDCTL_PTHRESH(RXDCTL_PTHRESH_VAL) | 1920 RXDCTL_HTHRESH(RXDCTL_HTHRESH_VAL) | 1921 RXDCTL_WTHRESH(RXDCTL_WTHRESH_VAL)); 1922 1923 /* Initialize the transmit job descriptors. */ 1924 for (i = 0; i < DGE_TXQUEUELEN; i++) 1925 sc->sc_txsoft[i].txs_mbuf = NULL; 1926 sc->sc_txsfree = DGE_TXQUEUELEN; 1927 sc->sc_txsnext = 0; 1928 sc->sc_txsdirty = 0; 1929 1930 /* 1931 * Initialize the receive descriptor and receive job 1932 * descriptor rings. 1933 */ 1934 CSR_WRITE(sc, DGE_RDBAH, 0); 1935 CSR_WRITE(sc, DGE_RDBAL, DGE_CDRXADDR(sc, 0)); 1936 CSR_WRITE(sc, DGE_RDLEN, sizeof(sc->sc_rxdescs)); 1937 CSR_WRITE(sc, DGE_RDH, DGE_RXSPACE); 1938 CSR_WRITE(sc, DGE_RDT, 0); 1939 CSR_WRITE(sc, DGE_RDTR, RDTR | 0x80000000); 1940 CSR_WRITE(sc, DGE_FCRTL, FCRTL | FCRTL_XONE); 1941 CSR_WRITE(sc, DGE_FCRTH, FCRTH); 1942 1943 for (i = 0; i < DGE_NRXDESC; i++) { 1944 rxs = &sc->sc_rxsoft[i]; 1945 if (rxs->rxs_mbuf == NULL) { 1946 if ((error = dge_add_rxbuf(sc, i)) != 0) { 1947 printf("%s: unable to allocate or map rx " 1948 "buffer %d, error = %d\n", 1949 device_xname(&sc->sc_dev), i, error); 1950 /* 1951 * XXX Should attempt to run with fewer receive 1952 * XXX buffers instead of just failing. 1953 */ 1954 dge_rxdrain(sc); 1955 goto out; 1956 } 1957 } 1958 DGE_INIT_RXDESC(sc, i); 1959 } 1960 sc->sc_rxptr = DGE_RXSPACE; 1961 sc->sc_rxdiscard = 0; 1962 DGE_RXCHAIN_RESET(sc); 1963 1964 if (sc->sc_ethercom.ec_capabilities & ETHERCAP_JUMBO_MTU) { 1965 sc->sc_ctrl0 |= CTRL0_JFE; 1966 CSR_WRITE(sc, DGE_MFS, ETHER_MAX_LEN_JUMBO << 16); 1967 } 1968 1969 /* Write the control registers. */ 1970 CSR_WRITE(sc, DGE_CTRL0, sc->sc_ctrl0); 1971 1972 /* 1973 * Set up checksum offload parameters. 1974 */ 1975 reg = CSR_READ(sc, DGE_RXCSUM); 1976 if (ifp->if_capenable & IFCAP_CSUM_IPv4_Rx) 1977 reg |= RXCSUM_IPOFL; 1978 else 1979 reg &= ~RXCSUM_IPOFL; 1980 if (ifp->if_capenable & (IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx)) 1981 reg |= RXCSUM_IPOFL | RXCSUM_TUOFL; 1982 else { 1983 reg &= ~RXCSUM_TUOFL; 1984 if ((ifp->if_capenable & IFCAP_CSUM_IPv4_Rx) == 0) 1985 reg &= ~RXCSUM_IPOFL; 1986 } 1987 CSR_WRITE(sc, DGE_RXCSUM, reg); 1988 1989 /* 1990 * Set up the interrupt registers. 1991 */ 1992 CSR_WRITE(sc, DGE_IMC, 0xffffffffU); 1993 sc->sc_icr = ICR_TXDW | ICR_LSC | ICR_RXSEQ | ICR_RXDMT0 | 1994 ICR_RXO | ICR_RXT0; 1995 1996 CSR_WRITE(sc, DGE_IMS, sc->sc_icr); 1997 1998 /* 1999 * Set up the transmit control register. 
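 * TCTL_TXEN enables the transmit unit; the value programmed below stays
 * in effect until dge_stop() writes DGE_TCTL back to zero.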
2000 */ 2001 sc->sc_tctl = TCTL_TCE|TCTL_TPDE|TCTL_TXEN; 2002 CSR_WRITE(sc, DGE_TCTL, sc->sc_tctl); 2003 2004 /* 2005 * Set up the receive control register; we actually program 2006 * the register when we set the receive filter. Use multicast 2007 * address offset type 0. 2008 */ 2009 sc->sc_mchash_type = 0; 2010 2011 sc->sc_rctl = RCTL_RXEN | RCTL_RDMTS_12 | RCTL_RPDA_MC | 2012 RCTL_CFF | RCTL_SECRC | RCTL_MO(sc->sc_mchash_type); 2013 2014 #ifdef DGE_OFFBYONE_RXBUG 2015 sc->sc_rctl |= RCTL_BSIZE_16k; 2016 #else 2017 switch (MCLBYTES) { 2018 case 2048: 2019 sc->sc_rctl |= RCTL_BSIZE_2k; 2020 break; 2021 case 4096: 2022 sc->sc_rctl |= RCTL_BSIZE_4k; 2023 break; 2024 case 8192: 2025 sc->sc_rctl |= RCTL_BSIZE_8k; 2026 break; 2027 case 16384: 2028 sc->sc_rctl |= RCTL_BSIZE_16k; 2029 break; 2030 default: 2031 panic("dge_init: MCLBYTES %d unsupported", MCLBYTES); 2032 } 2033 #endif 2034 2035 /* Set the receive filter. */ 2036 /* Also sets RCTL */ 2037 dge_set_filter(sc); 2038 2039 /* ...all done! */ 2040 ifp->if_flags |= IFF_RUNNING; 2041 ifp->if_flags &= ~IFF_OACTIVE; 2042 2043 out: 2044 if (error) 2045 printf("%s: interface not running\n", device_xname(&sc->sc_dev)); 2046 return (error); 2047 } 2048 2049 /* 2050 * dge_rxdrain: 2051 * 2052 * Drain the receive queue. 2053 */ 2054 static void 2055 dge_rxdrain(struct dge_softc *sc) 2056 { 2057 struct dge_rxsoft *rxs; 2058 int i; 2059 2060 for (i = 0; i < DGE_NRXDESC; i++) { 2061 rxs = &sc->sc_rxsoft[i]; 2062 if (rxs->rxs_mbuf != NULL) { 2063 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); 2064 m_freem(rxs->rxs_mbuf); 2065 rxs->rxs_mbuf = NULL; 2066 } 2067 } 2068 } 2069 2070 /* 2071 * dge_stop: [ifnet interface function] 2072 * 2073 * Stop transmission and reception on the interface. 2074 */ 2075 static void 2076 dge_stop(struct ifnet *ifp, int disable) 2077 { 2078 struct dge_softc *sc = ifp->if_softc; 2079 struct dge_txsoft *txs; 2080 int i; 2081 2082 /* Stop the transmit and receive processes. */ 2083 CSR_WRITE(sc, DGE_TCTL, 0); 2084 CSR_WRITE(sc, DGE_RCTL, 0); 2085 2086 /* Release any queued transmit buffers. */ 2087 for (i = 0; i < DGE_TXQUEUELEN; i++) { 2088 txs = &sc->sc_txsoft[i]; 2089 if (txs->txs_mbuf != NULL) { 2090 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); 2091 m_freem(txs->txs_mbuf); 2092 txs->txs_mbuf = NULL; 2093 } 2094 } 2095 2096 /* Mark the interface as down and cancel the watchdog timer. */ 2097 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 2098 ifp->if_timer = 0; 2099 2100 if (disable) 2101 dge_rxdrain(sc); 2102 } 2103 2104 /* 2105 * dge_add_rxbuf: 2106 * 2107 * Add a receive buffer to the indicated descriptor.
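 * With DGE_OFFBYONE_RXBUG defined, a buffer of DGE_BUFFER_SIZE bytes from
 * the driver's private pool (dge_getbuf()) is attached to the mbuf as
 * external storage and loaded directly into the DMA map; otherwise a
 * standard cluster is allocated with MCLGET and loaded with
 * bus_dmamap_load_mbuf().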
2108 */ 2109 static int 2110 dge_add_rxbuf(struct dge_softc *sc, int idx) 2111 { 2112 struct dge_rxsoft *rxs = &sc->sc_rxsoft[idx]; 2113 struct mbuf *m; 2114 int error; 2115 #ifdef DGE_OFFBYONE_RXBUG 2116 void *buf; 2117 #endif 2118 2119 MGETHDR(m, M_DONTWAIT, MT_DATA); 2120 if (m == NULL) 2121 return (ENOBUFS); 2122 2123 #ifdef DGE_OFFBYONE_RXBUG 2124 if ((buf = dge_getbuf(sc)) == NULL) { 2125 /* Don't leak the mbuf header if the private buffer pool is empty. */ m_freem(m); return ENOBUFS; } 2126 2127 m->m_len = m->m_pkthdr.len = DGE_BUFFER_SIZE; 2128 MEXTADD(m, buf, DGE_BUFFER_SIZE, M_DEVBUF, dge_freebuf, sc); 2129 m->m_flags |= M_EXT_RW; 2130 2131 if (rxs->rxs_mbuf != NULL) 2132 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); 2133 rxs->rxs_mbuf = m; 2134 2135 error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, buf, 2136 DGE_BUFFER_SIZE, NULL, BUS_DMA_READ|BUS_DMA_NOWAIT); 2137 #else 2138 MCLGET(m, M_DONTWAIT); 2139 if ((m->m_flags & M_EXT) == 0) { 2140 m_freem(m); 2141 return (ENOBUFS); 2142 } 2143 2144 if (rxs->rxs_mbuf != NULL) 2145 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); 2146 2147 rxs->rxs_mbuf = m; 2148 2149 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size; 2150 error = bus_dmamap_load_mbuf(sc->sc_dmat, rxs->rxs_dmamap, m, 2151 BUS_DMA_READ|BUS_DMA_NOWAIT); 2152 #endif 2153 if (error) { 2154 printf("%s: unable to load rx DMA map %d, error = %d\n", 2155 device_xname(&sc->sc_dev), idx, error); 2156 panic("dge_add_rxbuf"); /* XXX XXX XXX */ 2157 } 2158 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 2159 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 2160 2161 return (0); 2162 } 2163 2164 /* 2165 * dge_set_ral: 2166 * 2167 * Set an entry in the receive address list. 2168 */ 2169 static void 2170 dge_set_ral(struct dge_softc *sc, const uint8_t *enaddr, int idx) 2171 { 2172 uint32_t ral_lo, ral_hi; 2173 2174 if (enaddr != NULL) { 2175 ral_lo = enaddr[0] | (enaddr[1] << 8) | (enaddr[2] << 16) | 2176 (enaddr[3] << 24); 2177 ral_hi = enaddr[4] | (enaddr[5] << 8); 2178 ral_hi |= RAH_AV; 2179 } else { 2180 ral_lo = 0; 2181 ral_hi = 0; 2182 } 2183 CSR_WRITE(sc, RA_ADDR(DGE_RAL, idx), ral_lo); 2184 CSR_WRITE(sc, RA_ADDR(DGE_RAH, idx), ral_hi); 2185 } 2186 2187 /* 2188 * dge_mchash: 2189 * 2190 * Compute the hash of the multicast address for the 4096-bit 2191 * multicast filter. 2192 */ 2193 static uint32_t 2194 dge_mchash(struct dge_softc *sc, const uint8_t *enaddr) 2195 { 2196 static const int lo_shift[4] = { 4, 3, 2, 0 }; 2197 static const int hi_shift[4] = { 4, 5, 6, 8 }; 2198 uint32_t hash; 2199 2200 hash = (enaddr[4] >> lo_shift[sc->sc_mchash_type]) | 2201 (((uint16_t) enaddr[5]) << hi_shift[sc->sc_mchash_type]); 2202 2203 return (hash & 0xfff); 2204 } 2205 2206 /* 2207 * dge_set_filter: 2208 * 2209 * Set up the receive filter. 2210 */ 2211 static void 2212 dge_set_filter(struct dge_softc *sc) 2213 { 2214 struct ethercom *ec = &sc->sc_ethercom; 2215 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 2216 struct ether_multi *enm; 2217 struct ether_multistep step; 2218 uint32_t hash, reg, bit; 2219 int i; 2220 2221 sc->sc_rctl &= ~(RCTL_BAM | RCTL_UPE | RCTL_MPE); 2222 2223 if (ifp->if_flags & IFF_BROADCAST) 2224 sc->sc_rctl |= RCTL_BAM; 2225 if (ifp->if_flags & IFF_PROMISC) { 2226 sc->sc_rctl |= RCTL_UPE; 2227 goto allmulti; 2228 } 2229 2230 /* 2231 * Set the station address in the first RAL slot, and 2232 * clear the remaining slots. 2233 */ 2234 dge_set_ral(sc, CLLADDR(ifp->if_sadl), 0); 2235 for (i = 1; i < RA_TABSIZE; i++) 2236 dge_set_ral(sc, NULL, i); 2237 2238 /* Clear out the multicast table.
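 * The MTA is an array of MC_TABSIZE 32-bit words; dge_mchash() returns a
 * 12-bit hash whose bits 11:5 select the word and bits 4:0 the bit within
 * it (e.g. hash 0x9a3 sets bit 3 of word 0x4d).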
*/ 2239 for (i = 0; i < MC_TABSIZE; i++) 2240 CSR_WRITE(sc, DGE_MTA + (i << 2), 0); 2241 2242 ETHER_FIRST_MULTI(step, ec, enm); 2243 while (enm != NULL) { 2244 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 2245 /* 2246 * We must listen to a range of multicast addresses. 2247 * For now, just accept all multicasts, rather than 2248 * trying to set only those filter bits needed to match 2249 * the range. (At this time, the only use of address 2250 * ranges is for IP multicast routing, for which the 2251 * range is big enough to require all bits set.) 2252 */ 2253 goto allmulti; 2254 } 2255 2256 hash = dge_mchash(sc, enm->enm_addrlo); 2257 2258 reg = (hash >> 5) & 0x7f; 2259 bit = hash & 0x1f; 2260 2261 hash = CSR_READ(sc, DGE_MTA + (reg << 2)); 2262 hash |= 1U << bit; 2263 2264 CSR_WRITE(sc, DGE_MTA + (reg << 2), hash); 2265 2266 ETHER_NEXT_MULTI(step, enm); 2267 } 2268 2269 ifp->if_flags &= ~IFF_ALLMULTI; 2270 goto setit; 2271 2272 allmulti: 2273 ifp->if_flags |= IFF_ALLMULTI; 2274 sc->sc_rctl |= RCTL_MPE; 2275 2276 setit: 2277 CSR_WRITE(sc, DGE_RCTL, sc->sc_rctl); 2278 } 2279 2280 /* 2281 * Read in the EEPROM info and verify checksum. 2282 */ 2283 int 2284 dge_read_eeprom(struct dge_softc *sc) 2285 { 2286 uint16_t cksum; 2287 int i; 2288 2289 cksum = 0; 2290 for (i = 0; i < EEPROM_SIZE; i++) { 2291 sc->sc_eeprom[i] = dge_eeprom_word(sc, i); 2292 cksum += sc->sc_eeprom[i]; 2293 } 2294 return cksum != EEPROM_CKSUM; 2295 } 2296 2297 2298 /* 2299 * Read a 16-bit word from address addr in the serial EEPROM. 2300 */ 2301 uint16_t 2302 dge_eeprom_word(struct dge_softc *sc, int addr) 2303 { 2304 uint32_t reg; 2305 uint16_t rval = 0; 2306 int i; 2307 2308 reg = CSR_READ(sc, DGE_EECD) & ~(EECD_SK|EECD_DI|EECD_CS); 2309 2310 /* Lower clock pulse (and data in to chip) */ 2311 CSR_WRITE(sc, DGE_EECD, reg); 2312 /* Select chip */ 2313 CSR_WRITE(sc, DGE_EECD, reg|EECD_CS); 2314 2315 /* Send read command */ 2316 dge_eeprom_clockout(sc, 1); 2317 dge_eeprom_clockout(sc, 1); 2318 dge_eeprom_clockout(sc, 0); 2319 2320 /* Send address */ 2321 for (i = 5; i >= 0; i--) 2322 dge_eeprom_clockout(sc, (addr >> i) & 1); 2323 2324 /* Read data */ 2325 for (i = 0; i < 16; i++) { 2326 rval <<= 1; 2327 rval |= dge_eeprom_clockin(sc); 2328 } 2329 2330 /* Deselect chip */ 2331 CSR_WRITE(sc, DGE_EECD, reg); 2332 2333 return rval; 2334 } 2335 2336 /* 2337 * Clock out a single bit to the EEPROM. 2338 */ 2339 void 2340 dge_eeprom_clockout(struct dge_softc *sc, int bit) 2341 { 2342 int reg; 2343 2344 reg = CSR_READ(sc, DGE_EECD) & ~(EECD_DI|EECD_SK); 2345 if (bit) 2346 reg |= EECD_DI; 2347 2348 CSR_WRITE(sc, DGE_EECD, reg); 2349 delay(2); 2350 CSR_WRITE(sc, DGE_EECD, reg|EECD_SK); 2351 delay(2); 2352 CSR_WRITE(sc, DGE_EECD, reg); 2353 delay(2); 2354 } 2355 2356 /* 2357 * Clock in a single bit from EEPROM. 
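 * This mirrors dge_eeprom_clockout(): raise EECD_SK, wait ~2us, sample
 * EECD_DO, then lower the clock again. dge_eeprom_word() clocks in 16 such
 * bits per word, most significant bit first.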
2358 */ 2359 int 2360 dge_eeprom_clockin(struct dge_softc *sc) 2361 { 2362 int reg, rv; 2363 2364 reg = CSR_READ(sc, DGE_EECD) & ~(EECD_DI|EECD_DO|EECD_SK); 2365 2366 CSR_WRITE(sc, DGE_EECD, reg|EECD_SK); /* Raise clock */ 2367 delay(2); 2368 rv = (CSR_READ(sc, DGE_EECD) & EECD_DO) != 0; /* Get bit */ 2369 CSR_WRITE(sc, DGE_EECD, reg); /* Lower clock */ 2370 delay(2); 2371 2372 return rv; 2373 } 2374 2375 static void 2376 dge_xgmii_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 2377 { 2378 struct dge_softc *sc = ifp->if_softc; 2379 2380 ifmr->ifm_status = IFM_AVALID; 2381 ifmr->ifm_active = IFM_ETHER|IFM_10G_LR; 2382 2383 if (CSR_READ(sc, DGE_STATUS) & STATUS_LINKUP) 2384 ifmr->ifm_status |= IFM_ACTIVE; 2385 } 2386 2387 static inline int 2388 phwait(struct dge_softc *sc, int p, int r, int d, int type) 2389 { 2390 int i, mdic; 2391 2392 CSR_WRITE(sc, DGE_MDIO, 2393 MDIO_PHY(p) | MDIO_REG(r) | MDIO_DEV(d) | type | MDIO_CMD); 2394 for (i = 0; i < 10; i++) { 2395 delay(10); 2396 if (((mdic = CSR_READ(sc, DGE_MDIO)) & MDIO_CMD) == 0) 2397 break; 2398 } 2399 return mdic; 2400 } 2401 2402 2403 static void 2404 dge_xgmii_writereg(device_t self, int phy, int reg, int val) 2405 { 2406 struct dge_softc *sc = device_private(self); 2407 int mdic; 2408 2409 CSR_WRITE(sc, DGE_MDIRW, val); 2410 if (((mdic = phwait(sc, phy, reg, 1, MDIO_ADDR)) & MDIO_CMD)) { 2411 printf("%s: address cycle timeout; phy %d reg %d\n", 2412 device_xname(&sc->sc_dev), phy, reg); 2413 return; 2414 } 2415 if (((mdic = phwait(sc, phy, reg, 1, MDIO_WRITE)) & MDIO_CMD)) { 2416 printf("%s: write cycle timeout; phy %d reg %d\n", 2417 device_xname(&sc->sc_dev), phy, reg); 2418 return; 2419 } 2420 } 2421 2422 static void 2423 dge_xgmii_reset(struct dge_softc *sc) 2424 { 2425 dge_xgmii_writereg((void *)sc, 0, 0, BMCR_RESET); 2426 } 2427 2428 static int 2429 dge_xgmii_mediachange(struct ifnet *ifp) 2430 { 2431 return 0; 2432 } 2433