1 /* $NetBSD: if_sip.c,v 1.51 2002/03/27 21:42:45 briggs Exp $ */ 2 3 /*- 4 * Copyright (c) 2001, 2002 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Jason R. Thorpe. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 3. All advertising materials mentioning features or use of this software 19 * must display the following acknowledgement: 20 * This product includes software developed by the NetBSD 21 * Foundation, Inc. and its contributors. 22 * 4. Neither the name of The NetBSD Foundation nor the names of its 23 * contributors may be used to endorse or promote products derived 24 * from this software without specific prior written permission. 25 * 26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 36 * POSSIBILITY OF SUCH DAMAGE. 37 */ 38 39 /*- 40 * Copyright (c) 1999 Network Computer, Inc. 41 * All rights reserved. 42 * 43 * Redistribution and use in source and binary forms, with or without 44 * modification, are permitted provided that the following conditions 45 * are met: 46 * 1. Redistributions of source code must retain the above copyright 47 * notice, this list of conditions and the following disclaimer. 48 * 2. Redistributions in binary form must reproduce the above copyright 49 * notice, this list of conditions and the following disclaimer in the 50 * documentation and/or other materials provided with the distribution. 51 * 3. Neither the name of Network Computer, Inc. nor the names of its 52 * contributors may be used to endorse or promote products derived 53 * from this software without specific prior written permission. 54 * 55 * THIS SOFTWARE IS PROVIDED BY NETWORK COMPUTER, INC. AND CONTRIBUTORS 56 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 57 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 58 * PURPOSE ARE DISCLAIMED. 
 * IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */

/*
 * Device driver for the Silicon Integrated Systems SiS 900,
 * SiS 7016 10/100, National Semiconductor DP83815 10/100, and
 * National Semiconductor DP83820 10/100/1000 PCI Ethernet
 * controllers.
 *
 * Originally written to support the SiS 900 by Jason R. Thorpe for
 * Network Computer, Inc.
 *
 * TODO:
 *
 *	- Support the 10-bit interface on the DP83820 (for fiber).
 *
 *	- Reduce the interrupt load.
 */

#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: if_sip.c,v 1.51 2002/03/27 21:42:45 briggs Exp $");

#include "bpfilter.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/callout.h>
#include <sys/mbuf.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/errno.h>
#include <sys/device.h>
#include <sys/queue.h>

#include <uvm/uvm_extern.h>		/* for PAGE_SIZE */

#include <net/if.h>
#include <net/if_dl.h>
#include <net/if_media.h>
#include <net/if_ether.h>

#if NBPFILTER > 0
#include <net/bpf.h>
#endif

#include <machine/bus.h>
#include <machine/intr.h>
#include <machine/endian.h>

#include <dev/mii/mii.h>
#include <dev/mii/miivar.h>
#ifdef DP83820
#include <dev/mii/mii_bitbang.h>
#endif /* DP83820 */

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <dev/pci/pcidevs.h>

#include <dev/pci/if_sipreg.h>

#ifdef DP83820			/* DP83820 Gigabit Ethernet */
#define	SIP_DECL(x)	__CONCAT(gsip_,x)
#else				/* SiS900 and DP83815 */
#define	SIP_DECL(x)	__CONCAT(sip_,x)
#endif

#define	SIP_STR(x)	__STRING(SIP_DECL(x))

/*
 * Transmit descriptor list size.  This is arbitrary, but allocate
 * enough descriptors for 256 pending transmissions, and 8 segments
 * per packet.  This MUST work out to a power of 2.
 */
#define	SIP_NTXSEGS		8

#define	SIP_TXQUEUELEN		256
#define	SIP_NTXDESC		(SIP_TXQUEUELEN * SIP_NTXSEGS)
#define	SIP_NTXDESC_MASK	(SIP_NTXDESC - 1)
#define	SIP_NEXTTX(x)		(((x) + 1) & SIP_NTXDESC_MASK)

#if defined(DP83820)
#define	TX_DMAMAP_SIZE		ETHER_MAX_LEN_JUMBO
#else
#define	TX_DMAMAP_SIZE		MCLBYTES
#endif

/*
 * Receive descriptor list size.  We have one Rx buffer per incoming
 * packet, so this logic is a little simpler.
 *
 * Actually, on the DP83820, we allow the packet to consume more than
 * one buffer, in order to support jumbo Ethernet frames.  In that
 * case, a packet may consume up to 5 buffers (assuming a 2048 byte
 * mbuf cluster).  256 receive buffers is only 51 maximum size packets,
 * so we'd better be quick about handling receive interrupts.
 */
#if defined(DP83820)
#define	SIP_NRXDESC		256
#else
#define	SIP_NRXDESC		128
#endif /* DP83820 */
#define	SIP_NRXDESC_MASK	(SIP_NRXDESC - 1)
#define	SIP_NEXTRX(x)		(((x) + 1) & SIP_NRXDESC_MASK)

/*
 * Control structures are DMA'd to the SiS900 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct sip_control_data {
	/*
	 * The transmit descriptors.
	 */
	struct sip_desc scd_txdescs[SIP_NTXDESC];

	/*
	 * The receive descriptors.
	 */
	struct sip_desc scd_rxdescs[SIP_NRXDESC];
};

#define	SIP_CDOFF(x)	offsetof(struct sip_control_data, x)
#define	SIP_CDTXOFF(x)	SIP_CDOFF(scd_txdescs[(x)])
#define	SIP_CDRXOFF(x)	SIP_CDOFF(scd_rxdescs[(x)])

/*
 * Software state for transmit jobs.
 */
struct sip_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	SIMPLEQ_ENTRY(sip_txsoft) txs_q;
};

SIMPLEQ_HEAD(sip_txsq, sip_txsoft);

/*
 * Software state for receive jobs.
 */
struct sip_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

/*
 * Software state per device.
 */
struct sip_softc {
	struct device sc_dev;		/* generic device information */
	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	struct ethercom sc_ethercom;	/* ethernet common data */
	void *sc_sdhook;		/* shutdown hook */

	const struct sip_product *sc_model; /* which model are we? */
	int sc_rev;			/* chip revision */

	void *sc_ih;			/* interrupt cookie */

	struct mii_data sc_mii;		/* MII/media information */

	struct callout sc_tick_ch;	/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	/*
	 * Software state for transmit and receive descriptors.
	 */
	struct sip_txsoft sc_txsoft[SIP_TXQUEUELEN];
	struct sip_rxsoft sc_rxsoft[SIP_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct sip_control_data *sc_control_data;
#define	sc_txdescs	sc_control_data->scd_txdescs
#define	sc_rxdescs	sc_control_data->scd_rxdescs

#ifdef SIP_EVENT_COUNTERS
	/*
	 * Event counters.
	 */
	struct evcnt sc_ev_txsstall;	/* Tx stalled due to no txs */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txintr;	/* Tx interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
#ifdef DP83820
	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtcpsum;	/* TCP checksums checked in-bound */
	struct evcnt sc_ev_rxudpsum;	/* UDP checksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtcpsum;	/* TCP checksums comp. out-bound */
	struct evcnt sc_ev_txudpsum;	/* UDP checksums comp.
out-bound */ 265 #endif /* DP83820 */ 266 #endif /* SIP_EVENT_COUNTERS */ 267 268 u_int32_t sc_txcfg; /* prototype TXCFG register */ 269 u_int32_t sc_rxcfg; /* prototype RXCFG register */ 270 u_int32_t sc_imr; /* prototype IMR register */ 271 u_int32_t sc_rfcr; /* prototype RFCR register */ 272 273 u_int32_t sc_cfg; /* prototype CFG register */ 274 275 #ifdef DP83820 276 u_int32_t sc_gpior; /* prototype GPIOR register */ 277 #endif /* DP83820 */ 278 279 u_int32_t sc_tx_fill_thresh; /* transmit fill threshold */ 280 u_int32_t sc_tx_drain_thresh; /* transmit drain threshold */ 281 282 u_int32_t sc_rx_drain_thresh; /* receive drain threshold */ 283 284 int sc_flags; /* misc. flags; see below */ 285 286 int sc_txfree; /* number of free Tx descriptors */ 287 int sc_txnext; /* next ready Tx descriptor */ 288 289 struct sip_txsq sc_txfreeq; /* free Tx descsofts */ 290 struct sip_txsq sc_txdirtyq; /* dirty Tx descsofts */ 291 292 int sc_rxptr; /* next ready Rx descriptor/descsoft */ 293 #if defined(DP83820) 294 int sc_rxdiscard; 295 int sc_rxlen; 296 struct mbuf *sc_rxhead; 297 struct mbuf *sc_rxtail; 298 struct mbuf **sc_rxtailp; 299 #endif /* DP83820 */ 300 }; 301 302 /* sc_flags */ 303 #define SIPF_PAUSED 0x00000001 /* paused (802.3x flow control) */ 304 305 #ifdef DP83820 306 #define SIP_RXCHAIN_RESET(sc) \ 307 do { \ 308 (sc)->sc_rxtailp = &(sc)->sc_rxhead; \ 309 *(sc)->sc_rxtailp = NULL; \ 310 (sc)->sc_rxlen = 0; \ 311 } while (/*CONSTCOND*/0) 312 313 #define SIP_RXCHAIN_LINK(sc, m) \ 314 do { \ 315 *(sc)->sc_rxtailp = (sc)->sc_rxtail = (m); \ 316 (sc)->sc_rxtailp = &(m)->m_next; \ 317 } while (/*CONSTCOND*/0) 318 #endif /* DP83820 */ 319 320 #ifdef SIP_EVENT_COUNTERS 321 #define SIP_EVCNT_INCR(ev) (ev)->ev_count++ 322 #else 323 #define SIP_EVCNT_INCR(ev) /* nothing */ 324 #endif 325 326 #define SIP_CDTXADDR(sc, x) ((sc)->sc_cddma + SIP_CDTXOFF((x))) 327 #define SIP_CDRXADDR(sc, x) ((sc)->sc_cddma + SIP_CDRXOFF((x))) 328 329 #define SIP_CDTXSYNC(sc, x, n, ops) \ 330 do { \ 331 int __x, __n; \ 332 \ 333 __x = (x); \ 334 __n = (n); \ 335 \ 336 /* If it will wrap around, sync to the end of the ring. */ \ 337 if ((__x + __n) > SIP_NTXDESC) { \ 338 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \ 339 SIP_CDTXOFF(__x), sizeof(struct sip_desc) * \ 340 (SIP_NTXDESC - __x), (ops)); \ 341 __n -= (SIP_NTXDESC - __x); \ 342 __x = 0; \ 343 } \ 344 \ 345 /* Now sync whatever is left. 
*/ \ 346 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \ 347 SIP_CDTXOFF(__x), sizeof(struct sip_desc) * __n, (ops)); \ 348 } while (0) 349 350 #define SIP_CDRXSYNC(sc, x, ops) \ 351 bus_dmamap_sync((sc)->sc_dmat, (sc)->sc_cddmamap, \ 352 SIP_CDRXOFF((x)), sizeof(struct sip_desc), (ops)) 353 354 #ifdef DP83820 355 #define SIP_INIT_RXDESC_EXTSTS __sipd->sipd_extsts = 0; 356 #define SIP_RXBUF_LEN (MCLBYTES - 4) 357 #else 358 #define SIP_INIT_RXDESC_EXTSTS /* nothing */ 359 #define SIP_RXBUF_LEN (MCLBYTES - 1) /* field width */ 360 #endif 361 #define SIP_INIT_RXDESC(sc, x) \ 362 do { \ 363 struct sip_rxsoft *__rxs = &(sc)->sc_rxsoft[(x)]; \ 364 struct sip_desc *__sipd = &(sc)->sc_rxdescs[(x)]; \ 365 \ 366 __sipd->sipd_link = \ 367 htole32(SIP_CDRXADDR((sc), SIP_NEXTRX((x)))); \ 368 __sipd->sipd_bufptr = \ 369 htole32(__rxs->rxs_dmamap->dm_segs[0].ds_addr); \ 370 __sipd->sipd_cmdsts = htole32(CMDSTS_INTR | \ 371 (SIP_RXBUF_LEN & CMDSTS_SIZE_MASK)); \ 372 SIP_INIT_RXDESC_EXTSTS \ 373 SIP_CDRXSYNC((sc), (x), BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); \ 374 } while (0) 375 376 #define SIP_CHIP_VERS(sc, v, p, r) \ 377 ((sc)->sc_model->sip_vendor == (v) && \ 378 (sc)->sc_model->sip_product == (p) && \ 379 (sc)->sc_rev == (r)) 380 381 #define SIP_CHIP_MODEL(sc, v, p) \ 382 ((sc)->sc_model->sip_vendor == (v) && \ 383 (sc)->sc_model->sip_product == (p)) 384 385 #if !defined(DP83820) 386 #define SIP_SIS900_REV(sc, rev) \ 387 SIP_CHIP_VERS((sc), PCI_VENDOR_SIS, PCI_PRODUCT_SIS_900, (rev)) 388 #endif 389 390 #define SIP_TIMEOUT 1000 391 392 void SIP_DECL(start)(struct ifnet *); 393 void SIP_DECL(watchdog)(struct ifnet *); 394 int SIP_DECL(ioctl)(struct ifnet *, u_long, caddr_t); 395 int SIP_DECL(init)(struct ifnet *); 396 void SIP_DECL(stop)(struct ifnet *, int); 397 398 void SIP_DECL(shutdown)(void *); 399 400 void SIP_DECL(reset)(struct sip_softc *); 401 void SIP_DECL(rxdrain)(struct sip_softc *); 402 int SIP_DECL(add_rxbuf)(struct sip_softc *, int); 403 void SIP_DECL(read_eeprom)(struct sip_softc *, int, int, u_int16_t *); 404 void SIP_DECL(tick)(void *); 405 406 #if !defined(DP83820) 407 void SIP_DECL(sis900_set_filter)(struct sip_softc *); 408 #endif /* ! 
DP83820 */ 409 void SIP_DECL(dp83815_set_filter)(struct sip_softc *); 410 411 #if defined(DP83820) 412 void SIP_DECL(dp83820_read_macaddr)(struct sip_softc *, 413 const struct pci_attach_args *, u_int8_t *); 414 #else 415 void SIP_DECL(sis900_read_macaddr)(struct sip_softc *, 416 const struct pci_attach_args *, u_int8_t *); 417 void SIP_DECL(dp83815_read_macaddr)(struct sip_softc *, 418 const struct pci_attach_args *, u_int8_t *); 419 #endif /* DP83820 */ 420 421 int SIP_DECL(intr)(void *); 422 void SIP_DECL(txintr)(struct sip_softc *); 423 void SIP_DECL(rxintr)(struct sip_softc *); 424 425 #if defined(DP83820) 426 int SIP_DECL(dp83820_mii_readreg)(struct device *, int, int); 427 void SIP_DECL(dp83820_mii_writereg)(struct device *, int, int, int); 428 void SIP_DECL(dp83820_mii_statchg)(struct device *); 429 #else 430 int SIP_DECL(sis900_mii_readreg)(struct device *, int, int); 431 void SIP_DECL(sis900_mii_writereg)(struct device *, int, int, int); 432 void SIP_DECL(sis900_mii_statchg)(struct device *); 433 434 int SIP_DECL(dp83815_mii_readreg)(struct device *, int, int); 435 void SIP_DECL(dp83815_mii_writereg)(struct device *, int, int, int); 436 void SIP_DECL(dp83815_mii_statchg)(struct device *); 437 #endif /* DP83820 */ 438 439 int SIP_DECL(mediachange)(struct ifnet *); 440 void SIP_DECL(mediastatus)(struct ifnet *, struct ifmediareq *); 441 442 int SIP_DECL(match)(struct device *, struct cfdata *, void *); 443 void SIP_DECL(attach)(struct device *, struct device *, void *); 444 445 int SIP_DECL(copy_small) = 0; 446 447 struct cfattach SIP_DECL(ca) = { 448 sizeof(struct sip_softc), SIP_DECL(match), SIP_DECL(attach), 449 }; 450 451 /* 452 * Descriptions of the variants of the SiS900. 453 */ 454 struct sip_variant { 455 int (*sipv_mii_readreg)(struct device *, int, int); 456 void (*sipv_mii_writereg)(struct device *, int, int, int); 457 void (*sipv_mii_statchg)(struct device *); 458 void (*sipv_set_filter)(struct sip_softc *); 459 void (*sipv_read_macaddr)(struct sip_softc *, 460 const struct pci_attach_args *, u_int8_t *); 461 }; 462 463 #if defined(DP83820) 464 u_int32_t SIP_DECL(dp83820_mii_bitbang_read)(struct device *); 465 void SIP_DECL(dp83820_mii_bitbang_write)(struct device *, u_int32_t); 466 467 const struct mii_bitbang_ops SIP_DECL(dp83820_mii_bitbang_ops) = { 468 SIP_DECL(dp83820_mii_bitbang_read), 469 SIP_DECL(dp83820_mii_bitbang_write), 470 { 471 EROMAR_MDIO, /* MII_BIT_MDO */ 472 EROMAR_MDIO, /* MII_BIT_MDI */ 473 EROMAR_MDC, /* MII_BIT_MDC */ 474 EROMAR_MDDIR, /* MII_BIT_DIR_HOST_PHY */ 475 0, /* MII_BIT_DIR_PHY_HOST */ 476 } 477 }; 478 #endif /* DP83820 */ 479 480 #if defined(DP83820) 481 const struct sip_variant SIP_DECL(variant_dp83820) = { 482 SIP_DECL(dp83820_mii_readreg), 483 SIP_DECL(dp83820_mii_writereg), 484 SIP_DECL(dp83820_mii_statchg), 485 SIP_DECL(dp83815_set_filter), 486 SIP_DECL(dp83820_read_macaddr), 487 }; 488 #else 489 const struct sip_variant SIP_DECL(variant_sis900) = { 490 SIP_DECL(sis900_mii_readreg), 491 SIP_DECL(sis900_mii_writereg), 492 SIP_DECL(sis900_mii_statchg), 493 SIP_DECL(sis900_set_filter), 494 SIP_DECL(sis900_read_macaddr), 495 }; 496 497 const struct sip_variant SIP_DECL(variant_dp83815) = { 498 SIP_DECL(dp83815_mii_readreg), 499 SIP_DECL(dp83815_mii_writereg), 500 SIP_DECL(dp83815_mii_statchg), 501 SIP_DECL(dp83815_set_filter), 502 SIP_DECL(dp83815_read_macaddr), 503 }; 504 #endif /* DP83820 */ 505 506 /* 507 * Devices supported by this driver. 
508 */ 509 const struct sip_product { 510 pci_vendor_id_t sip_vendor; 511 pci_product_id_t sip_product; 512 const char *sip_name; 513 const struct sip_variant *sip_variant; 514 } SIP_DECL(products)[] = { 515 #if defined(DP83820) 516 { PCI_VENDOR_NS, PCI_PRODUCT_NS_DP83820, 517 "NatSemi DP83820 Gigabit Ethernet", 518 &SIP_DECL(variant_dp83820) }, 519 #else 520 { PCI_VENDOR_SIS, PCI_PRODUCT_SIS_900, 521 "SiS 900 10/100 Ethernet", 522 &SIP_DECL(variant_sis900) }, 523 { PCI_VENDOR_SIS, PCI_PRODUCT_SIS_7016, 524 "SiS 7016 10/100 Ethernet", 525 &SIP_DECL(variant_sis900) }, 526 527 { PCI_VENDOR_NS, PCI_PRODUCT_NS_DP83815, 528 "NatSemi DP83815 10/100 Ethernet", 529 &SIP_DECL(variant_dp83815) }, 530 #endif /* DP83820 */ 531 532 { 0, 0, 533 NULL, 534 NULL }, 535 }; 536 537 static const struct sip_product * 538 SIP_DECL(lookup)(const struct pci_attach_args *pa) 539 { 540 const struct sip_product *sip; 541 542 for (sip = SIP_DECL(products); sip->sip_name != NULL; sip++) { 543 if (PCI_VENDOR(pa->pa_id) == sip->sip_vendor && 544 PCI_PRODUCT(pa->pa_id) == sip->sip_product) 545 return (sip); 546 } 547 return (NULL); 548 } 549 550 int 551 SIP_DECL(match)(struct device *parent, struct cfdata *cf, void *aux) 552 { 553 struct pci_attach_args *pa = aux; 554 555 if (SIP_DECL(lookup)(pa) != NULL) 556 return (1); 557 558 return (0); 559 } 560 561 void 562 SIP_DECL(attach)(struct device *parent, struct device *self, void *aux) 563 { 564 struct sip_softc *sc = (struct sip_softc *) self; 565 struct pci_attach_args *pa = aux; 566 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 567 pci_chipset_tag_t pc = pa->pa_pc; 568 pci_intr_handle_t ih; 569 const char *intrstr = NULL; 570 bus_space_tag_t iot, memt; 571 bus_space_handle_t ioh, memh; 572 bus_dma_segment_t seg; 573 int ioh_valid, memh_valid; 574 int i, rseg, error; 575 const struct sip_product *sip; 576 pcireg_t pmode; 577 u_int8_t enaddr[ETHER_ADDR_LEN]; 578 int pmreg; 579 #ifdef DP83820 580 pcireg_t memtype; 581 u_int32_t reg; 582 #endif /* DP83820 */ 583 584 callout_init(&sc->sc_tick_ch); 585 586 sip = SIP_DECL(lookup)(pa); 587 if (sip == NULL) { 588 printf("\n"); 589 panic(SIP_STR(attach) ": impossible"); 590 } 591 sc->sc_rev = PCI_REVISION(pa->pa_class); 592 593 printf(": %s, rev %#02x\n", sip->sip_name, sc->sc_rev); 594 595 sc->sc_model = sip; 596 597 /* 598 * XXX Work-around broken PXE firmware on some boards. 599 * 600 * The DP83815 shares an address decoder with the MEM BAR 601 * and the ROM BAR. Make sure the ROM BAR is disabled, 602 * so that memory mapped access works. 603 */ 604 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_MAPREG_ROM, 605 pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_MAPREG_ROM) & 606 ~PCI_MAPREG_ROM_ENABLE); 607 608 /* 609 * Map the device. 
610 */ 611 ioh_valid = (pci_mapreg_map(pa, SIP_PCI_CFGIOA, 612 PCI_MAPREG_TYPE_IO, 0, 613 &iot, &ioh, NULL, NULL) == 0); 614 #ifdef DP83820 615 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, SIP_PCI_CFGMA); 616 switch (memtype) { 617 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT: 618 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT: 619 memh_valid = (pci_mapreg_map(pa, SIP_PCI_CFGMA, 620 memtype, 0, &memt, &memh, NULL, NULL) == 0); 621 break; 622 default: 623 memh_valid = 0; 624 } 625 #else 626 memh_valid = (pci_mapreg_map(pa, SIP_PCI_CFGMA, 627 PCI_MAPREG_TYPE_MEM|PCI_MAPREG_MEM_TYPE_32BIT, 0, 628 &memt, &memh, NULL, NULL) == 0); 629 #endif /* DP83820 */ 630 631 if (memh_valid) { 632 sc->sc_st = memt; 633 sc->sc_sh = memh; 634 } else if (ioh_valid) { 635 sc->sc_st = iot; 636 sc->sc_sh = ioh; 637 } else { 638 printf("%s: unable to map device registers\n", 639 sc->sc_dev.dv_xname); 640 return; 641 } 642 643 sc->sc_dmat = pa->pa_dmat; 644 645 /* 646 * Make sure bus mastering is enabled. Also make sure 647 * Write/Invalidate is enabled if we're allowed to use it. 648 */ 649 pmreg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 650 if (pa->pa_flags & PCI_FLAGS_MWI_OKAY) 651 pmreg |= PCI_COMMAND_INVALIDATE_ENABLE; 652 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, 653 pmreg | PCI_COMMAND_MASTER_ENABLE); 654 655 /* Get it out of power save mode if needed. */ 656 if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PWRMGMT, &pmreg, 0)) { 657 pmode = pci_conf_read(pc, pa->pa_tag, pmreg + 4) & 0x3; 658 if (pmode == 3) { 659 /* 660 * The card has lost all configuration data in 661 * this state, so punt. 662 */ 663 printf("%s: unable to wake up from power state D3\n", 664 sc->sc_dev.dv_xname); 665 return; 666 } 667 if (pmode != 0) { 668 printf("%s: waking up from power state D%d\n", 669 sc->sc_dev.dv_xname, pmode); 670 pci_conf_write(pc, pa->pa_tag, pmreg + 4, 0); 671 } 672 } 673 674 /* 675 * Map and establish our interrupt. 676 */ 677 if (pci_intr_map(pa, &ih)) { 678 printf("%s: unable to map interrupt\n", sc->sc_dev.dv_xname); 679 return; 680 } 681 intrstr = pci_intr_string(pc, ih); 682 sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, SIP_DECL(intr), sc); 683 if (sc->sc_ih == NULL) { 684 printf("%s: unable to establish interrupt", 685 sc->sc_dev.dv_xname); 686 if (intrstr != NULL) 687 printf(" at %s", intrstr); 688 printf("\n"); 689 return; 690 } 691 printf("%s: interrupting at %s\n", sc->sc_dev.dv_xname, intrstr); 692 693 SIMPLEQ_INIT(&sc->sc_txfreeq); 694 SIMPLEQ_INIT(&sc->sc_txdirtyq); 695 696 /* 697 * Allocate the control data structures, and create and load the 698 * DMA map for it. 
699 */ 700 if ((error = bus_dmamem_alloc(sc->sc_dmat, 701 sizeof(struct sip_control_data), PAGE_SIZE, 0, &seg, 1, &rseg, 702 0)) != 0) { 703 printf("%s: unable to allocate control data, error = %d\n", 704 sc->sc_dev.dv_xname, error); 705 goto fail_0; 706 } 707 708 if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, 709 sizeof(struct sip_control_data), (caddr_t *)&sc->sc_control_data, 710 BUS_DMA_COHERENT)) != 0) { 711 printf("%s: unable to map control data, error = %d\n", 712 sc->sc_dev.dv_xname, error); 713 goto fail_1; 714 } 715 716 if ((error = bus_dmamap_create(sc->sc_dmat, 717 sizeof(struct sip_control_data), 1, 718 sizeof(struct sip_control_data), 0, 0, &sc->sc_cddmamap)) != 0) { 719 printf("%s: unable to create control data DMA map, " 720 "error = %d\n", sc->sc_dev.dv_xname, error); 721 goto fail_2; 722 } 723 724 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap, 725 sc->sc_control_data, sizeof(struct sip_control_data), NULL, 726 0)) != 0) { 727 printf("%s: unable to load control data DMA map, error = %d\n", 728 sc->sc_dev.dv_xname, error); 729 goto fail_3; 730 } 731 732 /* 733 * Create the transmit buffer DMA maps. 734 */ 735 for (i = 0; i < SIP_TXQUEUELEN; i++) { 736 if ((error = bus_dmamap_create(sc->sc_dmat, TX_DMAMAP_SIZE, 737 SIP_NTXSEGS, MCLBYTES, 0, 0, 738 &sc->sc_txsoft[i].txs_dmamap)) != 0) { 739 printf("%s: unable to create tx DMA map %d, " 740 "error = %d\n", sc->sc_dev.dv_xname, i, error); 741 goto fail_4; 742 } 743 } 744 745 /* 746 * Create the receive buffer DMA maps. 747 */ 748 for (i = 0; i < SIP_NRXDESC; i++) { 749 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, 750 MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) { 751 printf("%s: unable to create rx DMA map %d, " 752 "error = %d\n", sc->sc_dev.dv_xname, i, error); 753 goto fail_5; 754 } 755 sc->sc_rxsoft[i].rxs_mbuf = NULL; 756 } 757 758 /* 759 * Reset the chip to a known state. 760 */ 761 SIP_DECL(reset)(sc); 762 763 /* 764 * Read the Ethernet address from the EEPROM. This might 765 * also fetch other stuff from the EEPROM and stash it 766 * in the softc. 767 */ 768 sc->sc_cfg = 0; 769 #if !defined(DP83820) 770 if (SIP_SIS900_REV(sc,SIS_REV_635) || 771 SIP_SIS900_REV(sc,SIS_REV_900B)) 772 sc->sc_cfg |= (CFG_PESEL | CFG_RNDCNT); 773 #endif 774 775 (*sip->sip_variant->sipv_read_macaddr)(sc, pa, enaddr); 776 777 printf("%s: Ethernet address %s\n", sc->sc_dev.dv_xname, 778 ether_sprintf(enaddr)); 779 780 /* 781 * Initialize the configuration register: aggressive PCI 782 * bus request algorithm, default backoff, default OW timer, 783 * default parity error detection. 784 * 785 * NOTE: "Big endian mode" is useless on the SiS900 and 786 * friends -- it affects packet data, not descriptors. 787 */ 788 #ifdef DP83820 789 reg = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_CFG); 790 if (reg & CFG_PCI64_DET) { 791 printf("%s: 64-bit PCI slot detected\n", sc->sc_dev.dv_xname); 792 /* 793 * XXX Need some PCI flags indicating support for 794 * XXX 64-bit addressing (SAC or DAC) and 64-bit 795 * XXX data path. 
796 */ 797 } 798 if (sc->sc_cfg & (CFG_TBI_EN|CFG_EXT_125)) { 799 const char *sep = ""; 800 printf("%s: using ", sc->sc_dev.dv_xname); 801 if (sc->sc_cfg & CFG_EXT_125) { 802 printf("%s125MHz clock", sep); 803 sep = ", "; 804 } 805 if (sc->sc_cfg & CFG_TBI_EN) { 806 printf("%sten-bit interface", sep); 807 sep = ", "; 808 } 809 printf("\n"); 810 } 811 if ((pa->pa_flags & PCI_FLAGS_MRM_OKAY) == 0) 812 sc->sc_cfg |= CFG_MRM_DIS; 813 if ((pa->pa_flags & PCI_FLAGS_MWI_OKAY) == 0) 814 sc->sc_cfg |= CFG_MWI_DIS; 815 816 /* 817 * Use the extended descriptor format on the DP83820. This 818 * gives us an interface to VLAN tagging and IPv4/TCP/UDP 819 * checksumming. 820 */ 821 sc->sc_cfg |= CFG_EXTSTS_EN; 822 #endif /* DP83820 */ 823 824 /* 825 * Initialize our media structures and probe the MII. 826 */ 827 sc->sc_mii.mii_ifp = ifp; 828 sc->sc_mii.mii_readreg = sip->sip_variant->sipv_mii_readreg; 829 sc->sc_mii.mii_writereg = sip->sip_variant->sipv_mii_writereg; 830 sc->sc_mii.mii_statchg = sip->sip_variant->sipv_mii_statchg; 831 ifmedia_init(&sc->sc_mii.mii_media, 0, SIP_DECL(mediachange), 832 SIP_DECL(mediastatus)); 833 #ifdef DP83820 834 if (sc->sc_cfg & CFG_TBI_EN) { 835 /* Using ten-bit interface. */ 836 printf("%s: TBI -- FIXME\n", sc->sc_dev.dv_xname); 837 } else { 838 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 839 MII_OFFSET_ANY, 0); 840 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) { 841 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 842 0, NULL); 843 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE); 844 } else 845 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); 846 } 847 #else 848 mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY, 849 MII_OFFSET_ANY, 0); 850 if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) { 851 ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL); 852 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE); 853 } else 854 ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO); 855 #endif /* DP83820 */ 856 857 ifp = &sc->sc_ethercom.ec_if; 858 strcpy(ifp->if_xname, sc->sc_dev.dv_xname); 859 ifp->if_softc = sc; 860 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 861 ifp->if_ioctl = SIP_DECL(ioctl); 862 ifp->if_start = SIP_DECL(start); 863 ifp->if_watchdog = SIP_DECL(watchdog); 864 ifp->if_init = SIP_DECL(init); 865 ifp->if_stop = SIP_DECL(stop); 866 IFQ_SET_READY(&ifp->if_snd); 867 868 /* 869 * We can support 802.1Q VLAN-sized frames. 870 */ 871 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU; 872 873 #ifdef DP83820 874 /* 875 * And the DP83820 can do VLAN tagging in hardware, and 876 * support the jumbo Ethernet MTU. 877 */ 878 sc->sc_ethercom.ec_capabilities |= 879 ETHERCAP_VLAN_HWTAGGING | ETHERCAP_JUMBO_MTU; 880 881 /* 882 * The DP83820 can do IPv4, TCPv4, and UDPv4 checksums 883 * in hardware. 884 */ 885 ifp->if_capabilities |= IFCAP_CSUM_IPv4 | IFCAP_CSUM_TCPv4 | 886 IFCAP_CSUM_UDPv4; 887 #endif /* DP83820 */ 888 889 /* 890 * Attach the interface. 891 */ 892 if_attach(ifp); 893 ether_ifattach(ifp, enaddr); 894 895 /* 896 * The number of bytes that must be available in 897 * the Tx FIFO before the bus master can DMA more 898 * data into the FIFO. 899 */ 900 sc->sc_tx_fill_thresh = 64 / 32; 901 902 /* 903 * Start at a drain threshold of 512 bytes. We will 904 * increase it if a DMA underrun occurs. 905 * 906 * XXX The minimum value of this variable should be 907 * tuned. We may be able to improve performance 908 * by starting with a lower value. 
That, however, 909 * may trash the first few outgoing packets if the 910 * PCI bus is saturated. 911 */ 912 sc->sc_tx_drain_thresh = 512 / 32; 913 914 /* 915 * Initialize the Rx FIFO drain threshold. 916 * 917 * This is in units of 8 bytes. 918 * 919 * We should never set this value lower than 2; 14 bytes are 920 * required to filter the packet. 921 */ 922 sc->sc_rx_drain_thresh = 128 / 8; 923 924 #ifdef SIP_EVENT_COUNTERS 925 /* 926 * Attach event counters. 927 */ 928 evcnt_attach_dynamic(&sc->sc_ev_txsstall, EVCNT_TYPE_MISC, 929 NULL, sc->sc_dev.dv_xname, "txsstall"); 930 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC, 931 NULL, sc->sc_dev.dv_xname, "txdstall"); 932 evcnt_attach_dynamic(&sc->sc_ev_txintr, EVCNT_TYPE_INTR, 933 NULL, sc->sc_dev.dv_xname, "txintr"); 934 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR, 935 NULL, sc->sc_dev.dv_xname, "rxintr"); 936 #ifdef DP83820 937 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC, 938 NULL, sc->sc_dev.dv_xname, "rxipsum"); 939 evcnt_attach_dynamic(&sc->sc_ev_rxtcpsum, EVCNT_TYPE_MISC, 940 NULL, sc->sc_dev.dv_xname, "rxtcpsum"); 941 evcnt_attach_dynamic(&sc->sc_ev_rxudpsum, EVCNT_TYPE_MISC, 942 NULL, sc->sc_dev.dv_xname, "rxudpsum"); 943 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC, 944 NULL, sc->sc_dev.dv_xname, "txipsum"); 945 evcnt_attach_dynamic(&sc->sc_ev_txtcpsum, EVCNT_TYPE_MISC, 946 NULL, sc->sc_dev.dv_xname, "txtcpsum"); 947 evcnt_attach_dynamic(&sc->sc_ev_txudpsum, EVCNT_TYPE_MISC, 948 NULL, sc->sc_dev.dv_xname, "txudpsum"); 949 #endif /* DP83820 */ 950 #endif /* SIP_EVENT_COUNTERS */ 951 952 /* 953 * Make sure the interface is shutdown during reboot. 954 */ 955 sc->sc_sdhook = shutdownhook_establish(SIP_DECL(shutdown), sc); 956 if (sc->sc_sdhook == NULL) 957 printf("%s: WARNING: unable to establish shutdown hook\n", 958 sc->sc_dev.dv_xname); 959 return; 960 961 /* 962 * Free any resources we've allocated during the failed attach 963 * attempt. Do this in reverse order and fall through. 964 */ 965 fail_5: 966 for (i = 0; i < SIP_NRXDESC; i++) { 967 if (sc->sc_rxsoft[i].rxs_dmamap != NULL) 968 bus_dmamap_destroy(sc->sc_dmat, 969 sc->sc_rxsoft[i].rxs_dmamap); 970 } 971 fail_4: 972 for (i = 0; i < SIP_TXQUEUELEN; i++) { 973 if (sc->sc_txsoft[i].txs_dmamap != NULL) 974 bus_dmamap_destroy(sc->sc_dmat, 975 sc->sc_txsoft[i].txs_dmamap); 976 } 977 bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap); 978 fail_3: 979 bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap); 980 fail_2: 981 bus_dmamem_unmap(sc->sc_dmat, (caddr_t)sc->sc_control_data, 982 sizeof(struct sip_control_data)); 983 fail_1: 984 bus_dmamem_free(sc->sc_dmat, &seg, rseg); 985 fail_0: 986 return; 987 } 988 989 /* 990 * sip_shutdown: 991 * 992 * Make sure the interface is stopped at reboot time. 993 */ 994 void 995 SIP_DECL(shutdown)(void *arg) 996 { 997 struct sip_softc *sc = arg; 998 999 SIP_DECL(stop)(&sc->sc_ethercom.ec_if, 1); 1000 } 1001 1002 /* 1003 * sip_start: [ifnet interface function] 1004 * 1005 * Start packet transmission on the interface. 1006 */ 1007 void 1008 SIP_DECL(start)(struct ifnet *ifp) 1009 { 1010 struct sip_softc *sc = ifp->if_softc; 1011 struct mbuf *m0, *m; 1012 struct sip_txsoft *txs; 1013 bus_dmamap_t dmamap; 1014 int error, firsttx, nexttx, lasttx, ofree, seg; 1015 #ifdef DP83820 1016 u_int32_t extsts; 1017 #endif 1018 1019 /* 1020 * If we've been told to pause, don't transmit any more packets. 
	 */
	if (sc->sc_flags & SIPF_PAUSED)
		ifp->if_flags |= IFF_OACTIVE;

	if ((ifp->if_flags & (IFF_RUNNING|IFF_OACTIVE)) != IFF_RUNNING)
		return;

	/*
	 * Remember the previous number of free descriptors and
	 * the first descriptor we'll use.
	 */
	ofree = sc->sc_txfree;
	firsttx = sc->sc_txnext;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	for (;;) {
		/* Get a work queue entry. */
		if ((txs = SIMPLEQ_FIRST(&sc->sc_txfreeq)) == NULL) {
			SIP_EVCNT_INCR(&sc->sc_ev_txsstall);
			break;
		}

		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
#ifndef DP83820
		m = NULL;
#endif

		dmamap = txs->txs_dmamap;

#ifdef DP83820
		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  For the too-many-segments
		 * case, we simply report an error and drop the packet,
		 * since we can't sanely copy a jumbo packet to a single
		 * buffer.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT);
		if (error) {
			if (error == EFBIG) {
				printf("%s: Tx packet consumes too many "
				    "DMA segments, dropping...\n",
				    sc->sc_dev.dv_xname);
				IFQ_DEQUEUE(&ifp->if_snd, m0);
				m_freem(m0);
				continue;
			}
			/*
			 * Short on resources, just stop for now.
			 */
			break;
		}
#else /* DP83820 */
		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the allotted number of segments, or we
		 * were short on resources.  In this case, we'll copy
		 * and try again.
		 */
		if (bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE|BUS_DMA_NOWAIT) != 0) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    sc->sc_dev.dv_xname);
				break;
			}
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n", sc->sc_dev.dv_xname);
					m_freem(m);
					break;
				}
			}
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, caddr_t));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE|BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, "
				    "error = %d\n", sc->sc_dev.dv_xname, error);
				break;
			}
		}
#endif /* DP83820 */

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring as a termination point, to
		 * prevent wrap-around.
		 */
		if (dmamap->dm_nsegs > (sc->sc_txfree - 1)) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.  We haven't committed anything yet,
			 * so just unload the DMA map, put the packet
			 * back on the queue, and punt.  Notify the upper
			 * layer that there are no more slots left.
			 *
			 * XXX We could allocate an mbuf and copy, but
			 * XXX is it worth it?
1136 */ 1137 ifp->if_flags |= IFF_OACTIVE; 1138 bus_dmamap_unload(sc->sc_dmat, dmamap); 1139 #ifndef DP83820 1140 if (m != NULL) 1141 m_freem(m); 1142 #endif 1143 SIP_EVCNT_INCR(&sc->sc_ev_txdstall); 1144 break; 1145 } 1146 1147 IFQ_DEQUEUE(&ifp->if_snd, m0); 1148 #ifndef DP83820 1149 if (m != NULL) { 1150 m_freem(m0); 1151 m0 = m; 1152 } 1153 #endif 1154 1155 /* 1156 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET. 1157 */ 1158 1159 /* Sync the DMA map. */ 1160 bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize, 1161 BUS_DMASYNC_PREWRITE); 1162 1163 /* 1164 * Initialize the transmit descriptors. 1165 */ 1166 for (nexttx = sc->sc_txnext, seg = 0; 1167 seg < dmamap->dm_nsegs; 1168 seg++, nexttx = SIP_NEXTTX(nexttx)) { 1169 /* 1170 * If this is the first descriptor we're 1171 * enqueueing, don't set the OWN bit just 1172 * yet. That could cause a race condition. 1173 * We'll do it below. 1174 */ 1175 sc->sc_txdescs[nexttx].sipd_bufptr = 1176 htole32(dmamap->dm_segs[seg].ds_addr); 1177 sc->sc_txdescs[nexttx].sipd_cmdsts = 1178 htole32((nexttx == firsttx ? 0 : CMDSTS_OWN) | 1179 CMDSTS_MORE | dmamap->dm_segs[seg].ds_len); 1180 #ifdef DP83820 1181 sc->sc_txdescs[nexttx].sipd_extsts = 0; 1182 #endif /* DP83820 */ 1183 lasttx = nexttx; 1184 } 1185 1186 /* Clear the MORE bit on the last segment. */ 1187 sc->sc_txdescs[lasttx].sipd_cmdsts &= htole32(~CMDSTS_MORE); 1188 1189 #ifdef DP83820 1190 /* 1191 * If VLANs are enabled and the packet has a VLAN tag, set 1192 * up the descriptor to encapsulate the packet for us. 1193 * 1194 * This apparently has to be on the last descriptor of 1195 * the packet. 1196 */ 1197 if (sc->sc_ethercom.ec_nvlans != 0 && 1198 (m = m_aux_find(m0, AF_LINK, ETHERTYPE_VLAN)) != NULL) { 1199 sc->sc_txdescs[lasttx].sipd_extsts |= 1200 htole32(EXTSTS_VPKT | 1201 htons(*mtod(m, int *) & EXTSTS_VTCI)); 1202 } 1203 1204 /* 1205 * If the upper-layer has requested IPv4/TCPv4/UDPv4 1206 * checksumming, set up the descriptor to do this work 1207 * for us. 1208 * 1209 * This apparently has to be on the first descriptor of 1210 * the packet. 1211 * 1212 * Byte-swap constants so the compiler can optimize. 1213 */ 1214 extsts = 0; 1215 if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) { 1216 KDASSERT(ifp->if_capenable & IFCAP_CSUM_IPv4); 1217 SIP_EVCNT_INCR(&sc->sc_ev_txipsum); 1218 extsts |= htole32(EXTSTS_IPPKT); 1219 } 1220 if (m0->m_pkthdr.csum_flags & M_CSUM_TCPv4) { 1221 KDASSERT(ifp->if_capenable & IFCAP_CSUM_TCPv4); 1222 SIP_EVCNT_INCR(&sc->sc_ev_txtcpsum); 1223 extsts |= htole32(EXTSTS_TCPPKT); 1224 } else if (m0->m_pkthdr.csum_flags & M_CSUM_UDPv4) { 1225 KDASSERT(ifp->if_capenable & IFCAP_CSUM_UDPv4); 1226 SIP_EVCNT_INCR(&sc->sc_ev_txudpsum); 1227 extsts |= htole32(EXTSTS_UDPPKT); 1228 } 1229 sc->sc_txdescs[sc->sc_txnext].sipd_extsts |= extsts; 1230 #endif /* DP83820 */ 1231 1232 /* Sync the descriptors we're using. */ 1233 SIP_CDTXSYNC(sc, sc->sc_txnext, dmamap->dm_nsegs, 1234 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 1235 1236 /* 1237 * Store a pointer to the packet so we can free it later, 1238 * and remember what txdirty will be once the packet is 1239 * done. 1240 */ 1241 txs->txs_mbuf = m0; 1242 txs->txs_firstdesc = sc->sc_txnext; 1243 txs->txs_lastdesc = lasttx; 1244 1245 /* Advance the tx pointer. */ 1246 sc->sc_txfree -= dmamap->dm_nsegs; 1247 sc->sc_txnext = nexttx; 1248 1249 SIMPLEQ_REMOVE_HEAD(&sc->sc_txfreeq, txs, txs_q); 1250 SIMPLEQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q); 1251 1252 #if NBPFILTER > 0 1253 /* 1254 * Pass the packet to any BPF listeners. 
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m0);
#endif /* NBPFILTER > 0 */
	}

	if (txs == NULL || sc->sc_txfree == 0) {
		/* No more slots left; notify upper layer. */
		ifp->if_flags |= IFF_OACTIVE;
	}

	if (sc->sc_txfree != ofree) {
		/*
		 * Cause a descriptor interrupt to happen on the
		 * last packet we enqueued.
		 */
		sc->sc_txdescs[lasttx].sipd_cmdsts |= htole32(CMDSTS_INTR);
		SIP_CDTXSYNC(sc, lasttx, 1,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * The entire packet chain is set up.  Give the
		 * first descriptor to the chip now.
		 */
		sc->sc_txdescs[firsttx].sipd_cmdsts |= htole32(CMDSTS_OWN);
		SIP_CDTXSYNC(sc, firsttx, 1,
		    BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);

		/*
		 * Start the transmit process.  Note, the manual says
		 * that if there are no pending transmissions in the
		 * chip's internal queue (indicated by TXE being clear),
		 * then the driver software must set the TXDP to the
		 * first descriptor to be transmitted.  However, if we
		 * do this, it causes serious performance degradation on
		 * the DP83820 under load; not setting TXDP doesn't seem
		 * to adversely affect the SiS 900 or DP83815.
		 *
		 * Well, I guess it wouldn't be the first time a manual
		 * has lied -- and they could be speaking of the NULL-
		 * terminated descriptor list case, rather than OWN-
		 * terminated rings.
		 */
#if 0
		if ((bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_CR) &
		     CR_TXE) == 0) {
			bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_TXDP,
			    SIP_CDTXADDR(sc, firsttx));
			bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_CR, CR_TXE);
		}
#else
		bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_CR, CR_TXE);
#endif

		/* Set a watchdog timer in case the chip flakes out. */
		ifp->if_timer = 5;
	}
}

/*
 * sip_watchdog:	[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
void
SIP_DECL(watchdog)(struct ifnet *ifp)
{
	struct sip_softc *sc = ifp->if_softc;

	/*
	 * The chip seems to ignore the CMDSTS_INTR bit sometimes!
	 * If we get a timeout, try to sweep up transmit descriptors.
	 * If we manage to sweep them all up, ignore the lack of
	 * interrupt.
	 */
	SIP_DECL(txintr)(sc);

	if (sc->sc_txfree != SIP_NTXDESC) {
		printf("%s: device timeout\n", sc->sc_dev.dv_xname);
		ifp->if_oerrors++;

		/* Reset the interface. */
		(void) SIP_DECL(init)(ifp);
	} else if (ifp->if_flags & IFF_DEBUG)
		printf("%s: recovered from device timeout\n",
		    sc->sc_dev.dv_xname);

	/* Try to get more packets going. */
	SIP_DECL(start)(ifp);
}

/*
 * sip_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
 */
int
SIP_DECL(ioctl)(struct ifnet *ifp, u_long cmd, caddr_t data)
{
	struct sip_softc *sc = ifp->if_softc;
	struct ifreq *ifr = (struct ifreq *)data;
	int s, error;

	s = splnet();

	switch (cmd) {
	case SIOCSIFMEDIA:
	case SIOCGIFMEDIA:
		error = ifmedia_ioctl(ifp, ifr, &sc->sc_mii.mii_media, cmd);
		break;

	default:
		error = ether_ioctl(ifp, cmd, data);
		if (error == ENETRESET) {
			/*
			 * Multicast list has changed; set the hardware filter
			 * accordingly.
1372 */ 1373 (*sc->sc_model->sip_variant->sipv_set_filter)(sc); 1374 error = 0; 1375 } 1376 break; 1377 } 1378 1379 /* Try to get more packets going. */ 1380 SIP_DECL(start)(ifp); 1381 1382 splx(s); 1383 return (error); 1384 } 1385 1386 /* 1387 * sip_intr: 1388 * 1389 * Interrupt service routine. 1390 */ 1391 int 1392 SIP_DECL(intr)(void *arg) 1393 { 1394 struct sip_softc *sc = arg; 1395 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1396 u_int32_t isr; 1397 int handled = 0; 1398 1399 for (;;) { 1400 /* Reading clears interrupt. */ 1401 isr = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_ISR); 1402 if ((isr & sc->sc_imr) == 0) 1403 break; 1404 1405 handled = 1; 1406 1407 if (isr & (ISR_RXORN|ISR_RXIDLE|ISR_RXDESC)) { 1408 SIP_EVCNT_INCR(&sc->sc_ev_rxintr); 1409 1410 /* Grab any new packets. */ 1411 SIP_DECL(rxintr)(sc); 1412 1413 if (isr & ISR_RXORN) { 1414 printf("%s: receive FIFO overrun\n", 1415 sc->sc_dev.dv_xname); 1416 1417 /* XXX adjust rx_drain_thresh? */ 1418 } 1419 1420 if (isr & ISR_RXIDLE) { 1421 printf("%s: receive ring overrun\n", 1422 sc->sc_dev.dv_xname); 1423 1424 /* Get the receive process going again. */ 1425 bus_space_write_4(sc->sc_st, sc->sc_sh, 1426 SIP_RXDP, SIP_CDRXADDR(sc, sc->sc_rxptr)); 1427 bus_space_write_4(sc->sc_st, sc->sc_sh, 1428 SIP_CR, CR_RXE); 1429 } 1430 } 1431 1432 if (isr & (ISR_TXURN|ISR_TXDESC)) { 1433 SIP_EVCNT_INCR(&sc->sc_ev_txintr); 1434 1435 /* Sweep up transmit descriptors. */ 1436 SIP_DECL(txintr)(sc); 1437 1438 if (isr & ISR_TXURN) { 1439 u_int32_t thresh; 1440 1441 printf("%s: transmit FIFO underrun", 1442 sc->sc_dev.dv_xname); 1443 1444 thresh = sc->sc_tx_drain_thresh + 1; 1445 if (thresh <= TXCFG_DRTH && 1446 (thresh * 32) <= (SIP_TXFIFO_SIZE - 1447 (sc->sc_tx_fill_thresh * 32))) { 1448 printf("; increasing Tx drain " 1449 "threshold to %u bytes\n", 1450 thresh * 32); 1451 sc->sc_tx_drain_thresh = thresh; 1452 (void) SIP_DECL(init)(ifp); 1453 } else { 1454 (void) SIP_DECL(init)(ifp); 1455 printf("\n"); 1456 } 1457 } 1458 } 1459 1460 #if !defined(DP83820) 1461 if (sc->sc_imr & (ISR_PAUSE_END|ISR_PAUSE_ST)) { 1462 if (isr & ISR_PAUSE_ST) { 1463 sc->sc_flags |= SIPF_PAUSED; 1464 ifp->if_flags |= IFF_OACTIVE; 1465 } 1466 if (isr & ISR_PAUSE_END) { 1467 sc->sc_flags &= ~SIPF_PAUSED; 1468 ifp->if_flags &= ~IFF_OACTIVE; 1469 } 1470 } 1471 #endif /* ! DP83820 */ 1472 1473 if (isr & ISR_HIBERR) { 1474 #define PRINTERR(bit, str) \ 1475 if (isr & (bit)) \ 1476 printf("%s: %s\n", sc->sc_dev.dv_xname, str) 1477 PRINTERR(ISR_DPERR, "parity error"); 1478 PRINTERR(ISR_SSERR, "system error"); 1479 PRINTERR(ISR_RMABT, "master abort"); 1480 PRINTERR(ISR_RTABT, "target abort"); 1481 PRINTERR(ISR_RXSOVR, "receive status FIFO overrun"); 1482 (void) SIP_DECL(init)(ifp); 1483 #undef PRINTERR 1484 } 1485 } 1486 1487 /* Try to get more packets going. */ 1488 SIP_DECL(start)(ifp); 1489 1490 return (handled); 1491 } 1492 1493 /* 1494 * sip_txintr: 1495 * 1496 * Helper; handle transmit interrupts. 1497 */ 1498 void 1499 SIP_DECL(txintr)(struct sip_softc *sc) 1500 { 1501 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1502 struct sip_txsoft *txs; 1503 u_int32_t cmdsts; 1504 1505 if ((sc->sc_flags & SIPF_PAUSED) == 0) 1506 ifp->if_flags &= ~IFF_OACTIVE; 1507 1508 /* 1509 * Go through our Tx list and free mbufs for those 1510 * frames which have been transmitted. 
1511 */ 1512 while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) { 1513 SIP_CDTXSYNC(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs, 1514 BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1515 1516 cmdsts = le32toh(sc->sc_txdescs[txs->txs_lastdesc].sipd_cmdsts); 1517 if (cmdsts & CMDSTS_OWN) 1518 break; 1519 1520 SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs, txs_q); 1521 1522 sc->sc_txfree += txs->txs_dmamap->dm_nsegs; 1523 1524 bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap, 1525 0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE); 1526 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); 1527 m_freem(txs->txs_mbuf); 1528 txs->txs_mbuf = NULL; 1529 1530 SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); 1531 1532 /* 1533 * Check for errors and collisions. 1534 */ 1535 if (cmdsts & 1536 (CMDSTS_Tx_TXA|CMDSTS_Tx_TFU|CMDSTS_Tx_ED|CMDSTS_Tx_EC)) { 1537 ifp->if_oerrors++; 1538 if (cmdsts & CMDSTS_Tx_EC) 1539 ifp->if_collisions += 16; 1540 if (ifp->if_flags & IFF_DEBUG) { 1541 if (cmdsts & CMDSTS_Tx_ED) 1542 printf("%s: excessive deferral\n", 1543 sc->sc_dev.dv_xname); 1544 if (cmdsts & CMDSTS_Tx_EC) 1545 printf("%s: excessive collisions\n", 1546 sc->sc_dev.dv_xname); 1547 } 1548 } else { 1549 /* Packet was transmitted successfully. */ 1550 ifp->if_opackets++; 1551 ifp->if_collisions += CMDSTS_COLLISIONS(cmdsts); 1552 } 1553 } 1554 1555 /* 1556 * If there are no more pending transmissions, cancel the watchdog 1557 * timer. 1558 */ 1559 if (txs == NULL) 1560 ifp->if_timer = 0; 1561 } 1562 1563 #if defined(DP83820) 1564 /* 1565 * sip_rxintr: 1566 * 1567 * Helper; handle receive interrupts. 1568 */ 1569 void 1570 SIP_DECL(rxintr)(struct sip_softc *sc) 1571 { 1572 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 1573 struct sip_rxsoft *rxs; 1574 struct mbuf *m, *tailm; 1575 u_int32_t cmdsts, extsts; 1576 int i, len; 1577 1578 for (i = sc->sc_rxptr;; i = SIP_NEXTRX(i)) { 1579 rxs = &sc->sc_rxsoft[i]; 1580 1581 SIP_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); 1582 1583 cmdsts = le32toh(sc->sc_rxdescs[i].sipd_cmdsts); 1584 extsts = le32toh(sc->sc_rxdescs[i].sipd_extsts); 1585 1586 /* 1587 * NOTE: OWN is set if owned by _consumer_. We're the 1588 * consumer of the receive ring, so if the bit is clear, 1589 * we have processed all of the packets. 1590 */ 1591 if ((cmdsts & CMDSTS_OWN) == 0) { 1592 /* 1593 * We have processed all of the receive buffers. 1594 */ 1595 break; 1596 } 1597 1598 if (__predict_false(sc->sc_rxdiscard)) { 1599 SIP_INIT_RXDESC(sc, i); 1600 if ((cmdsts & CMDSTS_MORE) == 0) { 1601 /* Reset our state. */ 1602 sc->sc_rxdiscard = 0; 1603 } 1604 continue; 1605 } 1606 1607 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 1608 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); 1609 1610 m = rxs->rxs_mbuf; 1611 1612 /* 1613 * Add a new receive buffer to the ring. 1614 */ 1615 if (SIP_DECL(add_rxbuf)(sc, i) != 0) { 1616 /* 1617 * Failed, throw away what we've done so 1618 * far, and discard the rest of the packet. 1619 */ 1620 ifp->if_ierrors++; 1621 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 1622 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 1623 SIP_INIT_RXDESC(sc, i); 1624 if (cmdsts & CMDSTS_MORE) 1625 sc->sc_rxdiscard = 1; 1626 if (sc->sc_rxhead != NULL) 1627 m_freem(sc->sc_rxhead); 1628 SIP_RXCHAIN_RESET(sc); 1629 continue; 1630 } 1631 1632 SIP_RXCHAIN_LINK(sc, m); 1633 1634 /* 1635 * If this is not the end of the packet, keep 1636 * looking. 
1637 */ 1638 if (cmdsts & CMDSTS_MORE) { 1639 sc->sc_rxlen += m->m_len; 1640 continue; 1641 } 1642 1643 /* 1644 * Okay, we have the entire packet now... 1645 */ 1646 *sc->sc_rxtailp = NULL; 1647 m = sc->sc_rxhead; 1648 tailm = sc->sc_rxtail; 1649 1650 SIP_RXCHAIN_RESET(sc); 1651 1652 /* 1653 * If an error occurred, update stats and drop the packet. 1654 */ 1655 if (cmdsts & (CMDSTS_Rx_RXA|CMDSTS_Rx_RUNT| 1656 CMDSTS_Rx_ISE|CMDSTS_Rx_CRCE|CMDSTS_Rx_FAE)) { 1657 ifp->if_ierrors++; 1658 if ((cmdsts & CMDSTS_Rx_RXA) != 0 && 1659 (cmdsts & CMDSTS_Rx_RXO) == 0) { 1660 /* Receive overrun handled elsewhere. */ 1661 printf("%s: receive descriptor error\n", 1662 sc->sc_dev.dv_xname); 1663 } 1664 #define PRINTERR(bit, str) \ 1665 if (cmdsts & (bit)) \ 1666 printf("%s: %s\n", sc->sc_dev.dv_xname, str) 1667 PRINTERR(CMDSTS_Rx_RUNT, "runt packet"); 1668 PRINTERR(CMDSTS_Rx_ISE, "invalid symbol error"); 1669 PRINTERR(CMDSTS_Rx_CRCE, "CRC error"); 1670 PRINTERR(CMDSTS_Rx_FAE, "frame alignment error"); 1671 #undef PRINTERR 1672 m_freem(m); 1673 continue; 1674 } 1675 1676 /* 1677 * No errors. 1678 * 1679 * Note, the DP83820 includes the CRC with 1680 * every packet. 1681 */ 1682 len = CMDSTS_SIZE(cmdsts); 1683 tailm->m_len = len - sc->sc_rxlen; 1684 1685 /* 1686 * If the packet is small enough to fit in a 1687 * single header mbuf, allocate one and copy 1688 * the data into it. This greatly reduces 1689 * memory consumption when we receive lots 1690 * of small packets. 1691 */ 1692 if (SIP_DECL(copy_small) != 0 && len <= (MHLEN - 2)) { 1693 struct mbuf *nm; 1694 MGETHDR(nm, M_DONTWAIT, MT_DATA); 1695 if (nm == NULL) { 1696 ifp->if_ierrors++; 1697 m_freem(m); 1698 continue; 1699 } 1700 nm->m_data += 2; 1701 nm->m_pkthdr.len = nm->m_len = len; 1702 m_copydata(m, 0, len, mtod(nm, caddr_t)); 1703 m_freem(m); 1704 m = nm; 1705 } 1706 #ifndef __NO_STRICT_ALIGNMENT 1707 else { 1708 /* 1709 * The DP83820's receive buffers must be 4-byte 1710 * aligned. But this means that the data after 1711 * the Ethernet header is misaligned. To compensate, 1712 * we have artificially shortened the buffer size 1713 * in the descriptor, and we do an overlapping copy 1714 * of the data two bytes further in (in the first 1715 * buffer of the chain only). 1716 */ 1717 memmove(mtod(m, caddr_t) + 2, mtod(m, caddr_t), 1718 m->m_len); 1719 m->m_data += 2; 1720 } 1721 #endif /* ! __NO_STRICT_ALIGNMENT */ 1722 1723 /* 1724 * If VLANs are enabled, VLAN packets have been unwrapped 1725 * for us. Associate the tag with the packet. 1726 */ 1727 if (sc->sc_ethercom.ec_nvlans != 0 && 1728 (extsts & EXTSTS_VPKT) != 0) { 1729 struct mbuf *vtag; 1730 1731 vtag = m_aux_add(m, AF_LINK, ETHERTYPE_VLAN); 1732 if (vtag == NULL) { 1733 ifp->if_ierrors++; 1734 printf("%s: unable to allocate VLAN tag\n", 1735 sc->sc_dev.dv_xname); 1736 m_freem(m); 1737 continue; 1738 } 1739 1740 *mtod(vtag, int *) = ntohs(extsts & EXTSTS_VTCI); 1741 vtag->m_len = sizeof(int); 1742 } 1743 1744 /* 1745 * Set the incoming checksum information for the 1746 * packet. 
		 */
		if ((extsts & EXTSTS_IPPKT) != 0) {
			SIP_EVCNT_INCR(&sc->sc_ev_rxipsum);
			m->m_pkthdr.csum_flags |= M_CSUM_IPv4;
			if (extsts & EXTSTS_Rx_IPERR)
				m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD;
			if (extsts & EXTSTS_TCPPKT) {
				SIP_EVCNT_INCR(&sc->sc_ev_rxtcpsum);
				m->m_pkthdr.csum_flags |= M_CSUM_TCPv4;
				if (extsts & EXTSTS_Rx_TCPERR)
					m->m_pkthdr.csum_flags |=
					    M_CSUM_TCP_UDP_BAD;
			} else if (extsts & EXTSTS_UDPPKT) {
				SIP_EVCNT_INCR(&sc->sc_ev_rxudpsum);
				m->m_pkthdr.csum_flags |= M_CSUM_UDPv4;
				if (extsts & EXTSTS_Rx_UDPERR)
					m->m_pkthdr.csum_flags |=
					    M_CSUM_TCP_UDP_BAD;
			}
		}

		ifp->if_ipackets++;
		m->m_flags |= M_HASFCS;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = len;

#if NBPFILTER > 0
		/*
		 * Pass this up to any BPF listeners, but only
		 * pass it up the stack if it's for us.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;
}
#else /* ! DP83820 */
/*
 * sip_rxintr:
 *
 *	Helper; handle receive interrupts.
 */
void
SIP_DECL(rxintr)(struct sip_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct sip_rxsoft *rxs;
	struct mbuf *m;
	u_int32_t cmdsts;
	int i, len;

	for (i = sc->sc_rxptr;; i = SIP_NEXTRX(i)) {
		rxs = &sc->sc_rxsoft[i];

		SIP_CDRXSYNC(sc, i, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE);

		cmdsts = le32toh(sc->sc_rxdescs[i].sipd_cmdsts);

		/*
		 * NOTE: OWN is set if owned by _consumer_.  We're the
		 * consumer of the receive ring, so if the bit is clear,
		 * we have processed all of the packets.
		 */
		if ((cmdsts & CMDSTS_OWN) == 0) {
			/*
			 * We have processed all of the receive buffers.
			 */
			break;
		}

		/*
		 * If any collisions were seen on the wire, count one.
		 */
		if (cmdsts & CMDSTS_Rx_COL)
			ifp->if_collisions++;

		/*
		 * If an error occurred, update stats, clear the status
		 * word, and leave the packet buffer in place.  It will
		 * simply be reused the next time the ring comes around.
		 */
		if (cmdsts & (CMDSTS_Rx_RXA|CMDSTS_Rx_RUNT|
		    CMDSTS_Rx_ISE|CMDSTS_Rx_CRCE|CMDSTS_Rx_FAE)) {
			ifp->if_ierrors++;
			if ((cmdsts & CMDSTS_Rx_RXA) != 0 &&
			    (cmdsts & CMDSTS_Rx_RXO) == 0) {
				/* Receive overrun handled elsewhere. */
				printf("%s: receive descriptor error\n",
				    sc->sc_dev.dv_xname);
			}
#define	PRINTERR(bit, str)						\
			if (cmdsts & (bit))				\
				printf("%s: %s\n", sc->sc_dev.dv_xname, str)
			PRINTERR(CMDSTS_Rx_RUNT, "runt packet");
			PRINTERR(CMDSTS_Rx_ISE, "invalid symbol error");
			PRINTERR(CMDSTS_Rx_CRCE, "CRC error");
			PRINTERR(CMDSTS_Rx_FAE, "frame alignment error");
#undef PRINTERR
			SIP_INIT_RXDESC(sc, i);
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		/*
		 * No errors; receive the packet.  Note, the SiS 900
		 * includes the CRC with every packet.
		 */
		len = CMDSTS_SIZE(cmdsts);

#ifdef __NO_STRICT_ALIGNMENT
		/*
		 * If the packet is small enough to fit in a
		 * single header mbuf, allocate one and copy
		 * the data into it.  This greatly reduces
		 * memory consumption when we receive lots
		 * of small packets.
		 *
		 * Otherwise, we add a new buffer to the receive
		 * chain.  If this fails, we drop the packet and
		 * recycle the old buffer.
		 */
		if (SIP_DECL(copy_small) != 0 && len <= MHLEN) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL)
				goto dropit;
			memcpy(mtod(m, caddr_t),
			    mtod(rxs->rxs_mbuf, caddr_t), len);
			SIP_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize,
			    BUS_DMASYNC_PREREAD);
		} else {
			m = rxs->rxs_mbuf;
			if (SIP_DECL(add_rxbuf)(sc, i) != 0) {
 dropit:
				ifp->if_ierrors++;
				SIP_INIT_RXDESC(sc, i);
				bus_dmamap_sync(sc->sc_dmat,
				    rxs->rxs_dmamap, 0,
				    rxs->rxs_dmamap->dm_mapsize,
				    BUS_DMASYNC_PREREAD);
				continue;
			}
		}
#else
		/*
		 * The SiS 900's receive buffers must be 4-byte aligned.
		 * But this means that the data after the Ethernet header
		 * is misaligned.  We must allocate a new buffer and
		 * copy the data, shifted forward 2 bytes.
		 */
		MGETHDR(m, M_DONTWAIT, MT_DATA);
		if (m == NULL) {
 dropit:
			ifp->if_ierrors++;
			SIP_INIT_RXDESC(sc, i);
			bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
			    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
			continue;
		}
		if (len > (MHLEN - 2)) {
			MCLGET(m, M_DONTWAIT);
			if ((m->m_flags & M_EXT) == 0) {
				m_freem(m);
				goto dropit;
			}
		}
		m->m_data += 2;

		/*
		 * Note that we use clusters for incoming frames, so the
		 * buffer is virtually contiguous.
		 */
		memcpy(mtod(m, caddr_t), mtod(rxs->rxs_mbuf, caddr_t), len);

		/* Allow the receive descriptor to continue using its mbuf. */
		SIP_INIT_RXDESC(sc, i);
		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);
#endif /* __NO_STRICT_ALIGNMENT */

		ifp->if_ipackets++;
		m->m_flags |= M_HASFCS;
		m->m_pkthdr.rcvif = ifp;
		m->m_pkthdr.len = m->m_len = len;

#if NBPFILTER > 0
		/*
		 * Pass this up to any BPF listeners, but only
		 * pass it up the stack if it's for us.
		 */
		if (ifp->if_bpf)
			bpf_mtap(ifp->if_bpf, m);
#endif /* NBPFILTER > 0 */

		/* Pass it on. */
		(*ifp->if_input)(ifp, m);
	}

	/* Update the receive pointer. */
	sc->sc_rxptr = i;
}
#endif /* DP83820 */

/*
 * sip_tick:
 *
 *	One second timer, used to tick the MII.
 */
void
SIP_DECL(tick)(void *arg)
{
	struct sip_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	callout_reset(&sc->sc_tick_ch, hz, SIP_DECL(tick), sc);
}

/*
 * sip_reset:
 *
 *	Perform a soft reset on the SiS 900.
1981 */ 1982 void 1983 SIP_DECL(reset)(struct sip_softc *sc) 1984 { 1985 bus_space_tag_t st = sc->sc_st; 1986 bus_space_handle_t sh = sc->sc_sh; 1987 int i; 1988 1989 bus_space_write_4(st, sh, SIP_IER, 0); 1990 bus_space_write_4(st, sh, SIP_IMR, 0); 1991 bus_space_write_4(st, sh, SIP_RFCR, 0); 1992 bus_space_write_4(st, sh, SIP_CR, CR_RST); 1993 1994 for (i = 0; i < SIP_TIMEOUT; i++) { 1995 if ((bus_space_read_4(st, sh, SIP_CR) & CR_RST) == 0) 1996 break; 1997 delay(2); 1998 } 1999 2000 if (i == SIP_TIMEOUT) 2001 printf("%s: reset failed to complete\n", sc->sc_dev.dv_xname); 2002 2003 delay(1000); 2004 2005 #ifdef DP83820 2006 /* 2007 * Set the general purpose I/O bits. Do it here in case we 2008 * need to have GPIO set up to talk to the media interface. 2009 */ 2010 bus_space_write_4(st, sh, SIP_GPIOR, sc->sc_gpior); 2011 delay(1000); 2012 #endif /* DP83820 */ 2013 } 2014 2015 /* 2016 * sip_init: [ ifnet interface function ] 2017 * 2018 * Initialize the interface. Must be called at splnet(). 2019 */ 2020 int 2021 SIP_DECL(init)(struct ifnet *ifp) 2022 { 2023 struct sip_softc *sc = ifp->if_softc; 2024 bus_space_tag_t st = sc->sc_st; 2025 bus_space_handle_t sh = sc->sc_sh; 2026 struct sip_txsoft *txs; 2027 struct sip_rxsoft *rxs; 2028 struct sip_desc *sipd; 2029 u_int32_t reg; 2030 int i, error = 0; 2031 2032 /* 2033 * Cancel any pending I/O. 2034 */ 2035 SIP_DECL(stop)(ifp, 0); 2036 2037 /* 2038 * Reset the chip to a known state. 2039 */ 2040 SIP_DECL(reset)(sc); 2041 2042 #if !defined(DP83820) 2043 if (SIP_CHIP_MODEL(sc, PCI_VENDOR_NS, PCI_PRODUCT_NS_DP83815)) { 2044 /* 2045 * DP83815 manual, page 78: 2046 * 4.4 Recommended Registers Configuration 2047 * For optimum performance of the DP83815, version noted 2048 * as DP83815CVNG (SRR = 203h), the listed register 2049 * modifications must be followed in sequence... 2050 * 2051 * It's not clear if this should be 302h or 203h because that 2052 * chip name is listed as SRR 302h in the description of the 2053 * SRR register. However, my revision 302h DP83815 on the 2054 * Netgear FA311 purchased in 02/2001 needs these settings 2055 * to avoid tons of errors in AcceptPerfectMatch (non- 2056 * IFF_PROMISC) mode. I do not know if other revisions need 2057 * this set or not. [briggs -- 09 March 2001] 2058 * 2059 * Note that only the low-order 12 bits of 0xe4 are documented 2060 * and that this sets reserved bits in that register. 2061 */ 2062 reg = bus_space_read_4(st, sh, SIP_NS_SRR); 2063 if (reg == 0x302) { 2064 bus_space_write_4(st, sh, 0x00cc, 0x0001); 2065 bus_space_write_4(st, sh, 0x00e4, 0x189C); 2066 bus_space_write_4(st, sh, 0x00fc, 0x0000); 2067 bus_space_write_4(st, sh, 0x00f4, 0x5040); 2068 bus_space_write_4(st, sh, 0x00f8, 0x008c); 2069 } 2070 } 2071 #endif /* ! DP83820 */ 2072 2073 /* 2074 * Initialize the transmit descriptor ring. 2075 */ 2076 for (i = 0; i < SIP_NTXDESC; i++) { 2077 sipd = &sc->sc_txdescs[i]; 2078 memset(sipd, 0, sizeof(struct sip_desc)); 2079 sipd->sipd_link = htole32(SIP_CDTXADDR(sc, SIP_NEXTTX(i))); 2080 } 2081 SIP_CDTXSYNC(sc, 0, SIP_NTXDESC, 2082 BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); 2083 sc->sc_txfree = SIP_NTXDESC; 2084 sc->sc_txnext = 0; 2085 2086 /* 2087 * Initialize the transmit job descriptors. 
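 *
 * (Aside on the hardware ring initialized just above, for illustration
 * only: each sipd_link holds the DMA address of the next descriptor, and
 * because the ring size is a power of two, SIP_NEXTTX() wraps by masking,
 * so SIP_NEXTTX(SIP_NTXDESC - 1) == 0 and the last descriptor links back
 * to the first.  After the loop, for every i:
 *
 *	sc->sc_txdescs[i].sipd_link ==
 *	    htole32(SIP_CDTXADDR(sc, SIP_NEXTTX(i)))
 *
 * which is what presents the list to the chip as one circular ring.)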
2088 */ 2089 SIMPLEQ_INIT(&sc->sc_txfreeq); 2090 SIMPLEQ_INIT(&sc->sc_txdirtyq); 2091 for (i = 0; i < SIP_TXQUEUELEN; i++) { 2092 txs = &sc->sc_txsoft[i]; 2093 txs->txs_mbuf = NULL; 2094 SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); 2095 } 2096 2097 /* 2098 * Initialize the receive descriptor and receive job 2099 * descriptor rings. 2100 */ 2101 for (i = 0; i < SIP_NRXDESC; i++) { 2102 rxs = &sc->sc_rxsoft[i]; 2103 if (rxs->rxs_mbuf == NULL) { 2104 if ((error = SIP_DECL(add_rxbuf)(sc, i)) != 0) { 2105 printf("%s: unable to allocate or map rx " 2106 "buffer %d, error = %d\n", 2107 sc->sc_dev.dv_xname, i, error); 2108 /* 2109 * XXX Should attempt to run with fewer receive 2110 * XXX buffers instead of just failing. 2111 */ 2112 SIP_DECL(rxdrain)(sc); 2113 goto out; 2114 } 2115 } else 2116 SIP_INIT_RXDESC(sc, i); 2117 } 2118 sc->sc_rxptr = 0; 2119 #ifdef DP83820 2120 sc->sc_rxdiscard = 0; 2121 SIP_RXCHAIN_RESET(sc); 2122 #endif /* DP83820 */ 2123 2124 /* 2125 * Set the configuration register; it's already initialized 2126 * in sip_attach(). 2127 */ 2128 bus_space_write_4(st, sh, SIP_CFG, sc->sc_cfg); 2129 2130 /* 2131 * Initialize the prototype TXCFG register. 2132 */ 2133 #if defined(DP83820) 2134 sc->sc_txcfg = TXCFG_MXDMA_512; 2135 sc->sc_rxcfg = RXCFG_MXDMA_512; 2136 #else 2137 if ((SIP_SIS900_REV(sc, SIS_REV_635) || 2138 SIP_SIS900_REV(sc, SIS_REV_900B)) && 2139 (bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_CFG) & CFG_EDBMASTEN)) { 2140 sc->sc_txcfg = TXCFG_MXDMA_64; 2141 sc->sc_rxcfg = RXCFG_MXDMA_64; 2142 } else { 2143 sc->sc_txcfg = TXCFG_MXDMA_512; 2144 sc->sc_rxcfg = RXCFG_MXDMA_512; 2145 } 2146 #endif /* DP83820 */ 2147 2148 sc->sc_txcfg |= TXCFG_ATP | 2149 (sc->sc_tx_fill_thresh << TXCFG_FLTH_SHIFT) | 2150 sc->sc_tx_drain_thresh; 2151 bus_space_write_4(st, sh, SIP_TXCFG, sc->sc_txcfg); 2152 2153 /* 2154 * Initialize the receive drain threshold if we have never 2155 * done so. 2156 */ 2157 if (sc->sc_rx_drain_thresh == 0) { 2158 /* 2159 * XXX This value should be tuned. This is set to the 2160 * maximum of 248 bytes, and we may be able to improve 2161 * performance by decreasing it (although we should never 2162 * set this value lower than 2; 14 bytes are required to 2163 * filter the packet). 2164 */ 2165 sc->sc_rx_drain_thresh = RXCFG_DRTH >> RXCFG_DRTH_SHIFT; 2166 } 2167 2168 /* 2169 * Initialize the prototype RXCFG register. 2170 */ 2171 sc->sc_rxcfg |= (sc->sc_rx_drain_thresh << RXCFG_DRTH_SHIFT); 2172 bus_space_write_4(st, sh, SIP_RXCFG, sc->sc_rxcfg); 2173 2174 #ifdef DP83820 2175 /* 2176 * Initialize the VLAN/IP receive control register. 2177 * We enable checksum computation on all incoming 2178 * packets, and do not reject packets w/ bad checksums. 2179 */ 2180 reg = 0; 2181 if (ifp->if_capenable & 2182 (IFCAP_CSUM_IPv4|IFCAP_CSUM_TCPv4|IFCAP_CSUM_UDPv4)) 2183 reg |= VRCR_IPEN; 2184 if (sc->sc_ethercom.ec_nvlans != 0) 2185 reg |= VRCR_VTDEN|VRCR_VTREN; 2186 bus_space_write_4(st, sh, SIP_VRCR, reg); 2187 2188 /* 2189 * Initialize the VLAN/IP transmit control register. 2190 * We enable outgoing checksum computation on a 2191 * per-packet basis. 2192 */ 2193 reg = 0; 2194 if (ifp->if_capenable & 2195 (IFCAP_CSUM_IPv4|IFCAP_CSUM_TCPv4|IFCAP_CSUM_UDPv4)) 2196 reg |= VTCR_PPCHK; 2197 if (sc->sc_ethercom.ec_nvlans != 0) 2198 reg |= VTCR_VPPTI; 2199 bus_space_write_4(st, sh, SIP_VTCR, reg); 2200 2201 /* 2202 * If we're using VLANs, initialize the VLAN data register. 2203 * To understand why we bswap the VLAN Ethertype, see section 2204 * 4.2.36 of the DP83820 manual. 
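 *
 * For a concrete value (illustration only): ETHERTYPE_VLAN is 0x8100, so
 * the write below loads VDR with bswap16(0x8100) == 0x0081, presumably so
 * that the tag protocol ID ends up in on-the-wire byte order; see the
 * manual section cited above for the authoritative explanation.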
2205 */ 2206 if (sc->sc_ethercom.ec_nvlans != 0) 2207 bus_space_write_4(st, sh, SIP_VDR, bswap16(ETHERTYPE_VLAN)); 2208 #endif /* DP83820 */ 2209 2210 /* 2211 * Give the transmit and receive rings to the chip. 2212 */ 2213 bus_space_write_4(st, sh, SIP_TXDP, SIP_CDTXADDR(sc, sc->sc_txnext)); 2214 bus_space_write_4(st, sh, SIP_RXDP, SIP_CDRXADDR(sc, sc->sc_rxptr)); 2215 2216 /* 2217 * Initialize the interrupt mask. 2218 */ 2219 sc->sc_imr = ISR_DPERR|ISR_SSERR|ISR_RMABT|ISR_RTABT|ISR_RXSOVR| 2220 ISR_TXURN|ISR_TXDESC|ISR_RXORN|ISR_RXIDLE|ISR_RXDESC; 2221 bus_space_write_4(st, sh, SIP_IMR, sc->sc_imr); 2222 2223 /* Set up the receive filter. */ 2224 (*sc->sc_model->sip_variant->sipv_set_filter)(sc); 2225 2226 /* 2227 * Set the current media. Do this after initializing the prototype 2228 * IMR, since sip_mii_statchg() modifies the IMR for 802.3x flow 2229 * control. 2230 */ 2231 mii_mediachg(&sc->sc_mii); 2232 2233 /* 2234 * Enable interrupts. 2235 */ 2236 bus_space_write_4(st, sh, SIP_IER, IER_IE); 2237 2238 /* 2239 * Start the transmit and receive processes. 2240 */ 2241 bus_space_write_4(st, sh, SIP_CR, CR_RXE | CR_TXE); 2242 2243 /* 2244 * Start the one second MII clock. 2245 */ 2246 callout_reset(&sc->sc_tick_ch, hz, SIP_DECL(tick), sc); 2247 2248 /* 2249 * ...all done! 2250 */ 2251 ifp->if_flags |= IFF_RUNNING; 2252 ifp->if_flags &= ~IFF_OACTIVE; 2253 2254 out: 2255 if (error) 2256 printf("%s: interface not running\n", sc->sc_dev.dv_xname); 2257 return (error); 2258 } 2259 2260 /* 2261 * sip_rxdrain: 2262 * 2263 * Drain the receive queue. 2264 */ 2265 void 2266 SIP_DECL(rxdrain)(struct sip_softc *sc) 2267 { 2268 struct sip_rxsoft *rxs; 2269 int i; 2270 2271 for (i = 0; i < SIP_NRXDESC; i++) { 2272 rxs = &sc->sc_rxsoft[i]; 2273 if (rxs->rxs_mbuf != NULL) { 2274 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); 2275 m_freem(rxs->rxs_mbuf); 2276 rxs->rxs_mbuf = NULL; 2277 } 2278 } 2279 } 2280 2281 /* 2282 * sip_stop: [ ifnet interface function ] 2283 * 2284 * Stop transmission on the interface. 2285 */ 2286 void 2287 SIP_DECL(stop)(struct ifnet *ifp, int disable) 2288 { 2289 struct sip_softc *sc = ifp->if_softc; 2290 bus_space_tag_t st = sc->sc_st; 2291 bus_space_handle_t sh = sc->sc_sh; 2292 struct sip_txsoft *txs; 2293 u_int32_t cmdsts = 0; /* DEBUG */ 2294 2295 /* 2296 * Stop the one second clock. 2297 */ 2298 callout_stop(&sc->sc_tick_ch); 2299 2300 /* Down the MII. */ 2301 mii_down(&sc->sc_mii); 2302 2303 /* 2304 * Disable interrupts. 2305 */ 2306 bus_space_write_4(st, sh, SIP_IER, 0); 2307 2308 /* 2309 * Stop receiver and transmitter. 2310 */ 2311 bus_space_write_4(st, sh, SIP_CR, CR_RXD | CR_TXD); 2312 2313 /* 2314 * Release any queued transmit buffers.
2315 */ 2316 while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) { 2317 if ((ifp->if_flags & IFF_DEBUG) != 0 && 2318 SIMPLEQ_NEXT(txs, txs_q) == NULL && 2319 (le32toh(sc->sc_txdescs[txs->txs_lastdesc].sipd_cmdsts) & 2320 CMDSTS_INTR) == 0) 2321 printf("%s: sip_stop: last descriptor does not " 2322 "have INTR bit set\n", sc->sc_dev.dv_xname); 2323 SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs, txs_q); 2324 #ifdef DIAGNOSTIC 2325 if (txs->txs_mbuf == NULL) { 2326 printf("%s: dirty txsoft with no mbuf chain\n", 2327 sc->sc_dev.dv_xname); 2328 panic("sip_stop"); 2329 } 2330 #endif 2331 cmdsts |= /* DEBUG */ 2332 le32toh(sc->sc_txdescs[txs->txs_lastdesc].sipd_cmdsts); 2333 bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap); 2334 m_freem(txs->txs_mbuf); 2335 txs->txs_mbuf = NULL; 2336 SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); 2337 } 2338 2339 if (disable) 2340 SIP_DECL(rxdrain)(sc); 2341 2342 /* 2343 * Mark the interface down and cancel the watchdog timer. 2344 */ 2345 ifp->if_flags &= ~(IFF_RUNNING | IFF_OACTIVE); 2346 ifp->if_timer = 0; 2347 2348 if ((ifp->if_flags & IFF_DEBUG) != 0 && 2349 (cmdsts & CMDSTS_INTR) == 0 && sc->sc_txfree != SIP_NTXDESC) 2350 printf("%s: sip_stop: no INTR bits set in dirty tx " 2351 "descriptors\n", sc->sc_dev.dv_xname); 2352 } 2353 2354 /* 2355 * sip_read_eeprom: 2356 * 2357 * Read data from the serial EEPROM. 2358 */ 2359 void 2360 SIP_DECL(read_eeprom)(struct sip_softc *sc, int word, int wordcnt, 2361 u_int16_t *data) 2362 { 2363 bus_space_tag_t st = sc->sc_st; 2364 bus_space_handle_t sh = sc->sc_sh; 2365 u_int16_t reg; 2366 int i, x; 2367 2368 for (i = 0; i < wordcnt; i++) { 2369 /* Send CHIP SELECT. */ 2370 reg = EROMAR_EECS; 2371 bus_space_write_4(st, sh, SIP_EROMAR, reg); 2372 2373 /* Shift in the READ opcode. */ 2374 for (x = 3; x > 0; x--) { 2375 if (SIP_EEPROM_OPC_READ & (1 << (x - 1))) 2376 reg |= EROMAR_EEDI; 2377 else 2378 reg &= ~EROMAR_EEDI; 2379 bus_space_write_4(st, sh, SIP_EROMAR, reg); 2380 bus_space_write_4(st, sh, SIP_EROMAR, 2381 reg | EROMAR_EESK); 2382 delay(4); 2383 bus_space_write_4(st, sh, SIP_EROMAR, reg); 2384 delay(4); 2385 } 2386 2387 /* Shift in address. */ 2388 for (x = 6; x > 0; x--) { 2389 if ((word + i) & (1 << (x - 1))) 2390 reg |= EROMAR_EEDI; 2391 else 2392 reg &= ~EROMAR_EEDI; 2393 bus_space_write_4(st, sh, SIP_EROMAR, reg); 2394 bus_space_write_4(st, sh, SIP_EROMAR, 2395 reg | EROMAR_EESK); 2396 delay(4); 2397 bus_space_write_4(st, sh, SIP_EROMAR, reg); 2398 delay(4); 2399 } 2400 2401 /* Shift out data. */ 2402 reg = EROMAR_EECS; 2403 data[i] = 0; 2404 for (x = 16; x > 0; x--) { 2405 bus_space_write_4(st, sh, SIP_EROMAR, 2406 reg | EROMAR_EESK); 2407 delay(4); 2408 if (bus_space_read_4(st, sh, SIP_EROMAR) & EROMAR_EEDO) 2409 data[i] |= (1 << (x - 1)); 2410 bus_space_write_4(st, sh, SIP_EROMAR, reg); 2411 delay(4); 2412 } 2413 2414 /* Clear CHIP SELECT. */ 2415 bus_space_write_4(st, sh, SIP_EROMAR, 0); 2416 delay(4); 2417 } 2418 } 2419 2420 /* 2421 * sip_add_rxbuf: 2422 * 2423 * Add a receive buffer to the indicated descriptor. 
2424 */ 2425 int 2426 SIP_DECL(add_rxbuf)(struct sip_softc *sc, int idx) 2427 { 2428 struct sip_rxsoft *rxs = &sc->sc_rxsoft[idx]; 2429 struct mbuf *m; 2430 int error; 2431 2432 MGETHDR(m, M_DONTWAIT, MT_DATA); 2433 if (m == NULL) 2434 return (ENOBUFS); 2435 2436 MCLGET(m, M_DONTWAIT); 2437 if ((m->m_flags & M_EXT) == 0) { 2438 m_freem(m); 2439 return (ENOBUFS); 2440 } 2441 2442 #if defined(DP83820) 2443 m->m_len = SIP_RXBUF_LEN; 2444 #endif /* DP83820 */ 2445 2446 if (rxs->rxs_mbuf != NULL) 2447 bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap); 2448 2449 rxs->rxs_mbuf = m; 2450 2451 error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap, 2452 m->m_ext.ext_buf, m->m_ext.ext_size, NULL, 2453 BUS_DMA_READ|BUS_DMA_NOWAIT); 2454 if (error) { 2455 printf("%s: can't load rx DMA map %d, error = %d\n", 2456 sc->sc_dev.dv_xname, idx, error); 2457 panic("sip_add_rxbuf"); /* XXX */ 2458 } 2459 2460 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 2461 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 2462 2463 SIP_INIT_RXDESC(sc, idx); 2464 2465 return (0); 2466 } 2467 2468 #if !defined(DP83820) 2469 /* 2470 * sip_sis900_set_filter: 2471 * 2472 * Set up the receive filter. 2473 */ 2474 void 2475 SIP_DECL(sis900_set_filter)(struct sip_softc *sc) 2476 { 2477 bus_space_tag_t st = sc->sc_st; 2478 bus_space_handle_t sh = sc->sc_sh; 2479 struct ethercom *ec = &sc->sc_ethercom; 2480 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 2481 struct ether_multi *enm; 2482 u_int8_t *cp; 2483 struct ether_multistep step; 2484 u_int32_t crc, mchash[16]; 2485 2486 /* 2487 * Initialize the prototype RFCR. 2488 */ 2489 sc->sc_rfcr = RFCR_RFEN; 2490 if (ifp->if_flags & IFF_BROADCAST) 2491 sc->sc_rfcr |= RFCR_AAB; 2492 if (ifp->if_flags & IFF_PROMISC) { 2493 sc->sc_rfcr |= RFCR_AAP; 2494 goto allmulti; 2495 } 2496 2497 /* 2498 * Set up the multicast address filter by passing all multicast 2499 * addresses through a CRC generator, and then using the high-order 2500 * 6 bits as an index into the 128 bit multicast hash table (only 2501 * the lower 16 bits of each 32 bit multicast hash register are 2502 * valid). The high order bits select the register, while the 2503 * rest of the bits select the bit within the register. 2504 */ 2505 2506 memset(mchash, 0, sizeof(mchash)); 2507 2508 ETHER_FIRST_MULTI(step, ec, enm); 2509 while (enm != NULL) { 2510 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 2511 /* 2512 * We must listen to a range of multicast addresses. 2513 * For now, just accept all multicasts, rather than 2514 * trying to set only those filter bits needed to match 2515 * the range. (At this time, the only use of address 2516 * ranges is for IP multicast routing, for which the 2517 * range is big enough to require all bits set.) 2518 */ 2519 goto allmulti; 2520 } 2521 2522 crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN); 2523 2524 if (SIP_SIS900_REV(sc, SIS_REV_635) || 2525 SIP_SIS900_REV(sc, SIS_REV_900B)) { 2526 /* Just want the 8 most significant bits. */ 2527 crc >>= 24; 2528 } else { 2529 /* Just want the 7 most significant bits. */ 2530 crc >>= 25; 2531 } 2532 2533 /* Set the corresponding bit in the hash table. 
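 * A worked example (arbitrary CRC value, illustration only): if the
 * shifted crc is 0xb3 on a 635/900B part, the statement below sets bit
 * (0xb3 & 0xf) == 3 in word (0xb3 >> 4) == 11, i.e. mchash[11] |= 1 << 3;
 * on the older parts crc is only 7 bits wide after the shift, so only
 * mchash[0] through mchash[7] are ever used.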
*/ 2534 mchash[crc >> 4] |= 1 << (crc & 0xf); 2535 2536 ETHER_NEXT_MULTI(step, enm); 2537 } 2538 2539 ifp->if_flags &= ~IFF_ALLMULTI; 2540 goto setit; 2541 2542 allmulti: 2543 ifp->if_flags |= IFF_ALLMULTI; 2544 sc->sc_rfcr |= RFCR_AAM; 2545 2546 setit: 2547 #define FILTER_EMIT(addr, data) \ 2548 bus_space_write_4(st, sh, SIP_RFCR, (addr)); \ 2549 delay(1); \ 2550 bus_space_write_4(st, sh, SIP_RFDR, (data)); \ 2551 delay(1) 2552 2553 /* 2554 * Disable receive filter, and program the node address. 2555 */ 2556 cp = LLADDR(ifp->if_sadl); 2557 FILTER_EMIT(RFCR_RFADDR_NODE0, (cp[1] << 8) | cp[0]); 2558 FILTER_EMIT(RFCR_RFADDR_NODE2, (cp[3] << 8) | cp[2]); 2559 FILTER_EMIT(RFCR_RFADDR_NODE4, (cp[5] << 8) | cp[4]); 2560 2561 if ((ifp->if_flags & IFF_ALLMULTI) == 0) { 2562 /* 2563 * Program the multicast hash table. 2564 */ 2565 FILTER_EMIT(RFCR_RFADDR_MC0, mchash[0]); 2566 FILTER_EMIT(RFCR_RFADDR_MC1, mchash[1]); 2567 FILTER_EMIT(RFCR_RFADDR_MC2, mchash[2]); 2568 FILTER_EMIT(RFCR_RFADDR_MC3, mchash[3]); 2569 FILTER_EMIT(RFCR_RFADDR_MC4, mchash[4]); 2570 FILTER_EMIT(RFCR_RFADDR_MC5, mchash[5]); 2571 FILTER_EMIT(RFCR_RFADDR_MC6, mchash[6]); 2572 FILTER_EMIT(RFCR_RFADDR_MC7, mchash[7]); 2573 if (SIP_SIS900_REV(sc, SIS_REV_635) || 2574 SIP_SIS900_REV(sc, SIS_REV_900B)) { 2575 FILTER_EMIT(RFCR_RFADDR_MC8, mchash[8]); 2576 FILTER_EMIT(RFCR_RFADDR_MC9, mchash[9]); 2577 FILTER_EMIT(RFCR_RFADDR_MC10, mchash[10]); 2578 FILTER_EMIT(RFCR_RFADDR_MC11, mchash[11]); 2579 FILTER_EMIT(RFCR_RFADDR_MC12, mchash[12]); 2580 FILTER_EMIT(RFCR_RFADDR_MC13, mchash[13]); 2581 FILTER_EMIT(RFCR_RFADDR_MC14, mchash[14]); 2582 FILTER_EMIT(RFCR_RFADDR_MC15, mchash[15]); 2583 } 2584 } 2585 #undef FILTER_EMIT 2586 2587 /* 2588 * Re-enable the receiver filter. 2589 */ 2590 bus_space_write_4(st, sh, SIP_RFCR, sc->sc_rfcr); 2591 } 2592 #endif /* ! DP83820 */ 2593 2594 /* 2595 * sip_dp83815_set_filter: 2596 * 2597 * Set up the receive filter. 2598 */ 2599 void 2600 SIP_DECL(dp83815_set_filter)(struct sip_softc *sc) 2601 { 2602 bus_space_tag_t st = sc->sc_st; 2603 bus_space_handle_t sh = sc->sc_sh; 2604 struct ethercom *ec = &sc->sc_ethercom; 2605 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 2606 struct ether_multi *enm; 2607 u_int8_t *cp; 2608 struct ether_multistep step; 2609 u_int32_t crc, hash, slot, bit; 2610 #ifdef DP83820 2611 #define MCHASH_NWORDS 128 2612 #else 2613 #define MCHASH_NWORDS 32 2614 #endif /* DP83820 */ 2615 u_int16_t mchash[MCHASH_NWORDS]; 2616 int i; 2617 2618 /* 2619 * Initialize the prototype RFCR. 2620 * Enable the receive filter, and accept on 2621 * Perfect (destination address) Match 2622 * If IFF_BROADCAST, also accept all broadcast packets. 2623 * If IFF_PROMISC, accept all unicast packets (and later, set 2624 * IFF_ALLMULTI and accept all multicast, too). 2625 */ 2626 sc->sc_rfcr = RFCR_RFEN | RFCR_APM; 2627 if (ifp->if_flags & IFF_BROADCAST) 2628 sc->sc_rfcr |= RFCR_AAB; 2629 if (ifp->if_flags & IFF_PROMISC) { 2630 sc->sc_rfcr |= RFCR_AAP; 2631 goto allmulti; 2632 } 2633 2634 #ifdef DP83820 2635 /* 2636 * Set up the DP83820 multicast address filter by passing all multicast 2637 * addresses through a CRC generator, and then using the high-order 2638 * 11 bits as an index into the 2048 bit multicast hash table. The 2639 * high-order 7 bits select the slot, while the low-order 4 bits 2640 * select the bit within the slot. Note that only the low 16-bits 2641 * of each filter word are used, and there are 128 filter words. 
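 *
 * A worked example (arbitrary CRC value, illustration only): a hash of
 * 0x5a7 (11 bits) selects slot 0x5a7 >> 4 == 90 and bit 0x5a7 & 0xf == 7,
 * i.e. mchash[90] |= 1 << 7; the DP83815 case below works the same way,
 * just with a 9-bit hash and 32 slots.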
2642 */ 2643 #else 2644 /* 2645 * Set up the DP83815 multicast address filter by passing all multicast 2646 * addresses through a CRC generator, and then using the high-order 2647 * 9 bits as an index into the 512 bit multicast hash table. The 2648 * high-order 5 bits select the slot, while the low-order 4 bits 2649 * select the bit within the slot. Note that only the low 16-bits 2650 * of each filter word are used, and there are 32 filter words. 2651 */ 2652 #endif /* DP83820 */ 2653 2654 memset(mchash, 0, sizeof(mchash)); 2655 2656 ifp->if_flags &= ~IFF_ALLMULTI; 2657 ETHER_FIRST_MULTI(step, ec, enm); 2658 if (enm == NULL) 2659 goto setit; 2660 while (enm != NULL) { 2661 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 2662 /* 2663 * We must listen to a range of multicast addresses. 2664 * For now, just accept all multicasts, rather than 2665 * trying to set only those filter bits needed to match 2666 * the range. (At this time, the only use of address 2667 * ranges is for IP multicast routing, for which the 2668 * range is big enough to require all bits set.) 2669 */ 2670 goto allmulti; 2671 } 2672 2673 crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN); 2674 2675 #ifdef DP83820 2676 /* Just want the 11 most significant bits. */ 2677 hash = crc >> 21; 2678 #else 2679 /* Just want the 9 most significant bits. */ 2680 hash = crc >> 23; 2681 #endif /* DP83820 */ 2682 2683 slot = hash >> 4; 2684 bit = hash & 0xf; 2685 2686 /* Set the corresponding bit in the hash table. */ 2687 mchash[slot] |= 1 << bit; 2688 2689 ETHER_NEXT_MULTI(step, enm); 2690 } 2691 sc->sc_rfcr |= RFCR_MHEN; 2692 goto setit; 2693 2694 allmulti: 2695 ifp->if_flags |= IFF_ALLMULTI; 2696 sc->sc_rfcr |= RFCR_AAM; 2697 2698 setit: 2699 #define FILTER_EMIT(addr, data) \ 2700 bus_space_write_4(st, sh, SIP_RFCR, (addr)); \ 2701 delay(1); \ 2702 bus_space_write_4(st, sh, SIP_RFDR, (data)); \ 2703 delay(1) 2704 2705 /* 2706 * Disable receive filter, and program the node address. 2707 */ 2708 cp = LLADDR(ifp->if_sadl); 2709 FILTER_EMIT(RFCR_NS_RFADDR_PMATCH0, (cp[1] << 8) | cp[0]); 2710 FILTER_EMIT(RFCR_NS_RFADDR_PMATCH2, (cp[3] << 8) | cp[2]); 2711 FILTER_EMIT(RFCR_NS_RFADDR_PMATCH4, (cp[5] << 8) | cp[4]); 2712 2713 if ((ifp->if_flags & IFF_ALLMULTI) == 0) { 2714 /* 2715 * Program the multicast hash table. 2716 */ 2717 for (i = 0; i < MCHASH_NWORDS; i++) { 2718 FILTER_EMIT(RFCR_NS_RFADDR_FILTMEM + (i * 2), 2719 mchash[i]); 2720 } 2721 } 2722 #undef FILTER_EMIT 2723 #undef MCHASH_NWORDS 2724 2725 /* 2726 * Re-enable the receiver filter. 2727 */ 2728 bus_space_write_4(st, sh, SIP_RFCR, sc->sc_rfcr); 2729 } 2730 2731 #if defined(DP83820) 2732 /* 2733 * sip_dp83820_mii_readreg: [mii interface function] 2734 * 2735 * Read a PHY register on the MII of the DP83820. 2736 */ 2737 int 2738 SIP_DECL(dp83820_mii_readreg)(struct device *self, int phy, int reg) 2739 { 2740 2741 return (mii_bitbang_readreg(self, &SIP_DECL(dp83820_mii_bitbang_ops), 2742 phy, reg)); 2743 } 2744 2745 /* 2746 * sip_dp83820_mii_writereg: [mii interface function] 2747 * 2748 * Write a PHY register on the MII of the DP83820. 2749 */ 2750 void 2751 SIP_DECL(dp83820_mii_writereg)(struct device *self, int phy, int reg, int val) 2752 { 2753 2754 mii_bitbang_writereg(self, &SIP_DECL(dp83820_mii_bitbang_ops), 2755 phy, reg, val); 2756 } 2757 2758 /* 2759 * sip_dp83820_mii_statchg: [mii interface function] 2760 * 2761 * Callback from MII layer when media changes.
2762 */ 2763 void 2764 SIP_DECL(dp83820_mii_statchg)(struct device *self) 2765 { 2766 struct sip_softc *sc = (struct sip_softc *) self; 2767 u_int32_t cfg; 2768 2769 /* 2770 * Update TXCFG for full-duplex operation. 2771 */ 2772 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0) 2773 sc->sc_txcfg |= (TXCFG_CSI | TXCFG_HBI); 2774 else 2775 sc->sc_txcfg &= ~(TXCFG_CSI | TXCFG_HBI); 2776 2777 /* 2778 * Update RXCFG for full-duplex or loopback. 2779 */ 2780 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0 || 2781 IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_LOOP) 2782 sc->sc_rxcfg |= RXCFG_ATX; 2783 else 2784 sc->sc_rxcfg &= ~RXCFG_ATX; 2785 2786 /* 2787 * Update CFG for MII/GMII. 2788 */ 2789 if (sc->sc_ethercom.ec_if.if_baudrate == IF_Mbps(1000)) 2790 cfg = sc->sc_cfg | CFG_MODE_1000; 2791 else 2792 cfg = sc->sc_cfg; 2793 2794 /* 2795 * XXX 802.3x flow control. 2796 */ 2797 2798 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_CFG, cfg); 2799 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_TXCFG, sc->sc_txcfg); 2800 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RXCFG, sc->sc_rxcfg); 2801 } 2802 2803 /* 2804 * sip_dp83820_mii_bitbang_read: [mii bit-bang interface function] 2805 * 2806 * Read the MII serial port for the MII bit-bang module. 2807 */ 2808 u_int32_t 2809 SIP_DECL(dp83820_mii_bitbang_read)(struct device *self) 2810 { 2811 struct sip_softc *sc = (void *) self; 2812 2813 return (bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_EROMAR)); 2814 } 2815 2816 /* 2817 * sip_dp83820_mii_bitbang_write: [mii bit-bang interface function] 2818 * 2819 * Write the MII serial port for the MII bit-bang module. 2820 */ 2821 void 2822 SIP_DECL(dp83820_mii_bitbang_write)(struct device *self, u_int32_t val) 2823 { 2824 struct sip_softc *sc = (void *) self; 2825 2826 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_EROMAR, val); 2827 } 2828 #else /* ! DP83820 */ 2829 /* 2830 * sip_sis900_mii_readreg: [mii interface function] 2831 * 2832 * Read a PHY register on the MII. 2833 */ 2834 int 2835 SIP_DECL(sis900_mii_readreg)(struct device *self, int phy, int reg) 2836 { 2837 struct sip_softc *sc = (struct sip_softc *) self; 2838 u_int32_t enphy; 2839 2840 /* 2841 * The SiS 900 has only an internal PHY on the MII. Only allow 2842 * MII address 0. 2843 */ 2844 if (sc->sc_model->sip_product == PCI_PRODUCT_SIS_900 && 2845 sc->sc_rev < SIS_REV_635 && phy != 0) 2846 return (0); 2847 2848 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_ENPHY, 2849 (phy << ENPHY_PHYADDR_SHIFT) | (reg << ENPHY_REGADDR_SHIFT) | 2850 ENPHY_RWCMD | ENPHY_ACCESS); 2851 do { 2852 enphy = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_ENPHY); 2853 } while (enphy & ENPHY_ACCESS); 2854 return ((enphy & ENPHY_PHYDATA) >> ENPHY_DATA_SHIFT); 2855 } 2856 2857 /* 2858 * sip_sis900_mii_writereg: [mii interface function] 2859 * 2860 * Write a PHY register on the MII. 2861 */ 2862 void 2863 SIP_DECL(sis900_mii_writereg)(struct device *self, int phy, int reg, int val) 2864 { 2865 struct sip_softc *sc = (struct sip_softc *) self; 2866 u_int32_t enphy; 2867 2868 /* 2869 * The SiS 900 has only an internal PHY on the MII. Only allow 2870 * MII address 0.
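 *
 * As in sip_sis900_mii_readreg() above, the transaction below is started
 * by writing the address and data fields together with ENPHY_ACCESS, and
 * completion is detected by polling SIP_ENPHY until the chip clears
 * ENPHY_ACCESS; roughly:
 *
 *	do {
 *		enphy = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_ENPHY);
 *	} while (enphy & ENPHY_ACCESS);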
2871 */ 2872 if (sc->sc_model->sip_product == PCI_PRODUCT_SIS_900 && 2873 sc->sc_rev < SIS_REV_635 && phy != 0) 2874 return; 2875 2876 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_ENPHY, 2877 (val << ENPHY_DATA_SHIFT) | (phy << ENPHY_PHYADDR_SHIFT) | 2878 (reg << ENPHY_REGADDR_SHIFT) | ENPHY_ACCESS); 2879 do { 2880 enphy = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_ENPHY); 2881 } while (enphy & ENPHY_ACCESS); 2882 } 2883 2884 /* 2885 * sip_sis900_mii_statchg: [mii interface function] 2886 * 2887 * Callback from MII layer when media changes. 2888 */ 2889 void 2890 SIP_DECL(sis900_mii_statchg)(struct device *self) 2891 { 2892 struct sip_softc *sc = (struct sip_softc *) self; 2893 u_int32_t flowctl; 2894 2895 /* 2896 * Update TXCFG for full-duplex operation. 2897 */ 2898 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0) 2899 sc->sc_txcfg |= (TXCFG_CSI | TXCFG_HBI); 2900 else 2901 sc->sc_txcfg &= ~(TXCFG_CSI | TXCFG_HBI); 2902 2903 /* 2904 * Update RXCFG for full-duplex or loopback. 2905 */ 2906 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0 || 2907 IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_LOOP) 2908 sc->sc_rxcfg |= RXCFG_ATX; 2909 else 2910 sc->sc_rxcfg &= ~RXCFG_ATX; 2911 2912 /* 2913 * Update IMR for use of 802.3x flow control. 2914 */ 2915 if ((sc->sc_mii.mii_media_active & IFM_FLOW) != 0) { 2916 sc->sc_imr |= (ISR_PAUSE_END|ISR_PAUSE_ST); 2917 flowctl = FLOWCTL_FLOWEN; 2918 } else { 2919 sc->sc_imr &= ~(ISR_PAUSE_END|ISR_PAUSE_ST); 2920 flowctl = 0; 2921 } 2922 2923 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_TXCFG, sc->sc_txcfg); 2924 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RXCFG, sc->sc_rxcfg); 2925 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_IMR, sc->sc_imr); 2926 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_FLOWCTL, flowctl); 2927 } 2928 2929 /* 2930 * sip_dp83815_mii_readreg: [mii interface function] 2931 * 2932 * Read a PHY register on the MII. 2933 */ 2934 int 2935 SIP_DECL(dp83815_mii_readreg)(struct device *self, int phy, int reg) 2936 { 2937 struct sip_softc *sc = (struct sip_softc *) self; 2938 u_int32_t val; 2939 2940 /* 2941 * The DP83815 only has an internal PHY. Only allow 2942 * MII address 0. 2943 */ 2944 if (phy != 0) 2945 return (0); 2946 2947 /* 2948 * Apparently, after a reset, the DP83815 can take a while 2949 * to respond. During this recovery period, the BMSR returns 2950 * a value of 0. Catch this -- it's not supposed to happen 2951 * (the BMSR has some hardcoded-to-1 bits), and wait for the 2952 * PHY to come back to life. 2953 * 2954 * This works out because the BMSR is the first register 2955 * read during the PHY probe process. 2956 */ 2957 do { 2958 val = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_NS_PHY(reg)); 2959 } while (reg == MII_BMSR && val == 0); 2960 2961 return (val & 0xffff); 2962 } 2963 2964 /* 2965 * sip_dp83815_mii_writereg: [mii interface function] 2966 * 2967 * Write a PHY register to the MII. 2968 */ 2969 void 2970 SIP_DECL(dp83815_mii_writereg)(struct device *self, int phy, int reg, int val) 2971 { 2972 struct sip_softc *sc = (struct sip_softc *) self; 2973 2974 /* 2975 * The DP83815 only has an internal PHY. Only allow 2976 * MII address 0. 2977 */ 2978 if (phy != 0) 2979 return; 2980 2981 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_NS_PHY(reg), val); 2982 } 2983 2984 /* 2985 * sip_dp83815_mii_statchg: [mii interface function] 2986 * 2987 * Callback from MII layer when media changes. 
2988 */ 2989 void 2990 SIP_DECL(dp83815_mii_statchg)(struct device *self) 2991 { 2992 struct sip_softc *sc = (struct sip_softc *) self; 2993 2994 /* 2995 * Update TXCFG for full-duplex operation. 2996 */ 2997 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0) 2998 sc->sc_txcfg |= (TXCFG_CSI | TXCFG_HBI); 2999 else 3000 sc->sc_txcfg &= ~(TXCFG_CSI | TXCFG_HBI); 3001 3002 /* 3003 * Update RXCFG for full-duplex or loopback. 3004 */ 3005 if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0 || 3006 IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_LOOP) 3007 sc->sc_rxcfg |= RXCFG_ATX; 3008 else 3009 sc->sc_rxcfg &= ~RXCFG_ATX; 3010 3011 /* 3012 * XXX 802.3x flow control. 3013 */ 3014 3015 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_TXCFG, sc->sc_txcfg); 3016 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RXCFG, sc->sc_rxcfg); 3017 } 3018 #endif /* DP83820 */ 3019 3020 #if defined(DP83820) 3021 void 3022 SIP_DECL(dp83820_read_macaddr)(struct sip_softc *sc, 3023 const struct pci_attach_args *pa, u_int8_t *enaddr) 3024 { 3025 u_int16_t eeprom_data[SIP_DP83820_EEPROM_LENGTH / 2]; 3026 u_int8_t cksum, *e, match; 3027 int i; 3028 3029 /* 3030 * EEPROM data format for the DP83820 can be found in 3031 * the DP83820 manual, section 4.2.4. 3032 */ 3033 3034 SIP_DECL(read_eeprom)(sc, 0, 3035 sizeof(eeprom_data) / sizeof(eeprom_data[0]), eeprom_data); 3036 3037 match = eeprom_data[SIP_DP83820_EEPROM_CHECKSUM / 2] >> 8; 3038 match = ~(match - 1); 3039 3040 cksum = 0x55; 3041 e = (u_int8_t *) eeprom_data; 3042 for (i = 0; i < SIP_DP83820_EEPROM_CHECKSUM; i++) 3043 cksum += *e++; 3044 3045 if (cksum != match) 3046 printf("%s: Checksum (%x) mismatch (%x)", 3047 sc->sc_dev.dv_xname, cksum, match); 3048 3049 enaddr[0] = eeprom_data[SIP_DP83820_EEPROM_PMATCH2 / 2] & 0xff; 3050 enaddr[1] = eeprom_data[SIP_DP83820_EEPROM_PMATCH2 / 2] >> 8; 3051 enaddr[2] = eeprom_data[SIP_DP83820_EEPROM_PMATCH1 / 2] & 0xff; 3052 enaddr[3] = eeprom_data[SIP_DP83820_EEPROM_PMATCH1 / 2] >> 8; 3053 enaddr[4] = eeprom_data[SIP_DP83820_EEPROM_PMATCH0 / 2] & 0xff; 3054 enaddr[5] = eeprom_data[SIP_DP83820_EEPROM_PMATCH0 / 2] >> 8; 3055 3056 /* Get the GPIOR bits. */ 3057 sc->sc_gpior = eeprom_data[0x04]; 3058 3059 /* Get various CFG related bits. */ 3060 if ((eeprom_data[0x05] >> 0) & 1) 3061 sc->sc_cfg |= CFG_EXT_125; 3062 if ((eeprom_data[0x05] >> 9) & 1) 3063 sc->sc_cfg |= CFG_TBI_EN; 3064 } 3065 #else /* ! DP83820 */ 3066 void 3067 SIP_DECL(sis900_read_macaddr)(struct sip_softc *sc, 3068 const struct pci_attach_args *pa, u_int8_t *enaddr) 3069 { 3070 u_int16_t myea[ETHER_ADDR_LEN / 2]; 3071 3072 switch (sc->sc_rev) { 3073 case SIS_REV_630S: 3074 case SIS_REV_630E: 3075 case SIS_REV_630EA1: 3076 case SIS_REV_630ET: 3077 case SIS_REV_635: 3078 /* 3079 * The MAC address for the on-board Ethernet of 3080 * the SiS 630 chipset is in the NVRAM. Kick 3081 * the chip into re-loading it from NVRAM, and 3082 * read the MAC address out of the filter registers. 
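 *
 * Each RFDR read below returns one 16-bit word of the station address
 * with the low-order byte first, which is why the unpacking at the end
 * of this function takes "myea[n] & 0xff" before "myea[n] >> 8".  For
 * illustration only, with made-up register contents 0x3500, 0x5b60 and
 * 0x0842, the resulting address would be 00:35:60:5b:42:08.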
3083 */ 3084 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_CR, CR_RLD); 3085 3086 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RFCR, 3087 RFCR_RFADDR_NODE0); 3088 myea[0] = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_RFDR) & 3089 0xffff; 3090 3091 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RFCR, 3092 RFCR_RFADDR_NODE2); 3093 myea[1] = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_RFDR) & 3094 0xffff; 3095 3096 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RFCR, 3097 RFCR_RFADDR_NODE4); 3098 myea[2] = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_RFDR) & 3099 0xffff; 3100 break; 3101 3102 default: 3103 SIP_DECL(read_eeprom)(sc, SIP_EEPROM_ETHERNET_ID0 >> 1, 3104 sizeof(myea) / sizeof(myea[0]), myea); 3105 } 3106 3107 enaddr[0] = myea[0] & 0xff; 3108 enaddr[1] = myea[0] >> 8; 3109 enaddr[2] = myea[1] & 0xff; 3110 enaddr[3] = myea[1] >> 8; 3111 enaddr[4] = myea[2] & 0xff; 3112 enaddr[5] = myea[2] >> 8; 3113 } 3114 3115 /* Table and macro to bit-reverse an octet. */ 3116 static const u_int8_t bbr4[] = {0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15}; 3117 #define bbr(v) ((bbr4[(v)&0xf] << 4) | bbr4[((v)>>4) & 0xf]) 3118 3119 void 3120 SIP_DECL(dp83815_read_macaddr)(struct sip_softc *sc, 3121 const struct pci_attach_args *pa, u_int8_t *enaddr) 3122 { 3123 u_int16_t eeprom_data[SIP_DP83815_EEPROM_LENGTH / 2], *ea; 3124 u_int8_t cksum, *e, match; 3125 int i; 3126 3127 SIP_DECL(read_eeprom)(sc, 0, sizeof(eeprom_data) / 3128 sizeof(eeprom_data[0]), eeprom_data); 3129 3130 match = eeprom_data[SIP_DP83815_EEPROM_CHECKSUM/2] >> 8; 3131 match = ~(match - 1); 3132 3133 cksum = 0x55; 3134 e = (u_int8_t *) eeprom_data; 3135 for (i=0 ; i<SIP_DP83815_EEPROM_CHECKSUM ; i++) { 3136 cksum += *e++; 3137 } 3138 if (cksum != match) { 3139 printf("%s: Checksum (%x) mismatch (%x)", 3140 sc->sc_dev.dv_xname, cksum, match); 3141 } 3142 3143 /* 3144 * Unrolled because it makes slightly more sense this way. 3145 * The DP83815 stores the MAC address in bit 0 of word 6 3146 * through bit 15 of word 8. 3147 */ 3148 ea = &eeprom_data[6]; 3149 enaddr[0] = ((*ea & 0x1) << 7); 3150 ea++; 3151 enaddr[0] |= ((*ea & 0xFE00) >> 9); 3152 enaddr[1] = ((*ea & 0x1FE) >> 1); 3153 enaddr[2] = ((*ea & 0x1) << 7); 3154 ea++; 3155 enaddr[2] |= ((*ea & 0xFE00) >> 9); 3156 enaddr[3] = ((*ea & 0x1FE) >> 1); 3157 enaddr[4] = ((*ea & 0x1) << 7); 3158 ea++; 3159 enaddr[4] |= ((*ea & 0xFE00) >> 9); 3160 enaddr[5] = ((*ea & 0x1FE) >> 1); 3161 3162 /* 3163 * In case that's not weird enough, we also need to reverse 3164 * the bits in each byte. This all actually makes more sense 3165 * if you think about the EEPROM storage as an array of bits 3166 * being shifted into bytes, but that's not how we're looking 3167 * at it here... 3168 */ 3169 for (i = 0; i < 6 ;i++) 3170 enaddr[i] = bbr(enaddr[i]); 3171 } 3172 #endif /* DP83820 */ 3173 3174 /* 3175 * sip_mediastatus: [ifmedia interface function] 3176 * 3177 * Get the current interface media status. 3178 */ 3179 void 3180 SIP_DECL(mediastatus)(struct ifnet *ifp, struct ifmediareq *ifmr) 3181 { 3182 struct sip_softc *sc = ifp->if_softc; 3183 3184 mii_pollstat(&sc->sc_mii); 3185 ifmr->ifm_status = sc->sc_mii.mii_media_status; 3186 ifmr->ifm_active = sc->sc_mii.mii_media_active; 3187 } 3188 3189 /* 3190 * sip_mediachange: [ifmedia interface function] 3191 * 3192 * Set hardware to newly-selected media. 3193 */ 3194 int 3195 SIP_DECL(mediachange)(struct ifnet *ifp) 3196 { 3197 struct sip_softc *sc = ifp->if_softc; 3198 3199 if (ifp->if_flags & IFF_UP) 3200 mii_mediachg(&sc->sc_mii); 3201 return (0); 3202 } 3203
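
/*
 * For illustration: the bbr() macro above reverses the bit order within
 * an octet by looking up the 4-bit reversal of each nibble in bbr4[] and
 * swapping the nibbles, e.g.
 *
 *	bbr(0x01) == 0x80
 *	bbr(0x80) == 0x01
 *	bbr(0xa5) == 0xa5	(a bit-order palindrome)
 *
 * which is what lets SIP_DECL(dp83815_read_macaddr) convert the bit order
 * used in the EEPROM into the conventional Ethernet address byte layout.
 */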