1 /* $NetBSD: if_sip.c,v 1.188 2022/09/11 15:23:39 ryo Exp $ */ 2 3 /*- 4 * Copyright (c) 2001, 2002 The NetBSD Foundation, Inc. 5 * All rights reserved. 6 * 7 * This code is derived from software contributed to The NetBSD Foundation 8 * by Jason R. Thorpe. 9 * 10 * Redistribution and use in source and binary forms, with or without 11 * modification, are permitted provided that the following conditions 12 * are met: 13 * 1. Redistributions of source code must retain the above copyright 14 * notice, this list of conditions and the following disclaimer. 15 * 2. Redistributions in binary form must reproduce the above copyright 16 * notice, this list of conditions and the following disclaimer in the 17 * documentation and/or other materials provided with the distribution. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS 20 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 21 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 23 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 24 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 25 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 26 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 27 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 28 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 29 * POSSIBILITY OF SUCH DAMAGE. 30 */ 31 32 /*- 33 * Copyright (c) 1999 Network Computer, Inc. 34 * All rights reserved. 35 * 36 * Redistribution and use in source and binary forms, with or without 37 * modification, are permitted provided that the following conditions 38 * are met: 39 * 1. Redistributions of source code must retain the above copyright 40 * notice, this list of conditions and the following disclaimer. 41 * 2. 
Redistributions in binary form must reproduce the above copyright 42 * notice, this list of conditions and the following disclaimer in the 43 * documentation and/or other materials provided with the distribution. 44 * 3. Neither the name of Network Computer, Inc. nor the names of its 45 * contributors may be used to endorse or promote products derived 46 * from this software without specific prior written permission. 47 * 48 * THIS SOFTWARE IS PROVIDED BY NETWORK COMPUTER, INC. AND CONTRIBUTORS 49 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 50 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 51 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS 52 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR 53 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF 54 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS 55 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN 56 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) 57 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE 58 * POSSIBILITY OF SUCH DAMAGE. 59 */ 60 61 /* 62 * Device driver for the Silicon Integrated Systems SiS 900, 63 * SiS 7016 10/100, National Semiconductor DP83815 10/100, and 64 * National Semiconductor DP83820 10/100/1000 PCI Ethernet 65 * controllers. 66 * 67 * Originally written to support the SiS 900 by Jason R. Thorpe for 68 * Network Computer, Inc. 69 * 70 * TODO: 71 * 72 * - Reduce the Rx interrupt load. 
73 */ 74 75 #include <sys/cdefs.h> 76 __KERNEL_RCSID(0, "$NetBSD: if_sip.c,v 1.188 2022/09/11 15:23:39 ryo Exp $"); 77 78 #include <sys/param.h> 79 #include <sys/systm.h> 80 #include <sys/callout.h> 81 #include <sys/mbuf.h> 82 #include <sys/malloc.h> 83 #include <sys/kernel.h> 84 #include <sys/socket.h> 85 #include <sys/ioctl.h> 86 #include <sys/errno.h> 87 #include <sys/device.h> 88 #include <sys/queue.h> 89 #include <sys/rndsource.h> 90 91 #include <net/if.h> 92 #include <net/if_dl.h> 93 #include <net/if_media.h> 94 #include <net/if_ether.h> 95 #include <net/bpf.h> 96 97 #include <sys/bus.h> 98 #include <sys/intr.h> 99 #include <machine/endian.h> 100 101 #include <dev/mii/mii.h> 102 #include <dev/mii/miivar.h> 103 #include <dev/mii/mii_bitbang.h> 104 105 #include <dev/pci/pcireg.h> 106 #include <dev/pci/pcivar.h> 107 #include <dev/pci/pcidevs.h> 108 109 #include <dev/pci/if_sipreg.h> 110 111 /* 112 * Transmit descriptor list size. This is arbitrary, but allocate 113 * enough descriptors for 128 pending transmissions, and 8 segments 114 * per packet (64 for DP83820 for jumbo frames). 115 * 116 * This MUST work out to a power of 2. 117 */ 118 #define GSIP_NTXSEGS_ALLOC 16 119 #define SIP_NTXSEGS_ALLOC 8 120 121 #define SIP_TXQUEUELEN 256 122 #define MAX_SIP_NTXDESC \ 123 (SIP_TXQUEUELEN * MAX(SIP_NTXSEGS_ALLOC, GSIP_NTXSEGS_ALLOC)) 124 125 /* 126 * Receive descriptor list size. We have one Rx buffer per incoming 127 * packet, so this logic is a little simpler. 128 * 129 * Actually, on the DP83820, we allow the packet to consume more than 130 * one buffer, in order to support jumbo Ethernet frames. In that 131 * case, a packet may consume up to 5 buffers (assuming a 2048 byte 132 * mbuf cluster). 256 receive buffers is only 51 maximum size packets, 133 * so we'd better be quick about handling receive interrupts. 
*/
#define	GSIP_NRXDESC		256
#define	SIP_NRXDESC		128

/* Rx ring sized for the larger of the two chip families. */
#define	MAX_SIP_NRXDESC	MAX(GSIP_NRXDESC, SIP_NRXDESC)

/*
 * Set this to 1 to force-disable using the 64-bit data path
 * on DP83820.
 */
static int	gsip_disable_data64 = 0;

/*
 * Control structures are DMA'd to the SiS900 chip.  We allocate them in
 * a single clump that maps to a single DMA segment to make several things
 * easier.
 */
struct sip_control_data {
	/*
	 * The transmit descriptors.
	 */
	struct sip_desc scd_txdescs[MAX_SIP_NTXDESC];

	/*
	 * The receive descriptors.
	 */
	struct sip_desc scd_rxdescs[MAX_SIP_NRXDESC];
};

/* Byte offsets of descriptors within the control-data DMA clump. */
#define	SIP_CDOFF(x)	offsetof(struct sip_control_data, x)
#define	SIP_CDTXOFF(x)	SIP_CDOFF(scd_txdescs[(x)])
#define	SIP_CDRXOFF(x)	SIP_CDOFF(scd_rxdescs[(x)])

/*
 * Software state for transmit jobs.
 */
struct sip_txsoft {
	struct mbuf *txs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t txs_dmamap;	/* our DMA map */
	int txs_firstdesc;		/* first descriptor in packet */
	int txs_lastdesc;		/* last descriptor in packet */
	SIMPLEQ_ENTRY(sip_txsoft) txs_q;
};

SIMPLEQ_HEAD(sip_txsq, sip_txsoft);

/*
 * Software state for receive jobs.
 */
struct sip_rxsoft {
	struct mbuf *rxs_mbuf;		/* head of our mbuf chain */
	bus_dmamap_t rxs_dmamap;	/* our DMA map */
};

/*
 * How far attach got before failing; sipcom_do_detach() starts at the
 * given stage and falls through the later (earlier-allocated) ones.
 */
enum sip_attach_stage {
	SIP_ATTACH_FIN = 0
	, SIP_ATTACH_CREATE_RXMAP
	, SIP_ATTACH_CREATE_TXMAP
	, SIP_ATTACH_LOAD_MAP
	, SIP_ATTACH_CREATE_MAP
	, SIP_ATTACH_MAP_MEM
	, SIP_ATTACH_ALLOC_MEM
	, SIP_ATTACH_INTR
	, SIP_ATTACH_MAP
};

/*
 * Software state per device.
 */
struct sip_softc {
	device_t sc_dev;		/* generic device information */
	device_suspensor_t	sc_suspensor;
	pmf_qual_t		sc_qual;

	bus_space_tag_t sc_st;		/* bus space tag */
	bus_space_handle_t sc_sh;	/* bus space handle */
	bus_size_t sc_sz;		/* bus space size */
	bus_dma_tag_t sc_dmat;		/* bus DMA tag */
	pci_chipset_tag_t sc_pc;
	bus_dma_segment_t sc_seg;
	struct ethercom sc_ethercom;	/* ethernet common data */

	const struct sip_product *sc_model; /* which model are we? */
	bool sc_gigabit;		/* 1: 83820, 0: other */
	bool sc_dma64;			/* using 64-bit DMA addresses */
	int sc_rev;			/* chip revision */

	/*
	 * Word indices into sip_desc::sipd_words for the buffer pointer,
	 * command/status, and extended-status fields; these differ between
	 * the descriptor layouts (SIP / GSIP / GSIP-64).
	 */
	unsigned int sc_bufptr_idx;
	unsigned int sc_cmdsts_idx;
	unsigned int sc_extsts_idx;	/* DP83820 only */

	void *sc_ih;			/* interrupt cookie */

	struct mii_data sc_mii;		/* MII/media information */

	callout_t sc_tick_ch;		/* tick callout */

	bus_dmamap_t sc_cddmamap;	/* control data DMA map */
#define	sc_cddma	sc_cddmamap->dm_segs[0].ds_addr

	/*
	 * Software state for transmit and receive descriptors.
	 */
	struct sip_txsoft sc_txsoft[SIP_TXQUEUELEN];
	struct sip_rxsoft sc_rxsoft[MAX_SIP_NRXDESC];

	/*
	 * Control data structures.
	 */
	struct sip_control_data *sc_control_data;
#define	sc_txdescs	sc_control_data->scd_txdescs
#define	sc_rxdescs	sc_control_data->scd_rxdescs

#ifdef SIP_EVENT_COUNTERS
	/*
	 * Event counters.
	 */
	struct evcnt sc_ev_txdstall;	/* Tx stalled due to no txd */
	struct evcnt sc_ev_txforceintr;	/* Tx interrupts forced */
	struct evcnt sc_ev_txdintr;	/* Tx descriptor interrupts */
	struct evcnt sc_ev_txiintr;	/* Tx idle interrupts */
	struct evcnt sc_ev_rxintr;	/* Rx interrupts */
	struct evcnt sc_ev_hiberr;	/* HIBERR interrupts */
	struct evcnt sc_ev_rxpause;	/* PAUSE received */
	/* DP83820 only */
	struct evcnt sc_ev_txpause;	/* PAUSE transmitted */
	struct evcnt sc_ev_rxipsum;	/* IP checksums checked in-bound */
	struct evcnt sc_ev_rxtcpsum;	/* TCP checksums checked in-bound */
	struct evcnt sc_ev_rxudpsum;	/* UDP checksums checked in-bound */
	struct evcnt sc_ev_txipsum;	/* IP checksums comp. out-bound */
	struct evcnt sc_ev_txtcpsum;	/* TCP checksums comp. out-bound */
	struct evcnt sc_ev_txudpsum;	/* UDP checksums comp. out-bound */
#endif /* SIP_EVENT_COUNTERS */

	uint32_t sc_txcfg;		/* prototype TXCFG register */
	uint32_t sc_rxcfg;		/* prototype RXCFG register */
	uint32_t sc_imr;		/* prototype IMR register */
	uint32_t sc_rfcr;		/* prototype RFCR register */

	uint32_t sc_cfg;		/* prototype CFG register */

	uint32_t sc_gpior;		/* prototype GPIOR register */

	uint32_t sc_tx_fill_thresh;	/* transmit fill threshold */
	uint32_t sc_tx_drain_thresh;	/* transmit drain threshold */

	uint32_t sc_rx_drain_thresh;	/* receive drain threshold */

	int sc_flowflags;		/* 802.3x flow control flags */
	int sc_rx_flow_thresh;		/* Rx FIFO threshold for flow control */
	int sc_paused;			/* paused indication */

	int sc_txfree;			/* number of free Tx descriptors */
	int sc_txnext;			/* next ready Tx descriptor */
	int sc_txwin;			/* Tx descriptors since last intr */

	struct sip_txsq sc_txfreeq;	/* free Tx descsofts */
	struct sip_txsq sc_txdirtyq;	/* dirty Tx descsofts */

	/* values of interface state at last init */
	struct {
		/* if_capenable */
		uint64_t	if_capenable;
		/* ec_capenable */
		int		ec_capenable;
		/* VLAN_ATTACHED */
		int		is_vlan;
	} sc_prev;

	u_short sc_if_flags;

	int sc_rxptr;		/* next ready Rx descriptor/descsoft */
	int sc_rxdiscard;
	int sc_rxlen;
	struct mbuf *sc_rxhead;
	struct mbuf *sc_rxtail;
	struct mbuf **sc_rxtailp;

	/* Ring sizes are powers of two; the masks implement ring wrap. */
	int sc_ntxdesc;
	int sc_ntxdesc_mask;

	int sc_nrxdesc_mask;

	/*
	 * Per-family constants (register offsets, bit layouts, ring
	 * geometry); points at sip_parm or gsip_parm below.
	 */
	const struct sip_parm {
		const struct sip_regs {
			int r_rxcfg;
			int r_txcfg;
		} p_regs;

		const struct sip_bits {
			uint32_t b_txcfg_mxdma_8;
			uint32_t b_txcfg_mxdma_16;
			uint32_t b_txcfg_mxdma_32;
			uint32_t b_txcfg_mxdma_64;
			uint32_t b_txcfg_mxdma_128;
			uint32_t b_txcfg_mxdma_256;
			uint32_t b_txcfg_mxdma_512;
			uint32_t b_txcfg_flth_mask;
			uint32_t b_txcfg_drth_mask;

			uint32_t b_rxcfg_mxdma_8;
			uint32_t b_rxcfg_mxdma_16;
			uint32_t b_rxcfg_mxdma_32;
			uint32_t b_rxcfg_mxdma_64;
			uint32_t b_rxcfg_mxdma_128;
			uint32_t b_rxcfg_mxdma_256;
			uint32_t b_rxcfg_mxdma_512;

			uint32_t b_isr_txrcmp;
			uint32_t b_isr_rxrcmp;
			uint32_t b_isr_dperr;
			uint32_t b_isr_sserr;
			uint32_t b_isr_rmabt;
			uint32_t b_isr_rtabt;

			uint32_t b_cmdsts_size_mask;
		} p_bits;
		int p_filtmem;
		int p_rxbuf_len;
		bus_size_t p_tx_dmamap_size;
		int p_ntxsegs;
		int p_ntxsegs_alloc;
		int p_nrxdesc;
	} *sc_parm;

	void (*sc_rxintr)(struct sip_softc *);

	krndsource_t rnd_source;	/* random source */
};

#define	sc_bits	sc_parm->p_bits
#define	sc_regs	sc_parm->p_regs

/* Per-family parameter tables: sip_parm (SiS900/DP83815), gsip_parm (DP83820). */
static const struct sip_parm sip_parm = {
	.p_filtmem = OTHER_RFCR_NS_RFADDR_FILTMEM
	, .p_rxbuf_len = MCLBYTES - 1	/* field width */
	, .p_tx_dmamap_size = MCLBYTES
	, .p_ntxsegs = 16
	, .p_ntxsegs_alloc = SIP_NTXSEGS_ALLOC
	, .p_nrxdesc = SIP_NRXDESC
	, .p_bits = {
		.b_txcfg_mxdma_8	= 0x00200000	/* 8 bytes */
		, .b_txcfg_mxdma_16	= 0x00300000	/* 16 bytes */
		, .b_txcfg_mxdma_32	= 0x00400000	/* 32 bytes */
		, .b_txcfg_mxdma_64	= 0x00500000	/* 64 bytes */
		, .b_txcfg_mxdma_128	= 0x00600000	/* 128 bytes */
		, .b_txcfg_mxdma_256	= 0x00700000	/* 256 bytes */
		, .b_txcfg_mxdma_512	= 0x00000000	/* 512 bytes */
		, .b_txcfg_flth_mask	= 0x00003f00	/* Tx fill threshold */
		, .b_txcfg_drth_mask	= 0x0000003f	/* Tx drain threshold */

		, .b_rxcfg_mxdma_8	= 0x00200000	/* 8 bytes */
		, .b_rxcfg_mxdma_16	= 0x00300000	/* 16 bytes */
		, .b_rxcfg_mxdma_32	= 0x00400000	/* 32 bytes */
		, .b_rxcfg_mxdma_64	= 0x00500000	/* 64 bytes */
		, .b_rxcfg_mxdma_128	= 0x00600000	/* 128 bytes */
		, .b_rxcfg_mxdma_256	= 0x00700000	/* 256 bytes */
		, .b_rxcfg_mxdma_512	= 0x00000000	/* 512 bytes */

		, .b_isr_txrcmp		= 0x02000000	/* transmit reset complete */
		, .b_isr_rxrcmp		= 0x01000000	/* receive reset complete */
		, .b_isr_dperr		= 0x00800000	/* detected parity error */
		, .b_isr_sserr		= 0x00400000	/* signalled system error */
		, .b_isr_rmabt		= 0x00200000	/* received master abort */
		, .b_isr_rtabt		= 0x00100000	/* received target abort */
		, .b_cmdsts_size_mask	= OTHER_CMDSTS_SIZE_MASK
	}
	, .p_regs = {
		.r_rxcfg = OTHER_SIP_RXCFG,
		.r_txcfg = OTHER_SIP_TXCFG
	}
}, gsip_parm = {
	.p_filtmem = DP83820_RFCR_NS_RFADDR_FILTMEM
	, .p_rxbuf_len = MCLBYTES - 8
	, .p_tx_dmamap_size = ETHER_MAX_LEN_JUMBO
	, .p_ntxsegs = 64
	, .p_ntxsegs_alloc = GSIP_NTXSEGS_ALLOC
	, .p_nrxdesc = GSIP_NRXDESC
	, .p_bits = {
		.b_txcfg_mxdma_8	= 0x00100000	/* 8 bytes */
		, .b_txcfg_mxdma_16	= 0x00200000	/* 16 bytes */
		, .b_txcfg_mxdma_32	= 0x00300000	/* 32 bytes */
		, .b_txcfg_mxdma_64	= 0x00400000	/* 64 bytes */
		, .b_txcfg_mxdma_128	= 0x00500000	/* 128 bytes */
		, .b_txcfg_mxdma_256	= 0x00600000	/* 256 bytes */
		, .b_txcfg_mxdma_512	= 0x00700000	/* 512 bytes */
		, .b_txcfg_flth_mask	= 0x0000ff00	/* Tx
fill threshold */
		, .b_txcfg_drth_mask	= 0x000000ff	/* Tx drain threshold */

		, .b_rxcfg_mxdma_8	= 0x00100000	/* 8 bytes */
		, .b_rxcfg_mxdma_16	= 0x00200000	/* 16 bytes */
		, .b_rxcfg_mxdma_32	= 0x00300000	/* 32 bytes */
		, .b_rxcfg_mxdma_64	= 0x00400000	/* 64 bytes */
		, .b_rxcfg_mxdma_128	= 0x00500000	/* 128 bytes */
		, .b_rxcfg_mxdma_256	= 0x00600000	/* 256 bytes */
		, .b_rxcfg_mxdma_512	= 0x00700000	/* 512 bytes */

		, .b_isr_txrcmp		= 0x00400000	/* transmit reset complete */
		, .b_isr_rxrcmp		= 0x00200000	/* receive reset complete */
		, .b_isr_dperr		= 0x00100000	/* detected parity error */
		, .b_isr_sserr		= 0x00080000	/* signalled system error */
		, .b_isr_rmabt		= 0x00040000	/* received master abort */
		, .b_isr_rtabt		= 0x00020000	/* received target abort */
		, .b_cmdsts_size_mask	= DP83820_CMDSTS_SIZE_MASK
	}
	, .p_regs = {
		.r_rxcfg = DP83820_SIP_RXCFG,
		.r_txcfg = DP83820_SIP_TXCFG
	}
};

/* Next Tx descriptor index after "x", wrapping (ring size is a power of 2). */
static inline int
sip_nexttx(const struct sip_softc *sc, int x)
{
	return (x + 1) & sc->sc_ntxdesc_mask;
}

/* Next Rx descriptor index after "x", wrapping (ring size is a power of 2). */
static inline int
sip_nextrx(const struct sip_softc *sc, int x)
{
	return (x + 1) & sc->sc_nrxdesc_mask;
}

/* 83820 only: reset the multi-buffer Rx mbuf chain being assembled. */
static inline void
sip_rxchain_reset(struct sip_softc *sc)
{
	sc->sc_rxtailp = &sc->sc_rxhead;
	*sc->sc_rxtailp = NULL;
	sc->sc_rxlen = 0;
}

/* 83820 only: append mbuf "m" to the Rx chain being assembled. */
static inline void
sip_rxchain_link(struct sip_softc *sc, struct mbuf *m)
{
	*sc->sc_rxtailp = sc->sc_rxtail = m;
	sc->sc_rxtailp = &m->m_next;
}

#ifdef SIP_EVENT_COUNTERS
#define	SIP_EVCNT_INCR(ev)	(ev)->ev_count++
#else
#define	SIP_EVCNT_INCR(ev)	/* nothing */
#endif

/* Bus (DMA) addresses of the Tx/Rx descriptors within the control clump. */
#define	SIP_CDTXADDR(sc, x)	((sc)->sc_cddma + SIP_CDTXOFF((x)))
#define	SIP_CDRXADDR(sc, x)	((sc)->sc_cddma + SIP_CDRXOFF((x)))

/*
 * Point the chip at the head of the Rx descriptor list.  On the
 * DP83820 the high 32 bits of the address are written first.
 */
static inline void
sip_set_rxdp(struct sip_softc *sc, bus_addr_t addr)
{
	if (sc->sc_gigabit)
		bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RXDP_HI,
		    BUS_ADDR_HI32(addr));
	bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RXDP, BUS_ADDR_LO32(addr));
}

/*
 * Point the chip at the head of the Tx descriptor list.  On the
 * DP83820 the high 32 bits of the address are written first.
 */
static inline void
sip_set_txdp(struct sip_softc *sc, bus_addr_t addr)
{
	if (sc->sc_gigabit)
		bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_TXDP_HI,
		    BUS_ADDR_HI32(addr));
	bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_TXDP, BUS_ADDR_LO32(addr));
}

/*
 * sip_cdtxsync:
 *
 *	bus_dmamap_sync() the n0 Tx descriptors starting at index x0,
 *	in at most two pieces if the range wraps past the end of the ring.
 */
static inline void
sip_cdtxsync(struct sip_softc *sc, const int x0, const int n0, const int ops)
{
	int x, n;

	x = x0;
	n = n0;

	/* If it will wrap around, sync to the end of the ring. */
	if (x + n > sc->sc_ntxdesc) {
		bus_dmamap_sync(sc->sc_dmat, sc->sc_cddmamap,
		    SIP_CDTXOFF(x), sizeof(struct sip_desc) *
		    (sc->sc_ntxdesc - x), ops);
		n -= (sc->sc_ntxdesc - x);
		x = 0;
	}

	/* Now sync whatever is left. */
	bus_dmamap_sync(sc->sc_dmat, sc->sc_cddmamap,
	    SIP_CDTXOFF(x), sizeof(struct sip_desc) * n, ops);
}

/* bus_dmamap_sync() the single Rx descriptor at index x. */
static inline void
sip_cdrxsync(struct sip_softc *sc, int x, int ops)
{
	bus_dmamap_sync(sc->sc_dmat, sc->sc_cddmamap,
	    SIP_CDRXOFF(x), sizeof(struct sip_desc), ops);
}

/*
 * sip_init_txring:
 *
 *	Zero the Tx descriptors, chain each one's link field to the
 *	DMA address of its successor (wrapping at the end), sync the
 *	whole ring, and reset the software Tx ring state.
 */
static void
sip_init_txring(struct sip_softc *sc)
{
	struct sip_desc *sipd;
	bus_addr_t next_desc;
	int i;

	memset(sc->sc_txdescs, 0, sizeof(sc->sc_txdescs));
	for (i = 0; i < sc->sc_ntxdesc; i++) {
		sipd = &sc->sc_txdescs[i];
		next_desc = SIP_CDTXADDR(sc, sip_nexttx(sc, i));
		if (sc->sc_dma64) {
			sipd->sipd_words[GSIP64_DESC_LINK_LO] =
			    htole32(BUS_ADDR_LO32(next_desc));
			sipd->sipd_words[GSIP64_DESC_LINK_HI] =
			    htole32(BUS_ADDR_HI32(next_desc));
		} else {
			/* SIP_DESC_LINK == GSIP_DESC_LINK */
			sipd->sipd_words[SIP_DESC_LINK] = htole32(next_desc);
		}
	}
	sip_cdtxsync(sc, 0, sc->sc_ntxdesc,
	    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
	sc->sc_txfree = sc->sc_ntxdesc;
	sc->sc_txnext = 0;
	sc->sc_txwin = 0;
}

/*
 * sip_init_txdesc:
 *
 *	Fill in Tx descriptor x: buffer pointer, cleared extended
 *	status, and the given cmdsts word.  The caller is responsible
 *	for the subsequent sip_cdtxsync().
 */
static inline void
sip_init_txdesc(struct sip_softc *sc, int x, bus_addr_t bufptr, uint32_t cmdsts)
{
	struct sip_desc *sipd = &sc->sc_txdescs[x];

	if (sc->sc_dma64) {
		sipd->sipd_words[GSIP64_DESC_BUFPTR_LO] =
		    htole32(BUS_ADDR_LO32(bufptr));
		sipd->sipd_words[GSIP64_DESC_BUFPTR_HI] =
		    htole32(BUS_ADDR_HI32(bufptr));
	} else {
		sipd->sipd_words[sc->sc_bufptr_idx] = htole32(bufptr);
	}
	sipd->sipd_words[sc->sc_extsts_idx] = 0;
	sipd->sipd_words[sc->sc_cmdsts_idx] = htole32(cmdsts);
	/* sip_cdtxsync() will be done later. */
}

/*
 * sip_init_rxdesc:
 *
 *	Initialize Rx descriptor x to receive into its descsoft's DMA
 *	buffer.  The link/bufptr/extsts words are synced to memory
 *	before the cmdsts word is written and synced, so the chip does
 *	not observe a stale descriptor body with a fresh cmdsts.
 */
static inline void
sip_init_rxdesc(struct sip_softc *sc, int x)
{
	struct sip_rxsoft *rxs = &sc->sc_rxsoft[x];
	struct sip_desc *sipd = &sc->sc_rxdescs[x];
	const bus_addr_t next_desc = SIP_CDRXADDR(sc, sip_nextrx(sc, x));

	if (sc->sc_dma64) {
		sipd->sipd_words[GSIP64_DESC_LINK_LO] =
		    htole32(BUS_ADDR_LO32(next_desc));
		sipd->sipd_words[GSIP64_DESC_LINK_HI] =
		    htole32(BUS_ADDR_HI32(next_desc));
		sipd->sipd_words[GSIP64_DESC_BUFPTR_LO] =
		    htole32(BUS_ADDR_LO32(rxs->rxs_dmamap->dm_segs[0].ds_addr));
		sipd->sipd_words[GSIP64_DESC_BUFPTR_HI] =
		    htole32(BUS_ADDR_HI32(rxs->rxs_dmamap->dm_segs[0].ds_addr));
	} else {
		sipd->sipd_words[SIP_DESC_LINK] = htole32(next_desc);
		sipd->sipd_words[sc->sc_bufptr_idx] =
		    htole32(rxs->rxs_dmamap->dm_segs[0].ds_addr);
	}
	sipd->sipd_words[sc->sc_extsts_idx] = 0;
	sip_cdrxsync(sc, x, BUS_DMASYNC_PREWRITE);
	sipd->sipd_words[sc->sc_cmdsts_idx] =
	    htole32(CMDSTS_INTR | (sc->sc_parm->p_rxbuf_len &
	    sc->sc_bits.b_cmdsts_size_mask));
	sip_cdrxsync(sc, x, BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);
}

/* True iff the softc matches the given PCI vendor/product/revision. */
#define	SIP_CHIP_VERS(sc, v, p, r)					\
	((sc)->sc_model->sip_vendor == (v) &&				\
	 (sc)->sc_model->sip_product == (p) &&				\
	 (sc)->sc_rev == (r))

/* True iff the softc matches the given PCI vendor/product (any revision). */
#define	SIP_CHIP_MODEL(sc, v, p)					\
((sc)->sc_model->sip_vendor == (v) &&				\
	 (sc)->sc_model->sip_product == (p))

/* True iff the softc is a SiS 900 of the given revision. */
#define	SIP_SIS900_REV(sc, rev)						\
	SIP_CHIP_VERS((sc), PCI_VENDOR_SIS, PCI_PRODUCT_SIS_900, (rev))

#define	SIP_TIMEOUT	1000

/*
 * Forward declarations.
 */
static int	sip_ifflags_cb(struct ethercom *);
static void	sipcom_start(struct ifnet *);
static void	sipcom_watchdog(struct ifnet *);
static int	sipcom_ioctl(struct ifnet *, u_long, void *);
static int	sipcom_init(struct ifnet *);
static void	sipcom_stop(struct ifnet *, int);

static bool	sipcom_reset(struct sip_softc *);
static void	sipcom_rxdrain(struct sip_softc *);
static int	sipcom_add_rxbuf(struct sip_softc *, int);
static void	sipcom_read_eeprom(struct sip_softc *, int, int,
		    uint16_t *);
static void	sipcom_tick(void *);

static void	sipcom_sis900_set_filter(struct sip_softc *);
static void	sipcom_dp83815_set_filter(struct sip_softc *);

static void	sipcom_dp83820_read_macaddr(struct sip_softc *,
		    const struct pci_attach_args *, uint8_t *);
static void	sipcom_sis900_eeprom_delay(struct sip_softc *sc);
static void	sipcom_sis900_read_macaddr(struct sip_softc *,
		    const struct pci_attach_args *, uint8_t *);
static void	sipcom_dp83815_read_macaddr(struct sip_softc *,
		    const struct pci_attach_args *, uint8_t *);

static int	sipcom_intr(void *);
static void	sipcom_txintr(struct sip_softc *);
static void	sip_rxintr(struct sip_softc *);
static void	gsip_rxintr(struct sip_softc *);

static int	sipcom_dp83820_mii_readreg(device_t, int, int, uint16_t *);
static int	sipcom_dp83820_mii_writereg(device_t, int, int, uint16_t);
static void	sipcom_dp83820_mii_statchg(struct ifnet *);

static int	sipcom_sis900_mii_readreg(device_t, int, int, uint16_t *);
static int	sipcom_sis900_mii_writereg(device_t, int, int, uint16_t);
static void	sipcom_sis900_mii_statchg(struct ifnet *);

static int	sipcom_dp83815_mii_readreg(device_t, int, int, uint16_t *);
static int	sipcom_dp83815_mii_writereg(device_t, int, int, uint16_t);
static void	sipcom_dp83815_mii_statchg(struct ifnet *);

static void	sipcom_mediastatus(struct ifnet *, struct ifmediareq *);

static int	sipcom_match(device_t, cfdata_t, void *);
static void	sipcom_attach(device_t, device_t, void *);
static void	sipcom_do_detach(device_t, enum sip_attach_stage);
static int	sipcom_detach(device_t, int);
static bool	sipcom_resume(device_t, const pmf_qual_t *);
static bool	sipcom_suspend(device_t, const pmf_qual_t *);

/*
 * NOTE(review): presumably copy-small-packet tunables consumed by the
 * Rx paths (not visible in this chunk) -- confirm against gsip_rxintr()
 * and sip_rxintr().
 */
int	gsip_copy_small = 0;
int	sip_copy_small = 0;

/* Autoconfiguration glue for the gigabit (gsip) and 10/100 (sip) attachments. */
CFATTACH_DECL3_NEW(gsip, sizeof(struct sip_softc),
    sipcom_match, sipcom_attach, sipcom_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);
CFATTACH_DECL3_NEW(sip, sizeof(struct sip_softc),
    sipcom_match, sipcom_attach, sipcom_detach, NULL, NULL, NULL,
    DVF_DETACH_SHUTDOWN);

/*
 * Descriptions of the variants of the SiS900: per-variant MII access,
 * filter programming, and MAC address extraction methods.
 */
struct sip_variant {
	int	(*sipv_mii_readreg)(device_t, int, int, uint16_t *);
	int	(*sipv_mii_writereg)(device_t, int, int, uint16_t);
	void	(*sipv_mii_statchg)(struct ifnet *);
	void	(*sipv_set_filter)(struct sip_softc *);
	void	(*sipv_read_macaddr)(struct sip_softc *,
		    const struct pci_attach_args *, uint8_t *);
};

static uint32_t	sipcom_mii_bitbang_read(device_t);
static void	sipcom_mii_bitbang_write(device_t, uint32_t);

/* Bit-banged MII access via the EROMAR register. */
static const struct mii_bitbang_ops sipcom_mii_bitbang_ops = {
	sipcom_mii_bitbang_read,
	sipcom_mii_bitbang_write,
	{
		EROMAR_MDIO,		/* MII_BIT_MDO */
		EROMAR_MDIO,		/* MII_BIT_MDI */
		EROMAR_MDC,		/* MII_BIT_MDC */
		EROMAR_MDDIR,		/* MII_BIT_DIR_HOST_PHY */
		0,			/* MII_BIT_DIR_PHY_HOST */
	}
};

/* Note: the DP83820 shares the DP83815's filter-programming method. */
static const struct sip_variant sipcom_variant_dp83820 = {
	sipcom_dp83820_mii_readreg,
	sipcom_dp83820_mii_writereg,
	sipcom_dp83820_mii_statchg,
	sipcom_dp83815_set_filter,
	sipcom_dp83820_read_macaddr,
};

static const struct sip_variant sipcom_variant_sis900 = {
	sipcom_sis900_mii_readreg,
	sipcom_sis900_mii_writereg,
	sipcom_sis900_mii_statchg,
	sipcom_sis900_set_filter,
	sipcom_sis900_read_macaddr,
};

static const struct sip_variant sipcom_variant_dp83815 = {
	sipcom_dp83815_mii_readreg,
	sipcom_dp83815_mii_writereg,
	sipcom_dp83815_mii_statchg,
	sipcom_dp83815_set_filter,
	sipcom_dp83815_read_macaddr,
};


/*
 * Devices supported by this driver.
*/
static const struct sip_product {
	pci_vendor_id_t		sip_vendor;
	pci_product_id_t	sip_product;
	const char		*sip_name;
	const struct sip_variant *sip_variant;
	bool			sip_gigabit;
} sipcom_products[] = {	/* table terminated by a NULL sip_name entry */
	{ PCI_VENDOR_NS,	PCI_PRODUCT_NS_DP83820,
	  "NatSemi DP83820 Gigabit Ethernet",
	  &sipcom_variant_dp83820, true },

	{ PCI_VENDOR_SIS,	PCI_PRODUCT_SIS_900,
	  "SiS 900 10/100 Ethernet",
	  &sipcom_variant_sis900, false },
	{ PCI_VENDOR_SIS,	PCI_PRODUCT_SIS_7016,
	  "SiS 7016 10/100 Ethernet",
	  &sipcom_variant_sis900, false },

	{ PCI_VENDOR_NS,	PCI_PRODUCT_NS_DP83815,
	  "NatSemi DP83815 10/100 Ethernet",
	  &sipcom_variant_dp83815, false },

	{ 0,			0,
	  NULL,
	  NULL, false },
};

/*
 * sipcom_lookup:
 *
 *	Find the product-table entry matching the PCI ID in "pa" and
 *	the requested family ("gigabit" selects DP83820 entries).
 *	Returns NULL if the device is not supported.
 */
static const struct sip_product *
sipcom_lookup(const struct pci_attach_args *pa, bool gigabit)
{
	const struct sip_product *sip;

	for (sip = sipcom_products; sip->sip_name != NULL; sip++) {
		if (PCI_VENDOR(pa->pa_id) == sip->sip_vendor &&
		    PCI_PRODUCT(pa->pa_id) == sip->sip_product &&
		    sip->sip_gigabit == gigabit)
			return sip;
	}
	return NULL;
}

/*
 * I really hate stupid hardware vendors.  There's a bit in the EEPROM
 * which indicates if the card can do 64-bit data transfers.  Unfortunately,
 * several vendors of 32-bit cards fail to clear this bit in the EEPROM,
 * which means we try to use 64-bit data transfers on those cards if we
 * happen to be plugged into a 32-bit slot.
 *
 * What we do is use this table of cards known to be 64-bit cards.  If
 * you have a 64-bit card whose subsystem ID is not listed in this table,
 * send the output of "pcictl dump ..." of the device to me so that your
 * card will use the 64-bit data path when plugged into a 64-bit slot.
 *
 *	-- Jason R. Thorpe <thorpej@NetBSD.org>
 *	   June 30, 2002
 */
static int
sipcom_check_64bit(const struct pci_attach_args *pa)
{
	/* Subsystem vendor/product IDs of known-64-bit cards. */
	static const struct {
		pci_vendor_id_t c64_vendor;
		pci_product_id_t c64_product;
	} card64[] = {
		/* Asante GigaNIX */
		{ 0x128a,	0x0002 },

		/* Accton EN1407-T, Planex GN-1000TE */
		{ 0x1113,	0x1407 },

		/* Netgear GA621 */
		{ 0x1385,	0x621a },

		/* Netgear GA622 */
		{ 0x1385,	0x622a },

		/* SMC EZ Card 1000 (9462TX) */
		{ 0x10b8,	0x9462 },

		{ 0, 0 }
	};
	pcireg_t subsys;
	int i;

	subsys = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);

	for (i = 0; card64[i].c64_vendor != 0; i++) {
		if (PCI_VENDOR(subsys) == card64[i].c64_vendor &&
		    PCI_PRODUCT(subsys) == card64[i].c64_product)
			return 1;
	}

	return 0;
}

/*
 * sipcom_match:
 *
 *	Autoconfiguration match routine; the attachment name ("gsip"
 *	vs. "sip") selects which family of products to match.
 */
static int
sipcom_match(device_t parent, cfdata_t cf, void *aux)
{
	struct pci_attach_args *pa = aux;

	if (sipcom_lookup(pa, strcmp(cf->cf_name, "gsip") == 0) != NULL)
		return 1;

	return 0;
}

/*
 * sipcom_dp83820_attach:
 *
 *	DP83820-specific attach work: load the EEPROM configuration,
 *	probe 64-bit slot/data/address capabilities, and build the
 *	prototype CFG register in sc->sc_cfg.
 */
static void
sipcom_dp83820_attach(struct sip_softc *sc, struct pci_attach_args *pa)
{
	uint32_t reg;
	int i;

	/*
	 * Cause the chip to load configuration data from the EEPROM.
*/
	bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_PTSCR, PTSCR_EELOAD_EN);
	/* Poll for EEPROM autoload completion (up to ~100ms). */
	for (i = 0; i < 10000; i++) {
		delay(10);
		if ((bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_PTSCR) &
		    PTSCR_EELOAD_EN) == 0)
			break;
	}
	if (bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_PTSCR) &
	    PTSCR_EELOAD_EN) {
		printf("%s: timeout loading configuration from EEPROM\n",
		    device_xname(sc->sc_dev));
		return;
	}

	sc->sc_gpior = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_GPIOR);

	reg = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_CFG);
	if (reg & CFG_PCI64_DET) {
		const char *using64 = NULL;

		if (reg & CFG_DATA64_EN) {
			/*
			 * Check to see if this card is 64-bit.  If so,
			 * enable 64-bit data transfers.
			 *
			 * We can't trust the DATA64_EN bit in the EEPROM,
			 * because vendors of 32-bit cards fail to clear
			 * that bit in many cases (yet the card still detects
			 * that it's in a 64-bit slot because I guess they
			 * wired up ACK64# and REQ64#).
			 */
			if (gsip_disable_data64)
				using64 = "force-disabled";
			else if (sipcom_check_64bit(pa)) {
				sc->sc_cfg |= CFG_DATA64_EN;
				using64 = "enabled";
			} else
				using64 = "disabled (32-bit card)";
		} else {
			using64 = "disabled in EEPROM";
		}
		printf("%s: 64-bit slot detected, 64-bit transfers %s\n",
		    device_xname(sc->sc_dev), using64);
	}

	/*
	 * The T64ADDR bit is loaded by the chip from the EEPROM and
	 * is read-only.
	 */
	if (reg & CFG_T64ADDR)
		sc->sc_cfg |= CFG_T64ADDR;

	/*
	 * We can use 64-bit DMA addressing regardless of what
	 * sort of slot we're in.
	 */
	if (pci_dma64_available(pa)) {
		sc->sc_dmat = pa->pa_dmat64;
		sc->sc_cfg |= CFG_M64ADDR;
		sc->sc_dma64 = true;
	}

	/* Report the EEPROM-selected clock source and TBI mode, if any. */
	if (reg & (CFG_TBI_EN | CFG_EXT_125)) {
		const char *sep = "";
		printf("%s: using ", device_xname(sc->sc_dev));
		if (reg & CFG_EXT_125) {
			sc->sc_cfg |= CFG_EXT_125;
			printf("%sexternal 125MHz clock", sep);
			sep = ", ";
		}
		if (reg & CFG_TBI_EN) {
			sc->sc_cfg |= CFG_TBI_EN;
			printf("%sten-bit interface", sep);
			sep = ", ";
		}
		printf("\n");
	}
	/* Disable memory-read-multiple/MWI unless both bus and EEPROM allow. */
	if ((pa->pa_flags & PCI_FLAGS_MRM_OKAY) == 0 ||
	    (reg & CFG_MRM_DIS) != 0)
		sc->sc_cfg |= CFG_MRM_DIS;
	if ((pa->pa_flags & PCI_FLAGS_MWI_OKAY) == 0 ||
	    (reg & CFG_MWI_DIS) != 0)
		sc->sc_cfg |= CFG_MWI_DIS;

	/*
	 * Use the extended descriptor format on the DP83820.  This
	 * gives us an interface to VLAN tagging and IPv4/TCP/UDP
	 * checksumming.
	 */
	sc->sc_cfg |= CFG_EXTSTS_EN;
}

/*
 * sipcom_detach:
 *
 *	Autoconfiguration detach routine; tears down everything that a
 *	fully successful attach allocated, at splnet.
 */
static int
sipcom_detach(device_t self, int flags)
{
	int s;

	s = splnet();
	sipcom_do_detach(self, SIP_ATTACH_FIN);
	splx(s);

	return 0;
}

/*
 * sipcom_do_detach:
 *
 *	Release the resources allocated up to attach stage "stage".
 *	Stages are ordered so that starting at any one and falling
 *	through frees resources in reverse order of allocation.
 */
static void
sipcom_do_detach(device_t self, enum sip_attach_stage stage)
{
	int i;
	struct sip_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;

	/*
	 * Free any resources we've allocated during attach.
	 * Do this in reverse order and fall through.
	 */
	switch (stage) {
	case SIP_ATTACH_FIN:
		sipcom_stop(ifp, 1);
		pmf_device_deregister(self);
#ifdef SIP_EVENT_COUNTERS
		/*
		 * Detach event counters.
		 */
		evcnt_detach(&sc->sc_ev_txforceintr);
		evcnt_detach(&sc->sc_ev_txdstall);
		evcnt_detach(&sc->sc_ev_hiberr);
		evcnt_detach(&sc->sc_ev_rxintr);
		evcnt_detach(&sc->sc_ev_txiintr);
		evcnt_detach(&sc->sc_ev_txdintr);
		if (!sc->sc_gigabit) {
			evcnt_detach(&sc->sc_ev_rxpause);
		} else {
			evcnt_detach(&sc->sc_ev_txudpsum);
			evcnt_detach(&sc->sc_ev_txtcpsum);
			evcnt_detach(&sc->sc_ev_txipsum);
			evcnt_detach(&sc->sc_ev_rxudpsum);
			evcnt_detach(&sc->sc_ev_rxtcpsum);
			evcnt_detach(&sc->sc_ev_rxipsum);
			evcnt_detach(&sc->sc_ev_txpause);
			evcnt_detach(&sc->sc_ev_rxpause);
		}
#endif /* SIP_EVENT_COUNTERS */

		rnd_detach_source(&sc->rnd_source);

		ether_ifdetach(ifp);
		if_detach(ifp);
		mii_detach(&sc->sc_mii, MII_PHY_ANY, MII_OFFSET_ANY);
		ifmedia_fini(&sc->sc_mii.mii_media);

		/*FALLTHROUGH*/
	case SIP_ATTACH_CREATE_RXMAP:
		for (i = 0; i < sc->sc_parm->p_nrxdesc; i++) {
			if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
				bus_dmamap_destroy(sc->sc_dmat,
				    sc->sc_rxsoft[i].rxs_dmamap);
		}
		/*FALLTHROUGH*/
	case SIP_ATTACH_CREATE_TXMAP:
		for (i = 0; i < SIP_TXQUEUELEN; i++) {
			if (sc->sc_txsoft[i].txs_dmamap != NULL)
				bus_dmamap_destroy(sc->sc_dmat,
				    sc->sc_txsoft[i].txs_dmamap);
		}
		/*FALLTHROUGH*/
	case SIP_ATTACH_LOAD_MAP:
		bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
		/*FALLTHROUGH*/
	case SIP_ATTACH_CREATE_MAP:
		bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
		/*FALLTHROUGH*/
	case SIP_ATTACH_MAP_MEM:
		bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
		    sizeof(struct sip_control_data));
		/*FALLTHROUGH*/
	case SIP_ATTACH_ALLOC_MEM:
		bus_dmamem_free(sc->sc_dmat, &sc->sc_seg, 1);
		/* FALLTHROUGH*/
	case SIP_ATTACH_INTR:
		pci_intr_disestablish(sc->sc_pc, sc->sc_ih);
		/* FALLTHROUGH*/
	case SIP_ATTACH_MAP:
		bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_sz);
		break;
	default:
		break;
	}
	return;
}

/*
 * sipcom_resume:
 *
 *	pmf resume handler; reset the chip back to a known state.
 */
static bool
sipcom_resume(device_t self, const pmf_qual_t *qual)
{
	struct sip_softc *sc = device_private(self);

	return sipcom_reset(sc);
}

/*
 * sipcom_suspend:
 *
 *	pmf suspend handler; drain the receive ring.
 */
static bool
sipcom_suspend(device_t self, const pmf_qual_t *qual)
{
	struct sip_softc *sc = device_private(self);

	sipcom_rxdrain(sc);
	return true;
}

/*
 * sipcom_attach:
 *
 *	Autoconfiguration attach routine.
 */
static void
sipcom_attach(device_t parent, device_t self, void *aux)
{
	struct sip_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct mii_data * const mii = &sc->sc_mii;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	bus_space_tag_t iot, memt;
	bus_space_handle_t ioh, memh;
	bus_size_t iosz, memsz;
	int ioh_valid, memh_valid;
	int i, rseg, error;
	const struct sip_product *sip;
	uint8_t enaddr[ETHER_ADDR_LEN];
	pcireg_t csr;
	pcireg_t memtype;
	bus_size_t tx_dmamap_size;
	int ntxsegs_alloc;
	cfdata_t cf = device_cfdata(self);
	char intrbuf[PCI_INTRSTR_LEN];

	callout_init(&sc->sc_tick_ch, 0);
	callout_setfunc(&sc->sc_tick_ch, sipcom_tick, sc);

	sip = sipcom_lookup(pa, strcmp(cf->cf_name, "gsip") == 0);
	if (sip == NULL) {
		aprint_error("\n");
		panic("%s: impossible", __func__);
	}
	sc->sc_dev = self;
	sc->sc_gigabit = sip->sip_gigabit;
	sc->sc_dma64 = false;
	pmf_self_suspensor_init(self, &sc->sc_suspensor, &sc->sc_qual);
	sc->sc_pc = pc;

	/*
	 * Select descriptor word indices for the chip family.
	 * NOTE(review): sc_dma64 is still false here (set above); it is
	 * presumably updated later in attach (continuation not visible
	 * in this chunk) -- confirm where the GSIP64 layout is chosen.
	 */
	if (sc->sc_gigabit) {
		if (sc->sc_dma64) {
			sc->sc_bufptr_idx = GSIP64_DESC_BUFPTR_LO;
			sc->sc_cmdsts_idx = GSIP64_DESC_CMDSTS;
			sc->sc_extsts_idx = GSIP64_DESC_EXTSTS;
		} else {
			sc->sc_bufptr_idx = GSIP_DESC_BUFPTR;
			sc->sc_cmdsts_idx = GSIP_DESC_CMDSTS;
			sc->sc_extsts_idx = GSIP_DESC_EXTSTS;
		}
1104 sc->sc_rxintr = gsip_rxintr; 1105 sc->sc_parm = &gsip_parm; 1106 } else { 1107 sc->sc_rxintr = sip_rxintr; 1108 sc->sc_parm = &sip_parm; 1109 sc->sc_bufptr_idx = SIP_DESC_BUFPTR; 1110 sc->sc_cmdsts_idx = SIP_DESC_CMDSTS; 1111 /* 1112 * EXTSTS doesn't really exist on non-GigE parts, 1113 * but we initialize the index for simplicity later. 1114 */ 1115 sc->sc_extsts_idx = GSIP_DESC_EXTSTS; 1116 } 1117 tx_dmamap_size = sc->sc_parm->p_tx_dmamap_size; 1118 ntxsegs_alloc = sc->sc_parm->p_ntxsegs_alloc; 1119 sc->sc_ntxdesc = SIP_TXQUEUELEN * ntxsegs_alloc; 1120 sc->sc_ntxdesc_mask = sc->sc_ntxdesc - 1; 1121 sc->sc_nrxdesc_mask = sc->sc_parm->p_nrxdesc - 1; 1122 1123 sc->sc_rev = PCI_REVISION(pa->pa_class); 1124 1125 aprint_naive("\n"); 1126 aprint_normal(": %s, rev %#02x\n", sip->sip_name, sc->sc_rev); 1127 1128 sc->sc_model = sip; 1129 1130 /* 1131 * XXX Work-around broken PXE firmware on some boards. 1132 * 1133 * The DP83815 shares an address decoder with the MEM BAR 1134 * and the ROM BAR. Make sure the ROM BAR is disabled, 1135 * so that memory mapped access works. 1136 */ 1137 pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_MAPREG_ROM, 1138 pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_MAPREG_ROM) & 1139 ~PCI_MAPREG_ROM_ENABLE); 1140 1141 /* 1142 * Map the device. 
1143 */ 1144 ioh_valid = (pci_mapreg_map(pa, SIP_PCI_CFGIOA, 1145 PCI_MAPREG_TYPE_IO, 0, 1146 &iot, &ioh, NULL, &iosz) == 0); 1147 if (sc->sc_gigabit) { 1148 memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, SIP_PCI_CFGMA); 1149 switch (memtype) { 1150 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT: 1151 case PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_64BIT: 1152 memh_valid = (pci_mapreg_map(pa, SIP_PCI_CFGMA, 1153 memtype, 0, &memt, &memh, NULL, &memsz) == 0); 1154 break; 1155 default: 1156 memh_valid = 0; 1157 } 1158 } else { 1159 memh_valid = (pci_mapreg_map(pa, SIP_PCI_CFGMA, 1160 PCI_MAPREG_TYPE_MEM | PCI_MAPREG_MEM_TYPE_32BIT, 0, 1161 &memt, &memh, NULL, &memsz) == 0); 1162 } 1163 1164 if (memh_valid) { 1165 sc->sc_st = memt; 1166 sc->sc_sh = memh; 1167 sc->sc_sz = memsz; 1168 } else if (ioh_valid) { 1169 sc->sc_st = iot; 1170 sc->sc_sh = ioh; 1171 sc->sc_sz = iosz; 1172 } else { 1173 aprint_error_dev(self, "unable to map device registers\n"); 1174 return; 1175 } 1176 1177 sc->sc_dmat = pa->pa_dmat; 1178 1179 /* 1180 * Make sure bus mastering is enabled. Also make sure 1181 * Write/Invalidate is enabled if we're allowed to use it. 1182 */ 1183 csr = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); 1184 if (pa->pa_flags & PCI_FLAGS_MWI_OKAY) 1185 csr |= PCI_COMMAND_INVALIDATE_ENABLE; 1186 pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, 1187 csr | PCI_COMMAND_MASTER_ENABLE); 1188 1189 /* Power up chip */ 1190 error = pci_activate(pa->pa_pc, pa->pa_tag, self, pci_activate_null); 1191 if (error != 0 && error != EOPNOTSUPP) { 1192 aprint_error_dev(sc->sc_dev, "cannot activate %d\n", error); 1193 return; 1194 } 1195 1196 /* 1197 * Map and establish our interrupt. 
1198 */ 1199 if (pci_intr_map(pa, &ih)) { 1200 aprint_error_dev(sc->sc_dev, "unable to map interrupt\n"); 1201 return; 1202 } 1203 intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf)); 1204 sc->sc_ih = pci_intr_establish_xname(pc, ih, IPL_NET, sipcom_intr, sc, 1205 device_xname(self)); 1206 if (sc->sc_ih == NULL) { 1207 aprint_error_dev(sc->sc_dev, "unable to establish interrupt"); 1208 if (intrstr != NULL) 1209 aprint_error(" at %s", intrstr); 1210 aprint_error("\n"); 1211 sipcom_do_detach(self, SIP_ATTACH_MAP); 1212 return; 1213 } 1214 aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr); 1215 1216 SIMPLEQ_INIT(&sc->sc_txfreeq); 1217 SIMPLEQ_INIT(&sc->sc_txdirtyq); 1218 1219 /* 1220 * Allocate the control data structures, and create and load the 1221 * DMA map for it. 1222 */ 1223 if ((error = bus_dmamem_alloc(sc->sc_dmat, 1224 sizeof(struct sip_control_data), PAGE_SIZE, 0, &sc->sc_seg, 1, 1225 &rseg, 0)) != 0) { 1226 aprint_error_dev(sc->sc_dev, 1227 "unable to allocate control data, error = %d\n", error); 1228 sipcom_do_detach(self, SIP_ATTACH_INTR); 1229 return; 1230 } 1231 1232 if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_seg, rseg, 1233 sizeof(struct sip_control_data), (void **)&sc->sc_control_data, 1234 BUS_DMA_COHERENT)) != 0) { 1235 aprint_error_dev(sc->sc_dev, 1236 "unable to map control data, error = %d\n", error); 1237 sipcom_do_detach(self, SIP_ATTACH_ALLOC_MEM); 1238 } 1239 1240 if ((error = bus_dmamap_create(sc->sc_dmat, 1241 sizeof(struct sip_control_data), 1, 1242 sizeof(struct sip_control_data), 0, 0, &sc->sc_cddmamap)) != 0) { 1243 aprint_error_dev(self, "unable to create control data DMA map" 1244 ", error = %d\n", error); 1245 sipcom_do_detach(self, SIP_ATTACH_MAP_MEM); 1246 } 1247 1248 if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap, 1249 sc->sc_control_data, sizeof(struct sip_control_data), NULL, 1250 0)) != 0) { 1251 aprint_error_dev(self, "unable to load control data DMA map" 1252 ", error = %d\n", error); 
1253 sipcom_do_detach(self, SIP_ATTACH_CREATE_MAP); 1254 } 1255 1256 /* 1257 * Create the transmit buffer DMA maps. 1258 */ 1259 for (i = 0; i < SIP_TXQUEUELEN; i++) { 1260 if ((error = bus_dmamap_create(sc->sc_dmat, tx_dmamap_size, 1261 sc->sc_parm->p_ntxsegs, MCLBYTES, 0, 0, 1262 &sc->sc_txsoft[i].txs_dmamap)) != 0) { 1263 aprint_error_dev(self, "unable to create tx DMA map %d" 1264 ", error = %d\n", i, error); 1265 sipcom_do_detach(self, SIP_ATTACH_CREATE_TXMAP); 1266 } 1267 } 1268 1269 /* 1270 * Create the receive buffer DMA maps. 1271 */ 1272 for (i = 0; i < sc->sc_parm->p_nrxdesc; i++) { 1273 if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1, 1274 MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) { 1275 aprint_error_dev(self, "unable to create rx DMA map %d" 1276 ", error = %d\n", i, error); 1277 sipcom_do_detach(self, SIP_ATTACH_CREATE_RXMAP); 1278 } 1279 sc->sc_rxsoft[i].rxs_mbuf = NULL; 1280 } 1281 1282 /* 1283 * Reset the chip to a known state. 1284 */ 1285 sipcom_reset(sc); 1286 1287 /* 1288 * Read the Ethernet address from the EEPROM. This might 1289 * also fetch other stuff from the EEPROM and stash it 1290 * in the softc. 1291 */ 1292 sc->sc_cfg = 0; 1293 if (!sc->sc_gigabit) { 1294 if (SIP_SIS900_REV(sc, SIS_REV_635) || 1295 SIP_SIS900_REV(sc, SIS_REV_900B)) 1296 sc->sc_cfg |= (CFG_PESEL | CFG_RNDCNT); 1297 1298 if (SIP_SIS900_REV(sc, SIS_REV_635) || 1299 SIP_SIS900_REV(sc, SIS_REV_960) || 1300 SIP_SIS900_REV(sc, SIS_REV_900B)) 1301 sc->sc_cfg |= 1302 (bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_CFG) & 1303 CFG_EDBMASTEN); 1304 } 1305 1306 (*sip->sip_variant->sipv_read_macaddr)(sc, pa, enaddr); 1307 1308 aprint_normal_dev(self, "Ethernet address %s\n",ether_sprintf(enaddr)); 1309 1310 /* 1311 * Initialize the configuration register: aggressive PCI 1312 * bus request algorithm, default backoff, default OW timer, 1313 * default parity error detection. 
1314 * 1315 * NOTE: "Big endian mode" is useless on the SiS900 and 1316 * friends -- it affects packet data, not descriptors. 1317 */ 1318 if (sc->sc_gigabit) 1319 sipcom_dp83820_attach(sc, pa); 1320 1321 /* 1322 * Initialize our media structures and probe the MII. 1323 */ 1324 mii->mii_ifp = ifp; 1325 mii->mii_readreg = sip->sip_variant->sipv_mii_readreg; 1326 mii->mii_writereg = sip->sip_variant->sipv_mii_writereg; 1327 mii->mii_statchg = sip->sip_variant->sipv_mii_statchg; 1328 sc->sc_ethercom.ec_mii = mii; 1329 ifmedia_init(&mii->mii_media, IFM_IMASK, ether_mediachange, 1330 sipcom_mediastatus); 1331 1332 /* 1333 * XXX We cannot handle flow control on the DP83815. 1334 */ 1335 if (SIP_CHIP_MODEL(sc, PCI_VENDOR_NS, PCI_PRODUCT_NS_DP83815)) 1336 mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY, 1337 MII_OFFSET_ANY, 0); 1338 else 1339 mii_attach(sc->sc_dev, mii, 0xffffffff, MII_PHY_ANY, 1340 MII_OFFSET_ANY, MIIF_DOPAUSE); 1341 if (LIST_FIRST(&mii->mii_phys) == NULL) { 1342 ifmedia_add(&mii->mii_media, IFM_ETHER | IFM_NONE, 0, NULL); 1343 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_NONE); 1344 } else 1345 ifmedia_set(&mii->mii_media, IFM_ETHER | IFM_AUTO); 1346 1347 ifp = &sc->sc_ethercom.ec_if; 1348 strlcpy(ifp->if_xname, device_xname(sc->sc_dev), IFNAMSIZ); 1349 ifp->if_softc = sc; 1350 ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST; 1351 sc->sc_if_flags = ifp->if_flags; 1352 ifp->if_ioctl = sipcom_ioctl; 1353 ifp->if_start = sipcom_start; 1354 ifp->if_watchdog = sipcom_watchdog; 1355 ifp->if_init = sipcom_init; 1356 ifp->if_stop = sipcom_stop; 1357 IFQ_SET_READY(&ifp->if_snd); 1358 1359 /* 1360 * We can support 802.1Q VLAN-sized frames. 1361 */ 1362 sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU; 1363 1364 if (sc->sc_gigabit) { 1365 /* 1366 * And the DP83820 can do VLAN tagging in hardware, and 1367 * support the jumbo Ethernet MTU. 
1368 */ 1369 sc->sc_ethercom.ec_capabilities |= 1370 ETHERCAP_VLAN_HWTAGGING | ETHERCAP_JUMBO_MTU; 1371 sc->sc_ethercom.ec_capenable |= ETHERCAP_VLAN_HWTAGGING; 1372 1373 /* 1374 * The DP83820 can do IPv4, TCPv4, and UDPv4 checksums 1375 * in hardware. 1376 */ 1377 ifp->if_capabilities |= 1378 IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx | 1379 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | 1380 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx; 1381 } 1382 1383 /* 1384 * Attach the interface. 1385 */ 1386 if_attach(ifp); 1387 if_deferred_start_init(ifp, NULL); 1388 ether_ifattach(ifp, enaddr); 1389 ether_set_ifflags_cb(&sc->sc_ethercom, sip_ifflags_cb); 1390 sc->sc_prev.ec_capenable = sc->sc_ethercom.ec_capenable; 1391 sc->sc_prev.is_vlan = VLAN_ATTACHED(&(sc)->sc_ethercom); 1392 sc->sc_prev.if_capenable = ifp->if_capenable; 1393 rnd_attach_source(&sc->rnd_source, device_xname(sc->sc_dev), 1394 RND_TYPE_NET, RND_FLAG_DEFAULT); 1395 1396 /* 1397 * The number of bytes that must be available in 1398 * the Tx FIFO before the bus master can DMA more 1399 * data into the FIFO. 1400 */ 1401 sc->sc_tx_fill_thresh = 64 / 32; 1402 1403 /* 1404 * Start at a drain threshold of 512 bytes. We will 1405 * increase it if a DMA underrun occurs. 1406 * 1407 * XXX The minimum value of this variable should be 1408 * tuned. We may be able to improve performance 1409 * by starting with a lower value. That, however, 1410 * may trash the first few outgoing packets if the 1411 * PCI bus is saturated. 1412 */ 1413 if (sc->sc_gigabit) 1414 sc->sc_tx_drain_thresh = 6400 / 32; /* from FreeBSD nge(4) */ 1415 else 1416 sc->sc_tx_drain_thresh = 1504 / 32; 1417 1418 /* 1419 * Initialize the Rx FIFO drain threshold. 1420 * 1421 * This is in units of 8 bytes. 1422 * 1423 * We should never set this value lower than 2; 14 bytes are 1424 * required to filter the packet. 1425 */ 1426 sc->sc_rx_drain_thresh = 128 / 8; 1427 1428 #ifdef SIP_EVENT_COUNTERS 1429 /* 1430 * Attach event counters. 
1431 */ 1432 evcnt_attach_dynamic(&sc->sc_ev_txdstall, EVCNT_TYPE_MISC, 1433 NULL, device_xname(sc->sc_dev), "txdstall"); 1434 evcnt_attach_dynamic(&sc->sc_ev_txforceintr, EVCNT_TYPE_INTR, 1435 NULL, device_xname(sc->sc_dev), "txforceintr"); 1436 evcnt_attach_dynamic(&sc->sc_ev_txdintr, EVCNT_TYPE_INTR, 1437 NULL, device_xname(sc->sc_dev), "txdintr"); 1438 evcnt_attach_dynamic(&sc->sc_ev_txiintr, EVCNT_TYPE_INTR, 1439 NULL, device_xname(sc->sc_dev), "txiintr"); 1440 evcnt_attach_dynamic(&sc->sc_ev_rxintr, EVCNT_TYPE_INTR, 1441 NULL, device_xname(sc->sc_dev), "rxintr"); 1442 evcnt_attach_dynamic(&sc->sc_ev_hiberr, EVCNT_TYPE_INTR, 1443 NULL, device_xname(sc->sc_dev), "hiberr"); 1444 if (!sc->sc_gigabit) { 1445 evcnt_attach_dynamic(&sc->sc_ev_rxpause, EVCNT_TYPE_INTR, 1446 NULL, device_xname(sc->sc_dev), "rxpause"); 1447 } else { 1448 evcnt_attach_dynamic(&sc->sc_ev_rxpause, EVCNT_TYPE_MISC, 1449 NULL, device_xname(sc->sc_dev), "rxpause"); 1450 evcnt_attach_dynamic(&sc->sc_ev_txpause, EVCNT_TYPE_MISC, 1451 NULL, device_xname(sc->sc_dev), "txpause"); 1452 evcnt_attach_dynamic(&sc->sc_ev_rxipsum, EVCNT_TYPE_MISC, 1453 NULL, device_xname(sc->sc_dev), "rxipsum"); 1454 evcnt_attach_dynamic(&sc->sc_ev_rxtcpsum, EVCNT_TYPE_MISC, 1455 NULL, device_xname(sc->sc_dev), "rxtcpsum"); 1456 evcnt_attach_dynamic(&sc->sc_ev_rxudpsum, EVCNT_TYPE_MISC, 1457 NULL, device_xname(sc->sc_dev), "rxudpsum"); 1458 evcnt_attach_dynamic(&sc->sc_ev_txipsum, EVCNT_TYPE_MISC, 1459 NULL, device_xname(sc->sc_dev), "txipsum"); 1460 evcnt_attach_dynamic(&sc->sc_ev_txtcpsum, EVCNT_TYPE_MISC, 1461 NULL, device_xname(sc->sc_dev), "txtcpsum"); 1462 evcnt_attach_dynamic(&sc->sc_ev_txudpsum, EVCNT_TYPE_MISC, 1463 NULL, device_xname(sc->sc_dev), "txudpsum"); 1464 } 1465 #endif /* SIP_EVENT_COUNTERS */ 1466 1467 if (pmf_device_register(self, sipcom_suspend, sipcom_resume)) 1468 pmf_class_network_register(self, ifp); 1469 else 1470 aprint_error_dev(self, "couldn't establish power handler\n"); 1471 } 1472 1473 
/*
 * sipcom_set_extsts:
 *
 *	Fill in the EXTSTS word of an already-built Tx descriptor chain
 *	(gigabit DP83820 only): VLAN tag insertion flags go on the LAST
 *	descriptor, checksum-offload flags go on the FIRST descriptor
 *	(sc->sc_txnext).  Caller must sync the descriptors afterwards.
 */
static inline void
sipcom_set_extsts(struct sip_softc *sc, int lasttx, struct mbuf *m0,
    uint64_t capenable)
{
	uint32_t extsts = 0;
#ifdef DEBUG
	/* ifp is only needed for the KDASSERTs below. */
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
#endif
	/*
	 * If VLANs are enabled and the packet has a VLAN tag, set
	 * up the descriptor to encapsulate the packet for us.
	 *
	 * This apparently has to be on the last descriptor of
	 * the packet.
	 */

	/*
	 * Byte swapping is tricky. We need to provide the tag
	 * in a network byte order. On a big-endian machine,
	 * the byteorder is correct, but we need to swap it
	 * anyway, because this will be undone by the outside
	 * htole32(). That's why there must be an
	 * unconditional swap instead of htons() inside.
	 */
	if (vlan_has_tag(m0)) {
		sc->sc_txdescs[lasttx].sipd_words[sc->sc_extsts_idx] |=
		    htole32(EXTSTS_VPKT |
				(bswap16(vlan_get_tag(m0)) &
				 EXTSTS_VTCI));
	}

	/*
	 * If the upper-layer has requested IPv4/TCPv4/UDPv4
	 * checksumming, set up the descriptor to do this work
	 * for us.
	 *
	 * This apparently has to be on the first descriptor of
	 * the packet.
	 *
	 * Byte-swap constants so the compiler can optimize.
	 */
	if (m0->m_pkthdr.csum_flags & M_CSUM_IPv4) {
		KDASSERT(ifp->if_capenable & IFCAP_CSUM_IPv4_Tx);
		SIP_EVCNT_INCR(&sc->sc_ev_txipsum);
		extsts |= htole32(EXTSTS_IPPKT);
	}
	/* TCP and UDP are mutually exclusive on a given packet. */
	if (m0->m_pkthdr.csum_flags & M_CSUM_TCPv4) {
		KDASSERT(ifp->if_capenable & IFCAP_CSUM_TCPv4_Tx);
		SIP_EVCNT_INCR(&sc->sc_ev_txtcpsum);
		extsts |= htole32(EXTSTS_TCPPKT);
	} else if (m0->m_pkthdr.csum_flags & M_CSUM_UDPv4) {
		KDASSERT(ifp->if_capenable & IFCAP_CSUM_UDPv4_Tx);
		SIP_EVCNT_INCR(&sc->sc_ev_txudpsum);
		extsts |= htole32(EXTSTS_UDPPKT);
	}
	sc->sc_txdescs[sc->sc_txnext].sipd_words[sc->sc_extsts_idx] |= extsts;
}

/*
 * sip_start:		[ifnet interface function]
 *
 *	Start packet transmission on the interface.
 */
static void
sipcom_start(struct ifnet *ifp)
{
	struct sip_softc *sc = ifp->if_softc;
	struct mbuf *m0;
	struct mbuf *m;
	struct sip_txsoft *txs;
	bus_dmamap_t dmamap;
	int error, nexttx, lasttx, seg;
	int ofree = sc->sc_txfree;
	uint32_t cmdsts;
#if 0
	int firsttx = sc->sc_txnext;
#endif

	/*
	 * If we've been told to pause, don't transmit any more packets.
	 */
	if (!sc->sc_gigabit && sc->sc_paused)
		return;

	if ((ifp->if_flags & IFF_RUNNING) != IFF_RUNNING)
		return;

	/*
	 * Loop through the send queue, setting up transmit descriptors
	 * until we drain the queue, or use up all available transmit
	 * descriptors.
	 */
	while ((txs = SIMPLEQ_FIRST(&sc->sc_txfreeq)) != NULL) {
		/*
		 * Grab a packet off the queue.
		 */
		IFQ_POLL(&ifp->if_snd, m0);
		if (m0 == NULL)
			break;
		m = NULL;

		dmamap = txs->txs_dmamap;

		/*
		 * Load the DMA map.  If this fails, the packet either
		 * didn't fit in the alloted number of segments, or we
		 * were short on resources.
		 */
		error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap, m0,
		    BUS_DMA_WRITE | BUS_DMA_NOWAIT);
		/* In the non-gigabit case, we'll copy and try again. */
		if (error != 0 && !sc->sc_gigabit) {
			MGETHDR(m, M_DONTWAIT, MT_DATA);
			if (m == NULL) {
				printf("%s: unable to allocate Tx mbuf\n",
				    device_xname(sc->sc_dev));
				break;
			}
			MCLAIM(m, &sc->sc_ethercom.ec_tx_mowner);
			if (m0->m_pkthdr.len > MHLEN) {
				MCLGET(m, M_DONTWAIT);
				if ((m->m_flags & M_EXT) == 0) {
					printf("%s: unable to allocate Tx "
					    "cluster\n",
					    device_xname(sc->sc_dev));
					m_freem(m);
					break;
				}
			}
			/* Compact the whole packet into one contiguous mbuf. */
			m_copydata(m0, 0, m0->m_pkthdr.len, mtod(m, void *));
			m->m_pkthdr.len = m->m_len = m0->m_pkthdr.len;
			error = bus_dmamap_load_mbuf(sc->sc_dmat, dmamap,
			    m, BUS_DMA_WRITE | BUS_DMA_NOWAIT);
			if (error) {
				printf("%s: unable to load Tx buffer, error = "
				    "%d\n", device_xname(sc->sc_dev), error);
				break;
			}
		} else if (error == EFBIG) {
			/*
			 * For the too-many-segments case, we simply
			 * report an error and drop the packet,
			 * since we can't sanely copy a jumbo packet
			 * to a single buffer.
			 */
			printf("%s: Tx packet consumes too many DMA segments, "
			    "dropping...\n", device_xname(sc->sc_dev));
			IFQ_DEQUEUE(&ifp->if_snd, m0);
			m_freem(m0);
			continue;
		} else if (error != 0) {
			/*
			 * Short on resources, just stop for now.
			 */
			break;
		}

		/*
		 * Ensure we have enough descriptors free to describe
		 * the packet.  Note, we always reserve one descriptor
		 * at the end of the ring as a termination point, to
		 * prevent wrap-around.
		 */
		if (dmamap->dm_nsegs > (sc->sc_txfree - 1)) {
			/*
			 * Not enough free descriptors to transmit this
			 * packet.
			 */
			bus_dmamap_unload(sc->sc_dmat, dmamap);
			if (m != NULL)
				m_freem(m);
			SIP_EVCNT_INCR(&sc->sc_ev_txdstall);
			break;
		}

		IFQ_DEQUEUE(&ifp->if_snd, m0);
		if (m != NULL) {
			/* Transmit the compacted copy instead. */
			m_freem(m0);
			m0 = m;
		}

		/*
		 * WE ARE NOW COMMITTED TO TRANSMITTING THE PACKET.
		 */

		/* Sync the DMA map. */
		bus_dmamap_sync(sc->sc_dmat, dmamap, 0, dmamap->dm_mapsize,
		    BUS_DMASYNC_PREWRITE);

		/*
		 * Initialize the transmit descriptors.
		 */
		for (nexttx = lasttx = sc->sc_txnext, seg = 0;
		     seg < dmamap->dm_nsegs;
		     seg++, nexttx = sip_nexttx(sc, nexttx)) {
			/*
			 * If this is the first descriptor we're
			 * enqueueing, don't set the OWN bit just
			 * yet.  That could cause a race condition.
			 * We'll do it below.
			 */

			cmdsts = dmamap->dm_segs[seg].ds_len;
			if (nexttx != sc->sc_txnext)
				cmdsts |= CMDSTS_OWN;
			if (seg < dmamap->dm_nsegs - 1)
				cmdsts |= CMDSTS_MORE;
			sip_init_txdesc(sc, nexttx,
			    dmamap->dm_segs[seg].ds_addr, cmdsts);
			lasttx = nexttx;
		}

		/*
		 * If we're in the interrupt delay window, delay the
		 * interrupt.
		 */
		if (++sc->sc_txwin >= (SIP_TXQUEUELEN * 2 / 3)) {
			SIP_EVCNT_INCR(&sc->sc_ev_txforceintr);
			sc->sc_txdescs[lasttx].sipd_words[sc->sc_cmdsts_idx] |=
			    htole32(CMDSTS_INTR);
			sc->sc_txwin = 0;
		}

		/* Gigabit parts also carry VLAN/checksum info in EXTSTS. */
		if (sc->sc_gigabit)
			sipcom_set_extsts(sc, lasttx, m0, ifp->if_capenable);

		/* Sync the descriptors we're using. */
		sip_cdtxsync(sc, sc->sc_txnext, dmamap->dm_nsegs,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/*
		 * The entire packet is set up.  Give the first descriptor
		 * to the chip now.
		 */
		sc->sc_txdescs[sc->sc_txnext].sipd_words[sc->sc_cmdsts_idx] |=
		    htole32(CMDSTS_OWN);
		sip_cdtxsync(sc, sc->sc_txnext, 1,
		    BUS_DMASYNC_PREREAD | BUS_DMASYNC_PREWRITE);

		/*
		 * Store a pointer to the packet so we can free it later,
		 * and remember what txdirty will be once the packet is
		 * done.
		 */
		txs->txs_mbuf = m0;
		txs->txs_firstdesc = sc->sc_txnext;
		txs->txs_lastdesc = lasttx;

		/* Advance the tx pointer. */
		sc->sc_txfree -= dmamap->dm_nsegs;
		sc->sc_txnext = nexttx;

		SIMPLEQ_REMOVE_HEAD(&sc->sc_txfreeq, txs_q);
		SIMPLEQ_INSERT_TAIL(&sc->sc_txdirtyq, txs, txs_q);

		/* Pass the packet to any BPF listeners. */
		bpf_mtap(ifp, m0, BPF_D_OUT);
	}

	if (sc->sc_txfree != ofree) {
		/*
		 * Start the transmit process.  Note, the manual says
		 * that if there are no pending transmissions in the
		 * chip's internal queue (indicated by TXE being clear),
		 * then the driver software must set the TXDP to the
		 * first descriptor to be transmitted.  However, if we
		 * do this, it causes serious performance degradation on
		 * the DP83820 under load, not setting TXDP doesn't seem
		 * to adversely affect the SiS 900 or DP83815.
		 *
		 * Well, I guess it wouldn't be the first time a manual
		 * has lied -- and they could be speaking of the NULL-
		 * terminated descriptor list case, rather than OWN-
		 * terminated rings.
		 */
#if 0
		if ((bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_CR) &
		     CR_TXE) == 0) {
			sip_set_txdp(sc, SIP_CDTXADDR(sc, firsttx));
			bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_CR, CR_TXE);
		}
#else
		bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_CR, CR_TXE);
#endif

		/* Set a watchdog timer in case the chip flakes out. */
		/* Gigabit autonegotiation takes 5 seconds. */
		ifp->if_timer = (sc->sc_gigabit) ? 10 : 5;
	}
}

/*
 * sip_watchdog:	[ifnet interface function]
 *
 *	Watchdog timer handler.
 */
static void
sipcom_watchdog(struct ifnet *ifp)
{
	struct sip_softc *sc = ifp->if_softc;

	/*
	 * The chip seems to ignore the CMDSTS_INTR bit sometimes!
	 * If we get a timeout, try and sweep up transmit descriptors.
	 * If we manage to sweep them all up, ignore the lack of
	 * interrupt.
	 */
	sipcom_txintr(sc);

	if (sc->sc_txfree != sc->sc_ntxdesc) {
		printf("%s: device timeout\n", device_xname(sc->sc_dev));
		if_statinc(ifp, if_oerrors);

		/* Reset the interface. */
		(void) sipcom_init(ifp);
	} else if (ifp->if_flags & IFF_DEBUG)
		printf("%s: recovered from device timeout\n",
		    device_xname(sc->sc_dev));

	/* Try to get more packets going. */
	sipcom_start(ifp);
}

/* If the interface is up and running, only modify the receive
 * filter when setting promiscuous or debug mode.  Otherwise fall
 * through to ether_ioctl, which will reset the chip.
 */
static int
sip_ifflags_cb(struct ethercom *ec)
{
#define COMPARE_EC(sc) (((sc)->sc_prev.ec_capenable \
			 == (sc)->sc_ethercom.ec_capenable) \
			&& ((sc)->sc_prev.is_vlan == \
			    VLAN_ATTACHED(&(sc)->sc_ethercom) ))
#define COMPARE_IC(sc, ifp) ((sc)->sc_prev.if_capenable == (ifp)->if_capenable)
	struct ifnet *ifp = &ec->ec_if;
	struct sip_softc *sc = ifp->if_softc;
	u_short change = ifp->if_flags ^ sc->sc_if_flags;

	/* Anything beyond promiscuous/debug changes needs a full reset. */
	if ((change & ~(IFF_CANTCHANGE | IFF_DEBUG)) != 0 || !COMPARE_EC(sc) ||
	    !COMPARE_IC(sc, ifp))
		return ENETRESET;
	/* Set up the receive filter. */
	(*sc->sc_model->sip_variant->sipv_set_filter)(sc);
	return 0;
}

/*
 * sip_ioctl:		[ifnet interface function]
 *
 *	Handle control requests from the operator.
1824 */ 1825 static int 1826 sipcom_ioctl(struct ifnet *ifp, u_long cmd, void *data) 1827 { 1828 struct sip_softc *sc = ifp->if_softc; 1829 struct ifreq *ifr = (struct ifreq *)data; 1830 int s, error; 1831 1832 s = splnet(); 1833 1834 switch (cmd) { 1835 case SIOCSIFMEDIA: 1836 /* Flow control requires full-duplex mode. */ 1837 if (IFM_SUBTYPE(ifr->ifr_media) == IFM_AUTO || 1838 (ifr->ifr_media & IFM_FDX) == 0) 1839 ifr->ifr_media &= ~IFM_ETH_FMASK; 1840 1841 /* XXX */ 1842 if (SIP_CHIP_MODEL(sc, PCI_VENDOR_NS, PCI_PRODUCT_NS_DP83815)) 1843 ifr->ifr_media &= ~IFM_ETH_FMASK; 1844 if (IFM_SUBTYPE(ifr->ifr_media) != IFM_AUTO) { 1845 if (sc->sc_gigabit && 1846 (ifr->ifr_media & IFM_ETH_FMASK) == IFM_FLOW) { 1847 /* We can do both TXPAUSE and RXPAUSE. */ 1848 ifr->ifr_media |= 1849 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE; 1850 } else if (ifr->ifr_media & IFM_FLOW) { 1851 /* 1852 * Both TXPAUSE and RXPAUSE must be set. 1853 * (SiS900 and DP83815 don't have PAUSE_ASYM 1854 * feature.) 1855 * 1856 * XXX Can SiS900 and DP83815 send PAUSE? 1857 */ 1858 ifr->ifr_media |= 1859 IFM_ETH_TXPAUSE | IFM_ETH_RXPAUSE; 1860 } 1861 sc->sc_flowflags = ifr->ifr_media & IFM_ETH_FMASK; 1862 } 1863 /*FALLTHROUGH*/ 1864 default: 1865 if ((error = ether_ioctl(ifp, cmd, data)) != ENETRESET) 1866 break; 1867 1868 error = 0; 1869 1870 if (cmd == SIOCSIFCAP) 1871 error = if_init(ifp); 1872 else if (cmd != SIOCADDMULTI && cmd != SIOCDELMULTI) 1873 ; 1874 else if (ifp->if_flags & IFF_RUNNING) { 1875 /* 1876 * Multicast list has changed; set the hardware filter 1877 * accordingly. 1878 */ 1879 (*sc->sc_model->sip_variant->sipv_set_filter)(sc); 1880 } 1881 break; 1882 } 1883 1884 /* Try to get more packets going. */ 1885 sipcom_start(ifp); 1886 1887 sc->sc_if_flags = ifp->if_flags; 1888 splx(s); 1889 return error; 1890 } 1891 1892 /* 1893 * sip_intr: 1894 * 1895 * Interrupt service routine. 
 */
static int
sipcom_intr(void *arg)
{
	struct sip_softc *sc = arg;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	uint32_t isr;
	int handled = 0;

	/* Ignore spurious interrupts once the device is deactivated. */
	if (!device_activation(sc->sc_dev, DEVACT_LEVEL_DRIVER))
		return 0;

	/* Disable interrupts. */
	bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_IER, 0);

	for (;;) {
		/* Reading clears interrupt. */
		isr = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_ISR);
		if ((isr & sc->sc_imr) == 0)
			break;

		/* Feed the interrupt status into the entropy pool. */
		rnd_add_uint32(&sc->rnd_source, isr);

		handled = 1;

		if ((ifp->if_flags & IFF_RUNNING) == 0)
			break;

		if (isr & (ISR_RXORN | ISR_RXIDLE | ISR_RXDESC)) {
			SIP_EVCNT_INCR(&sc->sc_ev_rxintr);

			/* Grab any new packets. */
			(*sc->sc_rxintr)(sc);

			if (isr & ISR_RXORN) {
				printf("%s: receive FIFO overrun\n",
				    device_xname(sc->sc_dev));

				/* XXX adjust rx_drain_thresh? */
			}

			if (isr & ISR_RXIDLE) {
				printf("%s: receive ring overrun\n",
				    device_xname(sc->sc_dev));

				/* Get the receive process going again. */
				sip_set_rxdp(sc,
				    SIP_CDRXADDR(sc, sc->sc_rxptr));
				bus_space_write_4(sc->sc_st, sc->sc_sh,
				    SIP_CR, CR_RXE);
			}
		}

		if (isr & (ISR_TXURN | ISR_TXDESC | ISR_TXIDLE)) {
#ifdef SIP_EVENT_COUNTERS
			if (isr & ISR_TXDESC)
				SIP_EVCNT_INCR(&sc->sc_ev_txdintr);
			else if (isr & ISR_TXIDLE)
				SIP_EVCNT_INCR(&sc->sc_ev_txiintr);
#endif

			/* Sweep up transmit descriptors. */
			sipcom_txintr(sc);

			if (isr & ISR_TXURN) {
				uint32_t thresh;
				int txfifo_size = (sc->sc_gigabit)
				    ? DP83820_SIP_TXFIFO_SIZE
				    : OTHER_SIP_TXFIFO_SIZE;

				printf("%s: transmit FIFO underrun",
				    device_xname(sc->sc_dev));
				/*
				 * Bump the drain threshold (in 32-byte
				 * units) if it still fits both the TXCFG
				 * field and the FIFO; either way reinit
				 * the chip to recover.
				 */
				thresh = sc->sc_tx_drain_thresh + 1;
				if (thresh <= __SHIFTOUT_MASK(sc->sc_bits.b_txcfg_drth_mask)
				    && (thresh * 32) <= (txfifo_size -
				    (sc->sc_tx_fill_thresh * 32))) {
					printf("; increasing Tx drain "
					    "threshold to %u bytes\n",
					    thresh * 32);
					sc->sc_tx_drain_thresh = thresh;
					(void) sipcom_init(ifp);
				} else {
					(void) sipcom_init(ifp);
					printf("\n");
				}
			}
		}

		if (sc->sc_imr & (ISR_PAUSE_END | ISR_PAUSE_ST)) {
			/* Track 802.3x PAUSE state (non-gigabit parts). */
			if (isr & ISR_PAUSE_ST) {
				sc->sc_paused = 1;
				SIP_EVCNT_INCR(&sc->sc_ev_rxpause);
			}
			if (isr & ISR_PAUSE_END) {
				sc->sc_paused = 0;
			}
		}

		if (isr & ISR_HIBERR) {
			int want_init = 0;

			SIP_EVCNT_INCR(&sc->sc_ev_hiberr);

#define PRINTERR(bit, str)						\
	do {								\
		if ((isr & (bit)) != 0) {				\
			if ((ifp->if_flags & IFF_DEBUG) != 0)		\
				printf("%s: %s\n",			\
				    device_xname(sc->sc_dev), str);	\
			want_init = 1;					\
		}							\
	} while (/*CONSTCOND*/0)

			PRINTERR(sc->sc_bits.b_isr_dperr, "parity error");
			PRINTERR(sc->sc_bits.b_isr_sserr, "system error");
			PRINTERR(sc->sc_bits.b_isr_rmabt, "master abort");
			PRINTERR(sc->sc_bits.b_isr_rtabt, "target abort");
			PRINTERR(ISR_RXSOVR, "receive status FIFO overrun");
			/*
			 * Ignore:
			 *	Tx reset complete
			 *	Rx reset complete
			 */
			if (want_init)
				(void) sipcom_init(ifp);
#undef PRINTERR
		}
	}

	/* Re-enable interrupts. */
	bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_IER, IER_IE);

	/* Try to get more packets going. */
	if_schedule_deferred_start(ifp);

	return handled;
}

/*
 * sip_txintr:
 *
 *	Helper; handle transmit interrupts.
 */
static void
sipcom_txintr(struct sip_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct sip_txsoft *txs;
	uint32_t cmdsts;

	/*
	 * Go through our Tx list and free mbufs for those
	 * frames which have been transmitted.
	 */
	while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		sip_cdtxsync(sc, txs->txs_firstdesc, txs->txs_dmamap->dm_nsegs,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		/* OWN still set on the last descriptor => chip not done. */
		cmdsts = le32toh(sc->sc_txdescs[
		    txs->txs_lastdesc].sipd_words[sc->sc_cmdsts_idx]);
		if (cmdsts & CMDSTS_OWN)
			break;

		SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);

		sc->sc_txfree += txs->txs_dmamap->dm_nsegs;

		bus_dmamap_sync(sc->sc_dmat, txs->txs_dmamap,
		    0, txs->txs_dmamap->dm_mapsize, BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;

		SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);

		/* Check for errors and collisions. */
		net_stat_ref_t nsr = IF_STAT_GETREF(ifp);
		if (cmdsts & (CMDSTS_Tx_TXA | CMDSTS_Tx_TFU | CMDSTS_Tx_ED |
		    CMDSTS_Tx_EC)) {
			if_statinc_ref(nsr, if_oerrors);
			/* Excessive collisions: the chip gave up after 16. */
			if (cmdsts & CMDSTS_Tx_EC)
				if_statadd_ref(nsr, if_collisions, 16);
			if (ifp->if_flags & IFF_DEBUG) {
				if (cmdsts & CMDSTS_Tx_ED)
					printf("%s: excessive deferral\n",
					    device_xname(sc->sc_dev));
				if (cmdsts & CMDSTS_Tx_EC)
					printf("%s: excessive collisions\n",
					    device_xname(sc->sc_dev));
			}
		} else {
			/* Packet was transmitted successfully. */
			if_statinc_ref(nsr, if_opackets);
			if (CMDSTS_COLLISIONS(cmdsts))
				if_statadd_ref(nsr, if_collisions,
				    CMDSTS_COLLISIONS(cmdsts));
		}
		IF_STAT_PUTREF(ifp);
	}

	/*
	 * If there are no more pending transmissions, cancel the watchdog
	 * timer.
	 */
	if (txs == NULL) {
		ifp->if_timer = 0;
		sc->sc_txwin = 0;
	}
}

/*
 * gsip_rxintr:
 *
 *	Helper; handle receive interrupts on gigabit parts.
 */
static void
gsip_rxintr(struct sip_softc *sc)
{
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct sip_rxsoft *rxs;
	struct mbuf *m;
	uint32_t cmdsts, extsts;
	int i, len;

	for (i = sc->sc_rxptr;; i = sip_nextrx(sc, i)) {
		rxs = &sc->sc_rxsoft[i];

		sip_cdrxsync(sc, i,
		    BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);

		cmdsts =
		    le32toh(sc->sc_rxdescs[i].sipd_words[sc->sc_cmdsts_idx]);

		/*
		 * NOTE: OWN is set if owned by _consumer_.  We're the
		 * consumer of the receive ring, so if the bit is clear,
		 * we have processed all of the packets.
		 */
		if ((cmdsts & CMDSTS_OWN) == 0) {
			/*
			 * We have processed all of the receive buffers.
			 */
			break;
		}

		sip_cdrxsync(sc, i, BUS_DMASYNC_POSTREAD);

		extsts =
		    le32toh(sc->sc_rxdescs[i].sipd_words[sc->sc_extsts_idx]);
		len = CMDSTS_SIZE(sc, cmdsts);

		/* Still draining a packet we earlier failed to buffer. */
		if (__predict_false(sc->sc_rxdiscard)) {
			sip_init_rxdesc(sc, i);
			if ((cmdsts & CMDSTS_MORE) == 0) {
				/* Reset our state. */
				sc->sc_rxdiscard = 0;
			}
			continue;
		}

		bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
		    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD);

		m = rxs->rxs_mbuf;

		/*
		 * Add a new receive buffer to the ring.
		 */
		if (sipcom_add_rxbuf(sc, i) != 0) {
			/*
			 * Failed, throw away what we've done so
			 * far, and discard the rest of the packet.
2168 */ 2169 if_statinc(ifp, if_ierrors); 2170 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 2171 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 2172 sip_init_rxdesc(sc, i); 2173 if (cmdsts & CMDSTS_MORE) 2174 sc->sc_rxdiscard = 1; 2175 if (sc->sc_rxhead != NULL) 2176 m_freem(sc->sc_rxhead); 2177 sip_rxchain_reset(sc); 2178 continue; 2179 } 2180 2181 sip_rxchain_link(sc, m); 2182 2183 m->m_len = len; 2184 2185 /* 2186 * If this is not the end of the packet, keep 2187 * looking. 2188 */ 2189 if (cmdsts & CMDSTS_MORE) { 2190 sc->sc_rxlen += len; 2191 continue; 2192 } 2193 2194 /* 2195 * Okay, we have the entire packet now. The chip includes 2196 * the FCS, so we need to trim it. 2197 */ 2198 m->m_len -= ETHER_CRC_LEN; 2199 2200 *sc->sc_rxtailp = NULL; 2201 len = m->m_len + sc->sc_rxlen; 2202 m = sc->sc_rxhead; 2203 2204 sip_rxchain_reset(sc); 2205 2206 /* If an error occurred, update stats and drop the packet. */ 2207 if (cmdsts & (CMDSTS_Rx_RXA | CMDSTS_Rx_LONG | CMDSTS_Rx_RUNT | 2208 CMDSTS_Rx_ISE | CMDSTS_Rx_CRCE | CMDSTS_Rx_FAE)) { 2209 if_statinc(ifp, if_ierrors); 2210 if ((cmdsts & CMDSTS_Rx_RXA) != 0 && 2211 (cmdsts & CMDSTS_Rx_RXO) == 0) { 2212 /* Receive overrun handled elsewhere. */ 2213 printf("%s: receive descriptor error\n", 2214 device_xname(sc->sc_dev)); 2215 } 2216 #define PRINTERR(bit, str) \ 2217 if ((ifp->if_flags & IFF_DEBUG) != 0 && \ 2218 (cmdsts & (bit)) != 0) \ 2219 printf("%s: %s\n", device_xname(sc->sc_dev), str) 2220 PRINTERR(CMDSTS_Rx_LONG, "Too long packet"); 2221 PRINTERR(CMDSTS_Rx_RUNT, "runt packet"); 2222 PRINTERR(CMDSTS_Rx_ISE, "invalid symbol error"); 2223 PRINTERR(CMDSTS_Rx_CRCE, "CRC error"); 2224 PRINTERR(CMDSTS_Rx_FAE, "frame alignment error"); 2225 #undef PRINTERR 2226 m_freem(m); 2227 continue; 2228 } 2229 2230 /* 2231 * If the packet is small enough to fit in a 2232 * single header mbuf, allocate one and copy 2233 * the data into it. 
This greatly reduces 2234 * memory consumption when we receive lots 2235 * of small packets. 2236 */ 2237 if (gsip_copy_small != 0 && len <= (MHLEN - 2)) { 2238 struct mbuf *nm; 2239 MGETHDR(nm, M_DONTWAIT, MT_DATA); 2240 if (nm == NULL) { 2241 if_statinc(ifp, if_ierrors); 2242 m_freem(m); 2243 continue; 2244 } 2245 MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner); 2246 nm->m_data += 2; 2247 nm->m_pkthdr.len = nm->m_len = len; 2248 m_copydata(m, 0, len, mtod(nm, void *)); 2249 m_freem(m); 2250 m = nm; 2251 } 2252 #ifndef __NO_STRICT_ALIGNMENT 2253 else { 2254 /* 2255 * The DP83820's receive buffers must be 4-byte 2256 * aligned. But this means that the data after 2257 * the Ethernet header is misaligned. To compensate, 2258 * we have artificially shortened the buffer size 2259 * in the descriptor, and we do an overlapping copy 2260 * of the data two bytes further in (in the first 2261 * buffer of the chain only). 2262 */ 2263 memmove(mtod(m, char *) + 2, mtod(m, void *), 2264 m->m_len); 2265 m->m_data += 2; 2266 } 2267 #endif /* ! __NO_STRICT_ALIGNMENT */ 2268 2269 /* 2270 * If VLANs are enabled, VLAN packets have been unwrapped 2271 * for us. Associate the tag with the packet. 2272 */ 2273 2274 /* 2275 * Again, byte swapping is tricky. Hardware provided 2276 * the tag in the network byte order, but extsts was 2277 * passed through le32toh() in the meantime. On a 2278 * big-endian machine, we need to swap it again. On a 2279 * little-endian machine, we need to convert from the 2280 * network to host byte order. This means that we must 2281 * swap it in any case, so unconditional swap instead 2282 * of htons() is used. 2283 */ 2284 if ((extsts & EXTSTS_VPKT) != 0) { 2285 vlan_set_tag(m, bswap16(extsts & EXTSTS_VTCI)); 2286 } 2287 2288 /* 2289 * Set the incoming checksum information for the 2290 * packet. 
2291 */ 2292 if ((extsts & EXTSTS_IPPKT) != 0) { 2293 SIP_EVCNT_INCR(&sc->sc_ev_rxipsum); 2294 m->m_pkthdr.csum_flags |= M_CSUM_IPv4; 2295 if (extsts & EXTSTS_Rx_IPERR) 2296 m->m_pkthdr.csum_flags |= M_CSUM_IPv4_BAD; 2297 if (extsts & EXTSTS_TCPPKT) { 2298 SIP_EVCNT_INCR(&sc->sc_ev_rxtcpsum); 2299 m->m_pkthdr.csum_flags |= M_CSUM_TCPv4; 2300 if (extsts & EXTSTS_Rx_TCPERR) 2301 m->m_pkthdr.csum_flags |= 2302 M_CSUM_TCP_UDP_BAD; 2303 } else if (extsts & EXTSTS_UDPPKT) { 2304 SIP_EVCNT_INCR(&sc->sc_ev_rxudpsum); 2305 m->m_pkthdr.csum_flags |= M_CSUM_UDPv4; 2306 if (extsts & EXTSTS_Rx_UDPERR) 2307 m->m_pkthdr.csum_flags |= 2308 M_CSUM_TCP_UDP_BAD; 2309 } 2310 } 2311 2312 m_set_rcvif(m, ifp); 2313 m->m_pkthdr.len = len; 2314 2315 /* Pass it on. */ 2316 if_percpuq_enqueue(ifp->if_percpuq, m); 2317 } 2318 2319 /* Update the receive pointer. */ 2320 sc->sc_rxptr = i; 2321 } 2322 2323 /* 2324 * sip_rxintr: 2325 * 2326 * Helper; handle receive interrupts on 10/100 parts. 2327 */ 2328 static void 2329 sip_rxintr(struct sip_softc *sc) 2330 { 2331 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 2332 struct sip_rxsoft *rxs; 2333 struct mbuf *m; 2334 uint32_t cmdsts; 2335 int i, len; 2336 2337 for (i = sc->sc_rxptr;; i = sip_nextrx(sc, i)) { 2338 rxs = &sc->sc_rxsoft[i]; 2339 2340 sip_cdrxsync(sc, i, 2341 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE); 2342 2343 cmdsts = 2344 le32toh(sc->sc_rxdescs[i].sipd_words[sc->sc_cmdsts_idx]); 2345 2346 /* 2347 * NOTE: OWN is set if owned by _consumer_. We're the 2348 * consumer of the receive ring, so if the bit is clear, 2349 * we have processed all of the packets. 2350 */ 2351 if ((cmdsts & CMDSTS_OWN) == 0) { 2352 /* 2353 * We have processed all of the receive buffers. 2354 */ 2355 break; 2356 } 2357 2358 /* If any collisions were seen on the wire, count one. 
*/ 2359 if (cmdsts & CMDSTS_Rx_COL) 2360 if_statinc(ifp, if_collisions); 2361 2362 /* 2363 * If an error occurred, update stats, clear the status 2364 * word, and leave the packet buffer in place. It will 2365 * simply be reused the next time the ring comes around. 2366 */ 2367 if (cmdsts & (CMDSTS_Rx_RXA | CMDSTS_Rx_LONG | CMDSTS_Rx_RUNT | 2368 CMDSTS_Rx_ISE | CMDSTS_Rx_CRCE | CMDSTS_Rx_FAE)) { 2369 if_statinc(ifp, if_ierrors); 2370 if ((cmdsts & CMDSTS_Rx_RXA) != 0 && 2371 (cmdsts & CMDSTS_Rx_RXO) == 0) { 2372 /* Receive overrun handled elsewhere. */ 2373 printf("%s: receive descriptor error\n", 2374 device_xname(sc->sc_dev)); 2375 } 2376 #define PRINTERR(bit, str) \ 2377 if ((ifp->if_flags & IFF_DEBUG) != 0 && \ 2378 (cmdsts & (bit)) != 0) \ 2379 printf("%s: %s\n", device_xname(sc->sc_dev), str) 2380 PRINTERR(CMDSTS_Rx_LONG, "Too long packet"); 2381 PRINTERR(CMDSTS_Rx_RUNT, "runt packet"); 2382 PRINTERR(CMDSTS_Rx_ISE, "invalid symbol error"); 2383 PRINTERR(CMDSTS_Rx_CRCE, "CRC error"); 2384 PRINTERR(CMDSTS_Rx_FAE, "frame alignment error"); 2385 #undef PRINTERR 2386 sip_init_rxdesc(sc, i); 2387 continue; 2388 } 2389 2390 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 2391 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_POSTREAD); 2392 2393 /* 2394 * No errors; receive the packet. Note, the SiS 900 2395 * includes the CRC with every packet. 2396 */ 2397 len = CMDSTS_SIZE(sc, cmdsts) - ETHER_CRC_LEN; 2398 2399 #ifdef __NO_STRICT_ALIGNMENT 2400 /* 2401 * If the packet is small enough to fit in a 2402 * single header mbuf, allocate one and copy 2403 * the data into it. This greatly reduces 2404 * memory consumption when we receive lots 2405 * of small packets. 2406 * 2407 * Otherwise, we add a new buffer to the receive 2408 * chain. If this fails, we drop the packet and 2409 * recycle the old buffer. 
2410 */ 2411 if (sip_copy_small != 0 && len <= MHLEN) { 2412 MGETHDR(m, M_DONTWAIT, MT_DATA); 2413 if (m == NULL) 2414 goto dropit; 2415 MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner); 2416 memcpy(mtod(m, void *), 2417 mtod(rxs->rxs_mbuf, void *), len); 2418 sip_init_rxdesc(sc, i); 2419 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 2420 rxs->rxs_dmamap->dm_mapsize, 2421 BUS_DMASYNC_PREREAD); 2422 } else { 2423 m = rxs->rxs_mbuf; 2424 if (sipcom_add_rxbuf(sc, i) != 0) { 2425 dropit: 2426 if_statinc(ifp, if_ierrors); 2427 sip_init_rxdesc(sc, i); 2428 bus_dmamap_sync(sc->sc_dmat, 2429 rxs->rxs_dmamap, 0, 2430 rxs->rxs_dmamap->dm_mapsize, 2431 BUS_DMASYNC_PREREAD); 2432 continue; 2433 } 2434 } 2435 #else 2436 /* 2437 * The SiS 900's receive buffers must be 4-byte aligned. 2438 * But this means that the data after the Ethernet header 2439 * is misaligned. We must allocate a new buffer and 2440 * copy the data, shifted forward 2 bytes. 2441 */ 2442 MGETHDR(m, M_DONTWAIT, MT_DATA); 2443 if (m == NULL) { 2444 dropit: 2445 if_statinc(ifp, if_ierrors); 2446 sip_init_rxdesc(sc, i); 2447 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 2448 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 2449 continue; 2450 } 2451 MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner); 2452 if (len > (MHLEN - 2)) { 2453 MCLGET(m, M_DONTWAIT); 2454 if ((m->m_flags & M_EXT) == 0) { 2455 m_freem(m); 2456 goto dropit; 2457 } 2458 } 2459 m->m_data += 2; 2460 2461 /* 2462 * Note that we use clusters for incoming frames, so the 2463 * buffer is virtually contiguous. 2464 */ 2465 memcpy(mtod(m, void *), mtod(rxs->rxs_mbuf, void *), len); 2466 2467 /* Allow the receive descriptor to continue using its mbuf. */ 2468 sip_init_rxdesc(sc, i); 2469 bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0, 2470 rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD); 2471 #endif /* __NO_STRICT_ALIGNMENT */ 2472 2473 m_set_rcvif(m, ifp); 2474 m->m_pkthdr.len = m->m_len = len; 2475 2476 /* Pass it on. 
*/ 2477 if_percpuq_enqueue(ifp->if_percpuq, m); 2478 } 2479 2480 /* Update the receive pointer. */ 2481 sc->sc_rxptr = i; 2482 } 2483 2484 /* 2485 * sip_tick: 2486 * 2487 * One second timer, used to tick the MII. 2488 */ 2489 static void 2490 sipcom_tick(void *arg) 2491 { 2492 struct sip_softc *sc = arg; 2493 int s; 2494 2495 s = splnet(); 2496 #ifdef SIP_EVENT_COUNTERS 2497 if (sc->sc_gigabit) { 2498 /* Read PAUSE related counts from MIB registers. */ 2499 sc->sc_ev_rxpause.ev_count += 2500 bus_space_read_4(sc->sc_st, sc->sc_sh, 2501 SIP_NS_MIB(MIB_RXPauseFrames)) & 0xffff; 2502 sc->sc_ev_txpause.ev_count += 2503 bus_space_read_4(sc->sc_st, sc->sc_sh, 2504 SIP_NS_MIB(MIB_TXPauseFrames)) & 0xffff; 2505 bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_NS_MIBC, MIBC_ACLR); 2506 } 2507 #endif /* SIP_EVENT_COUNTERS */ 2508 mii_tick(&sc->sc_mii); 2509 splx(s); 2510 2511 callout_schedule(&sc->sc_tick_ch, hz); 2512 } 2513 2514 /* 2515 * sip_reset: 2516 * 2517 * Perform a soft reset on the SiS 900. 2518 */ 2519 static bool 2520 sipcom_reset(struct sip_softc *sc) 2521 { 2522 bus_space_tag_t st = sc->sc_st; 2523 bus_space_handle_t sh = sc->sc_sh; 2524 int i; 2525 2526 bus_space_write_4(st, sh, SIP_IER, 0); 2527 bus_space_write_4(st, sh, SIP_IMR, 0); 2528 bus_space_write_4(st, sh, SIP_RFCR, 0); 2529 bus_space_write_4(st, sh, SIP_CR, CR_RST); 2530 2531 for (i = 0; i < SIP_TIMEOUT; i++) { 2532 if ((bus_space_read_4(st, sh, SIP_CR) & CR_RST) == 0) 2533 break; 2534 delay(2); 2535 } 2536 2537 if (i == SIP_TIMEOUT) { 2538 printf("%s: reset failed to complete\n", 2539 device_xname(sc->sc_dev)); 2540 return false; 2541 } 2542 2543 delay(1000); 2544 2545 if (sc->sc_gigabit) { 2546 /* 2547 * Set the general purpose I/O bits. Do it here in case we 2548 * need to have GPIO set up to talk to the media interface. 
2549 */ 2550 bus_space_write_4(st, sh, SIP_GPIOR, sc->sc_gpior); 2551 delay(1000); 2552 } 2553 return true; 2554 } 2555 2556 static void 2557 sipcom_dp83820_init(struct sip_softc *sc, uint64_t capenable) 2558 { 2559 uint32_t reg; 2560 bus_space_tag_t st = sc->sc_st; 2561 bus_space_handle_t sh = sc->sc_sh; 2562 /* 2563 * Initialize the VLAN/IP receive control register. 2564 * We enable checksum computation on all incoming 2565 * packets, and do not reject packets w/ bad checksums. 2566 */ 2567 reg = 0; 2568 if (capenable & 2569 (IFCAP_CSUM_IPv4_Rx | IFCAP_CSUM_TCPv4_Rx | IFCAP_CSUM_UDPv4_Rx)) 2570 reg |= VRCR_IPEN; 2571 if (VLAN_ATTACHED(&sc->sc_ethercom)) 2572 reg |= VRCR_VTDEN | VRCR_VTREN; 2573 bus_space_write_4(st, sh, SIP_VRCR, reg); 2574 2575 /* 2576 * Initialize the VLAN/IP transmit control register. 2577 * We enable outgoing checksum computation on a 2578 * per-packet basis. 2579 */ 2580 reg = 0; 2581 if (capenable & 2582 (IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_UDPv4_Tx)) 2583 reg |= VTCR_PPCHK; 2584 if (VLAN_ATTACHED(&sc->sc_ethercom)) 2585 reg |= VTCR_VPPTI; 2586 bus_space_write_4(st, sh, SIP_VTCR, reg); 2587 2588 /* 2589 * If we're using VLANs, initialize the VLAN data register. 2590 * To understand why we bswap the VLAN Ethertype, see section 2591 * 4.2.36 of the DP83820 manual. 2592 */ 2593 if (VLAN_ATTACHED(&sc->sc_ethercom)) 2594 bus_space_write_4(st, sh, SIP_VDR, bswap16(ETHERTYPE_VLAN)); 2595 } 2596 2597 /* 2598 * sip_init: [ ifnet interface function ] 2599 * 2600 * Initialize the interface. Must be called at splnet(). 2601 */ 2602 static int 2603 sipcom_init(struct ifnet *ifp) 2604 { 2605 struct sip_softc *sc = ifp->if_softc; 2606 bus_space_tag_t st = sc->sc_st; 2607 bus_space_handle_t sh = sc->sc_sh; 2608 struct sip_txsoft *txs; 2609 struct sip_rxsoft *rxs; 2610 int i, error = 0; 2611 2612 if (device_is_active(sc->sc_dev)) { 2613 /* 2614 * Cancel any pending I/O. 
2615 */ 2616 sipcom_stop(ifp, 0); 2617 } else if (!pmf_device_subtree_resume(sc->sc_dev, &sc->sc_qual) || 2618 !device_is_active(sc->sc_dev)) 2619 return 0; 2620 2621 /* 2622 * Reset the chip to a known state. 2623 */ 2624 if (!sipcom_reset(sc)) 2625 return EBUSY; 2626 2627 if (SIP_CHIP_MODEL(sc, PCI_VENDOR_NS, PCI_PRODUCT_NS_DP83815)) { 2628 /* 2629 * DP83815 manual, page 78: 2630 * 4.4 Recommended Registers Configuration 2631 * For optimum performance of the DP83815, version noted 2632 * as DP83815CVNG (SRR = 203h), the listed register 2633 * modifications must be followed in sequence... 2634 * 2635 * It's not clear if this should be 302h or 203h because that 2636 * chip name is listed as SRR 302h in the description of the 2637 * SRR register. However, my revision 302h DP83815 on the 2638 * Netgear FA311 purchased in 02/2001 needs these settings 2639 * to avoid tons of errors in AcceptPerfectMatch (non- 2640 * IFF_PROMISC) mode. I do not know if other revisions need 2641 * this set or not. [briggs -- 09 March 2001] 2642 * 2643 * Note that only the low-order 12 bits of 0xe4 are documented 2644 * and that this sets reserved bits in that register. 2645 */ 2646 bus_space_write_4(st, sh, 0x00cc, 0x0001); 2647 2648 bus_space_write_4(st, sh, 0x00e4, 0x189C); 2649 bus_space_write_4(st, sh, 0x00fc, 0x0000); 2650 bus_space_write_4(st, sh, 0x00f4, 0x5040); 2651 bus_space_write_4(st, sh, 0x00f8, 0x008c); 2652 2653 bus_space_write_4(st, sh, 0x00cc, 0x0000); 2654 } 2655 2656 /* Initialize the transmit descriptor ring. */ 2657 sip_init_txring(sc); 2658 2659 /* 2660 * Initialize the transmit job descriptors. 2661 */ 2662 SIMPLEQ_INIT(&sc->sc_txfreeq); 2663 SIMPLEQ_INIT(&sc->sc_txdirtyq); 2664 for (i = 0; i < SIP_TXQUEUELEN; i++) { 2665 txs = &sc->sc_txsoft[i]; 2666 txs->txs_mbuf = NULL; 2667 SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q); 2668 } 2669 2670 /* 2671 * Initialize the receive descriptor and receive job 2672 * descriptor rings. 
2673 */ 2674 for (i = 0; i < sc->sc_parm->p_nrxdesc; i++) { 2675 rxs = &sc->sc_rxsoft[i]; 2676 if (rxs->rxs_mbuf == NULL) { 2677 if ((error = sipcom_add_rxbuf(sc, i)) != 0) { 2678 printf("%s: unable to allocate or map rx " 2679 "buffer %d, error = %d\n", 2680 device_xname(sc->sc_dev), i, error); 2681 /* 2682 * XXX Should attempt to run with fewer receive 2683 * XXX buffers instead of just failing. 2684 */ 2685 sipcom_rxdrain(sc); 2686 goto out; 2687 } 2688 } else 2689 sip_init_rxdesc(sc, i); 2690 } 2691 sc->sc_rxptr = 0; 2692 sc->sc_rxdiscard = 0; 2693 sip_rxchain_reset(sc); 2694 2695 /* 2696 * Set the configuration register; it's already initialized 2697 * in sip_attach(). 2698 */ 2699 bus_space_write_4(st, sh, SIP_CFG, sc->sc_cfg); 2700 2701 /* 2702 * Initialize the prototype TXCFG register. 2703 */ 2704 if (sc->sc_gigabit) { 2705 sc->sc_txcfg = sc->sc_bits.b_txcfg_mxdma_512; 2706 sc->sc_rxcfg = sc->sc_bits.b_rxcfg_mxdma_512; 2707 } else if ((SIP_SIS900_REV(sc, SIS_REV_635) || 2708 SIP_SIS900_REV(sc, SIS_REV_960) || 2709 SIP_SIS900_REV(sc, SIS_REV_900B)) && 2710 (sc->sc_cfg & CFG_EDBMASTEN)) { 2711 sc->sc_txcfg = sc->sc_bits.b_txcfg_mxdma_64; 2712 sc->sc_rxcfg = sc->sc_bits.b_rxcfg_mxdma_64; 2713 } else { 2714 sc->sc_txcfg = sc->sc_bits.b_txcfg_mxdma_512; 2715 sc->sc_rxcfg = sc->sc_bits.b_rxcfg_mxdma_512; 2716 } 2717 2718 sc->sc_txcfg |= TXCFG_ATP | 2719 __SHIFTIN(sc->sc_tx_fill_thresh, sc->sc_bits.b_txcfg_flth_mask) | 2720 sc->sc_tx_drain_thresh; 2721 bus_space_write_4(st, sh, sc->sc_regs.r_txcfg, sc->sc_txcfg); 2722 2723 /* 2724 * Initialize the receive drain threshold if we have never 2725 * done so. 2726 */ 2727 if (sc->sc_rx_drain_thresh == 0) { 2728 /* 2729 * XXX This value should be tuned. This is set to the 2730 * maximum of 248 bytes, and we may be able to improve 2731 * performance by decreasing it (although we should never 2732 * set this value lower than 2; 14 bytes are required to 2733 * filter the packet). 
2734 */ 2735 sc->sc_rx_drain_thresh = __SHIFTOUT_MASK(RXCFG_DRTH_MASK); 2736 } 2737 2738 /* 2739 * Initialize the prototype RXCFG register. 2740 */ 2741 sc->sc_rxcfg |= __SHIFTIN(sc->sc_rx_drain_thresh, RXCFG_DRTH_MASK); 2742 /* 2743 * Accept long packets (including FCS) so we can handle 2744 * 802.1q-tagged frames and jumbo frames properly. 2745 */ 2746 if ((sc->sc_gigabit && ifp->if_mtu > ETHERMTU) || 2747 (sc->sc_ethercom.ec_capenable & ETHERCAP_VLAN_MTU)) 2748 sc->sc_rxcfg |= RXCFG_ALP; 2749 2750 /* 2751 * Checksum offloading is disabled if the user selects an MTU 2752 * larger than 8109. (FreeBSD says 8152, but there is empirical 2753 * evidence that >8109 does not work on some boards, such as the 2754 * Planex GN-1000TE). 2755 */ 2756 if (sc->sc_gigabit && ifp->if_mtu > 8109 && 2757 (ifp->if_capenable & 2758 (IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx | 2759 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | 2760 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx))) { 2761 printf("%s: Checksum offloading does not work if MTU > 8109 - " 2762 "disabled.\n", device_xname(sc->sc_dev)); 2763 ifp->if_capenable &= 2764 ~(IFCAP_CSUM_IPv4_Tx | IFCAP_CSUM_IPv4_Rx | 2765 IFCAP_CSUM_TCPv4_Tx | IFCAP_CSUM_TCPv4_Rx | 2766 IFCAP_CSUM_UDPv4_Tx | IFCAP_CSUM_UDPv4_Rx); 2767 ifp->if_csum_flags_tx = 0; 2768 ifp->if_csum_flags_rx = 0; 2769 } 2770 2771 bus_space_write_4(st, sh, sc->sc_regs.r_rxcfg, sc->sc_rxcfg); 2772 2773 if (sc->sc_gigabit) 2774 sipcom_dp83820_init(sc, ifp->if_capenable); 2775 2776 /* 2777 * Give the transmit and receive rings to the chip. 2778 */ 2779 sip_set_txdp(sc, SIP_CDTXADDR(sc, sc->sc_txnext)); 2780 sip_set_rxdp(sc, SIP_CDRXADDR(sc, sc->sc_rxptr)); 2781 2782 /* 2783 * Initialize the interrupt mask. 
2784 */ 2785 sc->sc_imr = sc->sc_bits.b_isr_dperr | 2786 sc->sc_bits.b_isr_sserr | 2787 sc->sc_bits.b_isr_rmabt | 2788 sc->sc_bits.b_isr_rtabt | 2789 ISR_RXSOVR | ISR_TXURN | ISR_TXDESC | ISR_TXIDLE | ISR_RXORN | 2790 ISR_RXIDLE | ISR_RXDESC; 2791 bus_space_write_4(st, sh, SIP_IMR, sc->sc_imr); 2792 2793 /* Set up the receive filter. */ 2794 (*sc->sc_model->sip_variant->sipv_set_filter)(sc); 2795 2796 /* 2797 * Tune sc_rx_flow_thresh. 2798 * XXX "More than 8KB" is too short for jumbo frames. 2799 * XXX TODO: Threshold value should be user-settable. 2800 */ 2801 sc->sc_rx_flow_thresh = (PCR_PS_STHI_8 | PCR_PS_STLO_4 | 2802 PCR_PS_FFHI_8 | PCR_PS_FFLO_4 | 2803 (PCR_PAUSE_CNT & PCR_PAUSE_CNT_MASK)); 2804 2805 /* 2806 * Set the current media. Do this after initializing the prototype 2807 * IMR, since sip_mii_statchg() modifies the IMR for 802.3x flow 2808 * control. 2809 */ 2810 if ((error = ether_mediachange(ifp)) != 0) 2811 goto out; 2812 2813 /* 2814 * Set the interrupt hold-off timer to 100us. 2815 */ 2816 if (sc->sc_gigabit) 2817 bus_space_write_4(st, sh, SIP_IHR, 0x01); 2818 2819 /* 2820 * Enable interrupts. 2821 */ 2822 bus_space_write_4(st, sh, SIP_IER, IER_IE); 2823 2824 /* 2825 * Start the transmit and receive processes. 2826 */ 2827 bus_space_write_4(st, sh, SIP_CR, CR_RXE | CR_TXE); 2828 2829 /* 2830 * Start the one second MII clock. 2831 */ 2832 callout_schedule(&sc->sc_tick_ch, hz); 2833 2834 /* 2835 * ...all done! 2836 */ 2837 ifp->if_flags |= IFF_RUNNING; 2838 sc->sc_if_flags = ifp->if_flags; 2839 sc->sc_prev.ec_capenable = sc->sc_ethercom.ec_capenable; 2840 sc->sc_prev.is_vlan = VLAN_ATTACHED(&(sc)->sc_ethercom); 2841 sc->sc_prev.if_capenable = ifp->if_capenable; 2842 2843 out: 2844 if (error) 2845 printf("%s: interface not running\n", device_xname(sc->sc_dev)); 2846 return error; 2847 } 2848 2849 /* 2850 * sip_drain: 2851 * 2852 * Drain the receive queue. 
 */
static void
sipcom_rxdrain(struct sip_softc *sc)
{
	struct sip_rxsoft *rxs;
	int i;

	/* Unload and free every mbuf still held by the receive ring. */
	for (i = 0; i < sc->sc_parm->p_nrxdesc; i++) {
		rxs = &sc->sc_rxsoft[i];
		if (rxs->rxs_mbuf != NULL) {
			bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);
			m_freem(rxs->rxs_mbuf);
			rxs->rxs_mbuf = NULL;
		}
	}
}

/*
 * sip_stop:		[ ifnet interface function ]
 *
 *	Stop transmission on the interface.
 */
static void
sipcom_stop(struct ifnet *ifp, int disable)
{
	struct sip_softc *sc = ifp->if_softc;
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct sip_txsoft *txs;
	uint32_t cmdsts = 0;		/* DEBUG */

	/*
	 * Stop the one second clock.
	 */
	callout_stop(&sc->sc_tick_ch);

	/* Down the MII. */
	mii_down(&sc->sc_mii);

	/* Only touch the hardware if the device is still powered/active. */
	if (device_is_active(sc->sc_dev)) {
		/*
		 * Disable interrupts.
		 */
		bus_space_write_4(st, sh, SIP_IER, 0);

		/*
		 * Stop receiver and transmitter.
		 */
		bus_space_write_4(st, sh, SIP_CR, CR_RXD | CR_TXD);
	}

	/*
	 * Release any queued transmit buffers.
	 */
	while ((txs = SIMPLEQ_FIRST(&sc->sc_txdirtyq)) != NULL) {
		/*
		 * Sanity check (debug only): the last dirty job's final
		 * descriptor should have had the interrupt bit set.
		 */
		if ((ifp->if_flags & IFF_DEBUG) != 0 &&
		    SIMPLEQ_NEXT(txs, txs_q) == NULL &&
		    (sc->sc_txdescs[
			txs->txs_lastdesc].sipd_words[
			sc->sc_cmdsts_idx] & htole32(CMDSTS_INTR)) == 0)
			printf("%s: sip_stop: last descriptor does not "
			    "have INTR bit set\n", device_xname(sc->sc_dev));
		SIMPLEQ_REMOVE_HEAD(&sc->sc_txdirtyq, txs_q);
#ifdef DIAGNOSTIC
		if (txs->txs_mbuf == NULL) {
			printf("%s: dirty txsoft with no mbuf chain\n",
			    device_xname(sc->sc_dev));
			panic("sip_stop");
		}
#endif
		/* Accumulate status across all dirty jobs for the debug
		 * check after the loop. */
		cmdsts |=		/* DEBUG */
		    le32toh(sc->sc_txdescs[
			txs->txs_lastdesc].sipd_words[sc->sc_cmdsts_idx]);
		bus_dmamap_unload(sc->sc_dmat, txs->txs_dmamap);
		m_freem(txs->txs_mbuf);
		txs->txs_mbuf = NULL;
		SIMPLEQ_INSERT_TAIL(&sc->sc_txfreeq, txs, txs_q);
	}

	/*
	 * Mark the interface down and cancel the watchdog timer.
	 */
	ifp->if_flags &= ~IFF_RUNNING;
	ifp->if_timer = 0;

	if (disable)
		pmf_device_recursive_suspend(sc->sc_dev, &sc->sc_qual);

	if ((ifp->if_flags & IFF_DEBUG) != 0 &&
	    (cmdsts & CMDSTS_INTR) == 0 && sc->sc_txfree != sc->sc_ntxdesc)
		printf("%s: sip_stop: no INTR bits set in dirty tx "
		    "descriptors\n", device_xname(sc->sc_dev));
}

/*
 * sip_read_eeprom:
 *
 *	Read data from the serial EEPROM.
 *	Bit-bangs the three-wire EEPROM protocol through the EROMAR
 *	register: chip select, READ opcode, 6-bit address, then 16
 *	data bits clocked out MSB first.
 */
static void
sipcom_read_eeprom(struct sip_softc *sc, int word, int wordcnt,
    uint16_t *data)
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	uint16_t reg;
	int i, x;

	for (i = 0; i < wordcnt; i++) {
		/* Send CHIP SELECT. */
		reg = EROMAR_EECS;
		bus_space_write_4(st, sh, SIP_EROMAR, reg);

		/* Shift in the READ opcode. */
		for (x = 3; x > 0; x--) {
			if (SIP_EEPROM_OPC_READ & (1 << (x - 1)))
				reg |= EROMAR_EEDI;
			else
				reg &= ~EROMAR_EEDI;
			bus_space_write_4(st, sh, SIP_EROMAR, reg);
			bus_space_write_4(st, sh, SIP_EROMAR,
			    reg | EROMAR_EESK);
			delay(4);
			bus_space_write_4(st, sh, SIP_EROMAR, reg);
			delay(4);
		}

		/* Shift in address. */
		for (x = 6; x > 0; x--) {
			if ((word + i) & (1 << (x - 1)))
				reg |= EROMAR_EEDI;
			else
				reg &= ~EROMAR_EEDI;
			bus_space_write_4(st, sh, SIP_EROMAR, reg);
			bus_space_write_4(st, sh, SIP_EROMAR,
			    reg | EROMAR_EESK);
			delay(4);
			bus_space_write_4(st, sh, SIP_EROMAR, reg);
			delay(4);
		}

		/* Shift out data. */
		reg = EROMAR_EECS;
		data[i] = 0;
		for (x = 16; x > 0; x--) {
			bus_space_write_4(st, sh, SIP_EROMAR,
			    reg | EROMAR_EESK);
			delay(4);
			if (bus_space_read_4(st, sh, SIP_EROMAR) & EROMAR_EEDO)
				data[i] |= (1 << (x - 1));
			bus_space_write_4(st, sh, SIP_EROMAR, reg);
			delay(4);
		}

		/* Clear CHIP SELECT. */
		bus_space_write_4(st, sh, SIP_EROMAR, 0);
		delay(4);
	}
}

/*
 * sipcom_add_rxbuf:
 *
 *	Add a receive buffer to the indicated descriptor.
 *	Returns 0 on success, ENOBUFS if no mbuf/cluster is available.
 */
static int
sipcom_add_rxbuf(struct sip_softc *sc, int idx)
{
	struct sip_rxsoft *rxs = &sc->sc_rxsoft[idx];
	struct mbuf *m;
	int error;

	MGETHDR(m, M_DONTWAIT, MT_DATA);
	if (m == NULL)
		return ENOBUFS;
	MCLAIM(m, &sc->sc_ethercom.ec_rx_mowner);

	MCLGET(m, M_DONTWAIT);
	if ((m->m_flags & M_EXT) == 0) {
		m_freem(m);
		return ENOBUFS;
	}

	/* XXX I don't believe this is necessary. --dyoung */
	if (sc->sc_gigabit)
		m->m_len = sc->sc_parm->p_rxbuf_len;

	/* Release the previous buffer's DMA resources, if any. */
	if (rxs->rxs_mbuf != NULL)
		bus_dmamap_unload(sc->sc_dmat, rxs->rxs_dmamap);

	rxs->rxs_mbuf = m;

	error = bus_dmamap_load(sc->sc_dmat, rxs->rxs_dmamap,
	    m->m_ext.ext_buf, m->m_ext.ext_size, NULL,
	    BUS_DMA_READ | BUS_DMA_NOWAIT);
	if (error) {
		printf("%s: can't load rx DMA map %d, error = %d\n",
		    device_xname(sc->sc_dev), idx, error);
		panic("%s", __func__);		/* XXX */
	}

	bus_dmamap_sync(sc->sc_dmat, rxs->rxs_dmamap, 0,
	    rxs->rxs_dmamap->dm_mapsize, BUS_DMASYNC_PREREAD);

	sip_init_rxdesc(sc, idx);

	return 0;
}

/*
 * sip_sis900_set_filter:
 *
 *	Set up the receive filter.
 */
static void
sipcom_sis900_set_filter(struct sip_softc *sc)
{
	bus_space_tag_t st = sc->sc_st;
	bus_space_handle_t sh = sc->sc_sh;
	struct ethercom *ec = &sc->sc_ethercom;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	struct ether_multi *enm;
	const uint8_t *cp;
	struct ether_multistep step;
	uint32_t crc, mchash[16];

	/*
	 * Initialize the prototype RFCR.
	 */
	sc->sc_rfcr = RFCR_RFEN;
	if (ifp->if_flags & IFF_BROADCAST)
		sc->sc_rfcr |= RFCR_AAB;
	if (ifp->if_flags & IFF_PROMISC) {
		sc->sc_rfcr |= RFCR_AAP;
		goto allmulti;
	}

	/*
	 * Set up the multicast address filter by passing all multicast
	 * addresses through a CRC generator, and then using the high-order
	 * 6 bits as an index into the 128 bit multicast hash table (only
	 * the lower 16 bits of each 32 bit multicast hash register are
	 * valid).  The high order bits select the register, while the
	 * rest of the bits select the bit within the register.
	 */

	memset(mchash, 0, sizeof(mchash));

	/*
	 * SiS900 (at least SiS963) requires us to register the address of
	 * the PAUSE packet (01:80:c2:00:00:01) into the address filter.
	 */
	/* Pre-computed CRC32 of the 802.3x PAUSE multicast address. */
	crc = 0x0ed423f9;

	if (SIP_SIS900_REV(sc, SIS_REV_635) ||
	    SIP_SIS900_REV(sc, SIS_REV_960) ||
	    SIP_SIS900_REV(sc, SIS_REV_900B)) {
		/* Just want the 8 most significant bits. */
		crc >>= 24;
	} else {
		/* Just want the 7 most significant bits. */
		crc >>= 25;
	}

	/* Set the corresponding bit in the hash table. */
	mchash[crc >> 4] |= 1 << (crc & 0xf);

	ETHER_LOCK(ec);
	ETHER_FIRST_MULTI(step, ec, enm);
	while (enm != NULL) {
		if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) {
			/*
			 * We must listen to a range of multicast addresses.
			 * For now, just accept all multicasts, rather than
			 * trying to set only those filter bits needed to match
			 * the range.  (At this time, the only use of address
			 * ranges is for IP multicast routing, for which the
			 * range is big enough to require all bits set.)
			 */
			ETHER_UNLOCK(ec);
			goto allmulti;
		}

		crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN);

		if (SIP_SIS900_REV(sc, SIS_REV_635) ||
		    SIP_SIS900_REV(sc, SIS_REV_960) ||
		    SIP_SIS900_REV(sc, SIS_REV_900B)) {
			/* Just want the 8 most significant bits. */
			crc >>= 24;
		} else {
			/* Just want the 7 most significant bits. */
			crc >>= 25;
		}

		/* Set the corresponding bit in the hash table. */
		mchash[crc >> 4] |= 1 << (crc & 0xf);

		ETHER_NEXT_MULTI(step, enm);
	}
	ETHER_UNLOCK(ec);

	ifp->if_flags &= ~IFF_ALLMULTI;
	goto setit;

 allmulti:
	ifp->if_flags |= IFF_ALLMULTI;
	sc->sc_rfcr |= RFCR_AAM;

 setit:
	/* Write one filter word: select the address, then write the data. */
#define	FILTER_EMIT(addr, data)						\
	bus_space_write_4(st, sh, SIP_RFCR, (addr));			\
	delay(1);							\
	bus_space_write_4(st, sh, SIP_RFDR, (data));			\
	delay(1)

	/*
	 * Disable receive filter, and program the node address.
	 */
	cp = CLLADDR(ifp->if_sadl);
	FILTER_EMIT(RFCR_RFADDR_NODE0, (cp[1] << 8) | cp[0]);
	FILTER_EMIT(RFCR_RFADDR_NODE2, (cp[3] << 8) | cp[2]);
	FILTER_EMIT(RFCR_RFADDR_NODE4, (cp[5] << 8) | cp[4]);

	if ((ifp->if_flags & IFF_ALLMULTI) == 0) {
		/*
		 * Program the multicast hash table.
		 */
		FILTER_EMIT(RFCR_RFADDR_MC0, mchash[0]);
		FILTER_EMIT(RFCR_RFADDR_MC1, mchash[1]);
		FILTER_EMIT(RFCR_RFADDR_MC2, mchash[2]);
		FILTER_EMIT(RFCR_RFADDR_MC3, mchash[3]);
		FILTER_EMIT(RFCR_RFADDR_MC4, mchash[4]);
		FILTER_EMIT(RFCR_RFADDR_MC5, mchash[5]);
		FILTER_EMIT(RFCR_RFADDR_MC6, mchash[6]);
		FILTER_EMIT(RFCR_RFADDR_MC7, mchash[7]);
		/* Newer revisions have a 256-bit hash table (16 words). */
		if (SIP_SIS900_REV(sc, SIS_REV_635) ||
		    SIP_SIS900_REV(sc, SIS_REV_960) ||
		    SIP_SIS900_REV(sc, SIS_REV_900B)) {
			FILTER_EMIT(RFCR_RFADDR_MC8, mchash[8]);
			FILTER_EMIT(RFCR_RFADDR_MC9, mchash[9]);
			FILTER_EMIT(RFCR_RFADDR_MC10, mchash[10]);
			FILTER_EMIT(RFCR_RFADDR_MC11, mchash[11]);
			FILTER_EMIT(RFCR_RFADDR_MC12, mchash[12]);
			FILTER_EMIT(RFCR_RFADDR_MC13, mchash[13]);
			FILTER_EMIT(RFCR_RFADDR_MC14, mchash[14]);
			FILTER_EMIT(RFCR_RFADDR_MC15, mchash[15]);
		}
	}
#undef FILTER_EMIT

	/*
	 * Re-enable the receiver filter.
	 */
	bus_space_write_4(st, sh, SIP_RFCR, sc->sc_rfcr);
}

/*
 * sip_dp83815_set_filter:
 *
 *	Set up the receive filter.
3214 */ 3215 static void 3216 sipcom_dp83815_set_filter(struct sip_softc *sc) 3217 { 3218 bus_space_tag_t st = sc->sc_st; 3219 bus_space_handle_t sh = sc->sc_sh; 3220 struct ethercom *ec = &sc->sc_ethercom; 3221 struct ifnet *ifp = &sc->sc_ethercom.ec_if; 3222 struct ether_multi *enm; 3223 const uint8_t *cp; 3224 struct ether_multistep step; 3225 uint32_t crc, hash, slot, bit; 3226 #define MCHASH_NWORDS_83820 128 3227 #define MCHASH_NWORDS_83815 32 3228 #define MCHASH_NWORDS MAX(MCHASH_NWORDS_83820, MCHASH_NWORDS_83815) 3229 uint16_t mchash[MCHASH_NWORDS]; 3230 int i; 3231 3232 /* 3233 * Initialize the prototype RFCR. 3234 * Enable the receive filter, and accept on 3235 * Perfect (destination address) Match 3236 * If IFF_BROADCAST, also accept all broadcast packets. 3237 * If IFF_PROMISC, accept all unicast packets (and later, set 3238 * IFF_ALLMULTI and accept all multicast, too). 3239 */ 3240 sc->sc_rfcr = RFCR_RFEN | RFCR_APM; 3241 if (ifp->if_flags & IFF_BROADCAST) 3242 sc->sc_rfcr |= RFCR_AAB; 3243 if (ifp->if_flags & IFF_PROMISC) { 3244 sc->sc_rfcr |= RFCR_AAP; 3245 goto allmulti; 3246 } 3247 3248 /* 3249 * Set up the DP83820/DP83815 multicast address filter by 3250 * passing all multicast addresses through a CRC generator, 3251 * and then using the high-order 11/9 bits as an index into 3252 * the 2048/512 bit multicast hash table. The high-order 3253 * 7/5 bits select the slot, while the low-order 4 bits 3254 * select the bit within the slot. Note that only the low 3255 * 16-bits of each filter word are used, and there are 3256 * 128/32 filter words. 3257 */ 3258 3259 memset(mchash, 0, sizeof(mchash)); 3260 3261 ifp->if_flags &= ~IFF_ALLMULTI; 3262 ETHER_FIRST_MULTI(step, ec, enm); 3263 if (enm == NULL) 3264 goto setit; 3265 while (enm != NULL) { 3266 if (memcmp(enm->enm_addrlo, enm->enm_addrhi, ETHER_ADDR_LEN)) { 3267 /* 3268 * We must listen to a range of multicast addresses. 
3269 * For now, just accept all multicasts, rather than 3270 * trying to set only those filter bits needed to match 3271 * the range. (At this time, the only use of address 3272 * ranges is for IP multicast routing, for which the 3273 * range is big enough to require all bits set.) 3274 */ 3275 goto allmulti; 3276 } 3277 3278 crc = ether_crc32_be(enm->enm_addrlo, ETHER_ADDR_LEN); 3279 3280 if (sc->sc_gigabit) { 3281 /* Just want the 11 most significant bits. */ 3282 hash = crc >> 21; 3283 } else { 3284 /* Just want the 9 most significant bits. */ 3285 hash = crc >> 23; 3286 } 3287 3288 slot = hash >> 4; 3289 bit = hash & 0xf; 3290 3291 /* Set the corresponding bit in the hash table. */ 3292 mchash[slot] |= 1 << bit; 3293 3294 ETHER_NEXT_MULTI(step, enm); 3295 } 3296 sc->sc_rfcr |= RFCR_MHEN; 3297 goto setit; 3298 3299 allmulti: 3300 ifp->if_flags |= IFF_ALLMULTI; 3301 sc->sc_rfcr |= RFCR_AAM; 3302 3303 setit: 3304 #define FILTER_EMIT(addr, data) \ 3305 bus_space_write_4(st, sh, SIP_RFCR, (addr)); \ 3306 delay(1); \ 3307 bus_space_write_4(st, sh, SIP_RFDR, (data)); \ 3308 delay(1) 3309 3310 /* 3311 * Disable receive filter, and program the node address. 3312 */ 3313 cp = CLLADDR(ifp->if_sadl); 3314 FILTER_EMIT(RFCR_NS_RFADDR_PMATCH0, (cp[1] << 8) | cp[0]); 3315 FILTER_EMIT(RFCR_NS_RFADDR_PMATCH2, (cp[3] << 8) | cp[2]); 3316 FILTER_EMIT(RFCR_NS_RFADDR_PMATCH4, (cp[5] << 8) | cp[4]); 3317 3318 if ((ifp->if_flags & IFF_ALLMULTI) == 0) { 3319 int nwords = 3320 sc->sc_gigabit ? MCHASH_NWORDS_83820 : MCHASH_NWORDS_83815; 3321 /* 3322 * Program the multicast hash table. 3323 */ 3324 for (i = 0; i < nwords; i++) { 3325 FILTER_EMIT(sc->sc_parm->p_filtmem + (i * 2), mchash[i]); 3326 } 3327 } 3328 #undef FILTER_EMIT 3329 #undef MCHASH_NWORDS 3330 #undef MCHASH_NWORDS_83815 3331 #undef MCHASH_NWORDS_83820 3332 3333 /* 3334 * Re-enable the receiver filter. 
3335 */ 3336 bus_space_write_4(st, sh, SIP_RFCR, sc->sc_rfcr); 3337 } 3338 3339 /* 3340 * sip_dp83820_mii_readreg: [mii interface function] 3341 * 3342 * Read a PHY register on the MII of the DP83820. 3343 */ 3344 static int 3345 sipcom_dp83820_mii_readreg(device_t self, int phy, int reg, uint16_t *val) 3346 { 3347 struct sip_softc *sc = device_private(self); 3348 3349 if (sc->sc_cfg & CFG_TBI_EN) { 3350 bus_addr_t tbireg; 3351 3352 if (phy != 0) 3353 return -1; 3354 3355 switch (reg) { 3356 case MII_BMCR: tbireg = SIP_TBICR; break; 3357 case MII_BMSR: tbireg = SIP_TBISR; break; 3358 case MII_ANAR: tbireg = SIP_TANAR; break; 3359 case MII_ANLPAR: tbireg = SIP_TANLPAR; break; 3360 case MII_ANER: tbireg = SIP_TANER; break; 3361 case MII_EXTSR: 3362 /* 3363 * Don't even bother reading the TESR register. 3364 * The manual documents that the device has 3365 * 1000baseX full/half capability, but the 3366 * register itself seems read back 0 on some 3367 * boards. Just hard-code the result. 3368 */ 3369 *val = (EXTSR_1000XFDX | EXTSR_1000XHDX); 3370 return 0; 3371 3372 default: 3373 return 0; 3374 } 3375 3376 *val = bus_space_read_4(sc->sc_st, sc->sc_sh, tbireg) & 0xffff; 3377 if (tbireg == SIP_TBISR) { 3378 /* LINK and ACOMP are switched! */ 3379 int sr = *val; 3380 3381 *val = 0; 3382 if (sr & TBISR_MR_LINK_STATUS) 3383 *val |= BMSR_LINK; 3384 if (sr & TBISR_MR_AN_COMPLETE) 3385 *val |= BMSR_ACOMP; 3386 3387 /* 3388 * The manual claims this register reads back 0 3389 * on hard and soft reset. But we want to let 3390 * the gentbi driver know that we support auto- 3391 * negotiation, so hard-code this bit in the 3392 * result. 3393 */ 3394 *val |= BMSR_ANEG | BMSR_EXTSTAT; 3395 } 3396 3397 return 0; 3398 } 3399 3400 return mii_bitbang_readreg(self, &sipcom_mii_bitbang_ops, phy, reg, 3401 val); 3402 } 3403 3404 /* 3405 * sip_dp83820_mii_writereg: [mii interface function] 3406 * 3407 * Write a PHY register on the MII of the DP83820. 
 */
static int
sipcom_dp83820_mii_writereg(device_t self, int phy, int reg, uint16_t val)
{
	struct sip_softc *sc = device_private(self);

	if (sc->sc_cfg & CFG_TBI_EN) {
		bus_addr_t tbireg;

		/* In TBI mode there is no external MII; only "PHY" 0. */
		if (phy != 0)
			return -1;

		switch (reg) {
		case MII_BMCR: tbireg = SIP_TBICR; break;
		case MII_ANAR: tbireg = SIP_TANAR; break;
		case MII_ANLPAR: tbireg = SIP_TANLPAR; break;
		default:
			/* Other registers are not writable in TBI mode. */
			return 0;
		}

		bus_space_write_4(sc->sc_st, sc->sc_sh, tbireg, val);
		return 0;
	}

	return mii_bitbang_writereg(self, &sipcom_mii_bitbang_ops, phy, reg,
	    val);
}

/*
 * sip_dp83820_mii_statchg:	[mii interface function]
 *
 *	Callback from MII layer when media changes.
 */
static void
sipcom_dp83820_mii_statchg(struct ifnet *ifp)
{
	struct sip_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	uint32_t cfg, pcr;

	/*
	 * Get flow control negotiation result.
	 */
	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
		mii->mii_media_active &= ~IFM_ETH_FMASK;
	}

	/*
	 * Update TXCFG for full-duplex operation.
	 */
	if ((mii->mii_media_active & IFM_FDX) != 0)
		sc->sc_txcfg |= (TXCFG_CSI | TXCFG_HBI);
	else
		sc->sc_txcfg &= ~(TXCFG_CSI | TXCFG_HBI);

	/*
	 * Update RXCFG for full-duplex or loopback.
	 */
	if ((mii->mii_media_active & IFM_FDX) != 0 ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_LOOP)
		sc->sc_rxcfg |= RXCFG_ATX;
	else
		sc->sc_rxcfg &= ~RXCFG_ATX;

	/*
	 * Update CFG for MII/GMII.
	 */
	if (sc->sc_ethercom.ec_if.if_baudrate == IF_Mbps(1000))
		cfg = sc->sc_cfg | CFG_MODE_1000;
	else
		cfg = sc->sc_cfg;

	/*
	 * 802.3x flow control.
	 */
	pcr = 0;
	if (sc->sc_flowflags & IFM_FLOW) {
		if (sc->sc_flowflags & IFM_ETH_TXPAUSE)
			pcr |= sc->sc_rx_flow_thresh;
		if (sc->sc_flowflags & IFM_ETH_RXPAUSE)
			pcr |= PCR_PSEN | PCR_PS_MCAST;
	}

	bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_CFG, cfg);
	bus_space_write_4(sc->sc_st, sc->sc_sh, sc->sc_regs.r_txcfg,
	    sc->sc_txcfg);
	bus_space_write_4(sc->sc_st, sc->sc_sh, sc->sc_regs.r_rxcfg,
	    sc->sc_rxcfg);
	bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_NS_PCR, pcr);
}

/*
 * sip_mii_bitbang_read:	[mii bit-bang interface function]
 *
 *	Read the MII serial port for the MII bit-bang module.
 */
static uint32_t
sipcom_mii_bitbang_read(device_t self)
{
	struct sip_softc *sc = device_private(self);

	return (bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_EROMAR));
}

/*
 * sip_mii_bitbang_write:	[mii bit-bang interface function]
 *
 *	Write the MII serial port for the MII bit-bang module.
 */
static void
sipcom_mii_bitbang_write(device_t self, uint32_t val)
{
	struct sip_softc *sc = device_private(self);

	bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_EROMAR, val);
}

/*
 * sip_sis900_mii_readreg:	[mii interface function]
 *
 *	Read a PHY register on the MII.
 */
static int
sipcom_sis900_mii_readreg(device_t self, int phy, int reg, uint16_t *val)
{
	struct sip_softc *sc = device_private(self);
	uint32_t enphy;

	/*
	 * The PHY of recent SiS chipsets is accessed through bitbang
	 * operations.
	 */
	if (sc->sc_model->sip_product == PCI_PRODUCT_SIS_900)
		return mii_bitbang_readreg(self, &sipcom_mii_bitbang_ops,
		    phy, reg, val);

#ifndef SIS900_MII_RESTRICT
	/*
	 * The SiS 900 has only an internal PHY on the MII.  Only allow
	 * MII address 0.
	 *
	 * NOTE(review): unreachable as written -- the SIS_900 case
	 * already returned via bitbang above; confirm which product
	 * this guard was intended to test.
	 */
	if (sc->sc_model->sip_product == PCI_PRODUCT_SIS_900 && phy != 0)
		return -1;
#endif

	/* Kick off the access and busy-wait for completion (no timeout). */
	bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_ENPHY,
	    (phy << ENPHY_PHYADDR_SHIFT) | (reg << ENPHY_REGADDR_SHIFT) |
	    ENPHY_RWCMD | ENPHY_ACCESS);
	do {
		enphy = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_ENPHY);
	} while (enphy & ENPHY_ACCESS);

	*val = (enphy & ENPHY_PHYDATA) >> ENPHY_DATA_SHIFT;
	return 0;
}

/*
 * sip_sis900_mii_writereg:	[mii interface function]
 *
 *	Write a PHY register on the MII.
 */
static int
sipcom_sis900_mii_writereg(device_t self, int phy, int reg, uint16_t val)
{
	struct sip_softc *sc = device_private(self);
	uint32_t enphy;

	if (sc->sc_model->sip_product == PCI_PRODUCT_SIS_900) {
		return mii_bitbang_writereg(self, &sipcom_mii_bitbang_ops,
		    phy, reg, val);
	}

#ifndef SIS900_MII_RESTRICT
	/*
	 * The SiS 900 has only an internal PHY on the MII.  Only allow
	 * MII address 0.
	 *
	 * NOTE(review): unreachable as written, as in the readreg path.
	 */
	if (sc->sc_model->sip_product == PCI_PRODUCT_SIS_900 && phy != 0)
		return -1;
#endif

	/* Kick off the access and busy-wait for completion (no timeout). */
	bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_ENPHY,
	    (val << ENPHY_DATA_SHIFT) | (phy << ENPHY_PHYADDR_SHIFT) |
	    (reg << ENPHY_REGADDR_SHIFT) | ENPHY_ACCESS);
	do {
		enphy = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_ENPHY);
	} while (enphy & ENPHY_ACCESS);

	return 0;
}

/*
 * sip_sis900_mii_statchg:	[mii interface function]
 *
 *	Callback from MII layer when media changes.
 */
static void
sipcom_sis900_mii_statchg(struct ifnet *ifp)
{
	struct sip_softc *sc = ifp->if_softc;
	struct mii_data *mii = &sc->sc_mii;
	uint32_t flowctl;

	/*
	 * Get flow control negotiation result.
	 */
	if (IFM_SUBTYPE(mii->mii_media.ifm_cur->ifm_media) == IFM_AUTO &&
	    (mii->mii_media_active & IFM_ETH_FMASK) != sc->sc_flowflags) {
		sc->sc_flowflags = mii->mii_media_active & IFM_ETH_FMASK;
		mii->mii_media_active &= ~IFM_ETH_FMASK;
	}

	/*
	 * Update TXCFG for full-duplex operation.
	 */
	if ((mii->mii_media_active & IFM_FDX) != 0)
		sc->sc_txcfg |= (TXCFG_CSI | TXCFG_HBI);
	else
		sc->sc_txcfg &= ~(TXCFG_CSI | TXCFG_HBI);

	/*
	 * Update RXCFG for full-duplex or loopback.
	 */
	if ((mii->mii_media_active & IFM_FDX) != 0 ||
	    IFM_SUBTYPE(mii->mii_media_active) == IFM_LOOP)
		sc->sc_rxcfg |= RXCFG_ATX;
	else
		sc->sc_rxcfg &= ~RXCFG_ATX;

	/*
	 * Update IMR for use of 802.3x flow control.
	 */
	if (sc->sc_flowflags & IFM_FLOW) {
		sc->sc_imr |= (ISR_PAUSE_END | ISR_PAUSE_ST);
		flowctl = FLOWCTL_FLOWEN;
	} else {
		sc->sc_imr &= ~(ISR_PAUSE_END | ISR_PAUSE_ST);
		flowctl = 0;
	}

	bus_space_write_4(sc->sc_st, sc->sc_sh, sc->sc_regs.r_txcfg,
	    sc->sc_txcfg);
	bus_space_write_4(sc->sc_st, sc->sc_sh, sc->sc_regs.r_rxcfg,
	    sc->sc_rxcfg);
	bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_IMR, sc->sc_imr);
	bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_FLOWCTL, flowctl);
}

/*
 * sip_dp83815_mii_readreg:	[mii interface function]
 *
 *	Read a PHY register on the MII.
 */
static int
sipcom_dp83815_mii_readreg(device_t self, int phy, int reg, uint16_t *val)
{
	struct sip_softc *sc = device_private(self);
	uint32_t data;

	/*
	 * The DP83815 only has an internal PHY.  Only allow
	 * MII address 0.
	 */
	if (phy != 0)
		return -1;

	/*
	 * Apparently, after a reset, the DP83815 can take a while
	 * to respond.  During this recovery period, the BMSR returns
	 * a value of 0.  Catch this -- it's not supposed to happen
	 * (the BMSR has some hardcoded-to-1 bits), and wait for the
	 * PHY to come back to life.
	 *
	 * This works out because the BMSR is the first register
	 * read during the PHY probe process.
	 */
	do {
		data = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_NS_PHY(reg));
	} while (reg == MII_BMSR && data == 0);

	*val = data & 0xffff;
	return 0;
}

/*
 * sip_dp83815_mii_writereg:	[mii interface function]
 *
 *	Write a PHY register to the MII.
 */
static int
sipcom_dp83815_mii_writereg(device_t self, int phy, int reg, uint16_t val)
{
	struct sip_softc *sc = device_private(self);

	/*
	 * The DP83815 only has an internal PHY.  Only allow
	 * MII address 0.
	 */
	if (phy != 0)
		return -1;

	bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_NS_PHY(reg), val);

	return 0;
}

/*
 * sip_dp83815_mii_statchg:	[mii interface function]
 *
 *	Callback from MII layer when media changes.
 */
static void
sipcom_dp83815_mii_statchg(struct ifnet *ifp)
{
	struct sip_softc *sc = ifp->if_softc;

	/*
	 * Update TXCFG for full-duplex operation.
	 */
	if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0)
		sc->sc_txcfg |= (TXCFG_CSI | TXCFG_HBI);
	else
		sc->sc_txcfg &= ~(TXCFG_CSI | TXCFG_HBI);

	/*
	 * Update RXCFG for full-duplex or loopback.
	 */
	if ((sc->sc_mii.mii_media_active & IFM_FDX) != 0 ||
	    IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_LOOP)
		sc->sc_rxcfg |= RXCFG_ATX;
	else
		sc->sc_rxcfg &= ~RXCFG_ATX;

	/*
	 * XXX 802.3x flow control.
	 */

	bus_space_write_4(sc->sc_st, sc->sc_sh, sc->sc_regs.r_txcfg,
	    sc->sc_txcfg);
	bus_space_write_4(sc->sc_st, sc->sc_sh, sc->sc_regs.r_rxcfg,
	    sc->sc_rxcfg);

	/*
	 * Some DP83815s experience problems when used with short
	 * (< 30m/100ft) Ethernet cables in 100BaseTX mode.  This
	 * sequence adjusts the DSP's signal attenuation to fix the
	 * problem.
	 *
	 * The 0x00cc/0x00f4/0x00fc offsets and magic values below are
	 * an undocumented vendor workaround; do not alter them.
	 */
	if (IFM_SUBTYPE(sc->sc_mii.mii_media_active) == IFM_100_TX) {
		uint32_t reg;

		bus_space_write_4(sc->sc_st, sc->sc_sh, 0x00cc, 0x0001);

		reg = bus_space_read_4(sc->sc_st, sc->sc_sh, 0x00f4);
		reg &= 0x0fff;
		bus_space_write_4(sc->sc_st, sc->sc_sh, 0x00f4, reg | 0x1000);
		delay(100);
		reg = bus_space_read_4(sc->sc_st, sc->sc_sh, 0x00fc);
		reg &= 0x00ff;
		if ((reg & 0x0080) == 0 || (reg >= 0x00d8)) {
			bus_space_write_4(sc->sc_st, sc->sc_sh, 0x00fc,
			    0x00e8);
			reg = bus_space_read_4(sc->sc_st, sc->sc_sh, 0x00f4);
			bus_space_write_4(sc->sc_st, sc->sc_sh, 0x00f4,
			    reg | 0x20);
		}

		bus_space_write_4(sc->sc_st, sc->sc_sh, 0x00cc, 0);
	}
}

/*
 * Read the MAC address of a DP83820 out of its EEPROM and verify the
 * simple additive checksum stored alongside it.
 */
static void
sipcom_dp83820_read_macaddr(struct sip_softc *sc,
    const struct pci_attach_args *pa, uint8_t *enaddr)
{
	uint16_t eeprom_data[SIP_DP83820_EEPROM_LENGTH / 2];
	uint8_t cksum, *e, match;
	int i;

	/*
	 * EEPROM data format for the DP83820 can be found in
	 * the DP83820 manual, section 4.2.4.
	 */

	sipcom_read_eeprom(sc, 0, __arraycount(eeprom_data), eeprom_data);

	match = eeprom_data[SIP_DP83820_EEPROM_CHECKSUM / 2] >> 8;
	match = ~(match - 1);

	cksum = 0x55;
	e = (uint8_t *)eeprom_data;
	for (i = 0; i < SIP_DP83820_EEPROM_CHECKSUM; i++)
		cksum += *e++;

	/* NOTE(review): message lacks a trailing newline -- confirm. */
	if (cksum != match)
		printf("%s: Checksum (%x) mismatch (%x)",
		    device_xname(sc->sc_dev), cksum, match);

	/* The station address is stored byte-swapped in the PMATCH words. */
	enaddr[0] = eeprom_data[SIP_DP83820_EEPROM_PMATCH2 / 2] & 0xff;
	enaddr[1] = eeprom_data[SIP_DP83820_EEPROM_PMATCH2 / 2] >> 8;
	enaddr[2] = eeprom_data[SIP_DP83820_EEPROM_PMATCH1 / 2] & 0xff;
	enaddr[3] = eeprom_data[SIP_DP83820_EEPROM_PMATCH1 / 2] >> 8;
	enaddr[4] = eeprom_data[SIP_DP83820_EEPROM_PMATCH0 / 2] & 0xff;
	enaddr[5] = eeprom_data[SIP_DP83820_EEPROM_PMATCH0 / 2] >> 8;
}

/*
 * Delay between EEPROM clock transitions on the SiS 900, implemented
 * as a series of dummy register reads.
 */
static void
sipcom_sis900_eeprom_delay(struct sip_softc *sc)
{
	int i;

	/*
	 * FreeBSD goes from (300/33)+1 [10] to 0.  There must be
	 * a reason, but I don't know it.
	 */
	for (i = 0; i < 10; i++)
		bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_CR);
}

/*
 * Read the MAC address of a SiS 900, using the method appropriate to
 * the chip revision: filter registers (630 family), shared EEPROM
 * with arbitration (960), or plain EEPROM (everything else).
 */
static void
sipcom_sis900_read_macaddr(struct sip_softc *sc,
    const struct pci_attach_args *pa, uint8_t *enaddr)
{
	uint16_t myea[ETHER_ADDR_LEN / 2];

	switch (sc->sc_rev) {
	case SIS_REV_630S:
	case SIS_REV_630E:
	case SIS_REV_630EA1:
	case SIS_REV_630ET:
	case SIS_REV_635:
		/*
		 * The MAC address for the on-board Ethernet of
		 * the SiS 630 chipset is in the NVRAM.  Kick
		 * the chip into re-loading it from NVRAM, and
		 * read the MAC address out of the filter registers.
		 */
		bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_CR, CR_RLD);

		bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RFCR,
		    RFCR_RFADDR_NODE0);
		myea[0] = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_RFDR) &
		    0xffff;

		bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RFCR,
		    RFCR_RFADDR_NODE2);
		myea[1] = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_RFDR) &
		    0xffff;

		bus_space_write_4(sc->sc_st, sc->sc_sh, SIP_RFCR,
		    RFCR_RFADDR_NODE4);
		myea[2] = bus_space_read_4(sc->sc_st, sc->sc_sh, SIP_RFDR) &
		    0xffff;
		break;

	case SIS_REV_960:
	{
#define	SIS_SET_EROMAR(x, y)						\
	bus_space_write_4(x->sc_st, x->sc_sh, SIP_EROMAR,		\
	    bus_space_read_4(x->sc_st, x->sc_sh, SIP_EROMAR) | (y))

#define	SIS_CLR_EROMAR(x, y)						\
	bus_space_write_4(x->sc_st, x->sc_sh, SIP_EROMAR,		\
	    bus_space_read_4(x->sc_st, x->sc_sh, SIP_EROMAR) & ~(y))

		int waittime, i;

		/* Allow to read EEPROM from LAN. It is shared
		 * between a 1394 controller and the NIC and each
		 * time we access it, we need to set SIS_EECMD_REQ.
		 */
		SIS_SET_EROMAR(sc, EROMAR_REQ);

		for (waittime = 0; waittime < 1000; waittime++) { /* 1 ms max */
			/* Force EEPROM to idle state. */

			/*
			 * XXX-cube This is ugly.
			 * I'll look for docs about it.
			 */
			SIS_SET_EROMAR(sc, EROMAR_EECS);
			sipcom_sis900_eeprom_delay(sc);
			for (i = 0; i <= 25; i++) { /* Yes, 26 times. */
				SIS_SET_EROMAR(sc, EROMAR_EESK);
				sipcom_sis900_eeprom_delay(sc);
				SIS_CLR_EROMAR(sc, EROMAR_EESK);
				sipcom_sis900_eeprom_delay(sc);
			}
			SIS_CLR_EROMAR(sc, EROMAR_EECS);
			sipcom_sis900_eeprom_delay(sc);
			bus_space_write_4(sc->sc_st, sc->sc_sh,
			    SIP_EROMAR, 0);

			if (bus_space_read_4(sc->sc_st, sc->sc_sh,
			    SIP_EROMAR) & EROMAR_GNT) {
				sipcom_read_eeprom(sc,
				    SIP_EEPROM_ETHERNET_ID0 >> 1,
				    sizeof(myea) / sizeof(myea[0]),
				    myea);
				break;
			}
			DELAY(1);
		}

		/*
		 * Set SIS_EECTL_CLK to high, so a other master
		 * can operate on the i2c bus.
		 */
		SIS_SET_EROMAR(sc, EROMAR_EESK);

		/* Refuse EEPROM access by LAN */
		SIS_SET_EROMAR(sc, EROMAR_DONE);
	} break;

	default:
		sipcom_read_eeprom(sc, SIP_EEPROM_ETHERNET_ID0 >> 1,
		    sizeof(myea) / sizeof(myea[0]), myea);
	}

	enaddr[0] = myea[0] & 0xff;
	enaddr[1] = myea[0] >> 8;
	enaddr[2] = myea[1] & 0xff;
	enaddr[3] = myea[1] >> 8;
	enaddr[4] = myea[2] & 0xff;
	enaddr[5] = myea[2] >> 8;
}

/* Table and macro to bit-reverse an octet.
 */
static const uint8_t bbr4[] = {0,8,4,12,2,10,6,14,1,9,5,13,3,11,7,15};
#define bbr(v)	((bbr4[(v)&0xf] << 4) | bbr4[((v)>>4) & 0xf])

/*
 * Read the MAC address of a DP83815 out of its EEPROM.  The address is
 * stored as a bit stream spanning words 6..8, so the bytes have to be
 * reassembled across word boundaries and then bit-reversed.
 */
static void
sipcom_dp83815_read_macaddr(struct sip_softc *sc,
    const struct pci_attach_args *pa, uint8_t *enaddr)
{
	uint16_t eeprom_data[SIP_DP83815_EEPROM_LENGTH / 2], *ea;
	uint8_t cksum, *e, match;
	int i;

	sipcom_read_eeprom(sc, 0, sizeof(eeprom_data) /
	    sizeof(eeprom_data[0]), eeprom_data);

	match = eeprom_data[SIP_DP83815_EEPROM_CHECKSUM/2] >> 8;
	match = ~(match - 1);

	cksum = 0x55;
	e = (uint8_t *)eeprom_data;
	for (i = 0; i < SIP_DP83815_EEPROM_CHECKSUM; i++)
		cksum += *e++;

	/* NOTE(review): message lacks a trailing newline -- confirm. */
	if (cksum != match)
		printf("%s: Checksum (%x) mismatch (%x)",
		    device_xname(sc->sc_dev), cksum, match);

	/*
	 * Unrolled because it makes slightly more sense this way.
	 * The DP83815 stores the MAC address in bit 0 of word 6
	 * through bit 15 of word 8.
	 */
	ea = &eeprom_data[6];
	enaddr[0] = ((*ea & 0x1) << 7);
	ea++;
	enaddr[0] |= ((*ea & 0xFE00) >> 9);
	enaddr[1] = ((*ea & 0x1FE) >> 1);
	enaddr[2] = ((*ea & 0x1) << 7);
	ea++;
	enaddr[2] |= ((*ea & 0xFE00) >> 9);
	enaddr[3] = ((*ea & 0x1FE) >> 1);
	enaddr[4] = ((*ea & 0x1) << 7);
	ea++;
	enaddr[4] |= ((*ea & 0xFE00) >> 9);
	enaddr[5] = ((*ea & 0x1FE) >> 1);

	/*
	 * In case that's not weird enough, we also need to reverse
	 * the bits in each byte.  This all actually makes more sense
	 * if you think about the EEPROM storage as an array of bits
	 * being shifted into bytes, but that's not how we're looking
	 * at it here...
	 */
	for (i = 0; i < 6; i++)
		enaddr[i] = bbr(enaddr[i]);
}

/*
 * sip_mediastatus:	[ifmedia interface function]
 *
 *	Get the current interface media status.
3998 */ 3999 static void 4000 sipcom_mediastatus(struct ifnet *ifp, struct ifmediareq *ifmr) 4001 { 4002 struct sip_softc *sc = ifp->if_softc; 4003 4004 if (!device_is_active(sc->sc_dev)) { 4005 ifmr->ifm_active = IFM_ETHER | IFM_NONE; 4006 ifmr->ifm_status = 0; 4007 return; 4008 } 4009 ether_mediastatus(ifp, ifmr); 4010 ifmr->ifm_active = (ifmr->ifm_active & ~IFM_ETH_FMASK) | 4011 sc->sc_flowflags; 4012 } 4013